diff --git a/.gitignore b/.gitignore
index 3c7666defe..84b395859f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,11 +1,34 @@
 cothority
 .idea
 notes.txt
-deploy/deterlab/build
-deploy/deterlab/deploy/*
-deploy/deterlab/remote/*
-deploy/deterlab/config.toml
 test_data
-app/app
-deploy/deterlab/remote/*
+app/sign/sign
+app/stamp/stamp
+app/shamir/shamir
+app/naive/naive
+app/ntree/ntree
+deploy/platform/deterlab/build
+deploy/platform/deterlab/remote/*
+deploy/platform/deterlab/deter.toml
+deploy/platform/localhost/*
+deploy/platform/localhost
+deploy/deploy
+deploy/matplotlib/*csv
+deploy/matplotlib/*png
+deploy/matplotlib/imgs
 test/
+deploy/platform/deterlab/forkexec/forkexec
+deploy/platform/Deterlab/users/users
+app/conode/config.toml
+app/conode/conode
+app/conode/conode*.tar.gz
+app/conode/conode-bin/
+app/conode/hosts.list
+app/conode/key.priv
+app/conode/key.pub
+app/conode/local_keys/
+app/conode/old/
+app/conode/real/
+
+*.pyc
+*.toml
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000..609fa7466a
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,21 @@
+# build script for dedis/cothority libraries
+go:
+  - 1.5.1
+env:
+  global:
+    - REPO="dedis/cothority"
+    - HOME="/home/travis"
+    - GOPATH="$HOME"
+    - PATH="$HOME/bin:$PATH"
+before_install:
+  - mkdir -p $HOME/src/github.com/dedis
+  - mkdir -p $HOME/bin
+  - ls $TRAVIS_BUILD_DIR
+  - mv $TRAVIS_BUILD_DIR $HOME/src/github.com/dedis
+  - cd $HOME/src/github.com/dedis/cothority
+  - go list -f '{{join .Deps "\n"}} {{join .TestImports "\n"}}' ./... | grep -v "^github.com/dedis/cothority" | xargs go get -t -v
+before_script:
+  - git remote add production https://github.com/DeDiS/cothority.git
+  - git fetch -a production
+script:
+  - go run dedis-travis-script.go
diff --git a/README.md b/README.md
index f7a1268851..f5ab08ad58 100644
--- a/README.md
+++ b/README.md
@@ -1,79 +1,108 @@
 # Cothority
-The code permits the testing and running of a cothority-system together with the applications. It is split up in
-deployment, application and protocols. The basic cryptographic code comes from DeDiS/crypto. The following modules
-are used:
+The code in this repository permits the testing and running of a cothority-system together with some applications. It is split up into deployment, applications and protocols. The basic cryptographic code comes from [DeDiS/crypto](https://github.com/DeDiS/crypto).
-Deploy
+## Warning
+**The software provided in this repository is highly experimental and under heavy development. Do not use it for anything security-critical. All usage is at your own risk!**
- * Deter - running
- * Go-routines - in preparation
- * Future:
- * Docker
- * LXC
+## Deploy
-Applications
+* Available:
+ * [DeterLab](deterlab.net)
+ * Localhost
+* Planned:
+ * Docker
+ * LXC
- * timestamping
- * signing - needs to collect more data
- * vote - doesn't run yet.
-
-Protocols
+## Applications
- * collective signing
- * joint threshold signing - work in progress
-
-# How to run
+* Available:
+ * Timestamping
+ * Signing
+ * Shamir-secret-service: regular or tree signing
+* Planned:
+ * Randhound: decentralized randomness cothority
+ * Vote
-For the moment only the timestamping on Deterlab works:
+## Protocols
-In the top-level directory, type
+* Collective signing
+
+# How to Run a Cothority
+
+All applications in `app/*` are stand-alone. Currently, they can be used by deploying to either localhost or DeterLab.
+
+## Localhost
+To run a simple signing check on localhost, execute the following commands:
 ```
-go get ./...
-go build
-./cothority
+$ go get ./...
+$ cd deploy
+$ go build
+$ ./deploy -deploy localhost simulation/sign_single.toml
 ```
-then enter the name of the deterlab-installation, your username and your project-name, and you should
-be ready to go. The arguments are:
+## DeterLab
+
+If you use the `-deploy deterlab` option, then you are prompted to enter the name of the DeterLab installation, your username, and the names of the project and experiment. There are some flags which make your life as a cothority developer simpler when deploying to DeterLab:
+
+* `-nobuild`: don't build any of the helpers, which is useful if you're working on the main code
+* `-build "helper1,helper2"`: only build the listed helpers, separated by a ",", which speeds up recompiling
+* `-range start:end`: runs only the simulation-lines from `start` to `end`, inclusive. Counting starts at 0; `start` and `end` can be omitted and then default to the first and the last line, respectively.
- * -debug - number between 0 and 5 - 0 is silent, 5 is very verbose
- * -deploy [deterlab,gochannels] - by default is "deterlab" - gochannels are next
- * -app [server,client] - whether to run the application as server or client - not yet implemented
+### SSH-keys
+For convenience, we recommend that you upload a public SSH-key to the DeterLab site. If your SSH-key is protected by a passphrase (which should be the case for security reasons!), we further recommend that you add your private key to your SSH-agent / keychain. Afterwards you only need to unlock your SSH-agent / keychain once (per session) and can access all your stored keys without typing the passphrase each time.
-For the sake of easy development there are some switches that are to be used only for the
-deterlab implementation:
+**OSX:**
+
+You can store your SSH-key directly in the OSX-keychain by executing:
+
+```
+$ /usr/bin/ssh-add -K ~/.ssh/
+```
- * -nobuild - don't build any of the helpers - useful if you're working on the main code
- * -build "helper1,helper2" - only build the helpers, separated by a "," - speeds up recompiling
- * -machines # - tells how many machines are to be used for the run
+Make sure that you actually use the `ssh-add` program that comes with your OSX installation, since those installed through [homebrew](http://brew.sh/), [MacPorts](https://www.macports.org/) etc. **do not support** the `-K` flag by default.
+**Linux:**
+
+Make sure that the `ssh-agent` is running. Afterwards you can add your SSH-key via:
+
+```
+$ ssh-add ~/.ssh/
+```
 # Applications
+## CoNode
+
+You can find more information about CoNode in the corresponding [README](https://github.com/DeDiS/cothority/blob/development/app/conode/README.md).
+
 ## Timestamping
-For the moment the only running application - it sets up servers that listen for client-requests, collect all
-requests and handle them to a root-node.
+Sets up servers that listen for client-requests, collects all requests and hands them to a root-node for timestamping.
 ## Signing
-A simple mechanism that only receives a message, signs it, and returns it.
+A simple mechanism that is capable of receiving messages and returning their signatures.
-## Voting
+## RandHound
-Not done yet
+Test-implementation of a randomization-protocol based on cothority.
 # Protocols
 We want to compare different protocols for signing and timestamping uses.
-## Collective signing
+## Collective Signing
 This one runs well and is described in a pre-print from Dylan Visher.
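+
+For a quick benchmark of this protocol you can reuse the deployment tool described above. The following invocation is only a sketch: it assumes, as in the localhost example, that `simulation/sign_single.toml` drives the collective-signing simulation, and it combines that file with the DeterLab flags documented earlier in this README:
+
+```
+$ cd deploy
+$ go build
+$ ./deploy -deploy deterlab -range 0:3 simulation/sign_single.toml
+```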
-## Join threshold signing +## Shamir Signing + +A textbook shamir signing for baseline-comparison against the collective signing protocol. + -A baseline-comparison being developed by the DeDiS-lab at EPFL. +# Further Information +* Decentralizing Authorities into Scalable Strongest-Link Cothorities: [paper](http://arxiv.org/pdf/1503.08768v1.pdf), [slides](http://dedis.cs.yale.edu/dissent/pres/150610-nist-cothorities.pdf) +* Certificate Cothority - Towards Trustworthy Collective CAs: [paper](https://petsymposium.org/2015/papers/syta-cc-hotpets2015.pdf) diff --git a/app/app.go b/app/app.go deleted file mode 100644 index ba583398c8..0000000000 --- a/app/app.go +++ /dev/null @@ -1,114 +0,0 @@ -// usage exec: -// -// exec -name "appConf.Hostname" -config "tree.json" -// -// -name indicates the name of the node in the tree.json -// -// -config points to the file that holds the configuration. -// This configuration must be in terms of the final appConf.Hostnames. -// -// pprof runs on the physical address space [if there is a virtual and physical network layer] -// and if one is specified. - -package main - -import ( - "flag" - "net" - "net/http" - _ "net/http/pprof" - "strconv" - - log "github.com/Sirupsen/logrus" - "github.com/dedis/cothority/lib/logutils" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/deploy" - "github.com/dedis/cothority/app/coll_sign" - "github.com/dedis/cothority/app/schnorr_sign" - "github.com/dedis/cothority/app/coll_stamp" - "github.com/dedis/cothority/lib/config" - "os" -) - -var deter *deploy.Deter -var conf *deploy.Config -var appConf config.AppConfig - -// TODO: add debug flag for more debugging information (memprofilerate...) -func init() { - flag.StringVar(&appConf.Hostname, "hostname", "", "the appConf.Hostname of this node") - flag.StringVar(&appConf.Logger, "logger", "", "remote appConf.Logger") - flag.StringVar(&appConf.PhysAddr, "physaddr", "", "the physical address of the noded [for deterlab]") - flag.BoolVar(&appConf.AmRoot, "amroot", false, "am I root node") - flag.BoolVar(&appConf.TestConnect, "test_connect", false, "test connecting and disconnecting") - flag.StringVar(&appConf.App, "app", appConf.App, "Which application to run [coll_sign, coll_stamp]") - flag.StringVar(&appConf.Mode, "mode", appConf.Mode, "Run the app in [server,client] mode") - flag.StringVar(&appConf.Name, "name", appConf.Name, "Name of the node") - flag.StringVar(&appConf.Server, "server", "", "the timestamping servers to contact") -} - -func main() { - deter, err := deploy.ReadConfig() - if err != nil { - log.Fatal("Couldn't load config-file in exec") - } - conf = deter.Config - dbg.DebugVisible = conf.Debug - - flag.Parse() - - dbg.Lvl3("Running", appConf.App, appConf.Hostname, "with logger at", appConf.Logger) - defer func() { - log.Errorln("Terminating host", appConf.Hostname) - }() - - // connect with the logging server - if appConf.Logger != "" && (appConf.AmRoot || conf.Debug > 0) { - // blocks until we can connect to the appConf.Logger - dbg.Lvl3(appConf.Hostname, "Connecting to Logger") - lh, err := logutils.NewLoggerHook(appConf.Logger, appConf.Hostname, conf.App) - if err != nil { - log.WithFields(log.Fields{ - "file": logutils.File(), - }).Fatalln("Error setting up logging server:", err) - } - log.AddHook(lh) - //log.SetOutput(ioutil.Discard) - //fmt.Println("exiting appConf.Logger block") - dbg.Lvl4(appConf.Hostname, "Done setting up hook") - } - - if appConf.Mode == "server" { - if appConf.PhysAddr == "" { - h, _, err := 
net.SplitHostPort(appConf.Hostname) - if err != nil { - log.Fatal(appConf.Hostname, "improperly formatted hostname", os.Args) - } - appConf.PhysAddr = h - } - - // run an http server to serve the cpu and memory profiles - go func() { - _, port, err := net.SplitHostPort(appConf.Hostname) - if err != nil { - log.Fatal(appConf.Hostname, "improperly formatted hostname: should be host:port") - } - p, _ := strconv.Atoi(port) - // uncomment if more fine grained memory debuggin is needed - //runtime.MemProfileRate = 1 - dbg.Lvl3(http.ListenAndServe(net.JoinHostPort(appConf.PhysAddr, strconv.Itoa(p + 2)), nil)) - }() - } - - dbg.Lvl3("Running timestamp with rFail and fFail: ", conf.RFail, conf.FFail) - - switch appConf.App{ - case "coll_sign": - coll_sign.Run(&appConf, conf) - case "coll_stamp": - coll_stamp.Run(&appConf, conf) - case "schnorr_sign": - schnorr_sign.Run(&appConf, conf) - } -} - diff --git a/app/coll_sign/client.go b/app/coll_sign/client.go deleted file mode 100644 index f7c8c87574..0000000000 --- a/app/coll_sign/client.go +++ /dev/null @@ -1,110 +0,0 @@ -package coll_sign -import ( - "github.com/dedis/cothority/deploy" - "time" - "github.com/dedis/cothority/lib/logutils" - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/lib/config" - "sync/atomic" - - "strconv" -"github.com/dedis/cothority/lib/proof" - "github.com/dedis/cothority/lib/hashid" -) - -var MAX_N_SECONDS int = 1 * 60 * 60 // 1 hours' worth of seconds -var MAX_N_ROUNDS int = MAX_N_SECONDS / int(ROUND_TIME / time.Second) -var ROUND_TIME time.Duration = 1 * time.Second - -var done = make(chan string, 1) - -func RunClient(conf *deploy.Config, hc *config.HostConfig) { - buck := make([]int64, 300) - roundsAfter := make([]int64, MAX_N_ROUNDS) - times := make([]int64, MAX_N_SECONDS * 1000) // maximum number of milliseconds (maximum rate > 1 per millisecond) - - dbg.Lvl1("Going to run client and asking servers to print") - time.Sleep(3 * time.Second) - hc.SNodes[0].RegisterDoneFunc(RoundDone) - start := time.Now() - tFirst := time.Now() - - for i := 0; i < conf.Rounds; i++ { - time.Sleep(time.Second) - //fmt.Println("ANNOUNCING") - hc.SNodes[0].LogTest = []byte("Hello World") - dbg.Lvl3("Going to launch announcement ", hc.SNodes[0].Name()) - start = time.Now() - t0 := time.Now() - - err := hc.SNodes[0].StartSigningRound() - if err != nil { - dbg.Lvl1(err) - } - - select { - case msg := <-done: - dbg.Lvl3("Received reply from children", msg) - case <-time.After(10 * ROUND_TIME): - dbg.Lvl3("client timeouted on waiting for response from") - continue - } - - t := time.Since(t0) - elapsed := time.Since(start) - secToTimeStamp := t.Seconds() - secSinceFirst := time.Since(tFirst).Seconds() - atomic.AddInt64(&buck[int(secSinceFirst)], 1) - index := int(secToTimeStamp) / int(ROUND_TIME / time.Second) - atomic.AddInt64(&roundsAfter[index], 1) - atomic.AddInt64(×[i], t.Nanoseconds()) - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "root_announce", - "round": i, - "time": elapsed, - }).Info("") - - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "root_round", - "round": i, - "time": elapsed, - }).Info("root round") - } - - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "client_msg_stats", - "buck": removeTrailingZeroes(buck), - "roundsAfter": removeTrailingZeroes(roundsAfter), - "times": removeTrailingZeroes(times), - }).Info("") - - // And tell everybody to quit - err := hc.SNodes[0].CloseAll(hc.SNodes[0].Round) - if err != nil 
{ - log.Fatal("Couldn't close:", err) - } -} - -func RoundDone(view int, SNRoot hashid.HashId, LogHash hashid.HashId, p proof.Proof) { - dbg.Lvl3(view, "finished round") - done <- "Done with view: " + strconv.Itoa(view) -} - -func RunClientStandalone(conf *deploy.Config, hc *config.HostConfig){ - -} - -func removeTrailingZeroes(a []int64) []int64 { - i := len(a) - 1 - for ; i >= 0; i-- { - if a[i] != 0 { - break - } - } - return a[:i + 1] -} - diff --git a/app/coll_sign/coll_sign.go b/app/coll_sign/coll_sign.go deleted file mode 100644 index 04ce2a83c3..0000000000 --- a/app/coll_sign/coll_sign.go +++ /dev/null @@ -1,107 +0,0 @@ -package coll_sign - -import ( - "fmt" - "github.com/dedis/cothority/deploy" - "github.com/dedis/cothority/lib/config" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/crypto/abstract" - "time" - "os" - "io/ioutil" - "github.com/dedis/crypto/edwards/ed25519" - "github.com/dedis/crypto/nist" - "log" -) - -// Dispatch-function for running either client or server (mode-parameter) -func Run(app *config.AppConfig, conf *deploy.Config) { - // Do some common setup - if app.Mode == "client"{ - app.Hostname = app.Name - } - dbg.Lvl3(app.Hostname, "Starting to run") - if conf.Debug > 1 { - sign.DEBUG = true - } - - if app.Hostname == "" { - log.Fatal("no hostname given", app.Hostname) - } - - // load the configuration - dbg.Lvl3("loading configuration for", app.Hostname) - var hc *config.HostConfig - var err error - s := GetSuite(conf.Suite) - opts := config.ConfigOptions{ConnType: "tcp", Host: app.Hostname, Suite: s} - if conf.Failures > 0 || conf.FFail > 0 { - opts.Faulty = true - } - hc, err = config.LoadConfig("tree.json", opts) - if err != nil { - fmt.Println(err) - log.Fatal(err) - } - - // Wait for everybody to be ready before going on - ioutil.WriteFile("coll_stamp_up/up" + app.Hostname, []byte("started"), 0666) - for { - _, err := os.Stat("coll_stamp_up") - if err == nil { - files, _ := ioutil.ReadDir("coll_stamp_up") - dbg.Lvl4(app.Hostname, "waiting for others to finish", len(files)) - time.Sleep(time.Second) - } else { - break - } - } - dbg.Lvl2(app.Hostname, "thinks everybody's here") - - // set FailureRates - if conf.Failures > 0 { - for i := range hc.SNodes { - hc.SNodes[i].FailureRate = conf.Failures - } - } - - // set root failures - if conf.RFail > 0 { - for i := range hc.SNodes { - hc.SNodes[i].FailAsRootEvery = conf.RFail - - } - } - // set follower failures - // a follower fails on %ffail round with failureRate probability - for i := range hc.SNodes { - hc.SNodes[i].FailAsFollowerEvery = conf.FFail - } - - defer func() { - dbg.Lvl1("Collective Signing", app.Hostname, "has terminated in mode", app.Mode) - }() - - switch app.Mode { - case "client": - log.Panic("No client mode") - case "server": - RunServer(app, conf, hc) - } -} - -func GetSuite(suite string) abstract.Suite { - var s abstract.Suite - switch { - case suite == "nist256": - s = nist.NewAES128SHA256P256() - case suite == "nist512": - s = nist.NewAES128SHA256QR512() - case suite == "ed25519": - s = ed25519.NewAES128SHA256Ed25519(true) - default: - s = nist.NewAES128SHA256P256() - } - return s -} diff --git a/app/coll_sign/server.go b/app/coll_sign/server.go deleted file mode 100644 index 852726e438..0000000000 --- a/app/coll_sign/server.go +++ /dev/null @@ -1,31 +0,0 @@ -package coll_sign -import -( - "github.com/dedis/cothority/deploy" - "time" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/lib/config" 
- log "github.com/Sirupsen/logrus" - "github.com/dedis/cothority/proto/sign" -) - -func RunServer(app *config.AppConfig, conf *deploy.Config, hc *config.HostConfig) { - // run this specific host - err := hc.Run(false, sign.MerkleTree, app.Hostname) - if err != nil { - log.Fatal(err) - } - - dbg.Lvl3(app.Hostname, "started up in server-mode") - - // Let's start the client if we're the root-node - if hc.SNodes[0].IsRoot(0) { - dbg.Lvl1(app.Hostname, "started client") - RunClient(conf, hc) - } else{ - // Endless-loop till we stop by tearing down the connections - for { - time.Sleep(time.Minute) - } - } -} diff --git a/app/coll_stamp/client.go b/app/coll_stamp/client.go deleted file mode 100644 index a8f136365c..0000000000 --- a/app/coll_stamp/client.go +++ /dev/null @@ -1,154 +0,0 @@ -package coll_stamp - -import ( - "crypto/rand" - "io" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - - "github.com/dedis/cothority/lib/coconet" - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/cothority/lib/logutils" -) - -var muStats sync.Mutex - -var MAX_N_SECONDS int = 1 * 60 * 60 // 1 hours' worth of seconds -var MAX_N_ROUNDS int = MAX_N_SECONDS / int(ROUND_TIME / time.Second) - -func RunClient(server string, nmsgs int, name string, rate int) { - dbg.Lvl4("Starting to run stampclient") - c := NewClient(name) - servers := strings.Split(server, ",") - - // connect to all the servers listed - for _, s := range servers { - h, p, err := net.SplitHostPort(s) - if err != nil { - log.Fatal("improperly formatted host") - } - pn, _ := strconv.Atoi(p) - c.AddServer(s, coconet.NewTCPConn(net.JoinHostPort(h, strconv.Itoa(pn + 1)))) - } - - // Check if somebody asks for the old way - if rate < 0 { - log.Fatal("Rounds based limiting deprecated") - } - - // Stream time coll_stamp requests - // if rate specified send out one message every rate milliseconds - dbg.Lvl1(name, "starting to stream at rate", rate) - streamMessgs(c, servers, rate) - dbg.Lvl4("Finished streaming") - return -} - -func AggregateStats(buck, roundsAfter, times []int64) string { - muStats.Lock() - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "client_msg_stats", - "buck": removeTrailingZeroes(buck), - "roundsAfter": removeTrailingZeroes(roundsAfter), - "times": removeTrailingZeroes(times), - }).Info("") - muStats.Unlock() - return "Client Finished Aggregating Statistics" -} - -func genRandomMessages(n int) [][]byte { - msgs := make([][]byte, n) - for i := range msgs { - msgs[i] = make([]byte, hashid.Size) - _, err := rand.Read(msgs[i]) - if err != nil { - log.Fatal("failed to generate random commit:", err) - } - } - return msgs -} - -func removeTrailingZeroes(a []int64) []int64 { - i := len(a) - 1 - for ; i >= 0; i-- { - if a[i] != 0 { - break - } - } - return a[:i + 1] -} - -func streamMessgs(c *Client, servers []string, rate int) { - dbg.Lvl4(c.Name(), "streaming at given rate", rate) - // buck[i] = # of timestamp responses received in second i - buck := make([]int64, MAX_N_SECONDS) - // roundsAfter[i] = # of timestamp requests that were processed i rounds late - roundsAfter := make([]int64, MAX_N_ROUNDS) - times := make([]int64, MAX_N_SECONDS * 1000) // maximum number of milliseconds (maximum rate > 1 per millisecond) - ticker := time.Tick(time.Duration(rate) * time.Millisecond) - msg := genRandomMessages(1)[0] - i := 0 - nServers := len(servers) - - retry: - dbg.Lvl3(c.Name(), "checking if", servers[0], "is already 
up") - err := c.TimeStamp(msg, servers[0]) - if err == io.EOF || err == coconet.ErrClosed { - dbg.Lvl4("Client", c.Name(), "DONE: couldn't connect to TimeStamp") - log.Fatal(AggregateStats(buck, roundsAfter, times)) - } else if err == ErrClientToTSTimeout { - dbg.Lvl4(err.Error()) - } else if err != nil { - time.Sleep(500 * time.Millisecond) - goto retry - } - dbg.Lvl3(c.Name(), "successfully connected to", servers[0]) - - tFirst := time.Now() - - // every tick send a time coll_stamp request to every server specified - // this will stream until we get an EOF - tick := 0 - for _ = range ticker { - tick += 1 - go func(msg []byte, s string, tick int) { - t0 := time.Now() - err := c.TimeStamp(msg, s) - t := time.Since(t0) - - if err == io.EOF || err == coconet.ErrClosed { - if err == io.EOF { - dbg.Lvl4("CLIENT ", c.Name(), "DONE: terminating due to EOF", s) - } else { - dbg.Lvl4("CLIENT ", c.Name(), "DONE: terminating due to Connection Error Closed", s) - } - log.Fatal(AggregateStats(buck, roundsAfter, times)) - } else if err != nil { - // ignore errors - dbg.Lvl4("Client", c.Name(), "Leaving out streamMessages. ", err) - return - } - - // TODO: we might want to subtract a buffer from secToTimeStamp - // to account for computation time - secToTimeStamp := t.Seconds() - secSinceFirst := time.Since(tFirst).Seconds() - atomic.AddInt64(&buck[int(secSinceFirst)], 1) - index := int(secToTimeStamp) / int(ROUND_TIME / time.Second) - atomic.AddInt64(&roundsAfter[index], 1) - atomic.AddInt64(×[tick], t.Nanoseconds()) - - }(msg, servers[i], tick) - - i = (i + 1) % nServers - } - -} diff --git a/app/coll_stamp/coll_stamp.go b/app/coll_stamp/coll_stamp.go deleted file mode 100644 index 8f781a8d9c..0000000000 --- a/app/coll_stamp/coll_stamp.go +++ /dev/null @@ -1,230 +0,0 @@ -package coll_stamp -import ( - "github.com/dedis/cothority/lib/coconet" - "strconv" - "net" - "errors" - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/cothority/lib/config" - "time" - "fmt" - "github.com/dedis/crypto/nist" - "github.com/dedis/crypto/edwards/ed25519" - "github.com/dedis/crypto/abstract" - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/deploy" - "io/ioutil" - "os" -) - -var totalHosts int - -func Run(app *config.AppConfig, proto *deploy.Config) { - totalHosts = proto.Nmachs * proto.Hpn - switch app.Mode{ - case "server": - RunServer(app.Hostname, app.App, proto.Rounds, proto.RootWait, proto.Debug, app.TestConnect, - proto.Failures, proto.RFail, proto.FFail, app.Logger, proto.Suite) - case "client": - RunClient(app.Server, proto.Nmsgs, app.Name, proto.Rate) - } -} - - -func GetSuite(suite string) abstract.Suite { - var s abstract.Suite - switch { - case suite == "nist256": - s = nist.NewAES128SHA256P256() - case suite == "nist512": - s = nist.NewAES128SHA256QR512() - case suite == "ed25519": - s = ed25519.NewAES128SHA256Ed25519(true) - default: - s = nist.NewAES128SHA256P256() - } - return s -} - - -func RunServer(hostname, app string, rounds int, rootwait int, debug int, testConnect bool, -failureRate, rFail, fFail int, logger, suite string) { - dbg.Lvl3(hostname, "Starting to run") - if debug > 1 { - sign.DEBUG = true - } - - // fmt.Println("EXEC TIMESTAMPER: " + hostname) - if hostname == "" { - log.Fatal("no hostname given") - } - - // load the configuration - //dbg.Lvl3("loading configuration") - var hc *config.HostConfig - var err error - s := GetSuite(suite) - opts := config.ConfigOptions{ConnType: "tcp", Host: hostname, Suite: s} - if 
failureRate > 0 || fFail > 0 { - opts.Faulty = true - } - - configTime := time.Now() - hc, err = config.LoadConfig("tree.json", opts) - if err != nil { - fmt.Println(err) - log.Fatal(err) - } - dbg.Lvl3(hostname, "finished loading config after", time.Since(configTime)) - - for i := range hc.SNodes { - // set FailureRates - if failureRate > 0 { - hc.SNodes[i].FailureRate = failureRate - } - // set root failures - if rFail > 0 { - hc.SNodes[i].FailAsRootEvery = rFail - } - // set follower failures - // a follower fails on %ffail round with failureRate probability - hc.SNodes[i].FailAsFollowerEvery = fFail - } - - // Wait for everybody to be ready before going on - ioutil.WriteFile("coll_stamp_up/up" + hostname, []byte("started"), 0666) - for { - _, err := os.Stat("coll_stamp_up") - if err == nil { - dbg.Lvl4(hostname, "waiting for others to finish") - time.Sleep(time.Second) - } else { - break - } - } - dbg.Lvl3(hostname, "thinks everybody's here") - - err = hc.Run(app != "coll_sign", sign.MerkleTree, hostname) - if err != nil { - log.Fatal(err) - } - - defer func(sn *sign.Node) { - //log.Panicln("program has terminated:", hostname) - dbg.Lvl1("Program timestamper has terminated:", hostname) - sn.Close() - }(hc.SNodes[0]) - - stampers, _, err := RunTimestamper(hc, 0, hostname) - // get rid of the hc information so it can be GC'ed - hc = nil - if err != nil { - log.Fatal(err) - } - for _, s := range stampers { - // only listen if this is the hostname specified - if s.Name() == hostname { - s.Logger = logger - s.Hostname = hostname - s.App = app - if s.IsRoot(0) { - dbg.Lvl1("Root timestamper at:", hostname, rounds, "Waiting: ", rootwait) - // wait for the other nodes to get set up - time.Sleep(time.Duration(rootwait) * time.Second) - - dbg.Lvl1("Starting root-round") - s.Run("root", rounds) - // dbg.Lvl3("\n\nROOT DONE\n\n") - - } else if !testConnect { - dbg.Lvl2("Running regular timestamper on:", hostname) - s.Run("regular", rounds) - // dbg.Lvl1("\n\nREGULAR DONE\n\n") - } else { - // testing connection - dbg.Lvl1("Running connection-test on:", hostname) - s.Run("test_connect", rounds) - } - } - } -} - -// run each host in hostnameSlice with the number of clients given -func RunTimestamper(hc *config.HostConfig, nclients int, hostnameSlice ...string) ([]*Server, []*Client, error) { - dbg.Lvl3("RunTimestamper on", hc.Hosts) - hostnames := make(map[string]*sign.Node) - // make a list of hostnames we want to run - if hostnameSlice == nil { - hostnames = hc.Hosts - } else { - for _, h := range hostnameSlice { - sn, ok := hc.Hosts[h] - if !ok { - return nil, nil, errors.New("hostname given not in config file:" + h) - } - hostnames[h] = sn - } - } - - Clients := make([]*Client, 0, len(hostnames) * nclients) - // for each client in - stampers := make([]*Server, 0, len(hostnames)) - for _, sn := range hc.SNodes { - if _, ok := hostnames[sn.Name()]; !ok { - log.Errorln("signing node not in hostnmaes") - continue - } - stampers = append(stampers, NewServer(sn)) - if hc.Dir == nil { - dbg.Lvl3(hc.Hosts, "listening for clients") - stampers[len(stampers) - 1].Listen() - } - } - dbg.Lvl3("stampers:", stampers) - clientsLists := make([][]*Client, len(hc.SNodes[1:])) - for i, s := range stampers[1:] { - // cant assume the type of connection - clients := make([]*Client, nclients) - - h, p, err := net.SplitHostPort(s.Name()) - if hc.Dir != nil { - h = s.Name() - } else if err != nil { - log.Fatal("RunTimestamper: bad Tcp host") - } - pn, err := strconv.Atoi(p) - if hc.Dir != nil { - pn = 0 - } else if err 
!= nil { - log.Fatal("port is not valid integer") - } - hp := net.JoinHostPort(h, strconv.Itoa(pn + 1)) - //dbg.Lvl4("client connecting to:", hp) - - for j := range clients { - clients[j] = NewClient("client" + strconv.Itoa((i - 1) * len(stampers) + j)) - var c coconet.Conn - - // if we are using tcp connections - if hc.Dir == nil { - // the timestamp server serves at the old port + 1 - dbg.Lvl4("new tcp conn") - c = coconet.NewTCPConn(hp) - } else { - dbg.Lvl4("new go conn") - c, _ = coconet.NewGoConn(hc.Dir, clients[j].Name(), s.Name()) - stoc, _ := coconet.NewGoConn(hc.Dir, s.Name(), clients[j].Name()) - s.Clients[clients[j].Name()] = stoc - } - // connect to the server from the client - clients[j].AddServer(s.Name(), c) - //clients[j].Sns[s.Name()] = c - //clients[j].Connect() - } - Clients = append(Clients, clients...) - clientsLists[i] = clients - } - - return stampers, Clients, nil -} diff --git a/app/coll_stamp/constants.go b/app/coll_stamp/constants.go deleted file mode 100644 index f3b17b7993..0000000000 --- a/app/coll_stamp/constants.go +++ /dev/null @@ -1,10 +0,0 @@ -package coll_stamp - -import ( - "time" - - "github.com/dedis/cothority/proto/sign" -) - -// time we wait between rounds -var ROUND_TIME time.Duration = sign.ROUND_TIME diff --git a/app/coll_stamp/messg.go b/app/coll_stamp/messg.go deleted file mode 100644 index 4ecfd104b3..0000000000 --- a/app/coll_stamp/messg.go +++ /dev/null @@ -1,151 +0,0 @@ -package coll_stamp - -import ( - "bytes" - "encoding/gob" - - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/cothority/lib/proof" -) - -type SeqNo byte - -// struct to ease keeping track of who requires a reply after -// tsm is processed/ aggregated by the TSServer -type MustReplyMessage struct { - Tsm TimeStampMessage - To string // name of reply destination -} - -type LogEntry struct { - Seq SeqNo // Consecutively-incrementing log entry sequence number - Root hashid.HashId // Merkle root of values committed this time-step - Time *int64 // Optional wall-clock time this entry was created -} - -type SignedEntry struct { - Ent []byte // Encoded LogEntry to which the signature applies - Sig []byte // Digital signature on the LogEntry -} - -type StampRequest struct { - Val []byte // Hash-size value to timestamp -} -type StampReply struct { - Sig []byte // Signature on the root - Prf proof.Proof // Merkle proof of value -} - -// Request to obtain an old log-entry and, optionally, -// a cryptographic proof that it happened before a given newer entry. -// The TSServer may be unable to process if Seq is beyond the retention window. -type EntryRequest struct { - Seq SeqNo // Sequence number of old entry requested -} -type EntryReply struct { - Log SignedEntry // Signed log entry -} - -// Request a cryptographic Merkle proof that log-entry Old happened before New. -// Produces a path to a Merkle tree node containing a hash of the node itself -// and the root of the history values committed within the node. -// The TSServer may be unable to process if Old is beyond the retention window. -type ProofRequest struct { - Old, New SeqNo // Sequence number of old and new log records -} -type ProofReply struct { - Prf proof.Proof // Requested Merkle proof -} - -// XXX not sure we need block requests? 
-type BlockRequest struct { - Ids []hashid.HashId // Hash of block(s) requested -} - -type BlockReply struct { - Dat [][]byte // Content of block(s) requested -} - -type ErrorReply struct { - Msg string // Human-readable error message -} - -type MessageType int - -const ( - Error MessageType = iota - StampRequestType - StampReplyType -) - -type TimeStampMessage struct { - ReqNo SeqNo // Request sequence number - // ErrorReply *ErrorReply // Generic error reply to any request - Type MessageType - Sreq *StampRequest - Srep *StampReply -} - -func (tsm TimeStampMessage) MarshalBinary() ([]byte, error) { - var b bytes.Buffer - var sub []byte - var err error - b.WriteByte(byte(tsm.Type)) - b.WriteByte(byte(tsm.ReqNo)) - // marshal sub message based on its Type - switch tsm.Type { - case StampRequestType: - sub, err = tsm.Sreq.MarshalBinary() - case StampReplyType: - sub, err = tsm.Srep.MarshalBinary() - } - if err == nil { - b.Write(sub) - } - return b.Bytes(), err -} - -func (sm *TimeStampMessage) UnmarshalBinary(data []byte) error { - sm.Type = MessageType(data[0]) - sm.ReqNo = SeqNo(data[1]) - msgBytes := data[2:] - var err error - switch sm.Type { - case StampRequestType: - sm.Sreq = &StampRequest{} - err = sm.Sreq.UnmarshalBinary(msgBytes) - case StampReplyType: - sm.Srep = &StampReply{} - err = sm.Srep.UnmarshalBinary(msgBytes) - - } - return err -} - -func (Sreq StampRequest) MarshalBinary() ([]byte, error) { - var b bytes.Buffer - enc := gob.NewEncoder(&b) - err := enc.Encode(Sreq.Val) - return b.Bytes(), err -} - -func (Sreq *StampRequest) UnmarshalBinary(data []byte) error { - b := bytes.NewBuffer(data) - dec := gob.NewDecoder(b) - err := dec.Decode(&Sreq.Val) - return err -} - -func (Srep StampReply) MarshalBinary() ([]byte, error) { - var b bytes.Buffer - enc := gob.NewEncoder(&b) - err := enc.Encode(Srep.Sig) - return b.Bytes(), err -} - -func (Srep *StampReply) UnmarshalBinary(data []byte) error { - b := bytes.NewBuffer(data) - dec := gob.NewDecoder(b) - err := dec.Decode(&Srep.Sig) - return err -} diff --git a/app/coll_stamp/server.go b/app/coll_stamp/server.go deleted file mode 100644 index b95d45e27e..0000000000 --- a/app/coll_stamp/server.go +++ /dev/null @@ -1,468 +0,0 @@ -package coll_stamp - -import ( - "net" - "strconv" - "sync" - "time" - - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - - "github.com/dedis/cothority/lib/coconet" - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/cothority/lib/proof" - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/cothority/lib/logutils" -) - -type Server struct { - sign.Signer - name string - Clients map[string]coconet.Conn - - // for aggregating messages from clients - mux sync.Mutex - Queue [][]MustReplyMessage - READING int - PROCESSING int - - // Leaves, Root and Proof for a round - Leaves []hashid.HashId // can be removed after we verify protocol - Root hashid.HashId - Proofs []proof.Proof - - rLock sync.Mutex - maxRounds int - closeChan chan bool - - Logger string - Hostname string - App string -} - -func NewServer(signer sign.Signer) *Server { - s := &Server{} - - s.Clients = make(map[string]coconet.Conn) - s.Queue = make([][]MustReplyMessage, 2) - s.READING = 0 - s.PROCESSING = 1 - - s.Signer = signer - s.Signer.RegisterAnnounceFunc(s.OnAnnounce()) - s.Signer.RegisterDoneFunc(s.OnDone()) - s.rLock = sync.Mutex{} - - // listen for client requests at one port higher - // than the signing node - h, p, err := net.SplitHostPort(s.Signer.Name()) - if err == nil { - i, err := 
strconv.Atoi(p) - if err != nil { - log.Fatal(err) - } - s.name = net.JoinHostPort(h, strconv.Itoa(i+1)) - } - s.Queue[s.READING] = make([]MustReplyMessage, 0) - s.Queue[s.PROCESSING] = make([]MustReplyMessage, 0) - s.closeChan = make(chan bool, 5) - return s -} - -var clientNumber int = 0 - -func (s *Server) Close() { - dbg.Lvl4("closing stampserver: %p", s.name) - s.closeChan <- true - s.Signer.Close() -} - -// listen for clients connections -// this server needs to be running on a different port -// than the Signer that is beneath it -func (s *Server) Listen() error { - dbg.Lvl3("Listening in server at", s.name) - ln, err := net.Listen("tcp4", s.name) - if err != nil { - panic(err) - } - - go func() { - for { - // dbg.Lvl4("LISTENING TO CLIENTS: %p", s) - conn, err := ln.Accept() - if err != nil { - // handle error - dbg.Lvl3("failed to accept connection") - continue - } - - c := coconet.NewTCPConnFromNet(conn) - // dbg.Lvl4("CLIENT TCP CONNECTION SUCCESSFULLY ESTABLISHED:", c) - - if _, ok := s.Clients[c.Name()]; !ok { - s.Clients[c.Name()] = c - - go func(c coconet.Conn) { - for { - tsm := TimeStampMessage{} - err := c.Get(&tsm) - if err != nil { - log.Errorf("%p Failed to get from child:", s, err) - s.Close() - return - } - switch tsm.Type { - default: - log.Errorf("Message of unknown type: %v\n", tsm.Type) - case StampRequestType: - // dbg.Lvl4("RECEIVED STAMP REQUEST") - s.mux.Lock() - READING := s.READING - s.Queue[READING] = append(s.Queue[READING], - MustReplyMessage{Tsm: tsm, To: c.Name()}) - s.mux.Unlock() - } - } - }(c) - } - - } - }() - - return nil -} - -// Used for goconns -// should only be used if clients are created in batch -func (s *Server) ListenToClients() { - // dbg.Lvl4("LISTENING TO CLIENTS: %p", s, s.Clients) - for _, c := range s.Clients { - go func(c coconet.Conn) { - for { - tsm := TimeStampMessage{} - err := c.Get(&tsm) - if err == coconet.ErrClosed { - log.Errorf("%p Failed to get from client:", s, err) - s.Close() - return - } - if err != nil { - log.WithFields(log.Fields{ - "file": logutils.File(), - }).Errorf("%p failed To get message:", s, err) - } - switch tsm.Type { - default: - log.Errorln("Message of unknown type") - case StampRequestType: - // dbg.Lvl4("STAMP REQUEST") - s.mux.Lock() - READING := s.READING - s.Queue[READING] = append(s.Queue[READING], - MustReplyMessage{Tsm: tsm, To: c.Name()}) - s.mux.Unlock() - } - } - }(c) - } -} - -func (s *Server) ConnectToLogger() { - return - if s.Logger == "" || s.Hostname == "" || s.App == "" { - dbg.Lvl4("skipping connect to logger") - return - } - dbg.Lvl4("Connecting to Logger") - lh, _ := logutils.NewLoggerHook(s.Logger, s.Hostname, s.App) - dbg.Lvl4("Connected to Logger") - log.AddHook(lh) -} - -func (s *Server) LogReRun(nextRole string, curRole string) { - if nextRole == "root" { - var messg = s.Name() + " became root" - if curRole == "root" { - messg = s.Name() + " remained root" - } - - go s.ConnectToLogger() - - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "role_change", - }).Infoln(messg) - // dbg.Lvl4("role change: %p", s) - - } else { - var messg = s.Name() + " remained regular" - if curRole == "root" { - messg = s.Name() + " became regular" - } - - if curRole == "root" { - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "role_change", - }).Infoln(messg) - dbg.Lvl4("role change: %p", s) - } - - } - -} - -func (s *Server) runAsRoot(nRounds int) string { - // every 5 seconds start a new round - ticker := time.Tick(ROUND_TIME) - if s.LastRound()+1 > nRounds { - 
log.Errorln(s.Name(), "runAsRoot called with too large round number") - return "close" - } - - dbg.Lvl3(s.Name(), "running as root", s.LastRound(), int64(nRounds)) - for { - select { - case nextRole := <-s.ViewChangeCh(): - dbg.Lvl4(s.Name(), "assuming next role") - return nextRole - // s.reRunWith(nextRole, nRounds, true) - case <-ticker: - - start := time.Now() - dbg.Lvl4(s.Name(), "is STAMP SERVER STARTING SIGNING ROUND FOR:", s.LastRound()+1, "of", nRounds) - - var err error - if s.App == "vote" { - vote := &sign.Vote{ - Type: sign.AddVT, - Av: &sign.AddVote{ - Parent: s.Name(), - Name: "test-add-node"}} - err = s.StartVotingRound(vote) - } else { - err = s.StartSigningRound() - } - - if err == sign.ChangingViewError { - // report change in view, and continue with the select - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "view_change", - }).Info("Tried to stary signing round on " + s.Name() + " but it reports view change in progress") - // skip # of failed round - time.Sleep(1 * time.Second) - break - } else if err != nil { - log.Errorln(err) - time.Sleep(1 * time.Second) - break - } - - if s.LastRound()+1 >= nRounds { - log.Infoln(s.Name(), "reports exceeded the max round: terminating", s.LastRound()+1, ">=", nRounds) - return "close" - } - - elapsed := time.Since(start) - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "root_round", - "round": s.LastRound(), - "time": elapsed, - }).Info("root round") - - } - } -} - -func (s *Server) runAsRegular() string { - select { - case <-s.closeChan: - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "close", - }).Infoln("server" + s.Name() + "has closed") - return "" - - case nextRole := <-s.ViewChangeCh(): - return nextRole - } -} - -// Listen on client connections. 
If role is root also send annoucement -// for all of the nRounds -func (s *Server) Run(role string, nRounds int) { - // defer func() { - // log.Infoln(s.Name(), "CLOSE AFTER RUN") - // s.Close() - // }() - - dbg.Lvl3("Stamp-server", s.name, "starting with ", role, "and rounds", nRounds) - closed := make(chan bool, 1) - - go func() { err := s.Signer.Listen(); closed <- true; s.Close(); log.Error(err) }() - if role == "test_connect" { - role = "regular" - go func() { - //time.Sleep(30 * time.Second) - hostlist := s.Hostlist() - ticker := time.Tick(15 * time.Second) - i := 0 - for _ = range ticker { - select { - case <-closed: - dbg.Lvl4("server.Run: received closed") - return - default: - } - if i%2 == 0 { - dbg.Lvl4("removing self") - s.Signer.RemoveSelf() - } else { - dbg.Lvl4("adding self: ", hostlist[(i/2)%len(hostlist)]) - s.Signer.AddSelf(hostlist[(i/2)%len(hostlist)]) - } - i++ - } - }() - } - s.rLock.Lock() - s.maxRounds = nRounds - s.rLock.Unlock() - - var nextRole string // next role when view changes - for { - switch role { - - case "root": - dbg.Lvl4("running as root") - nextRole = s.runAsRoot(nRounds) - case "regular": - dbg.Lvl4("running as regular") - nextRole = s.runAsRegular() - case "test": - dbg.Lvl4("running as test") - ticker := time.Tick(2000 * time.Millisecond) - for _ = range ticker { - s.AggregateCommits(0) - } - default: - dbg.Lvl4("UNABLE TO RUN AS ANYTHING") - return - } - - // dbg.Lvl4(s.Name(), "nextRole: ", nextRole) - if nextRole == "close" { - s.Close() - return - } - if nextRole == "" { - return - } - s.LogReRun(nextRole, role) - role = nextRole - } - -} - -func (s *Server) OnAnnounce() sign.CommitFunc { - return func(view int) []byte { - //dbg.Lvl4("Aggregating Commits") - return s.AggregateCommits(view) - } -} - -func (s *Server) OnDone() sign.DoneFunc { - return func(view int, SNRoot hashid.HashId, LogHash hashid.HashId, p proof.Proof) { - s.mux.Lock() - for i, msg := range s.Queue[s.PROCESSING] { - // proof to get from s.Root to big root - combProof := make(proof.Proof, len(p)) - copy(combProof, p) - - // add my proof to get from a leaf message to my root s.Root - combProof = append(combProof, s.Proofs[i]...) 
- - // proof that i can get from a leaf message to the big root - if sign.DEBUG == true { - proof.CheckProof(s.Signer.(*sign.Node).Suite().Hash, SNRoot, s.Leaves[i], combProof) - } - - respMessg := TimeStampMessage{ - Type: StampReplyType, - ReqNo: msg.Tsm.ReqNo, - Srep: &StampReply{Sig: SNRoot, Prf: combProof}} - - s.PutToClient(msg.To, respMessg) - } - s.mux.Unlock() - } - -} - -func (s *Server) AggregateCommits(view int) []byte { - //dbg.Lvl4(s.Name(), "calling AggregateCommits") - s.mux.Lock() - // get data from s once to avoid refetching from structure - Queue := s.Queue - READING := s.READING - PROCESSING := s.PROCESSING - // messages read will now be processed - READING, PROCESSING = PROCESSING, READING - s.READING, s.PROCESSING = s.PROCESSING, s.READING - s.Queue[READING] = s.Queue[READING][:0] - - // give up if nothing to process - if len(Queue[PROCESSING]) == 0 { - s.mux.Unlock() - s.Root = make([]byte, hashid.Size) - s.Proofs = make([]proof.Proof, 1) - return s.Root - } - - // pull out to be Merkle Tree leaves - s.Leaves = make([]hashid.HashId, 0) - for _, msg := range Queue[PROCESSING] { - s.Leaves = append(s.Leaves, hashid.HashId(msg.Tsm.Sreq.Val)) - } - s.mux.Unlock() - - // non root servers keep track of rounds here - if !s.IsRoot(view) { - s.rLock.Lock() - lsr := s.LastRound() - mr := s.maxRounds - s.rLock.Unlock() - // if this is our last round then close the connections - if lsr >= mr && mr >= 0 { - s.closeChan <- true - } - } - - // create Merkle tree for this round's messages and check corectness - s.Root, s.Proofs = proof.ProofTree(s.Suite().Hash, s.Leaves) - if sign.DEBUG == true { - if proof.CheckLocalProofs(s.Suite().Hash, s.Root, s.Leaves, s.Proofs) == true { - dbg.Lvl4("Local Proofs of", s.Name(), "successful for round "+strconv.Itoa(int(s.LastRound()))) - } else { - panic("Local Proofs" + s.Name() + " unsuccessful for round " + strconv.Itoa(int(s.LastRound()))) - } - } - - return s.Root -} - -// Send message to client given by name -func (s *Server) PutToClient(name string, data coconet.BinaryMarshaler) { - err := s.Clients[name].Put(data) - if err == coconet.ErrClosed { - s.Close() - return - } - if err != nil && err != coconet.ErrNotEstablished { - log.Warnf("%p error putting to client: %v", s, err) - } -} diff --git a/app/coll_stamp/tsserver_test.go b/app/coll_stamp/tsserver_test.go deleted file mode 100644 index 312999a50d..0000000000 --- a/app/coll_stamp/tsserver_test.go +++ /dev/null @@ -1,464 +0,0 @@ -package coll_stamp_test - -import ( - "fmt" - "strconv" - "sync" - "testing" - "time" - - log "github.com/Sirupsen/logrus" - - "github.com/dedis/cothority/coconet" - "github.com/dedis/cothority/sign" - "github.com/dedis/cothority/stamp" - "github.com/dedis/cothority/lib/config" -) - -// TODO: messages should be sent hashed eventually - -// func init() { -// log.SetFlags(log.Lshortfile) -// //log.SetOutput(ioutil.Discard) -// } - -// Configuration file data/exconf.json -// 0 -// / \ -// 1 4 -// / \ \ -// 2 3 5 -func init() { - sign.DEBUG = true -} - -func TestTSSIntegrationHealthy(t *testing.T) { - failAsRootEvery := 0 // never fail on announce - failAsFollowerEvery := 0 // never fail on commit or response - RoundsPerView := 100 - if err := runTSSIntegration(RoundsPerView, 4, 5, 0, failAsRootEvery, failAsFollowerEvery); err != nil { - t.Fatal(err) - } -} - -func TestTSSIntegrationFaulty(t *testing.T) { - if testing.Short() { - t.Skip("skipping faulty test in short mode.") - } - - // not mixing view changes with faults - RoundsPerView := 100 - failAsRootEvery 
:= 0 // never fail on announce - failAsFollowerEvery := 0 // never fail on commit or response - - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 2, 5) - if err := runTSSIntegration(RoundsPerView, 4, 4, 20, failAsRootEvery, failAsFollowerEvery, faultyNodes...); err != nil { - t.Fatal(err) - } -} - -func TestTSSViewChange1(t *testing.T) { - RoundsPerView := 2 - nRounds := 12 - failAsRootEvery := 0 // never fail on announce - failAsFollowerEvery := 0 // never fail on commit or response - - if err := runTSSIntegration(RoundsPerView, 1, nRounds, 0, failAsRootEvery, failAsFollowerEvery); err != nil { - t.Fatal(err) - } -} - -func TestTSSViewChange2(t *testing.T) { - RoundsPerView := 3 - nRounds := 8 - failAsRootEvery := 0 // never fail on announce - failAsFollowerEvery := 0 // never fail on commit or response - - if err := runTSSIntegration(RoundsPerView, 1, nRounds, 0, failAsRootEvery, failAsFollowerEvery); err != nil { - t.Fatal(err) - } -} - -// Each View Root fails on its 3rd round of being root -// View Change is initiated as a result -// RoundsPerView very large to avoid other reason for ViewChange -func TestTSSViewChangeOnRootFailure(t *testing.T) { - RoundsPerView := 1000 - nRounds := 12 - failAsRootEvery := 3 // fail on announce every 3rd round - failAsFollowerEvery := 0 // never fail on commit or response - - if err := runTSSIntegration(RoundsPerView, 1, nRounds, 0, failAsRootEvery, failAsFollowerEvery); err != nil { - t.Fatal(err) - } -} - -// Faulty Followers fail every 3rd round -func TestTSSViewChangeOnFollowerFailureNoRate(t *testing.T) { - RoundsPerView := 1000 - nRounds := 12 - failAsRootEvery := 0 // never fail on announce - // selected faultyNodes will fail on commit and response every 3 rounds - // if they are followers in the view - failAsFollowerEvery := 3 - - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 2, 3) - if err := runTSSIntegration(RoundsPerView, 1, nRounds, 0, failAsRootEvery, failAsFollowerEvery, faultyNodes...); err != nil { - t.Fatal(err) - } -} - -// Faulty Followers fail every 3rd round, with probability failureRate% -func TestTSSViewChangeOnFollowerFailureWithRate(t *testing.T) { - RoundsPerView := 1000 - nRounds := 12 - failAsRootEvery := 0 // never fail on announce - failureRate := 10 - // selected faultyNodes will fail on commit and response every 3 rounds - // if they are followers in the view - failAsFollowerEvery := 3 - - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 2, 3) - if err := runTSSIntegration(RoundsPerView, 1, nRounds, failureRate, failAsRootEvery, failAsFollowerEvery, faultyNodes...); err != nil { - t.Fatal(err) - } -} - -// # Messages per round, # rounds, failure rate[0..100], list of faulty nodes -func runTSSIntegration(RoundsPerView, nMessages, nRounds, failureRate, failAsRootEvery, failAsFollowerEvery int, faultyNodes ...int) error { - //coll_stamp.ROUND_TIME = 1 * time.Second - var hostConfig *config.HostConfig - var err error - - // load config with faulty or healthy hosts - opts := config.ConfigOptions{} - if len(faultyNodes) > 0 { - opts.Faulty = true - } - hostConfig, err = config.LoadConfig("testdata/exconf.json", opts) - if err != nil { - return err - } - log.Printf("load config returned dir: %p", hostConfig.Dir) - - // set FailureRates as pure percentages - if len(faultyNodes) > 0 { - for i := range hostConfig.SNodes { - hostConfig.SNodes[i].FailureRate = failureRate - } - } - - // set root failures - if failAsRootEvery > 0 { - for i := range hostConfig.SNodes { - 
hostConfig.SNodes[i].FailAsRootEvery = failAsRootEvery - - } - } - // set followerfailures - for _, f := range faultyNodes { - hostConfig.SNodes[f].FailAsFollowerEvery = failAsFollowerEvery - } - - for _, n := range hostConfig.SNodes { - n.RoundsPerView = RoundsPerView - } - - err = hostConfig.Run(true, sign.MerkleTree) - if err != nil { - return err - } - - // Connect all TSServers to their clients, except for root TSServer - ncps := 3 // # clients per TSServer - stampers := make([]*stamp.Server, len(hostConfig.SNodes)) - for i := range stampers { - stampers[i] = stamp.NewServer(hostConfig.SNodes[i]) - defer func() { - hostConfig.SNodes[i].Close() - time.Sleep(1 * time.Second) - }() - } - - clientsLists := make([][]*stamp.Client, len(hostConfig.SNodes[1:])) - for i, s := range stampers[1:] { - clientsLists[i] = createClientsForTSServer(ncps, s, hostConfig.Dir, 0+i+ncps) - } - - for i, s := range stampers[1:] { - go s.Run("regular", nRounds) - go s.ListenToClients() - go func(clients []*stamp.Client, nRounds int, nMessages int, s *stamp.Server) { - log.Println("clients Talk") - time.Sleep(1 * time.Second) - clientsTalk(clients, nRounds, nMessages, s) - log.Println("Clients done Talking") - }(clientsLists[i], nRounds, nMessages, s) - - } - - log.Println("RUNNING ROOT") - stampers[0].ListenToClients() - stampers[0].Run("root", nRounds) - log.Println("Done running root") - // After clients receive messages back we need a better way - // of waiting to make sure servers check ElGamal sigs - // time.Sleep(1 * time.Second) - log.Println("DONE with test") - return nil -} - -func TestGoConnTimestampFromConfig(t *testing.T) { - config.StartConfigPort += 2010 - nMessages := 1 - nClients := 1 - nRounds := 1 - - hc, err := config.LoadConfig("testdata/exconf.json") - if err != nil { - t.Fatal(err) - } - for _, n := range hc.SNodes { - n.RoundsPerView = 1000 - } - err = hc.Run(true, sign.MerkleTree) - if err != nil { - t.Fatal(err) - } - - stampers, clients, err := hc.RunTimestamper(nClients) - if err != nil { - log.Fatal(err) - } - - for _, s := range stampers[1:] { - go s.Run("regular", nRounds) - go s.ListenToClients() - } - go stampers[0].Run("root", nRounds) - go stampers[0].ListenToClients() - log.Println("About to start sending client messages") - - time.Sleep(1 * time.Second) - for r := 0; r < nRounds; r++ { - var wg sync.WaitGroup - for _, c := range clients { - for i := 0; i < nMessages; i++ { - messg := []byte("messg:" + strconv.Itoa(r) + "." 
+ strconv.Itoa(i)) - wg.Add(1) - go func(c *stamp.Client, messg []byte, i int) { - defer wg.Done() - server := "NO VALID SERVER" - c.Mux.Lock() - for k := range c.Servers { - server = k - break - } - c.Mux.Unlock() - c.TimeStamp(messg, server) - }(c, messg, r) - } - } - // wait between rounds - wg.Wait() - fmt.Println("done with round:", r, nRounds) - } - - // give it some time before closing the connections - // so that no essential messages are denied passing through the network - time.Sleep(5 * time.Second) - for _, h := range hc.SNodes { - h.Close() - } - for _, c := range clients { - c.Close() - } -} - -func TestTCPTimestampFromConfigViewChange(t *testing.T) { - RoundsPerView := 5 - if err := runTCPTimestampFromConfig(RoundsPerView, sign.MerkleTree, 1, 1, 5, 0); err != nil { - t.Fatal(err) - } -} - -func TestTCPTimestampFromConfigHealthy(t *testing.T) { - RoundsPerView := 5 - if err := runTCPTimestampFromConfig(RoundsPerView, sign.MerkleTree, 1, 1, 5, 0); err != nil { - t.Fatal(err) - } -} - -func TestTCPTimestampFromConfigFaulty(t *testing.T) { - if testing.Short() { - t.Skip("skipping faulty test in short mode.") - } - - // not mixing view changes with faults - RoundsPerView := 100 - // not mixing view changes with faults - aux2 := sign.HEARTBEAT - sign.HEARTBEAT = 4 * sign.ROUND_TIME - - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 2, 5) - if err := runTCPTimestampFromConfig(RoundsPerView, sign.MerkleTree, 1, 1, 5, 20, faultyNodes...); err != nil { - t.Fatal(err) - } - - sign.HEARTBEAT = aux2 -} - -func TestTCPTimestampFromConfigVote(t *testing.T) { - // not mixing view changes with faults - RoundsPerView := 3 - // not mixing view changes with faults - aux2 := sign.HEARTBEAT - sign.HEARTBEAT = 4 * sign.ROUND_TIME - - if err := runTCPTimestampFromConfig(RoundsPerView, sign.Voter, 0, 0, 15, 0); err != nil { - t.Fatal(err) - } - - sign.HEARTBEAT = aux2 -} - -func runTCPTimestampFromConfig(RoundsPerView int, signType, nMessages, nClients, nRounds, failureRate int, faultyNodes ...int) error { - var hc *config.HostConfig - var err error - config.StartConfigPort += 2010 - - // load config with faulty or healthy hosts - if len(faultyNodes) > 0 { - hc, err = config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true, Faulty: true}) - } else { - hc, err = config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true}) - } - if err != nil { - return err - } - - // set FailureRates - if len(faultyNodes) > 0 { - for i := range hc.SNodes { - hc.SNodes[i].FailureRate = failureRate - } - } - - for _, n := range hc.SNodes { - n.RoundsPerView = RoundsPerView - } - - err = hc.Run(true, sign.Type(signType)) - if err != nil { - return err - } - - stampers, clients, err := hc.RunTimestamper(nClients) - if err != nil { - return err - } - - for _, s := range stampers[1:] { - go s.Run("regular", nRounds) - } - go stampers[0].Run("root", nRounds) - log.Println("About to start sending client messages") - - for r := 1; r <= nRounds; r++ { - var wg sync.WaitGroup - for _, c := range clients { - for i := 0; i < nMessages; i++ { - messg := []byte("messg:" + strconv.Itoa(r) + "." 
+ strconv.Itoa(i)) - wg.Add(1) - - // CLIENT SENDING - go func(c *stamp.Client, messg []byte, i int) { - defer wg.Done() - server := "NO VALID SERVER" - - retry: - c.Mux.Lock() - for k := range c.Servers { - server = k - break - } - c.Mux.Unlock() - log.Infoln("timestamping") - err := c.TimeStamp(messg, server) - if err == stamp.ErrClientToTSTimeout { - log.Errorln(err) - return - } - if err != nil { - time.Sleep(1 * time.Second) - fmt.Println("retyring because err:", err) - goto retry - } - log.Infoln("timestamped") - }(c, messg, r) - - } - } - // wait between rounds - wg.Wait() - log.Println("done with round:", r, " of ", nRounds) - } - - // give it some time before closing the connections - // so that no essential messages are denied passing through the network - time.Sleep(1 * time.Second) - for _, h := range hc.SNodes { - h.Close() - } - for _, c := range clients { - c.Close() - } - return nil -} - -// Create nClients for the TSServer, with first client associated with number fClient -func createClientsForTSServer(nClients int, s *stamp.Server, dir *coconet.GoDirectory, fClient int) []*stamp.Client { - clients := make([]*stamp.Client, 0, nClients) - for i := 0; i < nClients; i++ { - clients = append(clients, stamp.NewClient("client"+strconv.Itoa(fClient+i))) - - // intialize TSServer conn to client - ngc, err := coconet.NewGoConn(dir, s.Name(), clients[i].Name()) - if err != nil { - panic(err) - } - s.Clients[clients[i].Name()] = ngc - - // intialize client connection to sn - ngc, err = coconet.NewGoConn(dir, clients[i].Name(), s.Name()) - if err != nil { - panic(err) - } - clients[i].AddServer(s.Name(), ngc) - } - - return clients -} - -func clientsTalk(clients []*stamp.Client, nRounds, nMessages int, s *stamp.Server) { - // have client send messages - for r := 0; r < nRounds; r++ { - var wg sync.WaitGroup - for _, client := range clients { - for i := 0; i < nMessages; i++ { - messg := []byte("messg" + strconv.Itoa(r) + strconv.Itoa(i)) - wg.Add(1) - go func(client *stamp.Client, messg []byte, s *stamp.Server, i int) { - defer wg.Done() - client.TimeStamp(messg, s.Name()) - }(client, messg, s, r) - } - } - // wait between rounds - wg.Wait() - } -} diff --git a/app/conode/README.md b/app/conode/README.md new file mode 100644 index 0000000000..18f3d233c0 --- /dev/null +++ b/app/conode/README.md @@ -0,0 +1,234 @@ +#CoNode + +This repository provides a first implementation of a cothority node (CoNode) for public usage. After setup, a CoNode can be used as a public timestamp-server, which takes a hash and computes a signature together with an inclusion-proof. Moreover, a simple stamping-program is provided that can generate and verify signatures of given files through the CoNode. + +Currently you can run CoNode either by participating in the EPFL CoNode project or by setting up your own cluster of CoNodes. Both options are described further below. + +## Warning +**The software provided in this repository is highly experimental and under heavy development. Do not use it for anything security-critical. All usage is at your own risk!** + +## Limitations / Disclaimer + +There are some known limitations that we would like to address as soon as possible: + +* There is no exception-handling if a node is down. +* Each time you add nodes to your tree, the collective public signature changes. + +## Requirements + +* A server with a public IPv4 address and two open ports (default: `2000` and `2001`). 
+* [Golang](https://golang.org/) version 1.5.1 or newer, in case you plan to compile CoNode yourself. + + +## Getting CoNode + +There are two options to get CoNode: either download the pre-compiled binaries or compile the software yourself. + +### Download Binaries + +The latest binaries of CoNode (for 32-/64-bit Linux and OSX) are available at: + +https://github.com/dedis/cothority/releases/latest + +**Note:** The binaries are currently **not signed**. + +Execute the following steps to get a basic setup: + +``` +$ mkdir conode +$ cd conode +$ wget https://github.com/DeDiS/cothority/releases/download/0.5/conode-0.5.7.tar.gz +$ tar -xvzf conode-0.5.7.tar.gz +``` + +### Compile Binaries + +Compilation of CoNode requires a working [Golang](https://golang.org) installation of version 1.5.1 or newer. To do so, execute the following steps: + +``` +$ go get github.com/dedis/cothority +$ cd $GOPATH/src/github.com/dedis/cothority +$ git checkout development +$ go get ./... +$ cd app/conode +$ go build +``` + + +## Overview of CoNode + +The `conode` binary provides all the required functionality to configure and run a CoNode (cluster). To get an overview on the supported commands call: + +``` +$ ./conode --help +NAME: + CoNode - Runs a cothority node and contacts others CoNodes to form a cothority tree + +USAGE: + ./conode [global options] command [command options] [arguments...] + +VERSION: + 0.1.0 + +AUTHOR(S): + Linus Gasser Nicolas Gailly + +COMMANDS: + check, c Checks if a given CoNode is valid in order to be incorporated into a cothority tree + build, b Builds a cothority configuration file required for CoNodes and clients + exit, x Stops the given CoNode + run, r Runs the CoNode and connects it to the cothority tree as specified in the config file + validate, v Starts validation mode of the CoNode + keygen, k Creates a new key pair and binds the public part to the specified IPv4 address and port + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --debug, -d "1" debug level from 1 (only major operations) to 5 (very noisy text) + --help, -h show help + --version, -v print the version + +``` + +For more information on how to use the above commands please refer to the following sections. The script `start-conode.sh`, which can be found in the CoNode archive and repository, is a wrapper around `conode` and automatises certain tasks. + +**Note:** Since your CoNode should be permanently available, we recommend that you run the program inside a terminal multiplexer such as [GNU screen](https://www.gnu.org/software/screen/) or [tmux](https://tmux.github.io/). This ensures that your CoNode remains online even after you log out of your server. + +## Configuring CoNode + +We assume in the following that you want to participate with your CoNode in an existing cothority tree. For more information on running your own cothority tree, see further below of the README. + +### Key Generation + +The **first step** in configuration is to generate a new key pair: + +``` +$ ./conode keygen : +``` + +This command generates two files: + +* `key.pub`: contains a *public key* as well as the *IP address* and *port number* as specified above. If no port number is given, then the default value `2000` is used. +* `key.priv`: contains the *private key* of your CoNode. + +If you are not the operator of a CoNode cluster yourself, then you need to send the public key `key.pub` to your CoNode administrator and ask for inclusion in the tree. 
The private key `key.priv`, however, **must remain secret** under all circumstances and should not be shared! + +### Validation Mode + +The **second step** is to bring your CoNode into validation mode: + +``` +$ ./conode validate +``` + +Then wait until your CoNode operator has verified your instance and has sent you the configuration file `config.toml` containing information about the other nodes in the cluster. + +### Running Mode + +The **third step** is finally to bring your CoNode into running mode. Therefore, shutdown your CoNode in validation mode and call: + +``` +$ ./conode run +``` + + +### All-In-One Setup + +For a combination of the above steps you can use the `start-conode.sh` script: + +``` +$ ./start-conode.sh setup : +``` + +This script generates the keys and starts the validation mode. After your CoNode has been available for a long enough time period (usually more than 24 hours) under the specified IP address and port and has been validated by your CoNode operator, it will exit automatically, get the CoNode tree-information, and switch to running-mode. + +**Note:** In case your CoNode is shutdown for whatever reason, you can always manually restart it by simply executing: + +``` +$ ./start-conode.sh run +``` + +**Warning:** The `start-conode`-script comes with an auto-updating mechanism: every time CoNode quits (e.g. when the root-node terminates), it checks on GitHub if a new version is available and if so, downloads the new archive, extracts it and re-launches itself. + +We are aware that this is a security-risk and promise to not use your server for anything but running CoNode. This mechanism will be replaced at some point with a secure variant. + +If you want to avoid this auto-updating, use the `conode` binary directly as described above. + + +## Using CoNode + +Once your CoNode is properly configured and in running mode, you can use it to generate stamps for documents or verify that a document is valid under a given signature. Both functions can be called via the `stamp` utility. + +To **generate a stamp**, run + +``` +$ ./stamp sign file +``` + +where `file` is the document you want to stamp. This generates a signature and inclusion-proof and writes it to `file.sig`. + +To **verify a stamp**, call + +``` +$ ./stamp check file +``` + +If `file` is present, its hash-value is verified against the value stored in `file.sig`, otherwise only the information in `file.sig` is verified. + +## Participate in the EPFL CoNode Cluster + +In order to participate in the EPFL CoNode project, follow the setup steps as described above using either the `start-conode.sh` script or the `conode` binary directly. Please send your `key.pub` file to the google-group at https://groups.google.com/forum/#!forum/cothority and wait until we have validated your instance. For that make sure that your CoNode is available for at least 24 hours under the IP address and port specified in `key.pub`. Once we have verified your CoNode, we will send you the configuration file `config.toml`. Copy that to the folder of your `conode` binary, shutdown the validation-mode and restart CoNode in running-mode. Now your CoNode is configured and you can `stamp` files through the EPFL CoNode cluster. + +## Setup Your Own CoNode Cluster + +If you want to create your own tree of CoNodes, you need to use the `conode` binary directly and **not** the script `start-conode`. 
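
Before involving real users, you may want to sanity-check the workflow with a small cluster running entirely on localhost. The following is only a sketch: it assumes ports 2000, 2010 and 2020 are free and mirrors what the repository's `run_locally.sh` script does, using the `keygen`, `build` and `run` commands described above; adjust key names and ports to your setup.

```
$ ./conode keygen localhost:2000 -key key1
$ ./conode keygen localhost:2010 -key key2
$ ./conode keygen localhost:2020 -key key3
$ cat key*.pub > hostlist
$ ./conode build hostlist        # writes config.toml for the three hosts
$ ./conode run -key key2 &
$ ./conode run -key key3 &
$ ./conode run -key key1         # the first host in the list is the root of the tree
```

To shut the nodes down again, `./conode exit` can be pointed at each node's stamp port (node port + 1), which is what the `exit_conodes.sh` helper script does.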
+ +On receipt of a new `key.pub` file from a user requesting to participate in your CoNode cluster, you can check the availability and affiliation of the server specified in `key.pub` by calling + +``` +$ ./conode check key.pub +``` + +After you checked the availability of all the nodes in your cluster, you can concatenate the `key.pub` files to build a list of hosts and pass that to your CoNode application: + +``` +$ cat key*.pub >> hostlist +$ ./conode build hostlist +``` + +This generates a configuration file `config.toml`, which you then have to distribute to all your users and ask them to restart their CoNodes with the updated settings. + +**Note:** currently there is no way to automatically trigger a restart of all CoNodes in case the configuration changes. + +Finally, start your CoNode by calling: + +``` +$ ./conode run +``` + +## Further Technical Information + +### config.toml + +The file `config.toml` contains: + +* The used suite of cryptographic algorithms (for the moment we stick to AES128+SHA256+Ed25519) +* A list of all hosts +* The aggregate public key of the CoNode +* The tree of all hosts together with the public key of each host + +**Note**: the aggregate public key changes from one CoNode-installation to another and even adding or removing a node will change it. + +### file.sig + +The signature file `file.sig` of a document `file` contains: + +* name: the name of the file +* hash: a SHA256-based hash +* proof: the inclusion-proof for your document +* signature: the collective signature + +If you want to verify a given signature, you need aggregate public key of the CoNode cluster found in the configuration file `config.toml`. + +## Contact Us + +If you are running your own CoNode cluster, we would be very happy to hear from you. Do not hesitate to contact us at the google-group https://groups.google.com/forum/#!forum/cothority. diff --git a/app/conode/VERSION b/app/conode/VERSION new file mode 100644 index 0000000000..b58d89b911 --- /dev/null +++ b/app/conode/VERSION @@ -0,0 +1,3 @@ +30 October 2015 - 0.5.7 + - added exit-command to 'conode validate' that exits the conode, updates + and re-runs in 'conode run'-mode \ No newline at end of file diff --git a/app/conode/check.go b/app/conode/check.go new file mode 100644 index 0000000000..9cdcfd5572 --- /dev/null +++ b/app/conode/check.go @@ -0,0 +1,135 @@ +package main + +import ( + "bytes" + "net" + + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/anon" +) + +// This file handles the checking of a host who wants to join the cothority +// tree +// Basically, it will contact the host, waiting for its message containing some +// basics information about its system, and the signature associated + +func init() { + command := cli.Command{ + Name: "check", + Aliases: []string{"c"}, + Usage: "Checks if a given CoNode is valid in order to be incorporated into a cothority tree", + Description: "It checks the public key given and the availability of the server. 
It will be contacted multiple times a day during 24 hours", + ArgsUsage: "Public-key-file: file where reside the public key of the host to check", + Subcommands: []cli.Command{ + { + Name: "exit", + Usage: "Asks the remote node to exit", + Action: func(c *cli.Context) { + if c.Args().First() == "" { + dbg.Fatal("No public key file given for exit.") + } + CheckExit(c.Args().First()) + }, + }, + }, + Action: func(c *cli.Context) { + if c.Args().First() == "" { + dbg.Fatal("No public key file given for check.") + } + Check(c.Args().First()) + }, + } + registerCommand(command) +} + +// Main entry point for the check mode +func Check(pubKeyFile string) { + // Verifies the remote host and returns the status + conn, ack := verifyHost(pubKeyFile) + defer conn.Close() + + if err := suite.Write(conn, ack); err != nil { + dbg.Fatal("Error writing back the ACK:", err) + } +} + +// Main entry point for the check mode +func CheckExit(pubKeyFile string) { + // Verifies the remote host and returns the status + conn, ack := verifyHost(pubKeyFile) + defer conn.Close() + + // We only ask the node to exit if everything is OK + if ack.Code != SYS_OK { + dbg.Fatal("Not correct key-file?") + } else { + ack.Code = SYS_EXIT + } + + // Finally send the message + dbg.Lvl1("Sending exit to node") + if err := suite.Write(conn, ack); err != nil { + dbg.Fatal("Error asking for exit:", err) + } +} + +// verifyHost will anaylze the systempacket information and verify the signature +// It will return a ACK properly initialized with the right codes in it. +func verifyHost(pubKeyFile string) (net.Conn, Ack) { + // get the right public key + pub, host, err := cliutils.ReadPubKey(suite, pubKeyFile) + if err != nil { + dbg.Fatal("Could not read the public key from the file:", err) + } + dbg.Lvl1("Public key file read") + + // Then get a connection + conn, err := net.Dial("tcp", host) + if err != nil { + dbg.Fatal("Error when getting the connection to the host:", err) + } + + dbg.Lvl1("Verifier connected to the host. Validation in progress...") + // Get the system packet message + var sys SystemPacket + if err = suite.Read(conn, &sys); err != nil { + dbg.Fatal("Error when reading the system packet message from host:", err) + } + // Get the signature length first + var length int + if err := suite.Read(conn, &length); err != nil { + dbg.Fatal("Could not read length of the signature ...") + } + // Get the signature + sig := make([]byte, length) + if err := suite.Read(conn, &sig); err != nil { + dbg.Fatal("Error reading the signature:", err) + } + + // First, encode the sys packet + var b bytes.Buffer + if err := suite.Write(&b, sys); err != nil { + dbg.Fatal("Error when encoding the syspacket to be verified:", err) + } + X := make([]abstract.Point, 1) + X[0] = pub + + // Verify signature + var ack Ack + ack.Type = TYPE_SYS + ack.Code = SYS_EXIT + if _, err := anon.Verify(suite, b.Bytes(), anon.Set(X), nil, sig); err != nil { + // Wrong signature + ack.Code = SYS_WRONG_SIG + dbg.Lvl1("WARNING: signature provided is wrong.") + } else { + // verfiy SystemPacket itself + ack.Code = SYS_OK + dbg.Lvl1("Host's signature verified and system seems healty. 
OK") + } + + return conn, ack +} diff --git a/app/conode/config.go b/app/conode/config.go new file mode 100644 index 0000000000..714b1b0a34 --- /dev/null +++ b/app/conode/config.go @@ -0,0 +1,167 @@ +package main + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "os" + "strings" + + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/graphs" + "github.com/dedis/crypto/abstract" +) + +func init() { + command := cli.Command{ + Name: "build", + Aliases: []string{"b"}, + Usage: "Builds a cothority configuration file required for CoNodes and clients", + Description: "Basically it will statically generate the tree, with the respective names and public key", + ArgsUsage: "HOSTFILE: file where each line is a copy of a public key node (
)", + Flags: []cli.Flag{ + cli.IntFlag{ + Name: "bf", + Value: 2, + Usage: "Defines the branching factor we want in the cothority tree. Default is 2", + }, + cli.StringFlag{ + Name: "config", + Value: defaultConfigFile, + Usage: "where to write the generated cothority configuration file", + }, + }, + Action: func(c *cli.Context) { + if c.Args().First() == "" { + dbg.Fatal("You must provide a host file to generate the config") + } + Build(c.Args().First(), c.Int("bf"), c.String("config")) + }, + } + registerCommand(command) +} + +// This file handles the creation a of cothority tree. +// Basically, it takes a list of files generated by the "key" command by each +// hosts and turn that into a full tree with the hostname and public key in each +// node. +// BuildTree takes a file formatted like this : +// host pubKey +// host2 pubKey +// ... ... +// For the moment it takes a branching factor on how to make the tree +// and the name of the file where to write the config +// It writes the tree + any other configs to output using toml format +// with the app/config_conode.go struct +func Build(hostFile string, bf int, configFile string) { + + // First, read the list of host and public keys + hosts, pubs, err := readHostFile(hostFile) + if err != nil { + dbg.Fatal("Error reading the host file:", err) + } + + // Then construct the tree + tree := constructTree(hosts, pubs, bf) + // then constrcut the aggregated public key K0 + k0 := aggregateKeys(pubs) + var b bytes.Buffer + err = cliutils.WritePub64(suite, &b, k0) + if err != nil { + dbg.Fatal("Could not aggregate public keys in base64") + } + + // Then write the config + conf := app.ConfigConode{ + Suite: suiteStr, + Tree: tree, + Hosts: hosts, + AggPubKey: b.String(), + } + + app.WriteTomlConfig(conf, configFile) + dbg.Lvl1("Written config file with tree to", configFile) +} + +// SImply adds all the public keys we give to it +func aggregateKeys(pubs []string) abstract.Point { + k0 := suite.Point().Null() + for i, ki := range pubs { + // convert from string to public key + kip, _ := cliutils.ReadPub64(suite, strings.NewReader(ki)) + k0 = k0.Add(k0, kip) + dbg.Lvl2("Public key n#", i, ":", kip) + } + dbg.Lvl1("Aggregated public key:", k0) + return k0 +} + +// readHostFile will read the host file +// HOSTNAME PUBLICKEY +// for each line. and returns the whole set and any errror if any are found. +func readHostFile(file string) ([]string, []string, error) { + // open it up + hostFile, err := os.Open(file) + if err != nil { + return nil, nil, err + } + + // Then read it up + hosts := make([]string, 0) + pubs := make([]string, 0) + scanner := bufio.NewScanner(hostFile) + ln := 0 + for scanner.Scan() { + line := scanner.Text() + ln += 1 + spl := strings.Split(line, " ") + if len(spl) != 2 { + return nil, nil, errors.New(fmt.Sprintf("Hostfile misformatted at line %s", ln)) + } + // add it HOSTS -> PUBLIC KEY + h, err := cliutils.VerifyPort(spl[0], conode.DefaultPort) + if err != nil { + dbg.Fatal("Error reading address in host file:", spl[0], err) + } + hosts = append(hosts, h) + pubs = append(pubs, spl[1]) + } + dbg.Lvl1("Read the hosts files:", ln, "entries") + return hosts, pubs, nil +} + +// ConstructTree takes a map of host -> public keys and a branching factor +// so it can constructs a regular tree. 
THe returned tree is the root +// it is constructed bfs style +func constructTree(hosts, pubs []string, bf int) *graphs.Tree { + var root *graphs.Tree = new(graphs.Tree) + root.Name = hosts[0] + root.PubKey = pubs[0] + var index int = 1 + bfs := make([]*graphs.Tree, 1) + bfs[0] = root + for len(bfs) > 0 && index < len(hosts) { + t := bfs[0] + t.Children = make([]*graphs.Tree, 0) + lbf := 0 + // create space for enough children + // init them + for lbf < bf && index < len(hosts) { + child := new(graphs.Tree) + child.Name = hosts[index] + child.PubKey = pubs[index] + // append the children to the list of trees to visit + bfs = append(bfs, child) + t.Children = append(t.Children, child) + index += 1 + lbf += 1 + } + bfs = bfs[1:] + } + return root +} diff --git a/app/conode/conode.go b/app/conode/conode.go new file mode 100644 index 0000000000..e956e09242 --- /dev/null +++ b/app/conode/conode.go @@ -0,0 +1,118 @@ +package main + +import ( + "os" + + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/edwards/ed25519" +) + +// Which suite to use +var suite abstract.Suite = ed25519.NewAES128SHA256Ed25519(false) +var suiteStr string = suite.String() + +// where to write the key file .priv + .pub +var defaultKeyFile string = "key" + +// Returns the name of the file for the private key +func namePriv(key string) string { + return key + ".priv" +} + +// Returns the name of the file for the public key +func namePub(key string) string { + return key + ".pub" +} + +// config file by default +const defaultConfigFile string = "config.toml" + +/////////////////////// +// will sotre each files / packages commands before creating the cli +var commands []cli.Command = make([]cli.Command, 0) + +// register a new command to be added to the cli +func registerCommand(com cli.Command) { + commands = append(commands, com) +} + +func main() { + coApp := cli.NewApp() + coApp.Name = "CoNode" + coApp.Usage = "Runs a cothority node and contacts others CoNodes to form a cothority tree" + coApp.Version = "0.1.0" + coApp.Authors = []cli.Author{ + { + Name: "Linus Gasser", + Email: "linus.gasser@epfl.ch", + }, + { + Name: "Nicolas Gailly", + Email: "not specified", + }, + } + // already create the key gen command + keyGen := cli.Command{ + Name: "keygen", + Aliases: []string{"k"}, + Usage: "Creates a new key pair and binds the public part to the specified IPv4 address and port", + ArgsUsage: "ADRESS[:PORT] is the address (and port) bound to the generated public key.", + Action: func(c *cli.Context) { + KeyGeneration(c.String("key"), c.Args().First()) + }, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "key, k", + Usage: "Basename of the files where reside the keys. 
If key = 'key'," + + "then conode will search through 'key.pub' and 'key.priv'", + Value: defaultKeyFile, + }, + }, + } + commands = append(commands, keyGen) + coApp.Commands = commands + coApp.Flags = []cli.Flag{ + cli.IntFlag{ + Name: "debug, d", + Usage: "debug level from 1 (only major operations) to 5 (very noisy text)", + Value: 1, + }, + } + // sets the right debug options + coApp.Before = func(c *cli.Context) error { + dbg.DebugVisible = c.GlobalInt("debug") + return nil + } + + coApp.Run(os.Args) +} + +// KeyGeneration will generate a fresh public / private key pair +// and write those down into two separate files +func KeyGeneration(key, address string) { + if address == "" { + dbg.Fatal("You must call keygen with ipadress !") + } + address, err := cliutils.VerifyPort(address, conode.DefaultPort) + dbg.Lvl1("Address is", address) + if err != nil { + dbg.Fatal(err) + } + // gen keypair + kp := cliutils.KeyPair(suite) + // Write private + if err := cliutils.WritePrivKey(suite, namePriv(key), kp.Secret); err != nil { + dbg.Fatal("Error writing private key file:", err) + } + + // Write public + if err := cliutils.WritePubKey(suite, namePub(key), kp.Public, address); err != nil { + dbg.Fatal("Error writing public key file:", err) + } + + dbg.Lvl1("Keypair generated and written to", namePriv(key), "/", namePub(key)) +} diff --git a/app/conode/conode_test.go b/app/conode/conode_test.go new file mode 100644 index 0000000000..ee926a528b --- /dev/null +++ b/app/conode/conode_test.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "github.com/dedis/cothority/lib/dbg" + "io/ioutil" + "os" + "sync" + "testing" +) + +func TestBuild(t *testing.T) { + // Just testing that build is done correctly +} + +func TestMakeConfig(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 2) + os.Chdir("/tmp") + KeyGeneration("key1", "localhost:2000") + KeyGeneration("key2", "localhost:2010") + key1, _ := ioutil.ReadFile("key1.pub") + key2, _ := ioutil.ReadFile("key2.pub") + ioutil.WriteFile("hosts", []byte(fmt.Sprintf("%s%s", key1, key2)), 0666) + Build("hosts", 2, "config.toml") + + wg := sync.WaitGroup{} + wg.Add(2) + maxRounds = 3 + go (func() { + Run("config.toml", "key1") + wg.Done() + })() + go (func() { + Run("config.toml", "key2") + wg.Done() + })() + wg.Wait() +} diff --git a/app/conode/cross_compile.sh b/app/conode/cross_compile.sh new file mode 100755 index 0000000000..600d39e7a7 --- /dev/null +++ b/app/conode/cross_compile.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +if [ ! "$1" ]; then + echo Please give a version-number + exit +fi +VERSION=$1 + +echo Cross-compiling for platforms and cpus + +compile(){ +BINARY=$1 +echo Compiling $BINARY +rm -rf conode-bin +mkdir conode-bin +for GOOS in linux darwin; do + for GOARCH in amd64 386; do + echo Doing $GOOS / $GOARCH + export GOOS GOARCH + go build -o conode-bin/$BINARY-$GOOS-$GOARCH . + done +done +rm conode-bin/$BINARY-darwin-386 +} + +compile conode +cd stamp +compile stamp +cd .. +mv stamp/conode-bin/* conode-bin +rmdir stamp/conode-bin + +echo Copying scripts to the binary-directory +cp start-conode.sh conode-bin +cp real/config.toml conode-bin +TAR=conode-$VERSION.tar.gz + +echo Creating $TAR +tar cf $TAR -C conode-bin . 
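# Example invocation (hypothetical version number): "./cross_compile.sh 0.5.7"
# builds conode and stamp for linux/{amd64,386} and darwin/amd64 and packs them,
# together with start-conode.sh and config.toml, into conode-0.5.7.tar.gz.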
diff --git a/app/coll_stamp/data/exconf1.json b/app/conode/data/exconf1.json similarity index 100% rename from app/coll_stamp/data/exconf1.json rename to app/conode/data/exconf1.json diff --git a/app/coll_stamp/data/exconf2.json b/app/conode/data/exconf2.json similarity index 100% rename from app/coll_stamp/data/exconf2.json rename to app/conode/data/exconf2.json diff --git a/app/conode/exit.go b/app/conode/exit.go new file mode 100644 index 0000000000..919e349bba --- /dev/null +++ b/app/conode/exit.go @@ -0,0 +1,54 @@ +package main + +/* Sends the 'exit'-command to a certain conode in the hope that he will stop, + * update to the newest version, and restart. + */ + +import ( + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" +) + +func init() { + command := cli.Command{ + Name: "exit", + Aliases: []string{"x"}, + Usage: "Stops the given CoNode", + Description: "Basically it will statically generate the tree, with the respective names and public key.", + ArgsUsage: "ADDRESS: the IPv4[:PORT] of the CoNode to exit.", + Action: func(c *cli.Context) { + if c.Args().First() == "" { + dbg.Fatal("You must provide an address") + } + ForceExit(c.Args().First()) + }, + } + registerCommand(command) +} + +// ForceExit connects to the stamp-port of the conode and asks him to exit +func ForceExit(address string) { + add, err := cliutils.VerifyPort(address, conode.DefaultPort+1) + if err != nil { + dbg.Fatal("Couldn't convert", address, "to a IP:PORT") + } + + conn := coconet.NewTCPConn(add) + err = conn.Connect() + if err != nil { + dbg.Fatal("Error when getting the connection to the host:", err) + } + dbg.Lvl1("Connected to", add) + msg := &conode.TimeStampMessage{ + Type: conode.StampExit, + } + + dbg.Lvl1("Asking to exit") + err = conn.PutData(msg) + if err != nil { + dbg.Fatal("Couldn't send exit-message to server:", err) + } +} diff --git a/app/conode/exit_conodes.sh b/app/conode/exit_conodes.sh new file mode 100755 index 0000000000..bf9b4cfc5d --- /dev/null +++ b/app/conode/exit_conodes.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +HOSTS=$( cat config.toml | grep Hosts | sed -e "s/.*\[\"\(.*\)\"\]/\1/" | perl -pe "s/\", \"/\n/g" ) + +echo Going to ask all servers to exit +for h in $HOSTS; do + # Suppose the last digit is 0 and we replace it by 1 + hp1=$( echo $h | sed -e "s/0\$/1/" ) + echo Asking server $hp1 to exit + ./conode exit $hp1 +done diff --git a/app/conode/packets.go b/app/conode/packets.go new file mode 100644 index 0000000000..282854f148 --- /dev/null +++ b/app/conode/packets.go @@ -0,0 +1,33 @@ +package main + +// This file regroups different packets used throughout the process +const maxSize = 256 + +// A packet containing some info about our system +type SystemPacket struct { + Soft int // the soft limits of number of connection the user can have + Hostname [maxSize]byte // the hostname of the machine so we can try pinging it out +} + +// A packet used to ACK a verification a protocol or whatever +// It contains a first Int to know of what kind of ACK are we talking about +// Then the second int represent the ACK status itself for this specific ACK +type Ack struct { + Type int32 + Code int32 +} + +// Theses consts represent the type of ACK we are reading +const ( + TYPE_SYS = iota // ACK for a SystemPacket +) + +// These consts are there for meaningful interpretation of the reponse ACK after +// an SystemPacket sent ;) +const ( + SYS_OK = iota 
// everything is fine + SYS_WRONG_HOST // hostname is not valid + SYS_WRONG_SOFT // soft limits is not enough or wrong. See development team. + SYS_WRONG_SIG // The signature sent after systempacket is not valid + SYS_EXIT // Exit the program - should automatically update and run +) diff --git a/app/conode/roundstats.go b/app/conode/roundstats.go new file mode 100644 index 0000000000..4086bcd2c8 --- /dev/null +++ b/app/conode/roundstats.go @@ -0,0 +1,46 @@ +package main + +import ( + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/sign" +) + +/* +ConodeStats implements a simple module that shows some statistics about the +actual connection. +*/ + +// The name type of this round implementation +const RoundStatsType = "conodestats" + +type RoundStats struct { + *conode.RoundStamperListener +} + +func init() { + sign.RegisterRoundFactory(RoundStatsType, + func(node *sign.Node) sign.Round { + return NewRoundStats(node) + }) +} + +func NewRoundStats(node *sign.Node) *RoundStats { + round := &RoundStats{} + round.RoundStamperListener = conode.NewRoundStamperListener(node) + return round +} + +func (round *RoundStats) Commitment(in []*sign.SigningMessage, out *sign.SigningMessage) error { + err := round.RoundStamperListener.Commitment(in, out) + return err +} + +func (round *RoundStats) SignatureBroadcast(in *sign.SigningMessage, out []*sign.SigningMessage) error { + err := round.RoundStamperListener.SignatureBroadcast(in, out) + if err == nil && round.IsRoot { + dbg.Lvlf1("This is round %d with %d messages - %d since start.", + round.RoundNbr, in.SBm.Messages, round.Node.Messages) + } + return err +} diff --git a/app/conode/run.go b/app/conode/run.go new file mode 100644 index 0000000000..ede69f568b --- /dev/null +++ b/app/conode/run.go @@ -0,0 +1,62 @@ +package main + +import ( + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" +) + +var maxRounds = -1 + +func init() { + command := cli.Command{ + Name: "run", + Aliases: []string{"r"}, + Usage: "Runs the CoNode and connects it to the cothority tree as specified in the config file", + Action: func(c *cli.Context) { + Run(c.String("config"), c.String("key")) + }, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "key, k", + Usage: "Basename of the files where reside the keys. If key = 'key'," + + "then conode will search through 'key.pub' and 'key.priv'", + Value: defaultKeyFile, + }, + cli.StringFlag{ + Name: "config, c", + Usage: "Configuration file of the cothority tree", + Value: defaultConfigFile, + }, + }, + } + registerCommand(command) +} + +// Run will launch the conode server. 
It takes a config file and a key file +// First parse the key + config file and then run the actual server +func Run(configFile, key string) { + var address string + // Read the global config + conf := &app.ConfigConode{} + if err := app.ReadTomlConfig(conf, configFile); err != nil { + dbg.Fatal("Could not read toml config:", err) + } + dbg.Lvl1("Configuration file read") + // Read the private / public keys + binded address + if sec, err := cliutils.ReadPrivKey(suite, namePriv(key)); err != nil { + dbg.Fatal("Error reading private key file:", err) + } else { + conf.Secret = sec + } + if pub, addr, err := cliutils.ReadPubKey(suite, namePub(key)); err != nil { + dbg.Fatal("Error reading public key file:", err) + } else { + conf.Public = pub + address = addr + } + peer := conode.NewPeer(address, conf) + peer.LoopRounds(RoundStatsType, maxRounds) +} diff --git a/app/conode/run_locally.sh b/app/conode/run_locally.sh new file mode 100755 index 0000000000..e6dfe85488 --- /dev/null +++ b/app/conode/run_locally.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +DBG=${2:-1} +KEY_DIR=local_keys +KEYS=$KEY_DIR/key +HOSTLIST=$KEY_DIR/hostlist +NUMBER=${1:-2} +rm -f $HOSTLIST + +rm -rf $KEY_DIR +mkdir $KEY_DIR + +for a in $( seq 1 $NUMBER ); do + PORT=$(( 2000 + ( $a - 1 ) * 10 )) + ./conode keygen localhost:$PORT -key $KEYS$a +done +cat $KEYS*.pub >> $HOSTLIST + +./conode build $HOSTLIST + +for a in $( seq 2 $NUMBER ); do + ./conode -debug $DBG run -key $KEYS$a & +done +./conode -debug $DBG run -key ${KEYS}1 diff --git a/app/conode/stamp/check_stampers.sh b/app/conode/stamp/check_stampers.sh new file mode 100755 index 0000000000..0857148250 --- /dev/null +++ b/app/conode/stamp/check_stampers.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +HOSTS=$( cat config.toml | grep Hosts | sed -e "s/.*\[\"\(.*\)\"\]/\1/" | perl -pe "s/\", \"/\n/g" ) + +echo Going to stamp from servers +for h in $HOSTS; do + echo Stamping using server $h + ./stamp sign stamp -server $h +done diff --git a/app/conode/stamp/stamp.go b/app/conode/stamp/stamp.go new file mode 100644 index 0000000000..dae219cf18 --- /dev/null +++ b/app/conode/stamp/stamp.go @@ -0,0 +1,212 @@ +/* +* Stamp - works together with a cothority-tree to sign a file. It can also verify +* that a signature is valid. +* +* # Signature +* For use in signature, run +* ```./stamp -stamp ``` +* It will connect to the stampserver running on the localhost. If you want to +* connect to another stampserver, you can give the address with the ```-server``` +* argument. +* At the end a file signature.sig will be generated which holds all necessary +* information necessary to check the signature. +* +* # Verification +* If you want to verify whether a file is correctly signed, you can run +* ```./stamp -verify ``` +* which will tell whether the signature is valid. If the file referenced in the +* file.sig is in the current directoy, it will also check it's hash. 
+ */ + +package main + +import ( + "bytes" + "encoding/base64" + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" + "io" + "os" +) + +// Default config file +const defaultConfigFile = "config.toml" + +// Default port where conodes listens +const defaultPort = "2001" + +// extension given to a signature file +const sigExtension = ".sig" + +// Our crypto-suite used in the program +var suite abstract.Suite + +// the configuration file of the cothority tree used +var conf *app.ConfigConode + +// The public aggregate X0 +var public_X0 abstract.Point + +// If the server is only given with it's hostname, it supposes that the stamp +// server is run on port 2001. Else you will have to add the port yourself. +func main() { + stamp := cli.NewApp() + stamp.Name = "collective" + stamp.Usage = "Used to sign files to a cothority tree and to verify issued signatures" + stamp.Version = "0.0.1" + stamp.Authors = []cli.Author{ + { + Name: "Linus Gasser", + Email: "linus.gasser@epfl.ch", + }, + { + Name: "nikkolasg", + Email: "", + }, + } + stamp.Commands = []cli.Command{ + { + Name: "sign", + Aliases: []string{"s"}, + Usage: "Request a signed time-stamp on a file. Provide with FILE.", + Action: func(c *cli.Context) { + dbg.Lvl1("Requesting a timestamp on a cothority tree") + server := c.String("server") + StampFile(c.Args().First(), server) + }, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "server, s", + Value: "", + Usage: "Server in the cothority tree we wish to contact. If not given, it will select a random one.", + }, + }, + }, + { + Name: "check", + Aliases: []string{"c"}, + Usage: "Verify a given signature against a file", + ArgsUsage: "FILE is the name of the file. Signature file should be file.sig otherwise use the sig option", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "sig", + Value: "", + Usage: "signature file to verify", + }, + }, + Action: func(c *cli.Context) { + sigFile := c.String("sig") + if sigFile == "" { + sigFile = c.Args().First() + sigExtension + } + if VerifyFileSignature(c.Args().First(), sigFile) { + dbg.Lvl1("Verification OK") + } else { + dbg.Lvl1("Verification of file failed") + } + }, + }, + } + stamp.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "config, c", + Value: defaultConfigFile, + Usage: "Configuration file of the cothority tree we are using.", + }, + cli.IntFlag{ + Name: "debug, d", + Value: 1, + Usage: "debug level from 1 (major operations) to 5 (very noisy text)", + }, + } + // Read the config file before + stamp.Before = func(c *cli.Context) error { + var cf string = c.String("config") + if c.String("config") == "" { + cf = defaultConfigFile + } + conf = new(app.ConfigConode) + err := app.ReadTomlConfig(conf, cf) + suite = app.GetSuite(conf.Suite) + pub, _ := base64.StdEncoding.DecodeString(conf.AggPubKey) + suite.Read(bytes.NewReader(pub), &public_X0) + + // sets the right debug options + dbg.DebugVisible = c.GlobalInt("debug") + return err + } + + stamp.Run(os.Args) +} + +// Takes a 'file' to hash and being stamped at the 'server'. 
The output of the +// signing will be written to 'file'.sig +func StampFile(file, server string) { + // Create the hash of the file and send it over the net + myHash := hashFile(file) + + stamper, err := conode.NewStamp("config.toml") + if err != nil { + dbg.Fatal("Couldn't setup stamper:", err) + } + tsm, err := stamper.GetStamp(myHash, server) + if err != nil { + dbg.Fatal("Stamper didn't succeed:", err) + } + + if err := tsm.Srep.Save(file + sigExtension); err != nil { + dbg.Fatal("Could not write signature file:", err) + } + dbg.Lvl1("Signature file", file+".sig", "written.") + + dbg.Lvl1("Stamp OK - signature file", file+".sig", "written.") +} + +// Verify signature takes a file name and the name of the signature file +// if signature file is empty ( sigFile == ""), then the signature file is +// simply the name of the file appended with ".sig" extension. +func VerifyFileSignature(file, sigFile string) bool { + if file == "" { + dbg.Fatal("Can not verify anything with an empty file name !") + } + + // by default + if sigFile == "" { + sigFile = file + sigExtension + } + // read the sig + signature := conode.StampSignature{ + SuiteStr: suite.String(), + } + if err := signature.Open(sigFile); err != nil { + dbg.Fatal("Couldn't read signature-file", sigFile, ":", err) + } + hash := hashFile(file) + dbg.Print(base64.StdEncoding.EncodeToString(hash)) + // Then verify the proper signature + return conode.VerifySignature(suite, &signature, public_X0, hash) +} + +// Takes a file to be hashed - reads in chunks of 1MB +func hashFile(name string) []byte { + hash := suite.Hash() + file, err := os.Open(name) + if err != nil { + dbg.Fatal("Couldn't open file", name) + } + + buflen := 1024 * 1024 + buf := make([]byte, buflen) + read := buflen + for read == buflen { + read, err = file.Read(buf) + if err != nil && err != io.EOF { + dbg.Fatal("Error while reading bytes") + } + hash.Write(buf) + } + return hash.Sum(nil) +} diff --git a/app/conode/stamp/stamp_test.go b/app/conode/stamp/stamp_test.go new file mode 100644 index 0000000000..f421ab4e82 --- /dev/null +++ b/app/conode/stamp/stamp_test.go @@ -0,0 +1,7 @@ +package main_test + +import "testing" + +func TestBuild(t *testing.T) { + +} diff --git a/app/conode/start-conode.sh b/app/conode/start-conode.sh new file mode 100755 index 0000000000..b8cabce0a6 --- /dev/null +++ b/app/conode/start-conode.sh @@ -0,0 +1,94 @@ +#!/bin/bash + +main(){ + echo Starting conode from the correct cpu and arch + if [ ! -x conode ]; then + search_arch + fi + case "$1" in + setup) + if [ -f key.pub ]; then + echo -e "\n*** Key.pub already exists - if you want to re-create, please delete it first\n" + else + ./conode keygen $2 + cat key.pub | mail linus.gasser@epfl.ch + fi + cat key.pub + ./conode validate + if [ "$?" = "1" ]; then + echo Received exit-command - will update and run + update + exec ./start-conode.sh run + fi + ;; + run) + if [ ! -f config.toml ]; then + echo "Didn't find 'config.toml' - searching in update" + update + if [ ! -f config.toml ]; then + echo "Still didn't find config.toml - please copy it first here" + echo + exit 1 + fi + fi + echo Running conode + ./conode run + echo Updating + update + echo Sleeping a bit + sleep 10 + exec ./start-conode.sh run + ;; + *) + echo Usage: + echo $0 setup address + echo or + echo $0 run + echo + ;; + esac +} + +update(){ + RELEASE=$( wget -q -O- https://github.com/dedis/cothority/releases/latest | grep DeDiS/cothority/releases/download | sed -e "s/.*href=.\(.*\). 
rel.*/\1/" ) + TGZ=$( basename $RELEASE ) + if [ -e $TGZ ]; then + echo $RELEASE already here + else + echo Getting $RELEASE + wget -q https://github.com/$RELEASE + echo Untarring + tar xf $TGZ + fi +} + +run_loop(){ + pkill -f conode + if [ $( which screen ) ]; then + screen -S conode -dm ./conode $@ & + else + nohup ./conode $@ & + rm nohup.out + fi +} + +search_arch(){ + echo searching for correct conode + for GOOS in linux darwin windows netbsd; do + for GOARCH in amd64 386 arm; do + CONODE=conode-$GOOS-$GOARCH + if ./$CONODE 2&>/dev/null; then + cat - > conode < stamp + chmod a+x conode stamp + echo Found $CONODE to run here + return + fi + done + done +} + +main $@ diff --git a/app/conode/validate.go b/app/conode/validate.go new file mode 100644 index 0000000000..44188a3b24 --- /dev/null +++ b/app/conode/validate.go @@ -0,0 +1,155 @@ +package main + +import ( + "bytes" + "net" + "os" + + "github.com/codegangsta/cli" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/anon" + "github.com/dedis/crypto/config" + "github.com/dedis/crypto/random" +) + +// This file handles the validation process +// Basically it listen on default port 2000 +// When a connection occurs, it will create a message containing some system +// stats such as the soft rlimits, +// Signs it, and returns the message + signature +// Then wait for an ACK or FIN msg. An ACK means all went well +// An FIN msg means something went wrong and you should contact +// the development team about it. + +func init() { + command := cli.Command{ + Name: "validate", + Aliases: []string{"v"}, + Usage: "Starts validation mode of the CoNode", + Description: "The CoNode will be running for a whole day during which" + + "the development team will run repeated checks to verify " + + "that your server is eligible for being incorporated in the cothority tree.", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "key, k", + Usage: "KEY: the basename of where to find the public / private keys of this host to be verified.", + Value: defaultKeyFile, + }, + }, + Action: func(c *cli.Context) { + Validation(c.String("key")) + }, + } + registerCommand(command) +} + +// Main entry point of the validation mode +func Validation(keyFile string) { + + // First, retrieve our public / private key pair + address for which it has + // been created + kp, addr := readKeyFile(keyFile) + // Then wait for the connection + + // Accept incoming connections + global, _ := cliutils.GlobalBind(addr) + ln, err := net.Listen("tcp", global) + if err != nil { + dbg.Fatal("Could not listen for validation:", err) + } + + var conn net.Conn + for ; ; conn.Close() { + dbg.Lvl1("Waiting for verifier connection ...") + // Accept the one + conn, err = ln.Accept() + if err != nil { + dbg.Fatal("Could not accept an input connection:", err) + } + + dbg.Lvl1("Verifier connected! 
Validation in progress...") + // Craft the message about our system,signs it, and then send the whole + msg := createSystemPacket() + signature := signSystemPacket(msg, kp) + // We also send the size of the signature for the receiver to know how much + // byte he is expecting + if err := suite.Write(conn, msg, len(signature), signature); err != nil { + dbg.Lvl1("Error when writing the system packet to the connection:", err) + continue + } + + // Receive the response + var ack Ack + if err := suite.Read(conn, &ack); err != nil { + dbg.Lvl1("Error when reading the response:", err) + } + + var er string = "Validation is NOT correct, something is wrong about your " + // All went fine + dbg.Lvl2("Received code", ack) + switch ack.Code { + default: + dbg.Lvl1("Validation received unknown ACK: type =", ack.Type, "Code =", ack.Code) + continue + case SYS_OK: + dbg.Lvl1("Validation finished successfully! You should receive an email from development team soon.") + case SYS_WRONG_HOST: + dbg.Lvl1(er + "HOSTNAME") + case SYS_WRONG_SOFT: + dbg.Lvl1(er + "SOFT limits") + case SYS_WRONG_SIG: + dbg.Lvl1(er + "signature!") + case SYS_EXIT: + dbg.Lvl1("Exiting - need to update to get config.toml") + os.Exit(1) + } + } +} + +// createSystemMessage will return a packet containing one or many information +// about the system. It is version dependant. +func createSystemPacket() SystemPacket { + host := "myhostname" + arr := [maxSize]byte{} + copy(arr[:], host) + return SystemPacket{ + Soft: 10000, + Hostname: arr, + } +} + +// signSystemPacket will sign the packet using the crypto library with package +// anon. No anonymity set here. Must pass the private / public keys to sign. +func signSystemPacket(sys SystemPacket, kp config.KeyPair) []byte { + var buf bytes.Buffer + if err := suite.Write(&buf, sys); err != nil { + dbg.Fatal("Could not sign the system packet:", err) + } + // setup + X := make([]abstract.Point, 1) + mine := 0 + X[mine] = kp.Public + // The actual signing + sig := anon.Sign(suite, random.Stream, buf.Bytes(), anon.Set(X), nil, mine, kp.Secret) + return sig +} + +// readKeyPair will read both private and public files +// and returns a keypair containing the respective private and public keys +func readKeyFile(keyFile string) (config.KeyPair, string) { + sec, err := cliutils.ReadPrivKey(suite, namePriv(keyFile)) + if err != nil { + dbg.Fatal("Could not read private key:", err) + } + pub, addr, err := cliutils.ReadPubKey(suite, namePub(keyFile)) + if err != nil { + dbg.Fatal("Could not read public key:", err) + } + return config.KeyPair{ + Suite: suite, + Secret: sec, + Public: pub, + }, addr +} diff --git a/app/lib/measure.go b/app/lib/measure.go deleted file mode 100644 index 592f1351f5..0000000000 --- a/app/lib/measure.go +++ /dev/null @@ -1,5 +0,0 @@ -package lib - -type Measure struct { - Message string -} \ No newline at end of file diff --git a/app/naive/crypto.go b/app/naive/crypto.go new file mode 100644 index 0000000000..a34f5369ed --- /dev/null +++ b/app/naive/crypto.go @@ -0,0 +1,63 @@ +package main + +import ( + "errors" + . 
"github.com/dedis/cothority/lib/network" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/cipher" +) + +// Returns a secret that depends on on a message and a point +func hashSchnorr(suite abstract.Suite, message []byte, p abstract.Point) abstract.Secret { + pb, _ := p.MarshalBinary() + c := suite.Cipher(pb) + c.Message(nil, nil, message) + return suite.Secret().Pick(c) +} + +// This simplified implementation of Schnorr Signatures is based on +// crypto/anon/sig.go +// The ring structure is removed and +// The anonimity set is reduced to one public key = no anonimity +func SchnorrSign(suite abstract.Suite, random cipher.Stream, message []byte, + privateKey abstract.Secret) BasicSignature { + + // Create random secret v and public point commitment T + v := suite.Secret().Pick(random) + T := suite.Point().Mul(nil, v) + + // Create challenge c based on message and T + c := hashSchnorr(suite, message, T) + + // Compute response r = v - x*c + r := suite.Secret() + r.Mul(privateKey, c).Sub(v, r) + + // Return verifiable si,gnature {c, r} + // Verifier will be able to compute v = r + x*c + // And check that hashElgamal for T and the message == c + sig := BasicSignature{Chall: c, Resp: r} + return sig +} + +func SchnorrVerify(suite abstract.Suite, message []byte, + signature BasicSignature) error { + publicKey := signature.Pub + r := signature.Resp + c := signature.Chall + + // Compute base**(r + x*c) == T + var P, T abstract.Point + P = suite.Point() + T = suite.Point() + T.Add(T.Mul(nil, r), P.Mul(publicKey, c)) + + // Verify that the hash based on the message and T + // matches the challange c from the signature + c = hashSchnorr(suite, message, T) + if !c.Equal(signature.Chall) { + return errors.New("invalid signature") + } + + return nil +} diff --git a/app/naive/naive.go b/app/naive/naive.go new file mode 100644 index 0000000000..6dc4d82893 --- /dev/null +++ b/app/naive/naive.go @@ -0,0 +1,19 @@ +package main + +import ( + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/app" +) + +func main() { + + conf := new(app.NaiveConfig) + app.ReadConfig(conf) + + if app.RunFlags.Hostname == "" { + log.Fatal("Hostname empty: Abort") + } + + RunServer(conf) + //monitor.End() +} diff --git a/app/naive/naive_test.go b/app/naive/naive_test.go new file mode 100644 index 0000000000..ab86a87dc3 --- /dev/null +++ b/app/naive/naive_test.go @@ -0,0 +1,9 @@ +package main + +import ( + "testing" +) + +func TestBuild(t *testing.T) { + // Just testing that build is done correctly +} diff --git a/app/naive/peer.go b/app/naive/peer.go new file mode 100644 index 0000000000..d636ea31e0 --- /dev/null +++ b/app/naive/peer.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "github.com/dedis/cothority/lib/dbg" + net "github.com/dedis/cothority/lib/network" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/edwards" +) + +// Impl of the "naive sign" protocol +// i.e. 
leader collects every signature from every other peers + +const ServRole string = "server" +const LeadRole string = "leader" + +const msgMaxLenght int = 256 + +var suite abstract.Suite + +// Set up some global variables such as the different messages used during +// this protocol and the general suite to be used +func init() { + suite = edwards.NewAES128SHA256Ed25519(false) + net.Suite = suite +} + +// the struct representing the role of leader +type Peer struct { + net.Host + + // the longterm key of the peer + priv abstract.Secret + pub abstract.Point + + // role is server or leader + role string + + // leader part + Conns []net.Conn + Pubs []abstract.Point + Signatures []net.BasicSignature +} + +func (l *Peer) String() string { + return fmt.Sprintf("%s %s", l.Host.Name(), l.role) +} + +// Will send the message to be signed to everyone +func (l *Peer) SendMessage(msg []byte, c net.Conn) { + if len(msg) > msgMaxLenght { + dbg.Fatal("Tried to send a too big message to sign. Abort") + } + ms := new(net.MessageSigning) + ms.Length = len(msg) + ms.Msg = msg + err := c.Send(*ms) + if err != nil { + dbg.Fatal("Could not send message to", c.PeerName()) + } +} + +// Wait for the leader to receive the generated signatures from the servers +func (l *Peer) ReceiveBasicSignature(c net.Conn) *net.BasicSignature { + + appMsg, err := c.Receive() + if err != nil { + dbg.Fatal(l.String(), "error decoding message from", c.PeerName()) + } + if appMsg.MsgType != net.BasicSignatureType { + dbg.Fatal(l.String(), "Received an unknown type:", appMsg.MsgType.String()) + } + bs := appMsg.Msg.(net.BasicSignature) + return &bs +} + +func (l *Peer) Signature(msg []byte) *net.BasicSignature { + rand := suite.Cipher([]byte("cipher")) + + sign := SchnorrSign(suite, rand, msg, l.priv) + sign.Pub = l.pub + return &sign +} + +func NewPeer(host net.Host, role string, secret abstract.Secret, + public abstract.Point) *Peer { + return &Peer{ + role: role, + Host: host, + priv: secret, + pub: public, + } +} diff --git a/app/naive/server.go b/app/naive/server.go new file mode 100644 index 0000000000..911819b48a --- /dev/null +++ b/app/naive/server.go @@ -0,0 +1,196 @@ +package main + +/* + * This is a simple (naive) implementation of a multi-signature protocol + * where the *leader* sends the message to every *signer* who signs it and + * returns the result to the server. + */ + +import ( + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + net "github.com/dedis/cothority/lib/network" + "time" +) + +// Searches for the index in the hostlist and decides if we're the leader +// or one of the clients +func RunServer(conf *app.NaiveConfig) { + indexPeer := -1 + for i, h := range conf.Hosts { + if h == app.RunFlags.Hostname { + indexPeer = i + } + } + if indexPeer == -1 { + dbg.Fatal("Could not find its own hostname. 
Abort") + } + + if indexPeer == 0 { + dbg.Lvl3("Launching a naiv_sign.: Leader", app.RunFlags.Hostname) + GoLeader(conf) + monitor.End() + } else { + dbg.Lvl3("Launching a naiv_sign: Signer", app.RunFlags.Hostname) + GoSigner(conf) + } +} + +// This is the leader who waits for all connections and then sends the +// message to be signed +func GoLeader(conf *app.NaiveConfig) { + + host := net.NewTcpHost(app.RunFlags.Hostname) + key := cliutils.KeyPair(suite) + leader := NewPeer(host, LeadRole, key.Secret, key.Public) + + // Setting up the connections + // notably to the monitoring process + if app.RunFlags.Logger != "" { + monitor.ConnectSink(app.RunFlags.Logger) + } else { + monitor.EnableMeasure(false) + } + msg := []byte("Hello World\n") + // Listen for connections + dbg.Lvl3(leader.String(), "making connections ...") + // each conn will create its own channel to be used to handle rounds + roundChans := make(chan chan chan *net.BasicSignature) + // Send the message to be signed + proto := func(c net.Conn) { + // make the chan that will receive a new chan + // for each round where to send the signature + roundChan := make(chan chan *net.BasicSignature) + roundChans <- roundChan + n := 0 + // wait for the next round + for sigChan := range roundChan { + dbg.Lvl3(leader.String(), "Round", n, "sending message", msg, "to signer", c.PeerName()) + leader.SendMessage(msg, c) + dbg.Lvl3(leader.String(), "Round", n, "receivng signature from signer", c.PeerName()) + sig := leader.ReceiveBasicSignature(c) + sigChan <- sig + n += 1 + } + c.Close() + dbg.Lvl3(leader.String(), "closed connection with signer", c.PeerName()) + } + + // Connecting to the signer + setup := monitor.NewMeasure("setup") + go leader.Listen(app.RunFlags.Hostname, proto) + dbg.Lvl3(leader.String(), "Listening for channels creation..") + // listen for round chans + signatures for each round + masterRoundChan := make(chan chan *net.BasicSignature) + roundChanns := make([]chan chan *net.BasicSignature, 0) + numberHosts := len(conf.Hosts) + // Make the "setup" of channels + for { + ch := <-roundChans + roundChanns = append(roundChanns, ch) + //Received round channels from every connections- + if len(roundChanns) == numberHosts-1 { + // make the Fanout => master will send to all + go func() { + // send the new SignatureChannel to every conn + for newSigChan := range masterRoundChan { + for i, _ := range roundChanns { + go func(j int) { roundChanns[j] <- newSigChan }(i) + } + } + //close when finished + for _, c := range roundChanns { + close(c) + } + }() + break + } + } + setup.Measure() + dbg.Lvl3(leader.String(), "got all channels ready => starting the", conf.Rounds, "rounds") + + // Starting to run the simulation for conf.Rounds rounds + + roundM := monitor.NewMeasure("round") + for round := 0; round < conf.Rounds; round++ { + // Measure calculation time + calc := monitor.NewMeasure("calc") + dbg.Lvl1("Server starting round", round+1) + n := 0 + faulty := 0 + // launch a new round + connChan := make(chan *net.BasicSignature) + masterRoundChan <- connChan + + // Wait each signatures + sigs := make([]*net.BasicSignature, 0) + for n < numberHosts-1 { + bs := <-connChan + sigs = append(sigs, bs) + n += 1 + } + // All sigs reeived <=> all calcs are done + calc.Measure() + + // verify each signatures + if conf.SkipChecks { + dbg.Lvl3("Skipping check for round", round) + } else { + // Measure verificationt time + verify := monitor.NewMeasure("verify") + for _, sig := range sigs { + if err := SchnorrVerify(suite, msg, *sig); err != nil { + 
faulty += 1 + dbg.Lvl1(leader.String(), "Round", round, "received a faulty signature!") + } else { + dbg.Lvl3(leader.String(), "Round", round, "received Good signature") + } + } + verify.Measure() + } + roundM.Measure() + dbg.Lvl3(leader.String(), "Round", round, "received", len(conf.Hosts)-1, "signatures (", + faulty, "faulty sign)") + } + + // Close down all connections + close(masterRoundChan) + dbg.Lvl3(leader.String(), "has done all rounds") +} + +// The signer connects to the leader and then waits for a message to be +// signed +func GoSigner(conf *app.NaiveConfig) { + // Wait for leader to be ready + time.Sleep(2 * time.Second) + host := net.NewTcpHost(app.RunFlags.Hostname) + key := cliutils.KeyPair(suite) + signer := NewPeer(host, ServRole, key.Secret, key.Public) + dbg.Lvl3(signer.String(), "will contact leader", conf.Hosts[0]) + l := signer.Open(conf.Hosts[0]) + dbg.Lvl3(signer.String(), "is connected to leader", l.PeerName()) + + // make the protocol for each round + for round := 0; round < conf.Rounds; round++ { + // Receive message + m, err := l.Receive() + dbg.Lvl3(signer.String(), "round", round, "received the message to be signed from the leader") + if err != nil { + dbg.Fatal(signer.String(), "round", round, "received error waiting msg") + } + if m.MsgType != net.MessageSigningType { + dbg.Fatal(app.RunFlags.Hostname, "round", round, "wanted to receive a msg to sign but..", + m.MsgType.String()) + } + msg := m.Msg.(net.MessageSigning).Msg + dbg.Lvl3(signer.String(), "round", round, "received msg:", msg[:]) + // Generate signature & send + s := signer.Signature(msg[:]) + l.Send(*s) + dbg.Lvl3(signer.String(), "round", round, "sent the signature to leader") + } + l.Close() + dbg.Lvl3(app.RunFlags.Hostname, "Finished") +} diff --git a/app/ntree/crypto.go b/app/ntree/crypto.go new file mode 100644 index 0000000000..81197e858c --- /dev/null +++ b/app/ntree/crypto.go @@ -0,0 +1,63 @@ +package main + +import ( + "errors" + net "github.com/dedis/cothority/lib/network" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/cipher" +) + +// Returns a secret that depends on on a message and a point +func hashSchnorr(suite abstract.Suite, message []byte, p abstract.Point) abstract.Secret { + pb, _ := p.MarshalBinary() + c := suite.Cipher(pb) + c.Message(nil, nil, message) + return suite.Secret().Pick(c) +} + +// This simplified implementation of Schnorr Signatures is based on +// crypto/anon/sig.go +// The ring structure is removed and +// The anonimity set is reduced to one public key = no anonimity +func SchnorrSign(suite abstract.Suite, random cipher.Stream, message []byte, + privateKey abstract.Secret) net.BasicSignature { + + // Create random secret v and public point commitment T + v := suite.Secret().Pick(random) + T := suite.Point().Mul(nil, v) + + // Create challenge c based on message and T + c := hashSchnorr(suite, message, T) + + // Compute response r = v - x*c + r := suite.Secret() + r.Mul(privateKey, c).Sub(v, r) + + // Return verifiable si,gnature {c, r} + // Verifier will be able to compute v = r + x*c + // And check that hashElgamal for T and the message == c + sig := net.BasicSignature{Chall: c, Resp: r} + return sig +} + +func SchnorrVerify(suite abstract.Suite, message []byte, + signature net.BasicSignature) error { + publicKey := signature.Pub + r := signature.Resp + c := signature.Chall + + // Compute base**(r + x*c) == T + var P, T abstract.Point + P = suite.Point() + T = suite.Point() + T.Add(T.Mul(nil, r), P.Mul(publicKey, c)) + + // Verify that the 
hash based on the message and T + // matches the challange c from the signature + c = hashSchnorr(suite, message, T) + if !c.Equal(signature.Chall) { + return errors.New("invalid signature") + } + + return nil +} diff --git a/app/ntree/ntree.go b/app/ntree/ntree.go new file mode 100644 index 0000000000..e5e80f2c6e --- /dev/null +++ b/app/ntree/ntree.go @@ -0,0 +1,53 @@ +package main + +import ( + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/dbg" + "io/ioutil" + "os" + "time" +) + +func main() { + + conf := new(app.NTreeConfig) + app.ReadConfig(conf) + + // we must know who we are + if app.RunFlags.Hostname == "" { + log.Fatal("Hostname empty: Abort") + } + + own, depth := conf.Tree.FindByName(app.RunFlags.Hostname, 0) + if depth == 0 { + // i.e. we are root + conf.Root = true + } + if own == nil { + dbg.Fatal("Could not find its name in the tree", app.RunFlags.Hostname) + } + conf.Tree = own + conf.Name = own.Name + // Wait for everybody to be ready before going on + ioutil.WriteFile("coll_stamp_up/up"+app.RunFlags.Hostname, []byte("started"), 0666) + for { + _, err := os.Stat("coll_stamp_up") + if err == nil { + files, _ := ioutil.ReadDir("coll_stamp_up") + dbg.Lvl4(app.RunFlags.Hostname, "waiting for others to finish", len(files)) + time.Sleep(time.Second) + } else { + break + } + } + dbg.Lvl2(app.RunFlags.Hostname, "thinks everybody's here") + + switch app.RunFlags.Mode { + case "client": + log.Panic("No client mode") + case "server": + RunServer(conf) + } + +} diff --git a/app/ntree/ntree_test.go b/app/ntree/ntree_test.go new file mode 100644 index 0000000000..ab86a87dc3 --- /dev/null +++ b/app/ntree/ntree_test.go @@ -0,0 +1,9 @@ +package main + +import ( + "testing" +) + +func TestBuild(t *testing.T) { + // Just testing that build is done correctly +} diff --git a/app/ntree/peer.go b/app/ntree/peer.go new file mode 100644 index 0000000000..24eb0dc661 --- /dev/null +++ b/app/ntree/peer.go @@ -0,0 +1,87 @@ +package main + +import ( + "fmt" + "github.com/dedis/cothority/lib/dbg" + net "github.com/dedis/cothority/lib/network" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/edwards" +) + +const msgMaxLenght int = 256 + +// Treee terminology +const LeadRole = "root" +const ServRole = "node" + +var suite abstract.Suite + +// Set up some global variables such as the different messages used during +// this protocol and the general suite to be used +func init() { + suite = edwards.NewAES128SHA256Ed25519(false) + net.Suite = suite +} + +// the struct representing the role of leader +type Peer struct { + net.Host + + // the longterm key of the peer + priv abstract.Secret + pub abstract.Point + + // role is server or leader + role string + + // leader part + Conns []net.Conn + Pubs []abstract.Point + Signatures []net.BasicSignature +} + +func (l *Peer) String() string { + return fmt.Sprintf("%s (%s)", l.Host.Name(), l.role) +} + +func (l *Peer) Signature(msg []byte) *net.BasicSignature { + rand := suite.Cipher([]byte("cipher")) + + sign := SchnorrSign(suite, rand, msg, l.priv) + sign.Pub = l.pub + return &sign +} + +func (l *Peer) ReceiveMessage(c net.Conn) net.MessageSigning { + app, err := c.Receive() + if err != nil { + dbg.Fatal(l.String(), "could not receive message from", c.PeerName()) + + } + if app.MsgType != net.MessageSigningType { + dbg.Fatal(l.String(), "MS error: received", app.MsgType.String(), "from", c.PeerName()) + } + return app.Msg.(net.MessageSigning) +} + +func (l *Peer) ReceiveListBasicSignature(c 
net.Conn) net.ListBasicSignature { + app, err := c.Receive() + if err != nil { + dbg.Fatal(l.String(), "could not receive listbasicsig from", c.PeerName()) + } + + if app.MsgType != net.ListBasicSignatureType { + dbg.Fatal(l.String(), "LBS error: received", app.MsgType.String(), "from", c.PeerName()) + } + return app.Msg.(net.ListBasicSignature) + +} +func NewPeer(host net.Host, role string, secret abstract.Secret, + public abstract.Point) *Peer { + return &Peer{ + role: role, + Host: host, + priv: secret, + pub: public, + } +} diff --git a/app/ntree/sign.go b/app/ntree/sign.go new file mode 100644 index 0000000000..7f38839380 --- /dev/null +++ b/app/ntree/sign.go @@ -0,0 +1,345 @@ +package main + +import ( + "sync" + "sync/atomic" + + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + net "github.com/dedis/cothority/lib/network" +) + +func RunServer(conf *app.NTreeConfig) { + if conf.Root { + RunRoot(conf) + monitor.End() + } else { + RunPeer(conf) + //RunServer2(conf) + } +} + +func RunRoot(conf *app.NTreeConfig) { + host := net.NewTcpHost(app.RunFlags.Hostname) + key := cliutils.KeyPair(suite) + + peer := NewPeer(host, LeadRole, key.Secret, key.Public) + dbg.Lvl3(peer.String(), "Up and will make connections...") + + // monitor + if app.RunFlags.Logger == "" { + monitor.EnableMeasure(false) + } else { + if err := monitor.ConnectSink(app.RunFlags.Logger); err != nil { + dbg.Fatal(peer.String(), "could not connect to the monitor:", err) + } + } + + // msg to be sent + signed + msg := []byte("Hello World") + + // make setup measurement + setup := monitor.NewMeasure("setup") + + // masterRoundChan is used to tell that everyone is ready + masterRoundChan := make(chan chan chan *net.ListBasicSignature) + // Open connection for each children + for _, c := range conf.Tree.Children { + dbg.Lvl3(peer.String(), "will connect to children", c.Name) + + connPeer := peer.Open(c.Name) + if connPeer == nil { + dbg.Fatal(peer.String(), "Could not open connection to child", c.Name) + } + // then start Root protocol + go func(conn net.Conn) { + dbg.Lvl3(peer.String(), "connected to children", conn.PeerName()) + roundSigChan := make(chan chan *net.ListBasicSignature) + // notify we are ready to begin + masterRoundChan <- roundSigChan + // each rounds... 
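+ // Each iteration of the loop below handles one round for this child: the main + // round-loop further down hands over a fresh lsigChan, the message is forwarded + // to the child, and its bundled ListBasicSignature is pushed back on that channel.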
+ for lsigChan := range roundSigChan { + dbg.Lvl4(peer.String(), "starting new round with", conn.PeerName()) + m := net.MessageSigning{ + Length: len(msg), + Msg: msg, + } + // send msg to children + err := conn.Send(m) + if err != nil { + dbg.Fatal(peer.String(), "could not send message to children", conn.PeerName(), ":", err) + } + dbg.Lvl3(peer.String(), "sent message to children", conn.PeerName()) + // Receive bundled signatures + sig, err := conn.Receive() + if err != nil { + dbg.Fatal(peer.String(), "could not received bundled signature from", conn.PeerName(), ":", err) + } + if sig.MsgType != net.ListBasicSignatureType { + dbg.Fatal(peer.String(), "received a wrong packet type from", conn.PeerName(), ":", sig.MsgType.String()) + } + // Then pass them on + sigs := sig.Msg.(net.ListBasicSignature) + lsigChan <- &sigs + dbg.Lvl3(peer.String(), "Received list of signatures from child", conn.PeerName()) + } + }(connPeer) + } + // First collect every "ready-connections" + children := make([]chan chan *net.ListBasicSignature, 0) + for round := range masterRoundChan { + children = append(children, round) + if len(children) == len(conf.Tree.Children) { + dbg.Lvl3(peer.String(), "collected each children channels") + break + } + } + close(masterRoundChan) + setup.Measure() + + // Then for each rounds tell them to start the protocol + round := monitor.NewMeasure("round") + for i := 1; i <= conf.Rounds; i++ { + dbg.Lvl1(peer.String(), "will start a new round", i) + calc := monitor.NewMeasure("calc") + // the signature channel used for this round + lsigChan := make(chan *net.ListBasicSignature) + // notify each connections + for _, ch := range children { + ch <- lsigChan + } + + childrenSigs := make([]*net.ListBasicSignature, 0) + // Wait for listsignatures coming + dbg.Lvl3(peer.String(), "Waiting on signatures for round", i, "...") + + for sigs := range lsigChan { + dbg.Lvl3(peer.String(), "will analyze one ListBasicSignature...") + childrenSigs = append(childrenSigs, sigs) + // we have received all bundled signatures so time it + if len(childrenSigs) == len(conf.Tree.Children) { + close(lsigChan) // we have finished for this round + } + } + dbg.Lvl3(peer.String(), "Received all signatures ... ") + calc.Measure() + + var verifyWg sync.WaitGroup + var faulty uint64 = 0 + var total uint64 = 0 + // start timing verification + verify := monitor.NewMeasure("verify") + for _, sigs := range childrenSigs { + // Here it launches one go routine to verify a bundle + verifyWg.Add(1) + go func(s *net.ListBasicSignature) { + defer verifyWg.Done() + if conf.SkipChecks { + return + } + // verify each independant signatures + for _, sig := range s.Sigs { + if err := SchnorrVerify(suite, msg, sig); err != nil { + dbg.Lvl2(peer.String(), "received incorrect signature ><", err) + atomic.AddUint64(&faulty, 1) + } + atomic.AddUint64(&total, 1) + } + }(sigs) + } + // wait for all verifications + verifyWg.Wait() + // finished verifying => time it ! 
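+ // At this point every child bundle has been checked in its own goroutine; faulty + // and total were only updated through atomic adds, so they can be read safely below.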
+ verify.Measure() + round.Measure() + dbg.Lvl3(peer.String(), "Round", i, "/", conf.Rounds, "has verified all signatures:", total-faulty, "/", total, "good signatures") + } + + // cLosing each channels + for _, ch := range children { + close(ch) + } + + dbg.Lvl2(peer.String(), "Finished all rounds successfully.") +} + +func RunPeer(conf *app.NTreeConfig) { + + host := net.NewTcpHost(app.RunFlags.Hostname) + key := cliutils.KeyPair(suite) + + peer := NewPeer(host, ServRole, key.Secret, key.Public) + dbg.Lvl3(peer.String(), "Up and will make connections...") + + // Chan used to communicate the message from the parent to the children + // Must do a Fan out to communicate this message to all children + masterMsgChan := make(chan net.MessageSigning) + childrenMsgChan := make([]chan net.MessageSigning, len(conf.Tree.Children)) + go func() { + // init + for i := range childrenMsgChan { + childrenMsgChan[i] = make(chan net.MessageSigning) + } + // for each message + for msg := range masterMsgChan { + // broadcast to each channels + for i, ch := range childrenMsgChan { + dbg.Lvl4(peer.String(), "dispatching msg to children (", i+1, "/", len(conf.Tree.Children), ")...") + ch <- msg + } + } + // When finished, close all children channs + for _, ch := range childrenMsgChan { + close(ch) + } + }() + + // chan used to communicate the signature from the children to the parent + // It is also used to specify the start of a new round (coming from the parent + // connection) + masterRoundChan := make(chan chan net.ListBasicSignature) + // dispatch new round to each children + childRoundChan := make([]chan chan net.ListBasicSignature, len(conf.Tree.Children)) + dbg.Lvl3(peer.String(), "created children Signal Channels (length =", len(childRoundChan), ")") + go func() { + // init + for i := range childRoundChan { + childRoundChan[i] = make(chan chan net.ListBasicSignature) + } + // For each new round started by the parent's connection + for sigChan := range masterRoundChan { + // if no children, no signature will come + // so close immediatly so parent connection will continue + if len(conf.Tree.Children) == 0 { + dbg.Lvl3(peer.String(), "Has no children so closing childRoundChan") + close(sigChan) + } else { + // otherwise, dispatch to children + for i, _ := range childRoundChan { + dbg.Lvl4(peer.String(), "Dispatching signature channel to children (", i+1, "/", len(conf.Tree.Children), ")...") + childRoundChan[i] <- sigChan + } + } + } + dbg.Lvl3(peer.String(), "closing the children sig channels...") + for _, ch := range childRoundChan { + close(ch) + } + }() + + // chan used to tell the end of the protocols + done := make(chan bool) + // The parent protocol + proto := func(c net.Conn) { + dbg.Lvl3(peer.String(), "connected with parent", c.PeerName()) + // for each rounds + for i := 1; i <= conf.Rounds; i++ { + // Create the chan for this round + sigChan := make(chan net.ListBasicSignature) + // that wil be used for children to pass up their signatures + masterRoundChan <- sigChan + dbg.Lvl3(peer.String(), "starting round", i) + // First, receive the message to be signed + sig, err := c.Receive() + if err != nil { + dbg.Fatal(peer.String(), "error receiving message from parent", c.PeerName()) + } + if sig.MsgType != net.MessageSigningType { + dbg.Fatal(peer.String(), "received wrong packet type from parent:", sig.MsgType.String()) + } + msg := sig.Msg.(net.MessageSigning) + // Notify the chan so it will be broadcasted down + masterMsgChan <- msg + dbg.Lvl3(peer.String(), "round", i, ": received message 
from parent", msg.Msg) + // issue our signature + bs := peer.Signature(msg.Msg) + // wait for children signatures + sigs := make([]net.BasicSignature, 0) + sigs = append(sigs, *bs) + + // for each ListBasicSignature + n := 0 + dbg.Lvl3(peer.String(), "round", i, ": waiting on signatures from children ...") + for lsig := range sigChan { + dbg.Lvl3(peer.String(), "round", i, ": receievd a ListSignature !") + // Add each independant signature + for _, sig := range lsig.Sigs { + sigs = append(sigs, sig) + } + n += 1 + //We got them all ;) + if n == len(conf.Tree.Children) { + close(sigChan) + break + } + } + + dbg.Lvl3(peer.String(), "received", len(sigs), "signatures from children") + // Then send to parent the signature + lbs := net.ListBasicSignature{} + lbs.Length = len(sigs) + lbs.Sigs = sigs + err = c.Send(lbs) + if err != nil { + dbg.Fatal(peer.String(), "Could not send list of signature to parents ><", err) + } + dbg.Lvl3(peer.String(), "round", i, ": sent the array of sigs to parent") + } + close(masterRoundChan) + c.Close() + done <- true + } + + dbg.Lvl3(peer.String(), "listen for the parent connection...") + go peer.Listen(conf.Name, proto) + + // Connect to the children + // Relay the msg + // Wait for signatures + dbg.Lvl3(peer.String(), "will contact its siblings..") + // To stop when every children has done all rounds + // Connect to every children + for i, c := range conf.Tree.Children { + dbg.Lvl3(peer.String(), "is connecting to", c.Name, "(", i, ")") + connPeer := peer.Open(c.Name) + if connPeer == nil { + dbg.Fatal(peer.String(), "Could not connect to", c.Name) + } + // Children protocol + go func(child int, conn net.Conn) { + dbg.Lvl3(peer.String(), "is connected to children", conn.PeerName(), "(", child, ")") + + // For each rounds new round + for sigChan := range childRoundChan[child] { + dbg.Lvl3(peer.String(), "starting new round with children", conn.PeerName(), "(", child, ")") + // get & relay the message + msg := <-childrenMsgChan[child] + dbg.Lvl3(peer.String(), "will relay message to child", conn.PeerName(), "(", child, ")") + err := conn.Send(msg) + if err != nil { + dbg.Fatal(peer.String(), "Could not relay message to children", conn.PeerName()) + } + dbg.Lvl4(peer.String(), "sent to the message to children", conn.PeerName()) + // wait for signature bundle + sig, err := conn.Receive() + if err != nil { + dbg.Fatal(peer.String(), "Could not receive the bundled children signature from", conn.PeerName()) + } + if sig.MsgType != net.ListBasicSignatureType { + dbg.Fatal(peer.String(), "received an different package from", conn.PeerName(), ":", sig.MsgType.String()) + } + dbg.Lvl4(peer.String(), "received signature bundle from children", conn.PeerName()) + lbs := sig.Msg.(net.ListBasicSignature) + // send to parent + sigChan <- lbs + } + dbg.Lvl3(peer.String(), "finished with children", conn.PeerName()) + conn.Close() + }(i, connPeer) + } + // Wait for the whole thing to be done (parent connection == master) + <-done + dbg.Lvl3(peer.String(), "leaving...") +} diff --git a/app/schnorr_sign/client.go b/app/schnorr_sign/client.go deleted file mode 100644 index c82b94311d..0000000000 --- a/app/schnorr_sign/client.go +++ /dev/null @@ -1,6 +0,0 @@ -package schnorr_sign -import "github.com/dedis/cothority/deploy" - -func RunClient(conf *deploy.Config) { - -} diff --git a/app/schnorr_sign/schnorr_sign.go b/app/schnorr_sign/schnorr_sign.go deleted file mode 100644 index a5cbe61599..0000000000 --- a/app/schnorr_sign/schnorr_sign.go +++ /dev/null @@ -1,51 +0,0 @@ -package 
schnorr_sign - -import ( - "encoding/json" - log "github.com/Sirupsen/logrus" - "github.com/dedis/cothority/deploy" - "github.com/dedis/cothority/lib/config" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "io/ioutil" - //"os" -) - -// Dispatch-function for running either client or server (mode-parameter) -func Run(app *config.AppConfig, depl *deploy.Config) { - - // we must know who we are - if app.Hostname == "" { - log.Fatal("Hostname empty : Abort") - } - - dbg.Lvl2(app.Hostname, "Starting to run as ", app.Mode) - var err error - hosts, err := ReadHostsJson("tree.json") - if err != nil { - log.Fatal("Error while reading JSON hosts file on", app.Hostname, ". Abort") - } - switch app.Mode { - case "client": - RunClient(depl) - case "server": - RunServer(hosts, app, depl) - } -} - -// Read the tree json file and return the configFileold containing every hosts name -func ReadHostsJson(file string) (*config.HostsConfig, error) { - var cf config.ConfigFileOld - bFile, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - - err = json.Unmarshal(bFile, &cf) - if err != nil { - return nil, err - } - return &config.HostsConfig{ - Conn: cf.Conn, - Hosts: cf.Hosts, - }, nil -} diff --git a/app/schnorr_sign/server.go b/app/schnorr_sign/server.go deleted file mode 100644 index 61091fa0ff..0000000000 --- a/app/schnorr_sign/server.go +++ /dev/null @@ -1,125 +0,0 @@ -package schnorr_sign - -import log "github.com/Sirupsen/logrus" -import "github.com/dedis/cothority/lib/logutils" -import "github.com/dedis/cothority/deploy" -import "github.com/dedis/cothority/lib/config" -import "github.com/dedis/crypto/poly" -import dbg "github.com/dedis/cothority/lib/debug_lvl" -import "time" - -func RunServer(hosts *config.HostsConfig, app *config.AppConfig, depl *deploy.Config) { - s := config.GetSuite(depl.Suite) - poly.SUITE = s - poly.SECURITY = poly.MODERATE - n := len(hosts.Hosts) - - info := poly.PolyInfo{ - N: n, - R: n, - T: n, - } - indexPeer := -1 - for i, h := range hosts.Hosts { - if h == app.Hostname { - indexPeer = i - break - } - } - if indexPeer == -1 { - log.Fatal("Peer ", app.Hostname, "(", app.PhysAddr, ") did not find any match for its name.Abort") - } - - start := time.Now() - dbg.Lvl1("Creating new peer ", app.Hostname, "(", app.PhysAddr, ") ...") - // indexPeer == 0 <==> peer is root - p := NewPeer(indexPeer, app.Hostname, info, indexPeer == 0) - - // make it listen - dbg.Lvl2("Peer", app.Hostname, "is now listening for incoming connections") - go p.Listen() - - // then connect it to its successor in the list - for _, h := range hosts.Hosts[indexPeer+1:] { - dbg.Lvl2("Peer ", app.Hostname, " will connect to ", h) - // will connect and SYN with the remote peer - p.ConnectTo(h) - } - // Wait until this peer is connected / SYN'd with each other peer - p.WaitSYNs() - - if p.IsRoot(){ - delta := time.Since(start) - dbg.Lvl2(p.String(), "Connections accomplished in", delta) - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "schnorr_connect", - "round": 0, - "time": delta, - }).Info("") - } - - // start to record - start = time.Now() - - // Setup the schnorr system amongst peers - p.SetupDistributedSchnorr() - p.SendACKs() - p.WaitACKs() - dbg.Lvl1(p.String(), "completed Schnorr setup") - - // send setup time if we're root - if p.IsRoot() { - delta := time.Since(start) - dbg.Lvl2(p.String(), "setup accomplished in ", delta) - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "schnorr_setup", - "round": 0, - "time": delta, - }).Info("") - } - - for 
round := 0; round < depl.Rounds; round++ { - if p.IsRoot() { - dbg.Lvl2("Starting round", round) - } - - // Then issue a signature ! - start = time.Now() - msg := "hello world" - - // Only root calculates if it's OK and sends a log-message - if p.IsRoot() { - sig := p.SchnorrSigRoot([]byte(msg)) - err := p.VerifySchnorrSig(sig, []byte(msg)) - if err != nil { - dbg.Fatal(p.String(), "could not verify schnorr signature :/ ", err) - } - - dbg.Lvl2(p.String(), "verified the schnorr sig !") - // record time - delta := time.Since(start) - dbg.Lvl2(p.String(), "signature done in ", delta) - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "schnorr_round", - "round": round, - "time": delta, - }).Info("") - } else { - // Compute the partial sig and send it to the root - p.SchnorrSigPeer([]byte(msg)) - } - } - - p.WaitFins() - dbg.Lvl1(p.String(), "is leaving ...") - - if p.IsRoot(){ - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "schnorr_end", - }).Info("") - } -} diff --git a/app/shamir/client.go b/app/shamir/client.go new file mode 100644 index 0000000000..ea0413c75b --- /dev/null +++ b/app/shamir/client.go @@ -0,0 +1,9 @@ +package main + +import ( + "github.com/dedis/cothority/lib/app" +) + +func RunClient(conf *app.ConfigShamir) { + +} diff --git a/app/schnorr_sign/message.go b/app/shamir/message.go similarity index 96% rename from app/schnorr_sign/message.go rename to app/shamir/message.go index 81542be661..14aaa3921c 100644 --- a/app/schnorr_sign/message.go +++ b/app/shamir/message.go @@ -1,4 +1,4 @@ -package schnorr_sign +package main import ( "github.com/dedis/crypto/abstract" diff --git a/app/schnorr_sign/peer.go b/app/shamir/peer.go similarity index 71% rename from app/schnorr_sign/peer.go rename to app/shamir/peer.go index 965edc6421..1c3e7cefbf 100644 --- a/app/schnorr_sign/peer.go +++ b/app/shamir/peer.go @@ -1,17 +1,18 @@ -package schnorr_sign +package main import ( "fmt" + "net" + "strings" + "sync" + "time" + log "github.com/Sirupsen/logrus" "github.com/dedis/cothority/lib/cliutils" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/crypto/abstract" conf "github.com/dedis/crypto/config" "github.com/dedis/crypto/poly" - "net" - "strings" - "sync" - "time" ) // How many times a peer tries to connect to another until it works @@ -30,7 +31,7 @@ type RemotePeer struct { } func (r *RemotePeer) String() string { - return fmt.Sprintf("RemotePeer : %s (id: %d)", r.Hostname, r.Id) + return fmt.Sprintf("RemotePeer: %s (id: %d)", r.Hostname, r.Id) } func (r *RemotePeer) IsRoot() bool { @@ -53,7 +54,10 @@ type Peer struct { root bool // N, R, T parameters + suite used throughout the process - info poly.PolyInfo + info poly.Threshold + + // suite used + suite abstract.Suite // its own private / public key pair key conf.KeyPair @@ -74,18 +78,18 @@ type Peer struct { // NewPeer returns a new peer with its id and the number of peers in the schnorr signature algo // TODO verification of string addr:port -func NewPeer(id int, name string, p poly.PolyInfo, isRoot bool) *Peer { +func NewPeer(id int, name string, suite abstract.Suite, p poly.Threshold, isRoot bool) *Peer { if id >= p.N { - log.Fatal("Error while NewPeer : gien ", id, " as id whereas polyinfo.N = ", p.N) + log.Fatal("Error while NewPeer: gien", id, "as id whereas polyinfo.N =", p.N) } // Setup of the private / public pair - key := cliutils.KeyPair(poly.SUITE) + key := cliutils.KeyPair(suite) // setup of the public list of key pubKeys := 
make([]abstract.Point, p.N) pubKeys[id] = key.Public - dbg.Lvl3(name, "(id", id, ") has created its private/public key : public => ", key.Public) + dbg.Lvl3(name, "(id", id, ") has created its private/public key: public =>", key.Public) return &Peer{ Id: id, @@ -93,6 +97,7 @@ func NewPeer(id int, name string, p poly.PolyInfo, isRoot bool) *Peer { root: isRoot, Name: name, info: p, + suite: suite, key: key, pubKeys: pubKeys, schnorr: new(poly.Schnorr), @@ -108,12 +113,12 @@ func (p *Peer) Listen() { port := ":" + results[1] ln, err := net.Listen("tcp", port) if err != nil { - dbg.Fatal(p.Name, ": Error while listening on port ", port, "ABORT => ", err) + dbg.Fatal(p.Name, ": Error while listening on port", port, "ABORT =>", err) } for { conn, err := ln.Accept() if err != nil { - dbg.Fatal(p.Name, ": Error while listening on port ", port, " => ", err) + dbg.Fatal(p.Name, ": Error while listening on port", port, "=>", err) } go p.synWithPeer(conn) } @@ -130,15 +135,15 @@ func (p *Peer) ConnectTo(host string) error { // we have tried too many times => abort if count == ConnRetry { tick.Stop() - dbg.Fatal(p.Name, "could not connect to", host, " ", ConnRetry, "times. Abort.") + dbg.Fatal(p.Name, "could not connect to", host, "", ConnRetry, "times. Abort.") // let's try again one more time } else { - dbg.Lvl2(p.Name, "could not connect to", host, ". Retry in ", ConnWaitRetry.String()) + dbg.Lvl2(p.Name, "could not connect to", host, ". Retry in", ConnWaitRetry.String()) count += 1 } } // handle successful connection - dbg.Lvl3(p.Name, "has connected with peer ", host) + dbg.Lvl3(p.Name, "has connected with peer", host) tick.Stop() // start to syn with the respective peer go p.synWithPeer(conn) @@ -159,13 +164,13 @@ func (p *Peer) ForRemotePeers(fn func(RemotePeer)) { func (p *Peer) WaitSYNs() { for { s := <-p.synChan - dbg.Lvl3(p.Name, " synChan received Syn id ", s.Id) + dbg.Lvl3(p.Name, "synChan received Syn id", s.Id) _, ok := p.remote[s.Id] if !ok { dbg.Fatal(p.Name, "received syn'd notification of an unknown peer... ABORT") } if len(p.remote) == p.info.N-1 { - dbg.Lvl2(p.Name, "is SYN'd with every one") + dbg.Lvl3(p.Name, "is SYN'd with every one") break } } @@ -179,7 +184,7 @@ func (p *Peer) SendACKs() { } err := p.SendToAll(&a) if err != nil { - dbg.Fatal(p.Name, "could not sent its ACKs to every one : ", err) + dbg.Fatal(p.Name, "could not sent its ACKs to every one:", err) } } @@ -188,9 +193,9 @@ func (p *Peer) WaitACKs() { var wg sync.WaitGroup fn := func(rp RemotePeer) { a := Ack{} - err := poly.SUITE.Read(rp.Conn, &a) + err := p.suite.Read(rp.Conn, &a) if err != nil { - dbg.Fatal(p.Name, "could not receive an ACK from ", rp.String(), " (err ", err, ")") + dbg.Fatal(p.Name, "could not receive an ACK from", rp.String(), "(err", err, ")") } //p.ackChan <- a wg.Done() @@ -200,18 +205,7 @@ func (p *Peer) WaitACKs() { dbg.Lvl3(p.Name, "is waiting for acks ...") wg.Wait() - dbg.Lvl2(p.String(), "received ALL ACKs") - //n := 0 - //for { - // a := <-p.ackChan - // if a.Valid { - // n += 1 - // } - // if n == p.info.N-1 { - // dbg.Lvl2(p.Name, "received all acks. 
Continue") - // break - // } - //} + dbg.Lvl3(p.String(), "received all ACKs") } // Wait for the end of the alo so we can close connection nicely @@ -219,30 +213,20 @@ func (p *Peer) WaitFins() { p.wgFin.Add(len(p.remote)) fn := func(rp RemotePeer) { f := Finish{p.Id} - err := poly.SUITE.Write(rp.Conn, &f) + err := p.suite.Write(rp.Conn, &f) if err != nil { - dbg.Fatal(p.String(), "could not send FIN to ", rp.String()) + dbg.Fatal(p.String(), "could not send FIN to", rp.String()) } p.wgFin.Done() } p.ForRemotePeers(fn) - dbg.Lvl2(p.String(), "waiting to send all FIN's packets") + dbg.Lvl3(p.String(), "waiting to send all FIN's packets") p.wgFin.Wait() // close all connections for _, rp := range p.remote { rp.Conn.Close() } - dbg.Lvl2(p.String(), "close every connections") - //for { - // f := <-p.finChan - // rp, ok := p.remote[f.Id] - // if !ok { - // dbg.Lvl2(p.Name, "received invalid FIN : wrong ID ", rp.Id, " ... ") - // } else { - // rp.Conn.Close() - // dbg.Lvl2(p.Name, "received FIN from ", rp.String(), " => closed connection") - // } - //} + dbg.Lvl3(p.String(), "close every connections") } // Peer logic after it has syn'd with another peer @@ -261,7 +245,7 @@ func (p *Peer) SendAcks(rp RemotePeer) { // Helpers to send any aribtrary data to the n-peer func (p *Peer) SendToPeer(i int, data interface{}) error { - return poly.SUITE.Write(p.nConn(i), data) + return p.suite.Write(p.nConn(i), data) } func (p *Peer) SendToRoot(data interface{}) error { return p.SendToPeer(0, data) @@ -292,23 +276,26 @@ func (p *Peer) rootConn() net.Conn { // If all goes well, it will add the peer to the remotePeer array // and notify to the channel synChan func (p *Peer) synWithPeer(conn net.Conn) { + if conn == nil { + dbg.Fatal("Connection of", p.String(), "is nil") + } // First we need to SYN mutually s := Syn{ Id: p.Id, Public: p.key.Public, } - err := poly.SUITE.Write(conn, &s) + err := p.suite.Write(conn, &s) if err != nil { - dbg.Fatal(p.Name, "could not send SYN to ", conn.RemoteAddr().String()) + dbg.Fatal(p.Name, "could not send SYN to", conn.RemoteAddr().String()) } // Receive the other SYN s2 := Syn{} - err = poly.SUITE.Read(conn, &s2) + err = p.suite.Read(conn, &s2) if err != nil { - dbg.Fatal(p.Name, "could not receive SYN from ", conn.RemoteAddr().String()) + dbg.Fatal(p.Name, "could not receive SYN from", conn.RemoteAddr().String()) } if s2.Id < 0 || s2.Id >= p.info.N { - dbg.Fatal(p.Name, "received wrong SYN info from ", conn.RemoteAddr().String()) + dbg.Fatal(p.Name, "received wrong SYN info from", conn.RemoteAddr().String()) } if p.pubKeys[s2.Id] != nil { dbg.Fatal(p.Name, "already received a SYN for this index ") @@ -316,7 +303,7 @@ func (p *Peer) synWithPeer(conn net.Conn) { p.pubKeys[s2.Id] = s2.Public rp := RemotePeer{Conn: conn, Id: s2.Id, Hostname: conn.RemoteAddr().String()} p.remote[s2.Id] = rp - dbg.Lvl3(p.String(), "has SYN'd with peer ", rp.String()) + dbg.Lvl3(p.String(), "has SYN'd with peer", rp.String()) p.synChan <- s2 } @@ -327,35 +314,35 @@ func (p *Peer) String() string { } else { role = "Peer: " } - return fmt.Sprintf("%s: %s (%d) : ", role, p.Name, p.Id) + return fmt.Sprintf("%s: %s (%d):", role, p.Name, p.Id) } // ComputeSharedSecret will make the exchange of dealers between // the peers and will compute the sharedsecret at the end func (p *Peer) ComputeSharedSecret() *poly.SharedSecret { // Construct the dealer - dealerKey := cliutils.KeyPair(poly.SUITE) - dealer := poly.NewDealer(p.info, &p.key, &dealerKey, p.pubKeys) + dealerKey := cliutils.KeyPair(p.suite) + 
dealer := new(poly.Deal).ConstructDeal(&dealerKey, &p.key, p.info.T, p.info.R, p.pubKeys) // Construct the receiver - receiver := poly.NewReceiver(p.info, &p.key) + receiver := poly.NewReceiver(p.suite, p.info, &p.key) // add already its own dealer - _, err := receiver.AddDealer(p.Id, dealer) + _, err := receiver.AddDeal(p.Id, dealer) if err != nil { dbg.Fatal(p.String(), "could not add its own dealer >< ABORT") } // Send the dealer struct TO every one err = p.SendToAll(dealer) - dbg.Lvl2(p.Name, "sent its dealer to every peers. (err = ", err, ")") + dbg.Lvl3(p.Name, "sent its dealer to every peers. (err =", err, ")") // Receive the dealer struct FROM every one // wait with a chan to get ALL dealers - dealChan := make(chan *poly.Dealer) + dealChan := make(chan *poly.Deal) for _, rp := range p.remote { go func(rp RemotePeer) { - d := new(poly.Dealer).UnmarshalInit(p.info) - err := poly.SUITE.Read(rp.Conn, d) + d := new(poly.Deal).UnmarshalInit(p.info.T, p.info.R, p.info.N, p.suite) + err := p.suite.Read(rp.Conn, d) if err != nil { - dbg.Fatal(p.Name, " received a strange dealer from ", rp.String()) + dbg.Fatal(p.Name, "received a strange dealer from", rp.String(), ":", err) } dealChan <- d }(rp) @@ -367,25 +354,25 @@ func (p *Peer) ComputeSharedSecret() *poly.SharedSecret { for { // get the dealer and add it d := <-dealChan - dbg.Lvl3(p.Name, "collected one more dealer (count = ", n, ")") + dbg.Lvl3(p.Name, "collected one more dealer (count =", n, ")") // TODO: get the response back to the dealer - _, err := receiver.AddDealer(p.Id, d) + _, err := receiver.AddDeal(p.Id, d) if err != nil { - dbg.Fatal(p.Name, "has error when adding the dealer : ", err) + dbg.Fatal(p.Name, "has error when adding the dealer:", err) } n += 1 // we get enough dealers to compute the shared secret if n == p.info.T-1 { - dbg.Lvl2(p.Name, "received every Dealers") + dbg.Lvl3(p.Name, "received every Dealers") break } } sh, err := receiver.ProduceSharedSecret() if err != nil { - dbg.Fatal(p.Name, "could not produce shared secret. Abort. (err ", err, ")") + dbg.Fatal(p.Name, "could not produce shared secret. Abort. (err", err, ")") } - dbg.Lvl2(p.Name, "produced shared secret !") + dbg.Lvl3(p.Name, "produced shared secret !") return sh } @@ -395,7 +382,7 @@ func (p *Peer) SetupDistributedSchnorr() { // first, we have to get the long term shared secret long := p.ComputeSharedSecret() // Then instantiate the Schnoor struct - p.schnorr = p.schnorr.Init(p.info, long) + p.schnorr = p.schnorr.Init(p.suite, p.info, long) } // SchnorrSigRoot will first generate a @@ -405,10 +392,13 @@ func (p *Peer) SetupDistributedSchnorr() { func (p *Peer) SchnorrSigRoot(msg []byte) *poly.SchnorrSig { // First, gen. 
a random secret random := p.ComputeSharedSecret() + // gen the hash out of the msg + h := p.suite.Hash() + h.Write(msg) // launch the new round - err := p.schnorr.NewRound(random, msg) + err := p.schnorr.NewRound(random, h) if err != nil { - dbg.Fatal(p.String(), "could not make a new round : ", err) + dbg.Fatal(p.String(), "could not make a new round:", err) } // compute its own share of the signature @@ -419,12 +409,12 @@ func (p *Peer) SchnorrSigRoot(msg []byte) *poly.SchnorrSig { // no need to send to all if you are the root // p.SendToAll(ps) // then receive every partial sig - sigChan := make(chan *poly.PartialSchnorrSig) + sigChan := make(chan *poly.SchnorrPartialSig) fn := func(rp RemotePeer) { - psig := new(poly.PartialSchnorrSig) - err := poly.SUITE.Read(rp.Conn, psig) + psig := new(poly.SchnorrPartialSig) + err := p.suite.Read(rp.Conn, psig) if err != nil { - dbg.Fatal(p.String(), "could not decode PartialSig of ", rp.String()) + dbg.Fatal(p.String(), "could not decode PartialSig of", rp.String()) } sigChan <- psig } @@ -436,16 +426,16 @@ func (p *Peer) SchnorrSigRoot(msg []byte) *poly.SchnorrSig { psig := <-sigChan err := p.schnorr.AddPartialSig(psig) if err != nil { - dbg.Fatal(p.String(), "could not add the partial signature received : ", err) + dbg.Fatal(p.String(), "could not add the partial signature received:", err) } n += 1 if n == p.info.N-1 { - dbg.Lvl2(p.String(), "received every other partial sig.") + dbg.Lvl3(p.String(), "received every other partial sig.") break } } - sign, err := p.schnorr.SchnorrSig() + sign, err := p.schnorr.Sig() if err != nil { dbg.Fatal(p.String(), "could not generate the global SchnorrSig", err) } @@ -456,9 +446,11 @@ func (p *Peer) SchnorrSigPeer(msg []byte) { // First, gen. a random secret random := p.ComputeSharedSecret() // launch the new round - err := p.schnorr.NewRound(random, msg) + h := p.suite.Hash() + h.Write(msg) + err := p.schnorr.NewRound(random, h) if err != nil { - dbg.Fatal(p.String(), "could not make a new round : ", err) + dbg.Fatal(p.String(), "could not make a new round:", err) } // compute its own share of the signature @@ -469,7 +461,9 @@ func (p *Peer) SchnorrSigPeer(msg []byte) { // VerifySchnorrSig will basically verify the validity of the issued signature func (p *Peer) VerifySchnorrSig(ps *poly.SchnorrSig, msg []byte) error { - return p.schnorr.VerifySchnorrSig(ps, msg) + h := p.suite.Hash() + h.Write(msg) + return p.schnorr.VerifySchnorrSig(ps, h) } // BroadcastSIgnature will broadcast the given signature to every other peer @@ -484,10 +478,10 @@ func (p *Peer) BroadcastSignature(s *poly.SchnorrSig) []*poly.SchnorrSig { sigChan := make(chan *poly.SchnorrSig) fn := func(rp RemotePeer) { - sch := new(poly.SchnorrSig).Init(p.info) - err := poly.SUITE.Read(rp.Conn, sch) + sch := new(poly.SchnorrSig).Init(p.suite, p.info) + err := p.suite.Read(rp.Conn, sch) if err != nil { - dbg.Fatal(p.String(), "could not decode schnorr sig from ", rp.String()) + dbg.Fatal(p.String(), "could not decode schnorr sig from", rp.String()) } sigChan <- sch } @@ -499,7 +493,7 @@ func (p *Peer) BroadcastSignature(s *poly.SchnorrSig) []*poly.SchnorrSig { arr = append(arr, sig) n += 1 if n == p.info.N-1 { - dbg.Lvl2(p.String(), "received every other schnorr sig.") + dbg.Lvl3(p.String(), "received every other schnorr sig.") break } } diff --git a/app/shamir/server.go b/app/shamir/server.go new file mode 100644 index 0000000000..e4e562f42a --- /dev/null +++ b/app/shamir/server.go @@ -0,0 +1,94 @@ +package main + +import ( + log 
"github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + "github.com/dedis/crypto/edwards" + "github.com/dedis/crypto/poly" +) + +func RunServer(conf *app.ConfigShamir) { + flags := app.RunFlags + s := edwards.NewAES128SHA256Ed25519(false) + n := len(conf.Hosts) + + info := poly.Threshold{ + N: n, + R: n, + T: n, + } + indexPeer := -1 + for i, h := range conf.Hosts { + if h == flags.Hostname { + indexPeer = i + break + } + } + if indexPeer == -1 { + log.Fatal("Peer", flags.Hostname, "(", flags.PhysAddr, ") did not find any match for its name.Abort") + } + + dbg.Lvl3("Creating new peer", flags.Hostname, "(", flags.PhysAddr, ") ...") + // indexPeer == 0 <==> peer is root + p := NewPeer(indexPeer, flags.Hostname, s, info, indexPeer == 0) + + // make it listen + setup := monitor.NewMeasure("setup") + dbg.Lvl3("Peer", flags.Hostname, "is now listening for incoming connections") + go p.Listen() + + // then connect it to its successor in the list + for _, h := range conf.Hosts[indexPeer+1:] { + dbg.Lvl3("Peer", flags.Hostname, "will connect to", h) + // will connect and SYN with the remote peer + p.ConnectTo(h) + } + // Wait until this peer is connected / SYN'd with each other peer + p.WaitSYNs() + + // Setup the schnorr system amongst peers + p.SetupDistributedSchnorr() + p.SendACKs() + p.WaitACKs() + dbg.Lvl3(p.String(), "completed Schnorr setup") + + // send setup time if we're root + if p.IsRoot() { + setup.Measure() + } + + roundm := monitor.NewMeasure("round") + for round := 1; round <= conf.Rounds; round++ { + calc := monitor.NewMeasure("calc") + // Then issue a signature ! + //sys, usr := app.GetRTime() + msg := "hello world" + + // Only root calculates if it's OK and sends a log-message + if p.IsRoot() { + dbg.Lvl1("Starting round", round) + sig := p.SchnorrSigRoot([]byte(msg)) + calc.Measure() + verify := monitor.NewMeasure("verify") + err := p.VerifySchnorrSig(sig, []byte(msg)) + if err != nil { + dbg.Fatal(p.String(), "could not verify schnorr signature:/", err) + } + verify.Measure() + roundm.Measure() + dbg.Lvl3(p.String(), "verified the schnorr sig !") + } else { + // Compute the partial sig and send it to the root + p.SchnorrSigPeer([]byte(msg)) + } + } + + p.WaitFins() + dbg.Lvl3(p.String(), "is leaving ...") + + if p.IsRoot() { + monitor.End() + } +} diff --git a/app/shamir/shamir.go b/app/shamir/shamir.go new file mode 100644 index 0000000000..51027f9833 --- /dev/null +++ b/app/shamir/shamir.go @@ -0,0 +1,26 @@ +package main + +import ( + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/dbg" +) + +// Dispatch-function for running either client or server (mode-parameter) +func main() { + conf := &app.ConfigShamir{} + app.ReadConfig(conf) + + // we must know who we are + if app.RunFlags.Hostname == "" { + log.Fatal("Hostname empty: Abort") + } + + dbg.Lvl2(app.RunFlags.Hostname, "Starting to run as", app.RunFlags.Mode) + switch app.RunFlags.Mode { + case "client": + RunClient(conf) + case "server": + RunServer(conf) + } +} diff --git a/app/shamir/shamir_test.go b/app/shamir/shamir_test.go new file mode 100644 index 0000000000..ab86a87dc3 --- /dev/null +++ b/app/shamir/shamir_test.go @@ -0,0 +1,9 @@ +package main + +import ( + "testing" +) + +func TestBuild(t *testing.T) { + // Just testing that build is done correctly +} diff --git a/app/sign/sign.go b/app/sign/sign.go new file mode 100644 index 0000000000..c3fec2e5ae --- 
/dev/null +++ b/app/sign/sign.go @@ -0,0 +1,52 @@ +package main + +import ( + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + "github.com/dedis/cothority/lib/sign" + "time" +) + +func main() { + conf := &app.ConfigColl{} + app.ReadConfig(conf) + + // we must know who we are + if app.RunFlags.Hostname == "" { + dbg.Fatal("Hostname empty: Abort") + } + + // Do some common setup + if app.RunFlags.Mode == "client" { + app.RunFlags.Hostname = app.RunFlags.Name + } + hostname := app.RunFlags.Hostname + if hostname == conf.Hosts[0] { + dbg.Lvlf3("Tree is %+v", conf.Tree) + } + dbg.Lvl3(hostname, "Starting to run") + + app.RunFlags.StartedUp(len(conf.Hosts)) + peer := conode.NewPeer(hostname, conf.ConfigConode) + + if app.RunFlags.AmRoot { + for { + time.Sleep(time.Second) + setupRound := sign.NewRoundSetup(peer.Node) + peer.StartAnnouncementWithWait(setupRound, 5*time.Second) + counted := <-setupRound.Counted + dbg.Lvl1("Number of peers counted:", counted) + if counted == len(conf.Hosts) { + dbg.Lvl1("All hosts replied") + break + } + } + } + + RegisterRoundMeasure(peer.Node.LastRound()) + peer.LoopRounds(RoundMeasureType, conf.Rounds) + dbg.Lvlf3("Done - flags are %+v", app.RunFlags) + monitor.End() +} diff --git a/app/sign/sign_round.go b/app/sign/sign_round.go new file mode 100644 index 0000000000..f660b462d5 --- /dev/null +++ b/app/sign/sign_round.go @@ -0,0 +1,50 @@ +package main + +import ( + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + "github.com/dedis/cothority/lib/sign" +) + +const RoundMeasureType = "measure" + +type RoundMeasure struct { + measure *monitor.Measure + firstRound int + *sign.RoundCosi +} + +// Pass firstround, as we will have some previous rounds to wait +// for everyone to be setup +func RegisterRoundMeasure(firstRound int) { + sign.RegisterRoundFactory(RoundMeasureType, + func(s *sign.Node) sign.Round { + return NewRoundMeasure(s, firstRound) + }) +} + +func NewRoundMeasure(node *sign.Node, firstRound int) *RoundMeasure { + dbg.Lvlf3("Making new roundmeasure %+v", node) + round := &RoundMeasure{} + round.RoundCosi = sign.NewRoundCosi(node) + round.Type = RoundMeasureType + round.firstRound = firstRound + return round +} + +func (round *RoundMeasure) Announcement(viewNbr, roundNbr int, in *sign.SigningMessage, out []*sign.SigningMessage) error { + if round.IsRoot { + round.measure = monitor.NewMeasure("round") + } + return round.RoundCosi.Announcement(viewNbr, roundNbr, in, out) +} + +func (round *RoundMeasure) Response(in []*sign.SigningMessage, out *sign.SigningMessage) error { + err := round.RoundCosi.Response(in, out) + if round.IsRoot { + round.measure.Measure() + dbg.Lvl1("Round", round.RoundNbr-round.firstRound+1, + "finished - took", round.measure.WallTime) + } + return err +} diff --git a/app/sign/sign_test.go b/app/sign/sign_test.go new file mode 100644 index 0000000000..ab86a87dc3 --- /dev/null +++ b/app/sign/sign_test.go @@ -0,0 +1,9 @@ +package main + +import ( + "testing" +) + +func TestBuild(t *testing.T) { + // Just testing that build is done correctly +} diff --git a/app/coll_stamp/client_funcs.go b/app/stamp/client_funcs.go similarity index 63% rename from app/coll_stamp/client_funcs.go rename to app/stamp/client_funcs.go index 7bd4698c62..3c70f079e2 100644 --- a/app/coll_stamp/client_funcs.go +++ b/app/stamp/client_funcs.go @@ -1,4 +1,4 @@ -package coll_stamp +package main import ( "bytes" @@ -8,38 
+8,40 @@ import ( "time" log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" - "github.com/dedis/cothority/lib/coconet" "fmt" + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/sign" ) type Client struct { - Mux sync.Mutex // coarse grained mutex + Mux sync.Mutex // coarse grained mutex - name string - Servers map[string]coconet.Conn // signing nodes I work/ communicate with + name string + Servers map[string]coconet.Conn // signing nodes I work/ communicate with - // client history maps request numbers to replies from TSServer - // maybe at later phases we will want pair(reqno, TSServer) as key - history map[SeqNo]TimeStampMessage - reqno SeqNo // next request number in communications with TSServer + // client history maps request numbers to replies from TSServer + // maybe at later phases we will want pair(reqno, TSServer) as key + history map[conode.SeqNo]conode.TimeStampMessage + reqno conode.SeqNo // next request number in communications with TSServer - // maps response request numbers to channels confirming - // where response confirmations are sent - doneChan map[SeqNo]chan error + // maps response request numbers to channels confirming + // where response confirmations are sent + doneChan map[conode.SeqNo]chan error - nRounds int // # of last round messages were received in, as perceived by client - curRoundSig []byte // merkle tree root of last round - // roundChan chan int // round numberd are sent in as rounds change - Error error + nRounds int // # of last round messages were received in, as perceived by client + curMerkle []byte // MerkleRoot of last round + // roundChan chan int // round numberd are sent in as rounds change + Error error } func NewClient(name string) (c *Client) { c = &Client{name: name} c.Servers = make(map[string]coconet.Conn) - c.history = make(map[SeqNo]TimeStampMessage) - c.doneChan = make(map[SeqNo]chan error) + c.history = make(map[conode.SeqNo]conode.TimeStampMessage) + c.doneChan = make(map[conode.SeqNo]chan error) // c.roundChan = make(chan int) return } @@ -56,13 +58,13 @@ func (c *Client) Close() { func (c *Client) handleServer(s coconet.Conn) error { for { - tsm := &TimeStampMessage{} - err := s.Get(tsm) + tsm := &conode.TimeStampMessage{} + err := s.GetData(tsm) if err != nil { if err == coconet.ErrNotEstablished { continue } - log.Warn("error getting from connection:", err) + dbg.Lvl3("error getting from connection:", err) return err } c.handleResponse(tsm) @@ -70,15 +72,15 @@ func (c *Client) handleServer(s coconet.Conn) error { } // Act on type of response received from srrvr -func (c *Client) handleResponse(tsm *TimeStampMessage) { +func (c *Client) handleResponse(tsm *conode.TimeStampMessage) { switch tsm.Type { default: log.Println("Message of unknown type") - case StampReplyType: + case conode.StampSignatureType: // Process reply and inform done channel associated with // reply sequence number that the reply was received // we know that there is no error at this point - c.ProcessStampReply(tsm) + c.ProcessStampSignature(tsm) } } @@ -106,10 +108,10 @@ func (c *Client) AddServer(name string, conn coconet.Conn) { // if a server encounters any terminating error // terminate all pending client transactions and kill the client if err != nil { - log.Errorln("EOF detected: sending EOF to all pending TimeStamps") + dbg.Lvl3("EOF detected: sending EOF to all pending TimeStamps") c.Mux.Lock() for _, ch := range 
c.doneChan { - log.Println("Sending to Receiving Channel") + dbg.Lvl3("Sending to Receiving Channel") ch <- io.EOF } c.Error = io.EOF @@ -132,7 +134,7 @@ func (c *Client) PutToServer(name string, data coconet.BinaryMarshaler) error { if conn == nil { return errors.New(fmt.Sprintf("Invalid server/not connected", name, c.Servers[name])) } - return conn.Put(data) + return conn.PutData(data) } var ErrClientToTSTimeout error = errors.New("client timeouted on waiting for response") @@ -151,35 +153,36 @@ func (c *Client) TimeStamp(val []byte, TSServerName string) error { c.Mux.Unlock() // send request to TSServer err := c.PutToServer(TSServerName, - &TimeStampMessage{ - Type: StampRequestType, + &conode.TimeStampMessage{ + Type: conode.StampRequestType, ReqNo: myReqno, - Sreq: &StampRequest{Val: val}}) + Sreq: &conode.StampRequest{Val: val}}) if err != nil { if err != coconet.ErrNotEstablished { - log.Warn(c.Name(), "error timestamping to ", TSServerName, ": ", err) + dbg.Lvl3(c.Name(), "error timestamping to", TSServerName, ":", err) } // pass back up all errors from putting to server return err } + dbg.Lvl4("Client Sent timestamp request to", TSServerName) // get channel associated with request c.Mux.Lock() myChan := c.doneChan[myReqno] c.Mux.Unlock() - // wait until ProcessStampReply signals that reply was received + // wait until ProcessStampSignature signals that reply was received select { case err = <-myChan: - //log.Println("-------------client received response from" + TSServerName) + //log.Println("-------------client received response from" + TSServerName) break - case <-time.After(10 * ROUND_TIME): + case <-time.After(10 * sign.ROUND_TIME): dbg.Lvl3("client timeouted on waiting for response from" + TSServerName) break - // err = ErrClientToTSTimeout + // err = ErrClientToTSTimeout } if err != nil { - log.Errorln(c.Name(), "error received from DoneChan:", err) + dbg.Lvl3(c.Name(), "error received from DoneChan:", err) return err } @@ -190,7 +193,7 @@ func (c *Client) TimeStamp(val []byte, TSServerName string) error { return err } -func (c *Client) ProcessStampReply(tsm *TimeStampMessage) { +func (c *Client) ProcessStampSignature(tsm *conode.TimeStampMessage) { // update client history c.Mux.Lock() c.history[tsm.ReqNo] = *tsm @@ -198,8 +201,8 @@ func (c *Client) ProcessStampReply(tsm *TimeStampMessage) { // can keep track of rounds by looking at changes in the signature // sent back in a messages - if bytes.Compare(tsm.Srep.Sig, c.curRoundSig) != 0 { - c.curRoundSig = tsm.Srep.Sig + if bytes.Compare(tsm.Srep.MerkleRoot, c.curMerkle) != 0 { + c.curMerkle = tsm.Srep.MerkleRoot c.nRounds++ c.Mux.Unlock() diff --git a/app/stamp/stamp.go b/app/stamp/stamp.go new file mode 100644 index 0000000000..9f64e56708 --- /dev/null +++ b/app/stamp/stamp.go @@ -0,0 +1,52 @@ +package main + +import ( + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + "github.com/dedis/cothority/lib/sign" + "time" +) + +func main() { + conf := &app.ConfigColl{} + app.ReadConfig(conf) + + switch app.RunFlags.Mode { + case "server": + RunServer(&app.RunFlags, conf) + case "client": + RunClient(&app.RunFlags, conf) + } +} + +func RunServer(flags *app.Flags, conf *app.ConfigColl) { + hostname := flags.Hostname + if hostname == conf.Hosts[0] { + dbg.Lvlf3("Tree is %+v", conf.Tree) + } + dbg.Lvl3(hostname, "Starting to run") + + app.RunFlags.StartedUp(len(conf.Hosts)) + peer := conode.NewPeer(hostname, 
conf.ConfigConode) + + if app.RunFlags.AmRoot { + for { + setupRound := sign.NewRoundSetup(peer.Node) + peer.StartAnnouncementWithWait(setupRound, 5*time.Second) + counted := <-setupRound.Counted + dbg.Lvl1("Number of peers counted:", counted) + if counted == len(conf.Hosts) { + dbg.Lvl1("All hosts replied") + break + } + time.Sleep(time.Second) + } + } + + RegisterRoundMeasure(peer.Node.LastRound()) + peer.LoopRounds(RoundMeasureType, conf.Rounds) + dbg.Lvlf3("Done - flags are %+v", app.RunFlags) + monitor.End() +} diff --git a/app/stamp/stamp_round.go b/app/stamp/stamp_round.go new file mode 100644 index 0000000000..f005447043 --- /dev/null +++ b/app/stamp/stamp_round.go @@ -0,0 +1,51 @@ +package main + +import ( + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + "github.com/dedis/cothority/lib/sign" +) + +const RoundMeasureType = "measure" + +type RoundMeasure struct { + measure *monitor.Measure + firstRound int + *conode.RoundStamperListener +} + +// Pass firstround, as we will have some previous rounds to wait +// for everyone to be setup +func RegisterRoundMeasure(firstRound int) { + sign.RegisterRoundFactory(RoundMeasureType, + func(s *sign.Node) sign.Round { + return NewRoundMeasure(s, firstRound) + }) +} + +func NewRoundMeasure(node *sign.Node, firstRound int) *RoundMeasure { + dbg.Lvlf3("Making new roundmeasure %+v", node) + round := &RoundMeasure{} + round.RoundStamperListener = conode.NewRoundStamperListener(node) + round.Type = RoundMeasureType + round.firstRound = firstRound + return round +} + +func (round *RoundMeasure) Announcement(viewNbr, roundNbr int, in *sign.SigningMessage, out []*sign.SigningMessage) error { + if round.IsRoot { + round.measure = monitor.NewMeasure("round") + } + return round.RoundCosi.Announcement(viewNbr, roundNbr, in, out) +} + +func (round *RoundMeasure) Response(in []*sign.SigningMessage, out *sign.SigningMessage) error { + err := round.RoundCosi.Response(in, out) + if round.IsRoot { + round.measure.Measure() + dbg.Lvl1("Round", round.RoundNbr-round.firstRound+1, + "finished - took", round.measure.WallTime) + } + return err +} diff --git a/app/stamp/stamp_test.go b/app/stamp/stamp_test.go new file mode 100644 index 0000000000..ab86a87dc3 --- /dev/null +++ b/app/stamp/stamp_test.go @@ -0,0 +1,9 @@ +package main + +import ( + "testing" +) + +func TestBuild(t *testing.T) { + // Just testing that build is done correctly +} diff --git a/app/stamp/stamper.go b/app/stamp/stamper.go new file mode 100644 index 0000000000..a2c509b2b6 --- /dev/null +++ b/app/stamp/stamper.go @@ -0,0 +1,164 @@ +package main + +import ( + "crypto/rand" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/dbg" + + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/hashid" + "github.com/dedis/cothority/lib/sign" +) + +var muStats sync.Mutex + +var MAX_N_SECONDS int = 1 * 60 * 60 // 1 hours' worth of seconds +var MAX_N_ROUNDS int = MAX_N_SECONDS / int(sign.ROUND_TIME/time.Second) + +func RunClient(flags *app.Flags, conf *app.ConfigColl) { + dbg.Lvl4("Starting to run stampclient") + c := NewClient(flags.Name) + servers := strings.Split(flags.Server, ",") + // take the right percentage of servers + servers = scaleServers(flags, conf, servers) + // connect to all the servers listed + for _, s := range servers { + h, p, err := net.SplitHostPort(s) + if err != nil { 
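+ // every entry in the comma-separated server list must be a host:port pair; abort otherwise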
+ log.Fatal("improperly formatted host") + } + pn, _ := strconv.Atoi(p) + c.AddServer(s, coconet.NewTCPConn(net.JoinHostPort(h, strconv.Itoa(pn+1)))) + } + // Stream time coll_stamp requests + // if rate specified send out one message every rate milliseconds + dbg.Lvl3(flags.Name, "starting to stream at rate", conf.Rate, "with root", flags.AmRoot) + streamMessgs(c, servers, conf.Rate) + dbg.Lvl4("Finished streaming", flags.Name) + return +} + +// scaleServers will take the right percentage of server to contact to stamp +// request. If percentage is 0, only contact the leader (if the client is on the +// same physical machine than the leader/root). +func scaleServers(flags *app.Flags, conf *app.ConfigColl, servers []string) []string { + if len(servers) == 0 || conf.StampRatio > 1 { + dbg.Lvl1("Client wont change the servers percentage ") + return servers + } + if conf.StampRatio == -1 { + // take only the root if we are a "root client" also + if flags.AmRoot { + dbg.Lvl1("Client will only contact root") + return []string{servers[0]} + } else { + // others client dont do nothing + dbg.Lvl3("Client wont contact anyone") + return []string{} + } + } + // else take the right perc + i := int(math.Ceil(conf.StampRatio * float64(len(servers)))) + fn := dbg.Lvl3 + if flags.AmRoot { + fn = dbg.Lvl1 + } + fn("Client will contact", i, "/", len(servers), "servers") + return servers[0:i] +} + +func genRandomMessages(n int) [][]byte { + msgs := make([][]byte, n) + for i := range msgs { + msgs[i] = make([]byte, hashid.Size) + _, err := rand.Read(msgs[i]) + if err != nil { + log.Fatal("failed to generate random commit:", err) + } + } + return msgs +} + +func removeTrailingZeroes(a []int64) []int64 { + i := len(a) - 1 + for ; i >= 0; i-- { + if a[i] != 0 { + break + } + } + return a[:i+1] +} + +func streamMessgs(c *Client, servers []string, rate int) { + nServers := len(servers) + if nServers == 0 { + dbg.Lvl3("Stamp Client wont stream messages") + return + } + ticker := time.NewTicker(time.Second / time.Duration(rate)) + dbg.Lvl2(c.Name(), "streaming at given rate", rate, "msg / s") + msg := genRandomMessages(1)[0] + + i := 0 + +retry: + dbg.Lvl3(c.Name(), "checking if", servers[0], "is already up") + err := c.TimeStamp(msg, servers[0]) + if err == io.EOF || err == coconet.ErrClosed { + dbg.Lvl4("Client", c.Name(), "Couldn't connect to TimeStamp") + return + } else if err == ErrClientToTSTimeout { + dbg.Lvl4(err.Error()) + } else if err != nil { + time.Sleep(500 * time.Millisecond) + goto retry + } + dbg.Lvl3(c.Name(), "successfully connected to", servers[0]) + + // every tick send a time coll_stamp request to every server specified + // this will stream until we get an EOF + tick := 0 + abort := false + for _ = range ticker.C { + tick += 1 + go func(msg []byte, s string, tick int) { + dbg.Lvl4("StampClient will try stamprequest") + err := c.TimeStamp(msg, s) + + if err == io.EOF || err == coconet.ErrClosed { + if err == io.EOF { + dbg.Lvl4("Client", c.Name(), "terminating due to EOF", s) + } else { + dbg.Lvl4("Client", c.Name(), "terminating due to Connection Error Closed", s) + } + abort = true + return + } else if err != nil { + // ignore errors + dbg.Lvl4("Client", c.Name(), "Leaving out streamMessages.", err) + return + } + + }(msg, servers[i], tick) + + i = (i + 1) % nServers + if abort { + break + } + if (tick % 5000) == 0 { + dbg.Lvl3("Sent", tick, "timestamps so far to", nServers, "servers") + } + + } + + return +} diff --git a/dedis-travis-script.go b/dedis-travis-script.go new file mode 100644 
index 0000000000..2551bf6b52 --- /dev/null +++ b/dedis-travis-script.go @@ -0,0 +1,60 @@ +// Dedis script modified from github.com/dyv/dedis-ci-script.go +// to take into account deleted files +package main + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func main() { + // get all changed files + new files - removed files + cmdStr := "git diff --name-status production/master | grep -v ^D | cut -f2" + cmd := exec.Command("bash", "-c", cmdStr) + cmd.Stderr = os.Stderr + output, err := cmd.Output() + if err != nil { + fmt.Println("error running git diff: ", err) + os.Exit(1) + } + fmt.Println("git diff --name-only production/master: ", string(output)) + fnames := strings.Split(string(output), "\n") + fmt.Println("Files changed from origin/master: ", fnames) + good := true + for _, fn := range fnames { + // go source code must be properly formatted + if strings.HasSuffix(fn, ".go") { + if _, err := os.Stat(fn); os.IsNotExist(err) { + fmt.Printf("no such go file: %s\n", fn) + continue + } + fmtCmd := exec.Command("gofmt", "-l", fn) + fmtCmd.Stderr = os.Stderr + out, err := fmtCmd.Output() + if err != nil { + fmt.Println("Error Running go fmt: ", err) + os.Exit(1) + } + // if go fmt returns anything that means the file has been + // formatted and did not conform. + if len(out) != 0 { + fmt.Println("File not properly formatted: ", fn) + good = false + } + } + } + if good == false { + fmt.Println("Failed: files not properly formatted: Use gofmt") + os.Exit(1) + } + tests := exec.Command("go", "test", "-v", "./...", "-run", "[^T][^e][^s][^t][^S][^t][^a][^m][^p]") + tests.Stderr = os.Stderr + tests.Stdout = os.Stdout + err = tests.Run() + if err != nil { + fmt.Println("Tests Failed") + os.Exit(1) + } +} diff --git a/deploy/deploy.go b/deploy/deploy.go index 73bf339b21..3d54122b87 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -1,55 +1,250 @@ -package deploy - -type Platform interface { - Configure(*Config) - Build(build string) error - Deploy() error - Start() error - Stop() error -} - -func NewPlatform() Platform { - return &Deter{Config: NewConfig()} -} - -type Config struct { - // Number of machines/nodes - // Total number of hosts = hpn * nmachs - Nmachs int - // How many logservers to start up - // Total number of servers used: nmachs + nloggers - Nloggers int - // hpn is the replication factor of hosts per node: how many hosts do we want per node - Hpn int - // bf is the branching factor of the tree that we want to build - Bf int - - // How many messages to send - Nmsgs int - // The speed of messages/s - Rate int - // How many rounds - Rounds int - // Pre-defined failure rate - Failures int - // Rounds for root to wait before failing - RFail int - // Rounds for follower to wait before failing - FFail int - - // Debugging-level: 0 is none - 5 is everything - Debug int - // RootWait - how long the root timestamper waits for the clients to start up - RootWait int - // Which app to run - App string - // Coding-suite to run [nist256, nist512, ed25519] - Suite string -} - -func NewConfig() *Config { - return &Config{ - 4, 3, 1, 2, - 100, 30, 10, 0, 0, 0, - 1, 10, "coll_stamp", "ed25519"} +// Outputting data: output to csv files (for loading into excel) +// make a datastructure per test output file +// all output should be in the test_data subdirectory +// +// connect with logging server (receive json until "EOF" seen or "terminating") +// connect to websocket ws://localhost:8080/log +// receive each message as bytes +// if bytes contains "EOF" or contains "terminating" +// wrap up 
the round, output to test_data directory, kill deploy2deter +// +// for memstats check localhost:8080/d/server-0-0/debug/vars +// parse out the memstats zones that we are concerned with +// +// different graphs needed rounds: +// load on the x-axis: increase messages per round holding everything else constant +// hpn=40 bf=10, bf=50 +// +// latency on y-axis, timestamp servers on x-axis push timestampers as higher as possible +// +// +package main + +import ( + "flag" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/deploy/platform" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" +) + +// Configuration-variables +var deployP platform.Platform + +var platform_dst = "localhost" +var app = "" +var nobuild = false +var clean = true +var build = "" +var machines = 3 +var simRange = "" + +// SHORT TERM solution of referencing +// the different apps. +// TODO: make the lib/app/*COnfig.go have their own reference +// so they can issue Stats, read their own config depending on platform, +// etc etc +const ( + ShamirSign string = "shamir" + CollSign string = "sign" + CollStamp string = "stamp" + Naive string = "naive" + NTree string = "ntree" +) + +func init() { + flag.StringVar(&platform_dst, "platform", platform_dst, "platform to deploy to [deterlab,localhost]") + flag.BoolVar(&nobuild, "nobuild", false, "Don't rebuild all helpers") + flag.BoolVar(&clean, "clean", false, "Only clean platform") + flag.StringVar(&build, "build", "", "List of packages to build") + flag.IntVar(&machines, "machines", machines, "Number of machines on Deterlab") + flag.StringVar(&simRange, "range", simRange, "Range of simulations to run. 0: or 3:4 or :4") +} + +// Reads in the platform that we want to use and prepares for the tests +func main() { + flag.Parse() + deployP = platform.NewPlatform(platform_dst) + if deployP == nil { + dbg.Fatal("Platform not recognized.", platform_dst) + } + dbg.Lvl1("Deploying to", platform_dst) + + simulations := flag.Args() + if len(simulations) == 0 { + dbg.Fatal("Please give a simulation to run") + } + + for _, simulation := range simulations { + runconfigs := platform.ReadRunFile(deployP, simulation) + + if len(runconfigs) == 0 { + dbg.Fatal("No tests found in", simulation) + } + deployP.Configure() + + if clean { + deployP.Deploy(runconfigs[0]) + deployP.Cleanup() + } else { + logname := strings.Replace(filepath.Base(simulation), ".toml", "", 1) + RunTests(logname, runconfigs) + } + } +} + +// Runs the given tests and puts the output into the +// given file name. It outputs RunStats in a CSV format. 
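For reference, the `-range` flag registered above ("0: or 3:4 or :4") is resolved by `getStartStop` further down in this file into an inclusive start/stop pair that `RunTests` uses to skip runs outside the range. A standalone sketch of that parsing (the helper name `parseRange` and the example values are illustrative only, not part of the patch):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange mirrors getStartStop: it turns a "start:stop" string into an
// inclusive range over nRuns run-configurations.
func parseRange(simRange string, nRuns int) (int, int) {
	parts := strings.Split(simRange, ":")
	start, err := strconv.Atoi(parts[0])
	stop := nRuns
	if err == nil {
		stop = start
		if len(parts) > 1 {
			stop, err = strconv.Atoi(parts[1])
			if err != nil {
				stop = nRuns
			}
		}
	}
	return start, stop
}

func main() {
	fmt.Println(parseRange("3:4", 10)) // 3 4  -> runs 3 and 4 only
	fmt.Println(parseRange("0:", 10))  // 0 10 -> all runs
	fmt.Println(parseRange("3", 10))   // 3 3  -> a single run
	fmt.Println(parseRange(":4", 10))  // 0 10 -> Atoi("") fails, so full range
}
```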
+func RunTests(name string, runconfigs []platform.RunConfig) { + + if nobuild == false { + deployP.Build(build) + } + + MkTestDir() + rs := make([]monitor.Stats, len(runconfigs)) + nTimes := 1 + stopOnSuccess := true + var f *os.File + args := os.O_CREATE | os.O_RDWR | os.O_TRUNC + // If a range is given, we only append + if simRange != "" { + args = os.O_CREATE | os.O_RDWR | os.O_APPEND + } + f, err := os.OpenFile(TestFile(name), args, 0660) + if err != nil { + log.Fatal("error opening test file:", err) + } + defer f.Close() + err = f.Sync() + if err != nil { + log.Fatal("error syncing test file:", err) + } + + start, stop := getStartStop(len(runconfigs)) + for i, t := range runconfigs { + // Implement a simple range-argument that will skip checks not in range + if i < start || i > stop { + dbg.Lvl1("Skipping", t, "because of range") + continue + } + dbg.Lvl1("Doing run", t) + + // run test t nTimes times + // take the average of all successful runs + runs := make([]monitor.Stats, 0, nTimes) + for r := 0; r < nTimes; r++ { + stats, err := RunTest(t) + if err != nil { + log.Fatalln("error running test:", err) + } + + runs = append(runs, stats) + if stopOnSuccess { + break + } + } + + if len(runs) == 0 { + dbg.Lvl1("unable to get any data for test:", t) + continue + } + + s := monitor.AverageStats(runs) + if i == 0 { + s.WriteHeader(f) + } + rs[i] = s + rs[i].WriteValues(f) + err = f.Sync() + if err != nil { + log.Fatal("error syncing data to test file:", err) + } + } +} + +// Runs a single test - takes a test-file as a string that will be copied +// to the deterlab-server +func RunTest(rc platform.RunConfig) (monitor.Stats, error) { + done := make(chan struct{}) + if platform_dst == "localhost" { + machs := rc.Get("machines") + ppms := rc.Get("ppm") + mach, _ := strconv.Atoi(machs) + ppm, _ := strconv.Atoi(ppms) + rc.Put("machines", "1") + rc.Put("ppm", strconv.Itoa(ppm*mach)) + } + rs := monitor.NewStats(rc.Map()) + monitor := monitor.NewMonitor(rs) + + deployP.Deploy(rc) + deployP.Cleanup() + + // Start monitor before so ssh tunnel can connect to the monitor + // in case of deterlab. 
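+ // deployP.Start launches the remote processes; the goroutine below runs
+ // monitor.Listen and deployP.Wait and then signals `done`, which unblocks
+ // the select and stops the monitor.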
+ err := deployP.Start() + if err != nil { + log.Fatal(err) + return *rs, nil + } + + go func() { + monitor.Listen() + deployP.Wait() + dbg.Lvl3("Test complete:", rs) + done <- struct{}{} + }() + + // can timeout the command if it takes too long + select { + case <-done: + monitor.Stop() + return *rs, nil + } +} + +type runFile struct { + Machines int + Args string + Runs string +} + +func MkTestDir() { + err := os.MkdirAll("test_data/", 0777) + if err != nil { + log.Fatal("failed to make test directory") + } +} + +func TestFile(name string) string { + return "test_data/" + name + ".csv" +} + +func isZero(f float64) bool { + return math.Abs(f) < 0.0000001 +} + +// returns a tuple of start and stop configurations to run +func getStartStop(rcs int) (int, int) { + ss_str := strings.Split(simRange, ":") + start, err := strconv.Atoi(ss_str[0]) + stop := rcs + if err == nil { + stop = start + if len(ss_str) > 1 { + stop, err = strconv.Atoi(ss_str[1]) + if err != nil { + stop = rcs + } + } + } + dbg.Lvl2("Range is", start, "...", stop) + return start, stop } diff --git a/deploy/deploy_deterlab.go b/deploy/deploy_deterlab.go deleted file mode 100644 index 85b6ef445b..0000000000 --- a/deploy/deploy_deterlab.go +++ /dev/null @@ -1,425 +0,0 @@ -// Deterlab is responsible for setting up everything to test the application -// on deterlab.net -// Given a list of hostnames, it will create an overlay -// tree topology, using all but the last node. It will create multiple -// nodes per server and run timestamping processes. The last node is -// reserved for the logging server, which is forwarded to localhost:8081 -// -// Creates the following directory structure in remote: -// build/ - where all cross-compiled executables are stored -// deploy/ - directory to be copied to the deterlab server -// -// The following apps are used: -// deter - runs on the user-machine in deterlab and launches the others -// logserver - runs on the first three servers - first is the master, then two slaves -// forkexec - runs on the other servers and launches exec, so it can measure it's cpu usage -// -package deploy - -import ( - "log" - "os" - "os/exec" - "sync" - - "bufio" - "bytes" - "encoding/json" - _ "errors" - "fmt" - "github.com/BurntSushi/toml" - "github.com/dedis/cothority/lib/cliutils" - "github.com/dedis/cothority/lib/config" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/lib/graphs" - "io/ioutil" - "path" - "strconv" - "strings" - "time" -) - -type Deter struct { - Config *Config - // The login on the platform - Login string - // The outside host on the platform - Host string - // The name of the internal hosts - Project string - // Directory where everything is copied into - DeployDir string - // Directory for building - BuildDir string - // Working directory of deterlab - DeterDir string - // Where the main logging machine resides - masterLogger string - // DNS-resolvable names - phys []string - // VLAN-IP names - virt []string - physOut string - virtOut string - - // Channel to communication stopping of experiment - sshDeter chan string - - // Testing the connection? 
- TestConnect bool -} - -func (d *Deter) Configure(config *Config) { - d.Config = config - - // Directory setup - would also be possible in /tmp - pwd, _ := os.Getwd() - d.DeterDir = pwd + "/deploy/deterlab" - d.DeployDir = d.DeterDir + "/remote" - d.BuildDir = d.DeterDir + "/build" - d.Config.Debug = dbg.DebugVisible - - // Setting up channel - d.sshDeter = make(chan string) - d.checkDeterlabVars() -} - -func (d *Deter) Build(build string) error { - dbg.Lvl1("Building for", d.Login, d.Host, d.Project, build) - start := time.Now() - - var wg sync.WaitGroup - - // Start with a clean build-directory - current, _ := os.Getwd() - dbg.Lvl4("Current dir is:", current) - defer os.Chdir(current) - - // Go into deterlab-dir and create the build-dir - os.Chdir(d.DeterDir) - os.RemoveAll(d.BuildDir) - os.Mkdir(d.BuildDir, 0777) - - // start building the necessary packages - packages := []string{"logserver", "forkexec", "../../app", "deter"} - if build != "" { - packages = strings.Split(build, ",") - } - dbg.Lvl3("Starting to build all executables", packages) - for _, p := range packages { - basename := path.Base(p) - dbg.Lvl4("Building ", p, "into", basename) - wg.Add(1) - src := p + "/" + basename + ".go" - dst := d.BuildDir + "/" + basename - if p == "deter" { - go func(s, d string) { - defer wg.Done() - // the users node has a 386 FreeBSD architecture - out, err := cliutils.Build(s, d, "386", "freebsd") - if err != nil { - cliutils.KillGo() - fmt.Println(out) - log.Fatal(err) - } - }(src, dst) - continue - } - go func(s, d string) { - defer wg.Done() - // deter has an amd64, linux architecture - out, err := cliutils.Build(s, d, "amd64", "linux") - if err != nil { - cliutils.KillGo() - fmt.Println(out) - log.Fatal(err) - } - }(src, dst) - } - // wait for the build to finish - wg.Wait() - dbg.Lvl1("Build is finished after", time.Since(start)) - return nil -} - -func (d *Deter) Deploy() error { - dbg.Lvl1("Assembling all files and configuration options") - os.RemoveAll(d.DeployDir) - os.Mkdir(d.DeployDir, 0777) - - dbg.Lvl1("Writing config-files") - - d.generateHostsFile() - d.readHosts() - d.calculateGraph() - d.WriteConfig() - - // copy the webfile-directory of the logserver to the remote directory - err := exec.Command("cp", "-a", d.DeterDir+"/logserver/webfiles", - d.DeterDir+"/cothority.conf", d.DeployDir).Run() - if err != nil { - log.Fatal("error copying webfiles:", err) - } - build, err := ioutil.ReadDir(d.BuildDir) - for _, file := range build { - err = exec.Command("cp", d.BuildDir+"/"+file.Name(), d.DeployDir).Run() - if err != nil { - log.Fatal("error copying build-file:", err) - } - } - - dbg.Lvl1("Copying over to", d.Login, "@", d.Host) - // Copy everything over to deterlabs - err = cliutils.Rsync(d.Login, d.Host, d.DeployDir+"/", "remote/") - if err != nil { - log.Fatal(err) - } - - dbg.Lvl1("Done copying") - - dbg.Lvl3(cliutils.SshRunStdout(d.Login, d.Host, - "")) - - return nil -} - -func (d *Deter) Start() error { - dbg.Lvl1("Running with", d.Config.Nmachs, "nodes *", d.Config.Hpn, "hosts per node =", - d.Config.Nmachs*d.Config.Hpn, "and", d.Config.Nloggers, "loggers") - - // setup port forwarding for viewing log server - dbg.Lvl3("setup port forwarding for master logger: ", d.masterLogger, d.Login, d.Host) - cmd := exec.Command( - "ssh", - "-t", - "-t", - fmt.Sprintf("%s@%s", d.Login, d.Host), - "-L", - "8081:"+d.masterLogger+":10000") - err := cmd.Start() - if err != nil { - log.Fatal("failed to setup portforwarding for logging server") - } - - dbg.Lvl3("runnning deter with 
nmsgs:", d.Config.Nmsgs, d.Login, d.Host) - // run the deter lab boss nodes process - // it will be responsible for forwarding the files and running the individual - // timestamping servers - - go func() { - dbg.Lvl3(cliutils.SshRunStdout(d.Login, d.Host, - "GOMAXPROCS=8 remote/deter -nmsgs="+strconv.Itoa(d.Config.Nmsgs)+ - " -hpn="+strconv.Itoa(d.Config.Hpn)+ - " -bf="+strconv.Itoa(d.Config.Bf)+ - " -rate="+strconv.Itoa(d.Config.Rate)+ - " -rounds="+strconv.Itoa(d.Config.Rounds)+ - " -debug="+strconv.Itoa(d.Config.Debug)+ - " -failures="+strconv.Itoa(d.Config.Failures)+ - " -rfail="+strconv.Itoa(d.Config.RFail)+ - " -ffail="+strconv.Itoa(d.Config.FFail)+ - " -app="+d.Config.App+ - " -suite="+d.Config.Suite)) - dbg.Lvl3("Sending stop of ssh") - d.sshDeter <- "stop" - }() - - return nil -} - -func (d *Deter) Stop() error { - killssh := exec.Command("pkill", "-f", "ssh -t -t") - killssh.Stdout = os.Stdout - killssh.Stderr = os.Stderr - err := killssh.Run() - if err != nil { - dbg.Lvl3("Stopping ssh: ", err) - } - select { - case msg := <-d.sshDeter: - if msg == "stop" { - dbg.Lvl3("SSh is stopped") - } else { - dbg.Lvl1("Received other command", msg) - } - case <-time.After(time.Second * 3): - dbg.Lvl3("Timeout error when waiting for end of ssh") - } - return nil -} - -func (d *Deter) WriteConfig(dirOpt ...string) { - buf := new(bytes.Buffer) - if err := toml.NewEncoder(buf).Encode(d); err != nil { - log.Fatal(err) - } - dir := d.DeployDir - if len(dirOpt) > 0 { - dir = dirOpt[0] - } - err := ioutil.WriteFile(dir+"/config.toml", buf.Bytes(), 0660) - if err != nil { - log.Fatal(err) - } - dbg.Lvl4("Wrote login", d.Login, "to", dir) -} - -func ReadConfig(dirOpt ...string) (*Deter, error) { - var deter Deter - - dir := "." - if len(dirOpt) > 0 { - dir = dirOpt[0] - } - buf, err := ioutil.ReadFile(dir + "/config.toml") - if err != nil { - return &deter, err - } - - _, err = toml.Decode(string(buf), &deter) - if err != nil { - log.Fatal(err) - } - - return &deter, nil -} - -/* -* Write the hosts.txt file automatically -* from project name and number of servers - */ -func (d *Deter) generateHostsFile() error { - hosts_file := d.DeployDir + "/hosts.txt" - num_servers := d.Config.Nmachs + d.Config.Nloggers - - // open and erase file if needed - if _, err1 := os.Stat(hosts_file); err1 == nil { - dbg.Lvl4("Hosts file", hosts_file, "already exists. Erasing ...") - os.Remove(hosts_file) - } - // create the file - f, err := os.Create(hosts_file) - if err != nil { - log.Fatal("Could not create hosts file description: ", hosts_file, " :: ", err) - return err - } - defer f.Close() - - // write the name of the server + \t + IP address - ip := "10.255.0." - name := "SAFER.isi.deterlab.net" - for i := 1; i <= num_servers; i++ { - f.WriteString(fmt.Sprintf("server-%d.%s.%s\t%s%d\n", i-1, d.Project, name, ip, i)) - } - dbg.Lvl4(fmt.Sprintf("Created hosts file description (%d hosts)", num_servers)) - return err - -} - -// parse the hosts.txt file to create a separate list (and file) -// of physical nodes and virtual nodes. Such that each host on line i, in phys.txt -// corresponds to each host on line i, in virt.txt. 
-func (d *Deter) readHosts() { - hosts_file := d.DeployDir + "/hosts.txt" - nmachs, nloggers := d.Config.Nmachs, d.Config.Nloggers - - physVirt, err := cliutils.ReadLines(hosts_file) - if err != nil { - log.Panic("Couldn't find", hosts_file) - } - - d.phys = make([]string, 0, len(physVirt)/2) - d.virt = make([]string, 0, len(physVirt)/2) - for i := 0; i < len(physVirt); i += 2 { - d.phys = append(d.phys, physVirt[i]) - d.virt = append(d.virt, physVirt[i+1]) - } - d.phys = d.phys[:nmachs+nloggers] - d.virt = d.virt[:nmachs+nloggers] - d.physOut = strings.Join(d.phys, "\n") - d.virtOut = strings.Join(d.virt, "\n") - d.masterLogger = d.phys[0] - // slaveLogger1 := phys[1] - // slaveLogger2 := phys[2] - - // phys.txt and virt.txt only contain the number of machines that we need - dbg.Lvl3("Reading phys and virt") - err = ioutil.WriteFile(d.DeployDir+"/phys.txt", []byte(d.physOut), 0666) - if err != nil { - log.Fatal("failed to write physical nodes file", err) - } - - err = ioutil.WriteFile(d.DeployDir+"/virt.txt", []byte(d.virtOut), 0666) - if err != nil { - log.Fatal("failed to write virtual nodes file", err) - } -} - -// Calculates a tree that is used for the timestampers -func (d *Deter) calculateGraph() { - d.virt = d.virt[d.Config.Nloggers:] - d.phys = d.phys[d.Config.Nloggers:] - t, hostnames, depth, err := graphs.TreeFromList(d.virt, d.Config.Hpn, d.Config.Bf) - dbg.Lvl2("Depth:", depth) - dbg.Lvl2("Total hosts:", len(hostnames)) - total := d.Config.Nmachs * d.Config.Hpn - if len(hostnames) != total { - dbg.Lvl1("Only calculated", len(hostnames), "out of", total, "hosts - try changing number of", - "machines or hosts per node") - log.Fatal("Didn't calculate enough hosts") - } - - // generate the configuration file from the tree - cf := config.ConfigFromTree(t, hostnames) - cfb, err := json.Marshal(cf) - err = ioutil.WriteFile(d.DeployDir+"/tree.json", cfb, 0666) - if err != nil { - log.Fatal(err) - } -} - -// Checks whether host, login and project are defined. If any of them are missing, it will -// ask on the command-line. 
-// For the login-variable, it will try to set up a connection to d.Host and copy over the -// public key for a more easy communication -func (d *Deter) checkDeterlabVars() { - // Write - var config, err = ReadConfig(d.DeterDir) - - if err != nil { - dbg.Lvl1("Couldn't read config-file - asking for default values") - } - - if config.Host == "" { - d.Host = readString("Please enter the hostname of deterlab (enter for 'users.deterlab.net'): ", - "users.deterlab.net") - } else { - d.Host = config.Host - } - - if config.Login == "" { - d.Login = readString("Please enter the login-name on "+d.Host+":", "") - } else { - d.Login = config.Login - } - - if config.Project == "" { - d.Project = readString("Please enter the project on deterlab: ", "Dissent-CS") - } else { - d.Project = config.Project - } - - d.WriteConfig(d.DeterDir) -} - -// Shows a messages and reads in a string, eventually returning a default (dft) string -func readString(msg, dft string) string { - fmt.Print(msg) - - reader := bufio.NewReader(os.Stdin) - strnl, _ := reader.ReadString('\n') - str := strings.TrimSpace(strnl) - if str == "" { - return dft - } - return str -} diff --git a/deploy/deploy_test.go b/deploy/deploy_test.go new file mode 100644 index 0000000000..eb10087101 --- /dev/null +++ b/deploy/deploy_test.go @@ -0,0 +1,8 @@ +package main_test + +import ( + "testing" +) + +func TestBuild(t *testing.T) { +} diff --git a/deploy/deterlab/deter/deter.go b/deploy/deterlab/deter/deter.go deleted file mode 100644 index 4837573235..0000000000 --- a/deploy/deterlab/deter/deter.go +++ /dev/null @@ -1,333 +0,0 @@ -// deter is the deterlab process that should run on the boss node -// -// It spawns multiple timestampers and clients, while constructing -// the topology defined on tree.json. It assumes that hosts.txt has -// the entire list of hosts to run timestampers on and that the final -// host is the designated logging server. -// -// The overall topology that is created is defined by tree.json. -// The port layout for each node, however, is specified here. -// tree.json will assign each node a port p. This is the port -// that each singing node is listening on. The timestamp server -// to which clients connect is listneing on port p+1. And the -// pprof server for each node is listening on port p+2. This -// means that in order to debug each client, you can forward -// the p+2 port of each node to your localhost. -// -// In the future the loggingserver will be connecting to the -// servers on the pprof port in order to gather extra data. 
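Although the boss-node launcher below is removed by this change, the port layout its header documents (signing on p, client timestamp requests on p+1, pprof on p+2) is the same convention the new stamp client above follows when it dials port+1. A small sketch of deriving the three addresses from a tree.json host entry (helper name and example address are illustrative only):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// nodePorts splits a tree.json entry "host:p" and returns the signing,
// timestamp and pprof addresses following the p / p+1 / p+2 layout
// described above.
func nodePorts(hostport string) (string, string, string, error) {
	host, port, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", "", "", err
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		return "", "", "", err
	}
	return net.JoinHostPort(host, strconv.Itoa(p)),
		net.JoinHostPort(host, strconv.Itoa(p+1)),
		net.JoinHostPort(host, strconv.Itoa(p+2)), nil
}

func main() {
	fmt.Println(nodePorts("10.255.0.1:32600"))
}
```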
-package main - -import ( - "encoding/json" - "flag" - "io/ioutil" - "log" - "net" - "strconv" - "strings" - "sync" - "time" - - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/lib/cliutils" - "github.com/dedis/cothority/lib/config" - "github.com/dedis/cothority/lib/graphs" - "github.com/dedis/cothority/deploy" - "os" -) - -var deter deploy.Deter -var conf *deploy.Config -var rootname string -var kill = false - -func init() { - flag.BoolVar(&kill, "kill", false, "kill everything (and don't start anything)") - -} - -func main() { - deter, err := deploy.ReadConfig("remote") - if err != nil { - log.Fatal("Couldn't read config in deter:", err) - } - conf = deter.Config - dbg.DebugVisible = conf.Debug - - dbg.Lvl1("running deter with nmsgs:", conf.Nmsgs, "rate:", conf.Rate, "rounds:", conf.Rounds, "debug:", conf.Debug) - - virt, err := cliutils.ReadLines("remote/virt.txt") - if err != nil { - log.Fatal(err) - } - phys, err := cliutils.ReadLines("remote/phys.txt") - if err != nil { - log.Fatal(err) - } - vpmap := make(map[string]string) - for i := range virt { - vpmap[virt[i]] = phys[i] - } - // kill old processes - var wg sync.WaitGroup - doneHosts := make([]bool, len(phys)) - for i, h := range phys { - wg.Add(1) - go func(i int, h string) { - defer wg.Done() - dbg.Lvl4("Cleaning up host", h) - cliutils.SshRun("", h, "sudo killall app forkexec logserver timeclient scp ssh 2>/dev/null >/dev/null") - time.Sleep(1 * time.Second) - cliutils.SshRun("", h, "sudo killall app 2>/dev/null >/dev/null") - if dbg.DebugVisible > 3 { - dbg.Lvl4("Killing report:") - cliutils.SshRunStdout("", h, "ps ax") - } - doneHosts[i] = true - dbg.Lvl3("Host", h, "cleaned up") - }(i, h) - } - - cleanupChannel := make( chan string ) - go func() { - wg.Wait() - dbg.Lvl3("Done waiting") - cleanupChannel <- "done" - }() - select { - case msg := <-cleanupChannel: - dbg.Lvl3("Received msg from cleanupChannel", msg) - case <-time.After(time.Second * 10): - for i, m := range doneHosts { - if !m { - dbg.Lvl1("Missing host:", phys[i]) - } - } - dbg.Fatal("Didn't receive all replies.") - } - - if kill { - dbg.Lvl1("Returning only from cleanup") - return - } - - /* - * Why copy the stuff to the other nodes? We have NFS, no? 
- for _, h := range phys { - wg.Add(1) - go func(h string) { - defer wg.Done() - cliutils.Rsync("", h, "remote", "") - }(h) - } - wg.Wait() - */ - - nloggers := conf.Nloggers - masterLogger := phys[0] - loggers := []string{masterLogger} - dbg.Lvl3("Going to create", nloggers, "loggers") - for n := 1; n < nloggers; n++ { - loggers = append(loggers, phys[n]) - } - - phys = phys[nloggers:] - virt = virt[nloggers:] - - // Read in and parse the configuration file - file, err := ioutil.ReadFile("remote/tree.json") - if err != nil { - log.Fatal("deter.go: error reading configuration file: %v\n", err) - } - dbg.Lvl4("cfg file:", string(file)) - var cf config.ConfigFile - err = json.Unmarshal(file, &cf) - if err != nil { - log.Fatal("unable to unmarshal config.ConfigFile:", err) - } - - hostnames := cf.Hosts - dbg.Lvl4("hostnames:", hostnames) - - depth := graphs.Depth(cf.Tree) - var random_leaf string - cf.Tree.TraverseTree(func(t *graphs.Tree) { - if random_leaf != "" { - return - } - if len(t.Children) == 0 { - random_leaf = t.Name - } - }) - - rootname = hostnames[0] - - dbg.Lvl4("depth of tree:", depth) - - // mapping from physical node name to the timestamp servers that are running there - // essentially a reverse mapping of vpmap except ports are also used - physToServer := make(map[string][]string) - for _, virt := range hostnames { - v, _, _ := net.SplitHostPort(virt) - p := vpmap[v] - ss := physToServer[p] - ss = append(ss, virt) - physToServer[p] = ss - } - - // start up the logging server on the final host at port 10000 - dbg.Lvl1("starting up logservers: ", loggers) - // start up the master logger - loggerports := make([]string, len(loggers)) - for i, logger := range loggers { - loggerport := logger + ":10000" - loggerports[i] = loggerport - // redirect to the master logger - master := masterLogger + ":10000" - // if this is the master logger than don't set the master to anything - if loggerport == masterLogger + ":10000" { - master = "" - } - - // Copy configuration file to make higher file-limits - err = cliutils.SshRunStdout("", logger, "sudo cp remote/cothority.conf /etc/security/limits.d") - - if err != nil { - log.Fatal("Couldn't copy limit-file:", err) - } - - go cliutils.SshRunStdout("", logger, "cd remote; sudo ./logserver -addr=" + loggerport + - " -master=" + master) - } - - i := 0 - // For coll_stamp we have to wait for everything in place which takes quite some time - // We set up a directory and every host writes a file once he's ready to listen - // When everybody is ready, the directory is deleted and the test starts - coll_stamp_dir := "remote/coll_stamp_up" - if conf.App == "coll_stamp" || conf.App == "coll_sign" { - os.RemoveAll(coll_stamp_dir) - os.MkdirAll(coll_stamp_dir, 0777) - time.Sleep(time.Second) - } - dbg.Lvl1("starting", len(physToServer), "forkexecs") - totalServers := 0 - for phys, virts := range physToServer { - if len(virts) == 0 { - continue - } - totalServers += len(virts) - dbg.Lvl1("Launching forkexec for", len(virts), "clients on", phys) - //cmd := GenExecCmd(phys, virts, loggerports[i], random_leaf) - i = (i + 1) % len(loggerports) - wg.Add(1) - go func(phys string) { - //dbg.Lvl4("running on ", phys, cmd) - defer wg.Done() - dbg.Lvl4("Starting servers on physical machine ", phys) - err := cliutils.SshRunStdout("", phys, "cd remote; sudo ./forkexec" + - " -physaddr=" + phys + " -logger=" + loggerports[i]) - if err != nil { - log.Fatal("Error starting timestamper:", err, phys) - } - dbg.Lvl4("Finished with Timestamper", phys) - }(phys) - } - - if 
conf.App == "coll_stamp" || conf.App == "coll_sign" { - // Every stampserver that started up (mostly waiting for configuration-reading) - // writes its name in coll_stamp_dir - once everybody is there, the directory - // is cleaned to flag it's OK to go on. - start_config := time.Now() - for { - files, err := ioutil.ReadDir(coll_stamp_dir) - if err != nil { - log.Fatal("Couldn't read directory", coll_stamp_dir, err) - } else { - dbg.Lvl1("Stampservers started:", len(files), "/", totalServers, "after", time.Since(start_config)) - if len(files) == totalServers { - os.RemoveAll(coll_stamp_dir) - // 1st second for everybody to see the deleted directory - // 2nd second for everybody to start up listening - time.Sleep(2 * time.Second) - break - } - } - time.Sleep(time.Second) - } - } - - switch conf.App{ - case "coll_stamp": - dbg.Lvl1("starting", len(physToServer), "time clients") - // start up one timeclient per physical machine - // it requests timestamps from all the servers on that machine - for p, ss := range physToServer { - if len(ss) == 0 { - continue - } - servers := strings.Join(ss, ",") - go func(i int, p string) { - _, err := cliutils.SshRun("", p, "cd remote; sudo ./app -mode=client -app=" + conf.App + - " -name=client@" + p + - " -server=" + servers + - " -logger=" + loggerports[i]) - if err != nil { - dbg.Lvl4("Deter.go : timeclient error ", err) - } - dbg.Lvl4("Deter.go : Finished with timeclient", p) - }(i, p) - i = (i + 1) % len(loggerports) - } - case "coll_sign_no": - // TODO: for now it's only a simple startup from the server - dbg.Lvl1("Starting only one client") - /* - p := physToServer[0][0] - servers := strings.Join(physToServer[0][1], ",") - _, err = cliutils.SshRun("", p, "cd remote; sudo ./app -mode=client -app=" + conf.App + - " -name=client@" + p + - " -server=" + servers + - " -logger=" + loggerports[i]) - i = (i + 1) % len(loggerports) - */ - } - - // wait for the servers to finish before stopping - wg.Wait() - //time.Sleep(10 * time.Minute) -} - -// Generate all commands on one single physicial machines to launch every "nodes" -func GenExecCmd(phys string, names []string, loggerport, random_leaf string) string { - dbg.Lvl3("Random_leaf", random_leaf) - dbg.Lvl3("Names", names) - connect := false - cmd := "" - bg := " & " - for i, name := range names { - dbg.Lvl3("deter.go Generate cmd timestamper : name ==", name) - dbg.Lvl3("random_leaf ==", random_leaf) - dbg.Lvl3("testconnect is", deter.TestConnect) - if name == random_leaf && deter.TestConnect { - connect = true - } - amroot := " -amroot=false" - if name == rootname { - amroot = " -amroot=true" - } - - if i == len(names) - 1 { - bg = "" - } - cmd += "(cd remote; sudo ./forkexec" + - " -physaddr=" + phys + - " -hostname=" + name + - " -logger=" + loggerport + - " -test_connect=" + strconv.FormatBool(connect) + - amroot + bg + - " ); " - } - return cmd -} diff --git a/deploy/deterlab/forkexec/forkexec.go b/deploy/deterlab/forkexec/forkexec.go deleted file mode 100644 index 1112bfbf2f..0000000000 --- a/deploy/deterlab/forkexec/forkexec.go +++ /dev/null @@ -1,191 +0,0 @@ -package main - -import ( - "flag" - "os/exec" - "strconv" - - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/lib/logutils" - "os" - "github.com/dedis/cothority/deploy" - "github.com/dedis/cothority/lib/cliutils" - "net" - "github.com/dedis/cothority/lib/config" - "encoding/json" - "io/ioutil" - "github.com/dedis/cothority/lib/graphs" - "sync" -) - -// Wrapper around exec.go to 
enable measuring of cpu time - -var deter *deploy.Deter -var conf *deploy.Config -var logger string -var physaddr string -var testConnect bool - -// TODO: add debug flag for more debugging information (memprofilerate...) -func init() { - flag.StringVar(&logger, "logger", "", "remote logger") - flag.StringVar(&physaddr, "physaddr", "", "the physical address of the noded [for deterlab]") - flag.BoolVar(&testConnect, "test_connect", false, "test connecting and disconnecting") -} - -func main() { - deter, err := deploy.ReadConfig() - if err != nil { - log.Fatal("Couldn't load config-file in forkexec:", err) - } - conf = deter.Config - dbg.DebugVisible = conf.Debug - - flag.Parse() - - // connect with the logging server - if logger != "" { - // blocks until we can connect to the logger - lh, err := logutils.NewLoggerHook(logger, physaddr, conf.App) - if err != nil { - log.WithFields(log.Fields{ - "file": logutils.File(), - }).Fatalln("Error setting up logging server:", err) - } - log.AddHook(lh) - } - - setup_deter() - - i := 0 - var wg sync.WaitGroup - virts := physToServer[physaddr] - if len(virts) > 0 { - dbg.Lvl3("starting timestampers for", len(virts), "client(s)", virts) - i = (i + 1) % len(loggerports) - for _, name := range virts { - dbg.Lvl4("Starting", name, "on", physaddr) - wg.Add(1) - go func(nameport string) { - dbg.Lvl3("Running on", physaddr, "starting", nameport) - defer wg.Done() - - args := []string{ - "-hostname=" + nameport, - "-logger=" + logger, - "-physaddr=" + physaddr, - "-amroot=" + strconv.FormatBool(nameport == rootname), - "-test_connect=" + strconv.FormatBool(testConnect), - "-mode=server", - "-app=" + conf.App, - } - - dbg.Lvl3("Starting on", physaddr, "with args", args) - cmdApp := exec.Command("./app", args...) - //cmd.Stdout = log.StandardLogger().Writer() - //cmd.Stderr = log.StandardLogger().Writer() - cmdApp.Stdout = os.Stdout - cmdApp.Stderr = os.Stderr - dbg.Lvl3("fork-exec is running command:", args) - err = cmdApp.Run() - if err != nil { - dbg.Lvl2("cmd run:", err) - } - - // get CPU usage stats - st := cmdApp.ProcessState.SystemTime() - ut := cmdApp.ProcessState.UserTime() - log.WithFields(log.Fields{ - "file": logutils.File(), - "type": "forkexec", - "systime": st, - "usertime": ut, - }).Info("") - - dbg.Lvl2("Finished with Timestamper", physaddr) - }(name) - } - dbg.Lvl3(physaddr, "Finished starting timestampers") - wg.Wait() - } else { - dbg.Lvl2("No timestampers for", physaddr) - } - dbg.Lvl2(physaddr, "timestampers exited") -} - -var physToServer map[string][]string -var loggerports []string -var random_leaf string -var rootname string - -func setup_deter() { - virt, err := cliutils.ReadLines("virt.txt") - if err != nil { - log.Fatal(err) - } - phys, err := cliutils.ReadLines("phys.txt") - if err != nil { - log.Fatal(err) - } - vpmap := make(map[string]string) - for i := range virt { - vpmap[virt[i]] = phys[i] - } - nloggers := conf.Nloggers - masterLogger := phys[0] - loggers := []string{masterLogger} - for n := 1; n <= nloggers; n++ { - loggers = append(loggers, phys[n]) - } - - phys = phys[nloggers:] - virt = virt[nloggers:] - - // Read in and parse the configuration file - file, err := ioutil.ReadFile("tree.json") - if err != nil { - log.Fatal("deter.go: error reading configuration file: %v\n", err) - } - dbg.Lvl4("cfg file:", string(file)) - var cf config.ConfigFile - err = json.Unmarshal(file, &cf) - if err != nil { - log.Fatal("unable to unmarshal config.ConfigFile:", err) - } - - hostnames := cf.Hosts - dbg.Lvl4("hostnames:", hostnames) - 
- depth := graphs.Depth(cf.Tree) - cf.Tree.TraverseTree(func(t *graphs.Tree) { - if random_leaf != "" { - return - } - if len(t.Children) == 0 { - random_leaf = t.Name - } - }) - - rootname = hostnames[0] - - dbg.Lvl4("depth of tree:", depth) - - // mapping from physical node name to the timestamp servers that are running there - // essentially a reverse mapping of vpmap except ports are also used - physToServer = make(map[string][]string) - for _, virt := range hostnames { - v, _, _ := net.SplitHostPort(virt) - p := vpmap[v] - ss := physToServer[p] - ss = append(ss, virt) - physToServer[p] = ss - } - dbg.Lvl3("PhysToServer is", physToServer) - - loggerports = make([]string, len(loggers)) - for i, logger := range loggers { - loggerports[i] = logger + ":10000" - } - -} \ No newline at end of file diff --git a/deploy/deterlab/logserver/logserver.go b/deploy/deterlab/logserver/logserver.go deleted file mode 100644 index 8a70bbc659..0000000000 --- a/deploy/deterlab/logserver/logserver.go +++ /dev/null @@ -1,320 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "fmt" - "html/template" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/http/httputil" - "net/url" - "runtime" - "strconv" - "strings" - "sync" - "time" - - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/lib/cliutils" - "github.com/dedis/cothority/lib/graphs" - - "golang.org/x/net/websocket" - "github.com/dedis/cothority/lib/config" - "github.com/dedis/cothority/deploy" -) - -var deter deploy.Deter -var cfg *deploy.Config -var addr, master string -var homePage *template.Template - -type Home struct { - LogServer string - Hosts string - Depth string - BranchingFactor string - HostsPerNode string - NumberOfMessages string - Rate string -} - -var Log Logger - -func init() { - flag.StringVar(&addr, "addr", "", "the address of the logging server") - flag.StringVar(&master, "master", "", "address of the master of this node") - - Log = Logger{ - Slock: sync.RWMutex{}, - Sox: make(map[*websocket.Conn]bool), - Mlock: sync.RWMutex{}, - Msgs: make([][]byte, 0, 100000), - } - rand.Seed(42) -} - -func main() { - deter, err := deploy.ReadConfig() - if err != nil { - log.Fatal("Couldn't read config in logserver:", err) - } - cfg = deter.Config - dbg.DebugVisible = cfg.Debug - - runtime.GOMAXPROCS(runtime.NumCPU()) - // read in from flags the port I should be listening on - flag.Parse() - - if master == "" { - isMaster = true - } - var role string - if isMaster { - role = "Master" - } else { - role = "Servent" - } - dbg.Lvl3("running logserver", role, "with nmsgs", cfg.Nmsgs, "branching factor: ", cfg.Bf) - if isMaster { - var err error - homePage, err = template.ParseFiles("webfiles/home.html") - if err != nil { - log.Fatal("unable to parse home.html", err) - } - - debugServers := getDebugServers() - for _, s := range debugServers { - reverseProxy(s) - } - - dbg.Lvl4("Log server", role, "running at :", addr) - // /webfiles/Chart.js/Chart.min.js - http.HandleFunc("/", homeHandler) - fs := http.FileServer(http.Dir("webfiles/")) - http.Handle("/webfiles/", http.StripPrefix("/webfiles/", fs)) - } else { - retry: - tries := 0 - var err error - origin := "http://localhost/" - url := "ws://" + master + "/_log" - wsmaster, err = websocket.Dial(url, "", origin) - if err != nil { - tries += 1 - time.Sleep(time.Second) - dbg.Lvl4("Slave log server could not connect to logger master (", master, ") .. 
Trying again (", tries, ")") - goto retry - } - dbg.Lvl4("Slave Log server", role, "running at :", addr, "& connected to Master ") - } - http.Handle("/_log", websocket.Handler(logEntryHandler)) - http.Handle("/log", websocket.Handler(logHandler)) - http.HandleFunc("/htmllog", logHandlerHtml) - http.HandleFunc("/htmllogrev", logHandlerHtmlReverse) - dbg.Lvl1("Log-server", addr, "ready for service") - log.Fatalln("ERROR: ", http.ListenAndServe(addr, nil)) - // now combine that port -} - -type Logger struct { - Slock sync.RWMutex - Sox map[*websocket.Conn]bool - - Mlock sync.RWMutex - Msgs [][]byte - End int -} - -// keep a list of websockets that people are listening on - -// keep a log of messages received - -func logEntryHandler(ws *websocket.Conn) { - var data []byte - err := websocket.Message.Receive(ws, &data) - for err == nil { - //dbg.Lvl4("logEntryHandler", isMaster) - if !isMaster { - websocket.Message.Send(wsmaster, data) - } else { - Log.Mlock.Lock() - Log.Msgs = append(Log.Msgs, data) - Log.End += 1 - Log.Mlock.Unlock() - } - err = websocket.Message.Receive(ws, &data) - } - dbg.Lvl4("log server client error:", err) -} - -func logHandler(ws *websocket.Conn) { - dbg.Lvl4(master, "log server serving /log (websocket)") - i := 0 - for { - Log.Mlock.RLock() - end := Log.End - Log.Mlock.RUnlock() - if i >= end { - time.Sleep(100 * time.Millisecond) - continue - } - Log.Mlock.RLock() - msg := Log.Msgs[i] - Log.Mlock.RUnlock() - _, err := ws.Write(msg) - if err != nil { - dbg.Lvl4("unable to write to log websocket") - return - } - - i++ - } -} - -func homeHandler(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - dbg.Lvl4("home handler is handling non-home request") - http.NotFound(w, r) - return - } - dbg.Lvl4(master, "log server serving ", r.URL) - host := r.Host - // fmt.Println(host) - ws := "ws://" + host + "/log" - - err := homePage.Execute(w, Home{ws, strconv.Itoa(cfg.Nmachs * cfg.Hpn), strconv.Itoa(cfg.Hpn), strconv.Itoa(cfg.Bf), - strconv.Itoa(cfg.Hpn), strconv.Itoa(cfg.Nmsgs), strconv.Itoa(cfg.Rate)}) - if err != nil { - panic(err) - log.Fatal(err) - } -} - -func logHandlerHtml(w http.ResponseWriter, r *http.Request) { - dbg.Lvl4("Log handler: ", r.URL, "-", len(Log.Msgs)) - //host := r.Host - // fmt.Println(host) - for i, _ := range Log.Msgs { - var jsonlog map[string]*json.RawMessage - err := json.Unmarshal(Log.Msgs[i], &jsonlog) - if err != nil { - log.Error("Couldn't unmarshal string") - } - w.Write([]byte(fmt.Sprintf("%s - %s - %s - %s", *jsonlog["etime"], *jsonlog["eapp"], - *jsonlog["ehost"], *jsonlog["emsg"]))) - w.Write([]byte("\n")) - } -} - -func logHandlerHtmlReverse(w http.ResponseWriter, r *http.Request) { - dbg.Lvl4("Log handler: ", r.URL, "-", len(Log.Msgs)) - //host := r.Host - // fmt.Println(host) - for i := len(Log.Msgs) - 1; i >= 0; i-- { - var jsonlog map[string]*json.RawMessage - err := json.Unmarshal(Log.Msgs[i], &jsonlog) - if err != nil { - log.Error("Couldn't unmarshal string") - } - - w.Write([]byte(fmt.Sprintf("%s - %s - %s - %s", *jsonlog["etime"], *jsonlog["eapp"], - *jsonlog["ehost"], *jsonlog["emsg"]))) - w.Write([]byte("\n")) - } -} - -func NewReverseProxy(target *url.URL) *httputil.ReverseProxy { - director := func(r *http.Request) { - r.URL.Scheme = target.Scheme - r.URL.Host = target.Host - - // get rid of the (/d/short_name)/debug of the url path requested - // --> long_name/debug - pathComp := strings.Split(r.URL.Path, "/") - // remove the first two components /d/short_name - pathComp = pathComp[3:] - r.URL.Path = 
target.Path + "/" + strings.Join(pathComp, "/") - dbg.Lvl4("redirected to: ", r.URL.String()) - } - dbg.Lvl4("setup reverse proxy for destination url:", target.Host, target.Path) - return &httputil.ReverseProxy{Director: director} -} - -func proxyDebugHandler(p *httputil.ReverseProxy) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - dbg.Lvl4("proxy serving request for: ", r.URL) - p.ServeHTTP(w, r) - } -} - -var timesSeen = make(map[string]int) - -func reverseProxy(server string) { - remote, err := url.Parse("http://" + server) - if err != nil { - panic(err) - } - // get the short name of this remote - s := strings.Split(server, ".")[0] - short := s + "-" + strconv.Itoa(timesSeen[s]) - timesSeen[s] = timesSeen[s] + 1 - - // setup a reverse proxy s.t. - // - // "/d/short_name/debug" -> http://server/debug - // - proxy := NewReverseProxy(remote) - - //dbg.Lvl4("setup proxy for: /d/"+short+"/", " it points to : "+server) - // register the reverse proxy forwarding for this server - http.HandleFunc("/d/" + short + "/", proxyDebugHandler(proxy)) -} - -func getDebugServers() []string { - // read in physical nodes and virtual nodes into global variables - phys, err := cliutils.ReadLines("phys.txt") - if err != nil { - log.Errorln(err) - } - virt, err := cliutils.ReadLines("virt.txt") - if err != nil { - log.Errorln(err) - } - - // create mapping from virtual nodes to physical nodes - vpmap := make(map[string]string) - for i := range phys { - vpmap[virt[i]] = phys[i] - } - - // now read in the hosttree to get a list of servers - cfg, e := ioutil.ReadFile("tree.json") - if e != nil { - log.Fatal("Error Reading Configuration File:", e) - } - var cf config.ConfigFile - err = json.Unmarshal(cfg, &cf) - if err != nil { - log.Fatal("unable to unmarshal config.ConfigFile:", err) - } - - debugServers := make([]string, 0, len(virt)) - cf.Tree.TraverseTree(func(t *graphs.Tree) { - h, p, err := net.SplitHostPort(t.Name) - if err != nil { - log.Fatal("improperly formatted hostport:", err) - } - pn, _ := strconv.Atoi(p) - s := net.JoinHostPort(vpmap[h], strconv.Itoa(pn + 2)) - debugServers = append(debugServers, s) - }) - return debugServers -} - -var isMaster bool -var wsmaster *websocket.Conn - diff --git a/deploy/deterlab/logserver/webfiles/Chart.js/Chart.min.js b/deploy/deterlab/logserver/webfiles/Chart.js/Chart.min.js deleted file mode 100644 index 3a0a2c8734..0000000000 --- a/deploy/deterlab/logserver/webfiles/Chart.js/Chart.min.js +++ /dev/null @@ -1,11 +0,0 @@ -/*! 
- * Chart.js - * http://chartjs.org/ - * Version: 1.0.2 - * - * Copyright 2015 Nick Downie - * Released under the MIT license - * https://github.com/nnnick/Chart.js/blob/master/LICENSE.md - */ -(function(){"use strict";var t=this,i=t.Chart,e=function(t){this.canvas=t.canvas,this.ctx=t;var i=function(t,i){return t["offset"+i]?t["offset"+i]:document.defaultView.getComputedStyle(t).getPropertyValue(i)},e=this.width=i(t.canvas,"Width"),n=this.height=i(t.canvas,"Height");t.canvas.width=e,t.canvas.height=n;var e=this.width=t.canvas.width,n=this.height=t.canvas.height;return this.aspectRatio=this.width/this.height,s.retinaScale(this),this};e.defaults={global:{animation:!0,animationSteps:60,animationEasing:"easeOutQuart",showScale:!0,scaleOverride:!1,scaleSteps:null,scaleStepWidth:null,scaleStartValue:null,scaleLineColor:"rgba(0,0,0,.1)",scaleLineWidth:1,scaleShowLabels:!0,scaleLabel:"<%=value%>",scaleIntegersOnly:!0,scaleBeginAtZero:!1,scaleFontFamily:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",scaleFontSize:12,scaleFontStyle:"normal",scaleFontColor:"#666",responsive:!1,maintainAspectRatio:!0,showTooltips:!0,customTooltips:!1,tooltipEvents:["mousemove","touchstart","touchmove","mouseout"],tooltipFillColor:"rgba(0,0,0,0.8)",tooltipFontFamily:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",tooltipFontSize:14,tooltipFontStyle:"normal",tooltipFontColor:"#fff",tooltipTitleFontFamily:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",tooltipTitleFontSize:14,tooltipTitleFontStyle:"bold",tooltipTitleFontColor:"#fff",tooltipYPadding:6,tooltipXPadding:6,tooltipCaretSize:8,tooltipCornerRadius:6,tooltipXOffset:10,tooltipTemplate:"<%if (label){%><%=label%>: <%}%><%= value %>",multiTooltipTemplate:"<%= value %>",multiTooltipKeyBackground:"#fff",onAnimationProgress:function(){},onAnimationComplete:function(){}}},e.types={};var s=e.helpers={},n=s.each=function(t,i,e){var s=Array.prototype.slice.call(arguments,3);if(t)if(t.length===+t.length){var n;for(n=0;n=0;s--){var n=t[s];if(i(n))return n}},s.inherits=function(t){var i=this,e=t&&t.hasOwnProperty("constructor")?t.constructor:function(){return i.apply(this,arguments)},s=function(){this.constructor=e};return s.prototype=i.prototype,e.prototype=new s,e.extend=r,t&&a(e.prototype,t),e.__super__=i.prototype,e}),c=s.noop=function(){},u=s.uid=function(){var t=0;return function(){return"chart-"+t++}}(),d=s.warn=function(t){window.console&&"function"==typeof window.console.warn&&console.warn(t)},p=s.amd="function"==typeof define&&define.amd,f=s.isNumber=function(t){return!isNaN(parseFloat(t))&&isFinite(t)},g=s.max=function(t){return Math.max.apply(Math,t)},m=s.min=function(t){return Math.min.apply(Math,t)},v=(s.cap=function(t,i,e){if(f(i)){if(t>i)return i}else if(f(e)&&e>t)return e;return t},s.getDecimalPlaces=function(t){return t%1!==0&&f(t)?t.toString().split(".")[1].length:0}),S=s.radians=function(t){return t*(Math.PI/180)},x=(s.getAngleFromPoint=function(t,i){var e=i.x-t.x,s=i.y-t.y,n=Math.sqrt(e*e+s*s),o=2*Math.PI+Math.atan2(s,e);return 0>e&&0>s&&(o+=2*Math.PI),{angle:o,distance:n}},s.aliasPixel=function(t){return t%2===0?0:.5}),y=(s.splineCurve=function(t,i,e,s){var n=Math.sqrt(Math.pow(i.x-t.x,2)+Math.pow(i.y-t.y,2)),o=Math.sqrt(Math.pow(e.x-i.x,2)+Math.pow(e.y-i.y,2)),a=s*n/(n+o),h=s*o/(n+o);return{inner:{x:i.x-a*(e.x-t.x),y:i.y-a*(e.y-t.y)},outer:{x:i.x+h*(e.x-t.x),y:i.y+h*(e.y-t.y)}}},s.calculateOrderOfMagnitude=function(t){return Math.floor(Math.log(t)/Math.LN10)}),C=(s.calculateScaleRange=function(t,i,e,s,n){var 
o=2,a=Math.floor(i/(1.5*e)),h=o>=a,l=g(t),r=m(t);l===r&&(l+=.5,r>=.5&&!s?r-=.5:l+=.5);for(var c=Math.abs(l-r),u=y(c),d=Math.ceil(l/(1*Math.pow(10,u)))*Math.pow(10,u),p=s?0:Math.floor(r/(1*Math.pow(10,u)))*Math.pow(10,u),f=d-p,v=Math.pow(10,u),S=Math.round(f/v);(S>a||a>2*S)&&!h;)if(S>a)v*=2,S=Math.round(f/v),S%1!==0&&(h=!0);else if(n&&u>=0){if(v/2%1!==0)break;v/=2,S=Math.round(f/v)}else v/=2,S=Math.round(f/v);return h&&(S=o,v=f/S),{steps:S,stepValue:v,min:p,max:p+S*v}},s.template=function(t,i){function e(t,i){var e=/\W/.test(t)?new Function("obj","var p=[],print=function(){p.push.apply(p,arguments);};with(obj){p.push('"+t.replace(/[\r\t\n]/g," ").split("<%").join(" ").replace(/((^|%>)[^\t]*)'/g,"$1\r").replace(/\t=(.*?)%>/g,"',$1,'").split(" ").join("');").split("%>").join("p.push('").split("\r").join("\\'")+"');}return p.join('');"):s[t]=s[t];return i?e(i):e}if(t instanceof Function)return t(i);var s={};return e(t,i)}),w=(s.generateLabels=function(t,i,e,s){var o=new Array(i);return labelTemplateString&&n(o,function(i,n){o[n]=C(t,{value:e+s*(n+1)})}),o},s.easingEffects={linear:function(t){return t},easeInQuad:function(t){return t*t},easeOutQuad:function(t){return-1*t*(t-2)},easeInOutQuad:function(t){return(t/=.5)<1?.5*t*t:-0.5*(--t*(t-2)-1)},easeInCubic:function(t){return t*t*t},easeOutCubic:function(t){return 1*((t=t/1-1)*t*t+1)},easeInOutCubic:function(t){return(t/=.5)<1?.5*t*t*t:.5*((t-=2)*t*t+2)},easeInQuart:function(t){return t*t*t*t},easeOutQuart:function(t){return-1*((t=t/1-1)*t*t*t-1)},easeInOutQuart:function(t){return(t/=.5)<1?.5*t*t*t*t:-0.5*((t-=2)*t*t*t-2)},easeInQuint:function(t){return 1*(t/=1)*t*t*t*t},easeOutQuint:function(t){return 1*((t=t/1-1)*t*t*t*t+1)},easeInOutQuint:function(t){return(t/=.5)<1?.5*t*t*t*t*t:.5*((t-=2)*t*t*t*t+2)},easeInSine:function(t){return-1*Math.cos(t/1*(Math.PI/2))+1},easeOutSine:function(t){return 1*Math.sin(t/1*(Math.PI/2))},easeInOutSine:function(t){return-0.5*(Math.cos(Math.PI*t/1)-1)},easeInExpo:function(t){return 0===t?1:1*Math.pow(2,10*(t/1-1))},easeOutExpo:function(t){return 1===t?1:1*(-Math.pow(2,-10*t/1)+1)},easeInOutExpo:function(t){return 0===t?0:1===t?1:(t/=.5)<1?.5*Math.pow(2,10*(t-1)):.5*(-Math.pow(2,-10*--t)+2)},easeInCirc:function(t){return t>=1?t:-1*(Math.sqrt(1-(t/=1)*t)-1)},easeOutCirc:function(t){return 1*Math.sqrt(1-(t=t/1-1)*t)},easeInOutCirc:function(t){return(t/=.5)<1?-0.5*(Math.sqrt(1-t*t)-1):.5*(Math.sqrt(1-(t-=2)*t)+1)},easeInElastic:function(t){var i=1.70158,e=0,s=1;return 0===t?0:1==(t/=1)?1:(e||(e=.3),st?-.5*s*Math.pow(2,10*(t-=1))*Math.sin(2*(1*t-i)*Math.PI/e):s*Math.pow(2,-10*(t-=1))*Math.sin(2*(1*t-i)*Math.PI/e)*.5+1)},easeInBack:function(t){var i=1.70158;return 1*(t/=1)*t*((i+1)*t-i)},easeOutBack:function(t){var i=1.70158;return 1*((t=t/1-1)*t*((i+1)*t+i)+1)},easeInOutBack:function(t){var i=1.70158;return(t/=.5)<1?.5*t*t*(((i*=1.525)+1)*t-i):.5*((t-=2)*t*(((i*=1.525)+1)*t+i)+2)},easeInBounce:function(t){return 1-w.easeOutBounce(1-t)},easeOutBounce:function(t){return(t/=1)<1/2.75?7.5625*t*t:2/2.75>t?1*(7.5625*(t-=1.5/2.75)*t+.75):2.5/2.75>t?1*(7.5625*(t-=2.25/2.75)*t+.9375):1*(7.5625*(t-=2.625/2.75)*t+.984375)},easeInOutBounce:function(t){return.5>t?.5*w.easeInBounce(2*t):.5*w.easeOutBounce(2*t-1)+.5}}),b=s.requestAnimFrame=function(){return window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){return window.setTimeout(t,1e3/60)}}(),P=s.cancelAnimFrame=function(){return 
[minified Chart.js charting library (Bar, Doughnut, Line, PolarArea and Radar chart types) bundled with the logserver webfiles; third-party minified code omitted]
\ No newline at end of file diff --git a/deploy/deterlab/logserver/webfiles/home.html b/deploy/deterlab/logserver/webfiles/home.html deleted file mode 100644 index 7fd9c4589c..0000000000 --- a/deploy/deterlab/logserver/webfiles/home.html +++ /dev/null @@ -1,106 +0,0 @@
    Dissent: Coco: Logging Server
    Number of Hosts: {{.Hosts}}
    Depth: {{.Depth}}
    Branching Factor: {{.BranchingFactor}}
    Hosts Per Node: {{.HostsPerNode}}
    Message Rate: {{.Rate}}
    Minimum Time: s
    Maximum Time: s
    Average Time: s
    Standard Deviation: s
    - - - diff --git a/deploy/matplotlib/Paper_cosi/naive.csv b/deploy/matplotlib/Paper_cosi/naive.csv new file mode 100644 index 0000000000..4181fac14c --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/naive.csv @@ -0,0 +1,11 @@ +Peers, depth, bf, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_system_avg, round_user_avg, rate +2, 1, 0, 0.107781, 0.111317, 0.109456, 0.001339, 0.012000, 0.060003, 0.000000 +4, 1, 0, 0.109373, 0.112063, 0.110430, 0.000936, 0.012000, 0.080005, 0.000000 +8, 1, 0, 0.109714, 0.112092, 0.110723, 0.000801, 0.024001, 0.140008, 0.000000 +16, 1, 0, 0.110891, 0.114567, 0.112160, 0.001158, 0.020001, 0.292018, 0.000000 +32, 2, 0, 0.113602, 0.117075, 0.115819, 0.000921, 0.032002, 0.564035, 0.000000 +64, 4, 0, 0.117740, 0.125777, 0.121277, 0.002237, 0.032002, 1.036064, 0.000000 +128, 8, 0, 0.131319, 0.137211, 0.133148, 0.001569, 0.064004, 2.028126, 0.000000 +256, 16, 0, 0.155372, 0.190853, 0.168834, 0.011229, 0.132008, 4.360272, 0.000000 +512, 32, 0, 0.197721, 0.229393, 0.211155, 0.011126, 0.240015, 8.232514, 0.000000 +1024, 64, 0, 0.305224, 0.457782, 0.357817, 0.050497, 0.672042, 16.913057, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/naive_multi_adapted.csv b/deploy/matplotlib/Paper_cosi/naive_multi_adapted.csv new file mode 100644 index 0000000000..9a8ae12eed --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/naive_multi_adapted.csv @@ -0,0 +1,13 @@ +Peers, ppm, machines, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +2, 1, 2, 2.149478, 2.149478, 2.149478, NaN, 0.000464, 0.000464, 0.000464, NaN, 0.000061, 0.000061, 0.000061, NaN, 0.122447, 0.732038, 0.226155, 0.213804, 0.013968, 0.028528, 0.020207, 0.004034, 0.000000, 0.004294, 0.001036, 0.001752, 0.109325, 0.718876, 0.212095, 0.214716, 0.000000, 0.001755, 0.001258, 0.000531, 0.000000, 0.000152, 0.000054, 0.000066, 0.010465, 0.018391, 0.013945, 0.002578, 0.012733, 0.026742, 0.018860, 0.003832, 0.000000, 0.003918, 0.000765, 0.001417 +4, 1, 4, 2.215341, 2.215341, 2.215341, NaN, 0.001021, 0.001021, 0.001021, NaN, 0.000047, 0.000047, 0.000047, NaN, 0.144073, 0.158826, 0.149312, 0.004982, 0.047319, 0.063031, 0.053197, 0.005396, 0.000000, 0.009454, 0.003209, 0.003502, 0.111288, 0.113031, 0.112113, 0.000582, 0.000000, 0.004684, 0.003344, 0.001511, 0.000000, 0.000497, 0.000173, 0.000216, 0.031456, 0.046652, 0.037049, 0.005227, 0.043195, 0.059090, 0.049672, 0.004836, 0.000000, 0.009442, 0.003021, 0.003460 +8, 1, 8, 2.225200, 2.225200, 2.225200, NaN, 0.001893, 0.001893, 0.001893, NaN, 0.000430, 0.000430, 0.000430, NaN, 0.179210, 0.802932, 0.255500, 0.205361, 0.098173, 0.131989, 0.117171, 0.011302, 0.000003, 0.035272, 0.009916, 0.010091, 0.111058, 0.710765, 0.178795, 0.199491, 0.007641, 0.023022, 0.014094, 0.004842, 0.000000, 0.007775, 0.001629, 0.002654, 0.067188, 
0.092041, 0.076595, 0.007630, 0.086320, 0.112814, 0.102960, 0.008986, 0.000000, 0.027480, 0.008273, 0.008018 +16, 1, 16, 3.042633, 3.042633, 3.042633, NaN, 0.000000, 0.000000, 0.000000, NaN, 0.007863, 0.007863, 0.007863, NaN, 0.273311, 0.301766, 0.289560, 0.010141, 0.259882, 0.331173, 0.299199, 0.026087, 0.008298, 0.024712, 0.017114, 0.005182, 0.111389, 0.113961, 0.112558, 0.001018, 0.013072, 0.026369, 0.017859, 0.004215, 0.000000, 0.002735, 0.000740, 0.000863, 0.160159, 0.189830, 0.176867, 0.010505, 0.237596, 0.314467, 0.281097, 0.027736, 0.008292, 0.023996, 0.016360, 0.005432 +32, 2, 16, 2.334703, 2.334703, 2.334703, NaN, 0.005692, 0.005692, 0.005692, NaN, 0.002070, 0.002070, 0.002070, NaN, 0.476406, 1.104456, 0.589724, 0.213780, 0.652631, 0.803475, 0.722611, 0.048623, 0.020570, 0.093562, 0.055238, 0.023769, 0.112816, 0.722050, 0.215480, 0.214497, 0.033750, 0.063705, 0.044574, 0.008920, 0.000000, 0.005866, 0.002282, 0.002092, 0.351403, 0.391673, 0.374106, 0.013097, 0.617935, 0.756468, 0.677831, 0.043421, 0.019547, 0.087685, 0.052939, 0.023039 +64, 4, 16, 3.397990, 3.397990, 3.397990, NaN, 0.006741, 0.006741, 0.006741, NaN, 0.015844, 0.015844, 0.015844, NaN, 0.953604, 5.089125, 1.572928, 1.385345, 1.962243, 2.381110, 2.195813, 0.124319, 0.095497, 0.216070, 0.171546, 0.038505, 0.115864, 4.170874, 0.708937, 1.364259, 0.073452, 0.141499, 0.109819, 0.023400, 0.000000, 0.051490, 0.012540, 0.015536, 0.834164, 0.918049, 0.863769, 0.024540, 1.888487, 2.258782, 2.085423, 0.114631, 0.088558, 0.216054, 0.158964, 0.039315 +128, 8, 16, 4.647358, 4.647358, 4.647358, NaN, 0.024656, 0.024656, 0.024656, NaN, 0.010183, 0.010183, 0.010183, NaN, 1.882217, 2.008761, 1.943461, 0.045217, 6.023346, 6.719218, 6.342459, 0.278972, 0.219800, 0.394654, 0.316736, 0.054116, 0.122045, 0.132540, 0.126166, 0.003478, 0.180925, 0.310946, 0.248286, 0.045081, 0.002873, 0.036411, 0.015811, 0.011489, 1.757296, 1.881254, 1.817069, 0.046049, 5.766167, 6.481150, 6.093811, 0.279305, 0.214916, 0.372477, 0.300904, 0.053515 +256, 16, 16, 2.418006, 2.418006, 2.418006, NaN, 0.051689, 0.051689, 0.051689, NaN, 0.006154, 0.006154, 0.006154, NaN, 3.532886, 3.892591, 3.648077, 0.130913, 12.234069, 13.566960, 12.699983, 0.382958, 0.435716, 0.657855, 0.567719, 0.079210, 0.142571, 0.429295, 0.210626, 0.122497, 0.448661, 0.565021, 0.525156, 0.039152, 0.019788, 0.099308, 0.053056, 0.027213, 3.385408, 3.491384, 3.436813, 0.033042, 11.688735, 13.002474, 12.173816, 0.373005, 0.415919, 0.623608, 0.514191, 0.072667 +512, 32, 16, 3.265607, 3.265607, 3.265607, NaN, 0.048170, 0.048170, 0.048170, NaN, 0.033896, 0.033896, 0.033896, NaN, 6.641817, 6.775263, 6.714503, 0.039182, 24.533214, 26.552762, 25.612716, 0.628075, 0.802844, 1.112167, 0.967328, 0.114594, 0.170242, 0.189859, 0.179768, 0.006249, 0.866378, 1.162505, 1.010752, 0.098360, 0.036069, 0.172026, 0.104031, 0.040930, 6.471362, 6.595588, 6.534489, 0.037966, 23.579096, 25.520018, 24.601231, 0.607395, 0.693306, 0.970734, 0.862833, 0.096513 +1024, 64, 16, 2.524620, 2.524620, 2.524620, NaN, 0.091990, 0.091990, 0.091990, NaN, 0.075541, 0.075541, 0.075541, NaN, 12.849231, 13.450565, 13.021737, 0.185242, 48.813015, 51.450086, 50.321668, 0.828013, 1.616111, 1.847828, 1.743614, 0.086476, 0.232504, 0.694065, 0.318844, 0.162352, 1.782057, 2.253614, 1.947622, 0.144371, 0.196719, 0.364672, 0.280733, 0.058786, 12.605517, 12.816288, 12.702557, 0.075687, 46.852650, 49.522008, 48.373341, 0.810519, 1.352785, 1.630403, 1.462868, 0.084947 +2048, 128, 16, 2.731492, 2.731492, 2.731492, NaN, 0.161706, 0.161706, 0.161706, 
NaN, 0.146355, 0.146355, 0.146355, NaN, 24.566124, 25.579450, 25.160792, 0.421900, 90.222232, 97.354623, 94.202086, 2.147500, 2.457983, 3.118737, 2.747375, 0.226673, 0.343136, 1.478201, 0.892748, 0.502496, 3.570382, 4.348449, 3.995048, 0.242013, 0.411758, 0.677569, 0.540538, 0.110055, 24.101032, 24.520927, 24.267608, 0.131950, 86.651578, 93.354139, 90.205687, 1.996526, 2.006468, 2.596654, 2.206353, 0.201578 +4096, 256, 16, 3.109345, 3.109345, 3.109345, NaN, 0.356967, 0.356967, 0.356967, NaN, 0.244210, 0.244210, 0.244210, NaN, 48.527043, 50.115286, 49.466677, 0.532558, 183.610142, 191.214624, 187.932165, 2.927271, 5.445639, 7.285453, 6.406255, 0.657777, 0.662945, 1.512811, 1.184711, 0.320911, 7.256973, 9.352191, 8.504080, 0.771679, 0.789419, 1.874679, 1.343266, 0.409962, 47.863698, 48.848121, 48.281555, 0.342559, 175.956653, 182.440741, 179.426076, 2.568091, 4.656170, 5.641021, 5.062960, 0.309957 diff --git a/deploy/matplotlib/Paper_cosi/naive_multi_adapted_skipcheck.csv b/deploy/matplotlib/Paper_cosi/naive_multi_adapted_skipcheck.csv new file mode 100644 index 0000000000..6bd954e582 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/naive_multi_adapted_skipcheck.csv @@ -0,0 +1,13 @@ +Peers, ppm, machines, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +2, 1, 2, 2.162814, 2.162814, 2.162814, NaN, 0.000000, 0.000000, 0.000000, NaN, 0.003688, 0.003688, 0.003688, NaN, 0.108789, 0.112970, 0.111051, 0.001410, 0.001211, 0.006784, 0.002249, 0.001777, 0.000000, 0.004596, 0.000637, 0.001489, 0.108659, 0.112840, 0.110903, 0.001401, 0.001075, 0.006178, 0.001976, 0.001602, 0.000000, 0.004518, 0.000603, 0.001471, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4, 1, 4, 2.261748, 2.261748, 2.261748, NaN, 0.001086, 0.001086, 0.001086, NaN, 0.000045, 0.000045, 0.000045, NaN, 0.111346, 0.113481, 0.112485, 0.000785, 0.001487, 0.012407, 0.005724, 0.003187, 0.000000, 0.003116, 0.000405, 0.001023, 0.111208, 0.113343, 0.112334, 0.000784, 0.001325, 0.012087, 0.005620, 0.003134, 0.000000, 0.003102, 0.000397, 0.001019, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +8, 1, 8, 2.252133, 2.252133, 2.252133, NaN, 0.002129, 0.002129, 0.002129, NaN, 0.000203, 0.000203, 0.000203, NaN, 0.111859, 0.113892, 0.112893, 0.000768, 0.005026, 0.022279, 0.012869, 0.005164, 0.000000, 0.005373, 0.000850, 0.001740, 0.111719, 0.113744, 0.112743, 0.000761, 0.004916, 0.020451, 0.012531, 0.004812, 0.000000, 0.005360, 0.000839, 0.001738, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +16, 1, 16, 2.348559, 2.348559, 2.348559, NaN, 0.003763, 0.003763, 0.003763, NaN, 0.000990, 0.000990, 
0.000990, NaN, 0.111954, 0.116259, 0.113684, 0.001363, 0.021857, 0.040298, 0.028705, 0.005684, 0.000000, 0.003225, 0.000750, 0.001233, 0.111813, 0.116068, 0.113518, 0.001352, 0.021857, 0.039920, 0.028533, 0.005556, 0.000000, 0.003155, 0.000733, 0.001216, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +32, 2, 16, 2.282935, 2.282935, 2.282935, NaN, 0.002059, 0.002059, 0.002059, NaN, 0.004276, 0.004276, 0.004276, NaN, 0.112946, 0.418230, 0.148528, 0.101146, 0.042752, 0.068176, 0.054937, 0.007040, 0.000001, 0.008901, 0.002365, 0.002850, 0.112721, 0.418077, 0.148386, 0.101142, 0.042593, 0.067920, 0.054679, 0.007030, 0.000000, 0.008892, 0.002342, 0.002847, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +64, 4, 16, 2.432438, 2.432438, 2.432438, NaN, 0.015908, 0.015908, 0.015908, NaN, 0.008877, 0.008877, 0.008877, NaN, 0.115713, 0.119763, 0.117442, 0.001321, 0.077461, 0.138365, 0.116023, 0.017028, 0.000013, 0.041783, 0.014424, 0.012371, 0.115511, 0.119604, 0.117280, 0.001325, 0.077188, 0.138018, 0.115719, 0.016984, 0.000000, 0.041693, 0.014372, 0.012349, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +128, 8, 16, 2.263759, 2.263759, 2.263759, NaN, 0.018577, 0.018577, 0.018577, NaN, 0.015658, 0.015658, 0.015658, NaN, 0.118779, 0.126526, 0.122521, 0.002512, 0.197578, 0.300674, 0.239389, 0.038813, 0.000000, 0.037890, 0.009684, 0.011992, 0.118625, 0.126385, 0.122200, 0.002496, 0.197221, 0.297481, 0.238802, 0.038315, 0.000000, 0.037857, 0.009648, 0.011992, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +256, 16, 16, 2.379312, 2.379312, 2.379312, NaN, 0.039301, 0.039301, 0.039301, NaN, 0.018636, 0.018636, 0.018636, NaN, 0.138689, 0.433532, 0.208180, 0.126703, 0.450507, 0.638540, 0.557164, 0.076020, 0.028428, 0.104308, 0.050030, 0.027838, 0.138497, 0.433374, 0.207878, 0.126488, 0.450335, 0.638473, 0.556737, 0.076017, 0.025110, 0.104197, 0.049617, 0.028167, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +512, 32, 16, 2.424230, 2.424230, 2.424230, NaN, 0.056234, 0.056234, 0.056234, NaN, 0.039549, 0.039549, 0.039549, NaN, 0.164347, 0.209145, 0.180733, 0.015520, 0.914423, 1.401130, 1.104237, 0.182554, 0.048011, 0.281308, 0.124081, 0.071218, 0.164162, 0.208981, 0.180477, 0.015597, 0.913116, 1.400968, 1.103812, 0.182773, 0.047985, 0.281289, 0.124026, 0.071228, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +1024, 64, 16, 2.620283, 2.620283, 2.620283, NaN, 0.101282, 0.101282, 0.101282, NaN, 0.068816, 0.068816, 0.068816, NaN, 0.225102, 0.641932, 0.281983, 0.135311, 1.892838, 2.202524, 2.085222, 0.093903, 0.179583, 0.352930, 0.254071, 0.061393, 0.224842, 0.641787, 0.281703, 0.135362, 1.892665, 2.200828, 2.083247, 0.093477, 0.178929, 0.352709, 0.253813, 0.061379, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +2048, 128, 16, 3.591042, 3.591042, 3.591042, NaN, 0.222287, 0.222287, 0.222287, NaN, 0.121945, 0.121945, 0.121945, NaN, 0.385471, 2.412475, 0.873394, 0.658180, 3.635265, 5.371061, 4.238118, 0.525132, 0.416687, 0.952250, 0.637206, 0.166236, 0.385295, 2.412237, 0.873165, 0.658172, 3.635109, 5.370724, 4.237775, 
0.525154, 0.416644, 0.952196, 0.637153, 0.166222, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 5.187542, 5.187542, 5.187542, NaN, 0.366369, 0.366369, 0.366369, NaN, 0.242128, 0.242128, 0.242128, NaN, 0.750386, 5.781441, 2.545326, 1.686728, 7.865645, 10.697411, 9.222106, 1.105411, 1.043138, 2.769250, 1.740422, 0.581571, 0.750189, 5.779650, 2.544846, 1.686343, 7.865368, 10.697088, 9.221561, 1.105117, 1.043083, 2.769089, 1.740296, 0.581492, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/naive_sc.csv b/deploy/matplotlib/Paper_cosi/naive_sc.csv new file mode 100644 index 0000000000..4181fac14c --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/naive_sc.csv @@ -0,0 +1,11 @@ +Peers, depth, bf, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_system_avg, round_user_avg, rate +2, 1, 0, 0.107781, 0.111317, 0.109456, 0.001339, 0.012000, 0.060003, 0.000000 +4, 1, 0, 0.109373, 0.112063, 0.110430, 0.000936, 0.012000, 0.080005, 0.000000 +8, 1, 0, 0.109714, 0.112092, 0.110723, 0.000801, 0.024001, 0.140008, 0.000000 +16, 1, 0, 0.110891, 0.114567, 0.112160, 0.001158, 0.020001, 0.292018, 0.000000 +32, 2, 0, 0.113602, 0.117075, 0.115819, 0.000921, 0.032002, 0.564035, 0.000000 +64, 4, 0, 0.117740, 0.125777, 0.121277, 0.002237, 0.032002, 1.036064, 0.000000 +128, 8, 0, 0.131319, 0.137211, 0.133148, 0.001569, 0.064004, 2.028126, 0.000000 +256, 16, 0, 0.155372, 0.190853, 0.168834, 0.011229, 0.132008, 4.360272, 0.000000 +512, 32, 0, 0.197721, 0.229393, 0.211155, 0.011126, 0.240015, 8.232514, 0.000000 +1024, 64, 0, 0.305224, 0.457782, 0.357817, 0.050497, 0.672042, 16.913057, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/ntree.csv b/deploy/matplotlib/Paper_cosi/ntree.csv new file mode 100644 index 0000000000..0eacb64df2 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/ntree.csv @@ -0,0 +1,11 @@ +Peers, depth, bf, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_system_avg, round_user_avg, rate +2, 1, 2, 0.109487, 0.112366, 0.110365, 0.000911, 0.064004, 0.420026, 0.000000 +4, 1, 3, 0.113903, 0.119744, 0.116065, 0.002480, 0.072004, 1.444090, 0.000000 +8, 1, 4, 0.222039, 0.228945, 0.224213, 0.002539, 0.152009, 3.136196, 0.000000 +16, 1, 5, 0.225821, 0.637782, 0.270420, 0.122480, 0.292018, 6.904431, 0.000000 +32, 2, 6, 0.237036, 10.414195, 1.365112, 3.032999, 0.228014, 16.041002, 0.000000 +64, 4, 7, 0.353187, 0.974524, 0.421458, 0.184411, 0.468029, 29.941871, 0.000000 +128, 8, 8, 0.476069, 1.946035, 0.644529, 0.434168, 1.536096, 55.447465, 0.000000 +256, 16, 9, 0.566753, 0.921326, 0.651212, 0.096300, 5.124320, 121.995624, 0.000000 +512, 32, 10, 0.989090, 1.285709, 1.112588, 0.092960, 2.600162, 289.246076, 0.000000 +1024, 64, 10, 1.621387, 2.250708, 1.872934, 0.233009, 4.588286, 581.416336, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/ntree_multi_adapted.csv b/deploy/matplotlib/Paper_cosi/ntree_multi_adapted.csv new file mode 100644 index 0000000000..7611ef8354 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/ntree_multi_adapted.csv @@ -0,0 +1,11 @@ +Peers, ppm, machines, bf, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, 
round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +2, 1, 2, 2, 0.100449, 0.100449, 0.100449, NaN, 0.000317, 0.000317, 0.000317, NaN, 0.000013, 0.000013, 0.000013, NaN, 0.120324, 0.130494, 0.124703, 0.003074, 0.012450, 0.026531, 0.019862, 0.003272, 0.000000, 0.004819, 0.001877, 0.002040, 0.108487, 0.113403, 0.110655, 0.001471, 0.000000, 0.005072, 0.001988, 0.001055, 0.000000, 0.004251, 0.000301, 0.000964, 0.010372, 0.018900, 0.013875, 0.002330, 0.010535, 0.024823, 0.017741, 0.003594, 0.000000, 0.004759, 0.001567, 0.002015 +4, 1, 4, 3, 2.402086, 2.402086, 2.402086, NaN, 0.001723, 0.001723, 0.001723, NaN, 0.000299, 0.000299, 0.000299, NaN, 0.126312, 0.135937, 0.130433, 0.002771, 0.056680, 0.084308, 0.068500, 0.007670, 0.000000, 0.014448, 0.004768, 0.004224, 0.111084, 0.113896, 0.112062, 0.000715, 0.001040, 0.008464, 0.005115, 0.001888, 0.000000, 0.005046, 0.001173, 0.001725, 0.013270, 0.024231, 0.018120, 0.002758, 0.049251, 0.078458, 0.062879, 0.008579, 0.000000, 0.011986, 0.003564, 0.003640 +8, 1, 8, 4, 1.401838, 1.401838, 1.401838, NaN, 0.000000, 0.000000, 0.000000, NaN, 0.003664, 0.003664, 0.003664, NaN, 0.259127, 0.774013, 0.337518, 0.134740, 0.107807, 0.190077, 0.138552, 0.021993, 0.001693, 0.018544, 0.010915, 0.005620, 0.214062, 0.716911, 0.285401, 0.133776, 0.000000, 0.022813, 0.010676, 0.005102, 0.000000, 0.008378, 0.001326, 0.002636, 0.043468, 0.062537, 0.051840, 0.005334, 0.098107, 0.174173, 0.127343, 0.020324, 0.001685, 0.018054, 0.008653, 0.004546 +16, 1, 16, 5, 0.502244, 0.502244, 0.502244, NaN, 0.001865, 0.001865, 0.001865, NaN, 0.000311, 0.000311, 0.000311, NaN, 0.304642, 0.980866, 0.428776, 0.211476, 0.288756, 0.386870, 0.340646, 0.028421, 0.005361, 0.035717, 0.019466, 0.008661, 0.218460, 0.877611, 0.332373, 0.209095, 0.014104, 0.026170, 0.019684, 0.004125, 0.000000, 0.004106, 0.000915, 0.001478, 0.084005, 0.108867, 0.096149, 0.006987, 0.272859, 0.363736, 0.320189, 0.027676, 0.004939, 0.034495, 0.018350, 0.007980 +32, 2, 16, 6, 0.602502, 0.602502, 0.602502, NaN, 0.002464, 0.002464, 0.002464, NaN, 0.000099, 0.000099, 0.000099, NaN, 0.338123, 10.844336, 0.996569, 2.407182, 0.618175, 0.910253, 0.802950, 0.080110, 0.019672, 0.082388, 0.045658, 0.019572, 0.221120, 10.703160, 0.851044, 2.408126, 0.021895, 0.052829, 0.039810, 0.008662, 0.000000, 0.011150, 0.001854, 0.003238, 0.114925, 0.169426, 0.145289, 0.013756, 0.570136, 0.862427, 0.762829, 0.078910, 0.014017, 0.082023, 0.043786, 0.019835 +64, 4, 16, 7, 0.702889, 0.702889, 0.702889, NaN, 0.003155, 0.003155, 0.003155, NaN, 0.000204, 0.000204, 0.000204, NaN, 0.575264, 3.213600, 0.805674, 0.597556, 1.372747, 2.064277, 1.776329, 0.162749, 0.041361, 0.170027, 0.106460, 0.035849, 0.325782, 2.929171, 0.520955, 0.596960, 0.059881, 0.085914, 0.071062, 0.006368, 0.000000, 0.008117, 0.002609, 0.002948, 0.240736, 0.304030, 0.284507, 0.016655, 1.303233, 1.984689, 1.704989, 0.161080, 0.040402, 0.167943, 0.103629, 0.035390 +128, 8, 16, 8, 0.802883, 0.802883, 0.802883, NaN, 0.000860, 0.000860, 0.000860, NaN, 0.003341, 0.003341, 0.003341, NaN, 1.212799, 1.268417, 1.239000, 0.014515, 2.986866, 3.304737, 
3.175518, 0.084134, 0.105020, 0.239617, 0.181537, 0.030122, 0.362400, 0.381427, 0.373056, 0.005521, 0.092680, 0.130903, 0.114604, 0.011249, 0.000000, 0.020054, 0.005847, 0.005379, 0.841139, 0.899149, 0.865737, 0.015829, 2.874638, 3.177449, 3.060657, 0.084856, 0.101153, 0.239181, 0.175676, 0.030639 +256, 16, 16, 9, 3.004187, 3.004187, 3.004187, NaN, 0.001052, 0.001052, 0.001052, NaN, 0.003822, 0.003822, 0.003822, NaN, 1.846035, 2.043132, 1.953409, 0.043218, 6.222078, 6.900675, 6.629020, 0.163824, 0.364474, 0.501118, 0.427074, 0.038674, 0.394537, 0.420693, 0.409594, 0.007018, 0.167812, 0.240315, 0.212018, 0.019970, 0.000000, 0.017099, 0.009639, 0.004775, 1.451295, 1.629069, 1.543592, 0.040779, 6.054077, 6.679238, 6.416731, 0.159161, 0.361835, 0.483982, 0.417417, 0.036271 +512, 32, 16, 10, 3.104737, 3.104737, 3.104737, NaN, 0.005705, 0.005705, 0.005705, NaN, 0.000000, 0.000000, 0.000000, NaN, 2.849617, 3.014967, 2.947142, 0.046183, 15.296439, 16.346522, 15.914362, 0.266065, 0.768807, 1.031957, 0.922639, 0.076157, 0.455074, 0.491450, 0.475407, 0.011755, 0.370293, 0.511009, 0.450685, 0.046176, 0.008830, 0.049172, 0.021657, 0.011631, 2.373389, 2.545136, 2.471420, 0.046784, 14.819275, 15.835873, 15.463132, 0.265704, 0.758596, 1.012460, 0.900948, 0.072474 +1024, 64, 16, 10, 1.003106, 1.003106, 1.003106, NaN, 0.000000, 0.000000, 0.000000, NaN, 0.003906, 0.003906, 0.003906, NaN, 2.827271, 3.076991, 3.001535, 0.070507, 24.980732, 27.406339, 26.601894, 0.653587, 1.101747, 1.406263, 1.217501, 0.074624, 0.497995, 0.541883, 0.520480, 0.012906, 1.025001, 1.205915, 1.108049, 0.041013, 0.012500, 0.079842, 0.047056, 0.020248, 2.313917, 2.561215, 2.480715, 0.069898, 23.939795, 26.333331, 25.493245, 0.658496, 1.032421, 1.335081, 1.170418, 0.070458 diff --git a/deploy/matplotlib/Paper_cosi/shamir_multi_adapted.csv b/deploy/matplotlib/Paper_cosi/shamir_multi_adapted.csv new file mode 100644 index 0000000000..3d8cc7b11d --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/shamir_multi_adapted.csv @@ -0,0 +1,7 @@ +Peers, ppm, machines, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +2, 1, 2, 2.397439, 2.397439, 2.397439, NaN, 0.124534, 0.124534, 0.124534, NaN, 0.006239, 0.006239, 0.006239, NaN, 0.194293, 0.222541, 0.208527, 0.007728, 0.138768, 0.208153, 0.164090, 0.019193, 0.001459, 0.044902, 0.014416, 0.012331, 0.184380, 0.208747, 0.197324, 0.007187, 0.126970, 0.194060, 0.148809, 0.018535, 0.001006, 0.044206, 0.012623, 0.010779, 0.009039, 0.017052, 0.011072, 0.002333, 0.005734, 0.027199, 0.015058, 0.005129, 0.000000, 0.014216, 0.001777, 0.003591 +4, 1, 4, 6.651993, 6.651993, 6.651993, NaN, 0.265560, 0.265560, 0.265560, NaN, 0.029646, 0.029646, 0.029646, NaN, 0.262851, 0.291899, 0.273428, 0.007015, 0.315927, 0.379851, 0.349210, 0.016786, 0.007188, 0.051677, 0.023519, 0.012808, 
0.251866, 0.279127, 0.261698, 0.007151, 0.305107, 0.362997, 0.333409, 0.014768, 0.007186, 0.051434, 0.021036, 0.010922, 0.009312, 0.015359, 0.011605, 0.001775, 0.003385, 0.023488, 0.015664, 0.005015, 0.000000, 0.020192, 0.002474, 0.005639 +8, 1, 8, 15.130473, 15.130473, 15.130473, NaN, 0.502210, 0.502210, 0.502210, NaN, 0.056133, 0.056133, 0.056133, NaN, 0.367613, 0.449967, 0.407381, 0.018466, 0.584314, 0.797691, 0.713349, 0.065375, 0.020499, 0.074622, 0.038002, 0.015910, 0.357904, 0.435609, 0.395307, 0.017981, 0.570199, 0.783850, 0.695350, 0.065651, 0.016263, 0.074595, 0.036539, 0.016409, 0.008905, 0.017352, 0.011929, 0.002439, 0.010102, 0.027402, 0.017782, 0.004858, 0.000000, 0.004400, 0.001036, 0.001669 +16, 1, 16, 32.389777, 32.389777, 32.389777, NaN, 1.549186, 1.549186, 1.549186, NaN, 0.056969, 0.056969, 0.056969, NaN, 0.836199, 0.925270, 0.886369, 0.025274, 1.813738, 2.000662, 1.930708, 0.046469, 0.050668, 0.202956, 0.123222, 0.039649, 0.825698, 0.915108, 0.874527, 0.024707, 1.794865, 1.977796, 1.913864, 0.045486, 0.050663, 0.202918, 0.120091, 0.040423, 0.008851, 0.017172, 0.011712, 0.002616, 0.000000, 0.026943, 0.016702, 0.006297, 0.000000, 0.032923, 0.002921, 0.007472 +32, 2, 16, 66.919845, 66.919845, 66.919845, NaN, 4.698705, 4.698705, 4.698705, NaN, 0.282331, 0.282331, 0.282331, NaN, 2.030647, 2.180065, 2.102133, 0.037102, 5.563220, 6.041708, 5.795832, 0.143031, 0.221923, 0.425958, 0.329386, 0.053250, 2.019191, 2.170032, 2.090838, 0.036923, 5.541585, 6.028336, 5.776852, 0.143227, 0.221566, 0.425682, 0.327763, 0.053464, 0.009791, 0.016109, 0.011136, 0.001705, 0.013129, 0.026588, 0.018820, 0.003719, 0.000000, 0.007901, 0.001614, 0.002350 +64, 4, 16, 136.376448, 136.376448, 136.376448, NaN, 16.708153, 16.708153, 16.708153, NaN, 0.657086, 0.657086, 0.657086, NaN, 6.692511, 8.925953, 8.368092, 0.451116, 17.966398, 20.031975, 18.669261, 0.518542, 0.678844, 1.070072, 0.842968, 0.096255, 6.679808, 8.916025, 8.356960, 0.451647, 17.944380, 20.025989, 18.651125, 0.520918, 0.678782, 1.049950, 0.841054, 0.094024, 0.009465, 0.015983, 0.010948, 0.001527, 0.005739, 0.023461, 0.017925, 0.003961, 0.000000, 0.020111, 0.001905, 0.004773 diff --git a/deploy/matplotlib/Paper_cosi/shs2.csv b/deploy/matplotlib/Paper_cosi/shs2.csv new file mode 100644 index 0000000000..b0225a3de2 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/shs2.csv @@ -0,0 +1,14 @@ +Peers, depth, bf, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_system_avg, round_user_avg, rate +2, 1, 2, 0.206181, 0.269042, 0.210499, 0.013991, 0.048003, 0.172010, 1233.333333 +4, 1, 2, 0.410780, 0.445122, 0.413110, 0.007357, 0.056003, 0.192012, 1433.333333 +8, 1, 3, 0.411256, 0.485348, 0.414119, 0.013463, 0.088005, 0.216013, 1433.333333 +16, 1, 4, 0.412070, 0.476790, 0.415335, 0.011769, 0.068004, 0.272017, 1433.333333 +32, 2, 5, 0.615733, 0.616794, 0.616085, 0.000256, 0.092005, 0.288018, 1666.666667 +64, 4, 6, 0.619350, 0.666249, 0.623534, 0.011395, 0.096006, 0.324020, 1666.666667 +128, 8, 7, 0.628251, 0.695538, 0.632673, 0.011927, 0.072004, 0.404025, 1666.666667 +256, 16, 8, 0.642246, 0.646479, 0.643769, 0.001019, 0.104006, 0.460028, 1666.666667 +512, 32, 9, 0.658839, 0.698654, 0.666559, 0.008021, 0.124007, 0.496031, 1700.000000 +1024, 64, 10, 0.700326, 0.735884, 0.708723, 0.008613, 0.120007, 0.636039, 1733.333333 +2048, 128, 11, 0.972734, 1.005249, 0.987141, 0.008805, 0.192012, 0.764047, 2066.666667 +4096, 256, 12, 1.150135, 1.276106, 1.191593, 0.023739, 0.184011, 1.052065, 2333.333333 +8192, 512, 13, 1.839814, 2.207453, 
1.974028, 0.092792, 0.560035, 1.828114, 3133.333333 diff --git a/deploy/matplotlib/Paper_cosi/sign_huge.csv b/deploy/matplotlib/Paper_cosi/sign_huge.csv new file mode 100644 index 0000000000..289ca49834 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/sign_huge.csv @@ -0,0 +1,14 @@ +Peers, ppm, machines, bf, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +2, 1, 2, 2, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.245734, 0.260941, 0.256008, 0.004822, 0.021949, 0.071951, 0.033841, 0.012768, 0.000000, 0.012023, 0.002534, 0.002992, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4, 1, 4, 4, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.250462, 0.266719, 0.259717, 0.004251, 0.030882, 0.074926, 0.042040, 0.010644, 0.000000, 0.012233, 0.002717, 0.003475, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +8, 1, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.251411, 0.268906, 0.262792, 0.004307, 0.042737, 0.093501, 0.062272, 0.012270, 0.000000, 0.013460, 0.003727, 0.003376, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +16, 1, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.485355, 0.502763, 0.493514, 0.004965, 0.044229, 0.114130, 0.070992, 0.020196, 0.000000, 0.015616, 0.004290, 0.005144, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +32, 2, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.488457, 0.508997, 0.499152, 0.005817, 0.049411, 0.093041, 0.064772, 0.011543, 0.000000, 0.013113, 0.003272, 0.004167, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000 +64, 4, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.490808, 0.512138, 0.502773, 0.005577, 0.045408, 0.094211, 0.063234, 0.014766, 0.000000, 0.012590, 0.003851, 0.004224, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +128, 8, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.740167, 0.765274, 0.751802, 0.007347, 0.038377, 0.083628, 0.056626, 0.013223, 0.000000, 0.035785, 0.005776, 0.008051, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +256, 16, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.787509, 0.823479, 0.806159, 0.011805, 0.041789, 0.110735, 0.060815, 0.018375, 0.000000, 0.015740, 0.004090, 0.005205, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +512, 32, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.834299, 0.856943, 0.847046, 0.006189, 0.044181, 0.093670, 0.059616, 0.010240, 0.000000, 0.016144, 0.004267, 0.004745, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +1024, 64, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.283717, 1.338806, 1.304773, 0.014958, 0.047540, 0.105733, 0.066865, 0.016887, 0.000022, 0.024225, 0.005162, 0.005688, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +2048, 128, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.631668, 1.813736, 1.710370, 0.050251, 0.049607, 0.106709, 0.065571, 0.017517, 0.000000, 0.013402, 0.002525, 0.003283, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.264840, 2.491629, 2.369731, 0.056457, 0.048707, 0.132159, 0.072128, 0.024654, 0.000000, 0.022187, 0.006897, 0.006970, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +8192, 512, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 5.225845, 6.028472, 5.655382, 0.250504, 0.055067, 0.163444, 0.076950, 0.030392, 0.000000, 0.039102, 0.008822, 0.009504, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/sign_multi_bf.csv b/deploy/matplotlib/Paper_cosi/sign_multi_bf.csv new file mode 100644 index 0000000000..67733ed98c --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/sign_multi_bf.csv @@ -0,0 +1,12 @@ +Peers, ppm, machines, bf, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +4096, 256, 16, 2, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 3.859499, 4.062736, 3.967241, 0.053505, 0.025789, 0.105861, 0.045318, 0.022861, 0.000000, 0.015169, 0.004014, 0.004078, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 3, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 3.162328, 3.340450, 3.253205, 0.057708, 0.031439, 0.099736, 0.045951, 0.014741, 0.000000, 0.020776, 0.003904, 0.004983, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 4, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.550719, 2.710926, 2.627164, 0.048534, 0.037371, 0.077273, 0.047092, 0.009336, 0.000000, 0.019721, 0.006414, 0.006510, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 5, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.290693, 2.652712, 2.517703, 0.090401, 0.041630, 0.090267, 0.054981, 0.013620, 0.000000, 0.027916, 0.003745, 0.006487, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 6, 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.467967, 2.695614, 2.558197, 0.062510, 0.040104, 0.147097, 0.065961, 0.028946, 0.000000, 0.015142, 0.006549, 0.005402, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 7, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.432079, 2.692313, 2.542253, 0.075598, 0.048646, 0.112391, 0.067988, 0.021908, 0.000000, 0.019797, 0.006215, 0.006286, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.183023, 2.499015, 2.336380, 0.071169, 0.047573, 0.134374, 0.073729, 0.024717, 0.000000, 0.025141, 0.006107, 0.006614, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 10, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.446424, 2.745331, 2.561968, 0.082496, 0.054565, 0.120631, 0.078410, 0.019963, 0.000000, 0.019951, 0.008146, 0.006690, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 12, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.394004, 2.672546, 2.563309, 0.074592, 0.065604, 0.142578, 0.092296, 0.024520, 0.000214, 0.026124, 0.009091, 0.007407, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 14, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.536099, 2.967478, 2.691562, 0.119659, 0.067488, 0.162235, 0.092283, 0.025345, 0.000000, 0.025634, 0.008872, 0.007282, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 16, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.832834, 3.199515, 3.031522, 0.115846, 0.068128, 0.168437, 0.107044, 0.028589, 0.000000, 0.040776, 0.010990, 0.010981, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/sign_over_1.csv b/deploy/matplotlib/Paper_cosi/sign_over_1.csv new file 
mode 100644 index 0000000000..addcdd1a8b --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/sign_over_1.csv @@ -0,0 +1,8 @@ +Peers, ppm, machines, bf, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +64, 8, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.514034, 0.532865, 0.524527, 0.004531, 0.038718, 0.094819, 0.058160, 0.013534, 0.000000, 0.019756, 0.004534, 0.005559, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +128, 16, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.746169, 0.776087, 0.764306, 0.007506, 0.039344, 0.096068, 0.054260, 0.014890, 0.000000, 0.022092, 0.005514, 0.006164, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +256, 32, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.803168, 0.833732, 0.819059, 0.006850, 0.038937, 0.085702, 0.051918, 0.012325, 0.000000, 0.011651, 0.004054, 0.003794, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +512, 64, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.938186, 0.991680, 0.961631, 0.015426, 0.045093, 0.096815, 0.060048, 0.015658, 0.000000, 0.013421, 0.003278, 0.004493, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +1024, 128, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.349874, 1.496836, 1.412131, 0.040867, 0.055425, 0.112752, 0.069532, 0.015585, 0.000000, 0.024936, 0.008457, 0.008019, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +2048, 256, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 1.835550, 2.155119, 1.966704, 0.060395, 0.048003, 0.139712, 0.071203, 0.021239, 0.000000, 0.025006, 0.007321, 0.008119, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 512, 8, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 3.909140, 4.662597, 4.274642, 0.234599, 0.038443, 0.112501, 0.064312, 0.018927, 0.000000, 0.046952, 0.016441, 0.014174, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/sign_over_2.csv b/deploy/matplotlib/Paper_cosi/sign_over_2.csv new file mode 100644 index 0000000000..61bbdeadff --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/sign_over_2.csv @@ -0,0 +1,9 @@ +Peers, ppm, machines, bf, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +64, 4, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.494899, 0.511088, 0.504327, 0.004424, 0.045144, 0.080426, 0.057469, 0.011337, 0.000000, 0.014772, 0.005624, 0.004539, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +128, 8, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.745585, 0.776018, 0.762119, 0.008392, 0.038674, 0.099508, 0.058310, 0.018122, 0.000000, 0.010263, 0.003373, 0.003037, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +256, 16, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.764635, 0.798984, 0.786778, 0.008417, 0.037958, 0.106815, 0.054319, 0.017429, 0.000000, 0.024996, 0.005398, 0.006874, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +512, 32, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.840106, 0.873001, 0.855906, 0.007217, 0.049414, 0.087521, 0.059190, 0.010048, 0.000000, 0.028193, 0.006436, 0.007901, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +1024, 64, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.221308, 1.268243, 1.244450, 0.013012, 0.045683, 0.112711, 0.062745, 0.017458, 0.000000, 0.021687, 0.004311, 0.005602, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +2048, 128, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.631668, 1.813736, 1.710370, 0.050251, 0.049607, 0.106709, 0.065571, 0.017517, 0.000000, 0.013402, 0.002525, 0.003283, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.264840, 2.491629, 2.369731, 0.056457, 0.048707, 0.132159, 0.072128, 0.024654, 0.000000, 0.022187, 0.006897, 0.006970, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +8192, 512, 16, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 5.225845, 6.028472, 5.655382, 0.250504, 0.055067, 0.163444, 0.076950, 0.030392, 0.000000, 0.039102, 0.008822, 0.009504, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/sign_over_3.csv b/deploy/matplotlib/Paper_cosi/sign_over_3.csv new file mode 100644 index 0000000000..b5c20c4a0a --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/sign_over_3.csv @@ -0,0 +1,8 @@ +Peers, ppm, machines, bf, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +128, 4, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.735737, 0.751484, 0.742285, 0.004218, 0.040186, 0.097238, 0.060993, 0.015567, 0.000000, 0.016875, 0.005374, 0.004945, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +256, 8, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.748904, 0.767269, 0.759366, 0.005054, 0.042327, 0.101873, 0.059634, 0.017370, 0.000000, 0.012669, 0.002744, 0.004038, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +512, 16, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.799331, 0.819087, 0.808906, 0.006574, 0.036249, 0.078100, 0.051682, 0.011128, 0.000000, 0.015862, 0.004182, 0.004766, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +1024, 32, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.122204, 1.164148, 1.136926, 0.009537, 0.047321, 0.100790, 0.062102, 0.014051, 0.000000, 0.021961, 0.004611, 0.006545, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +2048, 64, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.325280, 1.390818, 1.349723, 0.017796, 0.032442, 0.091625, 0.056946, 0.013825, 0.000000, 0.023853, 0.005181, 0.006558, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 128, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.594141, 1.790138, 1.673395, 0.063327, 0.056677, 0.132357, 0.073588, 0.021075, 0.000000, 0.017334, 0.003672, 0.004673, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +8192, 256, 32, 8, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 3.071067, 3.387916, 3.237745, 0.086287, 0.055240, 0.148667, 0.075015, 0.028251, 0.000010, 0.019045, 0.007373, 0.005140, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/smah.csv b/deploy/matplotlib/Paper_cosi/smah.csv new file mode 100644 index 0000000000..61dac0b410 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/smah.csv @@ -0,0 +1,7 @@ 
+Peers, depth, bf, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_system_avg, round_user_avg, rate +2, 1, 0, 0.155342, 0.163951, 0.159172, 0.002719, 0.012000, 0.480030, 0.000000 +4, 1, 0, 0.240857, 0.268073, 0.247121, 0.007300, 0.004000, 1.300081, 0.000000 +8, 1, 0, 0.581749, 0.691042, 0.600024, 0.030710, 0.052003, 3.736233, 0.000000 +16, 1, 0, 2.124673, 2.266815, 2.156584, 0.039588, 0.128008, 13.184824, 0.000000 +32, 2, 0, 8.454070, 8.660121, 8.557763, 0.066690, 0.264016, 52.611288, 0.000000 +64, 4, 0, 35.594433, 40.953658, 38.186501, 1.206204, 1.036064, 259.648227, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/stamp_perc100-2.csv b/deploy/matplotlib/Paper_cosi/stamp_perc100-2.csv new file mode 100644 index 0000000000..2ff59f12b5 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/stamp_perc100-2.csv @@ -0,0 +1,5 @@ +Peers, ppm, machines, bf, rate, stampperc, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +4096, 256, 16, 8, 100, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.365325, 2.870416, 2.457421, 0.158656, 0.113829, 0.140035, 0.124728, 0.008436, 0.003721, 0.023239, 0.012229, 0.006862, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 1000, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.405077, 2.760888, 2.500550, 0.105937, 0.094097, 0.129763, 0.112131, 0.009716, 0.004232, 0.027534, 0.016513, 0.007200, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 10000, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.440131, 2.805909, 2.638981, 0.113126, 0.105734, 0.155437, 0.131774, 0.017642, 0.006848, 0.023904, 0.013887, 0.005515, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 20000, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.666309, 3.142264, 2.957527, 0.158118, 0.110830, 0.156438, 0.136750, 0.015865, 0.005680, 0.029129, 0.014998, 0.008252, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/Paper_cosi/stamp_perc100.csv b/deploy/matplotlib/Paper_cosi/stamp_perc100.csv new file mode 100644 index 0000000000..3b57e49bf6 --- /dev/null +++ b/deploy/matplotlib/Paper_cosi/stamp_perc100.csv @@ -0,0 +1,7 @@ +Peers, ppm, machines, bf, rate, stampperc, setup_wall_min, setup_wall_max, setup_wall_avg, setup_wall_dev, setup_user_min, setup_user_max, setup_user_avg, setup_user_dev, setup_system_min, setup_system_max, setup_system_avg, setup_system_dev, round_wall_min, round_wall_max, round_wall_avg, round_wall_dev, round_user_min, round_user_max, round_user_avg, round_user_dev, round_system_min, round_system_max, round_system_avg, round_system_dev, calc_wall_min, calc_wall_max, calc_wall_avg, calc_wall_dev, calc_user_min, calc_user_max, calc_user_avg, calc_user_dev, calc_system_min, calc_system_max, calc_system_avg, calc_system_dev, verify_wall_min, verify_wall_max, verify_wall_avg, verify_wall_dev, verify_user_min, verify_user_max, verify_user_avg, verify_user_dev, verify_system_min, verify_system_max, verify_system_avg, verify_system_dev +4096, 256, 16, 8, 10, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.516962, 2.785463, 2.630379, 0.081978, 0.098304, 0.136094, 0.119977, 0.011654, 0.001897, 0.025285, 0.012932, 0.008815, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 100, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.293839, 2.442931, 2.356729, 0.042701, 0.105571, 0.151407, 0.122661, 0.014243, 0.001196, 0.021305, 0.009404, 0.006601, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 300, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.383389, 2.489313, 2.446377, 0.032859, 0.093256, 0.140226, 0.117236, 0.013890, 0.002935, 0.022167, 0.009239, 0.006953, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 500, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.301375, 2.519842, 2.371218, 0.068549, 0.101266, 0.143862, 0.115205, 0.013945, 0.005836, 0.021973, 0.011422, 0.005737, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 700, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.554350, 2.698717, 2.618215, 0.047656, 0.100732, 0.134542, 0.118104, 0.010181, 0.000420, 0.031069, 0.011968, 
0.010049, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 +4096, 256, 16, 8, 900, 100, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 2.529272, 2.665140, 2.599124, 0.045414, 0.090712, 0.148560, 0.125427, 0.015416, 0.001701, 0.027486, 0.013202, 0.009884, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 diff --git a/deploy/matplotlib/makeall b/deploy/matplotlib/makeall new file mode 100755 index 0000000000..40fc22136d --- /dev/null +++ b/deploy/matplotlib/makeall @@ -0,0 +1,52 @@ +#!/bin/bash + +CSV_DIR=Paper_cosi +PIC="true" +SHOW="true" +EXT="eps" + +main(){ + plot_roundtime + #plot_over + #plot_bf + #plot_stamp +} + +add_dir(){ + CSV_REL="" + for c in $@; do + CSV_REL="$CSV_REL $CSV_DIR/$c" + done +} + +plot(){ + if [ "$SHOW" = "true" ]; then + python2.7 time_plot.py $1 $CSV_REL + fi + if [ "$PIC" = "true" ]; then + python2.7 time_plot.py $1 $CSV_REL $2.$EXT + fi +} + +plot_roundtime(){ + add_dir sign_huge.csv shamir_multi_adapted.csv naive_multi_adapted.csv naive_multi_adapted_skipcheck.csv ntree_multi_adapted.csv + plot 0 comparison_roundtime + #plot 1 comparison_systime + #plot 3 comparison_check +} + +plot_over(){ + add_dir sign_over_1.csv sign_over_2.csv sign_over_3.csv + plot 4 sign_over +} + +plot_bf(){ + add_dir sign_multi_bf.csv + plot 5 sign_multi_bf +} + +plot_stamp(){ + add_dir stamp_perc100-2.csv + plot 6 stamp_rate +} +main diff --git a/deploy/matplotlib/mplot.py b/deploy/matplotlib/mplot.py new file mode 100644 index 0000000000..dd4be718c3 --- /dev/null +++ b/deploy/matplotlib/mplot.py @@ -0,0 +1,187 @@ +# Plots the graph of one of the test-runs +# It takes the CSV-file as argument and shows the plot +# of the times used for each round + +import matplotlib +matplotlib.use('TkAgg') +import matplotlib.pyplot as plt +import matplotlib.ticker +import csv + +# Our CSVs have a space after the comma, so we need a new 'dialect', here +# called 'deploy' +csv.register_dialect('deploy', delimiter=',', doublequote=False, quotechar='', lineterminator='\n', escapechar='', + quoting=csv.QUOTE_NONE, skipinitialspace=True) + +class MPlot: + show_fig = True + pngname = "" + plt = None + + def __init__(self): + vers = matplotlib.__version__ + if vers != "1.4.3": + print "\nWrong matlib-version " + vers +", please install 1.4.3" + print "http://matplotlib.org/faq/installing_faq.html\n" + print "Or try the following\nsudo easy_install \"matplotlib == 1.4.3\"\n" + exit(1) + self.plt = plt + self.resetMinMax() + + def readCSV(self, name): + print 'Reading ' + name + + def resetMinMax(self): + self.ymin = -1 + self.ymax = 0 + self.xmin = -1 + self.xmax = 0 + + + # Updates the xmin and xmax with the given values on the x-axis + def updateX(self, *values): + for v in values: + if self.xmin == -1: + self.xmin = min(v) + else: + self.xmin = min(self.xmin, min(v)) + self.xmax = max(self.xmax, max(v)) + + # Updates the xmin and xmax with the given values on the y-axis + def updateY(self, *values): + for v in values: + if self.ymin == -1: + self.ymin = min(v) + else: + self.ymin = min(self.ymin, min(v)) + self.ymax = max(self.ymax, max(v)) + + # Plots the 
Minimum, Maximum, Average on the same plot. + def plotMMA(self, stats, values, plot_color, plot_z, args): + val = stats.get_values(values) + plt.plot(val.x, val.avg, **args) + self.plotFilledLegend(val, "min-max", plot_color, z=plot_z) + return val + + # Adds a fill_between and the corresponding 'empty' plot to show up in + # the legend + def plotFilledLegend(self, stats, label, color, z=None): + x, y1, y2 = stats.x, stats.min, stats.max + if z: + fb = plt.fill_between(x, y1, y2, facecolor=color, edgecolor='white', zorder=z) + else: + fb = plt.fill_between(x, y1, y2, facecolor=color, edgecolor='white', zorder=3) + + self.updateX(x) + self.updateY(y1, y2) + # plt.plot([], [], '-', label=label, color=color, linewidth=10) + + # Takes one x and y1, y2 to stack y2 on top of y1. Does all the + # calculation necessary to sum up everything + def plotStacked(self, stats, col1, col2, label1, label2, color1, color2, ymin=None): + stats.reset_min_max() + y1 = stats.update_values(col1) + y2 = stats.update_values(col2) + if ymin == None: + ymin = min(min(y1), min(y2)) + ymins = [ymin] * len(x) + ysum = [sum(t) for t in zip(y1, y2)] + self.plotFilledLegend(stats.x, y1, ysum, label2, color2) + self.plotFilledLegend(stats.x, ymins, y1, label1, color1) + + + # Takes one x and y1, y2 to stack y2 on top of y1. Does all the + # calculation necessary to sum up everything + def plotStackedBars(self, stats, values1, values2, label1, label2, color1, color2, ymin=None, + delta_x=0): + val1 = stats.get_values(values1) + val2 = stats.get_values(values2) + x = val1.x + y1 = val1.avg + y2 = val2.avg + width = [(t * 0.125 + delta_x * t * 0.018) for t in x] + + zero = [min(y1) for t in y1] + xd = [t[0] + delta_x * t[1] for t in zip(stats.x, width)] + y12 = [sum(t) for t in zip(y1, y2)] + plt.bar(xd, y12, width, color=color1, bottom=y1, zorder=3, label=label1) + plt.bar(xd, y1, width, color=color2, bottom=zero, zorder=3, label=label2) + + # Takes one x and y1, y2 to stack y2 on top of y1. Does all the + # calculation necessary to sum up everything + def plotStackedBarsHatched(self, stats, values1, values2, label, color, ymin=None, + limit_values=None, delta_x=0): + val1 = stats.get_values(values1) + val2 = stats.get_values(values2) + x = val1.x + y1 = val1.avg + y2 = val2.avg + if limit_values != None: + x = x[0:limit_values] + y1 = y1[0:limit_values] + y2 = y2[0:limit_values] + width = [(t * 0.18 + delta_x * t * 0.018) for t in x] + + zero = [min(y1) for t in y1] + xd = [t[0] + ( delta_x - 0.5 ) * t[1] for t in zip(x, width)] + y12 = [sum(t) for t in zip(y1, y2)] + plt.bar(xd, y12, width, color=color, bottom=y1, zorder=3, hatch='//') + return plt.bar(xd, y1, width, color=color, bottom=ymin, zorder=3, label=label), val1 + + + # Puts the most used arguments for starting a plot with + # LogLog by default. 
+ def plotPrepareLogLog(self, logx=2, logy=2): + plt.clf() + plt.ylabel('Total seconds over all rounds') + plt.xlabel('Number of co-signing hosts') + if logx > 0: + plt.xscale(u'log', basex=logx) + if logy > 0: + plt.yscale(u'log', basey=logy) + + ax = plt.axes() + ax.yaxis.grid(color='gray', linestyle='dashed', zorder=0) + ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False)) + ax.xaxis.set_zorder(5) + sf = matplotlib.ticker.ScalarFormatter() + sf.set_powerlimits((-10, 10)) + sf.set_scientific(False) + # ax.yaxis.set_major_formatter(sf) + # ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%2.2e')) + ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%2.2f')) + + + # Ends the plot and takes an extension for saving the png. If + # show_fig is True, it will show the window instead. + def plotEnd(self): + if self.show_fig: + print "Showing plot" + plt.show() + else: + print "Saving to", self.pngname + plt.savefig(self.pngname) + + self.resetMinMax() + + + # Draws an arrow for out-of-bound data + def arrow(self, text, x, top, color): + plt.annotate(text, xy=(x, top), xytext=(x, top - 2), + arrowprops=dict(facecolor=color, frac=0.4, width=8, headwidth=20, edgecolor='white'), + horizontalalignment='center', ) + + # If we want to remove a poly + def delete_poly(self, poly): + self.poly.remove() + + + # For removing a line + def delete_line(self, line): + self.line[0].remove() + if len(self.line) > 1: + for i in range(1, 3): + for l in self.line[i]: + l.remove() + + diff --git a/deploy/matplotlib/naive.csv b/deploy/matplotlib/naive.csv new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deploy/matplotlib/naive_sc.csv b/deploy/matplotlib/naive_sc.csv new file mode 100644 index 0000000000..16a398a43b --- /dev/null +++ b/deploy/matplotlib/naive_sc.csv @@ -0,0 +1,11 @@ +hosts, depth, bf, min, max, avg, stddev, systime, usertime, rate +2, 1, 0, 0.107781, 0.111317, 0.109456, 0.001339, 0.012000, 0.060003, 0.000000 +4, 1, 0, 0.109373, 0.112063, 0.110430, 0.000936, 0.012000, 0.080005, 0.000000 +8, 1, 0, 0.109714, 0.112092, 0.110723, 0.000801, 0.024001, 0.140008, 0.000000 +16, 1, 0, 0.110891, 0.114567, 0.112160, 0.001158, 0.020001, 0.292018, 0.000000 +32, 2, 0, 0.113602, 0.117075, 0.115819, 0.000921, 0.032002, 0.564035, 0.000000 +64, 4, 0, 0.117740, 0.125777, 0.121277, 0.002237, 0.032002, 1.036064, 0.000000 +128, 8, 0, 0.131319, 0.137211, 0.133148, 0.001569, 0.064004, 2.028126, 0.000000 +256, 16, 0, 0.155372, 0.190853, 0.168834, 0.011229, 0.132008, 4.360272, 0.000000 +512, 32, 0, 0.197721, 0.229393, 0.211155, 0.011126, 0.240015, 8.232514, 0.000000 +1024, 64, 0, 0.305224, 0.457782, 0.357817, 0.050497, 0.672042, 16.913057, 0.000000 diff --git a/deploy/matplotlib/ntree.csv b/deploy/matplotlib/ntree.csv new file mode 100644 index 0000000000..c305836d45 --- /dev/null +++ b/deploy/matplotlib/ntree.csv @@ -0,0 +1,11 @@ +hosts, depth, bf, min, max, avg, stddev, systime, usertime, rate +2, 1, 2, 0.109487, 0.112366, 0.110365, 0.000911, 0.064004, 0.420026, 0.000000 +4, 1, 3, 0.113903, 0.119744, 0.116065, 0.002480, 0.072004, 1.444090, 0.000000 +8, 1, 4, 0.222039, 0.228945, 0.224213, 0.002539, 0.152009, 3.136196, 0.000000 +16, 1, 5, 0.225821, 0.637782, 0.270420, 0.122480, 0.292018, 6.904431, 0.000000 +32, 2, 6, 0.237036, 10.414195, 1.365112, 3.032999, 0.228014, 16.041002, 0.000000 +64, 4, 7, 0.353187, 0.974524, 0.421458, 0.184411, 0.468029, 29.941871, 0.000000 +128, 8, 8, 0.476069, 1.946035, 0.644529, 0.434168, 1.536096, 55.447465, 0.000000 +256, 
16, 9, 0.566753, 0.921326, 0.651212, 0.096300, 5.124320, 121.995624, 0.000000 +512, 32, 10, 0.989090, 1.285709, 1.112588, 0.092960, 2.600162, 289.246076, 0.000000 +1024, 64, 10, 1.621387, 2.250708, 1.872934, 0.233009, 4.588286, 581.416336, 0.000000 diff --git a/deploy/matplotlib/shs2.csv b/deploy/matplotlib/shs2.csv new file mode 100644 index 0000000000..051f3d704d --- /dev/null +++ b/deploy/matplotlib/shs2.csv @@ -0,0 +1,14 @@ +hosts, depth, bf, min, max, avg, stddev, systime, usertime, rate +2, 1, 2, 0.206181, 0.269042, 0.210499, 0.013991, 0.048003, 0.172010, 1233.333333 +4, 1, 2, 0.410780, 0.445122, 0.413110, 0.007357, 0.056003, 0.192012, 1433.333333 +8, 1, 3, 0.411256, 0.485348, 0.414119, 0.013463, 0.088005, 0.216013, 1433.333333 +16, 1, 4, 0.412070, 0.476790, 0.415335, 0.011769, 0.068004, 0.272017, 1433.333333 +32, 2, 5, 0.615733, 0.616794, 0.616085, 0.000256, 0.092005, 0.288018, 1666.666667 +64, 4, 6, 0.619350, 0.666249, 0.623534, 0.011395, 0.096006, 0.324020, 1666.666667 +128, 8, 7, 0.628251, 0.695538, 0.632673, 0.011927, 0.072004, 0.404025, 1666.666667 +256, 16, 8, 0.642246, 0.646479, 0.643769, 0.001019, 0.104006, 0.460028, 1666.666667 +512, 32, 9, 0.658839, 0.698654, 0.666559, 0.008021, 0.124007, 0.496031, 1700.000000 +1024, 64, 10, 0.700326, 0.735884, 0.708723, 0.008613, 0.120007, 0.636039, 1733.333333 +2048, 128, 11, 0.972734, 1.005249, 0.987141, 0.008805, 0.192012, 0.764047, 2066.666667 +4096, 256, 12, 1.150135, 1.276106, 1.191593, 0.023739, 0.184011, 1.052065, 2333.333333 +8192, 512, 13, 1.839814, 2.207453, 1.974028, 0.092792, 0.560035, 1.828114, 3133.333333 diff --git a/deploy/matplotlib/smah.csv b/deploy/matplotlib/smah.csv new file mode 100644 index 0000000000..abd6ada326 --- /dev/null +++ b/deploy/matplotlib/smah.csv @@ -0,0 +1,7 @@ +hosts, depth, bf, min, max, avg, stddev, systime, usertime, rate +2, 1, 0, 0.155342, 0.163951, 0.159172, 0.002719, 0.012000, 0.480030, 0.000000 +4, 1, 0, 0.240857, 0.268073, 0.247121, 0.007300, 0.004000, 1.300081, 0.000000 +8, 1, 0, 0.581749, 0.691042, 0.600024, 0.030710, 0.052003, 3.736233, 0.000000 +16, 1, 0, 2.124673, 2.266815, 2.156584, 0.039588, 0.128008, 13.184824, 0.000000 +32, 2, 0, 8.454070, 8.660121, 8.557763, 0.066690, 0.264016, 52.611288, 0.000000 +64, 4, 0, 35.594433, 40.953658, 38.186501, 1.206204, 1.036064, 259.648227, 0.000000 diff --git a/deploy/matplotlib/stats.py b/deploy/matplotlib/stats.py new file mode 100644 index 0000000000..e52b6e6348 --- /dev/null +++ b/deploy/matplotlib/stats.py @@ -0,0 +1,95 @@ +# Reads the stats of a given run and returns easy-to-use data + +import csv +import unittest + + +# Our CSVs have a space after the comma, so we need a new 'dialect', here +# called 'deploy' +csv.register_dialect('deploy', delimiter=',', doublequote=False, quotechar='', lineterminator='\n', escapechar='', + quoting=csv.QUOTE_NONE, skipinitialspace=True) + + +class CSVStats: + x = [] + + # reads in a cvs and fills up the corresponding arrays + # also fills in xmin, xmax, ymin and ymax which are + # valid over multiple calls to readCVS! 
+ # If you want to start a new set, put xmin = -1 + def __init__(self, file, x_id=0): + self.x = [] + self.columns = {} + # Read in all lines of the CSV and store in the arrays + with open(file) as csvfile: + reader = csv.DictReader(csvfile, dialect='deploy') + for row in reader: + for column, value in row.iteritems(): + if not column in self.columns: + self.columns[column] = [] + self.columns[column] += [float(value)] + + if type(x_id) == str: + self.x = self.columns[x_id] + else: + col = sorted(self.columns.keys())[x_id] + self.x = self.columns[col] + + # Returns a Values-object with the requested column. + # Updates the self.(x|y)(min|max) + def get_values(self, column): + values = Values(self.x, column, self.columns) + return values + + + @staticmethod + def get_min_max(*vals): + values_y = [] + values_x = [] + for v in vals: + values_y += [v.ymin, v.ymax] + values_x += v.x + return (min(values_x), max(values_x),min(values_y), max(values_y)) + + +# Value holds the min / max / avg / dev for a single named value +class Values: + def __init__(self, x, column, columns): + self.name = column + self.x = x + + # Set min, max, avg, dev-values from csv-file + self.min = columns[column + "_min"] + self.max = columns[column + "_max"] + self.avg = columns[column + "_avg"] + self.dev = columns[column + "_dev"] + self.ymin = min(self.min) + self.ymax = max(self.max) + + +class TestStringMethods(unittest.TestCase): + def test_load(self): + stats = CSVStats("test.csv") + self.assertEqual(stats.x, [1, 2, 4, 8], "x-values not correct") + stats = CSVStats("test.csv", 0) + self.assertEqual(stats.x, [1, 2, 4, 8], "x-values not correct") + stats = CSVStats("test.csv", 'Hosts') + self.assertEqual(stats.x, [1, 2, 4, 8], "x-values not correct") + stats = CSVStats("test.csv", 'round_min') + self.assertEqual(stats.x, [2, 3, 4, 5], "x-values not correct") + + def test_min(self): + stats = CSVStats("test.csv") + stats.update_values('round') + self.assertEqual(stats.min, [2, 3, 4, 5], "minimum of round not correct") + self.assertEqual(stats.max, [6, 7, 8, 9], "maximum of round not correct") + self.assertEqual(stats.avg, [3, 4, 5, 6], "average of round not correct") + self.assertEqual(stats.dev, [1, 1, 1, 1], "deviation of round not correct") + self.assertEqual(stats.xmin, 1) + self.assertEqual(stats.xmax, 8) + self.assertEqual(stats.ymin, 2) + self.assertEqual(stats.ymax, 9) + + +if __name__ == '__main__': + unittest.main() diff --git a/deploy/matplotlib/test.py b/deploy/matplotlib/test.py new file mode 100644 index 0000000000..d4d6b1505a --- /dev/null +++ b/deploy/matplotlib/test.py @@ -0,0 +1,29 @@ +__author__ = 'ligasser' + +import matplotlib.pyplot as plt +import matplotlib.ticker +import csv +import sys +import math +import matplotlib.patches as mpatches +from matplotlib.legend_handler import HandlerLine2D, HandlerRegularPolyCollection +from mplot import MPlot +from stats import CSVStats + +color1_light = 'lightgreen' +color1_dark = 'green' +color2_light = 'lightblue' +color2_dark = 'blue' +color3_light = 'yellow' +color3_dark = 'brown' +color4_light = 'pink' +color4_dark = 'red' + +mplot = MPlot() +mplot.plotPrepareLogLog() + +mplot.show_fig = True +jvss = CSVStats('test_naive_multi.csv').get_values('round_wall') +plt.plot(jvss.x, jvss.avg, label='JVSS', linestyle='-', marker='^', color=color2_dark, zorder=3) +mplot.plotFilledLegend(jvss.x, jvss.min, jvss.max, "min-max", color2_light, z=0) +mplot.plotEnd() \ No newline at end of file diff --git a/deploy/matplotlib/time_plot.py b/deploy/matplotlib/time_plot.py 
new file mode 100644 index 0000000000..4dd22b6e73 --- /dev/null +++ b/deploy/matplotlib/time_plot.py @@ -0,0 +1,256 @@ +# Plots the graph of one of the test-runs +# It takes the CSV-file as argument and shows the plot +# of the times used for each round + +import sys +from mplot import MPlot +from stats import CSVStats +import matplotlib.pyplot as plt +import matplotlib.patches as mpatches + + +# This one takes two csv-files which represent a Cothority and a JVSS +# run, stacking the user and system-time one upon the other. +def CoJVTimeArea(cothority, jvss): + mplot.plotPrepareLogLog(); + mplot.plotStacked(jvss, "basic_round", "JVSS system time", "JVSS user time", + color2_light, color2_dark) + mm = [min(mplot.tsys), max(mplot.tusr)] + + mplot.readCSV(cothority) + mplot.plotStacked(mplot.x, mplot.tsys, mplot.tusr, "Cothority system time", "Cothority user time", + color1_light, color1_dark, min(mm)) + mm = [min(mm[0], min(mplot.tsys)), max(mm[1], max(mplot.tusr))] + + plt.ylim(min(mplot.tsys), mm[1]) + plt.xlim(mplot.xmin, mplot.xmax * 1.3) + plt.legend() + mplot.plotEnd() + + +# This one takes two csv-files which represent a Cothority and a JVSS +# run, stacking the user and system-time one upon the other. +def CoJVTimeBars(cothority, jvss, naive): + mplot.plotPrepareLogLog(); + + ymin = 0.005 + bar_jvss, jvss_val = mplot.plotStackedBarsHatched(jvss, "round_system", "round_user", "JVSS", color2_light, + ymin, delta_x=-1) + + bar_naive, na_val = mplot.plotStackedBarsHatched(naive, "round_system", "round_user", "Naive", color3_light, + ymin, limit_values=7) + + bar_cothority, co_val = mplot.plotStackedBarsHatched(cothority, "round_system", "round_user", "Cothority", + color1_light, ymin, delta_x=1) + + plt.ylim(ymin, max(jvss_val.ymax, na_val.ymax, co_val.ymax)) + # plt.xlim(mplot.xmin, mplot.xmax * 1.3) + + usert = mpatches.Patch(color='white', ec='black', label='User time', hatch='//') + syst = mpatches.Patch(color='white', ec='black', label='System time') + + plt.legend(handles=[bar_jvss, bar_naive, bar_cothority, usert, syst], loc=u'upper left') + mplot.plotEnd() + + +# Plots a Cothority and a JVSS run with regard to their averages. 
Supposes that +# the last two values from JVSS are off-grid and writes them with arrows +# directly on the plot +def plotAvgMM(co, jvss, naive, nt): + mplot.plotPrepareLogLog() + + nt = mplot.plotMMA(ntree, 'round_wall', color4_light, 0, + dict(label='Ntree', linestyle='-', marker='v', color=color4_dark, zorder=3)) + # mplot.arrow("{:.1f} sec ".format(mplot.avg[-2]), x[-2], 4, color3_dark) + # mplot.arrow(" {:.0f} sec".format(mplot.avg[-1]), x[-1], 4, color3_dark) + + j = mplot.plotMMA(jvss, 'round_wall', color2_light, 0, + dict(label='JVSS', linestyle='-', marker='^', color=color2_dark, zorder=3)) + #j_p = jvss.get_values('round_wall') + #plt.plot(j_p.x, j_p.avg, label="JVSS", color=color2_dark, marker='^') + #mplot.arrow("{:.1f} sec ".format(j_p.avg[-2]), j_p.x[-2], 4, color2_dark) + #mplot.arrow(" {:.0f} sec".format(j_p.avg[-1]), j_p.x[-1], 8, color2_dark) + + na = mplot.plotMMA(naive, 'round_wall', color3_light, 0, + dict(label='Naive', linestyle='-', marker='s', color=color3_dark, zorder=3)) + na_p = naive.get_values('round_wall') + #mplot.arrow("{:.1f} sec ".format(na_p.avg[8]), na_p.x[8], 4, color3_dark) + mplot.arrow(" {:.0f} sec".format(na_p.avg[9]), na_p.x[9], 8, color3_dark) + + co = mplot.plotMMA(cothority, 'round_wall', color1_light, 4, + dict(label='Cothority', linestyle='-', marker='o', color=color1_dark, zorder=5)) + + # Make horizontal lines and add arrows for JVSS + xmin, xmax, ymin, ymax = CSVStats.get_min_max(na, co) + plt.ylim(ymin, 8) + plt.xlim(xmin, xmax * 1.2) + plt.ylabel('Seconds per round') + + plt.legend(loc=u'lower right') + mplot.plotEnd() + + +# Plots a Cothority and a JVSS run with regard to their averages. Supposes that +# the last two values from JVSS are off-grid and writes them with arrows +# directly on the plot +def plotAvg(co, jvss, naive, nt): + mplot.plotPrepareLogLog() + + j_p = jvss.get_values('round_wall') + plt.plot(j_p.x, j_p.avg, label="JVSS", color=color2_dark, marker='^') + + na_p = naive.get_values('round_wall') + plt.plot(na_p.x, na_p.avg, label="Naive", color=color3_dark, marker='s') + #mplot.arrow(" {:.0f} sec".format(na_p.avg[9]), na_p.x[9], 8, color3_dark) + + nt_p = nt.get_values('round_wall') + plt.plot(nt_p.x, nt_p.avg, label="Ntree", color=color4_dark, marker='v') + + co_p = cothority.get_values('round_wall') + plt.plot(co_p.x, co_p.avg, label="Cothority", color=color1_dark, marker='o') + + # Make horizontal lines and add arrows for JVSS + xmin, xmax, ymin, ymax = CSVStats.get_min_max(j_p, na_p, nt_p, co_p) + plt.ylim(ymin, 8) + plt.xlim(xmin, 1024 * 1.2) + plt.ylabel('Seconds per round') + + plt.legend(loc=u'lower right') + mplot.plotEnd() + + +def Over(over_1, over_2, over_3): + mplot.plotPrepareLogLog() + + o3 = mplot.plotMMA(over_3, 'round_wall', color3_light, 0, + dict(label='32 Machines', linestyle='-', marker='s', color=color3_dark, zorder=3)) + + o2 = mplot.plotMMA(over_2, 'round_wall', color2_light, 0, + dict(label='16 Machines', linestyle='-', marker='^', color=color2_dark, zorder=3)) + + o1 = mplot.plotMMA(over_1, 'round_wall', color1_light, 0, + dict(label='8 Machines', linestyle='-', marker='o', color=color1_dark, zorder=3)) + + xmin, xmax, ymin, ymax = CSVStats.get_min_max(o1, o2, o3) + plt.ylim(ymin, ymax) + plt.xlim(xmin, xmax * 1.2) + plt.ylabel('Seconds per round') + + plt.legend(loc=u'lower right') + mplot.plotEnd() + + +def PlotBF(values_bf): + mplot.plotPrepareLogLog(2, 0) + + plotbf = mplot.plotMMA(values_bf, 'round_wall', color1_light, 0, + dict(label='4096 Peers', linestyle='-', marker='o', 
color=color2_dark, zorder=3)) + + xmin, xmax, ymin, ymax = CSVStats.get_min_max(plotbf) + plt.ylim(ymin, ymax) + plt.xlim(xmin, xmax * 1.2) + plt.ylabel('Seconds per round') + plt.xlabel('Branching factor') + + plt.legend(loc=u'upper right') + mplot.plotEnd() + + +# Calculates the time it takes to check the signature +def SigCheck(naive, naive_cs): + mplot.plotPrepareLogLog() + + # Read in naive + x = naive.get_values("round_wall").x + naive_avg = naive.get_values("round_wall").avg + naive_tsys = naive.get_values("round_system").avg + naive_tusr = naive.get_values("round_user").avg + + naive_cs_avg = naive_cs.get_values("round_wall").avg + naive_cs_tsys = naive_cs.get_values("round_system").avg + naive_cs_tusr = naive_cs.get_values("round_user").avg + check_avg = [t[0] - t[1] for t in zip(naive_avg, naive_cs_avg)] + check_tsys = [t[0] - t[1] for t in zip(naive_tsys, naive_cs_tsys)] + check_tusr = [t[0] - t[1] for t in zip(naive_tusr, naive_cs_tusr)] + #plt.plot(x, check_avg, label="Round-time", color=color1_dark, marker='o') + plt.plot(x, check_tsys, label="System time", color=color2_dark, marker='s') + plt.plot(x, check_tusr, label="User time", color=color3_dark, marker='^') + + plt.legend(loc='upper left') + plt.ylabel('Time for verification') + mplot.plotEnd() + +def PlotStamp(stamp): + mplot.plotPrepareLogLog(10, 0) + + plotbf = mplot.plotMMA(stamp, 'round_wall', color1_light, 0, + dict(label='4096 Peers', linestyle='-', marker='o', color=color2_dark, zorder=3)) + + xmin, xmax, ymin, ymax = CSVStats.get_min_max(plotbf) + plt.ylim(ymin, ymax) + plt.xlim(xmin, xmax * 1.2) + plt.ylabel('Seconds per round') + plt.xlabel('Stamping rate [1/s]') + + plt.legend(loc=u'upper right') + mplot.plotEnd() + + +# Colors for the Cothority +color1_light = 'lightgreen' +color1_dark = 'green' +color2_light = 'lightblue' +color2_dark = 'blue' +color3_light = 'yellow' +color3_dark = 'brown' +color4_light = 'pink' +color4_dark = 'red' +mplot = MPlot() + + +def check_Args(args): + if len(sys.argv) < args + 1: + print("Error: Please give a mode and " + str(args) + " .csv-files as argument - " + str(len(sys.argv)) + "\n") + print("Mode: (0=printAverage, 1=printSystemUserTimes with bars, 2=printSystemUserTimes with areas)\n") + print("CSV: cothority.csv jvss.csv\n") + exit(1) + + +def plot_show(argn): + if len(sys.argv) > 2 + argn: + mplot.pngname = sys.argv[2 + argn] + mplot.show_fig = False + print mplot.pngname, mplot.show_fig + + +def args_to_csv(argn): + stats = [] + for a in sys.argv[2:argn + 2]: + stats.append(CSVStats(a)) + plot_show(argn) + return stats + + +option = sys.argv[1] + +if option == "0": + cothority, jvss, naive, naive_sc, ntree = args_to_csv(5) + plotAvgMM(cothority, jvss, naive, ntree) +elif option == "1": + cothority, jvss, naive, naive_sc, ntree = args_to_csv(5) + CoJVTimeBars(cothority, jvss, naive) +elif option == "2": + cothority, jvss, naive, naive_sc, ntree = args_to_csv(5) + CoJVTimeArea(cothority, jvss) +elif option == "3": + cothority, jvss, naive, naive_sc, ntree = args_to_csv(5) + SigCheck(naive, naive_sc) +elif option == "4": + Over(*args_to_csv(3)) +elif option == "5": + plot_show(1) + PlotBF(CSVStats(sys.argv[2], "bf")) +elif option == "6": + plot_show(1) + stamp = CSVStats(sys.argv[2], "rate") + PlotStamp(stamp) diff --git a/deploy/monitor.go b/deploy/monitor.go deleted file mode 100644 index 74c2aedd9d..0000000000 --- a/deploy/monitor.go +++ /dev/null @@ -1,193 +0,0 @@ -package deploy - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" - "math" - "strconv" - 
"time" - - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/PuerkitoBio/goquery" - "golang.org/x/net/websocket" -) - -// Monitor monitors log aggregates results into RunStats -func Monitor(bf int) RunStats { - dbg.Lvl1("Starting monitoring") - defer dbg.Lvl1("Done monitoring") - retry_dial: - ws, err := websocket.Dial(fmt.Sprintf("ws://localhost:%d/log", port), "", "http://localhost/") - if err != nil { - time.Sleep(1 * time.Second) - goto retry_dial - } - retry: - // Get HTML of webpage for data (NHosts, Depth, ...) - doc, err := goquery.NewDocument(fmt.Sprintf("http://localhost:%d/", port)) - if err != nil { - dbg.Lvl4("unable to get log data: retrying:", err) - time.Sleep(10 * time.Second) - goto retry - } - nhosts := doc.Find("#numhosts").First().Text() - dbg.Lvl4("hosts:", nhosts) - depth := doc.Find("#depth").First().Text() - dbg.Lvl4("depth:", depth) - nh, err := strconv.Atoi(nhosts) - if err != nil { - log.Fatal("unable to convert hosts to be a number:", nhosts) - } - d, err := strconv.Atoi(depth) - if err != nil { - log.Fatal("unable to convert depth to be a number:", depth) - } - clientDone := false - rootDone := false - var rs RunStats - rs.NHosts = nh - rs.Depth = d - rs.BF = bf - - var M, S float64 - k := float64(1) - first := true - for { - var data []byte - err := websocket.Message.Receive(ws, &data) - if err != nil { - // if it is an eof error than stop reading - if err == io.EOF { - dbg.Lvl4("websocket terminated before emitting EOF or terminating string") - break - } - continue - } - if bytes.Contains(data, []byte("EOF")) || bytes.Contains(data, []byte("terminating")) { - dbg.Lvl2( - "EOF/terminating Detected: need forkexec to report and clients: rootDone", rootDone, "clientDone", clientDone) - } - if bytes.Contains(data, []byte("root_round")) { - dbg.Lvl4("root_round msg received (clientDone = ", clientDone, ", rootDone = ", rootDone, ")") - - if clientDone || rootDone { - dbg.Lvl4("Continuing searching data") - // ignore after we have received our first EOF - continue - } - var entry StatsEntry - err := json.Unmarshal(data, &entry) - if err != nil { - log.Fatal("json unmarshalled improperly:", err) - } - if entry.Type != "root_round" { - dbg.Lvl1("Wrong debugging message - ignoring") - continue - } - dbg.Lvl4("root_round:", entry) - if first { - first = false - dbg.Lvl4("Setting min-time to", entry.Time) - rs.MinTime = entry.Time - rs.MaxTime = entry.Time - } - if entry.Time < rs.MinTime { - dbg.Lvl4("Setting min-time to", entry.Time) - rs.MinTime = entry.Time - } else if entry.Time > rs.MaxTime { - rs.MaxTime = entry.Time - } - - rs.AvgTime = ((rs.AvgTime * (k - 1)) + entry.Time) / k - - var tM = M - M += (entry.Time - tM) / k - S += (entry.Time - tM) * (entry.Time - M) - k++ - rs.StdDev = math.Sqrt(S / (k - 1)) - } else if bytes.Contains(data, []byte("schnorr_round")) { - - var entry StatsEntry - err := json.Unmarshal(data, &entry) - if err != nil { - log.Fatal("json unmarshalled improperly:", err) - } - if entry.Type != "schnorr_round" { - dbg.Lvl1("Wrong debugging message - ignoring") - continue - } - dbg.Lvl4("schnorr_round:", entry) - if first { - first = false - dbg.Lvl4("Setting min-time to", entry.Time) - rs.MinTime = entry.Time - rs.MaxTime = entry.Time - } - if entry.Time < rs.MinTime { - dbg.Lvl4("Setting min-time to", entry.Time) - rs.MinTime = entry.Time - } else if entry.Time > rs.MaxTime { - rs.MaxTime = entry.Time - } - - rs.AvgTime = ((rs.AvgTime * (k - 1)) + entry.Time) / k - - var tM = M - M += (entry.Time - tM) / k - S += (entry.Time - 
tM) * (entry.Time - M) - k++ - rs.StdDev = math.Sqrt(S / (k - 1)) - } else if bytes.Contains(data, []byte("schnorr_end")){ - break - } else if bytes.Contains(data, []byte("forkexec")) { - if rootDone { - continue - } - var ss SysStats - err := json.Unmarshal(data, &ss) - if err != nil { - log.Fatal("unable to unmarshal forkexec:", ss) - } - rs.SysTime = ss.SysTime - rs.UserTime = ss.UserTime - dbg.Lvl4("forkexec:", ss) - rootDone = true - dbg.Lvl2("Monitor() Forkexec msg received (clientDone = ", clientDone, ", rootDone = ", rootDone, ")") - if clientDone { - break - } - } else if bytes.Contains(data, []byte("client_msg_stats")) { - if clientDone { - continue - } - var cms ClientMsgStats - err := json.Unmarshal(data, &cms) - if err != nil { - log.Fatal("unable to unmarshal client_msg_stats:", string(data)) - } - // what do I want to keep out of the Client Message States - // cms.Buckets stores how many were processed at time T - // cms.RoundsAfter stores how many rounds delayed it was - // - // get the average delay (roundsAfter), max and min - // get the total number of messages timestamped - // get the average number of messages timestamped per second? - avg, _, _, _ := ArrStats(cms.Buckets) - // get the observed rate of processed messages - // avg is how many messages per second, we want how many milliseconds between messages - observed := avg / 1000 // set avg to messages per milliseconds - observed = 1 / observed - rs.Rate = observed - rs.Times = cms.Times - dbg.Lvl2("Monitor() Client Msg stats received (clientDone = ", clientDone, ",rootDone = ", rootDone, ")") - clientDone = true - if rootDone { - break - } - } - } - return rs -} diff --git a/deploy/platform/README.md b/deploy/platform/README.md new file mode 100644 index 0000000000..494c670275 --- /dev/null +++ b/deploy/platform/README.md @@ -0,0 +1,27 @@ +# Deployment + Configure(*Config) + Build() (error) + Deploy() (error) + Start() (error) + Stop() (error) + +The Life of a simulation: + +1. Configure + * read configuration + * compile eventual files +2. Build + * builds all files + * eventually for different platforms +3. Cleanup + * send killall to applications +4. Deploy + * make sure the environment is up and running + * copy files +5. Start + * start all logservers + * start all nodes + * start all clients +6. Wait + * wait for the applications to finish + diff --git a/deploy/platform/deterlab.go b/deploy/platform/deterlab.go new file mode 100644 index 0000000000..20f2279747 --- /dev/null +++ b/deploy/platform/deterlab.go @@ -0,0 +1,472 @@ +// Deterlab is responsible for setting up everything to test the application +// on deterlab.net +// Given a list of hostnames, it will create an overlay +// tree topology, using all but the last node. It will create multiple +// nodes per server and run timestamping processes. 
The last node is +// reserved for the logging server, which is forwarded to localhost:8081 +// +// Creates the following directory structure: +// build/ - where all cross-compiled executables are stored +// remote/ - directory to be copied to the deterlab server +// +// The following apps are used: +// deter - runs on the user-machine in deterlab and launches the others +// forkexec - runs on the other servers and launches the app, so it can measure its cpu usage + +package platform + +import ( + "os" + "os/exec" + "strings" + "sync" + + "bufio" + _ "errors" + "fmt" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/graphs" + "github.com/dedis/cothority/lib/monitor" + "io/ioutil" + "path" + "path/filepath" + "runtime" + "strconv" + "time" +) + +type Deterlab struct { + // The login on the platform + Login string + // The outside host on the platform + Host string + // The name of the project + Project string + // Name of the Experiment - also name of hosts + Experiment string + // Directory of applications + AppDir string + // Directory where everything is copied into + DeployDir string + // Directory for building + BuildDir string + // Working directory of deterlab + DeterDir string + // Where the main logging machine resides + MasterLogger string + // DNS-resolvable names + Phys []string + // VLAN-IP names + Virt []string + + // ProxyRedirectionAddress : the proxy will redirect every traffic it + // receives to this address + ProxyRedirectionAddress string + // Proxy redirection port + ProxyRedirectionPort string + // MonitorAddress is the address given to clients to connect to the monitor + // It is actually the Proxy that will listen to that address and clients + // won't know a thing about it + MonitorAddress string + + // Which app to run + App string + // Number of machines + Machines int + // Number of Rounds + Rounds int + // Channel to communication stopping of experiment + sshDeter chan string + // Whether the simulation is started + started bool + // Debugging-level: 0 is none - 5 is everything + Debug int + + // All hostnames used concatenated with the port + Hostnames []string + + // Testing the connection? 
+ TestConnect bool +} + +func (d *Deterlab) Configure() { + // Directory setup - would also be possible in /tmp + pwd, _ := os.Getwd() + d.DeterDir = pwd + "/platform/deterlab" + d.DeployDir = d.DeterDir + "/remote" + d.BuildDir = d.DeterDir + "/build" + d.AppDir = pwd + "/../app" + dbg.Lvl3("Dirs are:", d.DeterDir, d.DeployDir) + dbg.Lvl3("Dirs are:", d.BuildDir, d.AppDir) + d.LoadAndCheckDeterlabVars() + + d.Debug = dbg.DebugVisible + if d.App == "" { + dbg.Fatal("No app defined in simulation") + } + + // Setting up channel + d.sshDeter = make(chan string) +} + +// build is the name of the app to build +// empty = all otherwise build specific package +func (d *Deterlab) Build(build string) error { + dbg.Lvl1("Building for", d.Login, d.Host, d.Project, build) + start := time.Now() + + var wg sync.WaitGroup + + // Start with a clean build-directory + current, _ := os.Getwd() + dbg.Lvl3("Current dir is:", current, d.DeterDir) + defer os.Chdir(current) + + // Go into deterlab-dir and create the build-dir + os.Chdir(d.DeterDir) + os.RemoveAll(d.BuildDir) + os.Mkdir(d.BuildDir, 0777) + + // start building the necessary packages + packages := []string{"forkexec", "app", "users"} + if build != "" { + packages = strings.Split(build, ",") + } + dbg.Lvl3("Starting to build all executables", packages) + for _, p := range packages { + src_dir := d.DeterDir + "/" + p + basename := path.Base(p) + if p == "app" { + src_dir = d.AppDir + "/" + d.App + basename = d.App + } + dst := d.BuildDir + "/" + basename + + dbg.Lvl3("Building", p, "from", src_dir, "into", basename) + wg.Add(1) + if p == "users" { + go func(src, dest string) { + defer wg.Done() + // the users node has a 386 FreeBSD architecture + // go won't compile on an absolute path so we need to + // convert it to a relative one + src_rel, _ := filepath.Rel(d.DeterDir, src) + out, err := cliutils.Build("./"+src_rel, dest, "386", "freebsd") + if err != nil { + cliutils.KillGo() + dbg.Lvl1(out) + dbg.Fatal(err) + } + }(src_dir, dst) + continue + } + go func(src, dest string) { + defer wg.Done() + // deter has an amd64, linux architecture + src_rel, _ := filepath.Rel(d.DeterDir, src) + dbg.Lvl3("Relative-path is", src, src_rel, d.DeterDir) + out, err := cliutils.Build("./"+src_rel, dest, "amd64", "linux") + if err != nil { + cliutils.KillGo() + dbg.Lvl1(out) + dbg.Fatal(err) + } + }(src_dir, dst) + } + // wait for the build to finish + wg.Wait() + dbg.Lvl1("Build is finished after", time.Since(start)) + return nil +} + +// Kills all eventually remaining processes from the last Deploy-run +func (d *Deterlab) Cleanup() error { + // Cleanup eventual ssh from the proxy-forwarding to the logserver + //err := exec.Command("kill", "-9", "$(ps x | grep ssh | grep nNTf | cut -d' ' -f1)").Run() + err := exec.Command("pkill", "-9", "-f", "ssh -nNTf").Run() + if err != nil { + dbg.Lvl3("Error stopping ssh:", err) + } + + // SSH to the deterlab-server and end all running users-processes + dbg.Lvl3("Going to kill everything") + var sshKill chan string + sshKill = make(chan string) + go func() { + // Cleanup eventual residues of previous round - users and sshd + cliutils.SshRun(d.Login, d.Host, "killall -9 users sshd") + err = cliutils.SshRunStdout(d.Login, d.Host, "test -f remote/users && ( cd remote; ./users -kill )") + if err != nil { + dbg.Lvl1("NOT-Normal error from cleanup") + sshKill <- "error" + } + sshKill <- "stopped" + }() + + for { + select { + case msg := <-sshKill: + if msg == "stopped" { + dbg.Lvl3("Users stopped") + return nil + } else { + 
dbg.Lvl2("Received other command", msg, "probably the app didn't quit correctly") + } + case <-time.After(time.Second * 20): + dbg.Lvl3("Timeout error when waiting for end of ssh") + return nil + } + } + + return nil +} + +// Creates the appropriate configuration-files and copies everything to the +// deterlab-installation. +func (d *Deterlab) Deploy(rc RunConfig) error { + dbg.Lvlf1("Next run is %+v", rc) + os.RemoveAll(d.DeployDir) + os.Mkdir(d.DeployDir, 0777) + + dbg.Lvl3("Writing config-files") + + // Initialize the deter-struct with our current structure (for debug-levels + // and such), then read in the app-configuration to overwrite eventual + // 'Machines', 'ppm', '' or other fields + deter := *d + appConfig := d.DeployDir + "/app.toml" + deterConfig := d.DeployDir + "/deter.toml" + ioutil.WriteFile(appConfig, rc.Toml(), 0666) + deter.ReadConfig(appConfig) + + deter.createHosts() + d.MasterLogger = deter.MasterLogger + app.WriteTomlConfig(deter, deterConfig) + + // Prepare special configuration preparation for each application - the + // reading in twice of the configuration file, once for the deterConfig, + // then for the appConfig, sets the deterConfig as defaults and overwrites + // everything else with the actual appConfig (which comes from the + // runconfig-file) + switch d.App { + case "sign", "stamp": + conf := app.ConfigColl{} + conf.StampsPerRound = -1 + conf.StampRatio = 1.0 + app.ReadTomlConfig(&conf, deterConfig) + app.ReadTomlConfig(&conf, appConfig) + // Calculates a tree that is used for the timestampers + var depth int + conf.Tree, conf.Hosts, depth, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, conf.Bf) + dbg.Lvl2("Depth:", depth) + dbg.Lvl2("Total peers:", len(conf.Hosts)) + total := deter.Machines * conf.Ppm + if len(conf.Hosts) != total { + dbg.Fatal("Only calculated", len(conf.Hosts), "out of", total, "hosts - try changing number of", + "machines or hosts per node") + } + deter.Hostnames = conf.Hosts + // re-write the new configuration-file + app.WriteTomlConfig(conf, appConfig) + case "shamir": + conf := app.ConfigShamir{} + app.ReadTomlConfig(&conf, deterConfig) + app.ReadTomlConfig(&conf, appConfig) + _, conf.Hosts, _, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, conf.Ppm) + deter.Hostnames = conf.Hosts + // re-write the new configuration-file + app.WriteTomlConfig(conf, appConfig) + case "naive": + conf := app.NaiveConfig{} + app.ReadTomlConfig(&conf, deterConfig) + app.ReadTomlConfig(&conf, appConfig) + _, conf.Hosts, _, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, 2) + deter.Hostnames = conf.Hosts + dbg.Lvl3("Deterlab: naive applications:", conf.Hosts) + dbg.Lvl3("Deterlab: naive app config:", conf) + dbg.Lvl3("Deterlab: naive app virt:", deter.Virt[:]) + deter.Hostnames = conf.Hosts + app.WriteTomlConfig(conf, appConfig) + case "ntree": + conf := app.NTreeConfig{} + app.ReadTomlConfig(&conf, deterConfig) + app.ReadTomlConfig(&conf, appConfig) + var depth int + conf.Tree, conf.Hosts, depth, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, conf.Bf) + dbg.Lvl2("Depth:", depth) + deter.Hostnames = conf.Hosts + app.WriteTomlConfig(conf, appConfig) + + case "randhound": + } + app.WriteTomlConfig(deter, "deter.toml", d.DeployDir) + + // copy the webfile-directory of the logserver to the remote directory + err := exec.Command("cp", "-a", d.DeterDir+"/cothority.conf", d.DeployDir).Run() + if err != nil { + dbg.Fatal("error copying webfiles:", err) + } + build, err := ioutil.ReadDir(d.BuildDir) + for _, file := range build { + err = 
exec.Command("cp", d.BuildDir+"/"+file.Name(), d.DeployDir).Run() + if err != nil { + dbg.Fatal("error copying build-file:", err) + } + } + + dbg.Lvl1("Copying over to", d.Login, "@", d.Host) + // Copy everything over to Deterlabs + err = cliutils.Rsync(d.Login, d.Host, d.DeployDir+"/", "remote/") + if err != nil { + dbg.Fatal(err) + } + dbg.Lvl2("Done copying") + + return nil +} + +func (d *Deterlab) Start(args ...string) error { + // setup port forwarding for viewing log server + d.started = true + // Remote tunneling : the sink port is used both for the sink and for the + // proxy => the proxy redirects packets to the same port the sink is + // listening. + // -n = stdout == /Dev/null, -N => no command stream, -T => no tty + cmd := []string{"-nNTf", "-o", "StrictHostKeyChecking=no", "-o", "ExitOnForwardFailure=yes", "-R", d.ProxyRedirectionPort + ":" + d.ProxyRedirectionAddress + ":" + monitor.SinkPort, fmt.Sprintf("%s@%s", d.Login, d.Host)} + exCmd := exec.Command("ssh", cmd...) + if err := exCmd.Start(); err != nil { + dbg.Fatal("Failed to start the ssh port forwarding:", err) + } + if err := exCmd.Wait(); err != nil { + dbg.Fatal("ssh port forwarding exited in failure:", err) + } + dbg.Lvl3("Setup remote port forwarding", cmd) + go func() { + err := cliutils.SshRunStdout(d.Login, d.Host, "cd remote; GOMAXPROCS=8 ./users") + if err != nil { + dbg.Lvl3(err) + } + d.sshDeter <- "finished" + }() + + return nil +} + +// Waiting for the process to finish +func (d *Deterlab) Wait() error { + if d.started { + dbg.Lvl3("Simulation is started") + select { + case msg := <-d.sshDeter: + if msg == "finished" { + dbg.Lvl3("Received finished-message, not killing users") + return nil + } else { + dbg.Lvl1("Received out-of-line message", msg) + } + case <-time.After(time.Second): + dbg.Lvl3("No message waiting") + } + d.started = false + } + return nil +} + +// Reads in the deterlab-config and drops out if there is an error +func (d *Deterlab) ReadConfig(name ...string) { + configName := "deter.toml" + if len(name) > 0 { + configName = name[0] + } + err := app.ReadTomlConfig(d, configName) + _, caller, line, _ := runtime.Caller(1) + who := caller + ":" + strconv.Itoa(line) + if err != nil { + dbg.Fatal("Couldn't read config in", who, ":", err) + } + dbg.DebugVisible = d.Debug +} + +/* +* Write the hosts.txt file automatically +* from project name and number of servers + */ +func (d *Deterlab) createHosts() error { + num_servers := d.Machines + nmachs := d.Machines + + // write the name of the server + \t + IP address + ip := "10.255.0." + name := d.Project + ".isi.deterlab.net" + d.Phys = make([]string, 0, num_servers) + d.Virt = make([]string, 0, num_servers) + for i := 1; i <= num_servers; i++ { + d.Phys = append(d.Phys, fmt.Sprintf("server-%d.%s.%s", i-1, d.Experiment, name)) + d.Virt = append(d.Virt, fmt.Sprintf("%s%d", ip, i)) + } + + // only take the machines we need + d.Phys = d.Phys[:nmachs] + d.Virt = d.Virt[:nmachs] + + return nil +} + +// Checks whether host, login and project are defined. If any of them are missing, it will +// ask on the command-line. 
+// For the login-variable, it will try to set up a connection to d.Host and copy over the +// public key for a more easy communication +func (d *Deterlab) LoadAndCheckDeterlabVars() { + deter := Deterlab{} + err := app.ReadTomlConfig(&deter, "deter.toml", d.DeterDir) + d.Host, d.Login, d.Project, d.Experiment, d.ProxyRedirectionPort, d.ProxyRedirectionAddress, d.MonitorAddress = + deter.Host, deter.Login, deter.Project, deter.Experiment, + deter.ProxyRedirectionPort, deter.ProxyRedirectionAddress, deter.MonitorAddress + + if err != nil { + dbg.Lvl1("Couldn't read config-file - asking for default values") + } + + if d.Host == "" { + d.Host = readString("Please enter the hostname of deterlab", "users.deterlab.net") + } + + if d.Login == "" { + d.Login = readString("Please enter the login-name on "+d.Host, "") + } + + if d.Project == "" { + d.Project = readString("Please enter the project on deterlab", "SAFER") + } + + if d.Experiment == "" { + d.Experiment = readString("Please enter the Experiment on "+d.Project, "Dissent-CS") + } + + if d.MonitorAddress == "" { + d.MonitorAddress = readString("Please enter the Monitor address (where clients will connect)", "users.isi.deterlab.net") + } + if d.ProxyRedirectionPort == "" { + d.ProxyRedirectionPort = readString("Please enter the proxy redirection port", "4001") + } + if d.ProxyRedirectionAddress == "" { + d.ProxyRedirectionAddress = readString("Please enter the proxy redirection address", "localhost") + } + + app.WriteTomlConfig(*d, "deter.toml", d.DeterDir) +} + +// Shows a messages and reads in a string, eventually returning a default (dft) string +func readString(msg, dft string) string { + fmt.Printf("%s [%s]:", msg, dft) + + reader := bufio.NewReader(os.Stdin) + strnl, _ := reader.ReadString('\n') + str := strings.TrimSpace(strnl) + if str == "" { + return dft + } + return str +} diff --git a/deploy/deterlab/cothority.conf b/deploy/platform/deterlab/cothority.conf similarity index 66% rename from deploy/deterlab/cothority.conf rename to deploy/platform/deterlab/cothority.conf index fb6d1e603f..aea562fedf 100644 --- a/deploy/deterlab/cothority.conf +++ b/deploy/platform/deterlab/cothority.conf @@ -1,4 +1,4 @@ # This is for the cothority testbed, which can use up an awful lot of connections -* soft nofile 8192 -* hard nofile 16384 \ No newline at end of file +* soft nofile 128000 +* hard nofile 128000 diff --git a/deploy/platform/deterlab/forkexec/forkexec.go b/deploy/platform/deterlab/forkexec/forkexec.go new file mode 100644 index 0000000000..65e84d0ff4 --- /dev/null +++ b/deploy/platform/deterlab/forkexec/forkexec.go @@ -0,0 +1,98 @@ +package main + +import ( + "flag" + "os/exec" + "strconv" + + "github.com/dedis/cothority/deploy/platform" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/dbg" + "net" + "os" + "sync" +) + +// Wrapper around app to enable measuring of cpu time + +var deter platform.Deterlab +var testConnect bool +var physToServer map[string][]string +var rootname string + +func main() { + deter.ReadConfig() + // The flags are defined in lib/app + app.FlagInit() + flag.Parse() + + setup_deter() + + var wg sync.WaitGroup + virts := physToServer[app.RunFlags.PhysAddr] + if len(virts) > 0 { + dbg.Lvl3("starting", len(virts), "servers of", deter.App, "on", virts) + for _, name := range virts { + dbg.Lvl3("Starting", name, "on", app.RunFlags.PhysAddr) + wg.Add(1) + go func(nameport string) { + dbg.Lvl3("Running on", app.RunFlags.PhysAddr, "starting", nameport, rootname) + defer wg.Done() + + amroot := 
nameport == rootname + args := []string{ + "-hostname=" + nameport, + "-physaddr=" + app.RunFlags.PhysAddr, + "-amroot=" + strconv.FormatBool(amroot), + "-test_connect=" + strconv.FormatBool(testConnect), + "-logger=" + app.RunFlags.Logger, + "-mode=server", + } + + dbg.Lvl3("Starting on", app.RunFlags.PhysAddr, "with args", args) + cmdApp := exec.Command("./"+deter.App, args...) + cmdApp.Stdout = os.Stdout + cmdApp.Stderr = os.Stderr + err := cmdApp.Run() + if err != nil { + dbg.Lvl1("cmd run:", err) + } + + dbg.Lvl3("Finished with app", app.RunFlags.PhysAddr) + }(name) + } + dbg.Lvl3(app.RunFlags.PhysAddr, "Finished starting apps") + wg.Wait() + } else { + dbg.Lvl2("No apps for", app.RunFlags.PhysAddr) + } + dbg.Lvl2(app.RunFlags.PhysAddr, "forkexec exited") +} + +func setup_deter() { + vpmap := make(map[string]string) + for i := range deter.Virt { + vpmap[deter.Virt[i]] = deter.Phys[i] + } + + deter.Phys = deter.Phys[:] + deter.Virt = deter.Virt[:] + + hostnames := deter.Hostnames + dbg.Lvl4("hostnames:", hostnames) + + rootname = hostnames[0] + + // mapping from physical node name to the app servers that are running there + // essentially a reverse mapping of vpmap except ports are also used + physToServer = make(map[string][]string) + for _, virt := range hostnames { + v, _, _ := net.SplitHostPort(virt) + p := vpmap[v] + ss := physToServer[p] + ss = append(ss, virt) + physToServer[p] = ss + } + dbg.Lvl3("PhysToServer is", physToServer) + +} diff --git a/deploy/platform/deterlab/forkexec/forkexec_test.go b/deploy/platform/deterlab/forkexec/forkexec_test.go new file mode 100644 index 0000000000..f421ab4e82 --- /dev/null +++ b/deploy/platform/deterlab/forkexec/forkexec_test.go @@ -0,0 +1,7 @@ +package main_test + +import "testing" + +func TestBuild(t *testing.T) { + +} diff --git a/deploy/deterlab/topo.ns b/deploy/platform/deterlab/topo.ns similarity index 100% rename from deploy/deterlab/topo.ns rename to deploy/platform/deterlab/topo.ns diff --git a/deploy/platform/deterlab/users/users.go b/deploy/platform/deterlab/users/users.go new file mode 100644 index 0000000000..b796d50556 --- /dev/null +++ b/deploy/platform/deterlab/users/users.go @@ -0,0 +1,229 @@ +// deter is the deterlab process that should run on the boss node +// +// It spawns multiple timestampers and clients, while constructing +// the topology defined on tree.json. It assumes that hosts.txt has +// the entire list of hosts to run timestampers on and that the final +// host is the designated logging server. +// +// The overall topology that is created is defined by tree.json. +// The port layout for each node, however, is specified here. +// tree.json will assign each node a port p. This is the port +// that each singing node is listening on. The timestamp server +// to which clients connect is listneing on port p+1. And the +// pprof server for each node is listening on port p+2. This +// means that in order to debug each client, you can forward +// the p+2 port of each node to your localhost. +// +// In the future the loggingserver will be connecting to the +// servers on the pprof port in order to gather extra data. 
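
The p / p+1 / p+2 port layout described in the comment above can be derived mechanically from each "host:port" entry in the host list. The following is only an illustrative sketch and is not part of this change set; the helper name `nodePorts` and the sample address are made up for the example:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// nodePorts is a hypothetical helper that derives the three ports used by one
// node from a "host:p" entry, following the p / p+1 / p+2 layout described above:
// the signing node listens on p, the timestamp server on p+1, pprof on p+2.
func nodePorts(hostport string) (sign, stamp, pprof int, err error) {
	_, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return 0, 0, 0, err
	}
	p, err := strconv.Atoi(portStr)
	if err != nil {
		return 0, 0, 0, err
	}
	return p, p + 1, p + 2, nil
}

func main() {
	// Example entry as it could appear in a generated host list.
	sign, stamp, pprof, err := nodePorts("10.255.0.1:2000")
	if err != nil {
		panic(err)
	}
	fmt.Println(sign, stamp, pprof) // 2000 2001 2002
}
```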
+package main + +import ( + "flag" + "net" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/deploy/platform" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/monitor" + "os" + "os/exec" + "regexp" + "strconv" +) + +var deterlab platform.Deterlab +var kill = false + +func init() { + flag.BoolVar(&kill, "kill", false, "kill everything (and don't start anything)") +} + +func main() { + deterlab.ReadConfig() + flag.Parse() + + vpmap := make(map[string]string) + for i := range deterlab.Virt { + vpmap[deterlab.Virt[i]] = deterlab.Phys[i] + } + // kill old processes + var wg sync.WaitGroup + re := regexp.MustCompile(" +") + hosts, err := exec.Command("/usr/testbed/bin/node_list", "-e", deterlab.Project+","+deterlab.Experiment).Output() + if err != nil { + dbg.Fatal("Deterlab experiment", deterlab.Project+"/"+deterlab.Experiment, "seems not to be swapped in. Aborting.") + os.Exit(-1) + } + hosts_trimmed := strings.TrimSpace(re.ReplaceAllString(string(hosts), " ")) + hostlist := strings.Split(hosts_trimmed, " ") + doneHosts := make([]bool, len(hostlist)) + dbg.Lvl2("Found the following hosts:", hostlist) + if kill { + dbg.Lvl1("Cleaning up", len(hostlist), "hosts.") + } + for i, h := range hostlist { + wg.Add(1) + go func(i int, h string) { + defer wg.Done() + if kill { + dbg.Lvl4("Cleaning up host", h, ".") + cliutils.SshRun("", h, "sudo killall -9 "+deterlab.App+" logserver forkexec timeclient scp 2>/dev/null >/dev/null") + time.Sleep(1 * time.Second) + cliutils.SshRun("", h, "sudo killall -9 "+deterlab.App+" 2>/dev/null >/dev/null") + time.Sleep(1 * time.Second) + // Also kill all other process that start with "./" and are probably + // locally started processes + cliutils.SshRun("", h, "sudo pkill -9 -f '\\./'") + time.Sleep(1 * time.Second) + if dbg.DebugVisible > 3 { + dbg.Lvl4("Cleaning report:") + cliutils.SshRunStdout("", h, "ps aux") + } + } else { + dbg.Lvl3("Setting the file-limit higher on", h) + + // Copy configuration file to make higher file-limits + err := cliutils.SshRunStdout("", h, "sudo cp remote/cothority.conf /etc/security/limits.d") + if err != nil { + dbg.Fatal("Couldn't copy limit-file:", err) + } + } + doneHosts[i] = true + dbg.Lvl3("Host", h, "cleaned up") + }(i, h) + } + + cleanupChannel := make(chan string) + go func() { + wg.Wait() + dbg.Lvl3("Done waiting") + cleanupChannel <- "done" + }() + select { + case msg := <-cleanupChannel: + dbg.Lvl3("Received msg from cleanupChannel", msg) + case <-time.After(time.Second * 20): + for i, m := range doneHosts { + if !m { + dbg.Lvl1("Missing host:", hostlist[i], "- You should run") + dbg.Lvl1("/usr/testbed/bin/node_reboot", hostlist[i]) + } + } + dbg.Fatal("Didn't receive all replies while cleaning up - aborting.") + } + + if kill { + dbg.Lvl2("Only cleaning up - returning") + return + } + + // ADDITIONS : the monitoring part + // Proxy will listen on Sink:SinkPort and redirect every packet to + // RedirectionAddress:RedirectionPort. 
With remote tunnel forwarding it will + // be forwarded to the real sink + dbg.Lvl2("Launching proxy redirecting to", deterlab.ProxyRedirectionAddress, ":", deterlab.ProxyRedirectionPort) + go monitor.Proxy(deterlab.ProxyRedirectionAddress + ":" + deterlab.ProxyRedirectionPort) + time.Sleep(time.Second) + + hostnames := deterlab.Hostnames + dbg.Lvl4("hostnames:", hostnames) + + // mapping from physical node name to the timestamp servers that are running there + // essentially a reverse mapping of vpmap except ports are also used + physToServer := make(map[string][]string) + for _, virt := range hostnames { + v, _, _ := net.SplitHostPort(virt) + p := vpmap[v] + ss := physToServer[p] + ss = append(ss, virt) + physToServer[p] = ss + } + + monitorAddr := deterlab.MonitorAddress + ":" + monitor.SinkPort + servers := len(physToServer) + ppm := len(deterlab.Hostnames) / servers + dbg.Lvl1("starting", servers, "forkexecs with", ppm, "processes each =", servers*ppm) + totalServers := 0 + for phys, virts := range physToServer { + if len(virts) == 0 { + continue + } + totalServers += len(virts) + dbg.Lvl2("Launching forkexec for", len(virts), "clients on", phys) + wg.Add(1) + go func(phys string) { + //dbg.Lvl4("running on", phys, cmd) + defer wg.Done() + dbg.Lvl4("Starting servers on physical machine", phys, "with logger =", deterlab.MonitorAddress+":"+monitor.SinkPort) + err := cliutils.SshRunStdout("", phys, "cd remote; sudo ./forkexec"+ + " -physaddr="+phys+" -logger="+deterlab.MonitorAddress+":"+monitor.SinkPort) + if err != nil { + dbg.Lvl1("Error starting timestamper:", err, phys) + } + dbg.Lvl4("Finished with Timestamper", phys) + }(phys) + } + + if deterlab.App == "stamp" || deterlab.App == "sign" { + // Every stampserver that started up (mostly waiting for configuration-reading) + // writes its name in coll_stamp_dir - once everybody is there, the directory + // is cleaned to flag it's OK to go on. 
+ start_config := time.Now() + for { + s, err := monitor.GetReady(monitorAddr) + if err != nil { + log.Fatal("Couldn't contact monitor") + } else { + dbg.Lvl1("Processes started:", s.Ready, "/", totalServers, "after", time.Since(start_config)) + if s.Ready == totalServers { + dbg.Lvl2("Everybody ready, starting") + // 1st second for everybody to see the deleted directory + // 2nd second for everybody to start up listening + time.Sleep(time.Second * 2) + break + } + } + time.Sleep(time.Second) + } + } + + switch deterlab.App { + case "stamp": + dbg.Lvl1("starting", len(physToServer), "time clients") + // start up one timeclient per physical machine + // it requests timestamps from all the servers on that machine + amroot := true + for p, ss := range physToServer { + if len(ss) == 0 { + dbg.Lvl3("ss is empty - not starting") + continue + } + servers := strings.Join(ss, ",") + dbg.Lvl3("Starting with ss=", ss) + go func(p string, a bool) { + cmdstr := "cd remote; sudo ./" + deterlab.App + " -mode=client " + + " -name=client@" + p + + " -server=" + servers + + " -amroot=" + strconv.FormatBool(a) + dbg.Lvl3("Users will launch client:", cmdstr) + err := cliutils.SshRunStdout("", p, cmdstr) + if err != nil { + dbg.Lvl4("Deter.go: error for", deterlab.App, err) + } + dbg.Lvl4("Deter.go: Finished with", deterlab.App, p) + }(p, amroot) + amroot = false + } + case "sign_no": + // TODO: for now it's only a simple startup from the server + dbg.Lvl1("Starting only one client") + } + + // wait for the servers to finish before stopping + wg.Wait() +} diff --git a/deploy/platform/deterlab/users/users_test.go b/deploy/platform/deterlab/users/users_test.go new file mode 100644 index 0000000000..f421ab4e82 --- /dev/null +++ b/deploy/platform/deterlab/users/users_test.go @@ -0,0 +1,7 @@ +package main_test + +import "testing" + +func TestBuild(t *testing.T) { + +} diff --git a/deploy/platform/localhost.go b/deploy/platform/localhost.go new file mode 100644 index 0000000000..34b80e3170 --- /dev/null +++ b/deploy/platform/localhost.go @@ -0,0 +1,249 @@ +package platform + +import ( + "fmt" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/graphs" + "github.com/dedis/cothority/lib/monitor" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strconv" + "sync" + "time" +) + +// Localhost is responsible for launching the app with the specified number of nodes +// directly on your machine, for local testing. + +var defaultConfigName = "localhost.toml" + +// Localhost is the platform for launching thee apps locally +type Localhost struct { + + // Address of the logger (can be local or not) + Logger string + + // App to run [shamir,coll_sign..] + App string + // where the app is located + AppDir string + + // Where is the Localhost package located + LocalDir string + // Where to build the executables + + // where to read the config file + // it will be assembled like LocalDir/RunDir + RunDir string + + // Debug level 1 - 5 + Debug int + + // Number of machines - so we can use the same + // configuration-files + Machines int + // This gives the number of hosts per node (machine) + Ppm int + // hosts used with the applications + // example: localhost:2000, ...:2010 , ... 
+ Hosts []string + + // Whether we started a simulation + running bool + // WaitGroup for running processes + wg_run sync.WaitGroup +} + +// Configure various +func (d *Localhost) Configure() { + pwd, _ := os.Getwd() + d.AppDir = pwd + "/../app" + d.RunDir = pwd + "/platform/localhost" + d.LocalDir = pwd + d.Debug = dbg.DebugVisible + d.running = false + if d.App == "" { + dbg.Fatal("No app defined in simulation") + } + dbg.Lvl3(fmt.Sprintf("Localhost dirs: AppDir %s, RunDir %s", d.AppDir, d.RunDir)) + dbg.Lvl3("Localhost configured ...") +} + +// Will build the application +func (d *Localhost) Build(build string) error { + src, _ := filepath.Rel(d.LocalDir, d.AppDir+"/"+d.App) + dst := d.RunDir + "/" + d.App + start := time.Now() + // build for the local machine + res, err := cliutils.Build(src, dst, runtime.GOARCH, runtime.GOOS) + if err != nil { + dbg.Fatal("Error while building for localhost (src", src, ", dst", dst, ":", res) + } + dbg.Lvl3("Localhost: Build src", src, ", dst", dst) + dbg.Lvl3("Localhost: Results of localhost build:", res) + dbg.Lvl2("Localhost: build finished in", time.Since(start)) + return err +} + +func (d *Localhost) Cleanup() error { + ex := d.RunDir + "/" + d.App + err := exec.Command("pkill", "-f", ex).Run() + if err != nil { + dbg.Lvl3("Error stopping localhost", err) + } + + // Wait for eventual connections to clean up + time.Sleep(time.Second) + return nil +} + +func (d *Localhost) Deploy(rc RunConfig) error { + dbg.Lvl2("Localhost: Deploying and writing config-files") + + // Initialize the deter-struct with our current structure (for debug-levels + // and such), then read in the app-configuration to overwrite eventual + // 'Machines', 'Ppm', 'Loggers' or other fields + appConfig := d.RunDir + "/app.toml" + localConfig := d.RunDir + "/" + defaultConfigName + ioutil.WriteFile(appConfig, rc.Toml(), 0666) + d.ReadConfig(appConfig) + d.GenerateHosts() + + app.WriteTomlConfig(d, localConfig) + + // Prepare special configuration preparation for each application - the + // reading in twice of the configuration file, once for the deterConfig, + // then for the appConfig, sets the deterConfig as defaults and overwrites + // everything else with the actual appConfig (which comes from the + // runconfig-file) + switch d.App { + case "sign", "stamp": + conf := app.ConfigColl{} + conf.StampsPerRound = -1 + conf.StampRatio = 1.0 + app.ReadTomlConfig(&conf, localConfig) + app.ReadTomlConfig(&conf, appConfig) + // Calculates a tree that is used for the timestampers + // ppm = 1 + conf.Tree = graphs.CreateLocalTree(d.Hosts, conf.Bf) + conf.Hosts = d.Hosts + + dbg.Lvl2("Total hosts / depth:", len(conf.Hosts), graphs.Depth(conf.Tree)) + total := d.Machines * d.Ppm + if len(conf.Hosts) != total { + dbg.Fatal("Only calculated", len(conf.Hosts), "out of", total, "hosts - try changing number of", + "machines or hosts per node") + } + d.Hosts = conf.Hosts + // re-write the new configuration-file + app.WriteTomlConfig(conf, appConfig) + case "shamir": + conf := app.ConfigShamir{} + app.ReadTomlConfig(&conf, localConfig) + app.ReadTomlConfig(&conf, appConfig) + //_, conf.Hosts, _, _ = graphs.TreeFromList(d.Hosts, len(d.Hosts), 1) + //d.Hosts = conf.Hosts + dbg.Lvl4("Localhost: graphs.Tree for shamir", conf.Hosts) + // re-write the new configuration-file + app.WriteTomlConfig(conf, appConfig) + case "naive": + conf := app.NaiveConfig{} + app.ReadTomlConfig(&conf, localConfig) + app.ReadTomlConfig(&conf, appConfig) + dbg.Lvl4("Localhost: naive applications:", conf.Hosts) + 
app.WriteTomlConfig(conf, appConfig) + case "ntree": + conf := app.NTreeConfig{} + app.ReadTomlConfig(&conf, localConfig) + app.ReadTomlConfig(&conf, appConfig) + conf.Tree = graphs.CreateLocalTree(d.Hosts, conf.Bf) + conf.Hosts = d.Hosts + dbg.Lvl3("Localhost: naive Tree applications:", conf.Hosts) + d.Hosts = conf.Hosts + app.WriteTomlConfig(conf, appConfig) + case "randhound": + } + //app.WriteTomlConfig(d, defaultConfigName, d.RunDir) + debug := reflect.ValueOf(d).Elem().FieldByName("Debug") + if debug.IsValid() { + dbg.DebugVisible = debug.Interface().(int) + } + dbg.Lvl2("Localhost: Done deploying") + + return nil + +} + +func (d *Localhost) Start(args ...string) error { + os.Chdir(d.RunDir) + dbg.Lvl4("Localhost: chdir into", d.RunDir) + ex := d.RunDir + "/" + d.App + dbg.Lvl4("Localhost: in Start() => hosts", d.Hosts) + d.running = true + dbg.Lvl1("Starting", len(d.Hosts), "applications of", ex) + for index, host := range d.Hosts { + dbg.Lvl3("Starting", index, "=", host) + amroot := fmt.Sprintf("-amroot=%s", strconv.FormatBool(index == 0)) + cmdArgs := []string{"-hostname", host, "-mode", "server", "-logger", + "localhost:" + monitor.SinkPort, amroot} + cmdArgs = append(args, cmdArgs...) + dbg.Lvl3("CmdArgs are", cmdArgs) + cmd := exec.Command(ex, cmdArgs...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + go func(i int, h string) { + dbg.Lvl3("Localhost: will start host", host) + d.wg_run.Add(1) + err := cmd.Run() + if err != nil { + dbg.Lvl3("Error running localhost", h, ":", err) + } + d.wg_run.Done() + dbg.Lvl3("host (index", i, ")", h, "done") + }(index, host) + } + return nil +} + +// Waits for all processes to finish +func (d *Localhost) Wait() error { + dbg.Lvl3("Waiting for processes to finish") + d.wg_run.Wait() + dbg.Lvl2("Processes finished") + return nil +} + +// Reads in the localhost-config and drops out if there is an error +func (d *Localhost) ReadConfig(name ...string) { + configName := defaultConfigName + if len(name) > 0 { + configName = name[0] + } + err := app.ReadTomlConfig(d, configName) + _, caller, line, _ := runtime.Caller(1) + who := caller + ":" + strconv.Itoa(line) + if err != nil { + dbg.Fatal("Couldn't read config in", who, ":", err) + } + dbg.DebugVisible = d.Debug + dbg.Lvl4("Localhost: read the config, Hosts", d.Hosts) +} + +// GenerateHosts will generate the list of hosts +// with a new port each +func (d *Localhost) GenerateHosts() { + nrhosts := d.Machines * d.Ppm + d.Hosts = make([]string, nrhosts) + port := 2000 + inc := 5 + for i := 0; i < nrhosts; i++ { + s := "127.0.0.1:" + strconv.Itoa(port+inc*i) + d.Hosts[i] = s + } + dbg.Lvl4("Localhost: Generated hosts list", d.Hosts) +} diff --git a/deploy/platform/platform.go b/deploy/platform/platform.go new file mode 100644 index 0000000000..b5623beffc --- /dev/null +++ b/deploy/platform/platform.go @@ -0,0 +1,164 @@ +package platform + +import ( + "bufio" + "bytes" + "fmt" + "github.com/BurntSushi/toml" + "github.com/dedis/cothority/lib/dbg" + "os" + "strings" +) + +// Generic interface to represent a platform where to run tests +// or direct applications. For now only localhost + deterlab. +// one could imagine EC2 or OpenStack or whatever you can as long as you +// implement this interface ! 
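
A driver walks a platform through the lifecycle listed in deploy/platform/README.md (Configure, Build, Cleanup, Deploy, Start, Wait). The sketch below illustrates that flow using only functions defined in this package; the control flow and the run-file path are assumptions for illustration and do not claim to be the actual deploy tool:

```go
package main

import (
	"github.com/dedis/cothority/deploy/platform"
	"github.com/dedis/cothority/lib/dbg"
)

func main() {
	// "localhost" or "deterlab" - anything else returns nil.
	p := platform.NewPlatform("localhost")
	p.Configure()
	// An empty build-string means: build everything.
	if err := p.Build(""); err != nil {
		dbg.Fatal("build failed:", err)
	}
	// Run every configuration found in the run-file, one after the other.
	for _, rc := range platform.ReadRunFile(p, "simulation/sign_single.toml") {
		if err := p.Cleanup(); err != nil {
			dbg.Fatal("cleanup failed:", err)
		}
		if err := p.Deploy(rc); err != nil {
			dbg.Fatal("deploy failed:", err)
		}
		if err := p.Start(); err != nil {
			dbg.Fatal("start failed:", err)
		}
		if err := p.Wait(); err != nil {
			dbg.Fatal("wait failed:", err)
		}
	}
}
```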
+type Platform interface { + // Does the initial configuration of all structures needed for the platform + Configure() + // Builds all necessary binaries + Build(string) error + // Makes sure that there is no part of the application still running + Cleanup() error + // Copies the binaries to the appropriate directory/machines, together with + // the necessary configuration. RunConfig is a simple string that should + // be copied as 'app.toml' to the directory where the app resides + Deploy(RunConfig) error + // Starts the application and returns - non-blocking! + Start(args ...string) error + // Waits for the application to quit + Wait() error +} + +var deterlab string = "deterlab" +var localhost string = "localhost" + +// Return the appropriate platform +// [deterlab,localhost] +func NewPlatform(t string) Platform { + var p Platform + switch t { + case deterlab: + p = &Deterlab{} + case localhost: + p = &Localhost{} + } + return p +} + +/* Reads in a configuration-file for a run. The configuration-file has the + * following syntax: + * Name1 = value1 + * Name2 = value2 + * [empty line] + * n1, n2, n3, n4 + * v11, v12, v13, v14 + * v21, v22, v23, v24 + * + * The Name1...Namen are global configuration-options. + * n1..nn are configuration-options for one run + * Both the global and the run-configuration are copied to both + * the platform and the app-configuration. + */ +func ReadRunFile(p Platform, filename string) []RunConfig { + var runconfigs []RunConfig + masterConfig := NewRunConfig() + dbg.Lvl3("Reading file", filename) + + file, err := os.Open(filename) + defer file.Close() + if err != nil { + dbg.Fatal("Couldn't open file", file, err) + } + + // Decoding of the first part of the run config file + // where the config wont change for the whole set of the simulation's tests + scanner := bufio.NewScanner(file) + for scanner.Scan() { + text := scanner.Text() + dbg.Lvl3("Decoding", text) + // end of the first part + if text == "" { + break + } + + // checking if format is good + vals := strings.Split(text, "=") + if len(vals) != 2 { + dbg.Fatal("Simulation file:", filename, " is not properly formatted ( key = value )") + } + // fill in the general config + masterConfig.Put(strings.TrimSpace(vals[0]), strings.TrimSpace(vals[1])) + // also put it in platform + toml.Decode(text, p) + dbg.Lvlf3("Platform is now %+v", p) + } + + scanner.Scan() + args := strings.Split(scanner.Text(), ", ") + for scanner.Scan() { + rc := masterConfig.Clone() + // put each individual test configs + for i, value := range strings.Split(scanner.Text(), ", ") { + rc.Put(strings.TrimSpace(args[i]), strings.TrimSpace(value)) + } + runconfigs = append(runconfigs, *rc) + } + + return runconfigs +} + +// Struct that represent the configuration to apply for one "test" +// Note: a "simulation" is a set of "tests" +type RunConfig struct { + fields map[string]string +} + +func NewRunConfig() *RunConfig { + rc := new(RunConfig) + rc.fields = make(map[string]string) + return rc +} + +// One problem for now is RunConfig read also the ' " ' char (34 ASCII) +// and thus when doing Get() , also return the value enclosed by ' " ' +// One fix is to each time we Get(), aautomatically delete those chars +var replacer *strings.Replacer = strings.NewReplacer("\"", "", "'", "") + +// Returns the associated value of the field in the config +func (r *RunConfig) Get(field string) string { + return replacer.Replace(r.fields[strings.ToLower(field)]) +} + +// Insert a new field - value relationship +func (r *RunConfig) Put(field, value string) 
{ + r.fields[strings.ToLower(field)] = value +} + +// Returns this config as bytes in a Toml format +func (r *RunConfig) Toml() []byte { + var buf bytes.Buffer + for k, v := range r.fields { + fmt.Fprintf(&buf, "%s = %s\n", k, v) + } + return buf.Bytes() +} + +// Returns this config as a Map +func (r *RunConfig) Map() map[string]string { + tomap := make(map[string]string) + for k := range r.fields { + tomap[k] = r.Get(k) + } + return tomap +} + +// Clone this runconfig so it has all fields-value relationship already present +func (r *RunConfig) Clone() *RunConfig { + rc := NewRunConfig() + for k, v := range r.fields { + rc.fields[k] = v + } + return rc +} diff --git a/deploy/platform/platform_test.go b/deploy/platform/platform_test.go new file mode 100644 index 0000000000..36899f358f --- /dev/null +++ b/deploy/platform/platform_test.go @@ -0,0 +1,53 @@ +package platform_test + +import ( + "github.com/dedis/cothority/deploy/platform" + "github.com/dedis/cothority/lib/dbg" + "io/ioutil" + // "strings" + "testing" +) + +var testfile = `Machines = 8 +App = "sign" + +Ppm, Rounds +2, 30 +4, 30` + +func TestReadRunfile(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 2) + tplat := &TPlat{} + + tmpfile := "/tmp/testrun.toml" + err := ioutil.WriteFile(tmpfile, []byte(testfile), 0666) + if err != nil { + dbg.Fatal("Couldn't create file:", err) + } + + tests := platform.ReadRunFile(tplat, tmpfile) + dbg.Lvl2(tplat) + dbg.Lvlf2("%+v\n", tests[0]) + if tplat.App != "sign" { + dbg.Fatal("App should be 'sign'") + } + if len(tests) != 2 { + dbg.Fatal("There should be 2 tests") + } + if tests[0].Get("machines") != "8" { + dbg.Fatal("Machines = 8 has not been copied into RunConfig") + } +} + +type TPlat struct { + App string + Machines int +} + +func (t *TPlat) Configure() {} +func (t *TPlat) Build(s string) error { return nil } +func (t *TPlat) Deploy(rc platform.RunConfig) error { return nil } +func (t *TPlat) Start(...string) error { return nil } +func (t *TPlat) Stop() error { return nil } +func (t *TPlat) Cleanup() error { return nil } +func (t *TPlat) Wait() error { return nil } diff --git a/deploy/run_all.sh b/deploy/run_all.sh new file mode 100755 index 0000000000..cbe07b5602 --- /dev/null +++ b/deploy/run_all.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash -e + +echo Building deploy-binary +go build + +for simul in simulation/test*toml; do + echo Simulating $simul + ./deploy $simul + echo -e "\n\n" +done \ No newline at end of file diff --git a/deploy/simulation/Paper_cosi/naive_multi_adapted.toml b/deploy/simulation/Paper_cosi/naive_multi_adapted.toml new file mode 100644 index 0000000000..6e3b6f7285 --- /dev/null +++ b/deploy/simulation/Paper_cosi/naive_multi_adapted.toml @@ -0,0 +1,17 @@ +App = "naive" +Suite = "Ed25519" +Rounds = 10 + +Machines, Ppm +2, 1 +4, 1 +8, 1 +16, 1 +16, 2 +16, 4 +16, 8 +16, 16 +16, 32 +16, 64 +16, 128 +16, 256 diff --git a/deploy/simulation/Paper_cosi/naive_multi_adapted_skipcheck.toml b/deploy/simulation/Paper_cosi/naive_multi_adapted_skipcheck.toml new file mode 100644 index 0000000000..71b227c83f --- /dev/null +++ b/deploy/simulation/Paper_cosi/naive_multi_adapted_skipcheck.toml @@ -0,0 +1,18 @@ +App = "naive" +Suite = "Ed25519" +Rounds = 10 +SkipChecks = true + +Machines, Ppm +2, 1 +4, 1 +8, 1 +16, 1 +16, 2 +16, 4 +16, 8 +16, 16 +16, 32 +16, 64 +16, 128 +16, 256 diff --git a/deploy/simulation/Paper_cosi/ntree_multi_adapted.toml b/deploy/simulation/Paper_cosi/ntree_multi_adapted.toml new file mode 100644 index 0000000000..34917b98cf --- /dev/null +++ 
b/deploy/simulation/Paper_cosi/ntree_multi_adapted.toml @@ -0,0 +1,15 @@ +App = "ntree" +Suite = "Ed25519" +Rounds = 20 + +Machines, Ppm, Bf +2, 1, 2 +4, 1, 3 +8, 1, 4 +16, 1, 5 +16, 2, 6 +16, 4, 7 +16, 8, 8 +16, 16, 9 +16, 32, 10 +16, 64, 10 \ No newline at end of file diff --git a/deploy/simulation/Paper_cosi/shamir_multi_adapted.toml b/deploy/simulation/Paper_cosi/shamir_multi_adapted.toml new file mode 100644 index 0000000000..f46f02cc36 --- /dev/null +++ b/deploy/simulation/Paper_cosi/shamir_multi_adapted.toml @@ -0,0 +1,11 @@ +App = "shamir" +Rounds = 20 + +Machines, ppm +16, 1 +2, 1 +4, 1 +8, 1 +16, 1 +16, 2 +16, 4 diff --git a/deploy/simulation/Paper_cosi/sign_huge.toml b/deploy/simulation/Paper_cosi/sign_huge.toml new file mode 100644 index 0000000000..3438852d23 --- /dev/null +++ b/deploy/simulation/Paper_cosi/sign_huge.toml @@ -0,0 +1,18 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 + +Machines, ppm, bf +2, 1, 2 +4, 1, 4 +8, 1, 8 +16, 1, 8 +16, 2, 8 +16, 4, 8 +16, 8, 8 +16, 16, 8 +16, 32, 8 +16, 64, 8 +16, 128, 8 +16, 256, 8 +16, 512, 8 diff --git a/deploy/simulation/Paper_cosi/sign_lan_speed.toml b/deploy/simulation/Paper_cosi/sign_lan_speed.toml new file mode 100644 index 0000000000..bf58b21336 --- /dev/null +++ b/deploy/simulation/Paper_cosi/sign_lan_speed.toml @@ -0,0 +1,11 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 +Machines = 16 +bf = 8 + +ppm +8 +32 +128 +512 \ No newline at end of file diff --git a/deploy/simulation/Paper_cosi/sign_over_1.toml b/deploy/simulation/Paper_cosi/sign_over_1.toml new file mode 100644 index 0000000000..65361b9bf8 --- /dev/null +++ b/deploy/simulation/Paper_cosi/sign_over_1.toml @@ -0,0 +1,14 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 +Machines = 8 +bf = 8 + +ppm +8 +16 +32 +64 +128 +256 +512 \ No newline at end of file diff --git a/deploy/simulation/Paper_cosi/sign_over_2.toml b/deploy/simulation/Paper_cosi/sign_over_2.toml new file mode 100644 index 0000000000..d70d1b7490 --- /dev/null +++ b/deploy/simulation/Paper_cosi/sign_over_2.toml @@ -0,0 +1,15 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 +Machines = 16 +bf = 8 + +ppm +4 +8 +16 +32 +64 +128 +256 +512 \ No newline at end of file diff --git a/deploy/simulation/Paper_cosi/sign_over_3.toml b/deploy/simulation/Paper_cosi/sign_over_3.toml new file mode 100644 index 0000000000..5404069560 --- /dev/null +++ b/deploy/simulation/Paper_cosi/sign_over_3.toml @@ -0,0 +1,15 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 +Machines = 32 +bf = 8 + +ppm +4 +8 +16 +32 +64 +128 +256 +512 diff --git a/deploy/simulation/Paper_cosi/stamp_machines_1.toml b/deploy/simulation/Paper_cosi/stamp_machines_1.toml new file mode 100644 index 0000000000..88b4d98b17 --- /dev/null +++ b/deploy/simulation/Paper_cosi/stamp_machines_1.toml @@ -0,0 +1,19 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 +Rate = 1000 + +Machines, ppm, bf +2, 1, 2 +4, 1, 3 +8, 1, 4 +16, 1, 5 +16, 2, 6 +16, 4, 7 +16, 8, 8 +16, 16, 10 +16, 32, 10 +16, 64, 10 +16, 128, 10 +16, 256, 10 +16, 512, 10 diff --git a/deploy/simulation/Paper_cosi/stamp_rate.toml b/deploy/simulation/Paper_cosi/stamp_rate.toml new file mode 100644 index 0000000000..43d8b904e8 --- /dev/null +++ b/deploy/simulation/Paper_cosi/stamp_rate.toml @@ -0,0 +1,18 @@ +App = "stamp" +Suite = "Ed25519" +Rounds = 20 +Machines = 16 +PPM = 256 + +bf +2 +3 +4 +5 +6 +7 +8 +10 +12 +14 +16 \ No newline at end of file diff --git a/deploy/simulation/naive_multi.toml b/deploy/simulation/naive_multi.toml new file mode 100644 index 0000000000..45ee29a738 --- /dev/null +++ 
b/deploy/simulation/naive_multi.toml @@ -0,0 +1,8 @@ +Machines = 3 +App = "naive" +Suite = "Ed25519" + +Ppm, Rounds +1, 10 +2, 10 +4, 10 diff --git a/deploy/simulation/naive_multi_adapted.toml b/deploy/simulation/naive_multi_adapted.toml new file mode 100644 index 0000000000..7cf54214cc --- /dev/null +++ b/deploy/simulation/naive_multi_adapted.toml @@ -0,0 +1,11 @@ +App = "naive" +Suite = "Ed25519" +Rounds = 20 + +Machines, Ppm +2, 1 +4, 1 +8, 1 +16, 1 +16, 2 +16, 4 diff --git a/deploy/simulation/naive_multi_adapted_skipcheck.toml b/deploy/simulation/naive_multi_adapted_skipcheck.toml new file mode 100644 index 0000000000..37a40cc064 --- /dev/null +++ b/deploy/simulation/naive_multi_adapted_skipcheck.toml @@ -0,0 +1,12 @@ +App = "naive" +Suite = "Ed25519" +Rounds = 10 +SkipChecks = true + +Machines, Ppm +2, 1 +4, 1 +8, 1 +16, 1 +16, 2 +16, 4 diff --git a/deploy/simulation/naive_single.toml b/deploy/simulation/naive_single.toml new file mode 100644 index 0000000000..4450f7baa8 --- /dev/null +++ b/deploy/simulation/naive_single.toml @@ -0,0 +1,7 @@ +Machines = 3 +App = "naive" +Suite = "Ed25519" +SkipChecks = false + +PPM, Rounds +1, 3 diff --git a/deploy/simulation/naive_single_skipcheck.toml b/deploy/simulation/naive_single_skipcheck.toml new file mode 100644 index 0000000000..51bf00415d --- /dev/null +++ b/deploy/simulation/naive_single_skipcheck.toml @@ -0,0 +1,7 @@ +Machines = 128 +App = "naive" +Suite = "Ed25519" +SkipChecks = true + +Ppm, Rounds +1, 3 diff --git a/deploy/simulation/ntree_multi.toml b/deploy/simulation/ntree_multi.toml new file mode 100644 index 0000000000..884dfa4cd5 --- /dev/null +++ b/deploy/simulation/ntree_multi.toml @@ -0,0 +1,12 @@ +Machines = 8 +App = "ntree" +Suite = "Ed25519" + +Ppm, Bf, Rounds +1, 8, 10 +2, 8, 10 +4, 8, 10 +8, 8, 10 +16, 8, 10 +32, 8, 10 +64, 8, 10 diff --git a/deploy/simulation/ntree_multi_adapted.toml b/deploy/simulation/ntree_multi_adapted.toml new file mode 100644 index 0000000000..69b73df535 --- /dev/null +++ b/deploy/simulation/ntree_multi_adapted.toml @@ -0,0 +1,16 @@ +App = "ntree" +Suite = "Ed25519" +Rounds = 20 + +Machines, Ppm, Bf +2, 1, 2 +4, 1, 3 +8, 1, 4 +16, 1, 5 +16, 2, 6 +16, 4, 7 +16, 8, 8 +16, 16, 9 +16, 32, 10 +16, 64, 10 + diff --git a/deploy/simulation/ntree_single.toml b/deploy/simulation/ntree_single.toml new file mode 100644 index 0000000000..b48bcd30e9 --- /dev/null +++ b/deploy/simulation/ntree_single.toml @@ -0,0 +1,6 @@ +Machines = 3 +App = "ntree" +Suite = "Ed25519" + +Ppm, Bf, Rounds +2, 2, 3 diff --git a/deploy/simulation/old/sign_hosts_test.toml b/deploy/simulation/old/sign_hosts_test.toml new file mode 100644 index 0000000000..f489535b2d --- /dev/null +++ b/deploy/simulation/old/sign_hosts_test.toml @@ -0,0 +1,11 @@ +var SignTest = []T{ + {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 2, 3, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 4, 3, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 8, 8, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 16, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 32, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 128, 16, 30, 50, 0, 0, 0, false, "coll_sign"}, +} + diff --git a/deploy/simulation/old/sign_multi.toml b/deploy/simulation/old/sign_multi.toml new file mode 100644 index 0000000000..2de91f25fa --- /dev/null +++ b/deploy/simulation/old/sign_multi.toml @@ -0,0 +1,6 @@ +var SignTestMulti2 = []T{ + {0, 256, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 512, 32, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 1024, 64, 30, 20, 
0, 0, 0, false, "coll_sign"}, +} + diff --git a/deploy/simulation/old/sign_multi_2.toml b/deploy/simulation/old/sign_multi_2.toml new file mode 100644 index 0000000000..7b24ddaf88 --- /dev/null +++ b/deploy/simulation/old/sign_multi_2.toml @@ -0,0 +1,9 @@ +var SignTestMulti = []T{ + {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 8, 8, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 32, 8, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 128, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, + {0, 256, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, +} + diff --git a/deploy/simulation/old/stamp_depth_fixed.toml b/deploy/simulation/old/stamp_depth_fixed.toml new file mode 100644 index 0000000000..680f65340b --- /dev/null +++ b/deploy/simulation/old/stamp_depth_fixed.toml @@ -0,0 +1,14 @@ +func DepthTestFixed(hpn int) []T { + return []T{ + {0, hpn, 1, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 2, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 4, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 8, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 16, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 32, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 64, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 128, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 256, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + {0, hpn, 512, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, + } +} diff --git a/deploy/simulation/old/stamp_failure_test.toml b/deploy/simulation/old/stamp_failure_test.toml new file mode 100644 index 0000000000..0a0e1a6a44 --- /dev/null +++ b/deploy/simulation/old/stamp_failure_test.toml @@ -0,0 +1,9 @@ +// nmachs=32, hpn=128, bf=16, rate=500, failures=20, root failures, failures +var FailureTests = []T{ + {0, 64, 16, 30, 50, 0, 0, 0, false, "coll_stamp"}, + {0, 64, 16, 30, 50, 0, 5, 0, false, "coll_stamp"}, + {0, 64, 16, 30, 50, 0, 10, 0, false, "coll_stamp"}, + {0, 64, 16, 30, 50, 5, 0, 5, false, "coll_stamp"}, + {0, 64, 16, 30, 50, 5, 0, 10, false, "coll_stamp"}, + {0, 64, 16, 30, 50, 5, 0, 10, true, "coll_stamp"}, +} diff --git a/deploy/simulation/old/stamp_hosts_test.toml b/deploy/simulation/old/stamp_hosts_test.toml new file mode 100644 index 0000000000..d254bc5570 --- /dev/null +++ b/deploy/simulation/old/stamp_hosts_test.toml @@ -0,0 +1,10 @@ +var HostsTest = []T{ + {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 2, 3, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 4, 3, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 8, 8, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 16, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 32, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 128, 16, 30, 50, 0, 0, 0, false, "coll_stamp"}, +} diff --git a/deploy/simulation/old/stamp_rate_load.toml b/deploy/simulation/old/stamp_rate_load.toml new file mode 100644 index 0000000000..519cc9beb5 --- /dev/null +++ b/deploy/simulation/old/stamp_rate_load.toml @@ -0,0 +1,10 @@ +// high and low specify how many milliseconds between messages +func RateLoadTest(hpn, bf int) []T { + return []T{ + {0, hpn, bf, 5000, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // never send a message + {0, hpn, bf, 5000, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // one per round + {0, hpn, bf, 500, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // 10 per round + {0, hpn, bf, 50, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // 
100 per round + {0, hpn, bf, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // 1000 per round + } +} diff --git a/deploy/simulation/old/stamp_single.toml b/deploy/simulation/old/stamp_single.toml new file mode 100644 index 0000000000..308df5e832 --- /dev/null +++ b/deploy/simulation/old/stamp_single.toml @@ -0,0 +1,4 @@ +var HostsTestSingle = []T{ + {0, 2, 8, 30, 20, 0, 0, 0, false, "coll_stamp"}, +} + diff --git a/deploy/simulation/old/stamp_single_2.toml b/deploy/simulation/old/stamp_single_2.toml new file mode 100644 index 0000000000..9f00207911 --- /dev/null +++ b/deploy/simulation/old/stamp_single_2.toml @@ -0,0 +1,5 @@ +var StampTestSingle = []T{ + {0, 1, 2, + 30, 20, 0, + 0, 0, false, "coll_stamp"}, +} diff --git a/deploy/simulation/old/stamp_test_short.toml b/deploy/simulation/old/stamp_test_short.toml new file mode 100644 index 0000000000..de05b8a0f3 --- /dev/null +++ b/deploy/simulation/old/stamp_test_short.toml @@ -0,0 +1,7 @@ +var HostsTestShort = []T{ + {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 8, 4, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 32, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, + {0, 128, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, +} diff --git a/deploy/simulation/old/vote_hosts_test.toml b/deploy/simulation/old/vote_hosts_test.toml new file mode 100644 index 0000000000..93b76fc477 --- /dev/null +++ b/deploy/simulation/old/vote_hosts_test.toml @@ -0,0 +1,10 @@ +var VTest = []T{ + {0, 1, 3, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 2, 4, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 4, 6, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 8, 8, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 16, 16, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 32, 16, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 64, 16, 10000000, 20, 0, 0, 0, false, "vote"}, + {0, 128, 16, 10000000, 20, 0, 0, 0, false, "vote"}, +} diff --git a/deploy/simulation/shamir_multi.toml b/deploy/simulation/shamir_multi.toml new file mode 100644 index 0000000000..4b344aa327 --- /dev/null +++ b/deploy/simulation/shamir_multi.toml @@ -0,0 +1,12 @@ +Machines = 8 +App = "shamir" +Rounds = 10 + +ppm, bf +1, 2 +2, 2 +4, 2 +8, 2 +16, 2 +32, 2 +64, 2 diff --git a/deploy/simulation/shamir_multi_adapted.toml b/deploy/simulation/shamir_multi_adapted.toml new file mode 100644 index 0000000000..aaf886f4ba --- /dev/null +++ b/deploy/simulation/shamir_multi_adapted.toml @@ -0,0 +1,10 @@ +App = "shamir" +Rounds = 20 + +Machines, ppm +2, 1 +4, 1 +8, 1 +16, 1 +16, 2 +16, 4 diff --git a/deploy/simulation/shamir_single.toml b/deploy/simulation/shamir_single.toml new file mode 100644 index 0000000000..00aaefa388 --- /dev/null +++ b/deploy/simulation/shamir_single.toml @@ -0,0 +1,7 @@ +Machines = 3 +Logservers = 0 +App = "shamir" +Suite = "Ed25519" + +ppm, Rounds +1, 10 diff --git a/deploy/simulation/sign_fail_marshal.toml b/deploy/simulation/sign_fail_marshal.toml new file mode 100644 index 0000000000..eca8c45864 --- /dev/null +++ b/deploy/simulation/sign_fail_marshal.toml @@ -0,0 +1,5 @@ +Machines = 2 +App = "sign" + +ppm, bf, rounds, rate, suite +4, 2, 10, 10, "25519" diff --git a/deploy/simulation/sign_multi.toml b/deploy/simulation/sign_multi.toml new file mode 100644 index 0000000000..bb52c043cf --- /dev/null +++ b/deploy/simulation/sign_multi.toml @@ -0,0 +1,17 @@ +Machines = 8 +App = "sign" +Rounds = 20 +Suite = "Ed25519" + +ppm, bf +1, 2 +4, 3 +8, 4 +16, 5 +32, 6 +64, 7 +128, 8 +256, 9 +512, 10 +1024, 11 +2048, 12 diff --git 
a/deploy/simulation/sign_multi_adapted.toml b/deploy/simulation/sign_multi_adapted.toml new file mode 100644 index 0000000000..4d6c827dd2 --- /dev/null +++ b/deploy/simulation/sign_multi_adapted.toml @@ -0,0 +1,18 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 + +Machines, ppm, bf +2, 1, 2 +4, 1, 3 +8, 1, 4 +16, 1, 5 +16, 2, 6 +16, 4, 7 +16, 8, 8 +16, 16, 10 +16, 32, 10 +16, 64, 10 +16, 128, 10 +16, 256, 10 +16, 512, 10 diff --git a/deploy/simulation/sign_single.toml b/deploy/simulation/sign_single.toml new file mode 100644 index 0000000000..33193ec0c7 --- /dev/null +++ b/deploy/simulation/sign_single.toml @@ -0,0 +1,5 @@ +Machines = 16 +App = "sign" + +ppm, bf, rounds, rate, suite +256, 8, 20, 10, "Ed25519" diff --git a/deploy/simulation/stamp_multi.toml b/deploy/simulation/stamp_multi.toml new file mode 100644 index 0000000000..6d9cbc4719 --- /dev/null +++ b/deploy/simulation/stamp_multi.toml @@ -0,0 +1,19 @@ +Machines = 8 +App = "stamp" +Rounds = 10 +Rate = 10 +Suite = "Ed25519" +filter_round = 90 + +ppm, bf +1, 2 +4, 3 +8, 4 +16, 5 +32, 6 +64, 7 +128, 8 +256, 9 +512, 10 +1024, 11 +2048, 12 diff --git a/deploy/simulation/stamp_multi_adapted.toml b/deploy/simulation/stamp_multi_adapted.toml new file mode 100644 index 0000000000..4d6c827dd2 --- /dev/null +++ b/deploy/simulation/stamp_multi_adapted.toml @@ -0,0 +1,18 @@ +App = "sign" +Suite = "Ed25519" +Rounds = 20 + +Machines, ppm, bf +2, 1, 2 +4, 1, 3 +8, 1, 4 +16, 1, 5 +16, 2, 6 +16, 4, 7 +16, 8, 8 +16, 16, 10 +16, 32, 10 +16, 64, 10 +16, 128, 10 +16, 256, 10 +16, 512, 10 diff --git a/deploy/simulation/stamp_perc.toml b/deploy/simulation/stamp_perc.toml new file mode 100644 index 0000000000..1779361ff4 --- /dev/null +++ b/deploy/simulation/stamp_perc.toml @@ -0,0 +1,14 @@ +Machines = 16 +App = "stamp" +Rounds = 10 +StampRatio = -1 +Suite = "Ed25519" +ppm = 8 +bf = 8 + +rate +10 +100 +200 +300 +400 diff --git a/deploy/simulation/stamp_perc10.toml b/deploy/simulation/stamp_perc10.toml new file mode 100644 index 0000000000..accc559b4c --- /dev/null +++ b/deploy/simulation/stamp_perc10.toml @@ -0,0 +1,13 @@ +Machines = 16 +App = "stamp" +Rounds = 10 +StampRatio = 0.1 +Suite = "Ed25519" +ppm = 50 +bf = 8 + +rate +100 +1000 +10000 +100000 diff --git a/deploy/simulation/stamp_perc100.toml b/deploy/simulation/stamp_perc100.toml new file mode 100644 index 0000000000..a0b429d40f --- /dev/null +++ b/deploy/simulation/stamp_perc100.toml @@ -0,0 +1,13 @@ +Machines = 16 +App = "stamp" +Rounds = 10 +Suite = "Ed25519" +ppm = 256 +bf = 8 +StampRatio = 1 + +rate +10 +100 +500 +1000 diff --git a/deploy/simulation/stamp_perc50.toml b/deploy/simulation/stamp_perc50.toml new file mode 100644 index 0000000000..522d853021 --- /dev/null +++ b/deploy/simulation/stamp_perc50.toml @@ -0,0 +1,14 @@ +Machines = 16 +App = "stamp" +Rounds = 10 +StampRatio = 0.5 +Suite = "Ed25519" +ppm = 128 +bf = 8 + +rate +10 +100 +200 +300 +400 diff --git a/deploy/simulation/stamp_percLeader.toml b/deploy/simulation/stamp_percLeader.toml new file mode 100644 index 0000000000..8e2c2c6c6a --- /dev/null +++ b/deploy/simulation/stamp_percLeader.toml @@ -0,0 +1,13 @@ +Machines = 16 +App = "stamp" +Rounds = 10 +StampRatio = -1 +Suite = "Ed25519" +ppm = 256 +bf = 8 + +rate +100 +1000 +10000 +100000 diff --git a/deploy/simulation/stamp_single.toml b/deploy/simulation/stamp_single.toml new file mode 100644 index 0000000000..b676752272 --- /dev/null +++ b/deploy/simulation/stamp_single.toml @@ -0,0 +1,10 @@ +Machines = 6 +App = "stamp" +Rounds = 20 +Rate = 10 +StampsPerRound = 2 +StampRatio 
= 0.50 +Suite = "Ed25519" + +ppm, bf +1, 2 diff --git a/deploy/simulation/test_naive_multi.toml b/deploy/simulation/test_naive_multi.toml new file mode 100644 index 0000000000..1e09cf4af8 --- /dev/null +++ b/deploy/simulation/test_naive_multi.toml @@ -0,0 +1,8 @@ +Machines = 2 +App = "naive" +Suite = "Ed25519" + +Ppm, Rounds +1, 10 +2, 10 +4, 10 diff --git a/deploy/simulation/test_ntree_multi.toml b/deploy/simulation/test_ntree_multi.toml new file mode 100644 index 0000000000..8e094aefd8 --- /dev/null +++ b/deploy/simulation/test_ntree_multi.toml @@ -0,0 +1,8 @@ +Machines = 2 +App = "ntree" +Suite = "Ed25519" + +Ppm, Bf, Rounds +1, 8, 10 +2, 8, 10 +4, 8, 10 diff --git a/deploy/simulation/test_shamir_multi.toml b/deploy/simulation/test_shamir_multi.toml new file mode 100644 index 0000000000..1ef905dccd --- /dev/null +++ b/deploy/simulation/test_shamir_multi.toml @@ -0,0 +1,8 @@ +Machines = 2 +App = "shamir" +Rounds = 10 + +ppm, bf +1, 2 +2, 2 +4, 2 diff --git a/deploy/simulation/test_sign_multi.toml b/deploy/simulation/test_sign_multi.toml new file mode 100644 index 0000000000..dae7746e9c --- /dev/null +++ b/deploy/simulation/test_sign_multi.toml @@ -0,0 +1,9 @@ +Machines = 8 +App = "sign" +Rounds = 10 +Suite = "Ed25519" + +ppm, bf +1, 2 +2, 3 +4, 4 diff --git a/deploy/simulation/test_stamp_multi.toml b/deploy/simulation/test_stamp_multi.toml new file mode 100644 index 0000000000..46e6b3f7c2 --- /dev/null +++ b/deploy/simulation/test_stamp_multi.toml @@ -0,0 +1,10 @@ +Machines = 4 +App = "stamp" +Rounds = 10 +Rate = 10 +Suite = "Ed25519" + +ppm, bf +1, 2 +2, 3 +4, 4 diff --git a/deploy/start.go b/deploy/start.go deleted file mode 100644 index 9c5263cdca..0000000000 --- a/deploy/start.go +++ /dev/null @@ -1,400 +0,0 @@ -// Outputting data: output to csv files (for loading into excel) -// make a datastructure per test output file -// all output should be in the test_data subdirectory -// -// connect with logging server (receive json until "EOF" seen or "terminating") -// connect to websocket ws://localhost:8080/log -// receive each message as bytes -// if bytes contains "EOF" or contains "terminating" -// wrap up the round, output to test_data directory, kill deploy2deter -// -// for memstats check localhost:8080/d/server-0-0/debug/vars -// parse out the memstats zones that we are concerned with -// -// different graphs needed rounds: -// load on the x-axis: increase messages per round holding everything else constant -// hpn=40 bf=10, bf=50 -// -// latency on y-axis, timestamp servers on x-axis push timestampers as higher as possible -// -// -package deploy - -import ( - "errors" - "fmt" - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "math" - "os" - "strconv" - "time" -) - -// Configuration-variables -var deploy_config *Config -var deployP Platform -var nobuild bool = false -var port int = 8081 - -// time-per-round * DefaultRounds = 10 * 20 = 3.3 minutes now -// this leaves us with 7 minutes for test setup and tear-down -var DefaultRounds int = 1 - -func init() { - deploy_config = NewConfig() - deployP = NewPlatform() -} - -type T struct { - nmachs int - hpn int - bf int - - rate int - rounds int - failures int - - rFail int - fFail int - testConnect bool - app string -} - -// nmachs, hpn, bf -// rate, rounds, failures -// rFail, fFail, testConnect, app -var StampTestSingle = []T{ - {0, 1, 2, - 30, 20, 0, - 0, 0, false, "coll_stamp"}, -} - -var SignTestSingle = []T{ - {0, 8, 8, 30, 10, 0, 0, 0, false, "coll_sign"}, -} - -var SignTestMulti = []T{ - {0, 
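// The deploy/simulation/*.toml files above all share one layout: a block of
// global TOML keys (Machines, App, Suite, Rounds, ...), a blank line, and then
// a small table whose header names the per-run parameters and whose rows each
// describe one simulation run. How the deploy tool itself splits and parses
// the two parts is not shown in this diff; the following is only a hedged,
// self-contained sketch of one way to read such a file, assuming the blank
// line is the separator and the run table is plain comma-separated text.
package main

import (
	"encoding/csv"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/BurntSushi/toml"
)

// globals mirrors the global keys of e.g. naive_single.toml.
type globals struct {
	Machines   int
	App        string
	Suite      string
	SkipChecks bool
}

func main() {
	raw, err := ioutil.ReadFile("deploy/simulation/naive_single.toml")
	if err != nil {
		panic(err)
	}
	parts := strings.SplitN(string(raw), "\n\n", 2)

	var g globals
	if _, err := toml.Decode(parts[0], &g); err != nil {
		panic(err)
	}
	fmt.Printf("globals: %+v\n", g)

	if len(parts) < 2 {
		return // no run table in this file
	}
	r := csv.NewReader(strings.NewReader(strings.TrimSpace(parts[1])))
	r.TrimLeadingSpace = true
	rows, err := r.ReadAll()
	if err != nil {
		panic(err)
	}
	// rows[0] is the header ("PPM, Rounds"); every following row is one run.
	for _, run := range rows[1:] {
		fmt.Println("run:", rows[0], "=", run)
	}
}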
1, 2, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 8, 8, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 32, 8, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 128, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 256, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, -} - -var SignTestMulti2 = []T{ - {0, 256, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 512, 32, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 1024, 64, 30, 20, 0, 0, 0, false, "coll_sign"}, -} - -var HostsTestSingle = []T{ - {0, 2, 8, 30, 20, 0, 0, 0, false, "coll_stamp"}, -} - -var HostsTestShort = []T{ - {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 8, 4, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 32, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 128, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, -} -var SchnorrHostSingle = []T{ - {8, 2, 2, 30, 20, 0, 0, 0, false, "schnorr_sign"}, -} - -func Start(destination string, nbld bool, build string, machines int) { - deployP.Configure(deploy_config) - nobuild = nbld - deploy_config.Nmachs = machines - - deployP.Stop() - - if nobuild == false { - deployP.Build(build) - } - - dbg.Lvl1("Starting tests") - DefaultRounds = 5 - RunTests("schnorr_host_single", SchnorrHostSingle) - //RunTests("sign_test_single", SignTestSingle) - //RunTests("sign_test_multi2", SignTestMulti2) - //RunTests("sign_test_multi", SignTestMulti) - //RunTests("hosts_test_single", HostsTestSingle) - //RunTests("hosts_test_short", HostsTestShort) - //RunTests("hosts_test", HostsTest) - //RunTests("stamp_test_single", StampTestSingle) - //RunTests("sign_test_multi", SignTestMulti) - // test the testing framework - //RunTests("vote_test_no_signing.csv", VTest) - //RunTests("hosts_test", HostsTest) - // t := FailureTests - // RunTests("failure_test.csv", t) - // RunTests("vote_test", VotingTest) - // RunTests("failure_test", FailureTests) - //RunTests("sign_test", SignTest) - // t := FailureTests - // RunTests("failure_test", t) - // t = ScaleTest(10, 1, 100, 2) - // RunTests("scale_test.csv", t) - // how does the branching factor effect speed - // t = DepthTestFixed(100) - // RunTests("depth_test.csv", t) - - // load test the client - // t = RateLoadTest(40, 10) - // RunTests("load_rate_test_bf10.csv", t) - // t = RateLoadTest(40, 50) - // RunTests("load_rate_test_bf50.csv", t) -} - -// RunTests runs the given tests and puts the output into the -// given file name. It outputs RunStats in a CSV format. 
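// Each row in the old-style tables (the old/*.toml files above and the test
// slices in the deleted deploy/start.go) is a positional T literal whose field
// order is documented by the "nmachs, hpn, bf / rate, rounds, failures /
// rFail, fFail, testConnect, app" comment. Spelled out with field names
// (using an illustrative copy of the unexported T struct), the FailureTests
// row {0, 64, 16, 30, 50, 0, 5, 0, false, "coll_stamp"} reads as:
package example

type testCase struct { // mirrors deploy.T, for illustration only
	nmachs      int    // machines; 0 means "take the value from the deploy config"
	hpn         int    // hosts per node (replication factor)
	bf          int    // branching factor of the tree
	rate        int    // milliseconds between client messages
	rounds      int    // number of rounds to run
	failures    int    // failure percentage
	rFail       int    // rounds the root waits before failing
	fFail       int    // rounds a follower waits before failing
	testConnect bool   // only set up the connections, then quit
	app         string // application: "coll_stamp", "coll_sign", "schnorr_sign", ...
}

var failureRow = testCase{0, 64, 16, 30, 50, 0, 5, 0, false, "coll_stamp"}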
-func RunTests(name string, ts []T) { - for i, _ := range ts { - ts[i].nmachs = deploy_config.Nmachs - } - - MkTestDir() - rs := make([]RunStats, len(ts)) - f, err := os.OpenFile(TestFile(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660) - if err != nil { - log.Fatal("error opening test file:", err) - } - _, err = f.Write(rs[0].CSVHeader()) - if err != nil { - log.Fatal("error writing test file header:", err) - } - err = f.Sync() - if err != nil { - log.Fatal("error syncing test file:", err) - } - - nTimes := 1 - stopOnSuccess := true - for i, t := range ts { - // run test t nTimes times - // take the average of all successful runs - var runs []RunStats - for r := 0; r < nTimes; r++ { - run, err := RunTest(t) - if err != nil { - log.Fatalln("error running test:", err) - } - - if deployP.Stop() == nil { - runs = append(runs, run) - if stopOnSuccess { - break - } - } else { - dbg.Lvl1("Error for test ", r, " : ", err) - } - } - - if len(runs) == 0 { - dbg.Lvl1("unable to get any data for test:", t) - continue - } - - rs[i] = RunStatsAvg(runs) - //log.Println(fmt.Sprintf("Writing to CSV for %d: %+v", i, rs[i])) - _, err := f.Write(rs[i].CSV()) - if err != nil { - log.Fatal("error writing data to test file:", err) - } - err = f.Sync() - if err != nil { - log.Fatal("error syncing data to test file:", err) - } - - cl, err := os.OpenFile( - TestFile("client_latency_"+name+"_"+strconv.Itoa(i)), - os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660) - if err != nil { - log.Fatal("error opening test file:", err) - } - _, err = cl.Write(rs[i].TimesCSV()) - if err != nil { - log.Fatal("error writing client latencies to file:", err) - } - err = cl.Sync() - if err != nil { - log.Fatal("error syncing data to latency file:", err) - } - cl.Close() - - } -} - -// hpn, bf, nmsgsG -func RunTest(t T) (RunStats, error) { - // add timeout for 10 minutes? 
- done := make(chan struct{}) - var rs RunStats - cfg := &Config{ - t.nmachs, deploy_config.Nloggers, t.hpn, t.bf, - -1, t.rate, t.rounds, t.failures, t.rFail, t.fFail, - deploy_config.Debug, deploy_config.RootWait, t.app, deploy_config.Suite} - - dbg.Lvl1("Running test with parameters", cfg) - dbg.Lvl1("Failures percent is", t.failures) - - deployP.Configure(cfg) - deployP.Deploy() - err := deployP.Start() - if err != nil { - log.Fatal(err) - return rs, nil - } - - // give it a while to start up - time.Sleep(10 * time.Second) - - go func() { - rs = Monitor(t.bf) - deployP.Stop() - dbg.Lvl2("Test complete:", rs) - done <- struct{}{} - }() - - // timeout the command if it takes too long - select { - case <-done: - if isZero(rs.MinTime) || isZero(rs.MaxTime) || isZero(rs.AvgTime) || math.IsNaN(rs.Rate) || math.IsInf(rs.Rate, 0) { - return rs, errors.New(fmt.Sprintf("unable to get good data: %+v", rs)) - } - return rs, nil - /* No time out for the moment - case <-time.After(5 * time.Minute): - return rs, errors.New("timed out") - */ - } -} - -// high and low specify how many milliseconds between messages -func RateLoadTest(hpn, bf int) []T { - return []T{ - {0, hpn, bf, 5000, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // never send a message - {0, hpn, bf, 5000, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // one per round - {0, hpn, bf, 500, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // 10 per round - {0, hpn, bf, 50, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // 100 per round - {0, hpn, bf, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, // 1000 per round - } -} - -func DepthTest(hpn, low, high, step int) []T { - ts := make([]T, 0) - for bf := low; bf <= high; bf += step { - ts = append(ts, T{0, hpn, bf, 10, DefaultRounds, 0, 0, 0, false, "coll_stamp"}) - } - return ts -} - -func DepthTestFixed(hpn int) []T { - return []T{ - {0, hpn, 1, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 2, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 4, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 8, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 16, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 32, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 64, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 128, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 256, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - {0, hpn, 512, 30, DefaultRounds, 0, 0, 0, false, "coll_stamp"}, - } -} - -func ScaleTest(bf, low, high, mult int) []T { - ts := make([]T, 0) - for hpn := low; hpn <= high; hpn *= mult { - ts = append(ts, T{0, hpn, bf, 10, DefaultRounds, 0, 0, 0, false, "coll_stamp"}) - } - return ts -} - -// nmachs=32, hpn=128, bf=16, rate=500, failures=20, root failures, failures -var FailureTests = []T{ - {0, 64, 16, 30, 50, 0, 0, 0, false, "coll_stamp"}, - {0, 64, 16, 30, 50, 0, 5, 0, false, "coll_stamp"}, - {0, 64, 16, 30, 50, 0, 10, 0, false, "coll_stamp"}, - {0, 64, 16, 30, 50, 5, 0, 5, false, "coll_stamp"}, - {0, 64, 16, 30, 50, 5, 0, 10, false, "coll_stamp"}, - {0, 64, 16, 30, 50, 5, 0, 10, true, "coll_stamp"}, -} - -var VotingTest = []T{ - {0, 64, 16, 30, 50, 0, 0, 0, true, "coll_stamp"}, - {0, 64, 16, 30, 50, 0, 0, 0, false, "coll_stamp"}, -} - -func FullTests() []T { - var nmachs = []int{1, 16, 32} - var hpns = []int{1, 16, 32, 128} - var bfs = []int{2, 4, 8, 16, 128} - var rates = []int{5000, 500, 100, 30} - failures := 0 - - var tests []T - for _, nmach := range nmachs { - for _, hpn := 
range hpns { - for _, bf := range bfs { - for _, rate := range rates { - tests = append(tests, T{nmach, hpn, bf, rate, DefaultRounds, failures, 0, 0, false, "coll_stamp"}) - } - } - } - } - - return tests -} - -var HostsTest = []T{ - {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 2, 3, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 4, 3, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 8, 8, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 16, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 32, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_stamp"}, - {0, 128, 16, 30, 50, 0, 0, 0, false, "coll_stamp"}, -} - -var SignTest = []T{ - {0, 1, 2, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 2, 3, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 4, 3, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 8, 8, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 16, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 32, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 64, 16, 30, 20, 0, 0, 0, false, "coll_sign"}, - {0, 128, 16, 30, 50, 0, 0, 0, false, "coll_sign"}, -} - -var VTest = []T{ - {0, 1, 3, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 2, 4, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 4, 6, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 8, 8, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 16, 16, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 32, 16, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 64, 16, 10000000, 20, 0, 0, 0, false, "vote"}, - {0, 128, 16, 10000000, 20, 0, 0, 0, false, "vote"}, -} - -func MkTestDir() { - err := os.MkdirAll("test_data/", 0777) - if err != nil { - log.Fatal("failed to make test directory") - } -} - -func TestFile(name string) string { - return "test_data/" + name + ".csv" -} - -func isZero(f float64) bool { - return math.Abs(f) < 0.0000001 -} diff --git a/deploy/stats.go b/deploy/stats.go deleted file mode 100644 index 1b3572a58f..0000000000 --- a/deploy/stats.go +++ /dev/null @@ -1,198 +0,0 @@ -package deploy - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "math" - "net/http" - "runtime" - "strconv" - "time" -) - -type StatsEntry struct { - App string `json:"eapp"` - Host string `json:"ehost"` - Level string `json:"elevel"` - Msg string `json:"emsg"` - MsgTime string `json:"etime"` - File string `json:"file"` - Round int `json:"round"` - Time float64 `json:"time"` - Type string `json:"type"` -} - -type SysStats struct { - File string `json:"file"` - Type string `json:"type"` - SysTime float64 `json:"systime"` - UserTime float64 `json:"usertime"` -} - -type ClientMsgStats struct { - File string `json:"file"` - Type string `json:"type"` - Buckets []float64 `json:"buck,omitempty"` - RoundsAfter []float64 `json:"roundsAfter,omitempty"` - Times []float64 `json:"times,omitempty"` -} - -type RunStats struct { - NHosts int - Depth int - - BF int - - MinTime float64 - MaxTime float64 - AvgTime float64 - StdDev float64 - - SysTime float64 - UserTime float64 - - Rate float64 - Times []float64 -} - -func (s RunStats) CSVHeader() []byte { - var buf bytes.Buffer - buf.WriteString("hosts, depth, bf, min, max, avg, stddev, systime, usertime, rate\n") - return buf.Bytes() -} -func (s RunStats) CSV() []byte { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%d, %d, %d, %f, %f, %f, %f, %f, %f, %f\n", - s.NHosts, - s.Depth, - s.BF, - s.MinTime/1e9, - s.MaxTime/1e9, - s.AvgTime/1e9, - s.StdDev/1e9, - s.SysTime/1e9, - s.UserTime/1e9, - s.Rate) - return buf.Bytes() -} - -func (s RunStats) TimesCSV() []byte { - times := bytes.Buffer{} - 
times.WriteString("client_times\n") - for _, t := range s.Times { - times.WriteString(strconv.FormatFloat(t/1e9, 'f', 15, 64)) - times.WriteRune('\n') - } - return times.Bytes() -} - -func RunStatsAvg(rs []RunStats) RunStats { - if len(rs) == 0 { - return RunStats{} - } - r := RunStats{} - r.NHosts = rs[0].NHosts - r.Depth = rs[0].Depth - r.BF = rs[0].BF - r.Times = make([]float64, len(rs[0].Times)) - - for _, a := range rs { - r.MinTime += a.MinTime - r.MaxTime += a.MaxTime - r.AvgTime += a.AvgTime - r.StdDev += a.StdDev - r.SysTime += a.SysTime - r.UserTime += a.UserTime - r.Rate += a.Rate - r.Times = append(r.Times, a.Times...) - } - l := float64(len(rs)) - r.MinTime /= l - r.MaxTime /= l - r.AvgTime /= l - r.StdDev /= l - r.SysTime /= l - r.UserTime /= l - r.Rate /= l - return r -} - -type ExpVar struct { - Cmdline []string `json:"cmdline"` - Memstats runtime.MemStats `json:"memstats"` -} - -func Memstats(server string) (*ExpVar, error) { - url := "localhost:8081/d/" + server + "/debug/vars" - resp, err := http.Get(url) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, err - } - var evar ExpVar - err = json.Unmarshal(b, &evar) - if err != nil { - log.Println("failed to unmarshal expvar:", string(b)) - return nil, err - } - return &evar, nil -} - -func MonitorMemStats(server string, poll int, done chan struct{}, stats *[]*ExpVar) { - go func() { - ticker := time.NewTicker(time.Duration(poll) * time.Millisecond) - for { - select { - case <-ticker.C: - evar, err := Memstats(server) - if err != nil { - continue - } - *stats = append(*stats, evar) - case <-done: - return - } - } - }() -} - -func ArrStats(stream []float64) (avg float64, min float64, max float64, stddev float64) { - // truncate trailing 0s - i := len(stream) - 1 - for ; i >= 0; i-- { - if math.Abs(stream[i]) > 0.01 { - break - } - } - stream = stream[:i+1] - - k := float64(1) - first := true - var M, S float64 - for _, e := range stream { - if first { - first = false - min = e - max = e - } - if e < min { - min = e - } else if max < e { - max = e - } - avg = ((avg * (k - 1)) + e) / k - var tM = M - M += (e - tM) / k - S += (e - tM) * (e - M) - k++ - stddev = math.Sqrt(S / (k - 1)) - } - return avg, min, max, stddev -} diff --git a/lib/app/app.go b/lib/app/app.go new file mode 100644 index 0000000000..e4bedb79d5 --- /dev/null +++ b/lib/app/app.go @@ -0,0 +1,163 @@ +package app + +import ( + "flag" + _ "net/http/pprof" + + "bytes" + "github.com/BurntSushi/toml" + log "github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/dbg" + "io/ioutil" + "os" + "path/filepath" + "reflect" + + "github.com/dedis/cothority/lib/monitor" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/suites" + "time" +) + +type Flags struct { + Hostname string // Hostname like server-0.cs-dissent ? + Logger string // ip addr of the logger to connect to + PhysAddr string // physical IP addr of the host + AmRoot bool // is the host root (i.e. 
special operations) + TestConnect bool // Dylan-code to only test the connection and exit afterwards + Mode string // ["server", "client"] + Name string // Comes from deter.go:187 - "Name of the node" + Server string // Timestamping servers to contact +} + +// Initialize before 'init' so we can directly use the fields as parameters +// to 'Flag' +var RunFlags Flags + +func FlagInit() { + flag.StringVar(&RunFlags.Hostname, "hostname", "", "the hostname of this node") + flag.StringVar(&RunFlags.Logger, "logger", "", "remote logger") + flag.StringVar(&RunFlags.PhysAddr, "physaddr", "", "the physical address of the noded [for deterlab]") + flag.BoolVar(&RunFlags.AmRoot, "amroot", false, "am I root node") + flag.BoolVar(&RunFlags.TestConnect, "test_connect", false, "test connecting and disconnecting") + flag.StringVar(&RunFlags.Mode, "mode", RunFlags.Mode, "Run the app in [server,client] mode") + flag.StringVar(&RunFlags.Name, "name", RunFlags.Name, "Name of the node") + flag.StringVar(&RunFlags.Server, "server", "", "the timestamping servers to contact") +} + +/* + * Reads in the config for the application - + * also parses the init-flags and connects to + * the monitor. + */ +func ReadConfig(conf interface{}, dir ...string) { + var err error + err = ReadTomlConfig(conf, "app.toml", dir...) + if err != nil { + log.Fatal("Couldn't load app-config-file in exec") + } + debug := reflect.ValueOf(conf).Elem().FieldByName("Debug") + if debug.IsValid() { + dbg.DebugVisible = debug.Interface().(int) + } + FlagInit() + flag.Parse() + dbg.Lvlf3("Flags are %+v", RunFlags) + + if RunFlags.AmRoot { + if err := monitor.ConnectSink(RunFlags.Logger); err != nil { + dbg.Fatal("Couldn't connect to monitor", err) + } + } + + dbg.Lvl3("Running", RunFlags.Hostname, "with logger at", RunFlags.Logger) +} + +/* + * Writes any structure to a toml-file + * + * Takes a filename and an optional directory-name. + */ +func WriteTomlConfig(conf interface{}, filename string, dirOpt ...string) { + buf := new(bytes.Buffer) + if err := toml.NewEncoder(buf).Encode(conf); err != nil { + dbg.Fatal(err) + } + err := ioutil.WriteFile(getFullName(filename, dirOpt...), buf.Bytes(), 0660) + if err != nil { + dbg.Fatal(err) + } +} + +/* + * Reads any structure from a toml-file + * + * Takes a filename and an optional directory-name + */ +func ReadTomlConfig(conf interface{}, filename string, dirOpt ...string) error { + buf, err := ioutil.ReadFile(getFullName(filename, dirOpt...)) + if err != nil { + pwd, _ := os.Getwd() + dbg.Lvl1("Didn't find", filename, "in", pwd) + return err + } + + _, err = toml.Decode(string(buf), conf) + if err != nil { + dbg.Fatal(err) + } + + return nil +} + +// StartedUp waits for everybody to start by contacting the +// monitor. Argument is total number of peers. 
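// ReadTomlConfig / WriteTomlConfig above marshal an arbitrary struct to and
// from a TOML file, optionally in a given directory; this is how app.toml and
// deter.toml are handled. A minimal usage sketch with a made-up struct and
// file name, mirroring what app_test.go further below does:
package main

import (
	"fmt"

	"github.com/dedis/cothority/lib/app"
)

type demoConfig struct {
	Machines int
	Debug    int
}

func main() {
	in := demoConfig{Machines: 8, Debug: 1}
	// The optional last argument is the directory; this writes /tmp/demo.toml.
	app.WriteTomlConfig(in, "demo.toml", "/tmp")

	var out demoConfig
	if err := app.ReadTomlConfig(&out, "demo.toml", "/tmp"); err != nil {
		panic(err)
	}
	fmt.Printf("read back: %+v\n", out)
}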
+func (f Flags) StartedUp(total int) { + monitor.Ready(f.Logger) + // Wait for everybody to be ready before going on + for { + s, err := monitor.GetReady(f.Logger) + if err != nil { + dbg.Lvl1("Couldn't reach monitor") + } else { + if s.Ready != total { + dbg.Lvl4(f.Hostname, "waiting for others to finish", s.Ready, total) + } else { + break + } + time.Sleep(time.Second) + } + } + dbg.Lvl3(f.Hostname, "thinks everybody's here") +} + +/* + * Gets filename and dirname + * + * special cases: + * - filename only + * - filename in relative path + * - filename in absolute path + * - filename and additional path + */ +func getFullName(filename string, dirOpt ...string) string { + dir := filepath.Dir(filename) + if len(dirOpt) > 0 { + dir = dirOpt[0] + } else { + if dir == "" { + dir = "." + } + } + return dir + "/" + filepath.Base(filename) +} + +// Helper functions that will return the suite used during the process from a string name +func GetSuite(suite string) abstract.Suite { + s, ok := suites.All()[suite] + if !ok { + dbg.Lvl1("Suites available:", suites.All()) + dbg.Fatal("Didn't find suite", suite) + } + return s +} diff --git a/lib/app/app_test.go b/lib/app/app_test.go new file mode 100644 index 0000000000..9f1e7bddac --- /dev/null +++ b/lib/app/app_test.go @@ -0,0 +1,31 @@ +package app_test + +import ( + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/dbg" + "io/ioutil" + "testing" +) + +var testFileApp = `Machines = 8 +Debug = 1` +var testFileDeter = `Machines = 5` + +func TestReadConfig(t *testing.T) { + conf := app.ConfigColl{} + + dbg.DebugVisible = 5 + + writeFile("/tmp/app.toml", testFileApp) + writeFile("/tmp/deter.toml", testFileDeter) + + app.ReadConfig(&conf, "/tmp") + +} + +func writeFile(name string, content string) { + err := ioutil.WriteFile(name, []byte(content), 0666) + if err != nil { + dbg.Fatal("Couldn't create file:", err) + } +} diff --git a/lib/app/config_client.go b/lib/app/config_client.go new file mode 100644 index 0000000000..ed50395b8b --- /dev/null +++ b/lib/app/config_client.go @@ -0,0 +1,11 @@ +package app + +import ( + "github.com/dedis/crypto/abstract" +) + +// Struct used by the client containing infromations to verify signatures +type ConfigClient struct { + // Agreggated public keys ! 
+ K0 abstract.Point +} diff --git a/lib/app/config_coll.go b/lib/app/config_coll.go new file mode 100644 index 0000000000..cc6a493268 --- /dev/null +++ b/lib/app/config_coll.go @@ -0,0 +1,36 @@ +package app + +type ConfigColl struct { + *ConfigConode + + // ppm is the replication factor of hosts per node: how many hosts do we want per node + Ppm int + // bf is the branching factor of the tree that we build + Bf int + + // How many messages to send + Nmsgs int + // The speed of request stamping/ms + Rate int + // Percentage of stamp server we want to request on (0% = only leader) + StampRatio float64 + // How many rounds + Rounds int + // Pre-defined failure rate + Failures int + // Rounds for root to wait before failing + RFail int + // Rounds for follower to wait before failing + FFail int + // Debug-level + Debug int + + // How many stamps per round are we signing limiting rate + // if StampsPerRound == -1 ==> no limits + StampsPerRound int + + // RootWait - how long the root timestamper waits for the clients to start up + RootWait int + // Just set up the connections and then quit + TestConnect bool +} diff --git a/lib/app/config_conode.go b/lib/app/config_conode.go new file mode 100644 index 0000000000..2acf423c99 --- /dev/null +++ b/lib/app/config_conode.go @@ -0,0 +1,22 @@ +package app + +import ( + "github.com/dedis/cothority/lib/graphs" + "github.com/dedis/crypto/abstract" +) + +type ConfigConode struct { + // Coding-suite to run [nist256, nist512, ed25519] + Suite string + // Tree for knowing whom to connect + Tree *graphs.Tree + // hosts + Hosts []string + + // Aggregated long term public keys of all the peers in the tree + AggPubKey string + ////// Only used during process and never written to file ///// + // Private / public keys of your host + Secret abstract.Secret + Public abstract.Point +} diff --git a/lib/app/config_naive_sign.go b/lib/app/config_naive_sign.go new file mode 100644 index 0000000000..3463cf7c78 --- /dev/null +++ b/lib/app/config_naive_sign.go @@ -0,0 +1,25 @@ +package app + +import () + +// Configuration-structure for the 'naive' signing-implementation +type NaiveConfig struct { + // Hosts per node + Ppm int + + // A list of all hosts that will participate. 
The first one in the list + // is the master + Hosts []string + + // What suite to use - standard is ed25519 + Suite string + + // How many rounds to measure + Rounds int + + // The debug-level to use when running the application + Debug int + + // Whether to skip the checks + SkipChecks bool +} diff --git a/lib/app/config_ntree.go b/lib/app/config_ntree.go new file mode 100644 index 0000000000..216f65ad52 --- /dev/null +++ b/lib/app/config_ntree.go @@ -0,0 +1,27 @@ +package app + +import ( + "github.com/dedis/cothority/lib/graphs" +) + +type NTreeConfig struct { + Ppm int + + Bf int + + Suite string + + Rounds int + + Debug int + + Hosts []string + + Tree *graphs.Tree + + Name string + + Root bool + + SkipChecks bool +} diff --git a/lib/app/config_shamir.go b/lib/app/config_shamir.go new file mode 100644 index 0000000000..330695a7f8 --- /dev/null +++ b/lib/app/config_shamir.go @@ -0,0 +1,16 @@ +package app + +import () + +type ConfigShamir struct { + // ppm is the replication factor of hosts per node: how many hosts do we want per node + Ppm int + // All hostnames concatenated with the port-number to use + Hosts []string + // Coding-suite to run [nist256, nist512, ed25519] + Suite string + // How many rounds + Rounds int + // Debug-level + Debug int +} diff --git a/lib/cliutils/addresses.go b/lib/cliutils/addresses.go new file mode 100644 index 0000000000..b6c228cd8f --- /dev/null +++ b/lib/cliutils/addresses.go @@ -0,0 +1,79 @@ +package cliutils + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// This file handles manipulations of IP address with ports +// Like checking if an address contains a port, adding one etc + +var addressRegexp *regexp.Regexp + +func init() { + addressRegexp = regexp.MustCompile(`^[^:]*(:)(\d{1,5})?$`) +} + +// Checks if an address contains a port. If it does not, it add the +// given port to that and returns the new address. If it does, it returns +// directly. Both operation checks if the port is correct. +func VerifyPort(address string, port int) (string, error) { + p := strconv.Itoa(port) + subs := addressRegexp.FindStringSubmatch(address) + switch { + case len(subs) == 0: + // address does not contain a port + return address + ":" + p, checkPort(port) + case len(subs) == 3 && subs[2] == "": + // we got a addres: style ..?? 
+ return address + p, checkPort(port) + case len(subs) == 3: + // we got full address already address:port + sp, err := strconv.Atoi(subs[2]) + if err != nil { + return address, errors.New("Not a valid port-number given") + } + return address, checkPort(sp) + } + return address, errors.New("Could not anaylze address") +} + +// Returns the global-binding address +func GlobalBind(address string) (string, error) { + addr := strings.Split(address, ":") + if len(addr) != 2 { + return "", errors.New("Not a host:port address") + } + return "0.0.0.0:" + addr[1], nil +} + +// Gets the port-number, if none is found, returns +// 'def' +func GetPort(address string, def int) int { + if strings.Contains(address, ":") { + port, err := strconv.Atoi(strings.Split(address, ":")[1]) + if err == nil { + return port + } + } + return def +} + +// Gets the address-part and ignores the port +func GetAddress(address string) string { + if strings.Contains(address, ":") { + return strings.Split(address, ":")[0] + } + return address +} + +// Simply returns an error if the port is invalid +func checkPort(port int) error { + if port < 1 || port > 65535 { + return fmt.Errorf("Port number invalid %d !", port) + } + return nil +} diff --git a/lib/cliutils/addresses_test.go b/lib/cliutils/addresses_test.go new file mode 100644 index 0000000000..f5b08dc92d --- /dev/null +++ b/lib/cliutils/addresses_test.go @@ -0,0 +1,47 @@ +package cliutils + +import ( + "strconv" + "testing" +) + +func TestVerifyPort(t *testing.T) { + good := "abs:104" + medium := "abs:" + bad := "abs" + port := 1000 + ports := strconv.Itoa(port) + if na, err := VerifyPort(good, port); err != nil { + t.Error("VerifyPort should not generate any error", err) + } else if na != good { + t.Error("address should not have changed with a port number inside it") + } + if na, err := VerifyPort(medium, port); err != nil { + t.Error("VerifyPort should not gen any error", err) + } else if na != medium+ports { + t.Error("address should generated is not correct: added port") + } + if na, err := VerifyPort(bad, port); err != nil { + t.Error("VerifyPort should not gen any error", err) + } else if na != bad+":"+ports { + t.Error("address should generated is not correct: added port and:") + } +} + +func TestGetPort(t *testing.T) { + if GetPort("localhost", 2000) != 2000 { + t.Error("Didn't get correct default-port") + } + if GetPort("localhost:2001", 2000) != 2001 { + t.Error("Didn't extract correct port") + } +} + +func TestGetAddress(t *testing.T) { + if GetAddress("localhost") != "localhost" { + t.Error("Didn't get correct address for address-only") + } + if GetAddress("localhost:2000") != "localhost" { + t.Error("Didn't separate address and port") + } +} diff --git a/lib/cliutils/key.go b/lib/cliutils/key.go new file mode 100644 index 0000000000..bab89a51a4 --- /dev/null +++ b/lib/cliutils/key.go @@ -0,0 +1,190 @@ +package cliutils + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/config" + "github.com/dedis/crypto/random" + "io" + "io/ioutil" + "os" + "strings" +) + +// This file manage every operations related to keys +// KeyPair will generate a keypair (private + public key) from a given suite +func KeyPair(s abstract.Suite) config.KeyPair { + kp := config.KeyPair{} + kp.Gen(s, random.Stream) + return kp +} + +// WritePrivKey will write the private key into the filename given +// It takes a suite in order to adequatly write the secret +// Returns an error if anything went wrong during 
file handling or writing key +func WritePrivKey(suite abstract.Suite, fileName string, priv abstract.Secret) error { + // Opening file + privFile, err := os.OpenFile(fileName, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0744) + if err != nil { + return err + } + defer privFile.Close() + + // Writing down ! + err = suite.Write(privFile, priv) + if err != nil { + return err + } + privFile.WriteString("\n") + return nil +} + +// WritePubKey will write the public key into the filename using the suite +// 'prepend' is if you want to write something before the actual key like in ssh +// format hostname KEY_in_base_64 +// if before contains a space it will throw an error +// Returns an error if anything went wrong during file handling or writing key +func WritePubKey(suite abstract.Suite, fileName string, pub abstract.Point, prepend string) error { + + pubFile, err := os.OpenFile(fileName, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0744) + if err != nil { + return err + } + defer pubFile.Close() + + if strings.Contains(prepend, " ") { + return errors.New("The string to insert before public key contains some space. Invalid !") + } + pubFile.WriteString(prepend + " ") + + err = WritePub64(suite, pubFile, pub) + if err != nil { + return err + } + pubFile.WriteString("\n") + return nil +} + +// ReadPrivKey will read the file and decrypt the private key inside +// It takes a suite to decrypt and a filename to know where to read +// Returns the secret and an error if anything wrong occured +func ReadPrivKey(suite abstract.Suite, fileName string) (abstract.Secret, error) { + secret := suite.Secret() + // Opening files + privFile, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer privFile.Close() + + // Read the keys + err = suite.Read(privFile, &secret) + if err != nil { + return nil, err + } + return secret, nil +} + +// ReadPubKey will read the file and decrypt the public key inside +// It takes a suite to decrypt and a file name +// Returns the public key, whatever text is in front and an error if anything went wrong +func ReadPubKey(suite abstract.Suite, fileName string) (abstract.Point, string, error) { + + public := suite.Point() + // Opening files + pubFile, err := os.Open(fileName) + if err != nil { + return nil, "", err + } + defer pubFile.Close() + + // read the string before + by, err := ioutil.ReadAll(pubFile) + if err != nil { + return nil, "", errors.New(fmt.Sprintf("Error reading the whole file %s", err)) + } + splits := strings.Split(string(by), " ") + if len(splits) != 2 { + return nil, "", errors.New(fmt.Sprintf("Error reading pub key file format is not correct (val space val)")) + } + + before := splits[0] + key := strings.NewReader(splits[1]) + + // Some readings + public, err = ReadPub64(suite, key) + if err != nil { + return nil, "", errors.New(fmt.Sprintf("Error reading the public key itself: %s", err)) + } + + return public, before, nil + +} + +// Read a public point to a base64 representation +func ReadPub64(suite abstract.Suite, r io.Reader) (abstract.Point, error) { + public := suite.Point() + dec := base64.NewDecoder(base64.StdEncoding, r) + err := suite.Read(dec, &public) + return public, err +} + +// Write a public point to a base64 representation +func WritePub64(suite abstract.Suite, w io.Writer, point abstract.Point) error { + enc := base64.NewEncoder(base64.StdEncoding, w) + err := suite.Write(enc, point) + enc.Close() + return err +} + +func WriteSecret64(suite abstract.Suite, w io.Writer, secret abstract.Secret) error { + enc := 
base64.NewEncoder(base64.StdEncoding, w) + err := suite.Write(enc, secret) + enc.Close() + return err +} + +func ReadSecret64(suite abstract.Suite, r io.Reader) (abstract.Secret, error) { + sec := suite.Secret() + dec := base64.NewDecoder(base64.StdEncoding, r) + err := suite.Read(dec, &sec) + return sec, err +} + +// COnvert a Public point to a hexadecimal reprensation +func PubHex(suite abstract.Suite, point abstract.Point) (string, error) { + pbuf, err := point.MarshalBinary() + return hex.EncodeToString(pbuf), err +} + +// Read a hexadecimal representation of a public point and convert it to the +// right struct +func ReadPubHex(suite abstract.Suite, s string) (abstract.Point, error) { + encoded, err := hex.DecodeString(s) + if err != nil { + return nil, err + } + point := suite.Point() + err = point.UnmarshalBinary(encoded) + return point, err +} + +// Encode a secret to hexadecimal +func SecretHex(suite abstract.Suite, secret abstract.Secret) (string, error) { + sbuf, err := secret.MarshalBinary() + return hex.EncodeToString(sbuf), err +} + +// Read a secret in hexadceimal from string +func ReadSecretHex(suite abstract.Suite, str string) (abstract.Secret, error) { + enc, err := hex.DecodeString(str) + if err != nil { + return nil, err + } + sec := suite.Secret() + err = sec.UnmarshalBinary(enc) + return sec, err +} diff --git a/lib/cliutils/utils.go b/lib/cliutils/utils.go index 060ac3c013..39be2173aa 100644 --- a/lib/cliutils/utils.go +++ b/lib/cliutils/utils.go @@ -4,10 +4,7 @@ import ( "bufio" "bytes" "errors" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/crypto/abstract" - "github.com/dedis/crypto/config" - "github.com/dedis/crypto/random" + "github.com/dedis/cothority/lib/dbg" "io/ioutil" "os" "os/exec" @@ -15,11 +12,8 @@ import ( "time" ) -// KeyPair will generate a keypair (private + public key) from a given suite -func KeyPair(s abstract.Suite) config.KeyPair { - kp := config.KeyPair{} - kp.Gen(s, random.Stream) - return kp +func Boldify(s string) string { + return "\033[1m" + s + "\033[0m" } func ReadLines(filename string) ([]string, error) { @@ -61,7 +55,7 @@ func SshRun(username, host, command string) ([]byte, error) { cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", addr, "eval '"+command+"'") //log.Println(cmd) - cmd.Stderr = os.Stderr + //cmd.Stderr = os.Stderr return cmd.Output() } @@ -71,7 +65,7 @@ func SshRunStdout(username, host, command string) error { addr = username + "@" + addr } - dbg.Lvl4("Going to ssh to ", addr, command) + dbg.Lvl4("Going to ssh to", addr, command) cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", addr, "eval '"+command+"'") cmd.Stderr = os.Stderr @@ -95,8 +89,12 @@ func Build(path, out, goarch, goos string) (string, error) { var cmd *exec.Cmd var b bytes.Buffer build_buffer := bufio.NewWriter(&b) + + wd, _ := os.Getwd() + dbg.Lvl4("In directory", wd) + cmd = exec.Command("go", "build", "-v", "-o", out, path) - dbg.Lvl4("Building", path) + dbg.Lvl4("Building", cmd.Args, "in", path) cmd.Stdout = build_buffer cmd.Stderr = build_buffer cmd.Env = append([]string{"GOOS=" + goos, "GOARCH=" + goarch}, os.Environ()...) diff --git a/lib/coconet/conn.go b/lib/coconet/conn.go index 9441326988..cbe880868e 100644 --- a/lib/coconet/conn.go +++ b/lib/coconet/conn.go @@ -15,11 +15,11 @@ type Conn interface { SetPubKey(abstract.Point) // Put puts data to the connection, calling the MarshalBinary method as needed. 
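// The helpers in lib/cliutils/key.go above (KeyPair, WritePub64/ReadPub64,
// PubHex/ReadPubHex, ...) move keys between their base64 and hex encodings.
// An illustrative round trip, assuming the Public field of the KeyPair
// returned by dedis/crypto's config package; errors are ignored for brevity
// and "Ed25519" is the same suite name the simulation files use:
package main

import (
	"bytes"
	"fmt"

	"github.com/dedis/cothority/lib/app"
	"github.com/dedis/cothority/lib/cliutils"
)

func main() {
	suite := app.GetSuite("Ed25519")
	kp := cliutils.KeyPair(suite)

	// Base64, the format written next to the hostname in the pub-key files.
	var buf bytes.Buffer
	cliutils.WritePub64(suite, &buf, kp.Public)
	fromB64, _ := cliutils.ReadPub64(suite, &buf)
	fmt.Println("base64 round trip ok:", fromB64.Equal(kp.Public))

	// Hex round trip through PubHex / ReadPubHex.
	h, _ := cliutils.PubHex(suite, kp.Public)
	fromHex, _ := cliutils.ReadPubHex(suite, h)
	fmt.Println("hex round trip ok:", fromHex.Equal(kp.Public))
}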
- Put(data BinaryMarshaler) error + PutData(data BinaryMarshaler) error // Get gets data from the connection, calling the UnmarshalBinary method as needed. // It blocks until it successfully receives data or there was a network error. // It returns io.EOF if the channel has been closed. - Get(data BinaryUnmarshaler) error + GetData(data BinaryUnmarshaler) error // Connect establishes the connection. Before using the Put and Get // methods of a Conn, Connect must first be called. @@ -37,10 +37,12 @@ type Conn interface { // All messages passing through our conn must implement their own BinaryMarshaler type BinaryMarshaler interface { MarshalBinary() (data []byte, err error) + //MarshalJSON()([]byte, error) } // Taken from: http://golang.org/pkg/encoding/#BinaryMarshaler // All messages passing through our conn must implement their own BinaryUnmarshaler type BinaryUnmarshaler interface { UnmarshalBinary(data []byte) error + //UnmarshalJSON(data []byte) error } diff --git a/lib/coconet/goconn.go b/lib/coconet/goconn.go index adf1d80f0e..3ad9605e1e 100644 --- a/lib/coconet/goconn.go +++ b/lib/coconet/goconn.go @@ -140,7 +140,7 @@ func (c *GoConn) PubKey() abstract.Point { } // Put puts data on the connection. -func (c *GoConn) Put(data BinaryMarshaler) error { +func (c *GoConn) PutData(data BinaryMarshaler) error { if c.Closed() { return ErrClosed } @@ -179,7 +179,7 @@ retry: } // Get receives data from the sender. -func (c *GoConn) Get(bum BinaryUnmarshaler) error { +func (c *GoConn) GetData(bum BinaryUnmarshaler) error { if c.Closed() { return ErrClosed } diff --git a/lib/coconet/gohost.go b/lib/coconet/gohost.go index 369120321c..ccd163f42f 100644 --- a/lib/coconet/gohost.go +++ b/lib/coconet/gohost.go @@ -110,13 +110,13 @@ func (h *GoHost) ConnectTo(parent string) error { // send the hostname to the destination mname := StringMarshaler(h.Name()) - err := conn.Put(&mname) + err := conn.PutData(&mname) if err != nil { log.Fatal("failed to connect: putting name:", err) } // give the parent the public key - err = conn.Put(h.Pubkey) + err = conn.PutData(h.Pubkey) if err != nil { log.Fatal("failed to send public key:", err) } @@ -124,7 +124,7 @@ func (h *GoHost) ConnectTo(parent string) error { // get the public key of the parent suite := h.suite pubkey := suite.Point() - err = conn.Get(pubkey) + err = conn.GetData(pubkey) if err != nil { log.Fatal("failed to establish connection: getting pubkey:", err) } @@ -138,7 +138,7 @@ func (h *GoHost) ConnectTo(parent string) error { go func() { for { data := h.pool.Get().(BinaryUnmarshaler) - err := conn.Get(data) + err := conn.GetData(data) h.msgchan <- NetworkMessg{Data: data, From: conn.Name(), Err: err} } @@ -173,7 +173,7 @@ func (h *GoHost) Listen() error { h.PeerLock.Unlock() var mname StringMarshaler - err := conn.Get(&mname) + err := conn.GetData(&mname) if err != nil { log.Fatal("failed to establish connection: getting name:", err) } @@ -181,13 +181,13 @@ func (h *GoHost) Listen() error { suite := h.suite pubkey := suite.Point() - e := conn.Get(pubkey) + e := conn.GetData(pubkey) if e != nil { log.Fatal("unable to get pubkey from child") } conn.SetPubKey(pubkey) - err = conn.Put(h.Pubkey) + err = conn.PutData(h.Pubkey) if err != nil { log.Fatal("failed to send public key:", err) } @@ -200,7 +200,7 @@ func (h *GoHost) Listen() error { go func() { for { data := h.pool.Get().(BinaryUnmarshaler) - err := conn.Get(data) + err := conn.GetData(data) h.msgchan <- NetworkMessg{Data: data, From: conn.Name(), Err: err} } @@ -383,7 +383,7 @@ func (h 
*GoHost) PutTo(ctx context.Context, host string, data BinaryMarshaler) e if Ready { // if closed put will return ErrClosed - done <- parent.Put(data) + done <- parent.PutData(data) return } time.Sleep(250 * time.Millisecond) @@ -418,7 +418,7 @@ func (h *GoHost) PutUp(ctx context.Context, view int, data BinaryMarshaler) erro if Ready { // if closed put will return ErrClosed - done <- parent.Put(data) + done <- parent.PutData(data) return } time.Sleep(250 * time.Millisecond) @@ -458,7 +458,7 @@ func (h *GoHost) PutDown(ctx context.Context, view int, data []BinaryMarshaler) h.PeerLock.Unlock() if Ready { - e := conn.Put(data[i]) + e := conn.PutData(data[i]) if e != nil { errLock.Lock() err = e @@ -489,7 +489,7 @@ func (h *GoHost) PutDown(ctx context.Context, view int, data []BinaryMarshaler) // Get returns two channels. One of messages that are received, and another of errors // associated with each message. -func (h *GoHost) Get() chan NetworkMessg { +func (h *GoHost) GetNetworkMessg() chan NetworkMessg { return h.msgchan } diff --git a/lib/coconet/host.go b/lib/coconet/host.go index 72bcd5d3b2..a25089f7af 100644 --- a/lib/coconet/host.go +++ b/lib/coconet/host.go @@ -75,7 +75,7 @@ type Host interface { // Multiple listeners will receive disjoint sets of messages. // When receiving from the channels always recieve from both the network // messages channel as well as the error channel. - Get() chan NetworkMessg + GetNetworkMessg() chan NetworkMessg // Connect connects to the parent in the given view. Connect(view int) error diff --git a/lib/coconet/networkMessg.go b/lib/coconet/networkMessg.go index 16e491d032..0d9c533d09 100644 --- a/lib/coconet/networkMessg.go +++ b/lib/coconet/networkMessg.go @@ -1,6 +1,7 @@ package coconet import ( + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/protobuf" ) @@ -15,5 +16,6 @@ func (nm *NetworkMessg) MarshalBinary() ([]byte, error) { } func (nm *NetworkMessg) UnmarshalBinary(data []byte) error { + dbg.Lvl2("UnmarshalBinary:", len(data), "bytes") return protobuf.Decode(data, nm) } diff --git a/lib/coconet/tcpconn.go b/lib/coconet/tcpconn.go index 0cbf4efc1a..47b87c315d 100644 --- a/lib/coconet/tcpconn.go +++ b/lib/coconet/tcpconn.go @@ -1,18 +1,17 @@ package coconet import ( - "encoding/gob" + "encoding/json" "errors" - "math/rand" "net" "sync" - "time" //"runtime/debug" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/crypto/abstract" "io" + "strings" ) var Latency = 100 @@ -23,8 +22,8 @@ type TCPConn struct { encLock sync.Mutex name string conn net.Conn - enc *gob.Encoder - dec *gob.Decoder + enc *json.Encoder + dec *json.Decoder // pkLock guards the public key pkLock sync.Mutex @@ -41,8 +40,8 @@ func NewTCPConnFromNet(conn net.Conn) *TCPConn { return &TCPConn{ name: conn.RemoteAddr().String(), conn: conn, - enc: gob.NewEncoder(conn), - dec: gob.NewDecoder(conn)} + enc: json.NewEncoder(conn), + dec: json.NewDecoder(conn)} } @@ -69,8 +68,8 @@ func (tc *TCPConn) Connect() error { } tc.encLock.Lock() tc.conn = conn - tc.enc = gob.NewEncoder(conn) - tc.dec = gob.NewDecoder(conn) + tc.enc = json.NewEncoder(conn) + tc.dec = json.NewDecoder(conn) tc.encLock.Unlock() return nil } @@ -118,7 +117,7 @@ func IsTemporary(err error) bool { // Put puts data to the connection. // Returns io.EOF on an irrecoverable error. // Returns actual error if it is Temporary. 
-func (tc *TCPConn) Put(bm BinaryMarshaler) error { +func (tc *TCPConn) PutData(bm BinaryMarshaler) error { if tc.Closed() { dbg.Lvl3("tcpconn: put: connection closed") return ErrClosed @@ -143,9 +142,9 @@ func (tc *TCPConn) Put(bm BinaryMarshaler) error { } // Get gets data from the connection. -// Returns io.EOF on an irrecoveralbe error. +// Returns io.EOF on an irrecoverable error. // Returns given error if it is Temporary. -func (tc *TCPConn) Get(bum BinaryUnmarshaler) error { +func (tc *TCPConn) GetData(bum BinaryUnmarshaler) error { if tc.Closed() { dbg.Lvl3("tcpconn: get: connection closed") return ErrClosed @@ -158,20 +157,24 @@ func (tc *TCPConn) Get(bum BinaryUnmarshaler) error { dec := tc.dec tc.encLock.Unlock() - if Latency != 0 { - time.Sleep(time.Duration(rand.Intn(Latency)) * time.Millisecond) - } + //if Latency != 0 { + // time.Sleep(time.Duration(rand.Intn(Latency)) * time.Millisecond) + //} err := dec.Decode(bum) if err != nil { if IsTemporary(err) { + dbg.Lvl2("Temporary error") return err } // if it is an irrecoverable error // close the channel and return that it has been closed - if err != io.EOF && err.Error() != "read tcp4"{ - dbg.Lvl2("Couldn't decode packet at", tc.name, "error:", err) + if err == io.EOF || err.Error() == "read tcp4" { + dbg.Lvl3("Closing connection by EOF:", err) } else { - dbg.Lvl3("Closing connection by EOF") + if !strings.Contains(err.Error(), "use of closed") { + dbg.Lvl1("Couldn't decode packet at", tc.name, "error:", err) + dbg.Lvlf1("Packet was: %+v", bum) + } } tc.Close() return ErrClosed diff --git a/lib/coconet/tcphost.go b/lib/coconet/tcphost.go index ab4b59dc8c..6c1a42ffdd 100644 --- a/lib/coconet/tcphost.go +++ b/lib/coconet/tcphost.go @@ -9,7 +9,8 @@ import ( "time" log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/crypto/abstract" "golang.org/x/net/context" ) @@ -102,9 +103,13 @@ func (s *StringMarshaler) UnmarshalBinary(b []byte) error { func (h *TCPHost) Listen() error { var err error dbg.Lvl3("Starting to listen on", h.name) - ln, err := net.Listen("tcp4", h.name) + address, err := cliutils.GlobalBind(h.name) if err != nil { - log.Println("failed to listen:", err) + dbg.Fatal("Didn't get global binding for", address, err) + } + ln, err := net.Listen("tcp4", address) + if err != nil { + dbg.Lvl2("failed to listen on", address, ":", err) return err } h.listener = ln @@ -115,7 +120,7 @@ func (h *TCPHost) Listen() error { conn, err := ln.Accept() dbg.Lvl3(h.Name(), "Connection request - handling") if err != nil { - dbg.Lvl3("failed to accept connection: ", err) + dbg.Lvl3("failed to accept connection:", err) // if the host has been closed then stop listening if atomic.LoadInt64(&h.closed) == 1 { return @@ -126,9 +131,9 @@ func (h *TCPHost) Listen() error { // Read in name of client tp := NewTCPConnFromNet(conn) var mname StringMarshaler - err = tp.Get(&mname) + err = tp.GetData(&mname) if err != nil { - log.Errorln("failed to establish connection: getting name: ", err) + log.Errorln("failed to establish connection: getting name:", err) tp.Close() continue } @@ -140,7 +145,7 @@ func (h *TCPHost) Listen() error { // get and set public key suite := h.suite pubkey := suite.Point() - err = tp.Get(pubkey) + err = tp.GetData(pubkey) if err != nil { log.Errorln("failed to establish connection: getting pubkey:", err) tp.Close() @@ -149,7 +154,7 @@ func (h *TCPHost) Listen() error { tp.SetPubKey(pubkey) // give 
child the public key - err = tp.Put(h.Pubkey) + err = tp.PutData(h.Pubkey) if err != nil { log.Errorln("failed to send public key:", err) continue @@ -165,7 +170,7 @@ func (h *TCPHost) Listen() error { go func() { for { data := h.pool.Get().(BinaryUnmarshaler) - err := tp.Get(data) + err := tp.GetData(data) h.msgchan <- NetworkMessg{Data: data, From: tp.Name(), Err: err} } @@ -194,15 +199,15 @@ func (h *TCPHost) ConnectTo(parent string) error { tp := NewTCPConnFromNet(conn) mname := StringMarshaler(h.Name()) - err = tp.Put(&mname) + err = tp.PutData(&mname) if err != nil { - log.Errorln(err) + log.Errorln("Putting data error:", err) return err } tp.SetName(parent) // give parent the public key - err = tp.Put(h.Pubkey) + err = tp.PutData(h.Pubkey) if err != nil { log.Errorln("failed to send public key") return err @@ -211,7 +216,7 @@ func (h *TCPHost) ConnectTo(parent string) error { // get and set the parents public key suite := h.suite pubkey := suite.Point() - err = tp.Get(pubkey) + err = tp.GetData(pubkey) if err != nil { log.Errorln("failed to establish connection: getting pubkey:", err) tp.Close() @@ -224,12 +229,12 @@ func (h *TCPHost) ConnectTo(parent string) error { h.peers[parent] = tp // h.PendingPeers[parent] = true h.PeerLock.Unlock() - dbg.Lvl4("CONNECTED TO PARENT:", parent) + dbg.Lvl4("Connected to parent:", parent) go func() { for { data := h.pool.Get().(BinaryUnmarshaler) - err := tp.Get(data) + err := tp.GetData(data) h.msgchan <- NetworkMessg{Data: data, From: tp.Name(), Err: err} } @@ -276,10 +281,12 @@ func (h *TCPHost) NewViewFromPrev(view int, parent string) { // Close closes all the connections currently open. func (h *TCPHost) Close() { - dbg.Lvl3("tcphost: closing") + dbg.Lvl3("tcphost: closing", h, h.listener) // stop accepting new connections atomic.StoreInt64(&h.closed, 1) - h.listener.Close() + if h.listener != nil { + h.listener.Close() + } // close peer connections h.PeerLock.Lock() @@ -433,7 +440,7 @@ func (h *TCPHost) AddPeers(cs ...string) { } // ErrClosed indicates that the connection has been closed. -var ErrClosed = errors.New("connection closed") +var ErrClosed = errors.New("connection closed!") func (h *TCPHost) PutTo(ctx context.Context, host string, data BinaryMarshaler) error { pname := host @@ -464,7 +471,7 @@ func (h *TCPHost) PutTo(ctx context.Context, host string, data BinaryMarshaler) return } // if the connection has been closed put will fail - done <- parent.Put(data) + done <- parent.PutData(data) return } }() @@ -505,7 +512,7 @@ func (h *TCPHost) PutUp(ctx context.Context, view int, data BinaryMarshaler) err return } // if the connection has been closed put will fail - done <- parent.Put(data) + done <- parent.PutData(data) return } }() @@ -551,12 +558,12 @@ func (h *TCPHost) PutDown(ctx context.Context, view int, data []BinaryMarshaler) conn := h.peers[c] h.PeerLock.Unlock() if Ready { - if e := conn.Put(data[i]); e != nil { + if e := conn.PutData(data[i]); e != nil { errLock.Lock() err = e errLock.Unlock() } - dbg.Lvl4("Informed child", c) + dbg.Lvl4("Informed child", c, "of", data[i]) return } dbg.Lvl4("Re-trying, waiting to put down msg from", h.Name(), "to", c) @@ -586,7 +593,7 @@ func (h *TCPHost) PutDown(ctx context.Context, view int, data []BinaryMarshaler) // // TODO: each of these goroutines could be spawned when we initally connect to // them instead. 
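// After the renaming above (Put -> PutData and Get -> GetData on Conn,
// Get -> GetNetworkMessg on Host), anything sent over a coconet connection
// still only needs to satisfy the small BinaryMarshaler / BinaryUnmarshaler
// pair. A hypothetical message type and how it would be handed to an
// already-connected Conn; NetworkMessg itself uses dedis/protobuf rather than
// JSON, so this is only an illustration of the interface contract:
package example

import (
	"encoding/json"

	"github.com/dedis/cothority/lib/coconet"
)

// ping is a made-up message; any encoding works as long as both methods exist.
type ping struct {
	Seq  int
	Text string
}

func (p *ping) MarshalBinary() ([]byte, error)    { return json.Marshal(p) }
func (p *ping) UnmarshalBinary(data []byte) error { return json.Unmarshal(data, p) }

func send(conn coconet.Conn) error {
	// PutData marshals and writes; GetData is the blocking read counterpart.
	return conn.PutData(&ping{Seq: 1, Text: "hello"})
}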
-func (h *TCPHost) Get() chan NetworkMessg { +func (h *TCPHost) GetNetworkMessg() chan NetworkMessg { return h.msgchan } diff --git a/lib/coconet/view.go b/lib/coconet/view.go index d3fb792451..ee2cd1945a 100644 --- a/lib/coconet/view.go +++ b/lib/coconet/view.go @@ -5,7 +5,7 @@ import ( "sync" log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" ) type View struct { @@ -105,7 +105,7 @@ func (v *View) RemovePeerFromHostlist(name string) { } func (v *View) RemovePeer(name string) bool { - log.Println("LOOKING FOR ", name, "in HOSTLIST", v.HostList) + log.Println("LOOKING FOR", name, "in HOSTLIST", v.HostList) v.Lock() // make sure we don't remove our parent if v.Parent == name { @@ -237,7 +237,7 @@ func (v *Views) RemovePeer(view int, child string) bool { func (v *Views) Children(view int) []string { v.RLock() defer v.RUnlock() - if view < len(v.Views){ + if view < len(v.Views) { return v.Views[view].Children } else { return nil diff --git a/lib/config/config_test.go b/lib/config/config_test.go deleted file mode 100644 index cde362ce20..0000000000 --- a/lib/config/config_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package config - -import ( - "sync" - "testing" - - "github.com/dedis/cothority/proto/sign" -) - -func TestLoadConfig(t *testing.T) { - _, err := LoadConfig("../data/exconf.json") - if err != nil { - t.Error("error parsing json file:", err) - } -} - -func TestPubKeysConfig(t *testing.T) { - _, err := LoadConfig("../data/exconf.json", ConfigOptions{ConnType: "tcp", GenHosts: true}) - if err != nil { - t.Fatal("error parsing json file:", err) - } - // if err := ioutil.WriteFile("data/exconf_wkeys.json", []byte(hc.String()), 0666); err != nil { - // t.Fatal(err) - // } -} - -func TestPubKeysOneNode(t *testing.T) { - // has hosts 8089 - 9094 @ 172.27.187.80 - done := make(chan bool) - hosts := []string{ - ":6095", - ":6096", - ":6097", - ":6098", - ":6099", - ":6100"} - nodes := make(map[string]*sign.Node) - var mu sync.Mutex - var wg sync.WaitGroup - for _, host := range hosts { - wg.Add(1) - go func(host string) { - hc, err := LoadConfig("../data/exconf_wkeys.json", ConfigOptions{ConnType: "tcp", Host: host, Hostnames: hosts}) - if err != nil { - done <- true - t.Fatal(err) - } - - err = hc.Run(false, sign.MerkleTree, host) - if err != nil { - done <- true - t.Fatal(err) - } - - mu.Lock() - nodes[host] = hc.SNodes[0] - mu.Unlock() - - if hc.SNodes[0].IsRoot(0) { - hc.SNodes[0].LogTest = []byte("Hello World") - err = hc.SNodes[0].Announce(0, &sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest}) - if err != nil { - t.Fatal(err) - } - done <- true - hc.SNodes[0].Close() - } - wg.Done() - }(host) - } - <-done - wg.Wait() - for _, sn := range nodes { - sn.Close() - } -} - - diff --git a/lib/config/structs.go b/lib/config/structs.go deleted file mode 100644 index c92203a251..0000000000 --- a/lib/config/structs.go +++ /dev/null @@ -1,35 +0,0 @@ -package config - -import "github.com/dedis/cothority/lib/graphs" - -// This file has only the structures in it, for easy references - -type ConfigFile struct { - Hosts []string `json:"hosts"` - Tree *graphs.Tree `json:"tree"` -} - -type ConfigFileOld struct { - Conn string `json:"conn,omitempty"` - Hosts []string `json:"hosts"` - Tree *Node `json:"tree"` -} - -// Simplest config representig the type of connection we want to do (tcp / goroutines ?) 
-// and the list of hostnames like "10.0.4.10:2000 -type HostsConfig struct { - Conn string `json:"conn,omitempty"` - Hosts []string `json:"hosts"` -} - -type AppConfig struct { - Hostname string // Hostname like server-0.cs-dissent ? - Logger string // ip addr of the logger to connect to - PhysAddr string // physical IP addr of the host - AmRoot bool // is the host root (i.e. special operations) - TestConnect bool // Dylan-code to only test the connection and exit afterwards - App string // which app are we running on this host ["coll_sign","coll_stamp","schnorr_sign"] - Mode string // ["server", "client"] - Name string // Comes from deter.go:187 - "Name of the node" - Server string // Timestamping servers to contact -} diff --git a/lib/conode/peer.go b/lib/conode/peer.go new file mode 100644 index 0000000000..e00f4f1cf4 --- /dev/null +++ b/lib/conode/peer.go @@ -0,0 +1,199 @@ +package conode + +import ( + "sync" + "time" + + "github.com/dedis/cothority/lib/dbg" + + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/graphs" + "github.com/dedis/cothority/lib/sign" + "github.com/dedis/crypto/abstract" + "strings" +) + +/* +This will run rounds with RoundCosiStamper while listening for +incoming requests through StampListener. +*/ + +type Peer struct { + *sign.Node + + conf *app.ConfigConode + + RLock sync.Mutex + CloseChan chan bool + Closed bool + + Logger string + Hostname string +} + +// NewPeer returns a peer that can be used to set up +// connections. +func NewPeer(address string, conf *app.ConfigConode) *Peer { + suite := app.GetSuite(conf.Suite) + + var err error + // make sure address has a port or insert default one + address, err = cliutils.VerifyPort(address, DefaultPort) + if err != nil { + dbg.Fatal(err) + } + + // For retro compatibility issues, convert the base64 encoded key into hex + // encoded keys.... + convertTree(suite, conf.Tree) + // Add our private key to the tree (compatibility issues again with graphs/ + // lib) + addPrivateKey(suite, address, conf) + // load the configuration + dbg.Lvl3("loading configuration") + var hc *graphs.HostConfig + opts := graphs.ConfigOptions{ConnType: "tcp", Host: address, Suite: suite} + + hc, err = graphs.LoadConfig(conf.Hosts, conf.Tree, suite, opts) + if err != nil { + dbg.Fatal(err) + } + + // Listen to stamp-requests on port 2001 + node := hc.Hosts[address] + peer := &Peer{ + conf: conf, + Node: node, + RLock: sync.Mutex{}, + CloseChan: make(chan bool, 5), + Hostname: address, + } + + // Start the cothority-listener on port 2000 + err = hc.Run(true, sign.MerkleTree, address) + if err != nil { + dbg.Fatal(err) + } + + go func() { + err := peer.Node.Listen() + dbg.Lvl3("Node.listen quits with status", err) + peer.CloseChan <- true + peer.Close() + }() + return peer +} + +// LoopRounds starts the system by sending a round of type +// 'roundType' every second for number of 'rounds'. +// If 'rounds' < 0, it loops forever, or until you call +// peer.Close(). +func (peer *Peer) LoopRounds(roundType string, rounds int) { + dbg.Lvl3("Stamp-server", peer.Node.Name(), "starting with IsRoot=", peer.IsRoot(peer.ViewNo)) + ticker := time.NewTicker(sign.ROUND_TIME) + firstRound := peer.Node.LastRound() + if !peer.IsRoot(peer.ViewNo) { + // Children don't need to tick, only the root. 
+ ticker.Stop() + } + + for { + select { + case nextRole := <-peer.ViewChangeCh(): + dbg.Lvl2(peer.Name(), "assuming next role is", nextRole) + case <-peer.CloseChan: + dbg.Lvl3("Server-peer", peer.Name(), "has closed the connection") + return + case <-ticker.C: + dbg.Lvl3("Ticker is firing in", peer.Hostname) + roundNbr := peer.LastRound() - firstRound + if roundNbr >= rounds && rounds >= 0 { + dbg.Lvl3(peer.Name(), "reached max round: closing", + roundNbr, ">=", rounds) + ticker.Stop() + if peer.IsRoot(peer.ViewNo) { + dbg.Lvl3("As I'm root, asking everybody to terminate") + peer.SendCloseAll() + } + } else { + if peer.IsRoot(peer.ViewNo) { + dbg.Lvl2(peer.Name(), "Stamp server in round", + roundNbr+1, "of", rounds) + round, err := sign.NewRoundFromType(roundType, peer.Node) + if err != nil { + dbg.Fatal("Couldn't create", roundType, err) + } + err = peer.StartAnnouncement(round) + if err != nil { + dbg.Lvl3(err) + time.Sleep(1 * time.Second) + break + } + } else { + dbg.Lvl3(peer.Name(), "running as regular") + } + } + } + } +} + +// Sends the 'CloseAll' to everybody +func (peer *Peer) SendCloseAll() { + peer.Node.CloseAll(peer.Node.ViewNo) + peer.Node.Close() +} + +// Closes the channel +func (peer *Peer) Close() { + if peer.Closed { + dbg.Lvl1("Peer", peer.Name(), "Already closed!") + return + } else { + peer.Closed = true + } + peer.CloseChan <- true + peer.Node.Close() + StampListenersClose() + dbg.Lvlf3("Closing of peer: %s finished", peer.Name()) +} + +// Simple ephemeral helper for compatibility issues +// From base64 => hexadecimal +func convertTree(suite abstract.Suite, t *graphs.Tree) { + if t.PubKey != "" { + point, err := cliutils.ReadPub64(suite, strings.NewReader(t.PubKey)) + if err != nil { + dbg.Fatal("Could not decode base64 public key") + } + + str, err := cliutils.PubHex(suite, point) + if err != nil { + dbg.Fatal("Could not encode point to hexadecimal") + } + t.PubKey = str + } + for _, c := range t.Children { + convertTree(suite, c) + } +} + +// Add our own private key in the tree. This function exists because of +// compatibility issues with the graphs/lib. 
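To see how the pieces of the new `Peer` type fit together, here is a minimal, hypothetical sketch of a conode's lifecycle — read a TOML config, build the peer, loop a few rounds, close. The config file name and the round count are assumptions; the tests added further down in this patch drive the same calls.

```
package example

import (
	"github.com/dedis/cothority/lib/app"
	"github.com/dedis/cothority/lib/conode"
	"github.com/dedis/cothority/lib/dbg"
)

// runConode is illustrative only: it wires together NewPeer, LoopRounds and
// Close the same way the peer tests below do.
func runConode(address string) {
	conf := &app.ConfigConode{}
	// "config.toml" is an assumed file name holding the tree and the keys
	if err := app.ReadTomlConfig(conf, "config.toml"); err != nil {
		dbg.Fatal("couldn't read config:", err)
	}
	peer := conode.NewPeer(address, conf)
	// run ten stamping rounds (a negative count would loop forever),
	// then shut the peer down
	peer.LoopRounds(conode.RoundStamperListenerType, 10)
	peer.Close()
}
```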
+func addPrivateKey(suite abstract.Suite, address string, conf *app.ConfigConode) { + fn := func(t *graphs.Tree) { + // this is our node in the tree + if t.Name == address { + if conf.Secret != nil { + // convert to hexa + s, err := cliutils.SecretHex(suite, conf.Secret) + if err != nil { + dbg.Fatal("Error converting our secret key to hexadecimal") + } + // adds it + t.PriKey = s + } + } + } + conf.Tree.TraverseTree(fn) +} diff --git a/lib/conode/peer_test.go b/lib/conode/peer_test.go new file mode 100644 index 0000000000..cda314373d --- /dev/null +++ b/lib/conode/peer_test.go @@ -0,0 +1,157 @@ +package conode_test + +import ( + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/graphs" + "github.com/dedis/cothority/lib/sign" + "strconv" + "testing" + "time" +) + +func TestStampListener(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + peer1, peer2 := createPeers() + + round1 := conode.NewRoundStamperListener(peer1.Node) + round2, err := sign.NewRoundFromType(conode.RoundStamperListenerType, peer1.Node) + + if err != nil { + dbg.Fatal("Error when creating round:", err) + } + + dbg.Lvlf2("Round1: %+v", round1) + dbg.Lvlf2("Round2: %+v", round2) + name1, name2 := round1.Name, round2.(*conode.RoundStamperListener).Name + if name1 != name2 { + t.Fatal("Hostname of first round is", name1, "and should be equal to", name2) + } + peer1.Close() + peer2.Close() +} + +// Can we build the Peer without a valid key? +func TestEmptyKeys(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + conf1 := readConfig() + emptyKeys(conf1.Tree) + peer1 := createPeer(conf1, 1) + dbg.Lvlf3("Peer 1 is %+v", peer1) + + conf2 := readConfig() + emptyKeys(conf2.Tree) + peer2 := createPeer(conf2, 1) + dbg.Lvlf3("Peer 1 is %+v", peer2) + + go peer1.LoopRounds(sign.RoundCosiType, 2) + go peer2.LoopRounds(sign.RoundCosiType, 2) + + time.Sleep(time.Second * 2) + + peer1.Close() + peer2.Close() +} + +// Make sure closeall sends messages to everybody +func TestCloseAll(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + peer1, peer2 := createPeers() + + // Launch peers in endless loop + go peer1.LoopRounds(sign.RoundCosiType, -1) + go peer2.LoopRounds(sign.RoundCosiType, -1) + + // Send CloseAll manually + peer1.SendCloseAll() + time.Sleep(time.Second) + if !peer1.Closed { + t.Fatal("Peer 1 should be closed now.") + } + if !peer2.Closed { + t.Fatal("Peer 2 should be closed now.") + } + + // Now let's just wait for two rounds + peer1, peer2 = createPeers() + go peer1.LoopRounds(sign.RoundCosiType, 2) + go peer2.LoopRounds(sign.RoundCosiType, 2) + time.Sleep(time.Second * 4) + if !peer1.Closed { + t.Fatal("Peer 1 should be closed now.") + } + if !peer2.Closed { + t.Fatal("Peer 2 should be closed now.") + } +} + +// What happens if client closes before server does? 
+func TestClientBeforeServer(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + peer1, peer2 := createPeers() + + peer2.Close() + time.Sleep(time.Second) + + round, err := sign.NewRoundFromType(sign.RoundCosiType, peer1.Node) + if err != nil { + t.Fatal("Error while creating round:", err) + } + + peer1.StartAnnouncement(round) + time.Sleep(time.Second) + + peer1.Close() +} + +func emptyKeys(t *graphs.Tree) { + t.PriKey = "" + t.PubKey = "" + for _, c := range t.Children { + emptyKeys(c) + } +} + +func createPeers() (p1, p2 *conode.Peer) { + conf1 := readConfig() + peer1 := createPeer(conf1, 1) + dbg.Lvlf3("Peer 1 is %+v", peer1) + + // conf will hold part of the configuration for each server, + // so we have to create a second one for the second server + conf2 := readConfig() + peer2 := createPeer(conf2, 2) + dbg.Lvlf3("Peer 2 is %+v", peer2) + + return peer1, peer2 +} + +func createPeer(conf *app.ConfigConode, id int) *conode.Peer { + // Read the private / public keys + binded address + keybase := "testdata/key" + strconv.Itoa(id) + address := "" + if sec, err := cliutils.ReadPrivKey(suite, keybase+".priv"); err != nil { + dbg.Fatal("Error reading private key file :", err) + } else { + conf.Secret = sec + } + if pub, addr, err := cliutils.ReadPubKey(suite, keybase+".pub"); err != nil { + dbg.Fatal("Error reading public key file :", err) + } else { + conf.Public = pub + address = addr + } + return conode.NewPeer(address, conf) +} + +func readConfig() *app.ConfigConode { + conf := &app.ConfigConode{} + if err := app.ReadTomlConfig(conf, "testdata/config.toml"); err != nil { + dbg.Fatal("Could not read toml config... : ", err) + } + dbg.Lvl2("Configuration file read") + suite = app.GetSuite(conf.Suite) + return conf +} diff --git a/lib/conode/round_test.go b/lib/conode/round_test.go new file mode 100644 index 0000000000..95fa2df66a --- /dev/null +++ b/lib/conode/round_test.go @@ -0,0 +1,137 @@ +package conode_test + +import ( + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/sign" + "testing" + "time" +) + +// Tests if the rounds are deleted when done +func TestDeleteRounds(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + peer1, peer2 := createPeers() + + if len(peer1.Rounds) != 0 { + t.Fatal("There should be 0 rounds to start with") + } + + round, err := sign.NewRoundFromType(conode.RoundStamperListenerType, peer1.Node) + if err != nil { + t.Fatal("Couldn't create cosi-round") + } + + peer1.StartAnnouncement(round) + if len(peer1.Rounds) != 1 { + t.Fatal("Created one round - should be there") + } + + time.Sleep(time.Second) + + if len(peer1.Rounds) != 0 { + t.Fatal("Doing one round shouldn't take more than 1 second") + } + + peer1.Close() + peer2.Close() +} + +func TestRoundException(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + peer1, peer2 := createPeers() + sign.ExceptionForceFailure = peer2.Name() + + round, err := sign.NewRoundFromType(sign.RoundExceptionType, peer1.Node) + if err != nil { + t.Fatal("Couldn't create Exception round:", err) + } + + peer1.StartAnnouncement(round) + time.Sleep(time.Second) + + cosi := round.(*sign.RoundException).Cosi + if cosi.R_hat == nil { + t.Fatal("Didn't finish round - R_hat empty") + } + err = cosi.VerifyResponses() + if err != nil { + t.Fatal("Couldn't verify responses") + } + peer1.Close() + peer2.Close() +} + +func TestRoundCosi(t *testing.T) { + testRound(t, sign.RoundCosiType) +} + +func TestRoundStamper(t *testing.T) { + testRound(t, 
conode.RoundStamperType) +} + +func TestRoundCosiStamper(t *testing.T) { + testRound(t, conode.RoundStamperListenerType) +} + +func TestRoundSetup(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + roundType := "setup" + dbg.Lvl2("Testing", roundType) + peer1, peer2 := createPeers() + + round, err := sign.NewRoundFromType(roundType, peer1.Node) + if err != nil { + t.Fatal("Couldn't create", roundType, "round:", err) + } + + peer1.StartAnnouncement(round) + time.Sleep(time.Second) + + counted := <-round.(*sign.RoundSetup).Counted + if counted != 2 { + t.Fatal("Counted", counted, "nodes, but should be 2") + } + + peer1.Close() + peer2.Close() +} + +// For testing the different round-types +// Every round-type is in his own Test*-method, +// so one can easily run just a given round-test +func testRound(t *testing.T, roundType string) { + dbg.TestOutput(testing.Verbose(), 4) + dbg.Lvl2("Testing", roundType) + peer1, peer2 := createPeers() + + round, err := sign.NewRoundFromType(roundType, peer1.Node) + if err != nil { + t.Fatal("Couldn't create", roundType, "round:", err) + } + + peer1.StartAnnouncement(round) + time.Sleep(time.Second) + + var cosi *sign.CosiStruct + switch roundType { + case sign.RoundCosiType: + cosi = round.(*sign.RoundCosi).Cosi + case sign.RoundExceptionType: + cosi = round.(*sign.RoundException).Cosi + case conode.RoundStamperType: + cosi = round.(*conode.RoundStamper).Cosi + case conode.RoundStamperListenerType: + cosi = round.(*conode.RoundStamperListener).Cosi + } + if cosi.R_hat == nil { + t.Fatal("Didn't finish round - R_hat empty") + } + err = cosi.VerifyResponses() + if err != nil { + t.Fatal("Couldn't verify responses") + } + + peer1.Close() + peer2.Close() +} diff --git a/lib/conode/roundstamper.go b/lib/conode/roundstamper.go new file mode 100644 index 0000000000..6996ae8440 --- /dev/null +++ b/lib/conode/roundstamper.go @@ -0,0 +1,138 @@ +package conode + +import ( + "bytes" + "encoding/binary" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/hashid" + "github.com/dedis/cothority/lib/proof" + "github.com/dedis/cothority/lib/sign" + "strconv" + "time" +) + +/* +Implements a merkle-tree hasher for incoming messages that +are passed to roundcosi. +*/ + +const RoundStamperType = "stamper" + +type RoundStamper struct { + *sign.RoundCosi + Timestamp int64 + + Proof []hashid.HashId // the inclusion-proof of the data + MTRoot hashid.HashId // mt root for subtree, passed upwards + StampLeaves []hashid.HashId + StampRoot hashid.HashId + StampProofs []proof.Proof + StampQueue [][]byte + CombProofs []proof.Proof +} + +func init() { + sign.RegisterRoundFactory(RoundStamperType, + func(s *sign.Node) sign.Round { + return NewRoundStamper(s) + }) +} + +func NewRoundStamper(node *sign.Node) *RoundStamper { + dbg.Lvlf3("Making new stamperlistener %+v", node) + round := &RoundStamper{} + round.RoundCosi = sign.NewRoundCosi(node) + round.Type = RoundStamperType + return round +} + +func (round *RoundStamper) Announcement(viewNbr, roundNbr int, in *sign.SigningMessage, out []*sign.SigningMessage) error { + dbg.Lvl3("New roundstamper announcement in round-nbr", roundNbr) + if round.IsRoot { + // We are root ! 
+ // Adding timestamp + ts := time.Now().UTC() + var b bytes.Buffer + round.Timestamp = ts.Unix() + binary.Write(&b, binary.LittleEndian, ts.Unix()) + in.Am.Message = b.Bytes() + } else { + // otherwise decode it + var t int64 + if err := binary.Read(bytes.NewBuffer(in.Am.Message), binary.LittleEndian, &t); err != nil { + dbg.Lvl1("Unmashaling timestamp has failed") + } + dbg.Lvl3("Received timestamp:", t) + round.Timestamp = t + } + round.RoundCosi.Announcement(viewNbr, roundNbr, in, out) + return nil +} + +func (round *RoundStamper) Commitment(in []*sign.SigningMessage, out *sign.SigningMessage) error { + // compute the local Merkle root + + // give up if nothing to process + if len(round.StampQueue) == 0 { + round.StampRoot = make([]byte, hashid.Size) + round.StampProofs = make([]proof.Proof, 1) + } else { + // pull out to be Merkle Tree leaves + round.StampLeaves = make([]hashid.HashId, 0) + for _, msg := range round.StampQueue { + round.StampLeaves = append(round.StampLeaves, hashid.HashId(msg)) + } + + // create Merkle tree for this round's messages and check corectness + round.StampRoot, round.StampProofs = proof.ProofTree(round.Suite.Hash, round.StampLeaves) + if dbg.DebugVisible > 2 { + if proof.CheckLocalProofs(round.Suite.Hash, round.StampRoot, round.StampLeaves, round.StampProofs) == true { + dbg.Lvl4("Local Proofs of", round.Name, "successful for round "+ + strconv.Itoa(round.RoundNbr)) + } else { + panic("Local Proofs" + round.Name + " unsuccessful for round " + + strconv.Itoa(round.RoundNbr)) + } + } + } + out.Com.MTRoot = round.StampRoot + round.RoundCosi.Commitment(in, out) + return nil +} + +func (round *RoundStamper) QueueSet(queue [][]byte) { + round.StampQueue = make([][]byte, len(queue)) + copy(round.StampQueue, queue) +} + +// Challenge is already defined in RoundCosi + +// Response is already defined in RoundCosi + +func (round *RoundStamper) SignatureBroadcast(in *sign.SigningMessage, out []*sign.SigningMessage) error { + round.RoundCosi.SignatureBroadcast(in, out) + round.Proof = round.RoundCosi.Cosi.Proof + round.MTRoot = round.RoundCosi.Cosi.MTRoot + + round.CombProofs = make([]proof.Proof, len(round.StampQueue)) + // Send back signature to clients + for i, msg := range round.StampQueue { + // proof to get from s.Root to big root + combProof := make(proof.Proof, len(round.Proof)) + copy(combProof, round.Proof) + + // add my proof to get from a leaf message to my root s.Root + combProof = append(combProof, round.StampProofs[i]...) 
+ + // proof that I can get from a leaf message to the big root + if proof.CheckProof(round.Suite.Hash, round.MTRoot, + round.StampLeaves[i], combProof) { + dbg.Lvl2("Proof is OK for msg", msg) + } else { + dbg.Lvl2("Inclusion-proof failed") + } + + round.CombProofs[i] = combProof + } + return nil +} diff --git a/lib/conode/roundstamperlistener.go b/lib/conode/roundstamperlistener.go new file mode 100644 index 0000000000..1bc4afe7b4 --- /dev/null +++ b/lib/conode/roundstamperlistener.go @@ -0,0 +1,121 @@ +package conode + +import ( + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/sign" +) + +/* +Implements a Stamper and a Cosi-round +*/ + +const RoundStamperListenerType = "stamperlistener" + +type RoundStamperListener struct { + *StampListener + *RoundStamper + ClientQueue []ReplyMessage + roundMessages int +} + +type ReplyMessage struct { + Val []byte + To string + ReqNo byte +} + +func init() { + sign.RegisterRoundFactory(RoundStamperListenerType, + func(node *sign.Node) sign.Round { + return NewRoundStamperListener(node) + }) +} + +func NewRoundStamperListener(node *sign.Node) *RoundStamperListener { + dbg.Lvlf3("Making new roundStamperListener %+v", node) + round := &RoundStamperListener{} + round.StampListener = NewStampListener(node.Name()) + round.RoundStamper = NewRoundStamper(node) + round.Type = RoundStamperListenerType + return round +} + +// Announcement is already defined in RoundStamper + +func (round *RoundStamperListener) Commitment(in []*sign.SigningMessage, out *sign.SigningMessage) error { + round.Mux.Lock() + // messages read will now be processed + round.Queue[READING], round.Queue[PROCESSING] = round.Queue[PROCESSING], round.Queue[READING] + round.Queue[READING] = round.Queue[READING][:0] + msgs := len(round.Queue[PROCESSING]) + out.Com.Messages = msgs + for _, m := range in { + out.Com.Messages += m.Com.Messages + } + if round.IsRoot { + round.roundMessages = out.Com.Messages + round.Node.Messages += out.Com.Messages + } + + round.ClientQueue = make([]ReplyMessage, msgs) + queue := make([][]byte, len(round.Queue[PROCESSING])) + for i, q := range round.Queue[PROCESSING] { + queue[i] = q.Tsm.Sreq.Val + round.ClientQueue[i] = ReplyMessage{ + Val: q.Tsm.Sreq.Val, + To: q.To, + ReqNo: byte(q.Tsm.ReqNo), + } + } + // get data from s once to avoid refetching from structure + round.RoundStamper.QueueSet(queue) + round.Mux.Unlock() + + round.RoundStamper.Commitment(in, out) + return nil +} + +// Challenge is already defined in RoundStamper + +// Response is already defined in RoundStamper + +func (round *RoundStamperListener) SignatureBroadcast(in *sign.SigningMessage, out []*sign.SigningMessage) error { + round.RoundStamper.SignatureBroadcast(in, out) + if round.IsRoot { + in.SBm.Messages = round.roundMessages + } + for _, o := range out { + o.SBm.Messages = in.SBm.Messages + } + for i, msg := range round.ClientQueue { + respMessg := &TimeStampMessage{ + Type: StampSignatureType, + ReqNo: SeqNo(msg.ReqNo), + Srep: &StampSignature{ + SuiteStr: round.Suite.String(), + Timestamp: round.Timestamp, + MerkleRoot: round.MTRoot, + Prf: round.RoundStamper.CombProofs[i], + Response: in.SBm.R0_hat, + Challenge: in.SBm.C, + AggCommit: in.SBm.V0_hat, + AggPublic: in.SBm.X0_hat, + }} + round.PutToClient(msg.To, respMessg) + dbg.Lvl2("Sent signature response back to client", msg.To) + } + return nil +} + +// Send message to client given by name +func (round *RoundStamperListener) PutToClient(name string, data 
coconet.BinaryMarshaler) { + err := round.Clients[name].PutData(data) + if err == coconet.ErrClosed { + round.Clients[name].Close() + return + } + if err != nil && err != coconet.ErrNotEstablished { + dbg.Lvl1("%p error putting to client: %v", round, err) + } +} diff --git a/lib/conode/stamp.go b/lib/conode/stamp.go new file mode 100644 index 0000000000..76206e6458 --- /dev/null +++ b/lib/conode/stamp.go @@ -0,0 +1,141 @@ +package conode + +import ( + "bytes" + "encoding/base64" + "fmt" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" + "math/rand" + "strconv" + "strings" +) + +/* + * This is a simple interface to get a string stamped by + * a cothority. It can be used as standalone or in an + * application that needs collective signing from an existing + * cothority. + */ + +type Stamp struct { + Config app.ConfigConode + X0 abstract.Point + Suite abstract.Suite + conn *coconet.TCPConn +} + +// NewStamp initializes a new stamp-client by reading all +// configuration from a "config.toml"-file. +// If an error occurs, it is returned by the second argument. +// It also initializes X0 and Suite for later use. +func NewStamp(file string) (*Stamp, error) { + s := &Stamp{} + err := app.ReadTomlConfig(&s.Config, file) + if err != nil { + return nil, err + } + s.Suite = app.GetSuite(s.Config.Suite) + pub, _ := base64.StdEncoding.DecodeString(s.Config.AggPubKey) + s.Suite.Read(bytes.NewReader(pub), &s.X0) + return s, nil +} + +// GetStamp contacts the "server" and waits for the "msg" to +// be signed +// If server is empty, it will contact one randomly +func (s *Stamp) GetStamp(msg []byte, server string) (*TimeStampMessage, error) { + if server == "" { + server = s.Config.Hosts[rand.Intn(len(s.Config.Hosts))] + } + dbg.Lvl2("StampClient will stamp on server", server) + portstr := strconv.Itoa(cliutils.GetPort(server, DefaultPort) + 1) + err := s.connect(cliutils.GetAddress(server) + ":" + portstr) + if err != nil { + return nil, err + } + + tsm, err := s.stamp(msg) + if err != nil { + return nil, err + } + + err = s.disconnect() + if err != nil { + return nil, err + } + + // Verify if what we received is correct + if !VerifySignature(s.Suite, tsm.Srep, s.X0, msg) { + return nil, fmt.Errorf("Verification of signature failed") + } + + return tsm, nil +} + +// Used to connect to server +func (s *Stamp) connect(server string) error { + // First get a connection. 
Get a random one if no server provided + if server == "" { + serverPort := strings.Split(s.Config.Hosts[rand.Intn(len(s.Config.Hosts))], ":") + server = serverPort[0] + port, _ := strconv.Atoi(serverPort[1]) + server += ":" + strconv.Itoa(port+1) + } + if !strings.Contains(server, ":") { + server += ":2000" + } + dbg.Lvl2("Connecting to", server) + s.conn = coconet.NewTCPConn(server) + err := s.conn.Connect() + if err != nil { + return fmt.Errorf("Couldn't get connection to host: %s", err) + } + + dbg.Lvl3("Connected to", server) + return nil +} + +// This stamps the message, but the connection already needs +// to be set up +func (s *Stamp) stamp(msg []byte) (*TimeStampMessage, error) { + tsmsg := &TimeStampMessage{ + Type: StampRequestType, + ReqNo: 0, + Sreq: &StampRequest{Val: msg}} + + err := s.conn.PutData(tsmsg) + if err != nil { + return nil, fmt.Errorf("Couldn't send hash-message to server: %s", err) + } + dbg.Lvl3("Sent signature request") + + // Wait for the signed message + tsm := &TimeStampMessage{} + tsm.Srep = &StampSignature{} + tsm.Srep.SuiteStr = s.Suite.String() + err = s.conn.GetData(tsm) + if err != nil { + return nil, fmt.Errorf("Error while receiving signature: %s", err) + } + dbg.Lvl3("Got signature response") + return tsm, nil +} + +// Asking to close the connection +func (s *Stamp) disconnect() error { + err := s.conn.PutData(&TimeStampMessage{ + ReqNo: 1, + Type: StampClose, + }) + if err != nil { + return err + } + + s.conn.Close() + dbg.Lvl3("Connection closed with server") + return nil +} diff --git a/lib/conode/stamp_test.go b/lib/conode/stamp_test.go new file mode 100644 index 0000000000..7bc00d9e0e --- /dev/null +++ b/lib/conode/stamp_test.go @@ -0,0 +1,43 @@ +package conode_test + +import ( + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "strconv" + "testing" + "time" +) + +// Runs two conodes and tests if the value returned is OK +func TestStamp(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 4) + peer1, peer2 := createPeers() + go peer1.LoopRounds(conode.RoundStamperListenerType, 4) + go peer2.LoopRounds(conode.RoundStamperListenerType, 4) + time.Sleep(2 * time.Second) + + s, err := conode.NewStamp("testdata/config.toml") + if err != nil { + t.Fatal("Couldn't open config-file:", err) + } + + for _, port := range []int{7000, 7010} { + stamper := "localhost:" + strconv.Itoa(port) + dbg.Lvl2("Contacting stamper", stamper) + tsm, err := s.GetStamp([]byte("test"), stamper) + dbg.Lvl3("Evaluating results of", stamper) + if err != nil { + t.Fatal("Couldn't get stamp from server:", err) + } + + if !tsm.Srep.AggPublic.Equal(s.X0) { + t.Fatal("Not correct aggregate public key") + } + } + + dbg.Lvl2("Closing peer1") + peer1.Close() + dbg.Lvl2("Closing peer2") + peer2.Close() + dbg.Lvl3("Done with test") +} diff --git a/lib/conode/stamplistener.go b/lib/conode/stamplistener.go new file mode 100644 index 0000000000..87084d7972 --- /dev/null +++ b/lib/conode/stamplistener.go @@ -0,0 +1,158 @@ +package conode + +import ( + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/dbg" + "net" + "os" + "strconv" + "sync" +) + +const ( + READING = iota + PROCESSING +) + +/* +The counterpart to stamp.go - it listens for incoming requests +and passes those to the roundstamper. 
+*/ + +func init() { + SLList = make(map[string]*StampListener) +} + +var SLList map[string]*StampListener + +type StampListener struct { + // for aggregating messages from clients + Mux sync.Mutex + Queue [][]MustReplyMessage + // All clients connected to that listener + Clients map[string]coconet.Conn + // The name of the listener + NameL string + // The channel for closing the connection + waitClose chan string + // The port we're listening on + Port net.Listener +} + +// Creates a new stamp listener one port above the +// address given in nameP +func NewStampListener(nameP string) *StampListener { + // listen for client requests at one port higher + // than the signing node + var nameL string + h, p, err := net.SplitHostPort(nameP) + if err == nil { + i, err := strconv.Atoi(p) + if err != nil { + dbg.Fatal(err) + } + nameL = net.JoinHostPort(h, strconv.Itoa(i+1)) + } else { + dbg.Fatal("Couldn't split host into name and port:", err) + } + sl, ok := SLList[nameL] + if !ok { + sl = &StampListener{} + dbg.Lvl3("Creating new StampListener for", nameL) + sl.Queue = make([][]MustReplyMessage, 2) + sl.Queue[READING] = make([]MustReplyMessage, 0) + sl.Queue[PROCESSING] = make([]MustReplyMessage, 0) + sl.Clients = make(map[string]coconet.Conn) + sl.waitClose = make(chan string) + sl.NameL = nameL + + SLList[sl.NameL] = sl + sl.ListenRequests() + } else { + dbg.Lvl3("Taking cached StampListener") + } + return sl +} + +// listen for clients connections +func (s *StampListener) ListenRequests() error { + dbg.Lvl3("Setup StampListener on", s.NameL) + global, _ := cliutils.GlobalBind(s.NameL) + var err error + s.Port, err = net.Listen("tcp4", global) + if err != nil { + panic(err) + } + + go func() { + for { + dbg.Lvlf2("Listening to sign-requests: %p", s) + conn, err := s.Port.Accept() + if err != nil { + // handle error + dbg.Lvl3("failed to accept connection") + select { + case w := <-s.waitClose: + dbg.Lvl3("Closing stamplistener:", w) + return + default: + continue + } + } + + dbg.Lvl3("Waiting for connection") + c := coconet.NewTCPConnFromNet(conn) + + if _, ok := s.Clients[c.Name()]; !ok { + s.Clients[c.Name()] = c + + go func(co coconet.Conn) { + for { + tsm := TimeStampMessage{} + err := co.GetData(&tsm) + dbg.Lvlf2("Got data to sign %+v - %+v", tsm, tsm.Sreq) + if err != nil { + dbg.Lvlf1("%p Failed to get from child: %s", s.NameL, err) + co.Close() + return + } + switch tsm.Type { + default: + dbg.Lvlf1("Message of unknown type: %v\n", tsm.Type) + case StampRequestType: + s.Mux.Lock() + s.Queue[READING] = append(s.Queue[READING], + MustReplyMessage{Tsm: tsm, To: co.Name()}) + s.Mux.Unlock() + case StampClose: + dbg.Lvl2("Closing connection") + co.Close() + return + case StampExit: + dbg.Lvl2("Exiting server upon request") + os.Exit(-1) + } + } + }(c) + } + } + }() + + return nil +} + +// Close shuts down the connection +func (s *StampListener) Close() { + close(s.waitClose) + s.Port.Close() + delete(SLList, s.NameL) + dbg.Lvl3(s.NameL, "Closing stamplistener done - SLList is", SLList) +} + +// StampListenersClose closes all open stamplisteners +func StampListenersClose() { + for _, s := range SLList { + s.Close() + } +} diff --git a/lib/conode/stampmessg.go b/lib/conode/stampmessg.go new file mode 100644 index 0000000000..3bc1007927 --- /dev/null +++ b/lib/conode/stampmessg.go @@ -0,0 +1,240 @@ +package conode + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/cliutils" + 
"github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/proof" + "github.com/dedis/crypto/abstract" + "strings" +) + +/* +All messages for stamper-related actions +*/ + +// struct to ease keeping track of who requires a reply after +// tsm is processed/ aggregated by the TSServer +type MustReplyMessage struct { + Tsm TimeStampMessage + To string // name of reply destination +} + +// Default port for the conode-setup - the stamping-request port +// is at ```DefaultPort + 1``` +var DefaultPort int = 2000 + +type MessageType int + +type SeqNo byte + +const ( + Error MessageType = iota + StampRequestType + StampSignatureType + StampClose + StampExit +) + +type StampRequest struct { + Val []byte // Hash-size value to timestamp +} + +// NOTE: In order to decode correctly the Proof, we need to the get the suite +// somehow. We could just simply add it as a field and not (un)marhsal it +// We'd just make sure that the suite is setup before unmarshaling. +type StampSignature struct { + SuiteStr string + Timestamp int64 // The timestamp requested for the file + MerkleRoot []byte // root of the merkle tree + Prf proof.Proof // Merkle proof for the value sent to be stamped + Response abstract.Secret // Aggregate response + Challenge abstract.Secret // Aggregate challenge + AggCommit abstract.Point // Aggregate commitment key + AggPublic abstract.Point // Aggregate public key (use for easy troubleshooting) +} + +func (Sreq StampRequest) MarshalBinary() ([]byte, error) { + dbg.Fatal("Don't want to do MarshalBinary on StampRequest") + return nil, nil +} +func (Sreq *StampRequest) UnmarshalBinary(data []byte) error { + dbg.Fatal("Don't want to do UnamrshalBinary on StampRequest") + return nil +} + +func (sr *StampSignature) MarshalJSON() ([]byte, error) { + type Alias StampSignature + var b bytes.Buffer + suite := app.GetSuite(sr.SuiteStr) + if err := suite.Write(&b, sr.Response, sr.Challenge, sr.AggCommit, sr.AggPublic); err != nil { + dbg.Lvl1("encoding stampreply response/challenge/AggCommit:", err) + return nil, err + } + + return json.Marshal(&struct { + BinaryBlob []byte + *Alias + }{ + BinaryBlob: b.Bytes(), + Alias: (*Alias)(sr), + }) +} + +func (sr *StampSignature) UnmarshalJSON(dataJSON []byte) error { + type Alias StampSignature + suite := app.GetSuite(sr.SuiteStr) + aux := &struct { + BinaryBlob []byte + Response abstract.Secret + Challenge abstract.Secret + AggCommit abstract.Point + AggPublic abstract.Point + *Alias + }{ + Response: suite.Secret(), + Challenge: suite.Secret(), + AggCommit: suite.Point(), + AggPublic: suite.Point(), + Alias: (*Alias)(sr), + } + if err := json.Unmarshal(dataJSON, &aux); err != nil { + return err + } + if err := suite.Read(bytes.NewReader(aux.BinaryBlob), &sr.Response, + &sr.Challenge, &sr.AggCommit, &sr.AggPublic); err != nil { + dbg.Fatal("decoding signature Response / Challenge / AggCommit:", err) + return err + } + return nil +} + +// sigFile represnets a signature to be written to a file or to be read in a +// human readble format (TOML + base64 encoding) +type sigFile struct { + SuiteStr string + Name string + Timestamp int64 + Proof []string + MerkleRoot string + Challenge string + Response string + AggCommitment string + AggPublic string +} + +// Write will write the struct in a human readable format into this writer +// The format is TOML and most fields are written in base64 +func (sr *StampSignature) Save(file string) error { + var p []string + for _, pr := range sr.Prf { + p = append(p, base64.StdEncoding.EncodeToString(pr)) + } + 
suite := app.GetSuite(sr.SuiteStr) + // Write challenge and response + commitment part + var bufChall bytes.Buffer + var bufResp bytes.Buffer + var bufCommit bytes.Buffer + var bufPublic bytes.Buffer + if err := cliutils.WriteSecret64(suite, &bufChall, sr.Challenge); err != nil { + return fmt.Errorf("Could not write secret challenge:", err) + } + if err := cliutils.WriteSecret64(suite, &bufResp, sr.Response); err != nil { + return fmt.Errorf("Could not write secret response:", err) + } + if err := cliutils.WritePub64(suite, &bufCommit, sr.AggCommit); err != nil { + return fmt.Errorf("Could not write aggregated commitment:", err) + } + if err := cliutils.WritePub64(suite, &bufPublic, sr.AggPublic); err != nil { + return fmt.Errorf("Could not write aggregated public key:", err) + } + // Signature file struct containing everything needed + sigStr := &sigFile{ + Name: file, + SuiteStr: suite.String(), + Timestamp: sr.Timestamp, + Proof: p, + MerkleRoot: base64.StdEncoding.EncodeToString(sr.MerkleRoot), + Challenge: bufChall.String(), + Response: bufResp.String(), + AggCommitment: bufCommit.String(), + AggPublic: bufPublic.String(), + } + + // Print to the screen, and write to file + dbg.Lvl2("Signature-file will be:\n%+v", sigStr) + + app.WriteTomlConfig(sigStr, file) + return nil +} + +func (sr *StampSignature) Open(file string) error { + // Read in the toml-file + sigStr := &sigFile{} + err := app.ReadTomlConfig(sigStr, file) + if err != nil { + return err + } + suite := app.GetSuite(sigStr.SuiteStr) + + sr.Timestamp = sigStr.Timestamp + for _, pr := range sigStr.Proof { + pro, err := base64.StdEncoding.DecodeString(pr) + if err != nil { + dbg.Lvl1("Couldn't decode proof:", pr) + return err + } + sr.Prf = append(sr.Prf, pro) + } + // Read the root, the challenge and response + sr.MerkleRoot, err = base64.StdEncoding.DecodeString(sigStr.MerkleRoot) + if err != nil { + fmt.Errorf("Could not decode Merkle Root from sig file:", err) + } + sr.Response, err = cliutils.ReadSecret64(suite, strings.NewReader(sigStr.Response)) + if err != nil { + fmt.Errorf("Could not read secret challenge:", err) + } + if sr.Challenge, err = cliutils.ReadSecret64(suite, strings.NewReader(sigStr.Challenge)); err != nil { + fmt.Errorf("Could not read the aggregate commitment:", err) + } + if sr.AggCommit, err = cliutils.ReadPub64(suite, strings.NewReader(sigStr.AggCommitment)); err != nil { + return err + } + if sr.AggPublic, err = cliutils.ReadPub64(suite, strings.NewReader(sigStr.AggPublic)); err != nil { + return err + } + + return nil +} + +func (Sreq StampSignature) MarshalBinary() ([]byte, error) { + dbg.Fatal("Don't want to do MarshalBinary on StampReply") + return nil, nil +} +func (Sreq *StampSignature) UnmarshalBinary(data []byte) error { + dbg.Fatal("Don't want to do UnarmsahlBinary on StampReply") + return nil +} + +type TimeStampMessage struct { + ReqNo SeqNo // Request sequence number + // ErrorReply *ErrorReply // Generic error reply to any request + Type MessageType + Sreq *StampRequest + Srep *StampSignature +} + +func (tsm TimeStampMessage) MarshalBinary() ([]byte, error) { + dbg.Fatal("Don't want to do that") + return nil, nil +} + +func (sm *TimeStampMessage) UnmarshalBinary(data []byte) error { + dbg.Fatal("Don't want to do that") + return nil +} diff --git a/lib/conode/testdata/config.toml b/lib/conode/testdata/config.toml new file mode 100644 index 0000000000..0e8257d4a0 --- /dev/null +++ b/lib/conode/testdata/config.toml @@ -0,0 +1,13 @@ +Suite = "Ed25519" +Hosts = ["localhost:7000", 
"localhost:7010"] +AggPubKey = "dkp32QL4viiR0EiMtnLIVYLgk6PXTcSQlNXFNwpnLiI=" + +[Tree] + Name = "localhost:7000" + PriKey = "" + PubKey = "01yZ7/w/LxWHF6XBvkr2g0w9awCzOn8WrmAVBmXGw8s=" + + [[Tree.Children]] + Name = "localhost:7010" + PriKey = "" + PubKey = "IiEjL/BXXl/5gJP9lQ4/EZZ/T+eJCI+bKO7HX0kgCxw=" diff --git a/lib/conode/testdata/hostlist b/lib/conode/testdata/hostlist new file mode 100644 index 0000000000..c1dfcfb863 --- /dev/null +++ b/lib/conode/testdata/hostlist @@ -0,0 +1,2 @@ +localhost:7000 01yZ7/w/LxWHF6XBvkr2g0w9awCzOn8WrmAVBmXGw8s= +localhost:7010 IiEjL/BXXl/5gJP9lQ4/EZZ/T+eJCI+bKO7HX0kgCxw= diff --git a/lib/conode/testdata/key1.priv b/lib/conode/testdata/key1.priv new file mode 100755 index 0000000000..b6926df41c --- /dev/null +++ b/lib/conode/testdata/key1.priv @@ -0,0 +1 @@ +‹æñ2ÑáKO'×Káûå…ÐèEu¦YÈp¦N^E diff --git a/lib/conode/testdata/key1.pub b/lib/conode/testdata/key1.pub new file mode 100755 index 0000000000..3859174cbf --- /dev/null +++ b/lib/conode/testdata/key1.pub @@ -0,0 +1 @@ +localhost:7000 01yZ7/w/LxWHF6XBvkr2g0w9awCzOn8WrmAVBmXGw8s= diff --git a/lib/conode/testdata/key2.priv b/lib/conode/testdata/key2.priv new file mode 100755 index 0000000000..ca01853793 --- /dev/null +++ b/lib/conode/testdata/key2.priv @@ -0,0 +1 @@ +u·G5~C>˜oxž£·û¥ÂãO“XŒML2 diff --git a/lib/conode/testdata/key2.pub b/lib/conode/testdata/key2.pub new file mode 100755 index 0000000000..d041bc417f --- /dev/null +++ b/lib/conode/testdata/key2.pub @@ -0,0 +1 @@ +localhost:7010 IiEjL/BXXl/5gJP9lQ4/EZZ/T+eJCI+bKO7HX0kgCxw= diff --git a/lib/conode/verification.go b/lib/conode/verification.go new file mode 100644 index 0000000000..169c8181fe --- /dev/null +++ b/lib/conode/verification.go @@ -0,0 +1,105 @@ +package conode + +import ( + "bytes" + "encoding/binary" + "errors" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/hashid" + "github.com/dedis/cothority/lib/proof" + "github.com/dedis/crypto/abstract" +) + +/* +Verification methods used by stamper. +*/ + +// Verifies that the 'message' is included in the signature and that it +// is correct. +// Message is your own hash, and reply contains the inclusion proof + signature +// on the aggregated message +func VerifySignature(suite abstract.Suite, reply *StampSignature, public abstract.Point, message []byte) bool { + // Check if aggregate public key is correct + if !public.Equal(reply.AggPublic) { + dbg.Lvl1("Aggregate-public-key check: FAILED (maybe you have an outdated config file of the tree)") + return false + } + // First check if the challenge is ok + if err := VerifyChallenge(suite, reply); err != nil { + dbg.Lvl1("Challenge-check: FAILED (", err, ")") + return false + } + dbg.Lvl2("Challenge-check: OK") + + // Incorporate the timestamp in the message since the verification process + // is done by reconstructing the challenge + var b bytes.Buffer + if err := binary.Write(&b, binary.LittleEndian, reply.Timestamp); err != nil { + dbg.Lvl1("Error marshaling the timestamp for signature verification") + return false + } + msg := append(b.Bytes(), []byte(reply.MerkleRoot)...) 
+ if err := VerifySchnorr(suite, msg, public, reply.Challenge, reply.Response); err != nil { + dbg.Lvl1("Signature-check: FAILED (", err, ")") + return false + } + dbg.Lvl2("Signature-check: OK") + + // finally check the proof + if !proof.CheckProof(suite.Hash, reply.MerkleRoot, hashid.HashId(message), reply.Prf) { + dbg.Lvl2("Inclusion-check: FAILED") + return false + } + dbg.Lvl2("Inclusion-check: OK") + return true +} + +// verifyChallenge will reconstruct the challenge in order to see if any of the +// components of the challenge has been spoofed or not. It may be a different +// timestamp . +func VerifyChallenge(suite abstract.Suite, reply *StampSignature) error { + dbg.Lvlf3("Reply is %+v", reply) + // marshal the V + pbuf, err := reply.AggCommit.MarshalBinary() + if err != nil { + return err + } + c := suite.Cipher(pbuf) + // concat timestamp and merkle root + var b bytes.Buffer + if err := binary.Write(&b, binary.LittleEndian, reply.Timestamp); err != nil { + return err + } + cbuf := append(b.Bytes(), reply.MerkleRoot...) + c.Message(nil, nil, cbuf) + challenge := suite.Secret().Pick(c) + if challenge.Equal(reply.Challenge) { + return nil + } + return errors.New("Challenge reconstructed is not equal to the one given") +} + +// A simple verification of a Schnorr signature given the message +func VerifySchnorr(suite abstract.Suite, message []byte, publicKey abstract.Point, c, r abstract.Secret) error { + + // Check that: base**r_hat * X_hat**c == V_hat + // Equivalent to base**(r+xc) == base**(v) == T in vanillaElGamal + Aux := suite.Point() + V_clean := suite.Point() + V_clean.Add(V_clean.Mul(nil, r), Aux.Mul(publicKey, c)) + // T is the recreated V_hat + T := suite.Point().Null() + T.Add(T, V_clean) + + // Verify that the hash based on the message and T + // matches the challange c from the signature + // copy of hashSchnorr + bufPoint, _ := T.MarshalBinary() + cipher := suite.Cipher(bufPoint) + cipher.Message(nil, nil, message) + hash := suite.Secret().Pick(cipher) + if !hash.Equal(c) { + return errors.New("invalid signature") + } + return nil +} diff --git a/lib/conode/verification_test.go b/lib/conode/verification_test.go new file mode 100644 index 0000000000..1f2fe7c482 --- /dev/null +++ b/lib/conode/verification_test.go @@ -0,0 +1,136 @@ +package conode_test + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "github.com/dedis/cothority/lib/app" + "github.com/dedis/cothority/lib/conode" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/hashid" + "github.com/dedis/crypto/abstract" + "io" + "testing" +) + +var reply conode.StampSignature +var X0 abstract.Point +var suite abstract.Suite +var hash []byte + +func init() { + dbg.DebugVisible = 1 +} + +// Verifies whether the Challenge is correct +func TestVerifyChallenge(t *testing.T) { + setupTestSig() + + err := conode.VerifyChallenge(suite, &reply) + if err != nil { + t.Error("Verification failed") + } else { + dbg.Lvl2("Verification passed") + } +} + +// Verifies whether the X0 and hash is correct +func TestVerifySignature(t *testing.T) { + setupTestSig() + + if !conode.VerifySignature(suite, &reply, X0, hash) { + t.Error("Verification failed") + } else { + dbg.Lvl2("Verification passed") + } +} + +// Verifies whether the Schnorr signature is correct +func TestVerifySchnorr(t *testing.T) { + setupTestSig() + var b bytes.Buffer + if err := binary.Write(&b, binary.LittleEndian, reply.Timestamp); err != nil { + dbg.Lvl1("Error marshaling the timestamp for signature verification") + } + msg := 
append(b.Bytes(), []byte(reply.MerkleRoot)...) + err := conode.VerifySchnorr(suite, msg, X0, reply.Challenge, reply.Response) + if err != nil { + dbg.Fatal("Schnorr verification failed") + } else { + dbg.Lvl2("Schnorr OK") + } +} + +// Checks the correct setup of the signature +func TestsetupTestSig(t *testing.T) { + setupTestSig() + if !reply.AggPublic.Equal(X0) { + t.Error("X0 is not equal") + } else { + dbg.Lvl2("X0 is OK") + } +} + +type test_sig struct { + Suite string + AggPubKey string + Name string + Timestamp int + Hash string + Root string + Proof []string + Challenge string + Response string + Commitment string +} + +func setupTestSig() { + var sig = test_sig{ + Suite: "Ed25519", + Name: "stamp.sig", + Timestamp: 1448637057, + Proof: []string{"fN1GPbpXUqLGh20Ls1JmiFncbWcnvai4pt2ufJnUcIo=", "ehvna7oGGqwZsCgLVP1GvEHxCbYl2Bv8fS0EgGEvmB4=", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", "cFol1fkwjOXyXLNM8Bhu3Bafs1L8GnrWWswE19YDh/E=", "cFol1fkwjOXyXLNM8Bhu3Bafs1L8GnrWWswE19YDh/E="}, + Root: "vu2wvZDMc5ZlCNspMRWKZCI0ShYZ8zyLfy2cuZFf54A=", + Challenge: "ClSG6pa3uklYndfBztJ9yAD8aY2g/GzTq0rs8nQC12w=", + Response: "AJXkEEiyvWdoidjjUMUi5nryHRzJSywXoFY/fIT/8FM=", + Commitment: "LbMi53pBXpHN1IYa7pRaC953KAox+NvzQSzAOsK1CEQ=", + AggPubKey: "dkp32QL4viiR0EiMtnLIVYLgk6PXTcSQlNXFNwpnLiI=", + Hash: "RnZyRnItjXQBSMYLfY/f8WDgiYJI9Yh4lQXa6+VwWxc=", + } + + suite = app.GetSuite(sig.Suite) + suite.Read(get64R(sig.AggPubKey), &X0) + + reply.SuiteStr = sig.Suite + reply.Timestamp = int64(sig.Timestamp) + reply.MerkleRoot = get64(sig.Root) + var proof []hashid.HashId + for _, p := range sig.Proof { + proof = append(proof, get64(p)) + } + reply.Prf = proof + + suite.Read(get64R(sig.Challenge), &reply.Challenge) + suite.Read(get64R(sig.Response), &reply.Response) + suite.Read(get64R(sig.Commitment), &reply.AggCommit) + suite.Read(get64R(sig.AggPubKey), &reply.AggPublic) + + hash = get64(sig.Hash) + + dbg.Lvl3("Challenge", reply.Challenge) + dbg.Lvl3("Response", reply.Response) + dbg.Lvl3("Commitment", reply.AggCommit) + dbg.Lvl3("AggPubKey", reply.AggPublic) +} + +func get64R(str string) io.Reader { + return bytes.NewReader(get64(str)) +} + +func get64(str string) []byte { + ret, err := base64.StdEncoding.DecodeString(str) + if err != nil { + dbg.Fatal("Couldn't decode", str) + } + return ret +} diff --git a/lib/dbg/debug_lvl.go b/lib/dbg/debug_lvl.go new file mode 100644 index 0000000000..98f8c2abfc --- /dev/null +++ b/lib/dbg/debug_lvl.go @@ -0,0 +1,184 @@ +package dbg + +import ( + "flag" + "fmt" + "os" + "regexp" + "runtime" +) + +// These are information-debugging levels that can be turned on or off. +// Every logging greater than 'DebugVisible' will be discarded. So you can +// Log at different levels and easily turn on or off the amount of logging +// generated by adjusting the 'DebugVisible' variable. +var DebugVisible = 1 + +// The padding of functions to make a nice debug-output - this is automatically updated +// whenever there are longer functions and kept at that new maximum. 
If you prefer +// to have a fixed output and don't remember oversized names, put a negative value +// in here +var NamePadding = 40 + +// Padding of line-numbers for a nice debug-output - used in the same way as +// NamePadding +var LinePadding = 3 + +// Testing output has to be on fmt, it doesn't take into account log-outputs +// So for testing, set Testing = true, and instead of sending to log, it will +// output to fmt +var Testing = false + +// If this variable is set, it will be outputted between the position and the message +var StaticMsg = "" + +var regexpPaths, _ = regexp.Compile(".*/") + +func init() { + flag.IntVar(&DebugVisible, "debug", DebugVisible, "How much debug you from 1 (discrete) - 5 (very noisy). Default 1") +} + +// Needs two functions to keep the caller-depth the same and find who calls us +// Lvlf1 -> Lvlf -> Lvl +// or +// Lvl1 -> Lvld -> Lvl +func Lvld(lvl int, args ...interface{}) { + Lvl(lvl, args...) +} +func Lvl(lvl int, args ...interface{}) { + if lvl > DebugVisible { + return + } + pc, _, line, _ := runtime.Caller(3) + name := regexpPaths.ReplaceAllString(runtime.FuncForPC(pc).Name(), "") + lineStr := fmt.Sprintf("%d", line) + + // For the testing-framework, we check the resulting string. So as not to + // have the tests fail every time somebody moves the functions, we put + // the line-# to 0 + if Testing { + line = 0 + } + + if len(name) > NamePadding && NamePadding > 0 { + NamePadding = len(name) + } + if len(lineStr) > LinePadding && LinePadding > 0 { + LinePadding = len(name) + } + fmtstr := fmt.Sprintf("%%%ds: %%%dd", NamePadding, LinePadding) + caller := fmt.Sprintf(fmtstr, name, line) + if StaticMsg != "" { + caller += "@" + StaticMsg + } + message := fmt.Sprintln(args...) + fmt.Printf("%d: (%s) - %s", lvl, caller, message) +} + +func Lvlf(lvl int, f string, args ...interface{}) { + Lvl(lvl, fmt.Sprintf(f, args...)) +} + +func Print(args ...interface{}) { + Lvld(-1, args...) +} + +func Printf(f string, args ...interface{}) { + Lvlf(-1, f, args...) +} + +func Lvl1(args ...interface{}) { + Lvld(1, args...) +} + +func Lvl2(args ...interface{}) { + Lvld(2, args...) +} + +func Lvl3(args ...interface{}) { + Lvld(3, args...) +} + +func Lvl4(args ...interface{}) { + Lvld(4, args...) +} + +func Lvl5(args ...interface{}) { + Lvld(5, args...) +} + +func Error(args ...interface{}) { + Lvld(0, args...) +} + +func Fatal(args ...interface{}) { + Lvld(0, args...) + os.Exit(1) +} + +func Panic(args ...interface{}) { + Lvld(0, args...) + panic(args) +} + +func Lvlf1(f string, args ...interface{}) { + Lvlf(1, f, args...) +} + +func Lvlf2(f string, args ...interface{}) { + Lvlf(2, f, args...) +} + +func Lvlf3(f string, args ...interface{}) { + Lvlf(3, f, args...) +} + +func Lvlf4(f string, args ...interface{}) { + Lvlf(4, f, args...) +} + +func Lvlf5(f string, args ...interface{}) { + Lvlf(5, f, args...) +} + +func Fatalf(f string, args ...interface{}) { + Lvlf(0, f, args...) + os.Exit(1) +} + +func Errorf(f string, args ...interface{}) { + Lvlf(0, f, args...) + os.Exit(1) +} + +func Panicf(f string, args ...interface{}) { + Lvlf(0, f, args...) 
+ panic(args) +} + +// TestOutput sets the DebugVisible to 0 if 'show' +// is false, else it will set DebugVisible to 'level' +// +// Usage: TestOutput( test.Verbose(), 2 ) +func TestOutput(show bool, level int) { + if show { + DebugVisible = level + } else { + DebugVisible = 0 + } +} + +// To easy print a debug-message anyway without discarding the level +// Just add an additional "L" in front, and remove it later: +// - easy hack to turn on other debug-messages +// - easy removable by searching/replacing 'LLvl' with 'Lvl' +func LLvl1(args ...interface{}) { Lvld(-1, args...) } +func LLvl2(args ...interface{}) { Lvld(-1, args...) } +func LLvl3(args ...interface{}) { Lvld(-1, args...) } +func LLvl4(args ...interface{}) { Lvld(-1, args...) } +func LLvl5(args ...interface{}) { Lvld(-1, args...) } +func LLvlf1(f string, args ...interface{}) { Lvlf(-1, f, args...) } +func LLvlf2(f string, args ...interface{}) { Lvlf(-1, f, args...) } +func LLvlf3(f string, args ...interface{}) { Lvlf(-1, f, args...) } +func LLvlf4(f string, args ...interface{}) { Lvlf(-1, f, args...) } +func LLvlf5(f string, args ...interface{}) { Lvlf(-1, f, args...) } diff --git a/lib/dbg/debug_lvl_test.go b/lib/dbg/debug_lvl_test.go new file mode 100644 index 0000000000..69e3b04cf5 --- /dev/null +++ b/lib/dbg/debug_lvl_test.go @@ -0,0 +1,69 @@ +package dbg_test + +import ( + "github.com/dedis/cothority/lib/dbg" +) + +func init() { + dbg.Testing = true +} + +func ExampleLevel2() { + dbg.DebugVisible = 2 + dbg.Lvl1("Level1") + dbg.Lvl2("Level2") + dbg.Lvl3("Level3") + dbg.Lvl4("Level4") + dbg.Lvl5("Level5") + + // Output: + // 1: ( dbg_test.ExampleLevel2: 0) - Level1 + // 2: ( dbg_test.ExampleLevel2: 0) - Level2 +} + +func ExampleMultiParams() { + dbg.Lvl1("Multiple", "parameters") + + // Output: + // 1: ( dbg_test.ExampleMultiParams: 0) - Multiple parameters +} + +func ExampleLLvl() { + dbg.Lvl1("Lvl output") + dbg.LLvl1("LLvl output") + dbg.Lvlf1("Lvlf output") + dbg.LLvlf1("LLvlf output") + + // Output: + // 1: ( dbg_test.ExampleLLvl: 0) - Lvl output + // -1: ( dbg_test.ExampleLLvl: 0) - LLvl output + // 1: ( dbg_test.ExampleLLvl: 0) - Lvlf output + // -1: ( dbg_test.ExampleLLvl: 0) - LLvlf output +} + +func thisIsAVeryLongFunctionNameThatWillOverflow() { + dbg.Lvl1("Overflow") +} + +func ExampleLongFunctions() { + dbg.Lvl1("Before") + thisIsAVeryLongFunctionNameThatWillOverflow() + dbg.Lvl1("After") + + // Output: + // 1: ( dbg_test.ExampleLongFunctions: 0) - Before + // 1: (dbg_test.thisIsAVeryLongFunctionNameThatWillOverflow: 0) - Overflow + // 1: ( dbg_test.ExampleLongFunctions: 0) - After +} + +func ExampleLongFunctionsLimit() { + dbg.NamePadding = -1 + dbg.Lvl1("Before") + thisIsAVeryLongFunctionNameThatWillOverflow() + dbg.Lvl1("After") + + // Output: + // 1: (dbg_test.ExampleLongFunctionsLimit: 0) - Before + // 1: (dbg_test.thisIsAVeryLongFunctionNameThatWillOverflow: 0) - Overflow + // 1: (dbg_test.ExampleLongFunctionsLimit: 0) - After +} diff --git a/lib/debug_lvl/debug_lvl.go b/lib/debug_lvl/debug_lvl.go deleted file mode 100644 index ff6b8e0b88..0000000000 --- a/lib/debug_lvl/debug_lvl.go +++ /dev/null @@ -1,130 +0,0 @@ -package debug_lvl -import ( - "fmt" - "bytes" - "github.com/Sirupsen/logrus" - "os" - "runtime" - "regexp" -) - -// These are information-debugging levels that can be turned on or off. -// Every logging greater than 'DebugVisible' will be discarded. So you can -// Log at different levels and easily turn on or off the amount of logging -// generated by adjusting the 'DebugVisible' variable. 
-var DebugVisible = 1 - -// The padding of functions to make a nice debug-output - this is automatically updated -// whenever there are longer functions and kept at that new maximum. If you prefer -// to have a fixed output and don't remember oversized names, put a negative value -// in here -var NamePadding = 40 - -// Padding of line-numbers for a nice debug-output - used in the same way as -// NamePadding -var LinePadding = 3 - -// Testing output has to be on fmt, it doesn't take into account log-outputs -// So for testing, set Testing = true, and instead of sending to log, it will -// output to fmt -var Testing = false - -// If this variable is set, it will be outputted between the position and the message -var StaticMsg = "" - -// Holds the logrus-structure to do our logging -var DebugLog = &logrus.Logger{ - Out: os.Stdout, - Formatter: &DebugLvl{}, - Hooks: make(logrus.LevelHooks), - Level: logrus.InfoLevel} - -var regexpPaths, _ = regexp.Compile(".*/") - -func init(){ -} - -func Lvl(lvl int, args ...interface{}) { - pc, _, line, _ := runtime.Caller(2) - name := regexpPaths.ReplaceAllString(runtime.FuncForPC(pc).Name(), "") - lineStr := fmt.Sprintf("%d", line) - - // For the testing-framework, we check the resulting string. So as not to - // have the tests fail every time somebody moves the functions, we put - // the line-# to 0 - if Testing { - line = 0 - } - - if len(name) > NamePadding && NamePadding > 0{ - NamePadding = len(name) - } - if len(lineStr) > LinePadding && LinePadding > 0{ - LinePadding = len(name) - } - fmtstr := fmt.Sprintf("%%%ds: %%%dd", NamePadding, LinePadding) - caller := fmt.Sprintf(fmtstr, name, line) - if StaticMsg != ""{ - caller += "@" + StaticMsg - } - DebugLog.WithFields(logrus.Fields{ - "debug_lvl": lvl, - "caller": caller}).Println(args...) -} - -func Lvl1(args ...interface{}) { - Lvl(1, args...) -} - -func Lvl2(args ...interface{}) { - Lvl(2, args...) -} - -func Lvl3(args ...interface{}) { - Lvl(3, args...) -} - -func Lvl4(args ...interface{}) { - Lvl(4, args...) -} - -func Lvl5(args ...interface{}) { - Lvl(5, args...) -} - -func Fatal(args ...interface{}){ - Lvl(0, args...) 
- os.Exit(1) -} - -// To easy print a debug-message anyway without discarding the level -func LLvl2(args ...interface{}){Lvl(1, args...)} -func LLvl3(args ...interface{}){Lvl(1, args...)} -func LLvl4(args ...interface{}){Lvl(1, args...)} -func LLvl5(args ...interface{}){Lvl(1, args...)} - -type DebugLvl struct { -} - -func (f *DebugLvl) Format(entry *logrus.Entry) ([]byte, error) { - lvl := entry.Data["debug_lvl"].(int) - caller := entry.Data["caller"].(string) - if lvl <= DebugVisible { - b := &bytes.Buffer{} - b.WriteString(fmt.Sprintf("%d: (%s) - %s", lvl, caller, entry.Message)) - b.WriteByte('\n') - - if Testing { - fmt.Print(b) - return nil, nil - } else { - return b.Bytes(), nil - } - } else { - if len(entry.Message) > 2048 && DebugVisible > 1{ - fmt.Printf("%d: (%s) - HUGE message of %d bytes not printed\n", lvl, caller, len(entry.Message)) - } - return nil, nil - } -} - diff --git a/lib/debug_lvl/debug_lvl_test.go b/lib/debug_lvl/debug_lvl_test.go deleted file mode 100644 index 546b9a5920..0000000000 --- a/lib/debug_lvl/debug_lvl_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package debug_lvl_test - -import ( - dbg "github.com/dedis/cothority/lib/debug_lvl" -) - -func init(){ - dbg.Testing = true -} - -func ExampleLevel2() { - dbg.Lvl1("Level1") - dbg.Lvl3("Level2") - dbg.Lvl4("Level3") - dbg.Lvl4("Level4") - dbg.Lvl5("Level5") - - // Output: - // 1: ( debug_lvl_test.ExampleLevel2: 0) - Level1 - // 2: ( debug_lvl_test.ExampleLevel2: 0) - Level2 -} - -func thisIsAVeryLongFunctionNameThatWillOverflow(){ - dbg.Lvl1("Overflow") -} - -func ExampleLongFunctions() { - dbg.Lvl1("Before") - thisIsAVeryLongFunctionNameThatWillOverflow() - dbg.Lvl1("After") - - // Output: - // 1: ( debug_lvl_test.ExampleLongFunctions: 0) - Before - // 1: (debug_lvl_test.thisIsAVeryLongFunctionNameThatWillOverflow: 0) - Overflow - // 1: ( debug_lvl_test.ExampleLongFunctions: 0) - After -} - -func ExampleLongFunctionsLimit() { - dbg.NamePadding = -1 - dbg.Lvl1("Before") - thisIsAVeryLongFunctionNameThatWillOverflow() - dbg.Lvl1("After") - - // Output: - // 1: (debug_lvl_test.ExampleLongFunctionsLimit: 0) - Before - // 1: (debug_lvl_test.thisIsAVeryLongFunctionNameThatWillOverflow: 0) - Overflow - // 1: (debug_lvl_test.ExampleLongFunctionsLimit: 0) - After -} diff --git a/lib/graphs/graph.go b/lib/graphs/graph.go index 6e7932b8aa..9b92f189ea 100644 --- a/lib/graphs/graph.go +++ b/lib/graphs/graph.go @@ -4,8 +4,7 @@ import ( "bytes" "container/list" "errors" - "fmt" - "log" + "github.com/dedis/cothority/lib/dbg" "net" "strconv" ) @@ -34,10 +33,10 @@ func NewGraph(names []string) *Graph { // takes in a byte array representing an edge list and loads the graph func (g *Graph) LoadEdgeList(edgelist []byte) { - log.Println(g.Names) + dbg.Lvl3(g.Names) fields := bytes.Fields(edgelist) // create name map from string to index - log.Println(g.Names) + dbg.Lvl3(g.Names) names := make(map[string]int) for i, n := range g.Names { names[n] = i @@ -49,18 +48,18 @@ func (g *Graph) LoadEdgeList(edgelist []byte) { to := string(fields[i+1]) weight, err := strconv.ParseFloat(string(fields[i+2]), 64) if err != nil { - log.Println(err) + dbg.Lvl3(err) continue } fi, ok := names[from] if !ok { - log.Println("from not ok:", from) + dbg.Lvl3("from not ok:", from) continue } ti, ok := names[to] if !ok { - log.Println("to not ok:", to) + dbg.Lvl3("to not ok:", to) continue } @@ -78,11 +77,11 @@ func (g *Graph) MST() *Tree { // pi: parent index, bf: branching factor, visited: set of visited nodes, ti: tree index, tnodes: space for tree nodes // 
returns the last used index for tree nodes func (g *Graph) constructTree(ri int, bf int, visited []bool, tnodes []Tree) { - log.Println("constructing tree: ", ri, bf) - log.Println(g.Names) + dbg.Lvl3("constructing tree:", ri, bf) + dbg.Lvl3(g.Names) root := &tnodes[ri] root.Name = g.Names[ri] - log.Println(root) + dbg.Lvl3(root) visited[ri] = true tni := 1 indmap := make([]int, len(visited)) @@ -104,12 +103,12 @@ func (g *Graph) constructTree(ri int, bf int, visited []bool, tnodes []Tree) { queue.Remove(e) // parent index pi := e.Value.(int) - fmt.Println("next: ", pi) + dbg.Lvl3("next:", pi) parent := &tnodes[indmap[pi]] fs := sortFloats(g.Weights[pi]) nc := bf - fmt.Println(fs) + dbg.Lvl3(fs) // iterate through children and select the bf closest ones for _, ci := range fs.I { if nc == 0 { @@ -119,7 +118,7 @@ func (g *Graph) constructTree(ri int, bf int, visited []bool, tnodes []Tree) { // if this child hasn't been visited // it is the closest unvisited child if !visited[ci] { - fmt.Println("adding child:", ci, tni) + dbg.Lvl3("adding child:", ci, tni) queue.PushFront(ci) cn := &tnodes[tni] indmap[ci] = tni @@ -152,9 +151,9 @@ func (g *Graph) Tree(nlevels int) *Tree { bf += 1 } // log.Panicf("n: %d, nlevels: %d, branching factor: %d\n", n, nlevels, bf) - fmt.Println("Tree:", n, nlevels, bf) + dbg.Lvl3("Tree:", n, nlevels, bf) g.constructTree(ri, bf, make([]bool, n), tnodes) - log.Println("tnodes:", tnodes) + dbg.Lvl3("tnodes:", tnodes) return root } @@ -282,7 +281,7 @@ func ColorTree(nodeNames []string, hostAddr []string, hostsPerNode int, bf int, node, _, _ := net.SplitHostPort(newHost) nodesTouched = append(nodesTouched, node) } - // fmt.Println(i, hostsCreated) + // dbg.Lvl3(i, hostsCreated) } if TRIM == true { @@ -389,16 +388,16 @@ func TrimLastIncompleteLevel(root *Tree, hosts []string, depths []int, bf int) ( d := Depth(newRoot) if len(badHosts) != 0 && d != treed-1 { - fmt.Println(d, "!=", treed-1) + dbg.Lvl3(d, "!=", treed-1) panic("TrimTree return wrong result") } else { if len(badHosts) == 0 && d != treed { - fmt.Println(d, "!=", treed) + dbg.Lvl3(d, "!=", treed) panic("TrimTree return wrong result") } } - // log.Println(" Trimmed", n-sj, "nodes") + // dbg.Lvl3(" Trimmed", n-sj, "nodes") return newRoot, hosts[:sj] } diff --git a/lib/graphs/graph_test.go b/lib/graphs/graph_test.go index 3191da92f4..549f3f9002 100644 --- a/lib/graphs/graph_test.go +++ b/lib/graphs/graph_test.go @@ -1,8 +1,6 @@ package graphs import ( - "fmt" - "log" "strconv" "strings" "testing" @@ -11,7 +9,7 @@ import ( func TestTree(t *testing.T) { g := &Graph{Names: []string{"planetlab2.cs.unc.edu", "pl1.6test.edu.cn", "planetlab1.cs.du.edu", "planetlab02.cs.washington.edu", "planetlab-2.cse.ohio-state.edu", "planetlab2.cs.ubc.ca"}, mem: []float64{0, 213.949, 51.86, 76.716, 2754.531, 81.301, 214.143, 0, 169.744, 171.515, 557.526, 189.186, 51.601, 170.191, 0, 41.418, 2444.206, 31.475, 76.731, 171.43, 41.394, 0, 2470.722, 5.741, 349.881, 520.028, 374.362, 407.282, 0, 392.211, 81.381, 189.386, 31.582, 5.78, 141.273, 0}, Weights: [][]float64{[]float64{0, 213.949, 51.86, 76.716, 2754.531, 81.301}, []float64{214.143, 0, 169.744, 171.515, 557.526, 189.186}, []float64{51.601, 170.191, 0, 41.418, 2444.206, 31.475}, []float64{76.731, 171.43, 41.394, 0, 2470.722, 5.741}, []float64{349.881, 520.028, 374.362, 407.282, 0, 392.211}, []float64{81.381, 189.386, 31.582, 5.78, 141.273, 0}}} tree := g.Tree(2) - log.Println(tree) + t.Log(tree) } func TestTreeFromList(t *testing.T) { @@ -30,12 +28,12 @@ func TestTreeFromList(t 
*testing.T) { // if err != nil { // t.Error(err) // } - // fmt.Println(string(b)) + // t.Log(string(b)) // if len(usedHosts) != len(nodeNames)*hostsPerNode { // t.Error("Should have been able to use all hosts") // } - fmt.Println("used hosts", usedHosts) + t.Log("used hosts", usedHosts) root.TraverseTree(PrintTreeNode) // Output: @@ -75,7 +73,7 @@ func TestTreeFromList2(t *testing.T) { if len(usedHosts) != 6 { t.Error("Should have been able to use only 6 hosts") } - fmt.Println("used hosts", usedHosts) + t.Log("used hosts", usedHosts) root.TraverseTree(PrintTreeNode) } @@ -108,7 +106,7 @@ func TestTreeFromListColoring(t *testing.T) { if !checkColoring(root) { t.Fatal("failed to properly color:", nodes, hpn, bf) } - t.Log("able to use:", len(hosts), " of: ", hpn*len(nodes)) + t.Log("able to use:", len(hosts), "of:", hpn*len(nodes)) depth := Depth(root) if depth != retDepth { diff --git a/lib/config/config.go b/lib/graphs/node.go similarity index 66% rename from lib/config/config.go rename to lib/graphs/node.go index 46f0aecca2..930b012ac4 100644 --- a/lib/config/config.go +++ b/lib/graphs/node.go @@ -1,7 +1,6 @@ -package config +package graphs -import -( +import ( "bytes" "crypto/cipher" "encoding/hex" @@ -13,17 +12,11 @@ import "regexp" "time" - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/proto/sign" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/sign" "github.com/dedis/cothority/lib/coconet" - "github.com/dedis/cothority/lib/graphs" "github.com/dedis/crypto/abstract" - "github.com/dedis/crypto/edwards" - "github.com/dedis/crypto/edwards/ed25519" - "github.com/dedis/crypto/nist" - "io/ioutil" "sort" "strconv" "strings" @@ -53,13 +46,6 @@ ex.json type JSONPoint json.RawMessage -type Node struct { - Name string `json:"name"` - PriKey string `json:"prikey,omitempty"` - PubKey string `json:"pubkey,omitempty"` - Children []*Node `json:"children,omitempty"` -} - // HostConfig stores all of the relevant information of the configuration file. type HostConfig struct { SNodes []*sign.Node // an array of signing nodes @@ -67,37 +53,6 @@ type HostConfig struct { Dir *coconet.GoDirectory // the directory mapping hostnames to goPeers } -func ConfigFromTree(t *graphs.Tree, hosts []string) *ConfigFile { - cf := &ConfigFile{} - cf.Hosts = make([]string, len(hosts)) - copy(cf.Hosts, hosts) - cf.Tree = t - return cf -} - -func (hc *HostConfig) Verify() error { - // root := hc.SNodes[0] - // traverseTree(root, hc, publicKeyCheck) - fmt.Println("tree verified") - return nil -} - -func traverseTree(p *sign.Node, - hc *HostConfig, - f func(*sign.Node, *HostConfig) error) error { - if err := f(p, hc); err != nil { - return err - } - for _, cn := range p.Children(0) { - c := hc.Hosts[cn.Name()] - err := traverseTree(c, hc, f) - if err != nil { - return err - } - } - return nil -} - func (hc *HostConfig) String() string { b := bytes.NewBuffer([]byte{}) @@ -125,8 +80,8 @@ func (hc *HostConfig) String() string { bformatted := bytes.NewBuffer([]byte{}) err := json.Indent(bformatted, b.Bytes(), "", "\t") if err != nil { - fmt.Println(string(b.Bytes())) - fmt.Println("ERROR: ", err) + dbg.Lvl3(string(b.Bytes())) + dbg.Lvl3("ERROR:", err) } return string(bformatted.Bytes()) @@ -185,7 +140,7 @@ func max(a, b int) int { // config file. ConstructTree must be called AFTER populating the HostConfig with // ALL the possible hosts. 
func ConstructTree( - n *Node, + node *Tree, hc *HostConfig, parent string, suite abstract.Suite, @@ -196,9 +151,9 @@ func ConstructTree( // passes up its X_hat, and/or an error // get the name associated with this address - name, ok := nameToAddr[n.Name] + name, ok := nameToAddr[node.Name] if !ok { - fmt.Println("unknown name in address book:", n.Name) + dbg.Lvl3("unknown name in address book:", node.Name) return 0, errors.New("unknown name in address book") } @@ -211,7 +166,7 @@ func ConstructTree( // it can be backed by a nil pointer h, ok := hosts[name] if !ok { - fmt.Println("unknown host in tree:", name) + dbg.Lvl3("unknown host in tree:", name) return 0, errors.New("unknown host in tree") } @@ -220,29 +175,31 @@ func ConstructTree( var sn *sign.Node // if the JSON holds the fields field is set load from there - if len(n.PubKey) != 0 { + if len(node.PubKey) != 0 { // dbg.Lvl4("decoding point") - encoded, err := hex.DecodeString(string(n.PubKey)) + encoded, err := hex.DecodeString(string(node.PubKey)) if err != nil { - log.Error("failed to decode hex from encoded") + dbg.Error("failed to decode hex from encoded") return 0, err } pubkey = suite.Point() err = pubkey.UnmarshalBinary(encoded) if err != nil { - log.Error("failed to decode point from hex") + dbg.Error("failed to decode point from hex") return 0, err } + } + if len(node.PriKey) != 0 { // dbg.Lvl4("decoding point") - encoded, err = hex.DecodeString(string(n.PriKey)) + encoded, err := hex.DecodeString(string(node.PriKey)) if err != nil { - log.Error("failed to decode hex from encoded") + dbg.Error("failed to decode hex from encoded") return 0, err } prikey = suite.Secret() err = prikey.UnmarshalBinary(encoded) if err != nil { - log.Error("failed to decode point from hex") + dbg.Error("failed to decode point from hex") return 0, err } } @@ -268,7 +225,7 @@ func ConstructTree( pubkey = sn.PubKey } // dbg.Lvl4("pubkey:", sn.PubKey) - // dbg.Lvl4("given: ", pubkey) + // dbg.Lvl4("given:", pubkey) } // if the parent of this call is empty then this must be the root node if parent != "" && generate { @@ -276,15 +233,15 @@ func ConstructTree( h.AddParent(0, parent) } - // dbg.Lvl4("name: ", n.Name) - // dbg.Lvl4("prikey: ", prikey) - // dbg.Lvl4("pubkey: ", pubkey) + // dbg.Lvl4("name:", n.Name) + // dbg.Lvl4("prikey:", prikey) + // dbg.Lvl4("pubkey:", pubkey) height := 0 - for _, c := range n.Children { + for _, c := range node.Children { // connect this node to its children cname, ok := nameToAddr[c.Name] if !ok { - fmt.Println("unknown name in address book:", n.Name) + dbg.Lvl3("unknown name in address book:", node.Name) return 0, errors.New("unknown name in address book") } @@ -307,9 +264,9 @@ func ConstructTree( sn.Height = height } - // dbg.Lvl4("name: ", n.Name) - // dbg.Lvl4("final x_hat: ", x_hat) - // dbg.Lvl4("final pubkey: ", pubkey) + // dbg.Lvl4("name:", n.Name) + // dbg.Lvl4("final x_hat:", x_hat) + // dbg.Lvl4("final pubkey:", pubkey) return height, nil } @@ -320,7 +277,7 @@ var ipv4host = "NONE" func GetAddress() (string, error) { name, err := os.Hostname() if err != nil { - log.Error("Error Resolving Hostname:", err) + dbg.Error("Error Resolving Hostname:", err) return "", err } @@ -364,67 +321,44 @@ type ConfigOptions struct { } // run the given hostnames -func (hc *HostConfig) Run(stamper bool, signType sign.Type, hostnameSlice ...string) error { - dbg.Lvl3(hc.Hosts, "going to connect everything for", hostnameSlice) - hostnames := make(map[string]*sign.Node) - if hostnameSlice == nil { - hostnames = hc.Hosts - } 
else { - for _, h := range hostnameSlice { - sn, ok := hc.Hosts[h] - if !ok { - return errors.New("hostname given not in config file:" + h) - } - hostnames[h] = sn +func (hc *HostConfig) Run(stamper bool, signType sign.Type, hostname string) error { + dbg.Lvl3(hc.Hosts, "going to connect everything for", hostname) + node := hc.Hosts[hostname] + + node.Type = signType + dbg.Lvl3("Listening on", node.Host) + node.Host.Listen() + + var err error + // exponential backoff for attempting to connect to parent + startTime := time.Duration(200) + maxTime := time.Duration(2000) + for i := 0; i < 2000; i++ { + dbg.Lvl3(hostname, "attempting to connect to parent") + // the host should connect with the parent + err = node.Connect(0) + if err == nil { + // log.Infoln("hostconfig: connected to parent:") + break } - } - // set all hosts to be listening - open the port and connect to the channel - for _, sn := range hostnames { - sn.Type = signType - dbg.Lvl3("Listening on", sn.Host) - sn.Host.Listen() - } - - for h, sn := range hostnames { - var err error - // exponential backoff for attempting to connect to parent - startTime := time.Duration(200) - maxTime := time.Duration(2000) - for i := 0; i < 2000; i++ { - dbg.Lvl3("Attempting to connect to parent", h) - // the host should connect with the parent - err = sn.Connect(0) - if err == nil { - // log.Infoln("hostconfig: connected to parent:") - break - } - - time.Sleep(startTime * time.Millisecond) - startTime *= 2 - if startTime > maxTime { - startTime = maxTime - } - } - if err != nil { - log.Fatal(fmt.Sprintf("%s failed to connect to parent"), h) - //return errors.New("failed to connect") - } else { - dbg.Lvl3(fmt.Sprintf("Successfully connected to parent %s", h)) + time.Sleep(startTime * time.Millisecond) + startTime *= 2 + if startTime > maxTime { + startTime = maxTime } } - - // need to make sure network connections are setup properly first - // wait for a little bit for connections to establish fully - // get rid of waits they hide true bugs - // time.Sleep(1000 * time.Millisecond) + if err != nil { + dbg.Fatal(hostname, "failed to connect to parent") + //return errors.New("failed to connect") + } else { + dbg.Lvl3(hostname, "successfully connected to parent") + } if !stamper { // This will call the dispatcher in collectiveSigning for every request - dbg.Lvl4("Starting to listen for", hostnames) - for _, sn := range hostnames { - go sn.Listen() - } + dbg.Lvl4("Starting to listen for incoming stamp-requests on", hostname) + node.Listen() } return nil @@ -435,33 +369,15 @@ func (hc *HostConfig) Run(stamper bool, signType sign.Type, hostnameSlice ...str // complete hostnames to be used by the hosts. // LoadConfig loads a configuration file in the format specified above. It // populates a HostConfig with HostNode Hosts and goPeer Peers. -func LoadConfig(fname string, opts ...ConfigOptions) (*HostConfig, error) { - file, err := ioutil.ReadFile(fname) - if err != nil { - return nil, err - } - return LoadJSON(file, opts...) 
-} - -func LoadJSON(file []byte, optsSlice ...ConfigOptions) (*HostConfig, error) { +func LoadConfig(appHosts []string, appTree *Tree, suite abstract.Suite, optsSlice ...ConfigOptions) (*HostConfig, error) { opts := ConfigOptions{} if len(optsSlice) > 0 { opts = optsSlice[0] } hc := NewHostConfig() - var cf ConfigFileOld - err := json.Unmarshal(file, &cf) - if err != nil { - return hc, err - } - // TODO remove this duplicate check of tcp conn connT := GoC - if cf.Conn == "tcp" { - connT = TcpC - } - // options override file if opts.ConnType == "tcp" { connT = TcpC @@ -472,7 +388,7 @@ func LoadJSON(file []byte, optsSlice ...ConfigOptions) (*HostConfig, error) { nameToAddr := make(map[string]string) if connT == GoC { - for _, h := range cf.Hosts { + for _, h := range appHosts { if _, ok := hc.Hosts[h]; !ok { nameToAddr[h] = h // it doesn't make sense to only make 1 go host @@ -489,22 +405,23 @@ func LoadJSON(file []byte, optsSlice ...ConfigOptions) (*HostConfig, error) { localAddr := "" if opts.GenHosts { + var err error localAddr, err = GetAddress() if err != nil { return nil, err } } - for i, h := range cf.Hosts { + for i, h := range appHosts { addr := h if opts.GenHosts { p := strconv.Itoa(StartConfigPort) addr = localAddr + ":" + p - //dbg.Lvl4("created new host address: ", addr) + //dbg.Lvl4("created new host address:", addr) StartConfigPort += 10 } else if opts.Port != "" { - dbg.Lvl4("attempting to rewrite port: ", opts.Port) + dbg.Lvl4("attempting to rewrite port:", opts.Port) // if the port has been specified change the port hostport := strings.Split(addr, ":") dbg.Lvl4(hostport) @@ -534,21 +451,17 @@ func LoadJSON(file []byte, optsSlice ...ConfigOptions) (*HostConfig, error) { } } - - suite := edwards.NewAES128SHA256Ed25519(true) + //suite := edwards.NewAES128SHA256Ed25519(false) //suite := nist.NewAES128SHA256P256() - if opts.Suite != nil { - suite = opts.Suite - } rand := suite.Cipher([]byte("example")) - //fmt.Println("hosts", hosts) + //dbg.Lvl3("hosts", hosts) // default value = false - if err != nil { - log.Fatal(err) - } start := time.Now() if opts.NoTree == false { - _, err = ConstructTree(cf.Tree, hc, "", suite, rand, hosts, nameToAddr, opts) + _, err := ConstructTree(appTree, hc, "", suite, rand, hosts, nameToAddr, opts) + if err != nil { + dbg.Fatal("Couldn't construct tree:", err) + } } dbg.Lvl3("Timing for ConstructTree", time.Since(start)) if connT != GoC { @@ -561,7 +474,6 @@ func LoadJSON(file []byte, optsSlice ...ConfigOptions) (*HostConfig, error) { hostList = append(hostList, h) } - for _, sn := range hc.SNodes { sn.HostList = make([]string, len(hostList)) sortable := sort.StringSlice(hostList) @@ -572,21 +484,5 @@ func LoadJSON(file []byte, optsSlice ...ConfigOptions) (*HostConfig, error) { sn.SetHostList(0, sn.HostList) } - return hc, err -} - -// Helper functions that will return the suite used during the process from a string name -func GetSuite(suite string) abstract.Suite { - var s abstract.Suite - switch { - case suite == "nist256": - s = nist.NewAES128SHA256P256() - case suite == "nist512": - s = nist.NewAES128SHA256QR512() - case suite == "ed25519": - s = ed25519.NewAES128SHA256Ed25519(true) - default: - s = nist.NewAES128SHA256P256() - } - return s + return hc, nil } diff --git a/lib/graphs/tree.go b/lib/graphs/tree.go index 8a8887c537..df04559dd8 100644 --- a/lib/graphs/tree.go +++ b/lib/graphs/tree.go @@ -2,8 +2,9 @@ package graphs import ( "encoding/hex" - "fmt" + "strings" + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/crypto/abstract" ) 
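Since `LoadConfig` no longer reads a JSON file but takes the host list, a `graphs.Tree` and the crypto suite directly, a caller now looks roughly like the sketch below. This is a hedged example: the `ConfigOptions` defaults are assumed to be sufficient, the suite constructor is the one referenced in the removed code, and `CreateLocalTree` is the helper added in the next hunk of this diff.

```
package main

import (
	"github.com/dedis/cothority/lib/dbg"
	"github.com/dedis/cothority/lib/graphs"
	"github.com/dedis/crypto/edwards"
)

func main() {
	hosts := []string{"localhost:2000", "localhost:2010", "localhost:2020"}
	// Arrange the hosts in a tree with branching factor 2.
	tree := graphs.CreateLocalTree(hosts, 2)

	suite := edwards.NewAES128SHA256Ed25519(false)
	hc, err := graphs.LoadConfig(hosts, tree, suite)
	if err != nil {
		dbg.Fatal("Couldn't load config:", err)
	}
	dbg.Lvl1("Created", len(hc.SNodes), "signing nodes")
}
```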
@@ -18,6 +19,19 @@ type Tree struct { Children []*Tree `json:"children,omitempty"` } +func (t *Tree) FindByName(name string, depth int) (*Tree, int) { + if t.Name == name { + return t, depth + } + for _, c := range t.Children { + ct, d := c.FindByName(name, depth+1) + if ct != nil { + return ct, d + } + } + return nil, depth +} + func (t *Tree) TraverseTree(f func(*Tree)) { f(t) for _, c := range t.Children { @@ -25,6 +39,37 @@ func (t *Tree) TraverseTree(f func(*Tree)) { } } +// Simply organizes a list of nodes into a tree with a branching factor = bf +// bfs style +func CreateLocalTree(nodeNames []string, bf int) *Tree { + if bf < 1 { + panic("Branching Factor < 1 in CreateLocalTree:/") + } + var root *Tree = new(Tree) + root.Name = nodeNames[0] + var index int = 1 + bfs := make([]*Tree, 1) + bfs[0] = root + for len(bfs) > 0 && index < len(nodeNames) { + t := bfs[0] + t.Children = make([]*Tree, 0) + lbf := 0 + // create space for enough children + // init them + for lbf < bf && index < len(nodeNames) { + child := new(Tree) + child.Name = nodeNames[index] + // append the children to the list of trees to visit + bfs = append(bfs, child) + t.Children = append(t.Children, child) + index += 1 + lbf += 1 + } + bfs = bfs[1:] + } + return root +} + // generate keys for the tree func (t *Tree) GenKeys(suite abstract.Suite, rand abstract.Cipher) { t.TraverseTree(func(t *Tree) { @@ -37,11 +82,18 @@ func (t *Tree) GenKeys(suite abstract.Suite, rand abstract.Cipher) { }) } +func (t *Tree) Visit(fn func(*Tree)) { + fn(t) + for _, c := range t.Children { + c.Visit(fn) + } +} + func PrintTreeNode(t *Tree) { - fmt.Println(t.Name) + dbg.Lvl3(t.Name) for _, c := range t.Children { - fmt.Println("\t", c.Name) + dbg.Lvl3("\t", c.Name) } } @@ -55,3 +107,11 @@ func Depth(t *Tree) int { } return md + 1 } + +func (t *Tree) String(depth int) string { + str := strings.Repeat("\t", depth) + t.Name + "\n" + for _, c := range t.Children { + str += c.String(depth + 1) + } + return str + "\n" +} diff --git a/lib/graphs/tree_test.go b/lib/graphs/tree_test.go new file mode 100644 index 0000000000..a9ecc5bf50 --- /dev/null +++ b/lib/graphs/tree_test.go @@ -0,0 +1,13 @@ +package graphs + +import ( + "testing" +) + +func TestLocalTree(test *testing.T) { + nodes := []string{"1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th"} + test.Logf("Traversing tree for nodes %v \n", nodes) + t := CreateLocalTree(nodes, 3) + test.Logf(t.String(0)) + test.Logf("Depth = %d\n", Depth(t)) +} diff --git a/lib/logutils/logutils.go b/lib/logutils/logutils.go index 2b7d3696b8..09e4cbaaf2 100644 --- a/lib/logutils/logutils.go +++ b/lib/logutils/logutils.go @@ -32,7 +32,7 @@ func File() string { short := file for i := len(file) - 1; i > 0; i-- { if file[i] == '/' { - short = file[i + 1:] + short = file[i+1:] break } } @@ -42,7 +42,7 @@ func File() string { func (lh *LoggerHook) Connect() { hostport := lh.HostPort - retry: +retry: addr := "ws://" + hostport + "/_log" ws, err := websocket.Dial(addr, "", "http://localhost/") if err != nil { @@ -60,7 +60,7 @@ func NewLoggerHook(hostport, host, app string) (*LoggerHook, error) { log.SetFormatter(&JSONFormatter{host, app}) if hostport != "" { - retry: + retry: addr := "ws://" + hostport + "/_log" ws, err := websocket.Dial(addr, "", "http://localhost/") if err != nil { @@ -75,7 +75,7 @@ func NewLoggerHook(hostport, host, app string) (*LoggerHook, error) { func NewLoggerHookSimple(host, app string) { lh, err := NewLoggerHook("", host, app) - if err != nil{ + if err != nil { log.AddHook(lh) } } @@ 
-116,7 +116,7 @@ type JSONFormatter struct { } func (f *JSONFormatter) Format(entry *log.Entry) ([]byte, error) { - data := make(log.Fields, len(entry.Data) + 5) + data := make(log.Fields, len(entry.Data)+5) for k, v := range entry.Data { data[k] = v } diff --git a/lib/monitor/measure.go b/lib/monitor/measure.go new file mode 100644 index 0000000000..d48cead51f --- /dev/null +++ b/lib/monitor/measure.go @@ -0,0 +1,194 @@ +/* + * Time-measurement functions. + * + * Usage: + * ```measure := monitor.NewMeasure()``` + * ```// Do some calculations``` + * ```measure.MeasureWall("CPU on calculations")``` + */ + +package monitor + +import ( + "encoding/json" + "fmt" + "github.com/dedis/cothority/lib/dbg" + "net" + "syscall" + "time" +) + +// Sink is the server address where all measures are transmitted to for +// further analysis. +var sink string + +// Structs are encoded through a json encoder. +var encoder *json.Encoder +var connection net.Conn + +// Keeps track if a measure is enabled (true) or not (false). If disabled, +// measures are not sent to the monitor. Use EnableMeasure(bool) to toggle +// this variable. +var enabled = true + +// Enables / Disables a measure. +func EnableMeasure(b bool) { + if b { + dbg.Lvl3("Monitor: Measure enabled") + } else { + dbg.Lvl3("Monitor: Measure disabled") + } + enabled = b +} + +// ConnectSink connects to the given endpoint and initialises a json +// encoder. It can be the address of a proxy or a monitoring process. +// Returns an error if it could not connect to the endpoint. +func ConnectSink(addr string) error { + if encoder != nil { + return nil + } + dbg.Lvl3("Connecting to:", addr) + conn, err := net.Dial("tcp", addr) + if err != nil { + return err + } + dbg.Lvl3("Connected to sink:", addr) + sink = addr + connection = conn + encoder = json.NewEncoder(conn) + return nil +} + +func StopSink() { + connection.Close() + encoder = nil +} + +// Only sends a ready-string +func Ready(addr string) error { + if encoder == nil { + dbg.Lvl3("Connecting to sink", addr) + err := ConnectSink(addr) + if err != nil { + return err + } + } + dbg.Lvl3("Sending ready-signal") + send(Measure{Name: "ready"}) + return nil +} + +// Returns how many peers are ready +func GetReady(addr string) (*Stats, error) { + if encoder == nil { + err := ConnectSink(addr) + if err != nil { + return nil, err + } + } + dbg.Lvl3("Getting ready_count") + send(Measure{Name: "ready_count"}) + decoder := json.NewDecoder(connection) + var s Stats + err := decoder.Decode(&s) + if err != nil { + return nil, err + } + dbg.Lvlf3("Received stats with %+v", s) + return &s, nil +} + +// Send transmits the given struct over the network. +func send(v interface{}) { + if encoder == nil { + panic(fmt.Errorf("Monitor's sink connection not initalized. Can not send any measures")) + } + if !enabled { + return + } + for wait := 500; wait < 1000; wait += 100 { + if err := encoder.Encode(v); err == nil { + return + } else { + dbg.Lvl1("Couldn't send to monitor-sink:", err) + time.Sleep(time.Duration(wait) * time.Millisecond) + } + } + panic(fmt.Errorf("No contact to monitor-sink possible!")) +} + +// Measure holds the different values that can be computed for a measure. +// Measures are sent for further processing from the client to the monitor. +type Measure struct { + Name string + WallTime float64 + CPUTimeUser float64 + CPUTimeSys float64 + // These are used for communicating with the clients + Sender string + Ready int + // Since we send absolute timing values, we need to store our reference too. 
+ lastWallTime time.Time + autoReset bool +} + +// NewMeasure creates a new measure struct and enables automatic reset after +// each Measure call. +func NewMeasure(name string) *Measure { + m := &Measure{Name: name} + m.enableAutoReset(true) + return m +} + +// Takes a measure, sends it to the monitor and resets all timers. +func (m *Measure) Measure() { + // Wall time measurement + m.WallTime = float64(time.Since(m.lastWallTime)) / 1.0e9 + // CPU time measurement + m.CPUTimeSys, m.CPUTimeUser = getDiffRTime(m.CPUTimeSys, m.CPUTimeUser) + // send data + send(m) + // reset timers + m.reset() +} + +// Enables / Disables automatic reset of a measure. If called with true, the +// measure is reset. +func (m *Measure) enableAutoReset(b bool) { + m.autoReset = b + m.reset() +} + +// Resets the timers in a measure to 'now'. +func (m *Measure) reset() { + if m.autoReset { + m.CPUTimeSys, m.CPUTimeUser = GetRTime() + m.lastWallTime = time.Now() + } +} + +// Prints a message to end the logging. +func End() { + send(Measure{Name: "end"}) + connection.Close() +} + +// Converts microseconds to seconds. +func iiToF(sec int64, usec int64) float64 { + return float64(sec) + float64(usec)/1000000.0 +} + +// Returns the sytem and the user time so far. +func GetRTime() (tSys, tUsr float64) { + rusage := &syscall.Rusage{} + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + s, u := rusage.Stime, rusage.Utime + return iiToF(int64(s.Sec), int64(s.Usec)), iiToF(int64(u.Sec), int64(u.Usec)) +} + +// Returns the difference of the given system- and user-time. +func getDiffRTime(tSys, tUsr float64) (tDiffSys, tDiffUsr float64) { + nowSys, nowUsr := GetRTime() + return nowSys - tSys, nowUsr - tUsr +} diff --git a/lib/monitor/monitor.go b/lib/monitor/monitor.go new file mode 100644 index 0000000000..476cc50484 --- /dev/null +++ b/lib/monitor/monitor.go @@ -0,0 +1,194 @@ +// Monitor package handle the logging, collection and computation of +// statisticals data. Every application can send some Measure (for the moment, +// we mostly measure the CPU time but it can be applied later for any kind of +// measures). The Monitor receives them and update a Stats struct. This Statss +// struct can hold many different kinds of Measurement (the measure of an +// specific action such as "round time" or "verify time" etc). Theses +// measurements contains Values which compute the actual min/max/dev/avg values. +// There exists the Proxy file so we can have a Proxy relaying Measure from +// clients to the Monitor listening. An starter feature is also the DataFilter +// which can apply somes filtering rules to the data before making any +// statistics about them. +package monitor + +import ( + "encoding/json" + "fmt" + "github.com/dedis/cothority/lib/dbg" + "io" + "net" + "strings" + "sync" +) + +// This file handles the collection of measurements, aggregates them and +// write CSV file reports + +// listen is the address where to listen for the monitor. The endpoint can be a +// monitor.Proxy or a direct connection with measure.go +var Sink = "0.0.0.0" +var SinkPort = "10003" + +// Monitor struct is used to collect measures and make the statistics about +// them. It takes a stats object so it update that in a concurrent-safe manner +// for each new measure it receives. 
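From a client's point of view the measuring API boils down to three calls: connect to the sink once, create named measures, and send them. A minimal sketch following the same pattern as the tests further down (the address and the measure name are placeholders):

```
package main

import (
	"github.com/dedis/cothority/lib/dbg"
	"github.com/dedis/cothority/lib/monitor"
)

func main() {
	// Connect to the monitor (or to a proxy) on the default sink port.
	if err := monitor.ConnectSink("localhost:10003"); err != nil {
		dbg.Fatal("Couldn't reach the monitor:", err)
	}

	round := monitor.NewMeasure("round")
	// ... do the work that should be measured ...
	round.Measure() // sends wall-, user- and system-time, then resets the timers

	monitor.End() // tells the monitor this client is done
}
```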
+type Monitor struct { + listener net.Listener + + // Current conections + conns map[string]net.Conn + // and the mutex to play with it + mutexConn sync.Mutex + + // Current stats + stats *Stats + // and the mutex to play with it + mutexStats sync.Mutex + + // channel to give new measures + measures chan Measure + + // channel to notify the end of a connection + // send the name of the connection when finishd + done chan string +} + +// NewMonitor returns a new monitor given the stats +func NewMonitor(stats *Stats) Monitor { + return Monitor{ + conns: make(map[string]net.Conn), + stats: stats, + mutexStats: sync.Mutex{}, + measures: make(chan Measure), + done: make(chan string), + } +} + +// Monitor will start listening for incoming connections on this address +// It needs the stats struct pointer to update when measures come +// Return an error if something went wrong during the connection setup +func (m *Monitor) Listen() error { + ln, err := net.Listen("tcp", Sink+":"+SinkPort) + if err != nil { + return fmt.Errorf("Error while monitor is binding address: %v", err) + } + m.listener = ln + dbg.Lvl2("Monitor listening for stats on", Sink, ":", SinkPort) + finished := false + go func() { + for { + if finished { + break + } + conn, err := ln.Accept() + if err != nil { + operr, ok := err.(*net.OpError) + // We cant accept anymore we closed the listener + if ok && operr.Op == "accept" { + break + } + dbg.Lvl2("Error while monitor accept connection:", operr) + continue + } + dbg.Lvl3("Monitor: new connection from", conn.RemoteAddr().String()) + m.mutexConn.Lock() + m.conns[conn.RemoteAddr().String()] = conn + go m.handleConnection(conn) + m.mutexConn.Unlock() + } + }() + for !finished { + select { + // new stats + case measure := <-m.measures: + m.update(measure) + // end of a peer conn + case peer := <-m.done: + dbg.Lvl3("Connections left:", len(m.conns)) + m.mutexConn.Lock() + delete(m.conns, peer) + m.mutexConn.Unlock() + // end of monitoring, + if len(m.conns) == 0 { + m.listener.Close() + finished = true + break + } + } + } + dbg.Lvl2("Monitor finished waiting !") + m.conns = make(map[string]net.Conn) + return nil +} + +// StopMonitor will close every connections it has +// And will stop updating the stats +func (m *Monitor) Stop() { + dbg.Lvl2("Monitor Stop") + m.listener.Close() + m.mutexConn.Lock() + for _, c := range m.conns { + c.Close() + } + m.mutexConn.Unlock() + +} + +// handleConnection will decode the data received and aggregates it into its +// stats +func (m *Monitor) handleConnection(conn net.Conn) { + dec := json.NewDecoder(conn) + enc := json.NewEncoder(conn) + nerr := 0 + for { + measure := Measure{} + if err := dec.Decode(&measure); err != nil { + // if end of connection + if err == io.EOF { + break + } + // otherwise log it + dbg.Lvl2("Error monitor decoding from", conn.RemoteAddr().String(), ":", err) + nerr += 1 + if nerr > 1 { + dbg.Lvl2("Monitor: too many errors from", conn.RemoteAddr().String(), ": Abort.") + break + } + } + + dbg.Lvlf3("Monitor: received a Measure from %s: %+v", conn.RemoteAddr().String(), measure) + // Special case where the measurement is indicating a FINISHED step + switch strings.ToLower(measure.Name) { + case "end": + dbg.Lvl3("Finishing monitor") + m.done <- conn.RemoteAddr().String() + case "ready": + m.stats.Ready++ + dbg.Lvl3("Increasing counter to", m.stats.Ready) + case "ready_count": + dbg.Lvl3("Sending stats") + m_send := measure + m_send.Ready = m.stats.Ready + enc.Encode(m_send) + default: + m.measures <- measure + } + } +} + +// 
updateMeasures will add that specific measure to the global stats +// in a concurrently safe manner +func (m *Monitor) update(meas Measure) { + m.mutexStats.Lock() + // updating + m.stats.Update(meas) + m.mutexStats.Unlock() +} + +// Stats returns the updated stats in a concurrent-safe manner +func (m *Monitor) Stats() *Stats { + m.mutexStats.Lock() + s := m.stats + m.mutexStats.Unlock() + return s +} diff --git a/lib/monitor/monitor_test.go b/lib/monitor/monitor_test.go new file mode 100644 index 0000000000..d08a48f5c1 --- /dev/null +++ b/lib/monitor/monitor_test.go @@ -0,0 +1,110 @@ +package monitor + +import ( + "bytes" + "fmt" + "github.com/dedis/cothority/lib/dbg" + "strings" + "testing" + "time" +) + +func TestMonitor(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 2) + m := make(map[string]string) + m["machines"] = "1" + m["ppm"] = "1" + stat := NewStats(m) + fresh := stat.String() + // First set up monitor listening + mon := NewMonitor(stat) + go mon.Listen() + time.Sleep(100 * time.Millisecond) + + // Then measure + err := ConnectSink("localhost:" + SinkPort) + if err != nil { + t.Error(fmt.Sprintf("Error starting monitor: %s", err)) + return + } + + meas := NewMeasure("round") + meas.Measure() + time.Sleep(200 * time.Millisecond) + meas.Measure() + End() + time.Sleep(100 * time.Millisecond) + updated := stat.String() + if updated == fresh { + t.Error("Stats not updated ?") + } + + StopSink() +} + +func TestReadyNormal(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 3) + m := make(map[string]string) + m["machines"] = "1" + m["ppm"] = "1" + m["Ready"] = "0" + stat := NewStats(m) + if stat.Ready != 0 { + t.Fatal("Stats should start with ready==0") + } + // First set up monitor listening + mon := NewMonitor(stat) + go mon.Listen() + time.Sleep(100 * time.Millisecond) + host := "localhost:" + SinkPort + if stat.Ready != 0 { + t.Fatal("Stats should have ready==0 after start of Monitor") + } + + s, err := GetReady(host) + if err != nil { + t.Fatal("Couldn't get number of peers:", err) + } + if s.Ready != 0 { + t.Fatal("Stats.Ready != 0") + } + + err = Ready(host) + if err != nil { + t.Errorf("Error starting monitor: %s", err) + return + } + + s, err = GetReady(host) + if err != nil { + t.Fatal("Couldn't get number of peers:", err) + } + if s.Ready != 1 { + t.Fatal("Stats.Ready != 1") + } + + End() + StopSink() +} + +func TestKeyOrder(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 3) + m := make(map[string]string) + m["machines"] = "1" + m["ppm"] = "1" + m["bf"] = "2" + m["rounds"] = "3" + + for i := 0; i < 20; i++ { + // First set up monitor listening + stat := NewStats(m) + NewMonitor(stat) + time.Sleep(100 * time.Millisecond) + b := bytes.NewBuffer(make([]byte, 1024)) + stat.WriteHeader(b) + dbg.Lvl2("Order:", strings.TrimSpace(b.String())) + if strings.Contains(b.String(), "rounds, bf") { + t.Fatal("Order of fields is not correct") + } + } +} diff --git a/lib/monitor/proxy.go b/lib/monitor/proxy.go new file mode 100644 index 0000000000..616c9d4c7f --- /dev/null +++ b/lib/monitor/proxy.go @@ -0,0 +1,187 @@ +package monitor + +import ( + "encoding/json" + "fmt" + "github.com/dedis/cothority/lib/dbg" + "io" + "net" + "sync/atomic" +) + +// Implements a simple proxy +// A <-> D <-> B +// D is the proxy. 
It will listen for incoming connections on the side of B +// And will connect to A + +// serverConn is the connection object to the server +var serverConn net.Conn + +// to write back the measure to the server +var serverEnc *json.Encoder +var serverDec *json.Decoder +var readyCount int64 + +// proxy connections opened +var proxyConns map[string]*json.Encoder + +var proxyDone chan bool + +func init() { + proxyDone = make(chan bool) +} + +// Proxy will launch a routine that waits for input connections +// It takes a redirection address soas to where redirect incoming packets +// Proxy will listen on Sink:SinkPort variables so that the user do not +// differentiate between connecting to a proxy or directly to the sink +// It will panic if it can not contact the server or can not bind to the address +func Proxy(redirection string) { + // Connect to the sink + if err := connectToSink(redirection); err != nil { + panic(err) + } + dbg.Lvl2("Proxy connected to sink", redirection) + // Here it listens the same way monitor.go would + // usually 0.0.0.0:4000 + ln, err := net.Listen("tcp", Sink+":"+SinkPort) + if err != nil { + dbg.Fatalf("Error while binding proxy to addr %s: %v", Sink+":"+SinkPort, err) + } + dbg.Lvl2("Proxy listening on", Sink+":"+SinkPort) + var newConn = make(chan bool) + var closeConn = make(chan bool) + var finished = false + proxyConns = make(map[string]*json.Encoder) + readyCount = 0 + + // Listen for incoming connections + go func() { + for finished == false { + conn, err := ln.Accept() + if err != nil { + operr, ok := err.(*net.OpError) + // the listener is closed + if ok && operr.Op == "accept" { + break + } + dbg.Lvl1("Error proxy accepting connection:", err) + continue + } + dbg.Lvl3("Proxy accepting incoming connection from:", conn.RemoteAddr().String()) + newConn <- true + proxyConns[conn.RemoteAddr().String()] = json.NewEncoder(conn) + go proxyConnection(conn, closeConn) + } + }() + + // Listen for replies and give them further + go func() { + for finished == false { + m := Measure{} + err := serverDec.Decode(&m) + if err != nil { + return + } + dbg.Lvlf3("Proxy received %+v", m) + c, ok := proxyConns[m.Sender] + if !ok { + return + } + dbg.Lvl3("Found connection") + c.Encode(m) + } + }() + + // notify every new connection and every end of connection. When all + // connections are closed, send an "end" measure to the sink. 
+ var nconn int + for finished == false { + select { + case <-newConn: + nconn += 1 + case <-closeConn: + nconn -= 1 + if nconn == 0 { + // everything is finished + serverEnc.Encode(Measure{Name: "end"}) + serverConn.Close() + ln.Close() + finished = true + break + } + } + } +} + +// connectToSink starts the connection with the server +func connectToSink(redirection string) error { + conn, err := net.Dial("tcp", redirection) + if err != nil { + return fmt.Errorf("Proxy connection to server %s failed: %v", redirection, err) + } + serverConn = conn + serverEnc = json.NewEncoder(conn) + serverDec = json.NewDecoder(conn) + return nil +} + +// The core of the file: read any input from the connection and outputs it into +// the server connection +func proxyConnection(conn net.Conn, done chan bool) { + dec := json.NewDecoder(conn) + nerr := 0 + for { + m := Measure{} + // Receive data + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + dbg.Lvl1("Error receiving data from", conn.RemoteAddr().String(), ":", err) + nerr += 1 + if nerr > 1 { + dbg.Lvl1("Too many errors from", conn.RemoteAddr().String(), ": Abort connection") + break + } + } + dbg.Lvl3("Proxy received", m) + + // Implement our own ready-count, so it doesn't have to go through the + // main monitor which might be far away. + switch m.Name { + case "ready": + atomic.AddInt64(&readyCount, 1) + case "ready_count": + m.Ready = int(readyCount) + err := json.NewEncoder(conn).Encode(m) + if err != nil { + dbg.Lvl2("Couldn't send ready-result back to client") + break + } + default: + // Proxy data - add who is sending, as we only have one channel + // to the server + m.Sender = conn.RemoteAddr().String() + if err := serverEnc.Encode(m); err != nil { + dbg.Lvl2("Error proxying data :", err) + break + } + if m.Name == "end" { + // the end + dbg.Lvl2("Proxy detected end of measurement. Closing connection.") + break + } + } + } + conn.Close() + done <- true +} + +// proxyDataServer send the data to the server... 
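To see how the proxy fits between the clients and the monitor, the following sketch mirrors the wiring used in `proxy_test.go` below: the monitor keeps the default sink port, the proxy is moved to another port and forwards everything it receives, and the client connects to the proxy exactly as it would to the monitor. Ports and sleeps are illustrative only.

```
package main

import (
	"time"

	"github.com/dedis/cothority/lib/dbg"
	"github.com/dedis/cothority/lib/monitor"
)

func main() {
	// The collecting side: a monitor listening on the default sink port.
	stats := monitor.NewStats(map[string]string{"machines": "1", "ppm": "1"})
	mon := monitor.NewMonitor(stats)
	go mon.Listen()
	time.Sleep(100 * time.Millisecond)

	// The proxy binds Sink:SinkPort itself, so for a local test it is
	// moved to port 8000 and told to forward to the real monitor.
	monitorPort := monitor.SinkPort
	monitor.SinkPort = "8000"
	go monitor.Proxy("localhost:" + monitorPort)
	time.Sleep(100 * time.Millisecond)

	// A client now talks to the proxy as if it were the monitor.
	if err := monitor.ConnectSink("localhost:" + monitor.SinkPort); err != nil {
		dbg.Fatal("Couldn't reach the proxy:", err)
	}
	m := monitor.NewMeasure("setup")
	m.Measure()
	monitor.End()
}
```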
+func proxyDataServer(data []byte) { + _, err := serverConn.Write(data) + if err != nil { + panic(fmt.Errorf("Error proxying data to server: %v", err)) + } +} diff --git a/lib/monitor/proxy_test.go b/lib/monitor/proxy_test.go new file mode 100644 index 0000000000..c401d57023 --- /dev/null +++ b/lib/monitor/proxy_test.go @@ -0,0 +1,132 @@ +package monitor + +import ( + "fmt" + "github.com/dedis/cothority/lib/dbg" + "testing" + "time" +) + +func TestProxy(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 3) + m := make(map[string]string) + m["machines"] = "1" + m["ppm"] = "1" + m["filter_round"] = "100" + stat := NewStats(m) + fresh := stat.String() + // First set up monitor listening + monitor := NewMonitor(stat) + done := make(chan bool) + go func() { + monitor.Listen() + done <- true + }() + time.Sleep(100 * time.Millisecond) + // Then setup proxy + // change port so the proxy does not listen to the same + // than the original monitor + oldSink := SinkPort + SinkPort = "8000" + // proxy listen to 0.0.0.0:8000 & redirect to + // localhost:4000 + go Proxy("localhost:" + oldSink) + + time.Sleep(100 * time.Millisecond) + // Then measure + proxyAddr := "localhost:" + SinkPort + err := ConnectSink(proxyAddr) + if err != nil { + t.Error(fmt.Sprintf("Can not connect to proxy : %s", err)) + return + } + + meas := NewMeasure("setup") + meas.Measure() + time.Sleep(100 * time.Millisecond) + meas.Measure() + + s, err := GetReady(proxyAddr) + if err != nil { + t.Error("Couldn't get stats from proxy") + } + if s.Ready != 0 { + t.Error("stats.Ready should be 0") + } + Ready(proxyAddr) + s, err = GetReady(proxyAddr) + if err != nil { + t.Error("Couldn't get stats from proxy") + } + if s.Ready != 1 { + t.Error("stats.Ready should be 1") + } + + SinkPort = oldSink + End() + StopSink() + select { + case <-done: + s := monitor.Stats() + s.Collect() + if s.String() == fresh { + t.Error("stats not updated?") + } + return + case <-time.After(2 * time.Second): + t.Error("Monitor not finished") + } +} + +func TestReadyProxy(t *testing.T) { + dbg.TestOutput(testing.Verbose(), 3) + m := make(map[string]string) + m["machines"] = "1" + m["ppm"] = "1" + stat := NewStats(m) + // First set up monitor listening + monitor := NewMonitor(stat) + done := make(chan bool) + go func() { + monitor.Listen() + done <- true + }() + time.Sleep(100 * time.Millisecond) + // Then setup proxy + // change port so the proxy does not listen to the same + // than the original monitor + oldSink := SinkPort + SinkPort = "8000" + // proxy listen to 0.0.0.0:8000 & redirect to + // localhost:4000 + go Proxy("localhost:" + oldSink) + + time.Sleep(100 * time.Millisecond) + // Then measure + proxyAddr := "localhost:" + SinkPort + err := ConnectSink(proxyAddr) + if err != nil { + t.Error(fmt.Sprintf("Can not connect to proxy : %s", err)) + return + } + + s, err := GetReady(proxyAddr) + if err != nil { + t.Error("Couldn't get stats from proxy") + } + if s.Ready != 0 { + t.Error("stats.Ready should be 0") + } + Ready(proxyAddr) + s, err = GetReady(proxyAddr) + if err != nil { + t.Error("Couldn't get stats from proxy") + } + if s.Ready != 1 { + t.Error("stats.Ready should be 1") + } + + SinkPort = oldSink + End() + StopSink() +} diff --git a/lib/monitor/stats.go b/lib/monitor/stats.go new file mode 100644 index 0000000000..87494ec52d --- /dev/null +++ b/lib/monitor/stats.go @@ -0,0 +1,472 @@ +package monitor + +import ( + "fmt" + "github.com/dedis/cothority/lib/dbg" + "github.com/montanaflynn/stats" + "io" + "math" + "regexp" + "sort" + "strconv" + 
"strings" +) + +// Stats contains all structures that are related to the computations of stats +// such as Values (compute the mean/min/max/...), Measurements ( aggregation of +// Values), Stats (collection of measurements) and DataFilter which is used to +// apply some filtering before any statistics is done. + +// ExtraFields in a RunConfig argument that we may want to parse if present +var extraFields = [...]string{"bf", "rate", "stampratio"} + +// Stats holds the different measurements done +type Stats struct { + // How many peers do we have + Peers int + // How many peers per machine do we use + PPM int // PeerPerMachine + // How many machines do we have + Machines int + // How many peers are ready + Ready int + + // Additionals fields that may appears in the resulting CSV + // The additionals fields are created when creating the stats out of a + // running config. It will try to read some known fields such as "depth" or + // "bf" (branching factor) and add then to its struct + Additionals map[string]int + addKeys []string + // The measures we have and the keys ordered + measures map[string]*Measurement + keys []string + + // ValuesWritten is to know wether we have already written some values or + // not. If yes, we can make sure we dont write new measurements otherwise + // the CSV would be garbage + valuesWritten bool + // The filter used to filter out abberant data + filter DataFilter +} + +// Return a NewStats with some fields extracted from the platform run config +// It enforces the default set of measure to do. +func NewStats(rc map[string]string) *Stats { + s := new(Stats).NewStats() + s.readRunConfig(rc) + return s +} + +// Read a config file and fills up some fields for Stats struct +func (s *Stats) readRunConfig(rc map[string]string) { + if machs, err := strconv.Atoi(rc["machines"]); err != nil { + dbg.Fatal("Can not create stats from RunConfig with no machines") + } else { + s.Machines = machs + } + if ppm, err := strconv.Atoi(rc["ppm"]); err != nil { + dbg.Fatal("Can not create stats from RunConfig with no ppm") + } else { + s.PPM = ppm + } + rc2 := make(map[string]string) + for k, v := range rc { + if k != "machines" && k != "ppm" { + rc2[k] = v + } + } + s.Peers = s.Machines * s.PPM + // Sort rc2, so the output is always the same + rc2_ids := make([]string, 0) + for k := range rc2 { + rc2_ids = append(rc2_ids, k) + } + sort.Sort(sort.StringSlice(rc2_ids)) + // Add ALL extra fields + for _, k := range rc2_ids { + v := rc2[k] + if ef, err := strconv.Atoi(v); err != nil { + continue + } else { + s.Additionals[k] = ef + s.addKeys = append(s.addKeys, k) + } + } + // let the filter figure out itself what it is supposed to be doing + s.filter = NewDataFilter(rc) +} + +// Returns a new stats-structure with all necessary initialisations. 
+func (s *Stats) NewStats() *Stats { + s.measures = make(map[string]*Measurement) + s.keys = make([]string, 0) + s.Additionals = make(map[string]int) + s.addKeys = make([]string, 0) + s.valuesWritten = false + return s +} + +// WriteHeader will write the header to the writer +func (s *Stats) WriteHeader(w io.Writer) { + // write basic info + fmt.Fprintf(w, "Peers, ppm, machines") + // write additionals fields + for _, k := range s.addKeys { + if _, ok := s.Additionals[k]; ok { + fmt.Fprintf(w, ", %s", k) + } + } + // Write the values header + for _, k := range s.keys { + fmt.Fprintf(w, ", ") + m := s.measures[k] + m.WriteHeader(w) + } + fmt.Fprintf(w, "\n") +} + +// WriteValues will write the values to the specified writer +func (s *Stats) WriteValues(w io.Writer) { + // by default + s.Collect() + // write basic info + fmt.Fprintf(w, "%d, %d, %d", s.Peers, s.PPM, s.Machines) + // write additionals fields + for _, k := range s.addKeys { + v, ok := s.Additionals[k] + if ok { + fmt.Fprintf(w, ", %d", v) + } + } + // write the values + for _, k := range s.keys { + fmt.Fprintf(w, ", ") + m := s.measures[k] + m.WriteValues(w) + } + fmt.Fprintf(w, "\n") + s.valuesWritten = true +} + +// AverageStats will make an average of the given stats +func AverageStats(stats []Stats) Stats { + if len(stats) < 1 { + return Stats{} + } + s := new(Stats).NewStats() + s.Machines = stats[0].Machines + s.PPM = stats[0].PPM + s.Peers = stats[0].Peers + s.Additionals = stats[0].Additionals + s.addKeys = stats[0].addKeys + s.keys = stats[0].keys + // Average + for _, k := range s.keys { + // Collect measurements for a given key + measurements := make([]Measurement, len(stats)) + for i, stat := range stats { + sub, ok := stat.measures[k] + if !ok { + continue + } + measurements[i] = *sub + } + // make the average + avg := AverageMeasurements(measurements) + s.measures[k] = &avg + } + return *s +} + +// Update will update the Stats with this given measure +func (s *Stats) Update(m Measure) { + var meas *Measurement + meas, ok := s.measures[m.Name] + if !ok { + // if we already written some values, we can not take new ones + if s.valuesWritten { + dbg.Lvl2("Stats Update received unknown type of measure:", m.Name) + return + } + meas = NewMeasurement(m.Name, s.filter) + s.measures[m.Name] = meas + s.keys = append(s.keys, m.Name) + } + meas.Update(m) +} + +// Returns an overview of the stats - not complete data returned! +func (s *Stats) String() string { + var str string + for _, v := range s.measures { + str += fmt.Sprintf("%v", v) + } + return fmt.Sprintf("{Stats: Peers %d, Measures: %s}", s.Peers, str) +} + +// Collect make the final computations before stringing or writing. +// Autmatically done in other methods anyway. +func (s *Stats) Collect() { + for _, v := range s.measures { + v.Collect() + } +} + +// DataFilter is used to process data before making any statistics about them +type DataFilter struct { + // percentiles maps the measurements name to the percentile we need to take + // to filter thoses measuremements with the percentile + percentiles map[string]float64 +} + +// NewDataFilter returns a new data filter initialized with the rights values +// taken out from the run config. If absent, will take defaults values. 
+// Keys expected are: +// discard_measurementname = perc => will take the lower and upper percentile = +// perc +// discard_measurementname = lower,upper => will take different percentiles +func NewDataFilter(config map[string]string) DataFilter { + df := DataFilter{ + percentiles: make(map[string]float64), + } + reg, err := regexp.Compile("filter_(\\w+)") + if err != nil { + dbg.Lvl1("DataFilter: Error compiling regexp:", err) + return df + } + // analyse the each entry + for k, v := range config { + if measure := reg.FindString(k); measure == "" { + continue + } else { + // this value must be filtered by how many ? + perc, err := strconv.ParseFloat(v, 64) + if err != nil { + dbg.Lvl1("DataFilter: Cannot parse value for filter measure:", measure) + continue + } + measure = strings.Replace(measure, "filter_", "", -1) + df.percentiles[measure] = perc + } + } + dbg.Lvl3("Filtering:", df.percentiles) + return df +} + +// Filter out a serie of values +func (df *DataFilter) Filter(measure string, values []float64) []float64 { + // do we have a filter for this measure ? + if _, ok := df.percentiles[measure]; !ok { + return values + } + // Compute the percentile value + max, err := stats.PercentileNearestRank(values, df.percentiles[measure]) + if err != nil { + dbg.Lvl2("Monitor: Error filtering data:", err) + return values + } + + // Find the index from where to filter + maxIndex := -1 + for i, v := range values { + if v > max { + maxIndex = i + } + } + // check if we foud something to filter out + if maxIndex == -1 { + dbg.Lvl3("Filtering: nothing to filter for", measure) + return values + } + // return the values below the percentile + dbg.Lvl3("Filtering: filters out", measure, ":", maxIndex, "/", len(values)) + return values[:maxIndex] +} + +// value is used to compute the statistics +// it reprensent the time to an action (setup, shamir round, coll round etc) +// use it to compute streaming mean + dev +type value struct { + min float64 + max float64 + + n int + oldM float64 + newM float64 + oldS float64 + newS float64 + dev float64 + + // Store where are kept the values + store []float64 +} + +func newValue() *value { + return &value{store: make([]float64, 0)} +} + +// Store takes this new time and stores it for later analysis +// Since we might want to do percentile sorting, we need to have all the values +// For the moment, we do a simple store of the value, but note that some +// streaming percentile algorithm exists in case the number of messages is +// growing to big. +func (t *value) Store(newTime float64) { + t.store = append(t.store, newTime) +} + +// Collect will Collect all values stored in the store's Value. +// It is kept as a streaming average / dev processus fr the moment (not the most +// optimized). 
+// streaming dev algo taken from http://www.johndcook.com/blog/standard_deviation/ +func (t *value) Collect(measure string, df DataFilter) { + t.store = df.Filter(measure, t.store) + for _, newTime := range t.store { + // nothings takes 0 ms to complete, so we know it's the first time + if t.min > newTime || t.n == 0 { + t.min = newTime + } + if t.max < newTime { + t.max = newTime + } + + t.n += 1 + if t.n == 1 { + t.oldM = newTime + t.newM = newTime + t.oldS = 0.0 + } else { + t.newM = t.oldM + (newTime-t.oldM)/float64(t.n) + t.newS = t.oldS + (newTime-t.oldM)*(newTime-t.newM) + t.oldM = t.newM + t.oldS = t.newS + } + t.dev = math.Sqrt(t.newS / float64(t.n-1)) + } +} + +// Average will set the current Value to the average of all Value +func AverageValue(st ...*value) *value { + var t value + for _, s := range st { + t.min += s.min + t.max += s.max + t.newM += s.newM + t.dev += s.dev + } + l := float64(len(st)) + t.min /= l + t.max /= l + t.newM /= l + t.dev /= l + t.n = len(st) + return &t +} + +// Get the minimum or the maximum of all stored values +func (t *value) Min() float64 { + return t.min +} +func (t *value) Max() float64 { + return t.max +} + +// NumValue returns the number of value added +func (t *value) NumValue() int { + return t.n +} + +// Avg returns the average (mean) of the values +func (t *value) Avg() float64 { + return t.newM +} + +// Dev returns the standard deviation of the values +func (t *value) Dev() float64 { + return t.dev +} + +// Header returns the first line of the CSV-file +func (t *value) Header(prefix string) string { + return fmt.Sprintf("%s_min, %s_max, %s_avg, %s_dev", prefix, prefix, prefix, prefix) +} + +// String returns the min, max, avg and dev of a value +func (t *value) String() string { + return fmt.Sprintf("%f, %f, %f, %f", t.Min(), t.Max(), t.Avg(), t.Dev()) +} + +// Measurement represents the precise measurement of a specific thing to measure +// example: I want to measure the time it takes to verify a signature, the +// measurement "verify" will hold a wallclock Value, cpu_user Value, cpu_system +// Value. A measurement is frequently updated with Measure given by the client. 
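The `Collect` method above is the standard single-pass (Welford-style) mean/deviation update referenced in the comment. Restated on its own, under the assumption that it is fed the already-filtered values, the recurrence looks like this:

```
package main

import (
	"fmt"
	"math"
)

// streamingStats mirrors value.Collect:
//   M_k = M_{k-1} + (x_k - M_{k-1}) / k         (running mean)
//   S_k = S_{k-1} + (x_k - M_{k-1})*(x_k - M_k)  (sum of squared deviations)
//   dev = sqrt(S_n / (n-1))
func streamingStats(xs []float64) (mean, dev float64) {
	var oldM, newM, oldS, newS float64
	for i, x := range xs {
		if i == 0 {
			oldM, newM = x, x
			continue
		}
		k := float64(i + 1)
		newM = oldM + (x-oldM)/k
		newS = oldS + (x-oldM)*(x-newM)
		oldM, oldS = newM, newS
	}
	if len(xs) > 1 {
		dev = math.Sqrt(newS / float64(len(xs)-1))
	}
	return newM, dev
}

func main() {
	mean, dev := streamingStats([]float64{10, 12, 11, 13})
	fmt.Printf("mean=%.2f dev=%.2f\n", mean, dev) // mean=11.50 dev=1.29
}
```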
+type Measurement struct { + Name string + Wall *value + User *value + System *value + Filter DataFilter +} + +// NewMeasurement returns a new measurements with this name +func NewMeasurement(name string, df DataFilter) *Measurement { + return &Measurement{ + Name: name, + Wall: newValue(), + User: newValue(), + System: newValue(), + Filter: df, + } +} + +// WriteHeader will write the header to the specified writer +func (m *Measurement) WriteHeader(w io.Writer) { + fmt.Fprintf(w, "%s, %s, %s", m.Wall.Header(m.Name+"_wall"), + m.User.Header(m.Name+"_user"), m.System.Header(m.Name+"_system")) +} + +// WriteValues will write a new entry for this entry in the writer +// First compute the values then write to writer +func (m *Measurement) WriteValues(w io.Writer) { + fmt.Fprintf(w, "%s, %s, %s", m.Wall.String(), m.User.String(), m.System.String()) +} + +// Update takes a measure received from the network and update the wall system +// and user values +func (m *Measurement) Update(measure Measure) { + dbg.Lvl2("Got measurement for", m.Name, measure.WallTime, measure.CPUTimeUser, measure.CPUTimeSys) + m.Wall.Store(measure.WallTime) + m.User.Store(measure.CPUTimeUser) + m.System.Store(measure.CPUTimeSys) +} + +// Collect will call Collect on Wall- User- and System-time +func (m *Measurement) Collect() { + m.Wall.Collect(m.Name, m.Filter) + m.User.Collect(m.Name, m.Filter) + m.System.Collect(m.Name, m.Filter) +} + +// AverageMeasurements takes an slice of measurements and make the average +// between them. i.e. it takes the average of the Wall value from each +// measurements, etc. +func AverageMeasurements(measurements []Measurement) Measurement { + m := NewMeasurement(measurements[0].Name, measurements[0].Filter) + walls := make([]*value, len(measurements)) + users := make([]*value, len(measurements)) + systems := make([]*value, len(measurements)) + for i, m2 := range measurements { + m2.Collect() + walls[i] = m2.Wall + users[i] = m2.User + systems[i] = m2.System + } + m.Wall = AverageValue(walls...) + m.User = AverageValue(users...) + m.System = AverageValue(systems...) 
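+	// Note: AverageValue averages each field (min, max, mean, dev)
+	// component-wise over the given values; nothing is recomputed from
+	// the raw samples.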
+ return *m +} + +// String shows one measurement +func (m *Measurement) String() string { + return fmt.Sprintf("{Measurement %s: wall = %v, system = %v, user = %v}", m.Name, m.Wall, m.User, m.System) +} diff --git a/lib/monitor/stats_test.go b/lib/monitor/stats_test.go new file mode 100644 index 0000000000..1218155e85 --- /dev/null +++ b/lib/monitor/stats_test.go @@ -0,0 +1,110 @@ +package monitor + +import ( + "bytes" + "fmt" + "testing" +) + +func TestNewDataFilter(t *testing.T) { + rc := make(map[string]string) + rc["filter_round"] = "50" + rc["filter_verify"] = "90" + df := NewDataFilter(rc) + if df.percentiles["round"] == 0 || df.percentiles["verify"] == 0 { + t.Error("Datafilter not correctly parsed the run config") + } + if df.percentiles["round"] != 50.0 || df.percentiles["verify"] != 90.0 { + t.Error(fmt.Sprintf("datafilter not correctly parsed the percentile: %f vs 50 or %f vs 90", df.percentiles["round"], df.percentiles["verifiy"])) + } +} + +func TestDataFilterFilter(t *testing.T) { + rc := make(map[string]string) + rc["filter_round"] = "75" + df := NewDataFilter(rc) + + values := []float64{35, 20, 15, 40, 50} + filtered := df.Filter("round", values) + shouldBe := []float64{35, 20, 15, 40} + if len(shouldBe) != len(filtered) { + t.Error(fmt.Sprintf("Filter returned %d values instead of %d", len(filtered), len(shouldBe))) + } + for i, v := range filtered { + if v != shouldBe[i] { + t.Error(fmt.Sprintf("Element %d = %d vs %d", i, filtered[i], shouldBe[i])) + } + } +} + +func TestStatsUpdate(t *testing.T) { + rc := make(map[string]string) + rc["machines"] = "2" + rc["ppm"] = "2" + stats := NewStats(rc) + + m1 := Measure{ + Name: "round", + WallTime: 10, + CPUTimeUser: 20, + CPUTimeSys: 30, + } + m2 := Measure{ + Name: "round", + WallTime: 10, + CPUTimeUser: 20, + CPUTimeSys: 30, + } + stats.Update(m1) + stats.Update(m2) + stats.Collect() + meas := stats.measures["round"] + if meas.Wall.Avg() != 10 || meas.User.Avg() != 20 { + t.Error("Aggregate or Update not working") + } +} + +func TestStatsNotWriteUnknownMeasures(t *testing.T) { + rc := make(map[string]string) + rc["machines"] = "2" + rc["ppm"] = "2" + stats := NewStats(rc) + + m1 := Measure{ + Name: "test1", + WallTime: 10, + CPUTimeUser: 20, + CPUTimeSys: 30, + } + m2 := Measure{ + Name: "round2", + WallTime: 70, + CPUTimeUser: 20, + CPUTimeSys: 30, + } + m3 := Measure{ + Name: "test2", + WallTime: 30, + CPUTimeUser: 30, + CPUTimeSys: 30, + } + stats.Update(m1) + stats.Update(m3) + var writer = new(bytes.Buffer) + stats.WriteHeader(writer) + stats.WriteValues(writer) + output := writer.Bytes() + if !bytes.Contains(output, []byte("10")) { + t.Error(fmt.Sprintf("Stats should write the right measures: %s", writer)) + } + if !bytes.Contains(output, []byte("test2")) || !bytes.Contains(output, []byte("test1")) { + t.Error(fmt.Sprintf("Stats should write the right header values")) + } + stats.Update(m2) + stats.WriteValues(writer) + + output = writer.Bytes() + if bytes.Contains(output, []byte("70")) { + t.Error("Stats should not contain any new measurements after first write") + } +} diff --git a/lib/network/net.go b/lib/network/net.go new file mode 100644 index 0000000000..4fb4d0a213 --- /dev/null +++ b/lib/network/net.go @@ -0,0 +1,311 @@ +// This package is a networking library. You have Hosts which can +// issue connections to others hosts, and Conn which are the connections itself. +// Hosts and Conns are interfaces and can be of type Tcp, or Chans, or Udp or +// whatever protocols you think might implement this interface. 
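+//
+// As a rough usage sketch of the TCP implementation below (addresses and
+// packet are placeholders):
+//
+//	srv := NewTcpHost("10.0.0.1")
+//	go srv.Listen("10.0.0.1:2000", func(c Conn) {
+//		am, _ := c.Receive() // decoded ApplicationMessage
+//		dbg.Lvl3("got", am.MsgType)
+//	})
+//	cl := NewTcpHost("10.0.0.2")
+//	conn := cl.Open("10.0.0.1:2000")
+//	_ = conn.Send(packet) // packet: any struct registered with RegisterProtocolType
+//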
+// In this library we also provide a way to encode / decode any kind of packet / +// structs. When you want to send a struct to a conn, you first register +// (one-time operation) this packet to the library, and then directly pass the +// struct itself to the conn that will recognize its type. When decoding, +// it will automatically detect the underlying type of struct given, and decode +// it accordingly. You can provide your own decode / encode methods if for +// example, you have a variable length packet structure. For this, just +// implements MarshalBinary or UnmarshalBinary. + +package network + +import ( + "bytes" + "encoding" + "encoding/binary" + "encoding/gob" + "errors" + "fmt" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" + "net" + "os" + "reflect" + "time" +) + +/// Encoding part /// + +type Type uint8 + +var currType Type +var Suite abstract.Suite +var TypeRegistry = make(map[Type]reflect.Type) +var InvTypeRegistry = make(map[reflect.Type]Type) + +// RegisterProtocolType register a custom "struct" / "packet" and get +// the allocated Type +// Pass simply an your non-initialized struct +func RegisterProtocolType(msg ProtocolMessage) Type { + currType += 1 + t := reflect.TypeOf(msg) + TypeRegistry[currType] = t + InvTypeRegistry[t] = currType + return currType +} + +// String returns the underlying type in human format +func (t Type) String() string { + ty, ok := TypeRegistry[t] + if !ok { + return "unknown" + } + return ty.Name() +} + +// ProtocolMessage is a type for any message that the user wants to send +type ProtocolMessage interface{} + +// ApplicationMessage is the container for any ProtocolMessage +type ApplicationMessage struct { + MsgType Type + Msg ProtocolMessage +} + +// MarshalBinary the application message => to bytes +// Implements BinaryMarshaler interface so it will be used when sending with gob +func (am *ApplicationMessage) MarshalBinary() ([]byte, error) { + var buf = new(bytes.Buffer) + err := binary.Write(buf, binary.BigEndian, am.MsgType) + if err != nil { + return nil, err + } + // if underlying type implements BinaryMarshal => use that + if bm, ok := am.Msg.(encoding.BinaryMarshaler); ok { + bufMsg, err := bm.MarshalBinary() + if err != nil { + return nil, err + } + _, err = buf.Write(bufMsg) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + // Otherwise, use Encoding from the Suite + err = Suite.Write(buf, am.Msg) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalBinary will decode the incoming bytes +// It checks if the underlying packet is self-decodable +// by using its UnmarshalBinary interface +// otherwise, use abstract.Encoding (suite) to decode +func (am *ApplicationMessage) UnmarshalBinary(buf []byte) error { + b := bytes.NewBuffer(buf) + var t Type + err := binary.Read(b, binary.BigEndian, &t) + if err != nil { + fmt.Printf("Error reading Type: %v\n", err) + os.Exit(1) + } + + ty, ok := TypeRegistry[t] + if !ok { + fmt.Printf("Type %d is not registered so we can not allocate this type %s\n", t, t.String()) + os.Exit(1) + } + + am.MsgType = t + + // Look if the type supports UnmarshalBinary + ptr := reflect.New(ty) + v := ptr.Elem() + if bu, ok := ptr.Interface().(encoding.BinaryUnmarshaler); ok { + // Bytes() returns the UNREAD portion of bytes ;) + err := bu.UnmarshalBinary(b.Bytes()) + am.Msg = ptr.Elem().Interface() + return err + } + // Otherwise decode it ourself + err = Suite.Read(b, ptr.Interface()) // 
v.Addr().Interface()) + if err != nil { + fmt.Printf("Error decoding ProtocolMessage: %v\n", err) + os.Exit(1) + } + am.Msg = v.Interface() + //fmt.Printf("UnmarshalBinary(): Decoded type %s => %v\n", t.String(), ty) + return nil +} + +// ConstructFrom takes a ProtocolMessage and then construct a +// ApplicationMessage from it. Error if the type is unknown +func (am *ApplicationMessage) ConstructFrom(obj ProtocolMessage) error { + t := reflect.TypeOf(obj) + ty, ok := InvTypeRegistry[t] + if !ok { + return errors.New(fmt.Sprintf("Packet to send is not known. Please register packet: %s\n", t.String())) + } + am.MsgType = ty + am.Msg = obj + return nil +} + +// Network part // + +// How many times should we try to connect +const maxRetry = 10 +const waitRetry = 1 * time.Second + +// Host is the basic interface to represent a Host of any kind +// Host can open new Conn(ections) and Listen for any incoming Conn(...) +type Host interface { + Name() string + Open(name string) Conn + Listen(addr string, fn func(Conn)) // the srv processing function +} + +// Conn is the basic interface to represent any communication mean +// between two host. It is closely related to the underlying type of Host +// since a TcpHost will generate only TcpConn +type Conn interface { + PeerName() string + Send(obj ProtocolMessage) error + Receive() (ApplicationMessage, error) + Close() +} + +// TcpHost is the underlying implementation of +// Host using Tcp as a communication channel +type TcpHost struct { + // its name (usually its IP address) + name string + // A list of connection maintained by this host + peers map[string]Conn +} + +// TcpConn is the underlying implementation of +// Conn using Tcp +type TcpConn struct { + // Peer is the name of the endpoint + Peer string + + // The connection used + Conn net.Conn + // TcpConn uses Gob to encode / decode its messages + enc *gob.Encoder + dec *gob.Decoder + // A pointer to the associated host (just-in-case) + host *TcpHost +} + +// PeerName returns the name of the peer at the end point of +// the conn +func (c *TcpConn) PeerName() string { + return c.Peer +} + +// Receive waits for any input on the connection and returns +// the ApplicationMessage **decoded** and an error if something +// wrong occured +func (c *TcpConn) Receive() (ApplicationMessage, error) { + var am ApplicationMessage + err := c.dec.Decode(&am) + if err != nil { + dbg.Fatal("Error decoding ApplicationMessage:", err) + } + return am, nil +} + +// Send will convert the Protocolmessage into an ApplicationMessage +// Then send the message through the Gob encoder +// Returns an error if anything was wrong +func (c *TcpConn) Send(obj ProtocolMessage) error { + am := ApplicationMessage{} + err := am.ConstructFrom(obj) + if err != nil { + return fmt.Errorf("Error converting packet: %v\n", err) + } + err = c.enc.Encode(&am) + if err != nil { + return fmt.Errorf("Error sending message: %v", err) + } + return err +} + +// Close ... closes the connection +func (c *TcpConn) Close() { + err := c.Conn.Close() + if err != nil { + dbg.Fatal("Error while closing tcp conn:", err) + } +} + +// NewTcpHost returns a Fresh TCP Host +func NewTcpHost(name string) *TcpHost { + return &TcpHost{ + name: name, + peers: make(map[string]Conn), + } +} + +// Name is the name ofthis host +func (t *TcpHost) Name() string { + return t.name +} + +// Open will create a new connection between this host +// and the remote host named "name". This is a TcpConn. +// If anything went wrong, Conn will be nil. 
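+// The dial is retried up to maxRetry times, pausing between attempts.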
+func (t *TcpHost) Open(name string) Conn { + var conn net.Conn + var err error + for i := 0; i < maxRetry; i++ { + + conn, err = net.Dial("tcp", name) + if err != nil { + dbg.Lvl3(t.Name(), "(", i, "/", maxRetry, ") Error opening connection to", name) + time.Sleep(waitRetry) + } else { + break + } + time.Sleep(waitRetry) + } + if conn == nil { + dbg.Fatal(t.Name(), "could not connect to", name, ": ABORT") + } + c := TcpConn{ + Peer: name, + Conn: conn, + enc: gob.NewEncoder(conn), + dec: gob.NewDecoder(conn), + host: t, + } + t.peers[name] = &c + return &c +} + +// Listen for any host trying to contact him. +// Will launch in a goroutine the srv function once a connection is established +func (t *TcpHost) Listen(addr string, fn func(Conn)) { + global, _ := cliutils.GlobalBind(addr) + ln, err := net.Listen("tcp", global) + if err != nil { + dbg.Fatal("error listening (host", t.Name(), ")") + } + dbg.Lvl3(t.Name(), "Waiting for connections on addr", addr, "..\n") + for { + conn, err := ln.Accept() + if err != nil { + dbg.Lvl2(t.Name(), "error accepting connection:", err) + continue + } + c := TcpConn{ + Peer: conn.RemoteAddr().String(), + Conn: conn, + enc: gob.NewEncoder(conn), + dec: gob.NewDecoder(conn), + host: t, + } + t.peers[conn.RemoteAddr().String()] = &c + go fn(&c) + } +} diff --git a/lib/network/net_test.go b/lib/network/net_test.go new file mode 100644 index 0000000000..5f27c20433 --- /dev/null +++ b/lib/network/net_test.go @@ -0,0 +1,130 @@ +package network + +import ( + "bytes" + "fmt" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/edwards" + "testing" + "time" +) + +type PublicPacket struct { + Point abstract.Point +} + +func (p *PublicPacket) MarshalBinary() ([]byte, error) { + var b bytes.Buffer + err := Suite.Write(&b, &p.Point) + return b.Bytes(), err +} +func (p *PublicPacket) UnmarshalBinary(buf []byte) error { + b := bytes.NewBuffer(buf) + err := Suite.Read(b, &p.Point) + return err +} + +var PublicType Type + +func init() { + PublicType = RegisterProtocolType(PublicPacket{}) +} + +type SimpleClient struct { + Host + Pub abstract.Point + Peers []abstract.Point +} + +func (s *SimpleClient) Init(host Host, pub abstract.Point) *SimpleClient { + return &SimpleClient{ + Host: host, + Pub: pub, + Peers: make([]abstract.Point, 0), + } +} + +// overridding Name host +func (s *SimpleClient) Name() string { + return "Client " + s.Host.Name() +} + +// Simplest protocol : exchange keys with the server +func (s *SimpleClient) ExchangeWithServer(name string, t *testing.T) { + // open a connection to the peer + c := s.Open(name) + if c == nil { + t.Error("client connection is nil ><") + } + // create pack + p := PublicPacket{ + Point: s.Pub, + } + // Send it + err := c.Send(p) + if err != nil { + t.Error("error sending from client:", err) + } + + // Receive the response + am, err := c.Receive() + if err != nil { + fmt.Printf("error receiving ..") + } + + // Cast to the right type + if am.MsgType != PublicType { + t.Error("Received a non-wanted packet.\n") + } + + c.Close() +} + +type SimpleServer struct { + Host + Pub abstract.Point + t *testing.T +} + +func (s *SimpleServer) Name() string { + return "Server " + s.Host.Name() +} + +// this is the callback when a new connection is don +func (s *SimpleServer) ExchangeWithClient(c Conn) { + p := PublicPacket{ + Point: s.Pub, + } + + c.Send(p) + am, err := c.Receive() + if err != nil { + s.t.Error("Server errored when receiving packet ...\n") + } + if am.MsgType != PublicType { + s.t.Error("Server received a non-wanted 
packet\n") + } + c.Close() +} + +func (s *SimpleServer) Init(host Host, pub abstract.Point, t *testing.T) *SimpleServer { + s.Host = host + s.Pub = pub + s.t = t + return s +} + +func TestTcpNetwork(t *testing.T) { + clientHost := NewTcpHost("127.0.0.1") + serverHost := NewTcpHost("127.0.0.1") + suite := edwards.NewAES128SHA256Ed25519(false) + Suite = suite + clientPub := suite.Point().Base() + serverPub := suite.Point().Add(suite.Point().Base(), suite.Point().Base()) + client := new(SimpleClient).Init(clientHost, clientPub) + server := new(SimpleServer).Init(serverHost, serverPub, t) + + go server.Listen("127.0.0.1:5000", server.ExchangeWithClient) + time.Sleep(1 * time.Second) + client.ExchangeWithServer("127.0.0.1:5000", t) +} diff --git a/lib/network/packet.go b/lib/network/packet.go new file mode 100644 index 0000000000..13d0256ce0 --- /dev/null +++ b/lib/network/packet.go @@ -0,0 +1,90 @@ +package network + +import ( + "bytes" + "errors" + "github.com/dedis/crypto/abstract" +) + +// This file contains usual packets that are needed for different +// protocols. + +// Type for MessageSigning +var MessageSigningType Type + +// Type for BasicSignature +var BasicSignatureType Type + +// Type for ListBasicSignature +var ListBasicSignatureType Type + +// Init registers these few types on the network type registry +func init() { + MessageSigningType = RegisterProtocolType(MessageSigning{}) + BasicSignatureType = RegisterProtocolType(BasicSignature{}) + ListBasicSignatureType = RegisterProtocolType(ListBasicSignature{}) +} + +// BasicSignatur is used to transmit our any kind of signature +// along with the public key ( used mostly for testing ) +type BasicSignature struct { + Pub abstract.Point + Chall abstract.Secret + Resp abstract.Secret +} + +// ListBasicSignature is a packet representing a list of basic signature +// It is self-decodable by implementing Unmarshal binary interface +type ListBasicSignature struct { + Length int + Sigs []BasicSignature +} + +// UnmarshalBinary is our custom decoding function +func (l *ListBasicSignature) UnmarshalBinary(buf []byte) error { + b := bytes.NewBuffer(buf) + // first decode size + var length int + err := Suite.Read(b, &length) + if length < 0 { + return errors.New("Received a length < 0 for ListBasicSignature msg") + } + l.Length = length + l.Sigs = make([]BasicSignature, length) + for i := range l.Sigs { + err = Suite.Read(b, &l.Sigs[i]) + if err != nil { + return err + } + } + return nil +} + +// MessageSigning is a simple packet to transmit a variable-length message +type MessageSigning struct { + Length int + Msg []byte +} + +// MarshalBinary encode MessageSigning it self. 
Shown mostly as an example +// as there is no need here to implement that since abstract.Encoding does +// it already +func (m *MessageSigning) MarshalBinary() ([]byte, error) { + var b bytes.Buffer + err := Suite.Write(&b, m.Length, m.Msg) + by := b.Bytes() + return by, err +} + +// UnmarshalBinary is needed to construct the slice containing the msg +// of the right length before decoding it from abstract.Encoding +func (m *MessageSigning) UnmarshalBinary(buf []byte) error { + b := bytes.NewBuffer(buf) + err := Suite.Read(b, &m.Length) + if err != nil { + return err + } + m.Msg = make([]byte, m.Length) + err = Suite.Read(b, m.Msg) + return err +} diff --git a/lib/network/packet_test.go b/lib/network/packet_test.go new file mode 100644 index 0000000000..35350adebd --- /dev/null +++ b/lib/network/packet_test.go @@ -0,0 +1,54 @@ +package network + +import ( + "bytes" + "github.com/dedis/cothority/lib/cliutils" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/config" + "github.com/dedis/crypto/edwards" + "reflect" + "testing" +) + +var s abstract.Suite = edwards.NewAES128SHA256Ed25519(false) +var key1 config.KeyPair = cliutils.KeyPair(s) +var key2 config.KeyPair = cliutils.KeyPair(s) + +func TestListBasicSignatureMarshaling(t *testing.T) { + Suite = s + bs := BasicSignature{ + Pub: key1.Public, + Chall: key1.Secret, + Resp: key1.Secret, + } + var length int = 10 + sigs := make([]BasicSignature, length) + for i := 0; i < length; i++ { + sigs[i] = bs + } + lbs := ListBasicSignature{ + Length: length, + Sigs: sigs, + } + var buf bytes.Buffer + err := s.Write(&buf, &lbs) + if err != nil { + t.Error("Marshaling BasicSiganture should not throw error") + } + bytesBuffer := buf.Bytes() + + bbs := &ListBasicSignature{} + err = bbs.UnmarshalBinary(bytesBuffer) + if err != nil { + t.Error("Unmarshaling BasicSignature should not throw an error") + } + + if bbs.Length != lbs.Length { + t.Error("Unmarshaling did not give the same ListBasicSIganture") + } + for i := 0; i < length; i++ { + if !reflect.DeepEqual(bbs.Sigs[i], lbs.Sigs[i]) { + t.Error("Unmarshaling did not give the same ListBasicSignature") + } + } +} diff --git a/lib/proof/proof.go b/lib/proof/proof.go index c0e5500df0..e9c1b4b5cb 100644 --- a/lib/proof/proof.go +++ b/lib/proof/proof.go @@ -5,9 +5,8 @@ import ( "crypto/subtle" "errors" "fmt" - log "github.com/Sirupsen/logrus" - "github.com/dedis/crypto/abstract" "github.com/dedis/cothority/lib/hashid" + "github.com/dedis/crypto/abstract" "hash" "strconv" ) @@ -73,11 +72,7 @@ func CheckProof(newHash HashFunc, root hashid.HashId, leaf hashid.HashId, proof // log.Println("Leaf", len(leaf), leaf) // log.Println("Proof", proof) // log.Println("\n") - if proof.Check(newHash, root, leaf) == false { - log.Errorln("FAILED TO CHECK") - panic("check failed at leaf") - } - return true + return proof.Check(newHash, root, leaf) } func CheckLocalProofs(newHash HashFunc, root hashid.HashId, leaves []hashid.HashId, proofs []Proof) bool { diff --git a/lib/proof/proof_test.go b/lib/proof/proof_test.go index 7aed917077..6b8a6d88d1 100644 --- a/lib/proof/proof_test.go +++ b/lib/proof/proof_test.go @@ -4,7 +4,7 @@ import ( "crypto/sha256" "testing" - "github.com/dedis/cothority/hashid" + "github.com/dedis/cothority/lib/hashid" ) func TestPath(t *testing.T) { diff --git a/lib/sign/cosistruct.go b/lib/sign/cosistruct.go new file mode 100644 index 0000000000..7f58a607cd --- /dev/null +++ b/lib/sign/cosistruct.go @@ -0,0 +1,318 @@ +package sign + +import ( + "bytes" + "encoding/gob" + "errors" + 
"github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/cothority/lib/hashid" + "github.com/dedis/cothority/lib/proof" + "github.com/dedis/crypto/abstract" + "sort" +) + +/* +Functionality used in the roundcosi. Abstracted here for better +understanding and readability of roundcosi. +*/ + +const FIRST_ROUND int = 1 // start counting rounds at 1 + +type CosiStruct struct { + // Message created by root. It can be empty and it will make no difference. In + // the case of a timestamp service however we need the timestamp generated by + // the round for this round . It will be included in the challenge, and then + // can be verified by the client + Msg []byte + C abstract.Secret // round lasting challenge + R abstract.Secret // round lasting response + + Log SNLog // round lasting log structure + HashedLog []byte + + R_hat abstract.Secret // aggregate of responses + + X_hat abstract.Point // aggregate of public keys + + Commits []*SigningMessage + Responses []*SigningMessage + + // own big merkle subtree + MTRoot hashid.HashId // mt root for subtree, passed upwards + Leaves []hashid.HashId // leaves used to build the merkle subtre + LeavesFrom []string // child names for leaves + + // mtRoot before adding HashedLog + LocalMTRoot hashid.HashId + + // merkle tree roots of children in strict order + CMTRoots []hashid.HashId + CMTRootNames []string + Proofs map[string]proof.Proof + Proof []hashid.HashId + PubKey abstract.Point + PrivKey abstract.Secret + Name string + + // round-lasting public keys of children servers that did not + // respond to latest commit or respond phase, in subtree + ExceptionList []abstract.Point + // combined point commits of children servers in subtree + ChildV_hat map[string]abstract.Point + // combined public keys of children servers in subtree + ChildX_hat map[string]abstract.Point + // for internal verification purposes + ExceptionX_hat abstract.Point + ExceptionV_hat abstract.Point + + BackLink hashid.HashId + AccRound []byte + + Suite abstract.Suite + + Children map[string]coconet.Conn + Parent string + ViewNbr int +} + +// Sets up a round according to the needs stated in the +// Announcementmessage. +func NewCosi(sn *Node, viewNbr, roundNbr int, am *AnnouncementMessage) *CosiStruct { + // set up commit and response channels for the new round + cosi := &CosiStruct{} + cosi.Commits = make([]*SigningMessage, 0) + cosi.Responses = make([]*SigningMessage, 0) + cosi.ExceptionList = make([]abstract.Point, 0) + cosi.Suite = sn.suite + cosi.Log.Suite = sn.suite + cosi.Children = sn.Children(viewNbr) + cosi.Parent = sn.Parent(viewNbr) + cosi.ViewNbr = viewNbr + cosi.PubKey = sn.PubKey + cosi.PrivKey = sn.PrivKey + cosi.Name = sn.Name() + cosi.R_hat = sn.suite.Secret().Zero() + cosi.ExceptionV_hat = sn.suite.Point().Null() + cosi.ExceptionX_hat = sn.suite.Point().Null() + cosi.ExceptionList = make([]abstract.Point, 0) + cosi.InitCommitCrypto() + return cosi +} + +/* + * This is a module for the round-struct that does all the + * calculation for a merkle-hash-tree. 
+ */ + +// Create round lasting secret and commit point v and V +// Initialize log structure for the round +func (cosi *CosiStruct) InitCommitCrypto() { + // generate secret and point commitment for this round + rand := cosi.Suite.Cipher([]byte(cosi.Name)) + cosi.Log = SNLog{} + cosi.Log.v = cosi.Suite.Secret().Pick(rand) + cosi.Log.V = cosi.Suite.Point().Mul(nil, cosi.Log.v) + // initialize product of point commitments + cosi.Log.V_hat = cosi.Suite.Point().Null() + cosi.Log.Suite = cosi.Suite + //cosi.Add(cosi.Log.V_hat, cosi.Log.V) + cosi.Log.V_hat.Add(cosi.Log.V_hat, cosi.Log.V) + + cosi.X_hat = cosi.Suite.Point().Null() + //cosi.Add(cosi.X_hat, cosi.PubKey) + cosi.X_hat.Add(cosi.X_hat, cosi.PubKey) +} + +// Adds a child-node to the Merkle-tree and updates the root-hashes +func (cosi *CosiStruct) MerkleAddChildren() { + // children commit roots + cosi.CMTRoots = make([]hashid.HashId, len(cosi.Leaves)) + copy(cosi.CMTRoots, cosi.Leaves) + cosi.CMTRootNames = make([]string, len(cosi.Leaves)) + copy(cosi.CMTRootNames, cosi.LeavesFrom) + + // concatenate children commit roots in one binary blob for easy marshalling + cosi.Log.CMTRoots = make([]byte, 0) + for _, leaf := range cosi.Leaves { + cosi.Log.CMTRoots = append(cosi.Log.CMTRoots, leaf...) + } +} + +// Adds the local Merkle-tree root, usually from a stamper or +// such +func (cosi *CosiStruct) MerkleAddLocal(localMTroot hashid.HashId) { + // add own local mtroot to leaves + cosi.LocalMTRoot = localMTroot + cosi.Leaves = append(cosi.Leaves, cosi.LocalMTRoot) +} + +// Hashes the log of the round-structure +func (cosi *CosiStruct) MerkleHashLog() error { + var err error + + h := cosi.Suite.Hash() + logBytes, err := cosi.Log.MarshalBinary() + if err != nil { + return err + } + h.Write(logBytes) + cosi.HashedLog = h.Sum(nil) + return err +} + +func (cosi *CosiStruct) ComputeCombinedMerkleRoot() { + // add hash of whole log to leaves + cosi.Leaves = append(cosi.Leaves, cosi.HashedLog) + + // compute MT root based on Log as right child and + // MT of leaves as left child and send it up to parent + sort.Sort(hashid.ByHashId(cosi.Leaves)) + left, proofs := proof.ProofTree(cosi.Suite.Hash, cosi.Leaves) + right := cosi.HashedLog + moreLeaves := make([]hashid.HashId, 0) + moreLeaves = append(moreLeaves, left, right) + cosi.MTRoot, _ = proof.ProofTree(cosi.Suite.Hash, moreLeaves) + + // Hashed Log has to come first in the proof; len(sn.CMTRoots)+1 proofs + cosi.Proofs = make(map[string]proof.Proof, 0) + for name := range cosi.Children { + cosi.Proofs[name] = append(cosi.Proofs[name], right) + } + cosi.Proofs["local"] = append(cosi.Proofs["local"], right) + + // separate proofs by children (need to send personalized proofs to children) + // also separate local proof (need to send it to timestamp server) + cosi.SeparateProofs(proofs, cosi.Leaves) +} + +// Identify which proof corresponds to which leaf +// Needed given that the leaves are sorted before passed to the function that create +// the Merkle Tree and its Proofs +func (cosi *CosiStruct) SeparateProofs(proofs []proof.Proof, leaves []hashid.HashId) { + // separate proofs for children servers mt roots + for i := 0; i < len(cosi.CMTRoots); i++ { + name := cosi.CMTRootNames[i] + for j := 0; j < len(leaves); j++ { + if bytes.Compare(cosi.CMTRoots[i], leaves[j]) == 0 { + // sn.Proofs[i] = append(sn.Proofs[i], proofs[j]...) + cosi.Proofs[name] = append(cosi.Proofs[name], proofs[j]...) 
+ continue + } + } + } + + // separate proof for local mt root + for j := 0; j < len(leaves); j++ { + if bytes.Compare(cosi.LocalMTRoot, leaves[j]) == 0 { + cosi.Proofs["local"] = append(cosi.Proofs["local"], proofs[j]...) + } + } +} + +func (cosi *CosiStruct) InitResponseCrypto() { + cosi.R = cosi.Suite.Secret() + cosi.R.Mul(cosi.PrivKey, cosi.C).Sub(cosi.Log.v, cosi.R) + // initialize sum of children's responses + cosi.R_hat = cosi.R +} + +// Create Merkle Proof for local client (timestamp server) and +// store it in Node so that we can send it to the clients during +// the SignatureBroadcast +func (cosi *CosiStruct) StoreLocalMerkleProof(chm *ChallengeMessage) error { + proofForClient := make(proof.Proof, len(chm.Proof)) + copy(proofForClient, chm.Proof) + + // To the proof from our root to big root we must add the separated proof + // from the localMKT of the client (timestamp server) to our root + proofForClient = append(proofForClient, cosi.Proofs["local"]...) + + // if want to verify partial and full proofs + if dbg.DebugVisible > 2 { + //round.sn.VerifyAllProofs(view, chm, proofForClient) + } + cosi.Proof = proofForClient + cosi.MTRoot = chm.MTRoot + return nil +} + +// Called by every node after receiving aggregate responses from descendants +func (cosi *CosiStruct) VerifyResponses() error { + + // Check that: base**r_hat * X_hat**c == V_hat + // Equivalent to base**(r+xc) == base**(v) == T in vanillaElGamal + Aux := cosi.Suite.Point() + V_clean := cosi.Suite.Point() + V_clean.Add(V_clean.Mul(nil, cosi.R_hat), Aux.Mul(cosi.X_hat, cosi.C)) + // T is the recreated V_hat + T := cosi.Suite.Point().Null() + T.Add(T, V_clean) + T.Add(T, cosi.ExceptionV_hat) + + var c2 abstract.Secret + isroot := cosi.Parent == "" + if isroot { + // round challenge must be recomputed given potential + // exception list + msg := cosi.Msg + msg = append(msg, []byte(cosi.MTRoot)...) 
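+		// The root recomputes the challenge over Msg || MTRoot: once against the
+		// aggregate commitment V_hat and once against the reconstructed T, so the
+		// two can be compared below.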
+ cosi.C = cosi.HashElGamal(msg, cosi.Log.V_hat) + c2 = cosi.HashElGamal(msg, T) + } + + // intermediary nodes check partial responses aginst their partial keys + // the root node is also able to check against the challenge it emitted + if !T.Equal(cosi.Log.V_hat) || (isroot && !cosi.C.Equal(c2)) { + return errors.New("Verifying ElGamal Collective Signature failed in " + + cosi.Name) + } else if isroot { + dbg.Lvl4(cosi.Name, "reports ElGamal Collective Signature succeeded") + } + return nil +} + +// Returns a secret that depends on on a message and a point +func (cosi *CosiStruct) HashElGamal(message []byte, p abstract.Point) abstract.Secret { + pb, _ := p.MarshalBinary() + c := cosi.Suite.Cipher(pb) + c.Message(nil, nil, message) + return cosi.Suite.Secret().Pick(c) +} + +// Signing Node Log for a round +// For Marshaling and Unmarshaling to work smoothly +// crypto fields must appear first in the structure +type SNLog struct { + v abstract.Secret // round lasting secret + V abstract.Point // round lasting commitment point + V_hat abstract.Point // aggregate of commit points + + // merkle tree roots of children in strict order + CMTRoots hashid.HashId // concatenated hash ids of children + Suite abstract.Suite +} + +func (snLog SNLog) MarshalBinary() ([]byte, error) { + // abstract.Write used to encode/ marshal crypto types + b := bytes.Buffer{} + snLog.Suite.Write(&b, &snLog.v, &snLog.V, &snLog.V_hat) + ////// gob is used to encode non-crypto types + enc := gob.NewEncoder(&b) + err := enc.Encode(snLog.CMTRoots) + return b.Bytes(), err +} + +func (snLog *SNLog) UnmarshalBinary(data []byte) error { + // abstract.Read used to decode/ unmarshal crypto types + b := bytes.NewBuffer(data) + err := snLog.Suite.Read(b, &snLog.v, &snLog.V, &snLog.V_hat) + // gob is used to decode non-crypto types + rem, _ := snLog.MarshalBinary() + snLog.CMTRoots = data[len(rem):] + return err +} + +func (snLog *SNLog) Getv() abstract.Secret { + return snLog.v +} diff --git a/proto/sign/signingMessages.go b/lib/sign/messagessign.go similarity index 52% rename from proto/sign/signingMessages.go rename to lib/sign/messagessign.go index c8f1dd6bf8..bafc47f4c6 100644 --- a/proto/sign/signingMessages.go +++ b/lib/sign/messagessign.go @@ -3,18 +3,21 @@ package sign import ( "reflect" - "github.com/dedis/crypto/abstract" - "github.com/dedis/crypto/edwards" - //"github.com/dedis/crypto/nist" - "github.com/dedis/protobuf" + "encoding/json" + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/cothority/lib/hashid" "github.com/dedis/cothority/lib/proof" + "github.com/dedis/crypto/abstract" + "github.com/dedis/crypto/suites" + "github.com/dedis/protobuf" ) -// All message structures defined in this package are used in the -// Collective Signing Protocol -// Over the network they are sent as byte slices, so each message -// has its own MarshlBinary and UnmarshalBinary method +/* +All message structures defined in this package are used in the +Collective Signing Protocol +Over the network they are sent as byte slices, so each message +has its own MarshalBinary and UnmarshalBinary method +*/ type MessageType int @@ -24,6 +27,8 @@ const ( Commitment Challenge Response + SignatureBroadcast + StatusReturn CatchUpReq CatchUpResp GroupChange @@ -46,6 +51,10 @@ func (m MessageType) String() string { return "Challenge" case Response: return "Response" + case SignatureBroadcast: + return "SignatureBroadcast" + case StatusReturn: + return "StatusReturn" case CatchUpReq: return "CatchUpRequest" case CatchUpResp: @@ -69,55 +78,101 @@ 
func (m MessageType) String() string { // Signing Messages are used for all communications between servers // It is important for encoding/ decoding for type to be kept as first field type SigningMessage struct { + Suite string Type MessageType Am *AnnouncementMessage Com *CommitmentMessage Chm *ChallengeMessage Rm *ResponseMessage + SBm *SignatureBroadcastMessage + SRm *StatusReturnMessage Cureq *CatchUpRequest Curesp *CatchUpResponse Vrm *VoteRequestMessage Gcm *GroupChangedMessage Err *ErrorMessage From string - View int + To string + ViewNbr int LastSeenVote int // highest vote ever seen and commited in log, used for catch-up + RoundNbr int } -var msgSuite abstract.Suite = edwards.NewAES128SHA256Ed25519(true) - -//var msgSuite abstract.Suite = nist.NewAES128SHA256P256() +// Helper functions that will return the suite used during the process from a string name +func GetSuite(suite string) abstract.Suite { + s, ok := suites.All()[suite] + if !ok { + dbg.Lvl1("Suites available:", suites.All()) + dbg.Fatal("Didn't find suite", suite) + } + return s +} func NewSigningMessage() interface{} { return &SigningMessage{} } func (sm *SigningMessage) MarshalBinary() ([]byte, error) { - return protobuf.Encode(sm) + b, e := protobuf.Encode(sm) + if len(b) != 0 { + //dbg.Print("Length of bytes is", len(b), "for", sm) + //debug.PrintStack() + } + return b, e } func (sm *SigningMessage) UnmarshalBinary(data []byte) error { + dbg.Fatal("Shouldn't be called") + return nil +} + +func (sm *SigningMessage) UnmarshalBinarySuite(jdata *JSONdata) error { + suite := GetSuite(jdata.Suite) var cons = make(protobuf.Constructors) var point abstract.Point var secret abstract.Secret - cons[reflect.TypeOf(&point).Elem()] = func() interface{} { return msgSuite.Point() } - cons[reflect.TypeOf(&secret).Elem()] = func() interface{} { return msgSuite.Secret() } - return protobuf.DecodeWithConstructors(data, sm, cons) + cons[reflect.TypeOf(&point).Elem()] = func() interface{} { return suite.Point() } + cons[reflect.TypeOf(&secret).Elem()] = func() interface{} { return suite.Secret() } + return protobuf.DecodeWithConstructors(jdata.Data, sm, cons) +} + +type JSONdata struct { + Suite string + Data []byte +} + +func (sm *SigningMessage) MarshalJSON() ([]byte, error) { + data, err := sm.MarshalBinary() + if err != nil { + return nil, err + } + return json.Marshal(JSONdata{ + Suite: sm.Suite, + Data: data, + }) +} + +func (sm *SigningMessage) UnmarshalJSON(dataJSON []byte) error { + jdata := &JSONdata{} + json.Unmarshal(dataJSON, jdata) + return sm.UnmarshalBinarySuite(jdata) } // Broadcasted message initiated and signed by proposer type AnnouncementMessage struct { - LogTest []byte // TODO: change LogTest to Messg - Round int - + Message []byte + RoundType string // what kind of round this announcement is made for // VoteRequest *VoteRequest Vote *Vote // Vote Request (propose) } +// Commitment of all nodes together with the data they want +// to have signed type CommitmentMessage struct { - V abstract.Point // commitment Point - V_hat abstract.Point // product of subtree participating nodes' commitment points - X_hat abstract.Point // product of subtree participating nodes' public keys + Message []byte + V abstract.Point // commitment Point + V_hat abstract.Point // product of subtree participating nodes' commitment points + X_hat abstract.Point // product of subtree participating nodes' public keys MTRoot hashid.HashId // root of Merkle (sub)Tree @@ -128,11 +183,13 @@ type CommitmentMessage struct { // CountedVotes 
*CountedVotes // CountedVotes contains a subtree's votes Vote *Vote // Vote Response (promise) - Round int + Messages int // Actual number of messages signed } +// The challenge calculated by the root-node type ChallengeMessage struct { - C abstract.Secret // challenge + Message []byte + C abstract.Secret // challenge // Depth byte MTRoot hashid.HashId // the very root of the big Merkle Tree @@ -141,11 +198,13 @@ type ChallengeMessage struct { // CountedVotes *CountedVotes // CountedVotes contains the whole tree's votes Vote *Vote // Vote Confirmerd/ Rejected (accept) - Round int } +// Every node replies with eventual exceptions if they +// are not OK type ResponseMessage struct { - R_hat abstract.Secret // response + Message []byte + R_hat abstract.Secret // response // public keys of children servers that did not respond to // challenge from root @@ -157,17 +216,45 @@ type ResponseMessage struct { Vote *Vote // Vote Ack/Nack in thr log (ack/nack) - Round int } +// 5th message going from root to leaves to send the +// signature +type SignatureBroadcastMessage struct { + // Aggregate response of root + R0_hat abstract.Secret + // Challenge + C abstract.Secret + // Aggregate public key + X0_hat abstract.Point + // Aggregate public commitment + V0_hat abstract.Point + + // Number of messages signed + Messages int +} + +// StatusReturnMessage carries the last status after the +// SignatureBroadcastMessage has been sent to everybody. +// Every node should just add up the stats from its children. +type StatusReturnMessage struct { + // How many nodes sent a 'respond' message + Responders int + // How many peers contacted for a challenge + Peers int +} + +// In case of an error, this message is sent type ErrorMessage struct { Err string } +// For request of a vote on tree-structure change type VoteRequestMessage struct { Vote *Vote } +// Whenever the group changed type GroupChangedMessage struct { V *Vote // if vote not accepted rest of fields are nil diff --git a/proto/sign/voteMessages.go b/lib/sign/messagesvote.go similarity index 96% rename from proto/sign/voteMessages.go rename to lib/sign/messagesvote.go index 16d185eff6..978fea9db5 100644 --- a/proto/sign/voteMessages.go +++ b/lib/sign/messagesvote.go @@ -67,6 +67,12 @@ type VoteResponse struct { Sig BasicSig } +// A basic, verifiable signature +type BasicSig struct { + C abstract.Secret // challenge + R abstract.Secret // response +} + // for sorting arrays of VoteResponse type ByVoteResponse []*VoteResponse diff --git a/lib/sign/nodeconsensus.go b/lib/sign/nodeconsensus.go new file mode 100644 index 0000000000..0ad57c7324 --- /dev/null +++ b/lib/sign/nodeconsensus.go @@ -0,0 +1,88 @@ +package sign + +import ( + "errors" + "github.com/dedis/cothority/lib/dbg" +) + +/* +NOT WORKING - consensus code for voting - should +be implemented as a roundType +*/ + +func (sn *Node) TryViewChange(view int) error { + dbg.Lvl4(sn.Name(), "TRY VIEW CHANGE on", view, "with last view", sn.ViewNo) + // should ideally be compare and swap + sn.viewmu.Lock() + if view <= sn.ViewNo { + sn.viewmu.Unlock() + return errors.New("trying to view change on previous/ current view") + } + if sn.ChangingView { + sn.viewmu.Unlock() + return ChangingViewError + } + sn.ChangingView = true + sn.viewmu.Unlock() + + // take action if new view root + if sn.Name() == sn.RootFor(view) { + dbg.Fatal(sn.Name(), "Initiating view change for view:", view, "BTH") + /* + go func() { + err := sn.StartVotingRound( + &Vote{ + View: view, + Type: ViewChangeVT, + Vcv: &ViewChangeVote{ + 
View: view, + Root: sn.Name()}}) + if err != nil { + dbg.Lvl2(sn.Name(), "Try view change failed:", err) + } + }() + */ + } + return nil +} + +func (sn *Node) TimeForViewChange() bool { + if sn.RoundsPerView == 0 { + // No view change asked + return false + } + sn.roundmu.Lock() + defer sn.roundmu.Unlock() + + // if this round is last one for this view + if sn.LastSeenRound%sn.RoundsPerView == 0 { + // dbg.Lvl4(sn.Name(), "TIME FOR VIEWCHANGE:", lsr, rpv) + return true + } + return false +} + +func (sn *Node) SetupProposal(view int, am *AnnouncementMessage, from string) error { + dbg.Fatal("SetupProposal not implemented anymore") + return nil +} + +func (sn *Node) Propose(view int, RoundNbr int, am *AnnouncementMessage, from string) error { + dbg.Fatal("Propose not implemented anymore") + return nil +} + +func (sn *Node) Promise(view, Round int, sm *SigningMessage) error { + dbg.Fatal("Promise not implemented anymore") + return nil +} + +func (sn *Node) Accept(view, RoundNbr int, chm *ChallengeMessage) error { + dbg.Fatal("Accept not implemented anymore") + return nil +} + +func (sn *Node) Accepted(view, Round int, sm *SigningMessage) error { + dbg.Fatal("Accepted not implemented anymore") + return nil +} diff --git a/proto/sign/signingNode.go b/lib/sign/nodehelper.go similarity index 61% rename from proto/sign/signingNode.go rename to lib/sign/nodehelper.go index 8031534188..7aed88ec6b 100644 --- a/proto/sign/signingNode.go +++ b/lib/sign/nodehelper.go @@ -9,22 +9,29 @@ import ( "math/rand" "strconv" "sync" - "sync/atomic" "time" "golang.org/x/net/context" log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" - "github.com/dedis/crypto/abstract" "github.com/dedis/cothority/lib/coconet" "github.com/dedis/cothority/lib/hashid" "github.com/dedis/cothority/lib/logutils" + "github.com/dedis/cothority/lib/proof" + "github.com/dedis/crypto/abstract" + "sync/atomic" ) +/* +This implements the helper Node-methods +*/ + type Type int // used by other modules as coll_sign.Type +var ChangingViewError error = errors.New("In the process of changing view") + const ( // Default Signature involves creating Merkle Trees MerkleTree = iota @@ -35,8 +42,6 @@ const ( Voter ) -var _ Signer = &Node{} - type Node struct { coconet.Host @@ -58,25 +63,26 @@ type Node struct { PrivKey abstract.Secret // long lasting private key nRounds int - Rounds map[int]*Round - Round int // *only* used by Root( by annoucer) - RoundTypes []RoundType + Rounds map[int]Round roundmu sync.Mutex LastSeenRound int // largest round number I have seen RoundsAsRoot int // latest continuous streak of rounds with sn root - AnnounceLock sync.Mutex + // Little hack for the moment where we keep the number of responses + + // commits for each round so we know when to pass down the messages to the + // round interfaces.(it was the role of the RoundMerkle before) + RoundCommits map[int][]*SigningMessage + RoundResponses map[int][]*SigningMessage - CommitFunc CommitFunc - DoneFunc DoneFunc + AnnounceLock sync.Mutex // NOTE: reuse of channels via round-number % Max-Rounds-In-Mermory can be used roundLock sync.RWMutex - LogTest []byte // for testing purposes + Message []byte // for testing purposes peerKeys map[string]abstract.Point // map of all peer public keys closed chan error // error sent when connection closed - isclosed bool + Isclosed bool done chan int // round number sent when round done commitsDone chan int // round number sent when announce/commit phase done @@ -102,6 
+108,18 @@ type Node struct { LastAppliedVote int64 // last vote we have committed to our log Actions map[int][]*Vote + + // These are stored during the challenge phase so that they can + // be sent to the client during the SignatureBroadcast + Proof proof.Proof + MTRoot hashid.HashId // the very root of the big Merkle Tree + Messages int // Number of messages to be signed received + MessagesInRun int // Total number of messages since start of run + + PeerStatus StatusReturnMessage // Actual status of children peers + PeerStatusRcvd int // How many peers sent status + + MaxWait time.Duration // How long the announcement phase can take } // Start listening for messages coming from parent(up) @@ -109,29 +127,10 @@ func (sn *Node) Listen() error { if sn.Pool() == nil { sn.GenSetPool() } - err := sn.get() + err := sn.ProcessMessages() return err } -// func (sn *Node) CheckRoundTypes(rts []RoundType) error { -// if len(rts) != len(sn.RoundTypes) -// for i := range sn.RoundTypes { -// -// -// } -// } -// -func (sn *Node) printRoundTypes() { - sn.roundmu.Lock() - defer sn.roundmu.Unlock() - for i, rt := range sn.RoundTypes { - if i > sn.LastSeenRound { - break - } - log.Println("Round", i, "type", rt.String()) - } -} - func (sn *Node) Close() { // sn.printRoundTypes() sn.hbLock.Lock() @@ -140,12 +139,13 @@ func (sn *Node) Close() { sn.heartbeat = nil dbg.Lvl4("after close", sn.Name(), "has heartbeat=", sn.heartbeat) } - if !sn.isclosed { + if !sn.Isclosed { close(sn.closed) dbg.Lvl4("signing node: closing:", sn.Name()) sn.Host.Close() } - sn.isclosed = true + dbg.Lvl3("Closed connection") + sn.Isclosed = true sn.hbLock.Unlock() } @@ -160,7 +160,7 @@ func (sn *Node) Hostlist() []string { // Returns name of node who should be the root for the next view // round robin is used on the array of host names to determine the next root func (sn *Node) RootFor(view int) string { - // log.Println(sn.Name(), "ROOT FOR", view) + dbg.Lvl2(sn.Name(), "Root for view", view) var hl []string if view == 0 { hl = sn.HostListOn(view) @@ -176,14 +176,6 @@ func (sn *Node) SetFailureRate(v int) { sn.FailureRate = v } -func (sn *Node) RegisterAnnounceFunc(cf CommitFunc) { - sn.CommitFunc = cf -} - -func (sn *Node) RegisterDoneFunc(df DoneFunc) { - sn.DoneFunc = df -} - func (sn *Node) logFirstPhase(firstRoundTime time.Duration) { log.WithFields(log.Fields{ "file": logutils.File(), @@ -211,33 +203,44 @@ func (sn *Node) logTotalTime(totalTime time.Duration) { }).Info("done with root challenge round " + strconv.Itoa(sn.nRounds)) } -var MAX_WILLING_TO_WAIT time.Duration = 50 * time.Second +func (sn *Node) StartAnnouncementWithWait(round Round, wait time.Duration) error { + sn.AnnounceLock.Lock() + sn.nRounds = sn.LastSeenRound + + // report view is being change, and sleep before retrying + sn.viewmu.Lock() + if sn.ChangingView { + dbg.Lvl1(sn.Name(), "start signing round: changingViewError") + sn.viewmu.Unlock() + return ChangingViewError + } + sn.viewmu.Unlock() -var ChangingViewError error = errors.New("In the process of changing view") + sn.nRounds++ + sn.Rounds[sn.nRounds] = round -func (sn *Node) StartAnnouncement(am *AnnouncementMessage) error { - sn.AnnounceLock.Lock() defer sn.AnnounceLock.Unlock() - dbg.Lvl1("root", sn.Name(), "starting announcement round for round: ", sn.nRounds, "on view", sn.ViewNo) + dbg.Lvl2("root", sn.Name(), "starting announcement round for round:", sn.nRounds, "on view", sn.ViewNo) - first := time.Now() - total := time.Now() - var firstRoundTime time.Duration - var totalTime time.Duration - - 
ctx, cancel := context.WithTimeout(context.Background(), MAX_WILLING_TO_WAIT) + ctx, cancel := context.WithTimeout(context.Background(), wait) var cancelederr error go func() { var err error - if am.Vote != nil { - err = sn.Propose(am.Vote.View, am, "") - } else { - err = sn.Announce(sn.ViewNo, am) - } + // Launch the announcement process + err = sn.Announce(&SigningMessage{ + Suite: sn.Suite().String(), + Type: Announcement, + RoundNbr: sn.nRounds, + ViewNbr: sn.ViewNo, + Am: &AnnouncementMessage{ + RoundType: round.GetType(), + Message: make([]byte, 0), + }, + }) if err != nil { - log.Errorln(err) + dbg.Lvl1(err) cancelederr = err cancel() } @@ -247,13 +250,13 @@ func (sn *Node) StartAnnouncement(am *AnnouncementMessage) error { select { case _ = <-sn.commitsDone: // log time it took for first round to complete - firstRoundTime = time.Since(first) - sn.logFirstPhase(firstRoundTime) + //firstRoundTime = time.Since(first) + //sn.logFirstPhase(firstRoundTime) break case <-sn.closed: return errors.New("closed") case <-ctx.Done(): - log.Errorln(ctx.Err()) + dbg.Lvl1(ctx.Err()) if ctx.Err() == context.Canceled { return cancelederr } @@ -264,14 +267,14 @@ func (sn *Node) StartAnnouncement(am *AnnouncementMessage) error { select { case _ = <-sn.done: // log time it took for second round to complete - totalTime = time.Since(total) - sn.logSecondPhase(totalTime - firstRoundTime) - sn.logTotalTime(totalTime) + //totalTime = time.Since(total) + //sn.logSecondPhase(totalTime - firstRoundTime) + //sn.logTotalTime(totalTime) return nil case <-sn.closed: return errors.New("closed") case <-ctx.Done(): - log.Errorln(ctx.Err()) + dbg.Lvl2("Timeout:", ctx.Err()) if ctx.Err() == context.Canceled { return cancelederr } @@ -279,69 +282,24 @@ func (sn *Node) StartAnnouncement(am *AnnouncementMessage) error { } } -func (sn *Node) StartVotingRound(v *Vote) error { - log.Println(sn.Name(), "start voting round") - sn.nRounds = sn.LastSeenRound - - // during view changes, only accept view change related votes - if sn.ChangingView && v.Vcv == nil { - log.Println(sn.Name(), "start signing round: changingViewError") - return ChangingViewError - } - - sn.nRounds++ - v.Round = sn.nRounds - v.Index = int(atomic.LoadInt64(&sn.LastSeenVote)) + 1 - v.Count = &Count{} - v.Confirmed = false - // only default fill-in view numbers when not prefilled - if v.View == 0 { - v.View = sn.ViewNo - } - if v.Av != nil && v.Av.View == 0 { - v.Av.View = sn.ViewNo + 1 - } - if v.Rv != nil && v.Rv.View == 0 { - v.Rv.View = sn.ViewNo + 1 - } - if v.Vcv != nil && v.Vcv.View == 0 { - v.Vcv.View = sn.ViewNo + 1 - } - return sn.StartAnnouncement( - &AnnouncementMessage{LogTest: []byte("vote round"), Round: sn.nRounds, Vote: v}) -} - -func (sn *Node) StartSigningRound() error { - sn.nRounds = sn.LastSeenRound - - // report view is being change, and sleep before retrying - sn.viewmu.Lock() - if sn.ChangingView { - log.Println(sn.Name(), "start signing round: changingViewError") - sn.viewmu.Unlock() - return ChangingViewError - } - sn.viewmu.Unlock() - - sn.nRounds++ - return sn.StartAnnouncement( - &AnnouncementMessage{LogTest: []byte("sign round"), Round: sn.nRounds}) +func (sn *Node) StartAnnouncement(round Round) error { + return sn.StartAnnouncementWithWait(round, sn.MaxWait) } func NewNode(hn coconet.Host, suite abstract.Suite, random cipher.Stream) *Node { sn := &Node{Host: hn, suite: suite} - msgSuite = suite sn.PrivKey = suite.Secret().Pick(random) sn.PubKey = suite.Point().Mul(nil, sn.PrivKey) sn.peerKeys = make(map[string]abstract.Point) 
- sn.Rounds = make(map[int]*Round) sn.closed = make(chan error, 20) sn.done = make(chan int, 10) sn.commitsDone = make(chan int, 10) sn.viewChangeCh = make(chan string, 0) + sn.RoundCommits = make(map[int][]*SigningMessage) + sn.RoundResponses = make(map[int][]*SigningMessage) sn.FailureRate = 0 h := fnv.New32a() h.Write([]byte(hn.Name())) @@ -350,7 +308,9 @@ func NewNode(hn coconet.Host, suite abstract.Suite, random cipher.Stream) *Node sn.Host.SetSuite(suite) sn.VoteLog = NewVoteLog() sn.Actions = make(map[int][]*Vote) - sn.RoundsPerView = 100 + sn.RoundsPerView = 0 + sn.Rounds = make(map[int]Round) + sn.MaxWait = 50 * time.Second return sn } @@ -360,13 +320,15 @@ func NewKeyedNode(hn coconet.Host, suite abstract.Suite, PrivKey abstract.Secret sn.PubKey = suite.Point().Mul(nil, sn.PrivKey) sn.peerKeys = make(map[string]abstract.Point) - sn.Rounds = make(map[int]*Round) sn.closed = make(chan error, 20) sn.done = make(chan int, 10) sn.commitsDone = make(chan int, 10) sn.viewChangeCh = make(chan string, 0) + sn.RoundCommits = make(map[int][]*SigningMessage) + sn.RoundResponses = make(map[int][]*SigningMessage) + sn.FailureRate = 0 h := fnv.New32a() h.Write([]byte(hn.Name())) @@ -375,7 +337,9 @@ func NewKeyedNode(hn coconet.Host, suite abstract.Suite, PrivKey abstract.Secret sn.Host.SetSuite(suite) sn.VoteLog = NewVoteLog() sn.Actions = make(map[int][]*Vote) - sn.RoundsPerView = 100 + sn.RoundsPerView = 0 + sn.Rounds = make(map[int]Round) + sn.MaxWait = 50 * time.Second return sn } @@ -384,13 +348,13 @@ func (sn *Node) ShouldIFail(phase string) bool { // If we were manually set to always fail if sn.Host.(*coconet.FaultyHost).IsDead() || sn.Host.(*coconet.FaultyHost).IsDeadFor(phase) { - // log.Println(sn.Name(), "dead for "+phase) + dbg.Lvl2(sn.Name(), "dead for "+phase) return true } // If we were only given a probability of failing if p := sn.Rand.Int() % 100; p < sn.FailureRate { - // log.Println(sn.Name(), "died for "+phase, "p", p, "with prob ", sn.FailureRate) + dbg.Lvl2(sn.Name(), "died for "+phase, "p", p, "with prob", sn.FailureRate) return true } @@ -419,77 +383,16 @@ func (sn *Node) LastRound() int { return lsr } -func (sn *Node) SetLastSeenRound(round int) { - sn.LastSeenRound = round +func (sn *Node) SetLastSeenRound(roundNbr int) { + sn.LastSeenRound = roundNbr } -func (sn *Node) CommitedFor(round *Round) bool { - sn.roundLock.RLock() - defer sn.roundLock.RUnlock() - - if round.Log.v != nil { - return true - } - return false -} - -// Cast on vote for Vote -func (sn *Node) AddVotes(Round int, v *Vote) { - if v == nil { - return - } - - round := sn.Rounds[Round] - cv := round.Vote.Count - vresp := &VoteResponse{Name: sn.Name()} - - // accept what admin requested with x% probability - // TODO: replace with non-probabilistic approach, maybe callback - forProbability := 100 - sn.randmu.Lock() - if p := sn.Rand.Int() % 100; p < forProbability { - cv.For += 1 - vresp.Accepted = true - } else { - cv.Against += 1 - } - sn.randmu.Unlock() - - // log.Infoln(sn.Name(), "added votes. 
for:", cv.For, "against:", cv.Against) - - // Generate signature on Vote with OwnVote *counted* in - b, err := v.MarshalBinary() - if err != nil { - log.Fatal("Marshal Binary on Counted Votes failed") - } - rand := sn.suite.Cipher([]byte(sn.Name() + strconv.Itoa(Round))) - vresp.Sig = ElGamalSign(sn.suite, rand, b, sn.PrivKey) - - // Add VoteResponse to Votes - v.Count.Responses = append(v.Count.Responses, vresp) - round.Vote = v -} - -func intToByteSlice(Round int) []byte { +func intToByteSlice(roundNbr int) []byte { buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, Round) + binary.Write(buf, binary.LittleEndian, roundNbr) return buf.Bytes() } -// *only* called by root node -func (sn *Node) SetAccountableRound(Round int) { - // Create my back link to previous round - sn.SetBackLink(Round) - - h := sn.suite.Hash() - h.Write(intToByteSlice(Round)) - h.Write(sn.Rounds[Round].BackLink) - sn.Rounds[Round].AccRound = h.Sum(nil) - - // here I could concatenate sn.Round after the hash for easy keeping track of round - // todo: check this -} - func (sn *Node) UpdateTimeout(t ...time.Duration) { if len(t) > 0 { sn.SetTimeout(t[0]) @@ -499,23 +402,6 @@ func (sn *Node) UpdateTimeout(t ...time.Duration) { } } -func (sn *Node) SetBackLink(Round int) { - prevRound := Round - 1 - sn.Rounds[Round].BackLink = hashid.HashId(make([]byte, hashid.Size)) - if prevRound >= FIRST_ROUND { - // My Backlink = Hash(prevRound, sn.Rounds[prevRound].BackLink, sn.Rounds[prevRound].MTRoot) - h := sn.suite.Hash() - if sn.Rounds[prevRound] == nil { - log.Errorln(sn.Name(), "not setting back link") - return - } - h.Write(intToByteSlice(prevRound)) - h.Write(sn.Rounds[prevRound].BackLink) - h.Write(sn.Rounds[prevRound].MTRoot) - sn.Rounds[Round].BackLink = h.Sum(nil) - } -} - func (sn *Node) GenSetPool() { var p sync.Pool p.New = NewSigningMessage @@ -538,3 +424,50 @@ func (sn *Node) Timeout() time.Duration { func (sn *Node) DefaultTimeout() time.Duration { return 5000 * time.Millisecond } + +func (sn *Node) CloseAll(view int) error { + dbg.Lvl2(sn.Name(), "received CloseAll on", view) + + // At the leaves + if len(sn.Children(view)) == 0 { + dbg.Lvl3(sn.Name(), "in CloseAll is root leaf") + } else { + dbg.Lvl3(sn.Name(), "in CloseAll is calling", len(sn.Children(view)), "children") + + // Inform all children of announcement + messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) + for i := range messgs { + sm := SigningMessage{ + Suite: sn.Suite().String(), + Type: CloseAll, + ViewNbr: view, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + } + messgs[i] = &sm + } + ctx := context.TODO() + if err := sn.PutDown(ctx, view, messgs); err != nil { + return err + } + } + dbg.Lvl3("Closing down shop", sn.Isclosed) + sn.Close() + return nil +} + +func (sn *Node) PutUpError(view int, err error) { + // dbg.Lvl4(sn.Name(), "put up response with err", err) + // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) + ctx := context.TODO() + sn.PutUp(ctx, view, &SigningMessage{ + Suite: sn.Suite().String(), + Type: Error, + ViewNbr: view, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + Err: &ErrorMessage{Err: err.Error()}}) +} + +// Getting actual View +func (sn *Node) GetView() int { + return sn.ViewNo +} diff --git a/lib/sign/nodeprotocol.go b/lib/sign/nodeprotocol.go new file mode 100644 index 0000000000..0706325cb4 --- /dev/null +++ b/lib/sign/nodeprotocol.go @@ -0,0 +1,634 @@ +package sign + +import ( + "errors" + "fmt" + "io" + "sync/atomic" + + log 
"github.com/Sirupsen/logrus" + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/cothority/lib/dbg" + "golang.org/x/net/context" + "strings" + "syscall" +) + +/* +This implements the part of the Node-structure that has to +do with the protocol itself: Announce, Commit, Chalenge and +Response. Two additional steps are done: SignatureBroadcast +to send the final commit to all nodes, and StatusReturn which +allows for collection of statistics. +*/ + +// Collective Signing via ElGamal +// 1. Announcement +// 2. Commitment +// 3. Challenge +// 4. Response + +// Get multiplexes all messages from TCPHost using application logic +func (sn *Node) ProcessMessages() error { + dbg.Lvl4(sn.Name(), "getting") + defer dbg.Lvl4(sn.Name(), "done getting") + + sn.UpdateTimeout() + dbg.Lvl4("Going to get", sn.Name()) + msgchan := sn.Host.GetNetworkMessg() + // heartbeat for intiating viewChanges, allows intial 500s setup time + /* sn.hbLock.Lock() + sn.heartbeat = time.NewTimer(500 * time.Second) + sn.hbLock.Unlock() */ + + // gossip to make sure we are up to date + sn.StartGossip() + errReset := syscall.ECONNRESET.Error() + for { + select { + case <-sn.closed: + dbg.Lvl3("Received closed-message through channel") + sn.StopHeartbeat() + return nil + default: + dbg.Lvl4(sn.Name(), "waiting for message") + nm, ok := <-msgchan + err := nm.Err + errStr := "" + if err != nil { + errStr = err.Error() + } + + // One of the errors doesn't have an error-number applied, so we need + // to check for the string - will probably be fixed in go 1.6 + if !ok || err == coconet.ErrClosed || err == io.EOF || + err == io.ErrClosedPipe { + dbg.Lvl3(sn.Name(), "getting from closed host") + sn.Close() + return coconet.ErrClosed + } + + // if it is a non-fatal error try again + if err != nil { + if strings.Contains(errStr, errReset) { + dbg.Lvl2(sn.Name(), "connection reset error") + return coconet.ErrClosed + } + dbg.Lvl1(sn.Name(), "error getting message (still continuing)", err) + continue + } + + // interpret network message as Signing Message + sm := nm.Data.(*SigningMessage) + sm.From = nm.From + dbg.Lvlf4("Message on %s is type %s and %+v", sn.Name(), sm.Type, sm) + + switch sm.Type { + // if it is a bad message just ignore it + default: + continue + case Announcement: + dbg.Lvl3(sn.Name(), "got announcement") + sn.ReceivedHeartbeat(sm.ViewNbr) + + var err error + if sm.Am.Vote != nil { + err = sn.Propose(sm.ViewNbr, sm.RoundNbr, sm.Am, sm.From) + dbg.Lvl4(sn.Name(), "done proposing") + } else { + if !sn.IsParent(sm.ViewNbr, sm.From) { + log.Fatalln(sn.Name(), "received announcement from non-parent on view", sm.ViewNbr) + continue + } + err = sn.Announce(sm) + } + if err != nil { + dbg.Error(sn.Name(), "announce error:", err) + } + + // if it is a commitment or response it is from the child + case Commitment: + dbg.Lvl3(sn.Name(), "got commitment") + if !sn.IsChild(sm.ViewNbr, sm.From) { + log.Fatalln(sn.Name(), "received commitment from non-child on view", sm.ViewNbr) + continue + } + + var err error + if sm.Com.Vote != nil { + err = sn.Promise(sm.ViewNbr, sm.RoundNbr, sm) + } else { + err = sn.Commit(sm) + } + if err != nil { + dbg.Error(sn.Name(), "commit error:", err) + } + case Challenge: + dbg.Lvl3(sn.Name(), "got challenge") + if !sn.IsParent(sm.ViewNbr, sm.From) { + log.Fatalln(sn.Name(), "received challenge from non-parent on view", sm.ViewNbr) + continue + } + sn.ReceivedHeartbeat(sm.ViewNbr) + + var err error + if sm.Chm.Vote != nil { + err = sn.Accept(sm.ViewNbr, sm.RoundNbr, sm.Chm) + } else { + err = 
sn.Challenge(sm) + } + if err != nil { + dbg.Error(sn.Name(), "challenge error:", err) + } + case Response: + dbg.Lvl3(sn.Name(), "received response from", sm.From) + if !sn.IsChild(sm.ViewNbr, sm.From) { + log.Fatalln(sn.Name(), "received response from non-child on view", sm.ViewNbr) + continue + } + + var err error + if sm.Rm.Vote != nil { + err = sn.Accepted(sm.ViewNbr, sm.RoundNbr, sm) + } else { + err = sn.Respond(sm) + } + if err != nil { + dbg.Error(sn.Name(), "response error:", err) + } + case SignatureBroadcast: + dbg.Lvl3(sn.Name(), "received SignatureBroadcast", sm.From) + sn.ReceivedHeartbeat(sm.ViewNbr) + err = sn.SignatureBroadcast(sm) + case StatusReturn: + sn.StatusReturn(sm.ViewNbr, sm) + case CatchUpReq: + v := sn.VoteLog.Get(sm.Cureq.Index) + ctx := context.TODO() + sn.PutTo(ctx, sm.From, + &SigningMessage{ + Suite: sn.Suite().String(), + From: sn.Name(), + Type: CatchUpResp, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + Curesp: &CatchUpResponse{Vote: v}}) + case CatchUpResp: + if sm.Curesp.Vote == nil || sn.VoteLog.Get(sm.Curesp.Vote.Index) != nil { + continue + } + vi := sm.Curesp.Vote.Index + // put in votelog to be streamed and applied + sn.VoteLog.Put(vi, sm.Curesp.Vote) + // continue catching up + sn.CatchUp(vi+1, sm.From) + case GroupChange: + if sm.ViewNbr == -1 { + sm.ViewNbr = sn.ViewNo + if sm.Vrm.Vote.Type == AddVT { + sn.AddPeerToPending(sm.From) + } + } + // TODO sanity checks: check if view is == sn.ViewNo + if sn.RootFor(sm.ViewNbr) == sn.Name() { + dbg.Fatal("Group change not implementekd. BTH") + //go sn.StartVotingRound(sm.Vrm.Vote) + continue + } + sn.PutUp(context.TODO(), sm.ViewNbr, sm) + case GroupChanged: + if !sm.Gcm.V.Confirmed { + dbg.Lvl4(sn.Name(), " received attempt to group change not confirmed") + continue + } + if sm.Gcm.V.Type == RemoveVT { + dbg.Lvl4(sn.Name(), " received removal notice") + } else if sm.Gcm.V.Type == AddVT { + dbg.Lvl4(sn.Name(), " received addition notice") + sn.NewView(sm.ViewNbr, sm.From, nil, sm.Gcm.HostList) + } else { + log.Errorln(sn.Name(), "received GroupChanged for unacceptable action") + } + case StatusConnections: + sn.ReceivedHeartbeat(sm.ViewNbr) + err = sn.StatusConnections(sm.ViewNbr, sm.Am) + case CloseAll: + sn.ReceivedHeartbeat(sm.ViewNbr) + err = sn.CloseAll(sm.ViewNbr) + return nil + case Error: + dbg.Lvl4("Received Error Message:", errors.New("received message of unknown type"), sm, sm.Err) + } + } + } +} + +func (sn *Node) Announce(sm *SigningMessage) error { + view := sm.ViewNbr + RoundNbr := sm.RoundNbr + am := sm.Am + dbg.Lvl4(sn.Name(), "received announcement on", view) + var round Round + round = sn.Rounds[RoundNbr] + if round == nil { + if am == nil { + return fmt.Errorf("Got a nil announcement on a non root nde?") + } + + sn.LastSeenRound = max(sn.LastSeenRound, RoundNbr) + rtype := am.RoundType + // create the new round and save it + dbg.Lvl3(sn.Name(), "Creating new round-type", rtype) + r, err := NewRoundFromType(rtype, sn) + if err != nil { + dbg.Lvl3(sn.Name(), "Error getting new round in announcement") + return err + } + sn.Rounds[RoundNbr] = r + round = r + } + + nChildren := sn.NChildren(view) + out := make([]*SigningMessage, nChildren) + for i := range out { + out[i] = &SigningMessage{ + Suite: sn.Suite().String(), + Type: Announcement, + ViewNbr: sn.ViewNo, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + RoundNbr: RoundNbr, + Am: &AnnouncementMessage{ + Message: make([]byte, 0), + RoundType: sm.Am.RoundType, + }, + } + } + err := round.Announcement(view, 
RoundNbr, sm, out) + if err != nil { + dbg.Lvl3(sn.Name(), "Error on announcement", err) + return err + } + + if len(sn.Children(view)) == 0 { + // If we are a leaf, start the commit phase process + sn.Commit(&SigningMessage{ + Suite: sn.Suite().String(), + Type: Commitment, + RoundNbr: RoundNbr, + ViewNbr: view, + }) + } else { + // Transform the AnnouncementMessages to SigningMessages to send to the + // Children + msgs_bm := make([]coconet.BinaryMarshaler, nChildren) + for i := range msgs_bm { + msgs_bm[i] = out[i] + } + + // And sending to all our children-nodes + dbg.Lvlf4("%s sending to all children", sn.Name()) + ctx := context.TODO() + if err := sn.PutDown(ctx, view, msgs_bm); err != nil { + return err + } + } + + return nil +} + +func (sn *Node) Commit(sm *SigningMessage) error { + view := sm.ViewNbr + roundNbr := sm.RoundNbr + // update max seen round + sn.roundmu.Lock() + sn.LastSeenRound = max(sn.LastSeenRound, roundNbr) + sn.roundmu.Unlock() + + commitList, ok := sn.RoundCommits[roundNbr] + if !ok { + // first time we see a commit message for this round + commitList = make([]*SigningMessage, 0) + sn.RoundCommits[roundNbr] = commitList + } + // signingmessage nil <=> we are a leaf + if sm.Com != nil { + commitList = append(commitList, sm) + sn.RoundCommits[roundNbr] = commitList + } + + dbg.Lvl3("Got", len(sn.RoundCommits[roundNbr]), "of", len(sn.Children(view)), "commits") + // not enough commits yet (not all children replied) + if len(sn.RoundCommits[roundNbr]) != len(sn.Children(view)) { + dbg.Lvl3(sn.Name(), "Not enough commits received to call the Commit of the round") + return nil + } + + ri := sn.Rounds[roundNbr] + if ri == nil { + dbg.Lvl3(sn.Name(), "No round interface for commit round number", roundNbr) + return fmt.Errorf("No Round Interface defined for this round number (commitment)") + } + out := &SigningMessage{ + Suite: sn.Suite().String(), + ViewNbr: view, + Type: Commitment, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + RoundNbr: roundNbr, + Com: &CommitmentMessage{ + Message: make([]byte, 0), + }, + } + err := ri.Commitment(sn.RoundCommits[roundNbr], out) + // now we can delete the commits for this round + delete(sn.RoundCommits, roundNbr) + + if err != nil { + return nil + } + + if sn.IsRoot(view) { + sn.commitsDone <- roundNbr + err = sn.Challenge(&SigningMessage{ + Suite: sn.Suite().String(), + RoundNbr: roundNbr, + Type: Challenge, + ViewNbr: view, + Chm: &ChallengeMessage{}, + }) + } else { + // create and putup own commit message + // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) + dbg.Lvl4(sn.Name(), "puts up commit") + ctx := context.TODO() + dbg.Lvlf3("Out is %+v", out) + err = sn.PutUp(ctx, view, out) + } + return err +} + +// initiated by root, propagated by all others +func (sn *Node) Challenge(sm *SigningMessage) error { + view := sm.ViewNbr + RoundNbr := sm.RoundNbr + dbg.Lvl3("Challenge for round", RoundNbr) + // update max seen round + sn.roundmu.Lock() + sn.LastSeenRound = max(sn.LastSeenRound, RoundNbr) + sn.roundmu.Unlock() + + children := sn.Children(view) + + challs := make([]*SigningMessage, len(children)) + i := 0 + for child := range children { + challs[i] = &SigningMessage{ + Suite: sn.Suite().String(), + ViewNbr: view, + RoundNbr: RoundNbr, + Type: Challenge, + To: child, + Chm: &ChallengeMessage{ + Message: make([]byte, 0), + }} + i++ + } + + round := sn.Rounds[RoundNbr] + if round == nil { + dbg.Lvl3("No Round Interface created for this round. 
Children:", + len(children)) + } else { + err := round.Challenge(sm, challs) + if err != nil { + return err + } + } + + // if we are a leaf, send the respond up + if len(children) == 0 { + sn.Respond(&SigningMessage{ + Suite: sn.Suite().String(), + Type: Response, + ViewNbr: view, + RoundNbr: RoundNbr, + }) + } else { + // otherwise continue to pass down challenge + for _, out := range challs { + if out.To != "" { + conn := children[out.To] + conn.PutData(out) + } else { + dbg.Error("Out.To == nil with children", children) + } + } + } + // dbg.Lvl4(sn.Name(), "Done handling challenge message") + return nil +} + +// Respond send the response UP from leaf to parent +// called initially by the all the bottom leaves +func (sn *Node) Respond(sm *SigningMessage) error { + view := sm.ViewNbr + roundNbr := sm.RoundNbr + dbg.Lvl4(sn.Name(), "couting response on view, round", view, roundNbr, "Nchildren", len(sn.Children(view))) + // update max seen round + sn.roundmu.Lock() + sn.LastSeenRound = max(sn.LastSeenRound, roundNbr) + sn.roundmu.Unlock() + sn.PeerStatus = StatusReturnMessage{1, len(sn.Children(view))} + + responseList, ok := sn.RoundResponses[roundNbr] + if !ok { + responseList = make([]*SigningMessage, 0) + sn.RoundResponses[roundNbr] = responseList + } + + // Check if we have all replies from the children + if sm.Rm != nil { + responseList = append(responseList, sm) + } + if len(responseList) != len(sn.Children(view)) { + sn.RoundResponses[roundNbr] = responseList + dbg.Lvl4(sn.Name(), "Received response but still waiting on other children responses (stored", len(responseList), " responses)") + return nil + } + + ri := sn.Rounds[roundNbr] + if ri == nil { + return fmt.Errorf("No Round Interface for this round nbr :(") + } + // Fillinwithdefaultmessage is used to fill the exception with missing + // children and all + out := &SigningMessage{ + Suite: sn.Suite().String(), + Type: Response, + ViewNbr: view, + RoundNbr: roundNbr, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + Rm: &ResponseMessage{ + Message: make([]byte, 0), + ExceptionV_hat: sn.suite.Point().Null(), + ExceptionX_hat: sn.suite.Point().Null(), + }, + } + err := ri.Response(responseList, out) + delete(sn.RoundResponses, roundNbr) + if err != nil { + return err + } + isroot := sn.IsRoot(view) + // if error put it up if parent exists + if err != nil && !isroot { + sn.PutUpError(view, err) + return err + } + + // if no error send up own response + if err == nil && !isroot { + /*if Round.Log.Getv() == nil && sn.ShouldIFail("response") {*/ + //dbg.Lvl4(Round.Name, "failing on response") + //return nil + /*}*/ + + // create and putup own response message + // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) + ctx := context.TODO() + dbg.Lvl4(sn.Name(), "put up response to", sn.Parent(view)) + err = sn.PutUp(ctx, view, out) + } else { + dbg.Lvl4("Root received response") + } + + if sn.TimeForViewChange() { + dbg.Lvl4("acting on responses: trying viewchanges") + err := sn.TryViewChange(view + 1) + if err != nil { + dbg.Lvl3(err) + } + } + + // root reports round is done + // Sends the final signature to every one + if isroot { + sn.SignatureBroadcast(&SigningMessage{ + Suite: sn.Suite().String(), + Type: SignatureBroadcast, + ViewNbr: view, + RoundNbr: roundNbr, + SBm: &SignatureBroadcastMessage{}, + }) + sn.done <- roundNbr + } + + return err +} + +func (sn *Node) StatusConnections(view int, am *AnnouncementMessage) error { + dbg.Lvl3(sn.Name(), "StatusConnected", view) + + // Ask 
connection-count on all connected children + messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) + for i := range messgs { + sm := SigningMessage{ + Suite: sn.Suite().String(), + Type: StatusConnections, + ViewNbr: view, + LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), + Am: am} + messgs[i] = &sm + } + + ctx := context.TODO() + if err := sn.PutDown(ctx, view, messgs); err != nil { + return err + } + return nil +} + +// This will broadcast the final signature to give to client +// it contins the global Response adn global challenge +func (sn *Node) SignatureBroadcast(sm *SigningMessage) error { + view := sm.ViewNbr + RoundNbr := sm.RoundNbr + dbg.Lvl3(sn.Name(), "received SignatureBroadcast on", view) + sn.PeerStatusRcvd = 0 + + ri := sn.Rounds[RoundNbr] + if ri == nil { + return fmt.Errorf("No round created for this round number (signature broadcast)") + } + out := make([]*SigningMessage, sn.NChildren(view)) + for i := range out { + out[i] = &SigningMessage{ + Suite: sn.Suite().String(), + Type: SignatureBroadcast, + ViewNbr: view, + RoundNbr: RoundNbr, + SBm: &SignatureBroadcastMessage{ + R0_hat: sn.suite.Secret().One(), + C: sn.suite.Secret().One(), + X0_hat: sn.suite.Point().Null(), + V0_hat: sn.suite.Point().Null(), + }, + } + } + + err := ri.SignatureBroadcast(sm, out) + if err != nil { + return err + } + + if len(sn.Children(view)) > 0 { + dbg.Lvl3(sn.Name(), "in SignatureBroadcast is calling", len(sn.Children(view)), "children") + ctx := context.TODO() + msgs := make([]coconet.BinaryMarshaler, len(out)) + for i := range msgs { + msgs[i] = out[i] + // Why oh why do we have to do this? + out[i].SBm.X0_hat = sn.suite.Point().Add(out[i].SBm.X0_hat, sn.suite.Point().Null()) + } + if err := sn.PutDown(ctx, view, msgs); err != nil { + return err + } + } else { + dbg.Lvl3(sn.Name(), "sending StatusReturn") + return sn.StatusReturn(view, &SigningMessage{ + Suite: sn.Suite().String(), + Type: StatusReturn, + ViewNbr: view, + RoundNbr: RoundNbr, + SRm: &StatusReturnMessage{}, + }) + } + return nil +} + +// StatusReturn just adds up all children and sends the result to +// the parent +func (sn *Node) StatusReturn(view int, sm *SigningMessage) error { + sn.PeerStatusRcvd += 1 + sn.PeerStatus.Responders += sm.SRm.Responders + sn.PeerStatus.Peers += sm.SRm.Peers + + // Wait for other children before propagating the message + if sn.PeerStatusRcvd < len(sn.Children(view)) { + dbg.Lvl3(sn.Name(), "Waiting for other children") + return nil + } + + var err error = nil + if sn.IsRoot(view) { + // Add the root-node + sn.PeerStatus.Peers += 1 + dbg.Lvl3("We got", sn.PeerStatus.Responders, "responses from", sn.PeerStatus.Peers, "peers.") + } else { + dbg.Lvl4(sn.Name(), "puts up statusReturn for", sn.PeerStatus) + ctx := context.TODO() + sm.SRm = &sn.PeerStatus + err = sn.PutUp(ctx, view, sm) + } + dbg.Lvl3("Deleting round", sm.RoundNbr, sn.Rounds) + delete(sn.Rounds, sm.RoundNbr) + return err +} diff --git a/proto/sign/snutils.go b/lib/sign/nodeutils.go similarity index 59% rename from proto/sign/snutils.go rename to lib/sign/nodeutils.go index 17a9e3695d..dc945cbec4 100644 --- a/proto/sign/snutils.go +++ b/lib/sign/nodeutils.go @@ -7,7 +7,7 @@ import ( "time" log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" "github.com/dedis/crypto/abstract" "golang.org/x/net/context" @@ -15,6 +15,10 @@ import ( "github.com/dedis/cothority/lib/logutils" ) +/* +Some more utilities for the Node-structure. 
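+It contains helpers to send a message to all children at once, to simulate
+root- and follower-failures for testing, to fill in default messages for
+children that did not answer, and to switch over to a new view.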
+*/ + func (sn *Node) multiplexOnChildren(view int, sm *SigningMessage) { messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) for i := range messgs { @@ -24,7 +28,7 @@ func (sn *Node) multiplexOnChildren(view int, sm *SigningMessage) { // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) ctx := context.TODO() if err := sn.PutDown(ctx, view, messgs); err != nil { - log.Errorln("failed to putdown messg to children") + dbg.Error("failed to putdown messg to children") } } @@ -68,14 +72,14 @@ func (sn *Node) ReceivedHeartbeat(view int) { } -func (sn *Node) TryRootFailure(view, Round int) bool { +func (sn *Node) TryRootFailure(view, roundNbr int) bool { if sn.IsRoot(view) && sn.FailAsRootEvery != 0 { if sn.RoundsAsRoot != 0 && sn.RoundsAsRoot%sn.FailAsRootEvery == 0 { - log.Errorln(sn.Name() + "was imposed root failure on round" + strconv.Itoa(Round)) + log.Errorln(sn.Name() + "was imposed root failure on round" + strconv.Itoa(roundNbr)) log.WithFields(log.Fields{ "file": logutils.File(), "type": "root_failure", - "round": Round, + "round": roundNbr, }).Info(sn.Name() + "Root imposed failure") // It doesn't make sense to try view change twice // what we essentially end up doing is double setting sn.ViewChanged @@ -88,107 +92,32 @@ func (sn *Node) TryRootFailure(view, Round int) bool { return false } -func (sn *Node) TryFailure(view, Round int) error { - if sn.TryRootFailure(view, Round) { - return ErrImposedFailure +// Simulate failure in system +func (sn *Node) TryFailure(view, roundNbr int) error { + if sn.TryRootFailure(view, roundNbr) { + return errors.New("failure imposed") } - if !sn.IsRoot(view) && sn.FailAsFollowerEvery != 0 && Round%sn.FailAsFollowerEvery == 0 { + if !sn.IsRoot(view) && sn.FailAsFollowerEvery != 0 && roundNbr%sn.FailAsFollowerEvery == 0 { // when failure rate given fail with that probability if (sn.FailureRate > 0 && sn.ShouldIFail("")) || (sn.FailureRate == 0) { log.WithFields(log.Fields{ "file": logutils.File(), "type": "follower_failure", - "round": Round, + "round": roundNbr, }).Info(sn.Name() + "Follower imposed failure") - return errors.New(sn.Name() + "was imposed follower failure on round" + strconv.Itoa(Round)) + return errors.New(sn.Name() + "was imposed follower failure on round" + strconv.Itoa(roundNbr)) } } // doing this before annoucing children to avoid major drama if !sn.IsRoot(view) && sn.ShouldIFail("commit") { - log.Warn(sn.Name(), "not announcing or commiting for round", Round) - return ErrImposedFailure + log.Warn(sn.Name(), "not announcing or commiting for round", roundNbr) + return errors.New("failure imposed") } return nil } -// Create round lasting secret and commit point v and V -// Initialize log structure for the round -func (sn *Node) initCommitCrypto(Round int) { - round := sn.Rounds[Round] - // generate secret and point commitment for this round - rand := sn.suite.Cipher([]byte(sn.Name())) - round.Log = SNLog{} - round.Log.v = sn.suite.Secret().Pick(rand) - round.Log.V = sn.suite.Point().Mul(nil, round.Log.v) - // initialize product of point commitments - round.Log.V_hat = sn.suite.Point().Null() - round.Log.Suite = sn.suite - sn.add(round.Log.V_hat, round.Log.V) - - round.X_hat = sn.suite.Point().Null() - sn.add(round.X_hat, sn.PubKey) -} - -func (sn *Node) setUpRound(view int, am *AnnouncementMessage) error { - // TODO: accept annoucements on old views?? linearizabiltity? 
- sn.viewmu.Lock() - // if (sn.ChangingView && am.Vote == nil) || (sn.ChangingView && am.Vote != nil && am.Vote.Vcv == nil) { - // dbg.Lvl4(sn.Name(), "currently chaning view") - // sn.viewmu.Unlock() - // return ChangingViewError - // } - if sn.ChangingView && am.Vote != nil && am.Vote.Vcv == nil { - dbg.Lvl4(sn.Name(), "currently chaning view") - sn.viewmu.Unlock() - return ChangingViewError - } - sn.viewmu.Unlock() - - sn.roundmu.Lock() - Round := am.Round - if Round <= sn.LastSeenRound { - sn.roundmu.Unlock() - return ErrPastRound - } - - // make space for round type - if len(sn.RoundTypes) <= Round { - sn.RoundTypes = append(sn.RoundTypes, make([]RoundType, max(len(sn.RoundTypes), Round+1))...) - } - if am.Vote == nil { - dbg.Lvl4(Round, len(sn.RoundTypes)) - sn.RoundTypes[Round] = SigningRT - } else { - sn.RoundTypes[Round] = RoundType(am.Vote.Type) - } - sn.roundmu.Unlock() - - // set up commit and response channels for the new round - sn.Rounds[Round] = NewRound(sn.suite) - sn.initCommitCrypto(Round) - sn.Rounds[Round].Vote = am.Vote - - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, Round) - sn.roundmu.Unlock() - - // the root is the only node that keeps track of round # internally - if sn.IsRoot(view) { - sn.RoundsAsRoot += 1 - // TODO: is sn.Round needed if we have LastSeenRound - sn.Round = Round - - // Create my back link to previous round - sn.SetBackLink(Round) - // sn.SetAccountableRound(Round) - } - - return nil -} - // Figure out which kids did not submit messages // Add default messages to messgs, one per missing child // as to make it easier to identify and add them to exception lists in one place @@ -208,7 +137,11 @@ func (sn *Node) FillInWithDefaultMessages(view int, messgs []*SigningMessage) [] } if !found { - allmessgs = append(allmessgs, &SigningMessage{View: view, Type: Default, From: c}) + allmessgs = append(allmessgs, &SigningMessage{ + Suite: sn.Suite().String(), + ViewNbr: view, + Type: Default, + From: c}) } } @@ -223,7 +156,6 @@ func (sn *Node) add(a abstract.Point, b abstract.Point) { if b != nil { a.Add(a, b) } - } // accommodate nils @@ -234,7 +166,6 @@ func (sn *Node) sub(a abstract.Point, b abstract.Point) { if b != nil { a.Sub(a, b) } - } func (sn *Node) subExceptions(a abstract.Point, keys []abstract.Point) { @@ -249,6 +180,28 @@ func (sn *Node) updateLastSeenVote(hv int, from string) { } } +func (sn *Node) ChangeView(vcv *ViewChangeVote) { + // log.Println(sn.Name(), " in CHANGE VIEW") + // at this point actions have already been applied + // all we need to do is switch our default view + sn.viewmu.Lock() + sn.ViewNo = vcv.View + sn.viewmu.Unlock() + if sn.RootFor(vcv.View) == sn.Name() { + log.Println(sn.Name(), "Change view to root", "children", sn.Children(vcv.View)) + sn.viewChangeCh <- "root" + } else { + log.Println(sn.Name(), "Change view to regular") + sn.viewChangeCh <- "regular" + } + + sn.viewmu.Lock() + sn.ChangingView = false + sn.viewmu.Unlock() + log.Println("VIEW CHANGED") + // TODO: garbage collect old connections +} + func max(a int, b int) int { if a > b { return a diff --git a/proto/sign/snvoting.go b/lib/sign/nodevoting.go similarity index 80% rename from proto/sign/snvoting.go rename to lib/sign/nodevoting.go index 3d4008cd5d..ed4dcdb25c 100644 --- a/proto/sign/snvoting.go +++ b/lib/sign/nodevoting.go @@ -4,25 +4,21 @@ import ( "sync/atomic" "time" - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" + "github.com/dedis/cothority/lib/dbg" 
"golang.org/x/net/context" ) -func (sn *Node) ApplyVotes(ch chan *Vote) { - go func() { - for v := range ch { - if sn.RoundTypes[v.Round] == EmptyRT { - sn.RoundTypes[v.Round] = RoundType(v.Type) - } - sn.ApplyVote(v) - } - }() -} - +/* +DOESN'T WORK - needs to be implemented in a RoundVote +*/ // HERE: after we change to the new view, we could send our parent // a notification that we are ready to use the new view +// Constants we expect might be used by other packages +var ROUND_TIME time.Duration = 1 * time.Second +var HEARTBEAT = ROUND_TIME + ROUND_TIME/2 +var GOSSIP_TIME time.Duration = 3 * ROUND_TIME + func (sn *Node) ApplyVote(v *Vote) { atomic.StoreInt64(&sn.LastAppliedVote, int64(v.Index)) lav := atomic.LoadInt64(&sn.LastAppliedVote) @@ -39,7 +35,7 @@ func (sn *Node) ApplyVote(v *Vote) { case ShutdownVT: sn.Close() default: - log.Errorln("applyvote: unkown vote type") + dbg.Error("applyvote: unkown vote type") } } @@ -61,16 +57,17 @@ func (sn *Node) ApplyAction(view int, v *Vote) { // not closing TCP connection on remove because if view // does not go through, connection essential to old/ current view closed default: - log.Errorln("applyvote: unkown action type") + dbg.Error("applyvote: unkown action type") } } func (sn *Node) NotifyOfAction(view int, v *Vote) { dbg.Lvl4(sn.Name(), "Notifying node to be added/removed of action") gcm := &SigningMessage{ + Suite: sn.Suite().String(), Type: GroupChanged, From: sn.Name(), - View: view, + ViewNbr: view, LastSeenVote: int(sn.LastSeenVote), Gcm: &GroupChangedMessage{ V: v, @@ -86,7 +83,7 @@ func (sn *Node) NotifyOfAction(view int, v *Vote) { sn.PutTo(context.TODO(), v.Rv.Name, gcm) } default: - log.Errorln("notifyofaction: unkown action type") + dbg.Error("notifyofaction: unkown action type") } } @@ -102,8 +99,9 @@ func (sn *Node) AddSelf(parent string) error { context.TODO(), parent, &SigningMessage{ - Type: GroupChange, - View: -1, + Suite: sn.Suite().String(), + Type: GroupChange, + ViewNbr: -1, Vrm: &VoteRequestMessage{ Vote: &Vote{ Type: AddVT, @@ -117,8 +115,9 @@ func (sn *Node) RemoveSelf() error { context.TODO(), int(sn.ViewNo), &SigningMessage{ - Type: GroupChange, - View: -1, + Suite: sn.Suite().String(), + Type: GroupChange, + ViewNbr: -1, Vrm: &VoteRequestMessage{ Vote: &Vote{ Type: RemoveVT, @@ -133,6 +132,7 @@ func (sn *Node) CatchUp(vi int, from string) { ctx := context.TODO() sn.PutTo(ctx, from, &SigningMessage{ + Suite: sn.Suite().String(), From: sn.Name(), Type: CatchUpReq, Cureq: &CatchUpRequest{Index: vi}}) @@ -148,13 +148,13 @@ func (sn *Node) StartGossip() { c := sn.HostListOn(sn.ViewNo) sn.viewmu.Unlock() if len(c) == 0 { - log.Errorln(sn.Name(), "StartGossip: none in hostlist for view: ", sn.ViewNo, len(c)) + dbg.Error(sn.Name(), "StartGossip: none in hostlist for view:", sn.ViewNo, len(c)) continue } sn.randmu.Lock() from := c[sn.Rand.Int()%len(c)] sn.randmu.Unlock() - dbg.Lvl4("Gossiping with: ", from) + dbg.Lvl4("Gossiping with:", from) sn.CatchUp(int(atomic.LoadInt64(&sn.LastAppliedVote)+1), from) case <-sn.closed: dbg.Lvl3("stopping gossip: closed") diff --git a/lib/sign/round.go b/lib/sign/round.go new file mode 100644 index 0000000000..0b9e27d3a2 --- /dev/null +++ b/lib/sign/round.go @@ -0,0 +1,78 @@ +package sign + +import ( + "fmt" + "github.com/dedis/cothority/lib/dbg" +) + +/* +Round holds the functions that are used to define the +behaviour of a Round. All different round-types use the +cothority-tree, but they have different behaviors. 
+This is only the interface; an actual implementation can also start new
+rounds, of the same or of a different type, whenever it wants.
+*/
+
+type Round interface {
+	// Announcement: root -> nodes
+	// This is called from the root-node whenever an
+	// announcement is made.
+	Announcement(int, int, *SigningMessage, []*SigningMessage) error
+	// Commitment: nodes -> root
+	// This is called whenever a commitment is ready to
+	// be sent. It takes the messages of its children and returns
+	// the new message to be sent.
+	Commitment([]*SigningMessage, *SigningMessage) error
+	// Challenge: root -> nodes
+	// This is called with the message to be signed. If necessary,
+	// each node can change the message for its children.
+	Challenge(*SigningMessage, []*SigningMessage) error
+	// Response: nodes -> root
+	// This is called with the signature of the challenge-message
+	// or with updated ExceptionList* in case of refusal to sign.
+	Response([]*SigningMessage, *SigningMessage) error
+	// SignatureBroadcast: root -> nodes
+	// This is called whenever the round is completed and
+	// the results are propagated through the tree.
+	// It returns an error if something went wrong and nothing should be
+	// broadcast down the tree. The root gets one output message per child,
+	// which gives fine-grained control over which final signature is
+	// handed to which peer.
+	SignatureBroadcast(*SigningMessage, []*SigningMessage) error
+	// Statistics: nodes -> root
+	// This is called at the end to collect any statistics
+	// about the round.
+
+	// GetType returns the string reflecting the round-type
+	GetType() string
+}
+
+// RoundFactory is a function that returns a Round given a Node
+type RoundFactory func(*Node) Round
+
+// RoundFactories holds the different round factories together. Each round has a
+// "type name" that can be associated with its RoundFactory
+var RoundFactories map[string]RoundFactory
+
+// init is intentionally empty - every round-type registers itself through
+// RegisterRoundFactory in its own init function
+func init() {
+}
+
+// RegisterRoundFactory registers a new round factory under its type name.
+func RegisterRoundFactory(roundType string, rf RoundFactory) {
+	if RoundFactories == nil {
+		RoundFactories = make(map[string]RoundFactory)
+	}
+	RoundFactories[roundType] = rf
+}
+
+// NewRoundFromType creates a Round of the given type for this node. It returns
+// an error if no factory has been registered for that type.
+func NewRoundFromType(rtype string, node *Node) (Round, error) {
+	dbg.Lvl3("For", node.Name(), "creating round-type:", rtype, "out of", RoundFactories)
+	rf, ok := RoundFactories[rtype]
+	if !ok {
+		return nil, fmt.Errorf("RoundFactory not registered for the type %s", rtype)
+	}
+	return rf(node), nil
+}
diff --git a/lib/sign/roundcosi.go b/lib/sign/roundcosi.go
new file mode 100644
index 0000000000..26491653e8
--- /dev/null
+++ b/lib/sign/roundcosi.go
@@ -0,0 +1,200 @@
+package sign
+
+import (
+	"github.com/dedis/cothority/lib/dbg"
+
+	"fmt"
+	"github.com/dedis/cothority/lib/hashid"
+	"github.com/dedis/cothority/lib/proof"
+	"github.com/dedis/crypto/abstract"
+	"runtime/debug"
+)
+
+/*
+RoundCosi implements the collective signature protocol using
+Schnorr signatures to collectively sign on a message. By default
+the message is only the collection of all Commits, but another
+round can add any message it wants in the Commitment-phase.
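+The commitments of the children are collected in a Merkle tree, the root
+derives the collective challenge from the message, the combined Merkle root
+and the aggregate commitment, and the responses are aggregated and verified
+on the way back up to the root.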
+*/ + +// The name type of this round implementation +const RoundCosiType = "cosi" + +type RoundCosi struct { + *RoundStruct + Cosi *CosiStruct + SaveViewNo int +} + +func init() { + RegisterRoundFactory(RoundCosiType, + func(node *Node) Round { + return NewRoundCosi(node) + }) +} + +func NewRoundCosi(node *Node) *RoundCosi { + round := &RoundCosi{} + round.RoundStruct = NewRoundStruct(node, RoundCosiType) + return round +} + +func (round *RoundCosi) CheckChildren() { + c := round.Node.Children(round.Node.ViewNo) + if len(c) != len(round.Cosi.Children) { + dbg.Print("Children in cosi and node are different") + dbg.Printf("round.Cosi: %+v", round.Cosi) + dbg.Printf("Node.Children: %+v", round.Node.Children(round.Node.ViewNo)) + dbg.Print("viewNbr:", round.SaveViewNo, "Node.ViewNo:", round.Node.ViewNo) + debug.PrintStack() + } +} + +// AnnounceFunc will keep the timestamp generated for this round +func (round *RoundCosi) Announcement(viewNbr, roundNbr int, in *SigningMessage, out []*SigningMessage) error { + if err := round.Node.TryFailure(round.Node.ViewNo, roundNbr); err != nil { + return err + } + + // Store the message for the round + //round.Merkle = round.Node.MerkleStructs[roundNbr] + round.Cosi = NewCosi(round.Node, viewNbr, roundNbr, in.Am) + round.SaveViewNo = round.Node.ViewNo + round.CheckChildren() + + round.Cosi.Msg = in.Am.Message + // Inform all children of announcement - just copy the one that came in + for i := range out { + *out[i].Am = *in.Am + } + return nil +} + +func (round *RoundCosi) Commitment(in []*SigningMessage, out *SigningMessage) error { + cosi := round.Cosi + cosi.Commits = in + + // Create the mapping between children and their respective public key + commitment + // V for commitment + children := cosi.Children + cosi.ChildV_hat = make(map[string]abstract.Point, len(children)) + // X for public key + cosi.ChildX_hat = make(map[string]abstract.Point, len(children)) + + for key := range children { + cosi.ChildX_hat[key] = cosi.Suite.Point().Null() + cosi.ChildV_hat[key] = cosi.Suite.Point().Null() + } + + // Commits from children are the first Merkle Tree leaves for the round + cosi.Leaves = make([]hashid.HashId, 0) + cosi.LeavesFrom = make([]string, 0) + for _, sm := range cosi.Commits { + from := sm.From + // MTR ==> root of sub-merkle tree + cosi.Leaves = append(cosi.Leaves, sm.Com.MTRoot) + cosi.LeavesFrom = append(cosi.LeavesFrom, from) + cosi.ChildV_hat[from] = sm.Com.V_hat + cosi.ChildX_hat[from] = sm.Com.X_hat + + // Aggregation + // add good child server to combined public key, and point commit + cosi.X_hat.Add(cosi.X_hat, sm.Com.X_hat) + cosi.Log.V_hat.Add(cosi.Log.V_hat, sm.Com.V_hat) + //dbg.Lvl4("Adding aggregate public key from ", from, " : ", sm.Com.X_hat) + } + + dbg.Lvl4("Node.Commit using Merkle") + cosi.MerkleAddChildren() + + round.Cosi.MerkleAddLocal(out.Com.MTRoot) + round.Cosi.MerkleHashLog() + round.Cosi.ComputeCombinedMerkleRoot() + + out.Com.V = round.Cosi.Log.V + out.Com.V_hat = round.Cosi.Log.V_hat + out.Com.X_hat = round.Cosi.X_hat + out.Com.MTRoot = round.Cosi.MTRoot + return nil + +} + +func (round *RoundCosi) Challenge(in *SigningMessage, out []*SigningMessage) error { + + cosi := round.Cosi + // we are root + if round.IsRoot { + msg := cosi.Msg + msg = append(msg, []byte(cosi.MTRoot)...) 
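+ // The root derives the collective challenge from the message, the
+ // combined Merkle root and the aggregate commitment V_hat; the same
+ // challenge is then sent down to every node in the tree.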
+ cosi.C = cosi.HashElGamal(msg, cosi.Log.V_hat) + //proof := make([]hashid.HashId, 0) + + in.Chm.C = cosi.C + in.Chm.MTRoot = cosi.MTRoot + in.Chm.Proof = cosi.Proof + } else { // we are a leaf + // register challenge + cosi.C = in.Chm.C + } + // compute response share already + localmerkle proof + cosi.InitResponseCrypto() + // messages from clients, proofs computed + if cosi.Log.Getv() != nil { + if err := cosi.StoreLocalMerkleProof(in.Chm); err != nil { + return err + } + } + + // proof from big root to our root will be sent to all children + baseProof := make(proof.Proof, len(in.Chm.Proof)) + copy(baseProof, in.Chm.Proof) + + round.CheckChildren() + if len(cosi.Children) != len(out) { + return fmt.Errorf("Children (%d) and output (%d) are of different length. Should be %d / %d", + len(cosi.Children), len(out), len(round.Node.Children(round.Node.ViewNo)), + round.Node.ViewNo) + } + // for each child, create personalized part of proof + // embed it in SigningMessage, and send it + var i = 0 + for name, _ := range cosi.Children { + out[i].Chm.C = in.Chm.C + out[i].Chm.MTRoot = in.Chm.MTRoot + out[i].Chm.Proof = append(baseProof, cosi.Proofs[name]...) + out[i].To = name + i++ + } + return nil +} + +// TODO make that in == nil in case we are a leaf to stay consistent with +// others calls +func (round *RoundCosi) Response(in []*SigningMessage, out *SigningMessage) error { + dbg.Lvl4(round.Cosi.Name, "got all responses") + for _, sm := range in { + round.Cosi.R_hat.Add(round.Cosi.R_hat, sm.Rm.R_hat) + } + err := round.Cosi.VerifyResponses() + if err != nil { + dbg.Lvl3(round.Node.Name(), "Could not verify responses..") + return err + } + out.Rm.R_hat = round.Cosi.R_hat + return nil +} + +func (round *RoundCosi) SignatureBroadcast(in *SigningMessage, out []*SigningMessage) error { + // Root is creating the sig broadcast + if round.IsRoot { + in.SBm.R0_hat = round.Cosi.R_hat + in.SBm.C = round.Cosi.C + in.SBm.X0_hat = round.Cosi.X_hat + in.SBm.V0_hat = round.Cosi.Log.V_hat + } + // Inform all children of broadcast - just copy the one that came in + for i := range out { + *out[i].SBm = *in.SBm + } + return nil +} diff --git a/lib/sign/roundempty.go b/lib/sign/roundempty.go new file mode 100644 index 0000000000..a005f6139d --- /dev/null +++ b/lib/sign/roundempty.go @@ -0,0 +1,49 @@ +package sign + +/* +RoundEmpty is a bare-bones round implementation to be copy-pasted. It +already implements RoundStruct for your convenience. 
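+To create a new round-type, copy this file, rename RoundEmpty and
+RoundEmptyType, adjust the registration in init and implement the
+phases you need.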
+*/ + +// The name type of this round implementation +const RoundEmptyType = "empty" + +type RoundEmpty struct { + *RoundStruct +} + +func init() { + RegisterRoundFactory(RoundEmptyType, + func(node *Node) Round { + return NewRoundEmpty(node) + }) +} + +func NewRoundEmpty(node *Node) *RoundEmpty { + round := &RoundEmpty{} + round.RoundStruct = NewRoundStruct(node, RoundEmptyType) + // If you're sub-classing from another round-type, don't forget to remove + // the above line, call the constructor of your parent round and add + // round.Type = RoundEmptyType + return round +} + +func (round *RoundEmpty) Announcement(viewNbr, roundNbr int, in *SigningMessage, out []*SigningMessage) error { + return nil +} + +func (round *RoundEmpty) Commitment(in []*SigningMessage, out *SigningMessage) error { + return nil +} + +func (round *RoundEmpty) Challenge(in *SigningMessage, out []*SigningMessage) error { + return nil +} + +func (round *RoundEmpty) Response(in []*SigningMessage, out *SigningMessage) error { + return nil +} + +func (round *RoundEmpty) SignatureBroadcast(in *SigningMessage, out []*SigningMessage) error { + return nil +} diff --git a/lib/sign/roundexception.go b/lib/sign/roundexception.go new file mode 100644 index 0000000000..40ddae593a --- /dev/null +++ b/lib/sign/roundexception.go @@ -0,0 +1,114 @@ +package sign + +import ( + "github.com/dedis/cothority/lib/dbg" + "github.com/dedis/crypto/abstract" +) + +/* +RoundException implements the collective signature protocol using +Schnorr signatures to collectively sign on a message. By default +the message is only the collection of all Commits, but another +round can add any message it wants in the Commitment-phase. +*/ + +// The name type of this round implementation +const RoundExceptionType = "cosiexception" + +// Can be used for debugging by telling which node should fail +var ExceptionForceFailure string + +type RoundException struct { + *RoundCosi +} + +// init adds RoundException to the list of available rounds +func init() { + RegisterRoundFactory(RoundExceptionType, + func(node *Node) Round { + return NewRoundException(node) + }) +} + +// NewRoundException creates a new RoundException based on RoundCosi +func NewRoundException(node *Node) *RoundException { + round := &RoundException{} + round.RoundCosi = NewRoundCosi(node) + round.Type = RoundExceptionType + return round +} + +// Commitment adds up all exception-lists from children and calls roundcosi +func (round *RoundException) Commitment(in []*SigningMessage, out *SigningMessage) error { + err := round.RoundCosi.Commitment(in, out) + if err != nil { + return err + } + + // prepare to handle exceptions + cosi := round.Cosi + cosi.ExceptionList = make([]abstract.Point, 0) + for _, sm := range cosi.Commits { + cosi.ExceptionList = append(cosi.ExceptionList, sm.Com.ExceptionList...) 
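+ // every child's exception-list is merged into this node's list and
+ // passed up to the parent together with the commitment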
+ } + out.Com.ExceptionList = round.Cosi.ExceptionList + return nil +} + +func (round *RoundException) Response(in []*SigningMessage, out *SigningMessage) error { + if round.Name == ExceptionForceFailure { + dbg.Lvl1("Forcing failure in response") + round.RaiseException() + } + + // initialize exception handling + nullPoint := round.Cosi.Suite.Point().Null() + + children := round.Cosi.Children + for _, sm := range in { + from := sm.From + switch sm.Type { + default: + // default == no response from child + dbg.Lvl4(round.Name, "Empty response from child", from, sm.Type) + if children[from] != nil { + round.Cosi.ExceptionList = append(round.Cosi.ExceptionList, children[from].PubKey()) + + // remove public keys and point commits from subtree of failed child + round.Cosi.ExceptionX_hat.Add(round.Cosi.ExceptionX_hat, round.Cosi.ChildX_hat[from]) + round.Cosi.ExceptionV_hat.Add(round.Cosi.ExceptionV_hat, round.Cosi.ChildV_hat[from]) + } + continue + case Response: + // disregard response from children who did not commit + _, ok := round.Cosi.ChildV_hat[from] + if ok == true && round.Cosi.ChildV_hat[from].Equal(nullPoint) { + dbg.Lvl4(round.Name, ": no response from", from, sm.Type) + continue + } + + dbg.Lvl4(round.Name, "accepts response from", from, sm.Type) + round.Cosi.ExceptionV_hat.Add(round.Cosi.ExceptionV_hat, sm.Rm.ExceptionV_hat) + round.Cosi.ExceptionX_hat.Add(round.Cosi.ExceptionX_hat, sm.Rm.ExceptionX_hat) + round.Cosi.ExceptionList = append(round.Cosi.ExceptionList, sm.Rm.ExceptionList...) + } + } + + round.Cosi.X_hat.Sub(round.Cosi.X_hat, round.Cosi.ExceptionX_hat) + + err := round.RoundCosi.Response(in, out) + if err != nil { + return err + } + + out.Rm.ExceptionList = round.Cosi.ExceptionList + out.Rm.ExceptionV_hat = round.Cosi.ExceptionV_hat + out.Rm.ExceptionX_hat = round.Cosi.ExceptionX_hat + return nil +} + +func (round *RoundException) RaiseException() { + round.Cosi.R_hat = round.Suite.Secret().Zero() + round.Cosi.ExceptionX_hat.Add(round.Cosi.ExceptionX_hat, round.Cosi.PubKey) + round.Cosi.ExceptionV_hat.Add(round.Cosi.ExceptionV_hat, round.Cosi.Log.V_hat) +} diff --git a/lib/sign/roundsetup.go b/lib/sign/roundsetup.go new file mode 100644 index 0000000000..4a15f75c7a --- /dev/null +++ b/lib/sign/roundsetup.go @@ -0,0 +1,62 @@ +package sign + +import ( + "github.com/dedis/cothority/lib/dbg" +) + +/* +RoundSetup merely traverses the tree and counts the number of nodes. +This can be used to check the validity of the tree. 
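+Every node reports 1 plus the counts of its children during the
+Commitment-phase; the root pushes the total into the Counted channel.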
+*/ + +// The name type of this round implementation +const RoundSetupType = "setup" + +type RoundSetup struct { + *RoundStruct + Counted chan int +} + +func init() { + RegisterRoundFactory(RoundSetupType, + func(node *Node) Round { + return NewRoundSetup(node) + }) +} + +func NewRoundSetup(node *Node) *RoundSetup { + round := &RoundSetup{} + round.RoundStruct = NewRoundStruct(node, RoundSetupType) + round.Counted = make(chan int, 1) + return round +} + +func (round *RoundSetup) Announcement(viewNbr, roundNbr int, in *SigningMessage, out []*SigningMessage) error { + return nil +} + +func (round *RoundSetup) Commitment(in []*SigningMessage, out *SigningMessage) error { + out.Com.Messages = 1 + if !round.IsLeaf { + for _, i := range in { + out.Com.Messages += i.Com.Messages + } + } + if round.IsRoot { + dbg.Lvl2("Number of nodes found:", out.Com.Messages) + round.Counted <- out.Com.Messages + } + return nil +} + +func (round *RoundSetup) Challenge(in *SigningMessage, out []*SigningMessage) error { + return nil +} + +func (round *RoundSetup) Response(in []*SigningMessage, out *SigningMessage) error { + return nil +} + +func (round *RoundSetup) SignatureBroadcast(in *SigningMessage, out []*SigningMessage) error { + return nil +} diff --git a/lib/sign/roundstruct.go b/lib/sign/roundstruct.go new file mode 100644 index 0000000000..c55558e550 --- /dev/null +++ b/lib/sign/roundstruct.go @@ -0,0 +1,48 @@ +package sign + +import ( + "github.com/dedis/cothority/lib/coconet" + "github.com/dedis/crypto/abstract" +) + +/* +This structure holds basic information about a round. It +can be included in a structure. To initialise, the +round has to call NewRoundStruct. +*/ + +type RoundStruct struct { + Node *Node + Type string + Name string + IsRoot bool + IsLeaf bool + RoundNbr int + ViewNbr int + Parent string + Children map[string]coconet.Conn + Suite abstract.Suite +} + +func NewRoundStruct(node *Node, rtype string) *RoundStruct { + viewNbr := node.ViewNo + roundNbr := node.nRounds + children := node.Children(viewNbr) + cbs := &RoundStruct{ + Node: node, + Type: rtype, + Name: node.Name(), + IsRoot: node.IsRoot(viewNbr), + IsLeaf: len(children) == 0, + RoundNbr: roundNbr, + ViewNbr: viewNbr, + Parent: node.Parent(viewNbr), + Children: children, + Suite: node.Suite(), + } + return cbs +} + +func (r *RoundStruct) GetType() string { + return r.Type +} diff --git a/lib/sign/roundvote.go b/lib/sign/roundvote.go new file mode 100644 index 0000000000..95585fb764 --- /dev/null +++ b/lib/sign/roundvote.go @@ -0,0 +1,40 @@ +package sign + +/* +NOT WORKING - this can be implemented to have a RoundVote which +will ask for a view-change. 
+*/ + +/* +func (sn *Node) StartVotingRound(v *Vote) error { + dbg.Lvl2(sn.Name(), "start voting round") + sn.nRounds = sn.LastSeenRound + + // during view changes, only accept view change related votes + if sn.ChangingView && v.Vcv == nil { + dbg.Lvl2(sn.Name(), "start signing round: changingViewError") + return ChangingViewError + } + + sn.nRounds++ + v.Round = sn.nRounds + v.Index = int(atomic.LoadInt64(&sn.LastSeenVote)) + 1 + v.Count = &Count{} + v.Confirmed = false + // only default fill-in view numbers when not prefilled + if v.View == 0 { + v.View = sn.ViewNo + } + if v.Av != nil && v.Av.View == 0 { + v.Av.View = sn.ViewNo + 1 + } + if v.Rv != nil && v.Rv.View == 0 { + v.Rv.View = sn.ViewNo + 1 + } + if v.Vcv != nil && v.Vcv.View == 0 { + v.Vcv.View = sn.ViewNo + 1 + } + return sn.StartAnnouncement( + &AnnouncementMessage{Message: []byte("vote round"), RoundNbr: sn.nRounds, Vote: v}) +} +*/ diff --git a/lib/sign/sign_test.go b/lib/sign/sign_test.go new file mode 100644 index 0000000000..d6a6b4e963 --- /dev/null +++ b/lib/sign/sign_test.go @@ -0,0 +1,7 @@ +package sign_test + +import "testing" + +func TestBuild(t *testing.T) { + +} diff --git a/proto/sign/testdata/exconf.json b/lib/sign/testdata/exconf.json similarity index 100% rename from proto/sign/testdata/exconf.json rename to lib/sign/testdata/exconf.json diff --git a/proto/sign/testdata/exconf1.json b/lib/sign/testdata/exconf1.json similarity index 100% rename from proto/sign/testdata/exconf1.json rename to lib/sign/testdata/exconf1.json diff --git a/proto/sign/testdata/exconf_wkeys.json b/lib/sign/testdata/exconf_wkeys.json similarity index 100% rename from proto/sign/testdata/exconf_wkeys.json rename to lib/sign/testdata/exconf_wkeys.json diff --git a/proto/sign/testdata/extcpconf.json b/lib/sign/testdata/extcpconf.json similarity index 100% rename from proto/sign/testdata/extcpconf.json rename to lib/sign/testdata/extcpconf.json diff --git a/proto/sign/testdata/exwax.json b/lib/sign/testdata/exwax.json similarity index 100% rename from proto/sign/testdata/exwax.json rename to lib/sign/testdata/exwax.json diff --git a/proto/sign/testdata/gen.py b/lib/sign/testdata/gen.py similarity index 100% rename from proto/sign/testdata/gen.py rename to lib/sign/testdata/gen.py diff --git a/proto/sign/testdata/wax.json b/lib/sign/testdata/wax.json similarity index 100% rename from proto/sign/testdata/wax.json rename to lib/sign/testdata/wax.json diff --git a/proto/sign/testdata/zoo.json b/lib/sign/testdata/zoo.json similarity index 100% rename from proto/sign/testdata/zoo.json rename to lib/sign/testdata/zoo.json diff --git a/main.go b/main.go deleted file mode 100644 index a075e24585..0000000000 --- a/main.go +++ /dev/null @@ -1,42 +0,0 @@ -// Cothority - framework for co-authority based research -// -// - -package main -import "flag" -import ( - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/cothority/deploy" -) - -var deploy_dst = "deterlab" -var app = "" -var nobuild = false -var build = "" -var machines = 3 - -func init() { - flag.StringVar(&deploy_dst, "deploy", deploy_dst, "if you want to deploy, chose [deterlab]") - flag.StringVar(&app, "app", app, "start [server,client] locally") - flag.IntVar(&dbg.DebugVisible, "debug", dbg.DebugVisible, "Debugging-level. 
0 is silent, 5 is flood") - flag.BoolVar(&nobuild, "nobuild", false, "Don't rebuild all helpers") - flag.StringVar(&build, "build", "", "List of packages to build") - flag.IntVar(&machines, "machines", machines, "Number of machines on Deterlab") -} - -func main() { - flag.Parse() - - switch app{ - default: - switch deploy_dst{ - default: - dbg.Lvl1("Sorry, deployment method", deploy_dst, "not yet implemented") - case "deterlab": - dbg.Lvl1("Deploying to deterlab") - deploy.Start("deterlab", nobuild, build, machines) - } - case "server", "client": - dbg.Lvl1("Sorry,", app, "not yet implemented") - } -} \ No newline at end of file diff --git a/proto/sign/basicSig.go b/proto/sign/basicSig.go deleted file mode 100644 index 6d0733aa7a..0000000000 --- a/proto/sign/basicSig.go +++ /dev/null @@ -1,56 +0,0 @@ -package sign - -import ( - "crypto/cipher" - "errors" - - "github.com/dedis/crypto/abstract" -) - -// A basic, verifiable signature -type BasicSig struct { - C abstract.Secret // challenge - R abstract.Secret // response -} - -// This simplified implementation of ElGamal Signatures is based on -// crypto/anon/sig.go -func ElGamalSign(suite abstract.Suite, random cipher.Stream, message []byte, - privateKey abstract.Secret) BasicSig { - - // Create random secret v and public point commitment T - v := suite.Secret().Pick(random) - T := suite.Point().Mul(nil, v) - - // Create challenge c based on message and T - c := hashElGamal(suite, message, T) - - // Compute response r = v - x*c - r := suite.Secret() - r.Mul(privateKey, c).Sub(v, r) - - // Return verifiable signature {c, r} - sig := BasicSig{c, r} - return sig -} - -func ElGamalVerify(suite abstract.Suite, message []byte, publicKey abstract.Point, - sig BasicSig) error { - r := sig.R - c := sig.C - - // Compute base**(r + x*c) == T - var P, T abstract.Point - P = suite.Point() - T = suite.Point() - T.Add(T.Mul(nil, r), P.Mul(publicKey, c)) - - // Verify that the hash based on the message and T - // matches the challange c from the signature - c = hashElGamal(suite, message, T) - if !c.Equal(sig.C) { - return errors.New("invalid signature") - } - - return nil -} diff --git a/proto/sign/collectiveSigning.go b/proto/sign/collectiveSigning.go deleted file mode 100644 index b7da598c31..0000000000 --- a/proto/sign/collectiveSigning.go +++ /dev/null @@ -1,747 +0,0 @@ -package sign - -import ( - "errors" - "io" - "strconv" - "sync/atomic" - - log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - "github.com/dedis/crypto/abstract" - "github.com/dedis/cothority/lib/coconet" - "github.com/dedis/cothority/lib/hashid" - "golang.org/x/net/context" - // "strconv" - // "os" -) - -// Collective Signing via ElGamal -// 1. Announcement -// 2. Commitment -// 3. Challenge -// 4. 
Response - -// Get multiplexes all messages from TCPHost using application logic -func (sn *Node) get() error { - dbg.Lvl4(sn.Name(), "getting") - defer dbg.Lvl4(sn.Name(), "done getting") - - sn.UpdateTimeout() - dbg.Lvl4("Going to get", sn.Name()) - msgchan := sn.Host.Get() - // heartbeat for intiating viewChanges, allows intial 500s setup time - /* sn.hbLock.Lock() - sn.heartbeat = time.NewTimer(500 * time.Second) - sn.hbLock.Unlock() */ - - // as votes get approved they are streamed in ApplyVotes - voteChan := sn.VoteLog.Stream() - sn.ApplyVotes(voteChan) - - // gossip to make sure we are up to date - sn.StartGossip() - - for { - select { - case <-sn.closed: - sn.StopHeartbeat() - return nil - default: - dbg.Lvl4(sn.Name(), "waiting for message") - nm, ok := <-msgchan - err := nm.Err - - // TODO: graceful shutdown voting - if !ok || err == coconet.ErrClosed || err == io.EOF { - dbg.Lvl3(sn.Name(), " getting from closed host") - sn.Close() - return coconet.ErrClosed - } - - // if it is a non-fatal error try again - if err != nil { - log.Errorln(sn.Name(), " error getting message (still continuing) ", err) - continue - } - // interpret network message as Signing Message - //log.Printf("got message: %#v with error %v\n", sm, err) - sm := nm.Data.(*SigningMessage) - sm.From = nm.From - dbg.Lvl4(sn.Name(), "received message:", sm.Type) - - // don't act on future view if not caught up, must be done after updating vote index - sn.viewmu.Lock() - if sm.View > sn.ViewNo { - if atomic.LoadInt64(&sn.LastSeenVote) != atomic.LoadInt64(&sn.LastAppliedVote) { - log.Warnln(sn.Name(), "not caught up for view change", sn.LastSeenVote, sn.LastAppliedVote) - return errors.New("not caught up for view change") - } - } - sn.viewmu.Unlock() - sn.updateLastSeenVote(sm.LastSeenVote, sm.From) - - switch sm.Type { - // if it is a bad message just ignore it - default: - continue - case Announcement: - dbg.Lvl4(sn.Name(), "got announcement") - sn.ReceivedHeartbeat(sm.View) - - var err error - if sm.Am.Vote != nil { - err = sn.Propose(sm.View, sm.Am, sm.From) - dbg.Lvl4(sn.Name(), "done proposing") - } else { - if !sn.IsParent(sm.View, sm.From) { - log.Fatalln(sn.Name(), "received announcement from non-parent on view", sm.View) - continue - } - err = sn.Announce(sm.View, sm.Am) - } - if err != nil { - log.Errorln(sn.Name(), "announce error:", err) - } - - case Challenge: - dbg.Lvl4(sn.Name(), "got challenge") - if !sn.IsParent(sm.View, sm.From) { - log.Fatalln(sn.Name(), "received challenge from non-parent on view", sm.View) - continue - } - sn.ReceivedHeartbeat(sm.View) - - var err error - if sm.Chm.Vote != nil { - err = sn.Accept(sm.View, sm.Chm) - } else { - err = sn.Challenge(sm.View, sm.Chm) - } - if err != nil { - log.Errorln(sn.Name(), "challenge error:", err) - } - - // if it is a commitment or response it is from the child - case Commitment: - dbg.Lvl4(sn.Name(), "got commitment") - if !sn.IsChild(sm.View, sm.From) { - log.Fatalln(sn.Name(), "received commitment from non-child on view", sm.View) - continue - } - - var err error - if sm.Com.Vote != nil { - err = sn.Promise(sm.View, sm.Com.Round, sm) - } else { - err = sn.Commit(sm.View, sm.Com.Round, sm) - } - if err != nil { - log.Errorln(sn.Name(), "commit error:", err) - } - case Response: - dbg.Lvl4(sn.Name(), "received response from", sm.From) - if !sn.IsChild(sm.View, sm.From) { - log.Fatalln(sn.Name(), "received response from non-child on view", sm.View) - continue - } - - var err error - if sm.Rm.Vote != nil { - err = sn.Accepted(sm.View, 
sm.Rm.Round, sm) - } else { - err = sn.Respond(sm.View, sm.Rm.Round, sm) - } - if err != nil { - log.Errorln(sn.Name(), "response error:", err) - } - case CatchUpReq: - v := sn.VoteLog.Get(sm.Cureq.Index) - ctx := context.TODO() - sn.PutTo(ctx, sm.From, - &SigningMessage{ - From: sn.Name(), - Type: CatchUpResp, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Curesp: &CatchUpResponse{Vote: v}}) - case CatchUpResp: - if sm.Curesp.Vote == nil || sn.VoteLog.Get(sm.Curesp.Vote.Index) != nil { - continue - } - vi := sm.Curesp.Vote.Index - // put in votelog to be streamed and applied - sn.VoteLog.Put(vi, sm.Curesp.Vote) - // continue catching up - sn.CatchUp(vi+1, sm.From) - case GroupChange: - if sm.View == -1 { - sm.View = sn.ViewNo - if sm.Vrm.Vote.Type == AddVT { - sn.AddPeerToPending(sm.From) - } - } - // TODO sanity checks: check if view is == sn.ViewNo - if sn.RootFor(sm.View) == sn.Name() { - go sn.StartVotingRound(sm.Vrm.Vote) - continue - } - sn.PutUp(context.TODO(), sm.View, sm) - case GroupChanged: - if !sm.Gcm.V.Confirmed { - dbg.Lvl4(sn.Name(), " received attempt to group change not confirmed") - continue - } - if sm.Gcm.V.Type == RemoveVT { - dbg.Lvl4(sn.Name(), " received removal notice") - } else if sm.Gcm.V.Type == AddVT { - dbg.Lvl4(sn.Name(), " received addition notice") - sn.NewView(sm.View, sm.From, nil, sm.Gcm.HostList) - } else { - log.Errorln(sn.Name(), "received GroupChanged for unacceptable action") - } - case StatusConnections: - sn.ReceivedHeartbeat(sm.View) - err = sn.StatusConnections(sm.View, sm.Am) - case CloseAll: - sn.ReceivedHeartbeat(sm.View) - err = sn.CloseAll(sm.View) - case Error: - dbg.Lvl4("Received Error Message:", ErrUnknownMessageType, sm, sm.Err) - } - } - } - -} - -func (sn *Node) Announce(view int, am *AnnouncementMessage) error { - dbg.Lvl4(sn.Name(), "received announcement on", view) - - if err := sn.TryFailure(view, am.Round); err != nil { - return err - } - - if err := sn.setUpRound(view, am); err != nil { - return err - } - - // Inform all children of announcement - messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) - for i := range messgs { - sm := SigningMessage{ - Type: Announcement, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Am: am} - messgs[i] = &sm - } - dbg.Lvl4(sn.Name(), "sending to all children") - ctx := context.TODO() - //ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - if err := sn.PutDown(ctx, view, messgs); err != nil { - return err - } - - // return sn.Commit(view, am) - if len(sn.Children(view)) == 0 { - sn.Commit(view, am.Round, nil) - } - return nil -} - -func (sn *Node) Commit(view, Round int, sm *SigningMessage) error { - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, Round) - sn.roundmu.Unlock() - - round := sn.Rounds[Round] - if round == nil { - // was not announced of this round, should retreat - return nil - } - - if sm != nil { - round.Commits = append(round.Commits, sm) - } - - if len(round.Commits) != len(sn.Children(view)) { - return nil - } - - // prepare to handle exceptions - round.ExceptionList = make([]abstract.Point, 0) - - // Create the mapping between children and their respective public key + commitment - // V for commitment - round.ChildV_hat = make(map[string]abstract.Point, len(sn.Children(view))) - // X for public key - round.ChildX_hat = make(map[string]abstract.Point, len(sn.Children(view))) - children := sn.Children(view) - - // Commits from children are the first Merkle Tree 
leaves for the round - round.Leaves = make([]hashid.HashId, 0) - round.LeavesFrom = make([]string, 0) - - for key := range children { - round.ChildX_hat[key] = sn.suite.Point().Null() - round.ChildV_hat[key] = sn.suite.Point().Null() - } - - // TODO: fill in missing commit messages, and add back exception code - for _, sm := range round.Commits { - from := sm.From - // MTR ==> root of sub-merkle tree - round.Leaves = append(round.Leaves, sm.Com.MTRoot) - round.LeavesFrom = append(round.LeavesFrom, from) - round.ChildV_hat[from] = sm.Com.V_hat - round.ChildX_hat[from] = sm.Com.X_hat - round.ExceptionList = append(round.ExceptionList, sm.Com.ExceptionList...) - - // Aggregation - // add good child server to combined public key, and point commit - sn.add(round.X_hat, sm.Com.X_hat) - sn.add(round.Log.V_hat, sm.Com.V_hat) - } - - if sn.Type == PubKey { - dbg.Lvl4("sign.Node.Commit using PubKey") - return sn.actOnCommits(view, Round) - } else { - dbg.Lvl4("sign.Node.Commit using Merkle") - sn.AddChildrenMerkleRoots(Round) - sn.AddLocalMerkleRoot(view, Round) - sn.HashLog(Round) - sn.ComputeCombinedMerkleRoot(view, Round) - return sn.actOnCommits(view, Round) - } -} - -// Finalize commits by initiating the challenge pahse if root -// Send own commitment message up to parent if non-root -func (sn *Node) actOnCommits(view, Round int) error { - round := sn.Rounds[Round] - var err error - - if sn.IsRoot(view) { - sn.commitsDone <- Round - err = sn.FinalizeCommits(view, Round) - } else { - // create and putup own commit message - com := &CommitmentMessage{ - V: round.Log.V, - V_hat: round.Log.V_hat, - X_hat: round.X_hat, - MTRoot: round.MTRoot, - ExceptionList: round.ExceptionList, - Vote: round.Vote, - Round: Round} - - // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - dbg.Lvl4(sn.Name(), "puts up commit") - ctx := context.TODO() - err = sn.PutUp(ctx, view, &SigningMessage{ - View: view, - Type: Commitment, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Com: com}) - } - return err -} - -// initiated by root, propagated by all others -func (sn *Node) Challenge(view int, chm *ChallengeMessage) error { - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, chm.Round) - sn.roundmu.Unlock() - - round := sn.Rounds[chm.Round] - if round == nil { - return nil - } - - // register challenge - round.c = chm.C - - if sn.Type == PubKey { - dbg.Lvl4(sn.Name(), "challenge: using pubkey", sn.Type, chm.Vote) - if err := sn.SendChildrenChallenges(view, chm); err != nil { - return err - } - } else { - dbg.Lvl4(sn.Name(), "challenge: using merkle proofs") - // messages from clients, proofs computed - if sn.CommitedFor(round) { - if err := sn.SendLocalMerkleProof(view, chm); err != nil { - return err - } - - } - if err := sn.SendChildrenChallengesProofs(view, chm); err != nil { - return err - } - } - - // dbg.Lvl4(sn.Name(), "In challenge before response") - sn.initResponseCrypto(chm.Round) - // if we are a leaf, send the respond up - if len(sn.Children(view)) == 0 { - sn.Respond(view, chm.Round, nil) - } - // dbg.Lvl4(sn.Name(), "Done handling challenge message") - return nil -} - -func (sn *Node) initResponseCrypto(Round int) { - round := sn.Rounds[Round] - // generate response r = v - xc - round.r = sn.suite.Secret() - round.r.Mul(sn.PrivKey, round.c).Sub(round.Log.v, round.r) - // initialize sum of children's responses - round.r_hat = round.r -} - -func (sn *Node) Respond(view, Round int, sm *SigningMessage) error { - dbg.Lvl4(sn.Name(), 
"couting response on view, round", view, Round, "Nchildren", len(sn.Children(view))) - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, Round) - sn.roundmu.Unlock() - - round := sn.Rounds[Round] - if round == nil || round.Log.v == nil { - // If I was not announced of this round, or I failed to commit - return nil - } - - if sm != nil { - round.Responses = append(round.Responses, sm) - } - if len(round.Responses) != len(sn.Children(view)) { - return nil - } - - // initialize exception handling - exceptionV_hat := sn.suite.Point().Null() - exceptionX_hat := sn.suite.Point().Null() - round.ExceptionList = make([]abstract.Point, 0) - nullPoint := sn.suite.Point().Null() - allmessgs := sn.FillInWithDefaultMessages(view, round.Responses) - - children := sn.Children(view) - for _, sm := range allmessgs { - from := sm.From - switch sm.Type { - default: - // default == no response from child - // dbg.Lvl4(sn.Name(), "default in respose for child", from, sm) - if children[from] != nil { - round.ExceptionList = append(round.ExceptionList, children[from].PubKey()) - - // remove public keys and point commits from subtree of failed child - sn.add(exceptionX_hat, round.ChildX_hat[from]) - sn.add(exceptionV_hat, round.ChildV_hat[from]) - } - continue - case Response: - // disregard response from children who did not commit - _, ok := round.ChildV_hat[from] - if ok == true && round.ChildV_hat[from].Equal(nullPoint) { - continue - } - - // dbg.Lvl4(sn.Name(), "accepts response from", from, sm.Type) - round.r_hat.Add(round.r_hat, sm.Rm.R_hat) - - sn.add(exceptionV_hat, sm.Rm.ExceptionV_hat) - - sn.add(exceptionX_hat, sm.Rm.ExceptionX_hat) - round.ExceptionList = append(round.ExceptionList, sm.Rm.ExceptionList...) - - case Error: - if sm.Err == nil { - log.Errorln("Error message with no error") - continue - } - - // Report up non-networking error, probably signature failure - log.Errorln(sn.Name(), "Error in respose for child", from, sm) - err := errors.New(sm.Err.Err) - sn.PutUpError(view, err) - return err - } - } - - // remove exceptions from subtree that failed - sn.sub(round.X_hat, exceptionX_hat) - round.exceptionV_hat = exceptionV_hat - - return sn.actOnResponses(view, Round, exceptionV_hat, exceptionX_hat) -} - -func (sn *Node) actOnResponses(view, Round int, exceptionV_hat abstract.Point, exceptionX_hat abstract.Point) error { - dbg.Lvl4(sn.Name(), "got all responses for view, round", view, Round) - round := sn.Rounds[Round] - err := sn.VerifyResponses(view, Round) - - isroot := sn.IsRoot(view) - // if error put it up if parent exists - if err != nil && !isroot { - sn.PutUpError(view, err) - return err - } - - // if no error send up own response - if err == nil && !isroot { - if round.Log.v == nil && sn.ShouldIFail("response") { - dbg.Lvl4(sn.Name(), "failing on response") - return nil - } - - // create and putup own response message - rm := &ResponseMessage{ - R_hat: round.r_hat, - ExceptionList: round.ExceptionList, - ExceptionV_hat: exceptionV_hat, - ExceptionX_hat: exceptionX_hat, - Round: Round} - - // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - ctx := context.TODO() - dbg.Lvl4(sn.Name(), "put up response to", sn.Parent(view)) - err = sn.PutUp(ctx, view, &SigningMessage{ - Type: Response, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Rm: rm}) - } else { - dbg.Lvl4("Root received response") - } - - if sn.TimeForViewChange() { - dbg.Lvl4("acting on responses: trying viewchanges") - err := 
sn.TryViewChange(view + 1) - if err != nil { - log.Errorln(err) - } - } - - // root reports round is done - if isroot { - sn.done <- Round - } - - return err -} - -func (sn *Node) TryViewChange(view int) error { - dbg.Lvl4(sn.Name(), "TRY VIEW CHANGE on", view, "with last view", sn.ViewNo) - // should ideally be compare and swap - sn.viewmu.Lock() - if view <= sn.ViewNo { - sn.viewmu.Unlock() - return errors.New("trying to view change on previous/ current view") - } - if sn.ChangingView { - sn.viewmu.Unlock() - return ChangingViewError - } - sn.ChangingView = true - sn.viewmu.Unlock() - - // take action if new view root - if sn.Name() == sn.RootFor(view) { - dbg.Lvl4(sn.Name(), "INITIATING VIEW CHANGE FOR VIEW:", view) - go func() { - err := sn.StartVotingRound( - &Vote{ - View: view, - Type: ViewChangeVT, - Vcv: &ViewChangeVote{ - View: view, - Root: sn.Name()}}) - if err != nil { - log.Errorln(sn.Name(), "TRY VIEW CHANGE FAILED: ", err) - } - }() - } - return nil -} - -// Called *only* by root node after receiving all commits -func (sn *Node) FinalizeCommits(view int, Round int) error { - round := sn.Rounds[Round] - - // challenge = Hash(Merkle Tree Root/ Announcement Message, sn.Log.V_hat) - if sn.Type == PubKey { - round.c = hashElGamal(sn.suite, sn.LogTest, round.Log.V_hat) - } else { - round.c = hashElGamal(sn.suite, round.MTRoot, round.Log.V_hat) - } - - proof := make([]hashid.HashId, 0) - err := sn.Challenge(view, &ChallengeMessage{ - C: round.c, - MTRoot: round.MTRoot, - Proof: proof, - Round: Round, - Vote: round.Vote}) - return err -} - -// Called by every node after receiving aggregate responses from descendants -func (sn *Node) VerifyResponses(view, Round int) error { - round := sn.Rounds[Round] - - // Check that: base**r_hat * X_hat**c == V_hat - // Equivalent to base**(r+xc) == base**(v) == T in vanillaElGamal - Aux := sn.suite.Point() - V_clean := sn.suite.Point() - V_clean.Add(V_clean.Mul(nil, round.r_hat), Aux.Mul(round.X_hat, round.c)) - // T is the recreated V_hat - T := sn.suite.Point().Null() - T.Add(T, V_clean) - T.Add(T, round.exceptionV_hat) - - var c2 abstract.Secret - isroot := sn.IsRoot(view) - if isroot { - // round challenge must be recomputed given potential - // exception list - if sn.Type == PubKey { - round.c = hashElGamal(sn.suite, sn.LogTest, round.Log.V_hat) - c2 = hashElGamal(sn.suite, sn.LogTest, T) - } else { - round.c = hashElGamal(sn.suite, round.MTRoot, round.Log.V_hat) - c2 = hashElGamal(sn.suite, round.MTRoot, T) - } - } - - // intermediary nodes check partial responses aginst their partial keys - // the root node is also able to check against the challenge it emitted - if !T.Equal(round.Log.V_hat) || (isroot && !round.c.Equal(c2)) { - if DEBUG == true { - panic(sn.Name() + "reports ElGamal Collective Signature failed for Round" + strconv.Itoa(Round)) - } - // return errors.New("Verifying ElGamal Collective Signature failed in " + sn.Name() + "for round " + strconv.Itoa(Round)) - } - - if isroot { - dbg.Lvl4(sn.Name(), "reports ElGamal Collective Signature succeeded for round", Round, "view", view) - nel := len(round.ExceptionList) - nhl := len(sn.HostListOn(view)) - p := strconv.FormatFloat(float64(nel)/float64(nhl), 'f', 6, 64) - log.Infoln(sn.Name(), "reports", nel, "out of", nhl, "percentage", p, "failed in round", Round) - // dbg.Lvl4(round.MTRoot) - } - return nil -} - -func (sn *Node) TimeForViewChange() bool { - sn.roundmu.Lock() - defer sn.roundmu.Unlock() - - // if this round is last one for this view - if 
sn.LastSeenRound%sn.RoundsPerView == 0 { - // dbg.Lvl4(sn.Name(), "TIME FOR VIEWCHANGE:", lsr, rpv) - return true - } - return false -} - -func (sn *Node) StatusConnections(view int, am *AnnouncementMessage) error { - dbg.Lvl2(sn.Name(), "StatusConnected", view) - - // Ask connection-count on all connected children - messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) - for i := range messgs { - sm := SigningMessage{ - Type: StatusConnections, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Am: am} - messgs[i] = &sm - } - - ctx := context.TODO() - if err := sn.PutDown(ctx, view, messgs); err != nil { - return err - } - - if len(sn.Children(view)) == 0 { - sn.Commit(view, am.Round, nil) - } - return nil -} - -func (sn *Node) CloseAll(view int) error { - dbg.Lvl2(sn.Name(), "received CloseAll on", view) - - // At the leaves - if len(sn.Children(view)) == 0 { - dbg.Lvl2(sn.Name(), "in CloseAll is root leaf") - } else { - dbg.Lvl2(sn.Name(), "in CloseAll is calling", len(sn.Children(view)), "children") - - // Inform all children of announcement - messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) - for i := range messgs { - sm := SigningMessage{ - Type: CloseAll, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - } - messgs[i] = &sm - } - ctx := context.TODO() - if err := sn.PutDown(ctx, view, messgs); err != nil { - return err - } - sn.Close() - } - - log.Fatal("Closing down shop") - return nil -} - - - -func (sn *Node) PutUpError(view int, err error) { - // dbg.Lvl4(sn.Name(), "put up response with err", err) - // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - ctx := context.TODO() - sn.PutUp(ctx, view, &SigningMessage{ - Type: Error, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Err: &ErrorMessage{Err: err.Error()}}) -} - -// Returns a secret that depends on on a message and a point -func hashElGamal(suite abstract.Suite, message []byte, p abstract.Point) abstract.Secret { - pb, _ := p.MarshalBinary() - c := suite.Cipher(pb) - c.Message(nil, nil, message) - return suite.Secret().Pick(c) -} - -// Called when log for round if full and ready to be hashed -func (sn *Node) HashLog(Round int) error { - round := sn.Rounds[Round] - var err error - round.HashedLog, err = sn.hashLog(Round) - return err -} - -// Auxilary function to perform the actual hashing of the log -func (sn *Node) hashLog(Round int) ([]byte, error) { - round := sn.Rounds[Round] - - h := sn.suite.Hash() - logBytes, err := round.Log.MarshalBinary() - if err != nil { - return nil, err - } - h.Write(logBytes) - return h.Sum(nil), nil -} diff --git a/proto/sign/collectiveSigning_test.go b/proto/sign/collectiveSigning_test.go deleted file mode 100644 index c6a00fe762..0000000000 --- a/proto/sign/collectiveSigning_test.go +++ /dev/null @@ -1,490 +0,0 @@ -package sign_test - -import -( - "fmt" - "log" - "strconv" - "testing" - "time" - - "github.com/dedis/crypto/abstract" - "github.com/dedis/crypto/edwards" - "github.com/dedis/crypto/edwards/ed25519" - "github.com/dedis/cothority/lib/config" - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/cothority/lib/coconet" -) - -// NOTE: when announcing must provide round numbers - -// Testing suite for signing -// NOTE: when testing if we can gracefully accommodate failures we must: -// 1. Wrap our hosts in FaultyHosts (ex: via field passed in LoadConfig) -// 2. Set out Nodes TesingFailures field to true -// 3. 
We can Choose at which stage our nodes fail by using SetDeadFor -// or we can choose to take them off completely via SetDead - -// 0 -// / -// 1 -// / \ -// 2 3 -func TestStaticMerkle(t *testing.T) { - if err := runStaticTest(sign.MerkleTree, 100); err != nil { - t.Fatal(err) - } -} - -func TestStaticPubKey(t *testing.T) { - if err := runStaticTest(sign.PubKey, 100); err != nil { - t.Fatal(err) - } -} - -func TestStaticFaulty(t *testing.T) { - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 1) - - if err := runStaticTest(sign.PubKey, 100, faultyNodes...); err != nil { - t.Fatal(err) - } -} - -var DefaultView = 0 - -func runStaticTest(signType sign.Type, RoundsPerView int, faultyNodes ...int) error { - // Crypto setup - suite := edwards.NewAES128SHA256Ed25519(true) - //suite := nist.NewAES128SHA256P256() - rand := suite.Cipher([]byte("example")) - - // number of nodes for the test - nNodes := 4 - // create new directory for communication between peers - dir := coconet.NewGoDirectory() - // Create Hosts and Peers - h := make([]coconet.Host, nNodes) - - for i := 0; i < nNodes; i++ { - hostName := "host" + strconv.Itoa(i) - - if len(faultyNodes) > 0 { - h[i] = &coconet.FaultyHost{} - gohost := coconet.NewGoHost(hostName, dir) - h[i] = coconet.NewFaultyHost(gohost) - } else { - h[i] = coconet.NewGoHost(hostName, dir) - } - - } - - for _, fh := range faultyNodes { - h[fh].(*coconet.FaultyHost).SetDeadFor("response", true) - } - - // Create Signing Nodes out of the hosts - nodes := make([]*sign.Node, nNodes) - for i := 0; i < nNodes; i++ { - nodes[i] = sign.NewNode(h[i], suite, rand) - nodes[i].Type = signType - nodes[i].GenSetPool() - nodes[i].RoundsPerView = RoundsPerView - defer nodes[i].Close() - - h[i].SetPubKey(nodes[i].PubKey) - // To test the already keyed signing node, uncomment - // PrivKey := suite.Secret().Pick(rand) - // nodes[i] = NewKeyedNode(h[i], suite, PrivKey) - } - nodes[0].Height = 2 - nodes[1].Height = 1 - nodes[2].Height = 0 - nodes[3].Height = 0 - // Add edges to parents - h[1].AddParent(DefaultView, h[0].Name()) - h[2].AddParent(DefaultView, h[1].Name()) - h[3].AddParent(DefaultView, h[1].Name()) - // Add edges to children, listen to children - h[0].AddChildren(DefaultView, h[1].Name()) - h[1].AddChildren(DefaultView, h[2].Name(), h[3].Name()) - - for _, host := range h { - host.Listen() - host.Connect(0) - } - - for i := 0; i < nNodes; i++ { - if len(faultyNodes) > 0 { - nodes[i].FailureRate = 1 - } - - go func(i int) { - // start listening for messages from within the tree - nodes[i].Listen() - }(i) - } - - // Have root node initiate the signing protocol - // via a simple annoucement - nodes[0].LogTest = []byte("Hello World") - // return nodes[0].Announce(DefaultView, &coll_sign.AnnouncementMessage{LogTest: nodes[0].LogTest, Round: 1}) - return nodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: nodes[0].LogTest, Round: 1}) -} - -// Configuration file data/exconf.json -// 0 -// / \ -// 1 4 -// / \ \ -// 2 3 5 -func TestSmallConfigHealthy(t *testing.T) { - //suite := nist.NewAES128SHA256P256() - suite := edwards.NewAES128SHA256Ed25519(true) - RoundsPerView := 100 - if err := runTreeSmallConfig(sign.MerkleTree, RoundsPerView, suite, 0); err != nil { - t.Fatal(err) - } -} - -func TestSmallConfigHealthyNistQR512(t *testing.T) { - //suite := nist.NewAES128SHA256QR512() - suite := edwards.NewAES128SHA256Ed25519(true) - RoundsPerView := 100 - if err := runTreeSmallConfig(sign.MerkleTree, RoundsPerView, suite, 0); err != nil { - t.Fatal(err) - } -} - 
-func TestSmallConfigHealthyEd25519(t *testing.T) { - suite := ed25519.NewAES128SHA256Ed25519(true) - RoundsPerView := 100 - if err := runTreeSmallConfig(sign.MerkleTree, RoundsPerView, suite, 0); err != nil { - t.Fatal(err) - } -} - -func TestSmallConfigFaulty(t *testing.T) { - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 2, 5) - suite := edwards.NewAES128SHA256Ed25519(true) - //suite := nist.NewAES128SHA256P256() - RoundsPerView := 100 - if err := runTreeSmallConfig(sign.MerkleTree, RoundsPerView, suite, 1, faultyNodes...); err != nil { - t.Fatal(err) - } -} - -func TestSmallConfigFaulty2(t *testing.T) { - failureRate := 15 - faultyNodes := make([]int, 0) - faultyNodes = append(faultyNodes, 1, 2, 3, 4, 5) - RoundsPerView := 100 - suite := edwards.NewAES128SHA256Ed25519(true) - //suite := nist.NewAES128SHA256P256() - if err := runTreeSmallConfig(sign.MerkleTree, RoundsPerView, suite, failureRate, faultyNodes...); err != nil { - t.Fatal(err) - } -} - -func runTreeSmallConfig(signType sign.Type, RoundsPerView int, suite abstract.Suite, failureRate int, faultyNodes ...int) error { - var hc *config.HostConfig - var err error - opts := config.ConfigOptions{Suite: suite} - - if len(faultyNodes) > 0 { - opts.Faulty = true - } - hc, err = config.LoadConfig("testdata/exconf.json", opts) - if err != nil { - return err - } - - for _, fh := range faultyNodes { - fmt.Println("Setting", hc.SNodes[fh].Name(), "as faulty") - if failureRate == 100 { - hc.SNodes[fh].Host.(*coconet.FaultyHost).SetDeadFor("commit", true) - - } - // hc.SNodes[fh].Host.(*coconet.FaultyHost).Die() - } - - if len(faultyNodes) > 0 { - for i := range hc.SNodes { - hc.SNodes[i].FailureRate = failureRate - } - } - for _, sn := range hc.SNodes { - sn.RoundsPerView = RoundsPerView - } - - err = hc.Run(false, signType) - if err != nil { - return err - } - - for _, sn := range hc.SNodes { - defer sn.Close() - } - // Have root node initiate the signing protocol via a simple annoucement - hc.SNodes[0].LogTest = []byte("Hello World") - hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1}) - - return nil -} - -func TestTreeFromBigConfig(t *testing.T) { - // this test configuration HostList is incorrect -- duplicates are present - return - - // not mixing view changes in - RoundsPerView := 100 - - hc, err := config.LoadConfig("testdata/exwax.json") - if err != nil { - t.Fatal(err) - } - for _, sn := range hc.SNodes { - sn.RoundsPerView = RoundsPerView - } - err = hc.Run(false, sign.MerkleTree) - if err != nil { - t.Fatal(err) - } - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - - // give it some time to set up - time.Sleep(2 * time.Second) - - hc.SNodes[0].LogTest = []byte("hello world") - err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1}) - if err != nil { - t.Error(err) - } -} - -// tree from configuration file data/exconf.json -func TestMultipleRounds(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - // not mixing view changes in - RoundsPerView := 100 - hc, err := config.LoadConfig("testdata/exconf.json") - if err != nil { - t.Fatal(err) - } - N := 5 - for _, sn := range hc.SNodes { - sn.RoundsPerView = RoundsPerView - } - err = hc.Run(false, sign.MerkleTree) - if err != nil { - t.Fatal(err) - } - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - - // give it some time to set up - 
time.Sleep(1 * time.Second) - - // Have root node initiate the signing protocol - // via a simple annoucement - for i := 1; i <= N; i++ { - hc.SNodes[0].LogTest = []byte("Hello World" + strconv.Itoa(i)) - err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) - if err != nil { - t.Error(err) - } - } -} - -func TestTCPStaticConfig(t *testing.T) { - // not mixing view changes in - RoundsPerView := 100 - time.Sleep(5 * time.Second) - hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true}) - if err != nil { - t.Error(err) - } - for _, n := range hc.SNodes { - n.RoundsPerView = RoundsPerView - } - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - - err = hc.Run(false, sign.MerkleTree) - if err != nil { - t.Fatal(err) - } - - // give it some time to set up - time.Sleep(2 * time.Second) - - hc.SNodes[0].LogTest = []byte("hello world") - hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1}) - log.Println("Test Done") -} - -func TestTCPStaticConfigRounds(t *testing.T) { - // not mixing view changes in - RoundsPerView := 100 - time.Sleep(5 * time.Second) - if testing.Short() { - t.Skip("skipping test in short mode.") - } - hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true}) - if err != nil { - t.Fatal("error loading configuration: ", err) - } - - for _, n := range hc.SNodes { - n.RoundsPerView = RoundsPerView - } - - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - err = hc.Run(false, sign.MerkleTree) - if err != nil { - t.Fatal("error running:", err) - } - // give it some time to set up - time.Sleep(2 * time.Second) - - N := 5 - for i := 1; i <= N; i++ { - hc.SNodes[0].LogTest = []byte("hello world") - hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) - } -} - -// Tests the integration of View Change with Signer (ability to reach consensus on a view change) -// After achieving consensus, View is not actually changed, because of Signer test framework limitations -// See tests in coll_stamp/ for the actual view change ocurring -// Go channels, static configuration, multiple rounds -func TestViewChangeChan(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - hc, err := config.LoadConfig("testdata/exconf.json") - if err != nil { - t.Fatal(err) - } - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - - err = hc.Run(false, sign.MerkleTree) - if err != nil { - t.Fatal(err) - } - // give it some time to set up - time.Sleep(2 * time.Second) - - // Have root node initiate the signing protocol - // via a simple annoucement - N := 6 - for i := 1; i <= N; i++ { - hc.SNodes[0].LogTest = []byte("Hello World" + strconv.Itoa(i)) - err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) - if err == sign.ChangingViewError { - log.Println("Attempted round", i, "but received view change. 
waiting then retrying") - time.Sleep(3 * time.Second) - i-- - continue - } - - if err != nil { - t.Error(err) - } - } -} - -// Tests the integration of View Change with Signer (ability to reach consensus on a view change) -// After achieving consensus, View is not actually changed, because of Signer test framework limitations -// See tests in coll_stamp/ for the actual view change ocurring -func TestViewChangeTCP(t *testing.T) { - time.Sleep(5 * time.Second) - if testing.Short() { - t.Skip("skipping test in short mode.") - } - hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true}) - if err != nil { - t.Fatal("error loading configuration: ", err) - } - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - err = hc.Run(false, sign.MerkleTree) - if err != nil { - t.Fatal("error running:", err) - } - // give it some time to set up - time.Sleep(2 * time.Second) - - N := 6 - for i := 1; i <= N; i++ { - hc.SNodes[0].LogTest = []byte("hello world") - hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) - } -} - -// func TestTreeBigConfigTCP(t *testing.T) { -// if testing.Short() { -// t.Skip("skipping test in short mode.") -// } -// hc, err := LoadConfig("data/wax.json", ConfigOptions{ConnType: "tcp", GenHosts: true}) -// if err != nil { -// t.Error() -// } -// err = hc.Run(false, coll_sign.MerkleTree) -// if err != nil { -// t.Fatal(err) -// } -// hc.SNodes[0].LogTest = []byte("hello world") -// err = hc.SNodes[0].Announce(&AnnouncementMessage{hc.SNodes[0].LogTest}) -// if err != nil { -// t.Error(err) -// } -// for _, n := range hc.SNodes { -// n.Close() -// } -// } - -/*func BenchmarkTreeBigConfigTCP(b *testing.B) { - if testing.Short() { - b.Skip("skipping test in short mode.") - } - hc, err := LoadConfig("data/wax.json", "tcp") - if err != nil { - b.Error() - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - hc.SNodes[0].LogTest = []byte("hello world") - hc.SNodes[0].Announce(&AnnouncementMessage{hc.SNodes[0].LogTest}) - } -}*/ diff --git a/proto/sign/consensus.go b/proto/sign/consensus.go deleted file mode 100644 index bcac19f5e1..0000000000 --- a/proto/sign/consensus.go +++ /dev/null @@ -1,286 +0,0 @@ -package sign - -import ( - "errors" - "sync/atomic" - - "golang.org/x/net/context" - - log "github.com/Sirupsen/logrus" - "github.com/dedis/cothority/lib/coconet" -) - -func (sn *Node) SetupProposal(view int, am *AnnouncementMessage, from string) error { - // if this is for viewchanges: otherwise new views are not allowed - if am.Vote.Type == ViewChangeVT { - // viewchange votes must be received from the new parent on the new view - if view != am.Vote.Vcv.View { - return errors.New("view change attempt on view != received view") - } - // ensure that we are caught up - if atomic.LoadInt64(&sn.LastSeenVote) != atomic.LoadInt64(&sn.LastAppliedVote) { - return errors.New("not up to date: need to catch up") - } - if sn.RootFor(am.Vote.Vcv.View) != am.Vote.Vcv.Root { - return errors.New("invalid root for proposed view") - } - - nextview := sn.ViewNo + 1 - for ; nextview <= view; nextview++ { - // log.Println(sn.Name(), "CREATING NEXT VIEW", nextview) - sn.NewViewFromPrev(nextview, from) - for _, act := range sn.Actions[nextview] { - sn.ApplyAction(nextview, act) - } - for _, act := range sn.Actions[nextview] { - sn.NotifyOfAction(nextview, act) - } - } - // fmt.Fprintln(os.Stderr, sn.Name(), "setuppropose:", sn.HostListOn(view)) - // 
fmt.Fprintln(os.Stderr, sn.Name(), "setuppropose:", sn.Parent(view)) - } else { - if view != sn.ViewNo { - return errors.New("vote on not-current view") - } - } - - if am.Vote.Type == AddVT { - if am.Vote.Av.View <= sn.ViewNo { - return errors.New("unable to change past views") - } - } - if am.Vote.Type == RemoveVT { - if am.Vote.Rv.View <= sn.ViewNo { - return errors.New("unable to change past views") - } - } - return nil -} - -// A propose for a view change would come on current view + sth -// when we receive view change message on a future view, -// we must be caught up, create that view and apply actions on it -func (sn *Node) Propose(view int, am *AnnouncementMessage, from string) error { - log.Println(sn.Name(), "GOT ", "Propose", am) - if err := sn.SetupProposal(view, am, from); err != nil { - return err - } - - if err := sn.setUpRound(view, am); err != nil { - return err - } - // log.Println(sn.Name(), "propose on view", view, sn.HostListOn(view)) - sn.Rounds[am.Round].Vote = am.Vote - - // Inform all children of proposal - messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) - for i := range messgs { - sm := SigningMessage{ - Type: Announcement, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Am: am} - messgs[i] = &sm - } - ctx := context.TODO() - //ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - if err := sn.PutDown(ctx, view, messgs); err != nil { - return err - } - - if len(sn.Children(view)) == 0 { - log.Println(sn.Name(), "no children") - sn.Promise(view, am.Round, nil) - } - return nil -} - -func (sn *Node) Promise(view, Round int, sm *SigningMessage) error { - log.Println(sn.Name(), "GOT ", "Promise", sm) - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, Round) - sn.roundmu.Unlock() - - round := sn.Rounds[Round] - if round == nil { - // was not announced of this round, should retreat - return nil - } - if sm != nil { - round.Commits = append(round.Commits, sm) - } - - if len(round.Commits) != len(sn.Children(view)) { - return nil - } - - // cast own vote - sn.AddVotes(Round, round.Vote) - - for _, sm := range round.Commits { - // count children votes - round.Vote.Count.Responses = append(round.Vote.Count.Responses, sm.Com.Vote.Count.Responses...) 
- round.Vote.Count.For += sm.Com.Vote.Count.For - round.Vote.Count.Against += sm.Com.Vote.Count.Against - - } - - return sn.actOnPromises(view, Round) -} - -func (sn *Node) actOnPromises(view, Round int) error { - round := sn.Rounds[Round] - var err error - - if sn.IsRoot(view) { - sn.commitsDone <- Round - - var b []byte - b, err = round.Vote.MarshalBinary() - if err != nil { - // log.Fatal("Marshal Binary failed for CountedVotes") - return err - } - round.c = hashElGamal(sn.suite, b, round.Log.V_hat) - err = sn.Accept(view, &ChallengeMessage{ - C: round.c, - Round: Round, - Vote: round.Vote}) - - } else { - // create and putup own commit message - com := &CommitmentMessage{ - Vote: round.Vote, - Round: Round} - - // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - // log.Println(sn.Name(), "puts up promise on view", view, "to", sn.Parent(view)) - ctx := context.TODO() - err = sn.PutUp(ctx, view, &SigningMessage{ - View: view, - Type: Commitment, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Com: com}) - } - return err -} - -func (sn *Node) Accept(view int, chm *ChallengeMessage) error { - log.Println(sn.Name(), "GOT ", "Accept", chm) - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, chm.Round) - sn.roundmu.Unlock() - - round := sn.Rounds[chm.Round] - if round == nil { - log.Errorln("error round is nil") - return nil - } - - // act on decision of aggregated votes - // log.Println(sn.Name(), chm.Round, round.VoteRequest) - if round.Vote != nil { - // append vote to vote log - // potentially initiates signing node action based on vote - sn.actOnVotes(view, chm.Vote) - } - if err := sn.SendChildrenChallenges(view, chm); err != nil { - return err - } - - if len(sn.Children(view)) == 0 { - sn.Accepted(view, chm.Round, nil) - } - - return nil -} - -func (sn *Node) Accepted(view, Round int, sm *SigningMessage) error { - log.Println(sn.Name(), "GOT ", "Accepted") - // update max seen round - sn.roundmu.Lock() - sn.LastSeenRound = max(sn.LastSeenRound, Round) - sn.roundmu.Unlock() - - round := sn.Rounds[Round] - if round == nil { - // TODO: if combined with cosi pubkey, check for round.Log.v existing needed - // If I was not announced of this round, or I failed to commit - return nil - } - - if sm != nil { - round.Responses = append(round.Responses, sm) - } - if len(round.Responses) != len(sn.Children(view)) { - return nil - } - // TODO: after having a chance to inspect the contents of the challenge - // nodes can raise an alarm respond by ack/nack - - if sn.IsRoot(view) { - sn.done <- Round - } else { - // create and putup own response message - rm := &ResponseMessage{ - Vote: round.Vote, - Round: Round} - - // ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond) - ctx := context.TODO() - return sn.PutUp(ctx, view, &SigningMessage{ - Type: Response, - View: view, - LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), - Rm: rm}) - } - - return nil -} - -func (sn *Node) actOnVotes(view int, v *Vote) { - // TODO: percentage of nodes for quorum should be parameter - // Basic check to validate Vote was Confirmed, can be enhanced - // TODO: signing node can go through list of votes and verify - accepted := v.Count.For > 2*len(sn.HostListOn(view))/3 - - // Report on vote decision - if sn.IsRoot(view) { - abstained := len(sn.HostListOn(view)) - v.Count.For - v.Count.Against - log.Infoln("Votes FOR:", v.Count.For, "; Votes AGAINST:", v.Count.Against, "; Absteined:", abstained, "Accepted", accepted) - } 
- - // Act on vote Decision - if accepted { - log.Println(sn.Name(), "actOnVotes: vote", v.Index, " has been accepted") - sn.VoteLog.Put(v.Index, v) - } else { - log.Println(sn.Name(), "actOnVotes: vote", v.Index, " has been rejected") - - // inform node trying to join/remove group of rejection - gcm := &SigningMessage{ - Type: GroupChanged, - From: sn.Name(), - View: view, - LastSeenVote: int(sn.LastSeenVote), - Gcm: &GroupChangedMessage{ - V: &*v, // need copy bcs PutTo on separate thread - HostList: sn.HostListOn(view)}} - - if v.Type == AddVT && sn.Name() == v.Av.Parent { - sn.PutTo(context.TODO(), v.Av.Name, gcm) - } else if v.Type == RemoveVT && sn.Name() == v.Rv.Parent { - sn.PutTo(context.TODO(), v.Rv.Name, gcm) - } - - v.Type = NoOpVT - sn.VoteLog.Put(v.Index, v) - - } - // List out all votes - // for _, vote := range round.CountedVotes.Votes { - // log.Infoln(vote.Name, vote.Accepted) - // } -} diff --git a/proto/sign/constants.go b/proto/sign/constants.go deleted file mode 100644 index bc7259fd10..0000000000 --- a/proto/sign/constants.go +++ /dev/null @@ -1,10 +0,0 @@ -package sign - -import "time" - -// Constants we expect might be used by other packages -// TODO: can this be replaced by the application using the signer? -var ROUND_TIME time.Duration = 1 * time.Second -var HEARTBEAT = ROUND_TIME + ROUND_TIME/2 - -var GOSSIP_TIME time.Duration = 3 * ROUND_TIME diff --git a/proto/sign/errors.go b/proto/sign/errors.go deleted file mode 100644 index c16f1915ec..0000000000 --- a/proto/sign/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package sign - -import "errors" - -var ErrUnknownMessageType error = errors.New("received message of unknown type") - -var ErrViewRejected error = errors.New("view Rejected: not all nodes accepted view") - -var ErrImposedFailure error = errors.New("failure imposed") - -var ErrPastRound error = errors.New("round number already passed") diff --git a/proto/sign/interfaces.go b/proto/sign/interfaces.go deleted file mode 100644 index 40d61e3f12..0000000000 --- a/proto/sign/interfaces.go +++ /dev/null @@ -1,52 +0,0 @@ -package sign - -import ( - "github.com/dedis/crypto/abstract" - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/cothority/lib/proof" -) - -var DEBUG bool // to avoid verifying paths and signatures all the time - -// Returns commitment contribution for a round -type CommitFunc func(view int) []byte - -// Called at the end of a round -// Allows client of Signer to receive signature, proof, and error via RPC -type DoneFunc func(view int, SNRoot hashid.HashId, LogHash hashid.HashId, p proof.Proof) - -// todo: see where Signer should be located -type Signer interface { - Name() string - IsRoot(view int) bool - Suite() abstract.Suite - StartSigningRound() error - StartVotingRound(v *Vote) error - - LastRound() int // last round number seen by Signer - SetLastSeenRound(int) // impose change in round numbering - - Hostlist() []string - - // // proof can be nil for simple non Merkle Tree signatures - // // could add option field for Sign - // Sign([]byte) (hashid.HashId, proof.Proof, error) - - // registers a commitment function to be called - // at the start of every round - RegisterAnnounceFunc(cf CommitFunc) - - RegisterDoneFunc(df DoneFunc) - - // Allows user of Signer to inform Signer to run with simulated failures - // As to test robustness of Signer - SetFailureRate(val int) - - ViewChangeCh() chan string - - Close() - Listen() error - - AddSelf(host string) error - RemoveSelf() error -} diff --git a/proto/sign/merkle.go 
b/proto/sign/merkle.go deleted file mode 100644 index c66d6fdde5..0000000000 --- a/proto/sign/merkle.go +++ /dev/null @@ -1,195 +0,0 @@ -package sign - -// Functions used in collective signing -// That are direclty related to the generation/ verification/ sending -// of the Merkle Tree Signature - -import ( - "bytes" - "sort" - "strconv" - - //log "github.com/Sirupsen/logrus" - dbg "github.com/dedis/cothority/lib/debug_lvl" - - "github.com/dedis/cothority/lib/coconet" - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/cothority/lib/proof" -) - -func (sn *Node) AddChildrenMerkleRoots(Round int) { - sn.roundLock.RLock() - round := sn.Rounds[Round] - sn.roundLock.RUnlock() - // children commit roots - round.CMTRoots = make([]hashid.HashId, len(round.Leaves)) - copy(round.CMTRoots, round.Leaves) - round.CMTRootNames = make([]string, len(round.Leaves)) - copy(round.CMTRootNames, round.LeavesFrom) - - // concatenate children commit roots in one binary blob for easy marshalling - round.Log.CMTRoots = make([]byte, 0) - for _, leaf := range round.Leaves { - round.Log.CMTRoots = append(round.Log.CMTRoots, leaf...) - } -} - -func (sn *Node) AddLocalMerkleRoot(view, Round int) { - sn.roundLock.RLock() - round := sn.Rounds[Round] - sn.roundLock.RUnlock() - // add own local mtroot to leaves - if sn.CommitFunc != nil { - round.LocalMTRoot = sn.CommitFunc(view) - } else { - round.LocalMTRoot = make([]byte, hashid.Size) - } - round.Leaves = append(round.Leaves, round.LocalMTRoot) -} - -func (sn *Node) ComputeCombinedMerkleRoot(view, Round int) { - sn.roundLock.RLock() - round := sn.Rounds[Round] - sn.roundLock.RUnlock() - // add hash of whole log to leaves - round.Leaves = append(round.Leaves, round.HashedLog) - - // compute MT root based on Log as right child and - // MT of leaves as left child and send it up to parent - sort.Sort(hashid.ByHashId(round.Leaves)) - left, proofs := proof.ProofTree(sn.Suite().Hash, round.Leaves) - right := round.HashedLog - moreLeaves := make([]hashid.HashId, 0) - moreLeaves = append(moreLeaves, left, right) - round.MTRoot, _ = proof.ProofTree(sn.Suite().Hash, moreLeaves) - - // Hashed Log has to come first in the proof; len(sn.CMTRoots)+1 proofs - round.Proofs = make(map[string]proof.Proof, 0) - children := sn.Children(view) - for name := range children { - round.Proofs[name] = append(round.Proofs[name], right) - } - round.Proofs["local"] = append(round.Proofs["local"], right) - - // separate proofs by children (need to send personalized proofs to children) - // also separate local proof (need to send it to timestamp server) - sn.SeparateProofs(proofs, round.Leaves, Round) -} - -// Create Merkle Proof for local client (timestamp server) -// Send Merkle Proof to local client (timestamp server) -func (sn *Node) SendLocalMerkleProof(view int, chm *ChallengeMessage) error { - if sn.DoneFunc != nil { - sn.roundLock.RLock() - round := sn.Rounds[chm.Round] - sn.roundLock.RUnlock() - proofForClient := make(proof.Proof, len(chm.Proof)) - copy(proofForClient, chm.Proof) - - // To the proof from our root to big root we must add the separated proof - // from the localMKT of the client (timestamp server) to our root - proofForClient = append(proofForClient, round.Proofs["local"]...) 
- - // if want to verify partial and full proofs - // dbg.Lvl4("*****") - // dbg.Lvl4(sn.Name(), chm.Round, proofForClient) - if DEBUG == true { - sn.VerifyAllProofs(view, chm, proofForClient) - } - - // 'reply' to client - // TODO: add error to done function - sn.DoneFunc(view, chm.MTRoot, round.MTRoot, proofForClient) - } - - return nil -} - -// Create Personalized Merkle Proofs for children servers -// Send Personalized Merkle Proofs to children servers -func (sn *Node) SendChildrenChallengesProofs(view int, chm *ChallengeMessage) error { - round := sn.Rounds[chm.Round] - // proof from big root to our root will be sent to all children - baseProof := make(proof.Proof, len(chm.Proof)) - copy(baseProof, chm.Proof) - - // for each child, create personalized part of proof - // embed it in SigningMessage, and send it - for name, conn := range sn.Children(view) { - newChm := *chm - newChm.Proof = append(baseProof, round.Proofs[name]...) - - var messg coconet.BinaryMarshaler - messg = &SigningMessage{View: view, Type: Challenge, Chm: &newChm} - - // send challenge message to child - // dbg.Lvl4("connection: sending children challenge proofs:", name, conn) - if err := conn.Put(messg); err != nil { - return err - } - } - - return nil -} - -// Identify which proof corresponds to which leaf -// Needed given that the leaves are sorted before passed to the function that create -// the Merkle Tree and its Proofs -func (sn *Node) SeparateProofs(proofs []proof.Proof, leaves []hashid.HashId, Round int) { - sn.roundLock.RLock() - round := sn.Rounds[Round] - sn.roundLock.RUnlock() - // separate proofs for children servers mt roots - for i := 0; i < len(round.CMTRoots); i++ { - name := round.CMTRootNames[i] - for j := 0; j < len(leaves); j++ { - if bytes.Compare(round.CMTRoots[i], leaves[j]) == 0 { - // sn.Proofs[i] = append(sn.Proofs[i], proofs[j]...) - round.Proofs[name] = append(round.Proofs[name], proofs[j]...) - continue - } - } - } - - // separate proof for local mt root - for j := 0; j < len(leaves); j++ { - if bytes.Compare(round.LocalMTRoot, leaves[j]) == 0 { - round.Proofs["local"] = append(round.Proofs["local"], proofs[j]...) 
- } - } -} - -// Check that starting from its own committed message each child can reach our subtrees' mtroot -// Also checks that starting from local mt root we can get to our subtrees' mtroot <-- could be in diff fct -func (sn *Node) checkChildrenProofs(Round int) { - sn.roundLock.RLock() - round := sn.Rounds[Round] - sn.roundLock.RUnlock() - cmtAndLocal := make([]hashid.HashId, len(round.CMTRoots)) - copy(cmtAndLocal, round.CMTRoots) - cmtAndLocal = append(cmtAndLocal, round.LocalMTRoot) - - proofs := make([]proof.Proof, 0) - for _, name := range round.CMTRootNames { - proofs = append(proofs, round.Proofs[name]) - } - - if proof.CheckLocalProofs(sn.Suite().Hash, round.MTRoot, cmtAndLocal, proofs) == true { - dbg.Lvl4("Chidlren Proofs of", sn.Name(), "successful for round "+strconv.Itoa(sn.nRounds)) - } else { - panic("Children Proofs" + sn.Name() + " unsuccessful for round " + strconv.Itoa(sn.nRounds)) - } -} - -func (sn *Node) VerifyAllProofs(view int, chm *ChallengeMessage, proofForClient proof.Proof) { - sn.roundLock.RLock() - round := sn.Rounds[chm.Round] - sn.roundLock.RUnlock() - // proof from client to my root - proof.CheckProof(sn.Suite().Hash, round.MTRoot, round.LocalMTRoot, round.Proofs["local"]) - // proof from my root to big root - dbg.Lvl4(sn.Name(), "verifying for view", view) - proof.CheckProof(sn.Suite().Hash, chm.MTRoot, round.MTRoot, chm.Proof) - // proof from client to big root - proof.CheckProof(sn.Suite().Hash, chm.MTRoot, round.LocalMTRoot, proofForClient) -} diff --git a/proto/sign/pubkey.go b/proto/sign/pubkey.go deleted file mode 100644 index 5b2b8fce1e..0000000000 --- a/proto/sign/pubkey.go +++ /dev/null @@ -1,22 +0,0 @@ -package sign - -import "github.com/dedis/cothority/lib/coconet" - -// Functions used in collective signing -// That are direclty related to the generation/ verification/ sending -// of the Simple Combined Public Key Signature - -// Send children challenges -func (sn *Node) SendChildrenChallenges(view int, chm *ChallengeMessage) error { - for _, child := range sn.Children(view) { - var messg coconet.BinaryMarshaler - messg = &SigningMessage{View: view, Type: Challenge, Chm: chm} - - // fmt.Println(sn.Name(), "send to", i, child, "on view", view) - if err := child.Put(messg); err != nil { - return err - } - } - - return nil -} diff --git a/proto/sign/round.go b/proto/sign/round.go deleted file mode 100644 index f9e3d6946e..0000000000 --- a/proto/sign/round.go +++ /dev/null @@ -1,93 +0,0 @@ -package sign - -import "github.com/dedis/crypto/abstract" -import "github.com/dedis/cothority/lib/hashid" -import "github.com/dedis/cothority/lib/proof" - -const FIRST_ROUND int = 1 // start counting rounds at 1 - -type Round struct { - c abstract.Secret // round lasting challenge - r abstract.Secret // round lasting response - - Log SNLog // round lasting log structure - HashedLog []byte - - r_hat abstract.Secret // aggregate of responses - X_hat abstract.Point // aggregate of public keys - - Commits []*SigningMessage - Responses []*SigningMessage - - // own big merkle subtree - MTRoot hashid.HashId // mt root for subtree, passed upwards - Leaves []hashid.HashId // leaves used to build the merkle subtre - LeavesFrom []string // child names for leaves - - // mtRoot before adding HashedLog - LocalMTRoot hashid.HashId - - // merkle tree roots of children in strict order - CMTRoots []hashid.HashId - CMTRootNames []string - Proofs map[string]proof.Proof - - // round-lasting public keys of children servers that did not - // respond to latest commit or respond 
phase, in subtree - ExceptionList []abstract.Point - // combined point commits of children servers in subtree - ChildV_hat map[string]abstract.Point - // combined public keys of children servers in subtree - ChildX_hat map[string]abstract.Point - // for internal verification purposes - exceptionV_hat abstract.Point - - BackLink hashid.HashId - AccRound []byte - - Vote *Vote - // VoteRequest *VoteRequest // Vote Request vote on in the round - // CountedVotes *CountedVotes // CountedVotes contains a subtree's votes -} - -func NewRound(suite abstract.Suite) *Round { - round := &Round{} - round.Commits = make([]*SigningMessage, 0) - round.Responses = make([]*SigningMessage, 0) - round.ExceptionList = make([]abstract.Point, 0) - round.Log.Suite = suite - return round -} - -type RoundType int - -const ( - EmptyRT RoundType = iota - ViewChangeRT - AddRT - RemoveRT - ShutdownRT - NoOpRT - SigningRT -) - -func (rt RoundType) String() string { - switch rt { - case EmptyRT: - return "empty" - case SigningRT: - return "signing" - case ViewChangeRT: - return "viewchange" - case AddRT: - return "add" - case RemoveRT: - return "remove" - case ShutdownRT: - return "shutdown" - case NoOpRT: - return "noop" - default: - return "" - } -} diff --git a/proto/sign/signBenchmark_test.go b/proto/sign/signBenchmark_test.go deleted file mode 100644 index bd5b433825..0000000000 --- a/proto/sign/signBenchmark_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package sign_test - -import -( - "strconv" - "testing" - - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/cothority/lib/config" -) - -// func init() { -// log.SetOutput(ioutil.Discard) -// } - -// one after the other by the root (one signature per message created) -func SimpleRoundsThroughput(N int, b *testing.B) { - hc, _ := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true}) - hc.Run(false, sign.PubKey) - - for n := 0; n < b.N; n++ { - for i := 0; i < N; i++ { - hc.SNodes[0].LogTest = []byte("hello world" + strconv.Itoa(i)) - hc.SNodes[0].Announce(DefaultView, &sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 0}) - - } - for _, sn := range hc.SNodes { - sn.Close() - } - - } -} - -func BenchmarkSimpleRoundsThroughput100(b *testing.B) { - SimpleRoundsThroughput(100, b) -} - -func BenchmarkSimpleRoundsThroughput200(b *testing.B) { - SimpleRoundsThroughput(200, b) -} diff --git a/proto/sign/signingMessages_test.go b/proto/sign/signingMessages_test.go deleted file mode 100644 index cc40468544..0000000000 --- a/proto/sign/signingMessages_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package sign_test - -import -( - "bytes" - "reflect" - "testing" - - "log" - - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/cothority/lib/proof" - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/crypto/edwards" -) - -func init() { - log.SetFlags(log.Lshortfile) -} - -func TestErrorMessage(t *testing.T) { - sm := &sign.SigningMessage{Type: sign.Error, Err: &sign.ErrorMessage{Err: "random error"}} - b, e := sm.MarshalBinary() - if e != nil { - t.Fatal(e) - } - sm2 := &sign.SigningMessage{} - e = sm2.UnmarshalBinary(b) - if e != nil { - t.Fatal(e) - } - if !reflect.DeepEqual(sm, sm2) { - t.Fatal("sm != sm2: ", sm, sm2, sm.Am, sm2.Am) - } -} - -// test marshalling and unmarshalling for -// the various types of signing messages - -func TestMUAnnouncement(t *testing.T) { - logTest := []byte("Hello World") - sm := &sign.SigningMessage{Type: sign.Announcement, Am: &sign.AnnouncementMessage{LogTest: logTest}} - 
dataBytes, err := sm.MarshalBinary() - if err != nil { - t.Error("Marshaling didn't work") - } - - sm2 := &sign.SigningMessage{} - sm2.UnmarshalBinary(dataBytes) - if err != nil { - t.Error("Unmarshaling didn't work") - } - if !reflect.DeepEqual(sm, sm2) { - t.Fatal("sm != sm2: ", sm, sm2, sm.Am, sm2.Am) - } -} - -// Test for Marshalling and Unmarshalling Challenge Messages -// Important: when making empty HashIds len should be set to HASH_SIZE -func TestMUChallenge(t *testing.T) { - nHashIds := 3 - - var err error - suite := edwards.NewAES128SHA256Ed25519(true) - //suite := nist.NewAES128SHA256P256() - rand := suite.Cipher([]byte("example")) - - cm := &sign.ChallengeMessage{} - cm.C = suite.Secret().Pick(rand) - cm.MTRoot = make([]byte, hashid.Size) - cm.Proof = proof.Proof(make([]hashid.HashId, nHashIds)) - for i := 0; i < nHashIds; i++ { - cm.Proof[i] = make([]byte, hashid.Size) - } - sm := &sign.SigningMessage{Type: sign.Challenge, Chm: cm} - smBytes, err := sm.MarshalBinary() - if err != nil { - t.Error(err) - } - - messg := &sign.SigningMessage{} - err = messg.UnmarshalBinary(smBytes) - cm2 := messg.Chm - - // test for equality after marshal and unmarshal - if !cm2.C.Equal(cm.C) || - bytes.Compare(cm2.MTRoot, cm.MTRoot) != 0 || - !byteArrayEqual(cm2.Proof, cm.Proof) { - t.Error("challenge message MU failed") - } -} - -// Test for Marshalling and Unmarshalling Comit Messages -// Important: when making empty HashIds len should be set to HASH_SIZE -func TestMUCommit(t *testing.T) { - var err error - suite := edwards.NewAES128SHA256Ed25519(true) - //suite := nist.NewAES128SHA256P256() - rand := suite.Cipher([]byte("exampfsdjkhujgkjsgfjgle")) - rand2 := suite.Cipher([]byte("examplsfhsjedgjhsge2")) - - cm := &sign.CommitmentMessage{} - cm.V, _ = suite.Point().Pick(nil, rand) - cm.V_hat, _ = suite.Point().Pick(nil, rand2) - - cm.MTRoot = make([]byte, hashid.Size) - sm := sign.SigningMessage{Type: sign.Commitment, Com: cm} - smBytes, err := sm.MarshalBinary() - if err != nil { - t.Error(err) - } - - messg := &sign.SigningMessage{} - err = messg.UnmarshalBinary(smBytes) - cm2 := messg.Com - - // test for equality after marshal and unmarshal - if !cm2.V.Equal(cm.V) || - !cm2.V_hat.Equal(cm.V_hat) || - bytes.Compare(cm2.MTRoot, cm.MTRoot) != 0 { - t.Error("commit message MU failed") - } - -} - -func byteArrayEqual(a proof.Proof, b proof.Proof) bool { - n := len(a) - if n != len(b) { - return false - } - - for i := 0; i < n; i++ { - if bytes.Compare(a[i], b[i]) != 0 { - return false - } - } - - return true -} diff --git a/proto/sign/snlog.go b/proto/sign/snlog.go deleted file mode 100644 index 36e3c80003..0000000000 --- a/proto/sign/snlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package sign - -import ( - "bytes" - "encoding/gob" - - "github.com/dedis/cothority/lib/hashid" - "github.com/dedis/crypto/abstract" -) - -// Signing Node Log for a round -// For Marshaling and Unrmarshaling to work smoothly -// crypto fields must appear first in the structure -type SNLog struct { - v abstract.Secret // round lasting secret - V abstract.Point // round lasting commitment point - V_hat abstract.Point // aggregate of commit points - - // merkle tree roots of children in strict order - CMTRoots hashid.HashId // concatenated hash ids of children - Suite abstract.Suite -} - -func (snLog SNLog) MarshalBinary() ([]byte, error) { - // abstract.Write used to encode/ marshal crypto types - b := bytes.Buffer{} - snLog.Suite.Write(&b, &snLog.v, &snLog.V, &snLog.V_hat) - ////// gob is used to encode non-crypto types - enc 
:= gob.NewEncoder(&b) - err := enc.Encode(snLog.CMTRoots) - return b.Bytes(), err -} - -func (snLog *SNLog) UnmarshalBinary(data []byte) error { - // abstract.Read used to decode/ unmarshal crypto types - b := bytes.NewBuffer(data) - err := snLog.Suite.Read(b, &snLog.v, &snLog.V, &snLog.V_hat) - // gob is used to decode non-crypto types - rem, _ := snLog.MarshalBinary() - snLog.CMTRoots = data[len(rem):] - return err -} diff --git a/proto/sign/snviewchange.go b/proto/sign/snviewchange.go deleted file mode 100644 index 7c9845a539..0000000000 --- a/proto/sign/snviewchange.go +++ /dev/null @@ -1,104 +0,0 @@ -package sign - -import "log" - -func (sn *Node) ChangeView(vcv *ViewChangeVote) { - // log.Println(sn.Name(), " in CHANGE VIEW") - // at this point actions have already been applied - // all we need to do is switch our default view - sn.viewmu.Lock() - sn.ViewNo = vcv.View - sn.viewmu.Unlock() - if sn.RootFor(vcv.View) == sn.Name() { - log.Println(sn.Name(), "CHANGE VIEW TO ROOT", "children", sn.Children(vcv.View)) - sn.viewChangeCh <- "root" - } else { - log.Println(sn.Name(), "CHANGE VIEW TO REGULAR") - sn.viewChangeCh <- "regular" - } - - sn.viewmu.Lock() - sn.ChangingView = false - sn.viewmu.Unlock() - log.Println("VIEW CHANGED") - // TODO: garbage collect old connections -} - -/* -func (sn *Node) ViewChange(view int, parent string, vcm *ViewChangeMessage) error { - sn.ChangingView = true - - log.Println(sn.Name(), "VIEW CHANGE MESSAGE: new Round == , oldlsr == , view == ", vcm.Round, sn.LastSeenRound, view) - sn.LastSeenRound = max(vcm.Round, sn.LastSeenRound) - - iAmNextRoot := false - if sn.RootFor(vcm.ViewNo) == sn.Name() { - iAmNextRoot = true - } - - sn.Views().Lock() - _, exists := sn.Views().Views[vcm.ViewNo] - sn.Views().Unlock() - if !exists { - log.Println("PEERS:", sn.Peers()) - children := sn.childrenForNewView(parent) - log.Println("CREATING NEW VIEW with ", len(sn.HostListOn(view-1)), "hosts", "on view", view) - sn.NewView(vcm.ViewNo, parent, children, sn.HostListOn(view-1)) - } - - log.Println(sn.Name(), ":multiplexing onto children:", sn.Children(view)) - sn.multiplexOnChildren(vcm.ViewNo, &SigningMessage{View: view, Type: ViewChange, Vcm: vcm}) - - log.Println(sn.Name(), "waiting on view accept messages from children:", sn.Children(view)) - - votes := len(sn.Children(view)) - - log.Println(sn.Name(), "received view accept messages from children:", votes) - - var err error - if iAmNextRoot { - - if votes > len(sn.HostListOn(view))*2/3 { - - log.Println(sn.Name(), "quorum", votes, "of", len(sn.HostListOn(view)), "confirmed me as new root") - vcfm := &ViewConfirmedMessage{ViewNo: vcm.ViewNo} - sm := &SigningMessage{Type: ViewConfirmed, Vcfm: vcfm, From: sn.Name(), View: vcm.ViewNo} - sn.multiplexOnChildren(vcm.ViewNo, sm) - - sn.ChangingView = false - sn.ViewNo = vcm.ViewNo - sn.viewChangeCh <- "root" - } else { - log.Errorln(sn.Name(), " (ROOT) DID NOT RECEIVE quorum", votes, "of", len(sn.HostList)) - return ErrViewRejected - } - } else { - sn.RoundsAsRoot = 0 - - vam := &ViewAcceptedMessage{ViewNo: vcm.ViewNo, Votes: votes} - - log.Println(sn.Name(), "putting up on view", view, "accept for view", vcm.ViewNo) - err = sn.PutUp(context.TODO(), vcm.ViewNo, &SigningMessage{ - View: view, - From: sn.Name(), - Type: ViewAccepted, - Vam: vam}) - - return err - } - - return err -} - -func (sn *Node) ViewChanged(view int, sm *SigningMessage) { - log.Println(sn.Name(), "view CHANGED to", view) - - sn.ChangingView = false - - sn.viewChangeCh <- "regular" - - log.Println("in view 
change, children for view", view, sn.Children(view)) - sn.multiplexOnChildren(view, sm) - log.Println(sn.Name(), " exited view CHANGE to", view) -} -*/ diff --git a/proto/sign/vote_test.go b/proto/sign/vote_test.go deleted file mode 100644 index 8aac1ed056..0000000000 --- a/proto/sign/vote_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package sign_test - -import -( - "testing" - "time" - - "github.com/dedis/cothority/proto/sign" - "github.com/dedis/cothority/lib/config" -) - -// Configuration file data/exconf.json -// 0 -// / \ -// 1 4 -// / \ \ -// 2 3 5 -func TestTreeSmallConfigVote(t *testing.T) { - hc, err := config.LoadConfig("testdata/exconf.json") - if err != nil { - t.Fatal(err) - } - - err = hc.Run(false, sign.Voter) - if err != nil { - t.Fatal(err) - } - - // Achieve consensus on removing a node - vote := &sign.Vote{Type: sign.AddVT, Av: &sign.AddVote{Name: "host5", Parent: "host4"}} - err = hc.SNodes[0].StartVotingRound(vote) - - if err != nil { - t.Error(err) - } - -} - -func TestTCPStaticConfigVote(t *testing.T) { - hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true}) - if err != nil { - t.Error(err) - } - defer func() { - for _, n := range hc.SNodes { - n.Close() - } - time.Sleep(1 * time.Second) - }() - - err = hc.Run(false, sign.Voter) - if err != nil { - t.Fatal(err) - } - - // give it some time to set up - time.Sleep(2 * time.Second) - - hc.SNodes[0].LogTest = []byte("Hello Voting") - vote := &sign.Vote{Type: sign.RemoveVT, Rv: &sign.RemoveVote{Name: "host5", Parent: "host4"}} - err = hc.SNodes[0].StartVotingRound(vote) - - if err != nil { - t.Error(err) - } -}
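*(Editor's note, not part of the patch.)* Taken together, the removed `initResponseCrypto`, `FinalizeCommits`, `VerifyResponses` and `hashElGamal` implement a collective Schnorr signature: the root derives the challenge from the aggregate commitment `V_hat` and the Merkle root (or the announcement message in `PubKey` mode), each node answers with `r = v - x*c`, responses and keys are aggregated up the tree, and every node checks that `base^r_hat * X_hat^c` recreates `V_hat`. The following self-contained sketch replays that check for a single node against the same dedis/crypto API the deleted code uses; it is illustrative only — the names `x`, `v`, `hashToSecret` and the message are assumptions, not identifiers from the patch:

```go
package main

import (
	"fmt"

	"github.com/dedis/crypto/abstract"
	"github.com/dedis/crypto/edwards"
)

// hashToSecret mirrors the hashElGamal pattern of the deleted code:
// derive a challenge secret from a commitment point and a message.
func hashToSecret(suite abstract.Suite, message []byte, p abstract.Point) abstract.Secret {
	pb, _ := p.MarshalBinary()
	c := suite.Cipher(pb)
	c.Message(nil, nil, message)
	return suite.Secret().Pick(c)
}

func main() {
	suite := edwards.NewAES128SHA256Ed25519(true)
	rand := suite.Cipher([]byte("example"))

	x := suite.Secret().Pick(rand) // long-term private key (PrivKey in the node)
	X := suite.Point().Mul(nil, x) // public key, i.e. X_hat for a single node
	v := suite.Secret().Pick(rand) // per-round commitment secret (round.Log.v)
	V := suite.Point().Mul(nil, v) // commitment point, i.e. V_hat

	// Challenge and response, as in FinalizeCommits / initResponseCrypto.
	c := hashToSecret(suite, []byte("Hello World"), V)
	r := suite.Secret().Sub(v, suite.Secret().Mul(x, c)) // r = v - x*c

	// The check performed by VerifyResponses:
	// base^r_hat * X_hat^c must recreate V_hat.
	T := suite.Point().Add(suite.Point().Mul(nil, r), suite.Point().Mul(X, c))
	fmt.Println("collective signature verifies:", T.Equal(V))
}
```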