From 18e032dc22af57722a2fb7e1817b9bb4c0bf10e8 Mon Sep 17 00:00:00 2001 From: Alex Dovzhanyn Date: Thu, 27 Sep 2018 22:02:47 -0400 Subject: [PATCH 1/4] major rewrite to better handle forking. not fully done --- lib/elixium_node.ex | 136 +++++++++++++++++++++++++++++++++++----- lib/elixium_node_app.ex | 4 +- 2 files changed, 121 insertions(+), 19 deletions(-) diff --git a/lib/elixium_node.ex b/lib/elixium_node.ex index 98db9f5..59c5d42 100644 --- a/lib/elixium_node.ex +++ b/lib/elixium_node.ex @@ -7,27 +7,65 @@ defmodule ElixiumNode do alias Elixium.Store.Ledger alias Elixium.P2P.Peer alias Elixium.Pool.Orphan + require IEx - def start_link(chain) do + def start_link do GenServer.start_link(__MODULE__, {}) end - def init(_state), do: {:ok, {}} - - def handle_info(msg, _state) do - case msg do - header = %{type: "BLOCK_HEADER"} -> IO.inspect header - block = %{type: "BLOCK"} -> - # Check if we've already received a block at this index. If we have, - # diff it against the one we've stored. - case Ledger.block_at_height(block.index) do - :none -> evaluate_new_block(block) - stored_block -> handle_possible_fork(block, stored_block) - end - _ -> IO.puts "Didnt match" - end + def init(_state), do: {:ok, %{transactions: []}} + + def handle_info(msg, state) do + state = + case msg do + header = %{type: "BLOCK_HEADER"} -> + IO.inspect header + state + block = %{type: "BLOCK"} -> + IO.inspect(block, limit: :infinity) + # Check if we've already received a block at this index. If we have, + # diff it against the one we've stored. If we haven't, check to see + # if this index is the next index in the chain. 
In the case that its + # not, we've likely found a new longest chain, so we need to evaluate + # whether or not we want to switch to that chain + case Ledger.block_at_height(block.index) do + :none -> + last_block = Ledger.last_block() + + if block.index == last_block.index + 1 && block.previous_hash == last_block.hash do + # TODO: Revisit this logic; when receiving a block at the current expected + # index, we're dropping the block since we don't check if the block is + # building on a fork, we just assume that it's building on our chain, + # so validation fails, since the blocks previous_hash will be different. + evaluate_new_block(block) + else + evaluate_chain_swap(block) + end + stored_block -> handle_possible_fork(block, stored_block) + end + state + transaction = %{type: "TRANSACTION"} -> + IO.inspect(transaction) - {:noreply, {}} + # Don't re-validate and re-send a transaction we've already received. + # This eliminates looping issues where nodes pass the same transaction + # back and forth. + new_state = + if !Enum.member?(state.transactions, transaction) && Validator.valid_transaction?(transaction) do + Logger.info("Received valid transaction #{transaction.id}. Forwarding to peers.") + Peer.gossip("TRANSACTION", transaction) + + %{state | transactions: [transaction | state.transactions]} + else + state + end + + _ -> + IO.puts "Didnt match" + state + end + + {:noreply, state} end @spec evaluate_new_block(Block) :: none @@ -58,7 +96,9 @@ defmodule ElixiumNode do case Block.diff_header(existing_block, block) do # If there is no diff, just skip the block - [] -> :no_diff + [] -> + Logger.info("Same block.") + :no_diff diff -> Logger.warn("Fork block received! 
Checking existing orphan pool...") @@ -93,8 +133,70 @@ defmodule ElixiumNode do # This block might be a fork of a block that we have stored in our # orphan pool Logger.warn("Possibly extension of existing fork") + Orphan.add(block) end end end end + + # Check that a given fork is valid, and if it is, swap to the fork + @spec evaluate_chain_swap(Block) :: none + defp evaluate_chain_swap(block) do + # Rebuild the chain backwards until reaching a point where we agree on the + # same blocks as the fork does. + {fork_chain, fork_source} = rebuild_fork_chain(block) + + # Traverse the fork chain, making sure each block is valid within its own + # context. + # TODO: Make validation difficulty dynamic + {_, validation_results} = + fork_chain + |> Enum.scan({fork_source, []}, fn (block, {last, results}) -> + {block, [Validator.is_block_valid?(block, 5.0, last) | results]} + end) + |> List.last() + + # Ensure that every block passed validation + if Enum.all?(validation_results, &(&1 == :ok)) do + Logger.info("Candidate fork chain valid. Switching.") + + fork_chain + |> Enum.reverse() + |> Enum.flat_map(&parse_transaction_inputs/1) + |> IO.inspect + # TODO: continue this. + else + Logger.info("Evaluated candidate fork chain. Not viable for switch.") + end + end + + def rebuild_fork_chain(chain) when is_list(chain) do + case Orphan.blocks_at_height(hd(chain).index - 1) do + [] -> + IO.puts "got to false" + false + orphan_blocks -> + orphan_blocks + |> Enum.filter(fn {_, block} -> block.hash == hd(chain).previous_hash end) + |> Enum.find_value(fn {_, candidate_orphan}-> + # Check if we agree on a previous_hash + case Ledger.retrieve_block(candidate_orphan.previous_hash) do + # We need to dig deeper... + :not_found -> rebuild_fork_chain([candidate_orphan | chain]) + # We found the source of this fork. 
Return the chain we've accumulated + fork_source -> {[candidate_orphan | chain], fork_source} + end + end) + end + end + + def rebuild_fork_chain(block), do: rebuild_fork_chain([block]) + + # Return a list of all transaction inputs for every transaction in this block + @spec parse_transaction_inputs(Block) :: list + defp parse_transaction_inputs(block) do + block.transactions + |> Enum.flat_map(&(&1.inputs)) + |> Enum.map(&(Map.delete(&1, :signature))) + end end diff --git a/lib/elixium_node_app.ex b/lib/elixium_node_app.ex index adc7c1d..6687076 100644 --- a/lib/elixium_node_app.ex +++ b/lib/elixium_node_app.ex @@ -10,9 +10,9 @@ defmodule ElixiumNodeApp do Ledger.initialize() Utxo.initialize() Orphan.initialize() - chain = Blockchain.initialize() + Blockchain.initialize() - {:ok, comm_pid} = ElixiumNode.start_link(chain) + {:ok, comm_pid} = ElixiumNode.start_link() if port = Application.get_env(:elixium_node, :port) do Peer.initialize(comm_pid, port) From 9c40d05cb99e20f7c48bd6fb103d72636cab2571 Mon Sep 17 00:00:00 2001 From: Alex Dovzhanyn Date: Sun, 7 Oct 2018 14:49:28 -0400 Subject: [PATCH 2/4] tie in with forking logic from core --- lib/elixium_node.ex | 95 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 84 insertions(+), 11 deletions(-) diff --git a/lib/elixium_node.ex b/lib/elixium_node.ex index 59c5d42..4c5554e 100644 --- a/lib/elixium_node.ex +++ b/lib/elixium_node.ex @@ -7,6 +7,7 @@ defmodule ElixiumNode do alias Elixium.Store.Ledger alias Elixium.P2P.Peer alias Elixium.Pool.Orphan + alias Elixium.Store.Utxo require IEx def start_link do @@ -146,25 +147,62 @@ defmodule ElixiumNode do # same blocks as the fork does. 
{fork_chain, fork_source} = rebuild_fork_chain(block) + current_utxos_in_pool = Utxo.retrieve_all_utxos() + + # Blocks which need to be reversed + blocks_to_reverse = + (fork_source.index + 1)..Ledger.last_block().index + |> Enum.map(&Ledger.block_at_height/1) + + # Find transaction inputs that need to be reversed + all_canonical_transaction_inputs_since_fork = + Enum.flat_map(blocks_to_reverse, &parse_transaction_inputs/1) + + canon_output_txoids = + blocks_to_reverse + |> Enum.flat_map(&parse_transaction_outputs/1) + |> Enum.map(& &1.txoid) + + # Pool at the time of fork is basically just current pool plus all inputs + # used in canon chain since fork, minus all outputs created in after fork + # (this will also remove inputs that were created as outputs and used in + # the fork) + pool = + current_utxos_in_pool ++ all_canonical_transaction_inputs_since_fork + |> Enum.filter(&(!Enum.member?(canon_output_txoids, &1.txoid))) + # Traverse the fork chain, making sure each block is valid within its own # context. - # TODO: Make validation difficulty dynamic - {_, validation_results} = + {_, final_contextual_pool, validation_results} = fork_chain - |> Enum.scan({fork_source, []}, fn (block, {last, results}) -> - {block, [Validator.is_block_valid?(block, 5.0, last) | results]} - end) + |> Enum.scan({fork_source, pool, []}, &validate_in_context/2) |> List.last() # Ensure that every block passed validation - if Enum.all?(validation_results, &(&1 == :ok)) do + if Enum.all?(validation_results, & &1) do Logger.info("Candidate fork chain valid. Switching.") - fork_chain - |> Enum.reverse() - |> Enum.flat_map(&parse_transaction_inputs/1) - |> IO.inspect - # TODO: continue this. 
+ # Add everything in final_contextual_pool that is not also in current_utxos_in_pool + Enum.each(final_contextual_pool -- current_utxos_in_pool, &Utxo.add_utxo/1) + + # Remove everything in current_utxos_in_pool that is not also in final_contextual_pool + current_utxos_in_pool -- final_contextual_pool + |> Enum.map(& &1.txoid) + |> Enum.each(&Utxo.remove_utxo/1) + + # Drop canon chain blocks from the ledger, add them to the orphan pool + # in case the chain gets revived by another miner + Enum.each(blocks_to_reverse, fn blk -> + Orphan.add(blk) + Ledger.drop_block(blk) + end) + + # Remove fork chain from orphan pool; now it becomes the canon chain, + # so we add its blocks to the ledger + Enum.each(fork_chain, fn blk -> + Ledger.append_block(blk) + Orphan.remove(blk) + end) else Logger.info("Evaluated candidate fork chain. Not viable for switch.") end @@ -199,4 +237,39 @@ defmodule ElixiumNode do |> Enum.flat_map(&(&1.inputs)) |> Enum.map(&(Map.delete(&1, :signature))) end + + @spec parse_transaction_outputs(Block) :: list + defp parse_transaction_outputs(block), do: Enum.flat_map(block.transactions, &(&1.outputs)) + + defp validate_in_context(block, {last, pool, results}) do + # TODO: Make validation difficulty dynamic + valid = :ok == Validator.is_block_valid?(block, 5.0, last, &(pool_check(pool, &1))) + + # Update the contextual utxo pool by removing spent inputs and adding + # unspent outputs from this block. 
The following block will use the updated + # contextual pool for utxo validation + updated_pool = + if valid do + block_input_txoids = + block + |> parse_transaction_inputs() + |> Enum.map(& &1.txoid) + + block_outputs = parse_transaction_outputs(block) + + Enum.filter(pool ++ block_outputs, &(!Enum.member?(block_input_txoids, &1.txoid))) + else + pool + end + + {block, updated_pool, [valid | results]} + end + + @spec pool_check(list, map) :: true | false + defp pool_check(pool, utxo) do + case Enum.find(pool, false, & &1.txoid == utxo.txoid) do + false -> false + txo_in_pool -> utxo.amount == txo_in_pool.amount && utxo.addr == txo_in_pool.addr + end + end end From 0e8b0c91fcf8bd096b72c846b01ca4f10729d041 Mon Sep 17 00:00:00 2001 From: Alex Dovzhanyn Date: Sun, 7 Oct 2018 18:21:06 -0400 Subject: [PATCH 3/4] dynamic difficulty --- lib/elixium_node.ex | 51 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/lib/elixium_node.ex b/lib/elixium_node.ex index 4c5554e..f74525b 100644 --- a/lib/elixium_node.ex +++ b/lib/elixium_node.ex @@ -106,7 +106,6 @@ defmodule ElixiumNode do # Is this a fork of the most recent block? If it is, we don't have an orphan # chain to build on... if Ledger.last_block().index == block.index do - # TODO: validate orphan block in context of its chain state before adding it Logger.warn("Received fork of current block") Orphan.add(block) else @@ -126,7 +125,6 @@ defmodule ElixiumNode do Logger.warn("Received orphan block with no reference to a known block. Dropping orphan") canonical_block -> # This block is a fork of a canonical block. - # TODO: Validate this fork in context of the chain state at this point in time Logger.warn("Fork of canonical block received") Orphan.add(block) end @@ -147,6 +145,19 @@ defmodule ElixiumNode do # same blocks as the fork does. {fork_chain, fork_source} = rebuild_fork_chain(block) + # Calculate the difficulty that we were looking for at the time of the fork. 
+ # First, we need to find the start of the last epoch + start_of_last_epoch = fork_source.index - rem(fork_source.index, Blockchain.diff_rebalance_offset()) + + difficulty = + if start_of_last_epoch >= Blockchain.diff_rebalance_offset() do + end_of_prev_epoch = Ledger.block_at_height(start_of_last_epoch) + beginning_of_prev_epoch = Ledger.block_at_height(start_of_last_epoch - Blockchain.diff_rebalance_offset()) + Blockchain.recalculate_difficulty(beginning_of_prev_epoch, end_of_prev_epoch) + else + fork_source.difficulty + end + current_utxos_in_pool = Utxo.retrieve_all_utxos() # Blocks which need to be reversed @@ -173,9 +184,9 @@ defmodule ElixiumNode do # Traverse the fork chain, making sure each block is valid within its own # context. - {_, final_contextual_pool, validation_results} = + {_, final_contextual_pool, _difficulty, _fork_chain, validation_results} = fork_chain - |> Enum.scan({fork_source, pool, []}, &validate_in_context/2) + |> Enum.scan({fork_source, pool, difficulty, fork_chain, []}, &validate_in_context/2) |> List.last() # Ensure that every block passed validation @@ -241,30 +252,54 @@ defmodule ElixiumNode do @spec parse_transaction_outputs(Block) :: list defp parse_transaction_outputs(block), do: Enum.flat_map(block.transactions, &(&1.outputs)) - defp validate_in_context(block, {last, pool, results}) do - # TODO: Make validation difficulty dynamic - valid = :ok == Validator.is_block_valid?(block, 5.0, last, &(pool_check(pool, &1))) + defp validate_in_context(block, {last, pool, difficulty, chain, results}) do + difficulty = + if rem(block.index, Blockchain.diff_rebalance_offset()) == 0 do + # Check first to see if the beginning of this epoch was within the fork. 
+ # If not, get the epoch start block from the canonical chain + epoch_start = + case Enum.find(chain, & &1.index == block.index - Blockchain.diff_rebalance_offset()) do + nil -> Ledger.block_at_height(block.index - Blockchain.diff_rebalance_offset()) + block -> block + end + + Blockchain.recalculate_difficulty(epoch_start, block) + last.difficulty + else + difficulty + end + + valid = :ok == Validator.is_block_valid?(block, difficulty, last, &(pool_check(pool, &1))) # Update the contextual utxo pool by removing spent inputs and adding # unspent outputs from this block. The following block will use the updated # contextual pool for utxo validation updated_pool = if valid do + # Get a list of this blocks inputs (now that we've deemed it valid) block_input_txoids = block |> parse_transaction_inputs() |> Enum.map(& &1.txoid) + # Get a list of the outputs this block produced block_outputs = parse_transaction_outputs(block) + # Remove all the outputs that were both created and used within this same + # block Enum.filter(pool ++ block_outputs, &(!Enum.member?(block_input_txoids, &1.txoid))) else pool end - {block, updated_pool, [valid | results]} + {block, updated_pool, difficulty, chain, [valid | results]} end + # Function that gets passed to Validator.is_block_valid?/3, telling it how to + # evaluate the pool. We're doing this because by default, the validator uses + # the canonical UTXO pool for validation, but when we're processing a potential + # fork, we won't have the same exact UTXO pool, so we reconstruct one based on + # the fork chain. We then use this pool to verify the existence of a particular + # UTXO in the fork chain. 
@spec pool_check(list, map) :: true | false defp pool_check(pool, utxo) do case Enum.find(pool, false, & &1.txoid == utxo.txoid) do From f1ac42e94f28bbc147c76ee61c541bdd37584d62 Mon Sep 17 00:00:00 2001 From: Alex Dovzhanyn Date: Thu, 6 Dec 2018 11:17:07 -0500 Subject: [PATCH 4/4] update node to work with latest core --- config/config.exs | 11 -- config/dev.exs.sample | 4 - lib/elixium_node.ex | 310 ------------------------------------- lib/elixium_node_app.ex | 25 --- lib/node.ex | 45 ++++++ lib/peer/peer_router.ex | 189 ++++++++++++++++++++++ lib/peer/supervisor.ex | 13 ++ lib/supervisor.ex | 32 ++++ lib/util.ex | 35 +++++ mix.exs | 17 +- mix.lock | 8 +- rel/commands/drop_chain.sh | 3 + rel/commands/genkey.sh | 3 + rel/commands/usage.sh | 3 + rel/config.exs | 62 ++++++++ rel/overlays/run.sh | 8 + rel/plugins/.gitignore | 3 + rel/vm.args | 30 ++++ 18 files changed, 442 insertions(+), 359 deletions(-) delete mode 100644 config/dev.exs.sample delete mode 100644 lib/elixium_node.ex delete mode 100644 lib/elixium_node_app.ex create mode 100644 lib/node.ex create mode 100644 lib/peer/peer_router.ex create mode 100644 lib/peer/supervisor.ex create mode 100644 lib/supervisor.ex create mode 100644 lib/util.ex create mode 100644 rel/commands/drop_chain.sh create mode 100644 rel/commands/genkey.sh create mode 100644 rel/commands/usage.sh create mode 100644 rel/config.exs create mode 100644 rel/overlays/run.sh create mode 100644 rel/plugins/.gitignore create mode 100644 rel/vm.args diff --git a/config/config.exs b/config/config.exs index 93128e3..e69de29 100644 --- a/config/config.exs +++ b/config/config.exs @@ -1,11 +0,0 @@ -use Mix.Config - -config :logger, backends: [:console, {LoggerFileBackend, :info}] - -config :logger, :info, - path: "./log/info.log", - level: :info - -if File.exists?("config/#{Mix.env}.exs") do - import_config "#{Mix.env}.exs" -end diff --git a/config/dev.exs.sample b/config/dev.exs.sample deleted file mode 100644 index f831992..0000000 --- 
a/config/dev.exs.sample +++ /dev/null @@ -1,4 +0,0 @@ -use Mix.Config - -config :elixium_node, - port: 31012 diff --git a/lib/elixium_node.ex b/lib/elixium_node.ex deleted file mode 100644 index f74525b..0000000 --- a/lib/elixium_node.ex +++ /dev/null @@ -1,310 +0,0 @@ -defmodule ElixiumNode do - use GenServer - require Logger - alias Elixium.Validator - alias Elixium.Blockchain - alias Elixium.Blockchain.Block - alias Elixium.Store.Ledger - alias Elixium.P2P.Peer - alias Elixium.Pool.Orphan - alias Elixium.Store.Utxo - require IEx - - def start_link do - GenServer.start_link(__MODULE__, {}) - end - - def init(_state), do: {:ok, %{transactions: []}} - - def handle_info(msg, state) do - state = - case msg do - header = %{type: "BLOCK_HEADER"} -> - IO.inspect header - state - block = %{type: "BLOCK"} -> - IO.inspect(block, limit: :infinity) - # Check if we've already received a block at this index. If we have, - # diff it against the one we've stored. If we haven't, check to see - # if this index is the next index in the chain. In the case that its - # not, we've likely found a new longest chain, so we need to evaluate - # whether or not we want to switch to that chain - case Ledger.block_at_height(block.index) do - :none -> - last_block = Ledger.last_block() - - if block.index == last_block.index + 1 && block.previous_hash == last_block.hash do - # TODO: Revisit this logic; when receiving a block at the current expected - # index, we're dropping the block since we don't check if the block is - # building on a fork, we just assume that it's building on our chain, - # so validation fails, since the blocks previous_hash will be different. - evaluate_new_block(block) - else - evaluate_chain_swap(block) - end - stored_block -> handle_possible_fork(block, stored_block) - end - state - transaction = %{type: "TRANSACTION"} -> - IO.inspect(transaction) - - # Don't re-validate and re-send a transaction we've already received. 
- # This eliminates looping issues where nodes pass the same transaction - # back and forth. - new_state = - if !Enum.member?(state.transactions, transaction) && Validator.valid_transaction?(transaction) do - Logger.info("Received valid transaction #{transaction.id}. Forwarding to peers.") - Peer.gossip("TRANSACTION", transaction) - - %{state | transactions: [transaction | state.transactions]} - else - state - end - - _ -> - IO.puts "Didnt match" - state - end - - {:noreply, state} - end - - @spec evaluate_new_block(Block) :: none - defp evaluate_new_block(block) do - last_block = Ledger.last_block() - - difficulty = - if rem(block.index, Blockchain.diff_rebalance_offset()) == 0 do - new_difficulty = Blockchain.recalculate_difficulty() + last_block.difficulty - IO.puts("Difficulty recalculated! Changed from #{last_block.difficulty} to #{new_difficulty}") - new_difficulty - else - last_block.difficulty - end - - case Validator.is_block_valid?(block, difficulty) do - :ok -> - Logger.info("Block #{block.index} valid.") - Blockchain.add_block(block) - Peer.gossip("BLOCK", block) - err -> Logger.info("Block #{block.index} invalid!") - end - end - - @spec handle_possible_fork(Block, Block) :: none - defp handle_possible_fork(block, existing_block) do - Logger.info("Already have block with index #{existing_block.index}. Performing block diff...") - - case Block.diff_header(existing_block, block) do - # If there is no diff, just skip the block - [] -> - Logger.info("Same block.") - :no_diff - diff -> - Logger.warn("Fork block received! Checking existing orphan pool...") - - # Is this a fork of the most recent block? If it is, we don't have an orphan - # chain to build on... 
- if Ledger.last_block().index == block.index do - Logger.warn("Received fork of current block") - Orphan.add(block) - else - # Check the orphan pool for blocks at the previous height whose hash this - # orphan block references as a previous_hash - case Orphan.blocks_at_height(block.index - 1) do - [] -> - # We don't know of any ORPHAN blocks that this block might be referencing. - # Perhaps this is a fork of a block that we've accepted as canonical into our - # chain? - case Ledger.retrieve_block(block.previous_hash) do - :not_found -> - # If this block doesn't reference and blocks that we know of, we can not - # build a chain using this block -- we can't validate this block at all. - # Our only option is to drop the block. Realistically we shouldn't ever - # get into this situation unless a malicious actor has sent us a fake block. - Logger.warn("Received orphan block with no reference to a known block. Dropping orphan") - canonical_block -> - # This block is a fork of a canonical block. - Logger.warn("Fork of canonical block received") - Orphan.add(block) - end - orphan_blocks -> - # This block might be a fork of a block that we have stored in our - # orphan pool - Logger.warn("Possibly extension of existing fork") - Orphan.add(block) - end - end - end - end - - # Check that a given fork is valid, and if it is, swap to the fork - @spec evaluate_chain_swap(Block) :: none - defp evaluate_chain_swap(block) do - # Rebuild the chain backwards until reaching a point where we agree on the - # same blocks as the fork does. - {fork_chain, fork_source} = rebuild_fork_chain(block) - - # Calculate the difficulty that we were looking for at the time of the fork. 
- # First, we need to find the start of the last epoch - start_of_last_epoch = fork_source.index - rem(fork_source.index, Blockchain.diff_rebalance_offset()) - - difficulty = - if start_of_last_epoch >= Blockchain.diff_rebalance_offset() do - end_of_prev_epoch = Ledger.block_at_height(start_of_last_epoch) - beginning_of_prev_epoch = Ledger.block_at_height(start_of_last_epoch - Blockchain.diff_rebalance_offset()) - Blockchain.recalculate_difficulty(beginning_of_prev_epoch, end_of_prev_epoch) - else - fork_source.difficulty - end - - current_utxos_in_pool = Utxo.retrieve_all_utxos() - - # Blocks which need to be reversed - blocks_to_reverse = - (fork_source.index + 1)..Ledger.last_block().index - |> Enum.map(&Ledger.block_at_height/1) - - # Find transaction inputs that need to be reversed - all_canonical_transaction_inputs_since_fork = - Enum.flat_map(blocks_to_reverse, &parse_transaction_inputs/1) - - canon_output_txoids = - blocks_to_reverse - |> Enum.flat_map(&parse_transaction_outputs/1) - |> Enum.map(& &1.txoid) - - # Pool at the time of fork is basically just current pool plus all inputs - # used in canon chain since fork, minus all outputs created in after fork - # (this will also remove inputs that were created as outputs and used in - # the fork) - pool = - current_utxos_in_pool ++ all_canonical_transaction_inputs_since_fork - |> Enum.filter(&(!Enum.member?(canon_output_txoids, &1.txoid))) - - # Traverse the fork chain, making sure each block is valid within its own - # context. - {_, final_contextual_pool, _difficulty, _fork_chain, validation_results} = - fork_chain - |> Enum.scan({fork_source, pool, difficulty, fork_chain, []}, &validate_in_context/2) - |> List.last() - - # Ensure that every block passed validation - if Enum.all?(validation_results, & &1) do - Logger.info("Candidate fork chain valid. 
Switching.") - - # Add everything in final_contextual_pool that is not also in current_utxos_in_pool - Enum.each(final_contextual_pool -- current_utxos_in_pool, &Utxo.add_utxo/1) - - # Remove everything in current_utxos_in_pool that is not also in final_contextual_pool - current_utxos_in_pool -- final_contextual_pool - |> Enum.map(& &1.txoid) - |> Enum.each(&Utxo.remove_utxo/1) - - # Drop canon chain blocks from the ledger, add them to the orphan pool - # in case the chain gets revived by another miner - Enum.each(blocks_to_reverse, fn blk -> - Orphan.add(blk) - Ledger.drop_block(blk) - end) - - # Remove fork chain from orphan pool; now it becomes the canon chain, - # so we add its blocks to the ledger - Enum.each(fork_chain, fn blk -> - Ledger.append_block(blk) - Orphan.remove(blk) - end) - else - Logger.info("Evaluated candidate fork chain. Not viable for switch.") - end - end - - def rebuild_fork_chain(chain) when is_list(chain) do - case Orphan.blocks_at_height(hd(chain).index - 1) do - [] -> - IO.puts "got to false" - false - orphan_blocks -> - orphan_blocks - |> Enum.filter(fn {_, block} -> block.hash == hd(chain).previous_hash end) - |> Enum.find_value(fn {_, candidate_orphan}-> - # Check if we agree on a previous_hash - case Ledger.retrieve_block(candidate_orphan.previous_hash) do - # We need to dig deeper... - :not_found -> rebuild_fork_chain([candidate_orphan | chain]) - # We found the source of this fork. 
Return the chain we've accumulated - fork_source -> {[candidate_orphan | chain], fork_source} - end - end) - end - end - - def rebuild_fork_chain(block), do: rebuild_fork_chain([block]) - - # Return a list of all transaction inputs for every transaction in this block - @spec parse_transaction_inputs(Block) :: list - defp parse_transaction_inputs(block) do - block.transactions - |> Enum.flat_map(&(&1.inputs)) - |> Enum.map(&(Map.delete(&1, :signature))) - end - - @spec parse_transaction_outputs(Block) :: list - defp parse_transaction_outputs(block), do: Enum.flat_map(block.transactions, &(&1.outputs)) - - defp validate_in_context(block, {last, pool, difficulty, chain, results}) do - difficulty = - if rem(block.index, Blockchain.diff_rebalance_offset()) == 0 do - # Check first to see if the beginning of this epoch was within the fork. - # If not, get the epoch start block from the canonical chain - epoch_start = - case Enum.find(chain, & &1.index == block.index - Blockchain.diff_rebalance_offset()) do - nil -> Ledger.block_at_height(block.index - Blockchain.diff_rebalance_offset()) - block -> block - end - - Blockchain.recalculate_difficulty(epoch_start, block) + last.difficulty - else - difficulty - end - - valid = :ok == Validator.is_block_valid?(block, difficulty, last, &(pool_check(pool, &1))) - - # Update the contextual utxo pool by removing spent inputs and adding - # unspent outputs from this block. 
The following block will use the updated - # contextual pool for utxo validation - updated_pool = - if valid do - # Get a list of this blocks inputs (now that we've deemed it valid) - block_input_txoids = - block - |> parse_transaction_inputs() - |> Enum.map(& &1.txoid) - - # Get a list of the outputs this block produced - block_outputs = parse_transaction_outputs(block) - - # Remove all the outputs that were both created and used within this same - # block - Enum.filter(pool ++ block_outputs, &(!Enum.member?(block_input_txoids, &1.txoid))) - else - pool - end - - {block, updated_pool, difficulty, chain, [valid | results]} - end - - # Function that gets passed to Validator.is_block_valid?/3, telling it how to - # evaluate the pool. We're doing this because by default, the validator uses - # the canonical UTXO pool for validation, but when we're processing a potential - # fork, we won't have the same exact UTXO pool, so we reconstruct one based on - # the fork chain. We then use this pool to verify the existence of a particular - # UTXO in the fork chain. 
- @spec pool_check(list, map) :: true | false - defp pool_check(pool, utxo) do - case Enum.find(pool, false, & &1.txoid == utxo.txoid) do - false -> false - txo_in_pool -> utxo.amount == txo_in_pool.amount && utxo.addr == txo_in_pool.addr - end - end -end diff --git a/lib/elixium_node_app.ex b/lib/elixium_node_app.ex deleted file mode 100644 index 6687076..0000000 --- a/lib/elixium_node_app.ex +++ /dev/null @@ -1,25 +0,0 @@ -defmodule ElixiumNodeApp do - use Application - alias Elixium.Store.Ledger - alias Elixium.Store.Utxo - alias Elixium.Blockchain - alias Elixium.P2P.Peer - alias Elixium.Pool.Orphan - - def start(_type, _args) do - Ledger.initialize() - Utxo.initialize() - Orphan.initialize() - Blockchain.initialize() - - {:ok, comm_pid} = ElixiumNode.start_link() - - if port = Application.get_env(:elixium_node, :port) do - Peer.initialize(comm_pid, port) - else - Peer.initialize(comm_pid) - end - - {:ok, self()} - end -end diff --git a/lib/node.ex b/lib/node.ex new file mode 100644 index 0000000..c0611ba --- /dev/null +++ b/lib/node.ex @@ -0,0 +1,45 @@ +defmodule ElixiumNode do + use Application + + def start(_type, _args) do + print_ascii_header() + Elixium.Store.Ledger.initialize() + + # TODO: Make genesis block mined rather than hard-coded + if !Elixium.Store.Ledger.empty?() do + Elixium.Store.Ledger.hydrate() + end + + Elixium.Store.Utxo.initialize() + Elixium.Store.Oracle.start_link(Elixium.Store.Utxo) + Elixium.Pool.Orphan.initialize() + ElixiumNode.Supervisor.start_link() + end + + def print_ascii_header do + IO.puts "\e[34m + EEEEEEEEEEEEEEEEEEEEEElllllll iiii iiii + E::::::::::::::::::::El:::::l i::::i i::::i + E::::::::::::::::::::El:::::l iiii iiii + EE::::::EEEEEEEEE::::El:::::l + E:::::E EEEEEE l::::l iiiiiii xxxxxxx xxxxxxxiiiiiii uuuuuu uuuuuu mmmmmmm mmmmmmm + E:::::E l::::l i:::::i x:::::x x:::::x i:::::i u::::u u::::u mm:::::::m m:::::::mm + E::::::EEEEEEEEEE l::::l i::::i x:::::x x:::::x i::::i u::::u u::::u m::::::::::mm::::::::::m + 
E:::::::::::::::E l::::l i::::i x:::::xx:::::x i::::i u::::u u::::u m::::::::::::::::::::::m + E:::::::::::::::E l::::l i::::i x::::::::::x i::::i u::::u u::::u m:::::mmm::::::mmm:::::m + E::::::EEEEEEEEEE l::::l i::::i x::::::::x i::::i u::::u u::::u m::::m m::::m m::::m + E:::::E l::::l i::::i x::::::::x i::::i u::::u u::::u m::::m m::::m m::::m + E:::::E EEEEEE l::::l i::::i x::::::::::x i::::i u:::::uuuu:::::u m::::m m::::m m::::m + EE::::::EEEEEEEE:::::El::::::li::::::i x:::::xx:::::x i::::::iu:::::::::::::::uum::::m m::::m m::::m + E::::::::::::::::::::El::::::li::::::i x:::::x x:::::x i::::::i u:::::::::::::::um::::m m::::m m::::m + E::::::::::::::::::::El::::::li::::::i x:::::x x:::::x i::::::i uu::::::::uu:::um::::m m::::m m::::m + EEEEEEEEEEEEEEEEEEEEEElllllllliiiiiiiixxxxxxx xxxxxxxiiiiiiii uuuuuuuu uuuummmmmm mmmmmm mmmmmm + \e[32m + Elixium Core Version #{Application.spec(:elixium_core, :vsn)} Node version #{Application.spec(:elixium_node, :vsn)} + \e[0m + \n + " + + end + +end diff --git a/lib/peer/peer_router.ex b/lib/peer/peer_router.ex new file mode 100644 index 0000000..f918598 --- /dev/null +++ b/lib/peer/peer_router.ex @@ -0,0 +1,189 @@ +defmodule ElixiumNode.PeerRouter do + use GenServer + require Logger + alias Elixium.Node.Supervisor, as: Peer + alias Elixium.Node.LedgerManager + alias Elixium.Store.Ledger + alias Elixium.Pool.Orphan + alias Elixium.Block + alias Elixium.Transaction + alias Elixium.Validator + + def start_link(_args) do + GenServer.start_link(__MODULE__, [], name: __MODULE__) + end + + def init(_args), do: {:ok, []} + + # Handles recieved blocks + def handle_info({block = %{type: "BLOCK"}, caller}, state) do + block = Block.sanitize(block) + + case LedgerManager.handle_new_block(block) do + :ok -> + # We've received a valid block. We need to stop mining the block we're + # currently working on and start mining the new one. We also need to gossip + # this block to all the nodes we know of. 
+ Logger.info("Received valid block #{block.hash} at index #{:binary.decode_unsigned(block.index)}.") + Peer.gossip("BLOCK", block) + + :gossip -> + # For one reason or another, we want to gossip this block without + # restarting our current block calculation. (Perhaps this is a fork block) + Peer.gossip("BLOCK", block) + + {:missing_blocks, fork_chain} -> + # We've discovered a fork, but we can't rebuild the fork chain without + # some blocks. Let's request them from our peer. + query_block(:binary.decode_unsigned(hd(fork_chain).index) - 1, caller) + + :ignore -> :ignore # We already know of this block. Ignore it + :invalid -> Logger.info("Recieved invalid block at index #{:binary.decode_unsigned(block.index)}.") + end + + {:noreply, state} + end + + def handle_info({block_query_request = %{type: "BLOCK_QUERY_REQUEST"}, caller}, state) do + send(caller, {"BLOCK_QUERY_RESPONSE", Ledger.block_at_height(block_query_request.index)}) + + {:noreply, state} + end + + def handle_info({block_query_response = %{type: "BLOCK_QUERY_RESPONSE"}, _caller}, state) do + orphans_ahead = + Ledger.last_block().index + |> :binary.decode_unsigned() + |> Kernel.+(1) + |> Orphan.blocks_at_height() + |> length() + + if orphans_ahead > 0 do + # If we have an orphan with an index that is greater than our current latest + # block, we're likely here trying to rebuild the fork chain and have requested + # a block that we're missing. + # TODO: FETCH BLOCKS + end + + {:noreply, state} + end + + # Handles a batch block query request, where another peer has asked this node to send + # all the blocks it has since a given index. + def handle_info({block_query_request = %{type: "BLOCK_BATCH_QUERY_REQUEST"}, caller}, state) do + # TODO: This is a possible DOS vulnerability if an attacker requests a very + # high amount of blocks. Need to figure out a better way to do this; maybe + # we need to limit the maximum amount of blocks a peer is allowed to request. 
+ last_block = Ledger.last_block()
+
+ blocks =
+ if last_block != :err && block_query_request.starting_at <= :binary.decode_unsigned(last_block.index) do
+ block_query_request.starting_at
+ |> Range.new(:binary.decode_unsigned(last_block.index))
+ |> Enum.map(&Ledger.block_at_height/1)
+ |> Enum.filter(&(&1 != :none))
+ else
+ []
+ end
+
+ send(caller, {"BLOCK_BATCH_QUERY_RESPONSE", %{blocks: blocks}})
+
+ {:noreply, state}
+ end
+
+ # Handles a batch block query response, where we've requested new blocks and are now
+ # getting a response with potentially new blocks
+ def handle_info({block_query_response = %{type: "BLOCK_BATCH_QUERY_RESPONSE"}, _caller}, state) do
+ if length(block_query_response.blocks) > 0 do
+ Logger.info("Recieved #{length(block_query_response.blocks)} blocks from peer.")
+
+ block_query_response.blocks
+ |> Enum.with_index()
+ |> Enum.each(fn {block, i} ->
+ block = Block.sanitize(block)
+
+ if LedgerManager.handle_new_block(block) == :ok do
+ IO.write("Syncing blocks #{round(((i + 1) / length(block_query_response.blocks)) * 100)}% [#{i + 1}/#{length(block_query_response.blocks)}]\r")
+ end
+ end)
+
+ IO.write("Block Sync Complete")
+ end
+
+ {:noreply, state}
+ end
+
+ def handle_info({transaction = %{type: "TRANSACTION"}, _caller}, state) do
+ transaction = Transaction.sanitize(transaction)
+
+ state =
+ if Validator.valid_transaction?(transaction) do
+ if transaction not in state.known_transactions do
+ <<shortid::bytes-size(20), _rest::binary>> = transaction.id
+ Logger.info("Received transaction \e[32m#{shortid}...\e[0m")
+ Peer.gossip("TRANSACTION", transaction)
+
+ %{state | known_transactions: [transaction | state.known_transactions]}
+ end
+ else
+ Logger.info("Received Invalid Transaction. Ignoring.")
+ state
+ end
+
+ {:noreply, state}
+ end
+
+ def handle_info({:new_outbound_connection, handler_pid}, state) do
+ # Let's ask our peer for new blocks, if there
+ # are any. 
We'll ask for all blocks starting from our current index minus + # 120 (4 hours worth of blocks before we disconnected) just in case there + # was a fork after we disconnected. + + starting_at = + case Ledger.last_block() do + :err -> 0 + last_block -> + # Current index minus 120 or 1, whichever is greater. + max(0, :binary.decode_unsigned(last_block.index) - 120) + end + + send(handler_pid, {"BLOCK_BATCH_QUERY_REQUEST", %{starting_at: starting_at}}) + + send(handler_pid, {"PEER_QUERY_REQUEST", %{}}) + + {:noreply, state} + end + + def handle_info({:new_inbound_connection, handler_pid}, state) do + send(handler_pid, {"PEER_QUERY_REQUEST", %{}}) + + {:noreply, state} + end + + def handle_info({%{type: "PEER_QUERY_REQUEST"}, handler_pid}, state) do + peers = + :"Elixir.Elixium.Store.PeerOracle" + |> GenServer.call({:load_known_peers, []}) + |> Enum.take(8) + + send(handler_pid, {"PEER_QUERY_RESPONSE", %{peers: peers}}) + + {:noreply, state} + end + + def handle_info({%{type: "PEER_QUERY_RESPONSE", peers: peers}, _caller}, state) do + Enum.each(peers, fn peer -> + GenServer.call(:"Elixir.Elixium.Store.PeerOracle", {:save_known_peer, [peer]}) + end) + + {:noreply, state} + end + + def handle_info(_, state) do + Logger.warn("Received message that isn't handled by any other case.") + + {:noreply, state} + end + + def query_block(index, caller), do: send(caller, {"BLOCK_QUERY_REQUEST", %{index: index}}) +end diff --git a/lib/peer/supervisor.ex b/lib/peer/supervisor.ex new file mode 100644 index 0000000..0edcbfd --- /dev/null +++ b/lib/peer/supervisor.ex @@ -0,0 +1,13 @@ +defmodule ElixiumNode.PeerRouter.Supervisor do + use Supervisor + + def start_link(_args) do + Supervisor.start_link(__MODULE__, [], name: __MODULE__) + end + + def init(_args) do + children = [ElixiumNode.PeerRouter] + + Supervisor.init(children, strategy: :one_for_one) + end +end diff --git a/lib/supervisor.ex b/lib/supervisor.ex new file mode 100644 index 0000000..a3350ed --- /dev/null +++ 
b/lib/supervisor.ex @@ -0,0 +1,32 @@ +defmodule ElixiumNode.Supervisor do + use Supervisor + + def start_link do + Supervisor.start_link(__MODULE__, [], name: __MODULE__) + end + + def init(_args) do + port = + :port + |> Util.get_arg("-1") + |> String.to_integer() + |> case do + -1 -> nil + p -> p + end + + children = [ + {Elixium.Node.Supervisor, [:"Elixir.ElixiumNode.PeerRouter", port]}, + ElixiumNode.PeerRouter.Supervisor + ] + + children = + if Util.get_arg(:rpc) do + [ElixiumNode.RPC.Supervisor | children] + else + children + end + + Supervisor.init(children, strategy: :one_for_one) + end +end diff --git a/lib/util.ex b/lib/util.ex new file mode 100644 index 0000000..e273ffb --- /dev/null +++ b/lib/util.ex @@ -0,0 +1,35 @@ +defmodule Util do + @moduledoc """ + Extra utilities + """ + + + @doc """ + Gets an option that was passed in as a command line argument + """ + @spec get_arg(atom, any) :: String.t() + def get_arg(arg, not_found \\ nil), do: Map.get(args(), arg, not_found) + + def args do + :init.get_plain_arguments() + |> Enum.at(1) + |> List.to_string() + |> String.split("--") + |> Enum.filter(& &1 != "") + |> Enum.map(fn a -> + kv = + a + |> String.trim() + |> String.replace("=", " ") + |> String.replace(~r/\s+/, " ") + |> String.split(" ") + + case kv do + [key, value] -> {String.to_atom(key), value} + [key] -> {String.to_atom(key), true} + end + end) + |> Map.new() + end + +end diff --git a/mix.exs b/mix.exs index e83cbe5..5b6160b 100644 --- a/mix.exs +++ b/mix.exs @@ -4,7 +4,7 @@ defmodule ElixiumNode.MixProject do def project do [ app: :elixium_node, - version: "0.1.0", + version: "1.0.0", elixir: "~> 1.7", start_permanent: true, deps: deps() @@ -14,17 +14,22 @@ defmodule ElixiumNode.MixProject do # Run "mix help compile.app" to learn about applications. 
def application do [ - mod: {ElixiumNodeApp, []}, - applications: [:logger] + mod: {ElixiumNode, []}, + extra_applications: [ + :ssl, + :logger, + :inets, + :crypto, + :elixium_core + ] ] end # Run "mix help deps" to learn about dependencies. defp deps do [ - {:elixium_core, "~> 0.2"}, - # {:local_dependency, path: "../core", app: false}, - {:logger_file_backend, "~> 0.0.10"} + {:elixium_core, "~> 0.4"}, + {:distillery, "~> 2.0"} ] end end diff --git a/mix.lock b/mix.lock index e99ca61..0465822 100644 --- a/mix.lock +++ b/mix.lock @@ -1,9 +1,11 @@ %{ - "decimal": {:hex, :decimal, "1.5.0", "b0433a36d0e2430e3d50291b1c65f53c37d56f83665b43d79963684865beab68", [:mix], [], "hexpm"}, + "artificery": {:hex, :artificery, "0.2.6", "f602909757263f7897130cbd006b0e40514a541b148d366ad65b89236b93497a", [:mix], [], "hexpm"}, + "decimal": {:hex, :decimal, "1.6.0", "bfd84d90ff966e1f5d4370bdd3943432d8f65f07d3bab48001aebd7030590dcc", [:mix], [], "hexpm"}, + "distillery": {:hex, :distillery, "2.0.12", "6e78fe042df82610ac3fa50bd7d2d8190ad287d120d3cd1682d83a44e8b34dfb", [:mix], [{:artificery, "~> 0.2", [hex: :artificery, repo: "hexpm", optional: false]}], "hexpm"}, "eleveldb": {:hex, :eleveldb, "2.2.20", "1fff63a5055bbf4bf821f797ef76065882b193f5e8095f95fcd9287187773b58", [:rebar3], [], "hexpm"}, - "elixium_core": {:hex, :elixium_core, "0.2.2", "ebb55f6af3d48f19f81842f5a28e82b4e14db4ff69d73e0168c9a6d53e8a480a", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:exleveldb, "~> 0.12.2", [hex: :exleveldb, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:keccakf1600, "~> 2.0.0", [hex: :keccakf1600, repo: "hexpm", optional: false]}, {:strap, "~> 0.1.1", [hex: :strap, repo: "hexpm", optional: false]}], "hexpm"}, + "elixium_core": {:hex, :elixium_core, "0.4.9", "f810ff9ef8dc32ac3b7b2fd299ecc26fcd43aba6d57d3f99c6c70a9833e6f8ba", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: false]}, 
{:exleveldb, "~> 0.12.2", [hex: :exleveldb, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:keccakf1600, "~> 2.0.0", [hex: :keccakf1600, repo: "hexpm", optional: false]}, {:strap, "~> 0.1.1", [hex: :strap, repo: "hexpm", optional: false]}], "hexpm"}, "exleveldb": {:hex, :exleveldb, "0.12.2", "7336156284c94fe4c7eeb1270c69c9d7b59cb852e49e21cd68a2c6a85d87f34e", [:mix], [{:eleveldb, "~> 2.2.20", [hex: :eleveldb, repo: "hexpm", optional: false]}], "hexpm"}, - "jason": {:hex, :jason, "1.1.1", "d3ccb840dfb06f2f90a6d335b536dd074db748b3e7f5b11ab61d239506585eb2", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"}, + "jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"}, "keccakf1600": {:hex, :keccakf1600, "2.0.0", "69d02d844a101bf3c75484c9e334fd04b0f57280727e881cac3bd8240432f43a", [:rebar3], [], "hexpm"}, "logger_file_backend": {:hex, :logger_file_backend, "0.0.10", "876f9f84ae110781207c54321ffbb62bebe02946fe3c13f0d7c5f5d8ad4fa910", [:mix], [], "hexpm"}, "strap": {:hex, :strap, "0.1.1", "90fedb0b51a1c81678cc1386ed9d3d9e6126ac718af4de2a852d730c0fc6d67f", [:mix], [], "hexpm"}, diff --git a/rel/commands/drop_chain.sh b/rel/commands/drop_chain.sh new file mode 100644 index 0000000..02f9c04 --- /dev/null +++ b/rel/commands/drop_chain.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +$RELEASE_ROOT_DIR/bin/elixium_node command Elixir.Command.DropChain run diff --git a/rel/commands/genkey.sh b/rel/commands/genkey.sh new file mode 100644 index 0000000..02c2387 --- /dev/null +++ b/rel/commands/genkey.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +$RELEASE_ROOT_DIR/bin/elixium_node command Elixir.Command.GenKeypair run diff --git a/rel/commands/usage.sh b/rel/commands/usage.sh new file mode 100644 index 0000000..0f540cd --- /dev/null +++ b/rel/commands/usage.sh @@ -0,0 +1,3 @@ +#!/bin/sh + 
+$RELEASE_ROOT_DIR/bin/elixium_node command Elixir.Command.Usage run diff --git a/rel/config.exs b/rel/config.exs new file mode 100644 index 0000000..b944e85 --- /dev/null +++ b/rel/config.exs @@ -0,0 +1,62 @@ +# Import all plugins from `rel/plugins` +# They can then be used by adding `plugin MyPlugin` to +# either an environment, or release definition, where +# `MyPlugin` is the name of the plugin module. +~w(rel plugins *.exs) +|> Path.join() +|> Path.wildcard() +|> Enum.map(&Code.eval_file(&1)) + +use Mix.Releases.Config, + # This sets the default release built by `mix release` + default_release: :default, + # This sets the default environment used by `mix release` + default_environment: Mix.env() + +# For a full list of config options for both releases +# and environments, visit https://hexdocs.pm/distillery/config/distillery.html + + +# You may define one or more environments in this file, +# an environment's settings will override those of a release +# when building in that environment, this combination of release +# and environment configuration is called a profile + +environment :dev do + # If you are running Phoenix, you should make sure that + # server: true is set and the code reloader is disabled, + # even in dev mode. + # It is recommended that you build with MIX_ENV=prod and pass + # the --env flag to Distillery explicitly if you want to use + # dev mode. + set dev_mode: true + set include_erts: false + set cookie: :"*Ce%rHlVKPxIsP6FfXYSaH_8Zip?05&QYuOW}ruYTFBYYohdhq$,fM:K8:E3[>TS" +end + +environment :prod do + set include_erts: true + set include_src: false + set cookie: :"p@|CnzPH9,v[~/lDm,Y:*W>@dq8d:s~b5ofzKc&c1S8]53mj4R!OIQnz@%|Xl&DW" + set vm_args: "rel/vm.args" + set commands: [ + genkey: "rel/commands/genkey.sh", + dropchain: "rel/commands/drop_chain.sh", + usage: "rel/commands/usage.sh" + ] + set overlays: [ + {:copy, "rel/overlays/run.sh", "run.sh"}, + ] +end + +# You may define one or more releases in this file. 
+# If you have not set a default release, or selected one +# when running `mix release`, the first release in the file +# will be used by default + +release :elixium_node do + set version: current_version(:elixium_node) + set applications: [ + :runtime_tools + ] +end diff --git a/rel/overlays/run.sh b/rel/overlays/run.sh new file mode 100644 index 0000000..a87f65d --- /dev/null +++ b/rel/overlays/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +resize -s 50 150 +stty rows 50 +stty cols 150 + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd -P)" + +$SCRIPT_DIR/bin/elixium_node foreground --address EX054sbR7BtpfuwpyFvDwWd27ffUhwrRvSeQHgW27cbDz5YyUM2Ue diff --git a/rel/plugins/.gitignore b/rel/plugins/.gitignore new file mode 100644 index 0000000..4fa3b5c --- /dev/null +++ b/rel/plugins/.gitignore @@ -0,0 +1,3 @@ +*.* +!*.exs +!.gitignore \ No newline at end of file diff --git a/rel/vm.args b/rel/vm.args new file mode 100644 index 0000000..93f34bb --- /dev/null +++ b/rel/vm.args @@ -0,0 +1,30 @@ +## This file provide the arguments provided to the VM at startup +## You can find a full list of flags and their behaviours at +## http://erlang.org/doc/man/erl.html + +## Name of the node +-name <%= release_name %>@127.0.0.1 + +## Cookie for distributed erlang +-setcookie <%= release.profile.cookie %> + +## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive +## (Disabled by default..use with caution!) +##-heart + +## Enable kernel poll and a few async threads +##+K true +##+A 5 +## For OTP21+, the +A flag is not used anymore, +## +SDio replace it to use dirty schedulers +##+SDio 5 + +## Increase number of concurrent ports/sockets +##-env ERL_MAX_PORTS 4096 + +## Tweak GC to run more often +##-env ERL_FULLSWEEP_AFTER 10 + +# Enable SMP automatically based on availability +# On OTP21+, this is not needed anymore. +-smp auto