diff --git a/config/account/test.exs b/config/account/test.exs index 36cfac74..a7413f48 100644 --- a/config/account/test.exs +++ b/config/account/test.exs @@ -6,4 +6,5 @@ config :bcrypt_elixir, :log_rounds, 2 config :helix, Helix.Account.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_account" + database: prefix <> "_test_account", + ownership_timeout: 90_000 diff --git a/config/cache/test.exs b/config/cache/test.exs index 529f3e06..c278dc37 100644 --- a/config/cache/test.exs +++ b/config/cache/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Cache.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_cache" + database: prefix <> "_test_cache", + ownership_timeout: 90_000 diff --git a/config/core/test.exs b/config/core/test.exs index e87ee334..7f427d28 100644 --- a/config/core/test.exs +++ b/config/core/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Core.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_core" + database: prefix <> "_test_core", + ownership_timeout: 90_000 diff --git a/config/entity/test.exs b/config/entity/test.exs index c89197a2..cb4572cc 100644 --- a/config/entity/test.exs +++ b/config/entity/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Entity.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_entity" + database: prefix <> "_test_entity", + ownership_timeout: 90_000 diff --git a/config/hardware/test.exs b/config/hardware/test.exs index 60c51009..116e0c7b 100644 --- a/config/hardware/test.exs +++ b/config/hardware/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Hardware.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_hardware" + database: prefix <> "_test_hardware", + ownership_timeout: 90_000 diff --git a/config/log/test.exs b/config/log/test.exs index c6f926b2..d1b876d1 100644 --- a/config/log/test.exs +++ b/config/log/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Log.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_log" + database: prefix <> "_test_log", + ownership_timeout: 90_000 diff --git a/config/network/test.exs b/config/network/test.exs index f83d514f..2fea9645 100644 --- a/config/network/test.exs +++ b/config/network/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Network.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_network" + database: prefix <> "_test_network", + ownership_timeout: 90_000 diff --git a/config/process/test.exs b/config/process/test.exs index 61ed8fb3..7d2d3267 100644 --- a/config/process/test.exs +++ b/config/process/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Process.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_process" + database: prefix <> "_test_process", + ownership_timeout: 90_000 diff --git a/config/server/test.exs b/config/server/test.exs index 83aca640..7dd9a659 100644 --- a/config/server/test.exs +++ b/config/server/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Server.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_server" + database: prefix <> "_test_server", + ownership_timeout: 90_000 diff --git a/config/software/test.exs 
b/config/software/test.exs index 27f4772b..e4af5150 100644 --- a/config/software/test.exs +++ b/config/software/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Software.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_software" + database: prefix <> "_test_software", + ownership_timeout: 90_000 diff --git a/config/story/test.exs b/config/story/test.exs index e3d6f0a1..67de5909 100644 --- a/config/story/test.exs +++ b/config/story/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Story.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_story" + database: prefix <> "_test_story", + ownership_timeout: 90_000 diff --git a/config/universe/test.exs b/config/universe/test.exs index 97421da6..ec492127 100644 --- a/config/universe/test.exs +++ b/config/universe/test.exs @@ -4,4 +4,5 @@ prefix = System.get_env("HELIX_DB_PREFIX") || "helix" config :helix, Helix.Universe.Repo, pool: Ecto.Adapters.SQL.Sandbox, - database: prefix <> "_test_universe" + database: prefix <> "_test_universe", + ownership_timeout: 90_000 diff --git a/lib/application.ex b/lib/application.ex index 01acef5d..29161bf2 100644 --- a/lib/application.ex +++ b/lib/application.ex @@ -6,7 +6,6 @@ defmodule Helix.Application do def start(_type, _args) do children = [ - # worker(HELF.Broker, []), supervisor(Helix.Endpoint, []), supervisor(Helix.Application.DomainsSupervisor, []) ] @@ -54,6 +53,7 @@ defmodule Helix.Application.DomainsSupervisor do supervisor(Helix.Account.Supervisor, []), supervisor(Helix.Cache.Supervisor, []), supervisor(Helix.Core.Supervisor, []), + supervisor(Helix.Event.Supervisor, []), supervisor(Helix.Entity.Supervisor, []), supervisor(Helix.Hardware.Supervisor, []), supervisor(Helix.Log.Supervisor, []), diff --git a/lib/event/dispatcher.ex b/lib/event/dispatcher.ex index 919fe883..6d051de2 100644 --- a/lib/event/dispatcher.ex +++ b/lib/event/dispatcher.ex @@ -112,6 +112,22 @@ defmodule Helix.Event.Dispatcher do # All event ProcessEvent.Process.Created event ProcessEvent.Process.Completed + event ProcessEvent.Process.Signaled + event ProcessEvent.TOP.BringMeToLife + event ProcessEvent.TOP.Recalcado + + # Custom handlers + event ProcessEvent.Process.Created, + ProcessHandler.TOP, + :recalque_handler + + event ProcessEvent.TOP.BringMeToLife, + ProcessHandler.TOP, + :wake_me_up + + event ProcessEvent.Process.Signaled, + ProcessHandler.Process, + :signal_handler ############################################################################## # Server events diff --git a/lib/event/event.ex b/lib/event/event.ex index 27d9e251..2255f957 100644 --- a/lib/event/event.ex +++ b/lib/event/event.ex @@ -10,6 +10,7 @@ defmodule Helix.Event do alias Helix.Event.Dispatcher, as: HelixDispatcher alias Helix.Event.Meta, as: EventMeta + alias Helix.Event.State.Timer, as: EventTimer alias Helix.Process.Model.Process @type t :: HELF.Event.t @@ -85,6 +86,18 @@ defmodule Helix.Event do def emit(event), do: HelixDispatcher.emit(event) + @spec emit_after([t] | t, interval :: float | non_neg_integer) :: + term + @doc """ + Emits the given event(s) after `interval` milliseconds have passed. 
+ """ + def emit_after([], _), + do: :noop + def emit_after(events = [_|_], interval), + do: Enum.each(events, &(emit_after(&1, interval))) + def emit_after(event, interval), + do: EventTimer.emit_after(event, interval) + @spec inherit(t, t) :: t docp """ diff --git a/lib/event/state/supervisor.ex b/lib/event/state/supervisor.ex new file mode 100644 index 00000000..12e420b5 --- /dev/null +++ b/lib/event/state/supervisor.ex @@ -0,0 +1,20 @@ +defmodule Helix.Event.State.Supervisor do + + use Supervisor + + alias Helix.Event.State.Timer, as: EventTimer + + @doc false + def start_link do + Supervisor.start_link(__MODULE__, []) + end + + @doc false + def init(_) do + children = [ + worker(EventTimer, []) + ] + + supervise(children, strategy: :one_for_one) + end +end diff --git a/lib/event/state/timer.ex b/lib/event/state/timer.ex new file mode 100644 index 00000000..a1752622 --- /dev/null +++ b/lib/event/state/timer.ex @@ -0,0 +1,44 @@ +defmodule Helix.Event.State.Timer do + @moduledoc """ + `EventTimer` is responsible for handling events that were asked to be emitted + sometime in the future. + """ + + use GenServer + + alias Helix.Event + + @registry_name :event_timer + + # Client API + + def start_link, + do: GenServer.start_link(__MODULE__, [], name: @registry_name) + + @spec emit_after(Event.t, interval :: float | non_neg_integer) :: + term + @doc """ + Emits `event` after `interval` milliseconds have passed. + + Unit is in milliseconds! + """ + def emit_after(event, interval), + do: GenServer.call(@registry_name, {:emit_after, event, interval}) + + # Callbacks + + def init(_), + do: {:ok, []} + + def handle_call({:emit_after, event, interval}, _from, state) do + Process.send_after(@registry_name, {:emit, event}, interval) + + {:reply, :ok, state} + end + + def handle_info({:emit, event}, state) do + Event.emit(event) + + {:noreply, state} + end +end diff --git a/lib/event/supervisor.ex b/lib/event/supervisor.ex new file mode 100644 index 00000000..a049584c --- /dev/null +++ b/lib/event/supervisor.ex @@ -0,0 +1,20 @@ +defmodule Helix.Event.Supervisor do + + use Supervisor + + alias Helix.Event.State.Supervisor, as: StateSupervisor + + @doc false + def start_link do + Supervisor.start_link(__MODULE__, []) + end + + @doc false + def init(_) do + children = [ + supervisor(StateSupervisor, []) + ] + + supervise(children, strategy: :one_for_one) + end +end diff --git a/lib/hardware/internal/motherboard.ex b/lib/hardware/internal/motherboard.ex index 9bac689a..40378b2a 100644 --- a/lib/hardware/internal/motherboard.ex +++ b/lib/hardware/internal/motherboard.ex @@ -35,7 +35,7 @@ defmodule Helix.Hardware.Internal.Motherboard do end end - @spec fetch_by_nip(Network.id, NetworkConnection.ip) :: + @spec fetch_by_nip(Network.id, Network.ip) :: Motherboard.t | nil def fetch_by_nip(network_id, ip) do @@ -102,38 +102,33 @@ defmodule Helix.Hardware.Internal.Motherboard do |> get_hdds_from_ids() end - defp get_cpus_from_ids(components) do + def get_cpus_from_ids(components) do components |> Component.CPU.Query.from_components_ids() |> Repo.all() end - defp get_rams_from_ids(components) do + def get_rams_from_ids(components) do components |> Component.RAM.Query.from_components_ids() |> Repo.all() end - defp get_nics_from_ids(components) do + def get_nics_from_ids(components) do components |> Component.NIC.Query.from_components_ids() |> Component.NIC.Query.inner_join_network_connection() |> Repo.all() end - defp get_hdds_from_ids(components) do + def get_hdds_from_ids(components) do components |> 
Component.HDD.Query.from_components_ids() |> Repo.all() end @spec resources(Motherboard.t) :: - %{ - cpu: non_neg_integer, - ram: non_neg_integer, - hdd: non_neg_integer, - net: %{String.t => %{uplink: non_neg_integer, downlink: non_neg_integer}} - } + Motherboard.resources def resources(motherboard) do components_ids = get_components_ids(motherboard) @@ -155,7 +150,7 @@ defmodule Helix.Hardware.Internal.Motherboard do components_ids |> get_nics_from_ids() |> Enum.reduce(%{}, fn el, acc -> - network = to_string(el.network_connection.network_id) + network = el.network_connection.network_id value = Map.take(el.network_connection, [:uplink, :downlink]) sum_map_values = &Map.merge(&1, value, fn _, v1, v2 -> v1 + v2 end) diff --git a/lib/hardware/internal/network_connection.ex b/lib/hardware/internal/network_connection.ex index 74643378..9b7d8746 100644 --- a/lib/hardware/internal/network_connection.ex +++ b/lib/hardware/internal/network_connection.ex @@ -42,7 +42,7 @@ defmodule Helix.Hardware.Internal.NetworkConnection do end end - @spec update_ip(NetworkConnection.t | NetworkConnection.id, NetworkConnection.ip) :: + @spec update_ip(NetworkConnection.idt, NetworkConnection.ip) :: {:ok, NetworkConnection} | {:error, Ecto.Changeset.t} def update_ip(nc = %NetworkConnection{}, new_ip) do diff --git a/lib/hardware/model/motherboard.ex b/lib/hardware/model/motherboard.ex index 107109c5..11d4eb93 100644 --- a/lib/hardware/model/motherboard.ex +++ b/lib/hardware/model/motherboard.ex @@ -5,6 +5,7 @@ defmodule Helix.Hardware.Model.Motherboard do import Ecto.Changeset alias Ecto.Changeset + alias Helix.Network.Model.Network alias Helix.Hardware.Model.Component alias Helix.Hardware.Model.ComponentSpec alias Helix.Hardware.Model.MotherboardSlot @@ -23,6 +24,20 @@ defmodule Helix.Hardware.Model.Motherboard do updated_at: NaiveDateTime.t } + @type resources :: + %{ + cpu: non_neg_integer, + ram: non_neg_integer, + hdd: non_neg_integer, + net: %{ + Network.id => + %{ + uplink: non_neg_integer, + downlink: non_neg_integer + } + } + } + @primary_key false schema "motherboards" do field :motherboard_id, Component.ID, diff --git a/lib/hell/hack.ex b/lib/hell/hack.ex index 93a9c659..77a23c6a 100644 --- a/lib/hell/hack.ex +++ b/lib/hell/hack.ex @@ -41,11 +41,9 @@ defmodule HELL.Hack.Experience do {:join, 3} ], "Elixir.Helix.Process.Model.Processable" => [ - {:dynamic_resources, 1}, - {:state_change, 4}, {:kill, 3}, - {:minimum, 1}, - {:conclusion, 2}, + {:complete, 2}, + {:connection_closed, 3}, {:after_read_hook, 1} ], "Elixir.Helix.Process.Public.View.ProcessViewable" => [ diff --git a/lib/process/model/process/naive_struct.ex b/lib/hell/hell/naive_struct.ex similarity index 90% rename from lib/process/model/process/naive_struct.ex rename to lib/hell/hell/naive_struct.ex index edf8a8ab..1ced154f 100644 --- a/lib/process/model/process/naive_struct.ex +++ b/lib/hell/hell/naive_struct.ex @@ -1,9 +1,5 @@ -defmodule Helix.Process.Model.Process.NaiveStruct do - @moduledoc false - - import HELL.Macros - - docp """ +defmodule HELL.NaiveStruct do + @moduledoc """ This module is a converter that transforms any struct into a non-struct map (while keeping the metadata of which struct that map was) and transforms that map back into it's original struct on runtime. 
diff --git a/lib/log/event/handler/log.ex b/lib/log/event/handler/log.ex index b88c3174..c5711063 100644 --- a/lib/log/event/handler/log.ex +++ b/lib/log/event/handler/log.ex @@ -38,7 +38,7 @@ defmodule Helix.Log.Event.Handler.Log do def log_forge_conclusion(event = %LogForgeCreateComplete{}) do {:ok, _, events} = LogAction.create( - event.target_server_id, + event.target_id, event.entity_id, event.message, event.version) diff --git a/lib/network/action/tunnel.ex b/lib/network/action/tunnel.ex index 97b08681..cd87608a 100644 --- a/lib/network/action/tunnel.ex +++ b/lib/network/action/tunnel.ex @@ -1,10 +1,6 @@ defmodule Helix.Network.Action.Tunnel do - import HELL.Macros - - alias Helix.Server.Henforcer.Server, as: ServerHenforcer alias Helix.Server.Model.Server - alias Helix.Network.Henforcer.Network, as: NetworkHenforcer alias Helix.Network.Internal.Tunnel, as: TunnelInternal alias Helix.Network.Model.Connection alias Helix.Network.Model.Network @@ -57,29 +53,8 @@ defmodule Helix.Network.Action.Tunnel do @spec create_tunnel(Network.t, Server.id, Server.id, [Server.id]) :: {:ok, Tunnel.t} - | create_tunnel_errors - docp """ - Checks if gateway, destination and bounces are valid servers, and if they - are connected to network - Note that those are more or less redundant since the interface (WS or HTTP) - have to convert the input IPs into server_ids anyway - """ defp create_tunnel(network, gateway, destination, bounces) do - with \ - exists? = &ServerHenforcer.server_exists?/1, - {true, _} <- exists?.(gateway) || {:gateway_id, :notfound}, - {true, _} <- exists?.(destination) || {:destination_id, :notfound}, - true <- Enum.all?(bounces, exists?) || {:links, :notfound}, - connected? = &NetworkHenforcer.node_connected?(&1, network.network_id), - true <- connected?.(gateway) || {:gateway_id, :disconnected}, - true <- connected?.(destination) || {:destination_id, :disconnected}, - true <- Enum.all?(bounces, connected?) 
|| {:links, :disconnected} - do - TunnelInternal.create(network, gateway, destination, bounces) - else - error -> - {:error, error} - end + TunnelInternal.create(network, gateway, destination, bounces) end @spec delete(Tunnel.idt) :: diff --git a/lib/network/henforcer/network.ex b/lib/network/henforcer/network.ex index 677036cf..a84e16e3 100644 --- a/lib/network/henforcer/network.ex +++ b/lib/network/henforcer/network.ex @@ -32,45 +32,6 @@ defmodule Helix.Network.Henforcer.Network do end end - ## Delete everything below this line --- - - alias Helix.Hardware.Query.Component, as: ComponentQuery - alias Helix.Hardware.Query.Motherboard, as: MotherboardQuery - alias Helix.Server.Model.Server - alias Helix.Server.Query.Server, as: ServerQuery - alias Helix.Network.Model.Network - alias Helix.Network.Query.Tunnel, as: TunnelQuery - - @spec node_connected?(Server.id, Network.id) :: - boolean - def node_connected?(server, network) do - # FIXME: This looks awful - # FIXME: Test (needs network factory and some patience) - network_id = to_string(network) - with \ - %{motherboard_id: motherboard} <- ServerQuery.fetch(server), - component = %{} <- ComponentQuery.fetch(motherboard), - motherboard = %{} <- MotherboardQuery.fetch(component), - %{net: %{^network_id => _}} <- MotherboardQuery.resources(motherboard) - do - true - else - _ -> - false - end - end - - @spec has_ssh_connection?(Server.id, Server.id) :: - boolean - def has_ssh_connection?(gateway, destination) do - connections_between = TunnelQuery.connections_on_tunnels_between( - gateway, - destination) - connection_types = MapSet.new(connections_between, &(&1.connection_type)) - - MapSet.member?(connection_types, :ssh) - end - @spec valid_origin?( origin :: Server.idtb, gateway :: Server.id, diff --git a/lib/process/action/flow/process.ex b/lib/process/action/flow/process.ex new file mode 100644 index 00000000..f76ad38f --- /dev/null +++ b/lib/process/action/flow/process.ex @@ -0,0 +1,16 @@ +defmodule Helix.Process.Action.Flow.Process do + + import HELF.Flow + + alias Helix.Event + alias Helix.Process.Model.Process + alias Helix.Process.Action.Process, as: ProcessAction + + def signal(process = %Process{}, signal, params) do + flowing do + with {:ok, events} <- ProcessAction.signal(process, signal, params) do + Event.emit(events) + end + end + end +end diff --git a/lib/process/action/process.ex b/lib/process/action/process.ex index 906eb2d8..701a6d1c 100644 --- a/lib/process/action/process.ex +++ b/lib/process/action/process.ex @@ -1,8 +1,6 @@ defmodule Helix.Process.Action.Process do - import HELL.Macros - - alias HELL.IPv4 + alias Helix.Event alias Helix.Entity.Model.Entity alias Helix.Entity.Query.Entity, as: EntityQuery alias Helix.Network.Model.Connection @@ -10,12 +8,13 @@ defmodule Helix.Process.Action.Process do alias Helix.Server.Model.Server alias Helix.Server.Query.Server, as: ServerQuery alias Helix.Software.Model.File - alias Helix.Process.Event.Process.Created, as: ProcessCreatedEvent + alias Helix.Process.Internal.Process, as: ProcessInternal alias Helix.Process.Model.Process alias Helix.Process.Model.Processable - alias Helix.Process.Query.Process, as: ProcessQuery - alias Helix.Process.State.TOP.Manager, as: ManagerTOP - alias Helix.Process.State.TOP.Server, as: ServerTOP + + alias Helix.Process.Event.Process.Completed, as: ProcessCompletedEvent + alias Helix.Process.Event.Process.Created, as: ProcessCreatedEvent + alias Helix.Process.Event.Process.Signaled, as: ProcessSignaledEvent @type on_create :: {:ok, Process.t, 
[ProcessCreatedEvent.t]} @@ -27,203 +26,115 @@ defmodule Helix.Process.Action.Process do @type base_params :: %{ - :gateway_id => Server.idtb, - :target_server_id => Server.idtb, - :process_data => Processable.t, - :process_type => String.t, - optional(:file_id) => File.idtb | nil, - optional(:network_id) => Network.idtb | nil, - optional(:connection_id) => Connection.idtb | nil, - optional(:objective) => map + :gateway_id => Server.id, + :target_id => Server.id, + :data => Processable.t, + :type => Process.type, + :file_id => File.id | nil, + :network_id => Network.id | nil, + :connection_id => Connection.id | nil, + :objective => map, + :l_dynamic => Process.dynamic, + :r_dynamic => Process.dynamic, + :static => Process.static } @spec create(base_params) :: - on_create - @doc """ - Creates a new process - - Each process defines its required arguments. When the process is successfully - created, it'll cause the server to reallocate resources to properly hold it. - - Might return `{:error, :resources}` if the server does not have enough - resources to hold its current processes along with the input process - - ### Examples - - iex> create(%{ - gateway_id: "aa::bb", - target_server_id: "aa::bb", - file_id: "1:2::3", - process_data: %FirewallProcess{version: 1}, - process_type: "firewall_passive" - }) - {:ok, %Process{}, [%{}]} - """ + {:ok, Process.t, [ProcessCreatedEvent.t]} + | {:error, Process.changeset} def create(params) do with \ - {source_entity_id, target_entity_id} <- get_process_entities(params), + source_entity = EntityQuery.fetch_by_server(params.gateway_id), {gateway_ip, target_ip} <- get_process_ips(params), - process_params = prepare_create_params(params, source_entity_id), - pid = get_top(process_params), - {:ok, process} <- ServerTOP.create(pid, process_params) + process_params = prepare_create_params(params, source_entity.entity_id), + {:ok, process} <- ProcessInternal.create(process_params) do event = ProcessCreatedEvent.new( - process, - gateway_ip, - target_entity_id, - target_ip + process, gateway_ip, target_ip, confirmed: false ) {:ok, process, [event]} end end - @spec prepare_create_params(base_params, Entity.id) :: - Process.create_params - defp prepare_create_params(params, source_entity_id), - do: Map.put(params, :source_entity_id, source_entity_id) - - @spec pause(Process.t) :: - :ok - @doc """ - Changes a process state to _paused_ - - This will cause it not to "process" the allocated resources and thus suspend - it's effect. 
- - Some processes might still consume resources (without any progress) on paused - state - - This function is idempotent + @spec delete(Process.t, Process.kill_reason) :: + {:ok, [ProcessCompletedEvent.t]} + def delete(process = %Process{}, reason) do + ProcessInternal.delete(process) - ### Examples + event = + if reason == :completed do + ProcessCompletedEvent.new(process) + else + ProcessCompletedEvent.new(process) + # ProcessKilledEvent.new(process) + end - iex> pause(%Process{}) - :ok - """ - def pause(process) do - process - |> get_top() - |> ServerTOP.pause(process) + {:ok, [event]} end - @spec resume(Process.t) :: - :ok - @doc """ - Changes a process state from _paused_ to _running_ - - This will allow the process to continue processing resources and causing side- - effects - - This function is idempotent + # def pause(process = %Process{}) do + # ProcessInternal.pause(process) - ### Examples + # event = ProcessPausedEvent.new(process) - iex> resume(%Process{}) - :ok - """ - def resume(process) do - process - |> get_top() - |> ServerTOP.resume(process) - end - - @spec priority(Process.t, 0..5) :: - :ok - @doc """ - Changes the priority of a process + # {:ok, [event]} + # end - Effectively this will change how much proportionally the input process will - receive of dynamic resources. The higher the priority, the higher the amount - of dynamic resources a process will receive + @spec signal(Process.t, Process.signal, Process.signal_params) :: + {:ok, [Event.t]} + def signal(process = %Process{}, signal, params \\ %{}) do + {action, events} = signal_handler(signal, process, params) - ### Examples + signaled_event = ProcessSignaledEvent.new(signal, process, action, params) - iex> priority(%Process{}, 1) - :ok - - iex> priority(%Process{}, 5) - :ok - """ - def priority(process, priority) when priority in 0..5 do - process - |> get_top() - |> ServerTOP.priority(process, priority) + {:ok, events ++ [signaled_event]} end - @spec kill(Process.t, atom) :: - :ok - @doc """ - Stops a process with reason `reason` + @spec signal_handler(Process.signal, Process.t, Process.signal_params) :: + {Processable.action, [Event.t]} + defp signal_handler(:SIGTERM, process, _), + do: Processable.complete(process.data, process) - ### Examples + defp signal_handler(:SIGKILL, process, %{reason: reason}), + do: Processable.kill(process.data, process, reason) - iex> kill(%Process{}, :normal) - :ok - """ - def kill(process, _reason) do - process - |> get_top() - |> ServerTOP.kill(process) - end + # defp signal_handler(:SIGSTOP, process, _), + # do: Processable.stop(process.data, process) - @doc false - def reset_processes_on_server(gateway_id) do - case ManagerTOP.get(gateway_id) do - nil -> - :noop - pid -> - processes = ProcessQuery.get_processes_on_server(gateway_id) - ServerTOP.reset_processes(pid, processes) - end - end + # defp signal_handler(:SIGCONT, process, _), + # do: Processable.resume(process.data, process, reason) - @spec get_top(Process.t | base_params | Server.id) :: - pid - docp """ - Given a server/process, return the TOP pid. 
- """ - defp get_top(%Process{gateway_id: gateway_id}), - do: get_top(gateway_id) - defp get_top(%{gateway_id: gateway_id}), - do: get_top(gateway_id) - defp get_top(server_id = %Server.ID{}) do - {:ok, pid} = ManagerTOP.prepare_top(server_id) - pid - end + # defp signal_handler(:SIGPRIO, process, %{priority: priority}), + # do: Processable.priority(process.data, process, priority) - @spec get_process_entities(base_params) :: - {source_entity :: Entity.id, target_entity :: Entity.id} - defp get_process_entities(params) do - source_entity = EntityQuery.fetch_by_server(params.gateway_id) + defp signal_handler(:SIGCONND, process, %{connection: connection}), + do: Processable.connection_closed(process.data, process, connection) - target_entity = - if params.gateway_id == params.target_server_id do - source_entity - else - EntityQuery.fetch_by_server(params.target_server_id) - end + # defp signal_handler(:SIGFILED, process, %{file: file}), + # do: Processable.file_deleted(process.data, process, file) - {source_entity.entity_id, target_entity.entity_id} - end + @spec prepare_create_params(base_params, Entity.id) :: + Process.creation_params + defp prepare_create_params(params, source_entity_id), + do: Map.put(params, :source_entity_id, source_entity_id) @spec get_process_ips(base_params) :: - {gateway_ip :: IPv4.t, target_ip :: IPv4.t} + {gateway_ip :: Network.ip, target_ip :: Network.ip} | {nil, nil} - defp get_process_ips(params = %{network_id: _}) do + defp get_process_ips(%{network_id: nil}), + do: {nil, nil} + defp get_process_ips(params) do gateway_ip = ServerQuery.get_ip(params.gateway_id, params.network_id) target_ip = - if params.gateway_id == params.target_server_id do + if params.gateway_id == params.target_id do gateway_ip else - ServerQuery.get_ip(params.target_server_id, params.network_id) + ServerQuery.get_ip(params.target_id, params.network_id) end {gateway_ip, target_ip} end - - defp get_process_ips(_), - do: {nil, nil} end diff --git a/lib/process/action/top.ex b/lib/process/action/top.ex new file mode 100644 index 00000000..4671891b --- /dev/null +++ b/lib/process/action/top.ex @@ -0,0 +1,226 @@ +defmodule Helix.Process.Action.TOP do + + import HELL.Macros + + alias Helix.Event + alias Helix.Server.Model.Server + alias Helix.Process.Action.Process, as: ProcessAction + alias Helix.Process.Internal.Process, as: ProcessInternal + alias Helix.Process.Model.Process + alias Helix.Process.Model.Processable + alias Helix.Process.Model.TOP + alias Helix.Process.Query.Process, as: ProcessQuery + alias Helix.Process.Query.TOP, as: TOPQuery + + alias Helix.Process.Event.TOP.BringMeToLife, as: TOPBringMeToLifeEvent + alias Helix.Process.Event.TOP.Recalcado, as: TOPRecalcadoEvent + + @type recalque_result :: + {:ok, [Process.t], [TOPRecalcadoEvent.t]} + | {:error, :resources} + + @typep recalque_opts :: term + + @spec complete(Process.t) :: + {:ok, [Event.t]} + | {:error, {:process, :running}} + def complete(process) do + case TOP.Scheduler.simulate(process) do + {:completed, _process} -> + ProcessAction.signal(process, :SIGTERM, %{reason: :completed}) + + {:running, _process} -> + {:error, {:process, :running}} + end + end + + @spec recalque(Server.id, recalque_opts) :: + recalque_result + @spec recalque(Process.t, recalque_opts) :: + %{ + gateway: recalque_result, + target: recalque_result + } + def recalque(process_or_server, alloc_opts \\ []) + + def recalque(%Process{gateway_id: gateway_id, target_id: target_id}, opts) do + %{ + gateway: do_recalque(gateway_id, opts), + target: 
do_recalque(target_id, opts) + } + end + def recalque(server_id = %Server.ID{}, opts), + do: do_recalque(server_id, opts) + + @spec do_recalque(Server.id, recalque_opts) :: + recalque_result + defp do_recalque(server_id, alloc_opts) do + resources = TOPQuery.load_top_resources(server_id) + processes = ProcessQuery.get_processes_on_server(server_id) + + case TOP.Allocator.allocate(server_id, resources, processes, alloc_opts) do + {:ok, allocation_result} -> + processes = schedule(allocation_result) + event = TOPRecalcadoEvent.new(server_id) + + {:ok, processes, [event]} + + {:error, :resources, _} -> + {:error, :resources} + end + end + + @spec schedule(TOP.Allocator.allocation_successful) :: + [Process.t] + defp schedule(%{allocated: processes, dropped: _dropped}) do + # Organize all processes in two groups: the local ones and the remote ones + # A local process was started on this very server, while a remote process + # was started somewhere else and *targets* this server. + # (The `local?` variable was set on the Allocator). + # This organization is useful because we can only forecast local processes. + # (A process may be completed only on its local server; so the remote + # processes here that are not being forecast will be forecast during *their* + # server's TOP recalque, which should happen shortly). + local_processes = Enum.filter(processes, &(&1.local? == true)) + remote_processes = Enum.filter(processes, &(&1.local? == false)) + + # Forecast will be used to figure out which process is the next to be + # completed. This is the first - and only - time these processes will be + # simulated, so we have to ensure the return of `forecast/1` is served as + # input for the Checkpoint step below. + forecast = TOP.Scheduler.forecast(local_processes) + + # This is our new list of (local) processes. It accounts for all processes + # that are not completed, so it contains: + # - paused processes + # - running processes + # - processes awaiting allocation + local_processes = forecast.paused ++ forecast.running + + # On a separate thread, we'll "handle" the forecast above. Basically we'll + # track the completion date of the `next`-to-be-completed process. + # Here we also deal with processes that were deemed already completed by the + # simulation. + hespawn fn -> handle_forecast(forecast) end + + # Recreate the complete process list, filtering out the ones that were + # already completed (see Forecast step above) + processes = local_processes ++ remote_processes + + # The Checkpoint step is done to update the processes with their new + # allocation, as well as the amount of work done previously on `processed`. + # We'll accumulate all processes that should be updated to a list, which + # will later be passed on to `handle_checkpoint`. + {processes, processes_to_update} = + Enum.reduce(processes, {[], []}, fn process, {acc_procs, acc_update} -> + + # Call `Scheduler.checkpoint/2`, which will let us know if we should + # update the process or not. + # Also accumulates the new process (may have changed `allocated` and + # `last_checkpoint_time`). + case TOP.Scheduler.checkpoint(process) do + {true, changeset} -> + process = Ecto.Changeset.apply_changes(changeset) + {acc_procs ++ [process], acc_update ++ [changeset]} + + false -> + {acc_procs ++ [process], acc_update} + end + end) + + # Based on the return of `checkpoint` above, we've accumulated all processes + # that should be updated. 
They will be passed to `handle_checkpoint`, which + # shall be responsible for properly handling this update in a transaction. + hespawn(fn -> handle_checkpoint(processes_to_update) end) + + # Returns a list of all processes the server now has (excluding completed + # ones). The processes in this list are updated with the new `allocation`, + # `processed` and `last_checkpoint_time`. + # Notice that this updated data hasn't been persisted to the DB yet. The + # update is performed asynchronously, in a background process. + processes + end + + @spec handle_forecast(TOP.Scheduler.forecast) :: + term + docp """ + `handle_forecast` aggregates the `Scheduler.forecast/1` result and guides it + to the corresponding handlers. Check `handle_completed/1` and `handle_next/1` + for a detailed explanation of each one. + """ + defp handle_forecast(%{completed: completed, next: next}) do + handle_completed(completed) + handle_next(next) + end + + @spec handle_completed([Process.t]) :: + term + docp """ + `handle_completed` receives processes that according to `Scheduler.forecast/1` + have already finished. We'll then complete each one and emit their + corresponding events. + + For most recalques and forecasts, this function should receive an empty list. + This is sort-of a "never should happen" scenario, but one which we are able to + handle gracefully if it does. + + Most process completion cases are handled either by `TOPBringMeToLifeEvent` or + calling `TOPAction.complete/1` directly once the Helix application boots up. + + Note that this function emits an event. This is "wrong", as "Action-style" + modules, within our architecture, are not supposed to emit events. However, + `handle_completed` happens within a spawned process, and as such the resulting + events cannot be sent back to the original Handler/ActionFlow caller. + + Emits event. + """ + defp handle_completed([]), + do: :noop + defp handle_completed(completed) do + Enum.each(completed, fn completed_process -> + with {:ok, events} <- complete(completed_process) do + Event.emit(events) + end + end) + end + + @spec handle_next({Process.t, Process.time_left}) :: + term + docp """ + `handle_next` will receive the "next-to-be-completed" process, as defined by + `Scheduler.forecast/1`. If a tuple is received, then we know there's a process + that will be completed soon, and we'll sleep during the remaining time. + Once the process is (supposedly) completed, TOP will receive the + `TOPBringMeToLifeEvent`, which shall confirm the completion and actually + complete the task. + + Emits TOPBringMeToLifeEvent.t after `time_left` seconds have elapsed. + """ + defp handle_next({process, time_left}) do + wake_me_up = TOPBringMeToLifeEvent.new(process) + save_me = time_left * 1000 |> trunc() + + # Wakes me up inside + Event.emit_after(wake_me_up, save_me) + end + defp handle_next(_), + do: :noop + + @spec handle_checkpoint([Process.t]) :: + term + docp """ + `handle_checkpoint` is responsible for handling the result of + `Scheduler.checkpoint/1`, called during the `recalque` above. + + It receives the *changeset* of the process, ready to be updated directly. No + further changes are required (as far as TOP is concerned). + + These changes include the new `allocated` information, as well as the updated + `last_checkpoint_time`. + + Ideally these changes should occur in an atomic (as in ACID-atomic) way. The + `ProcessInternal.batch_update/1` handles the transaction details.
+ """ + defp handle_checkpoint(processes), + do: ProcessInternal.batch_update(processes) +end diff --git a/lib/process/event/handler/process.ex b/lib/process/event/handler/process.ex new file mode 100644 index 00000000..9c164a44 --- /dev/null +++ b/lib/process/event/handler/process.ex @@ -0,0 +1,26 @@ +defmodule Helix.Process.Event.Handler.Process do + + alias Helix.Event + alias Helix.Process.Action.Process, as: ProcessAction + + alias Helix.Process.Event.Process.Signaled, as: ProcessSignaledEvent + + def signal_handler(event = %ProcessSignaledEvent{}) do + event.action + |> action_handler(event.process, event.params) + |> Enum.map(&(Event.set_process_id(&1, event.process.process_id))) + |> Event.emit(from: event) + end + + defp action_handler(:delete, process, %{reason: reason}) do + {:ok, events} = ProcessAction.delete(process, reason) + + events + end + + # defp action_handler(:pause, process, _) do + # {:ok, events} = ProcessAction.pause(process) + + # events + # end +end diff --git a/lib/process/event/handler/top.ex b/lib/process/event/handler/top.ex index a3d1043c..d880f85b 100644 --- a/lib/process/event/handler/top.ex +++ b/lib/process/event/handler/top.ex @@ -1,15 +1,81 @@ defmodule Helix.Process.Event.Handler.TOP do @moduledoc false + alias Helix.Event alias Helix.Network.Event.Connection.Closed, as: ConnectionClosedEvent + alias Helix.Process.Action.Flow.Process, as: ProcessFlow + alias Helix.Process.Action.TOP, as: TOPAction + alias Helix.Process.Model.Process alias Helix.Process.Query.Process, as: ProcessQuery - alias Helix.Process.Action.Process, as: ProcessAction - # TODO: Ensure that the processes are killed (by making `kill` blocking - # probably) + alias Helix.Process.Event.Process.Created, as: ProcessCreatedEvent + alias Helix.Process.Event.TOP.BringMeToLife, as: TOPBringMeToLifeEvent + + def wake_me_up(event = %TOPBringMeToLifeEvent{}) do + process = ProcessQuery.fetch(event.process_id) + + if process do + case TOPAction.complete(process) do + {:ok, events} -> + Event.emit(events) + + # Can't wake up + {:error, {:process, :running}} -> + # Weird but could happen. 
Recalculate the TOP just in case + call_recalque(process) + end + end + end + + def recalque_handler(event = %ProcessCreatedEvent{confirmed: false}) do + case call_recalque(event.process) do + {true, _} -> + event + |> ProcessCreatedEvent.new() + |> Event.emit(from: event) + + _ -> + event + # |> ProcessCreateFailedEvent.new() + # |> Event.emit + end + end + + def recalque_handler(%_{confirmed: true}), + do: :noop + + @spec call_recalque(Process.t) :: + {gateway_recalque :: boolean, target_recalque :: boolean} + defp call_recalque(process = %Process{}) do + %{gateway: gateway_recalque, target: target_recalque} = + TOPAction.recalque(process) + + gateway_recalque = + case gateway_recalque do + {:ok, _processes, events} -> + Event.emit(events) + true + + _ -> + false + end + + target_recalque = + case target_recalque do + {:ok, _processes, events} -> + Event.emit(events) + true + + _ -> + false + end + + {gateway_recalque, target_recalque} + end + def connection_closed(event = %ConnectionClosedEvent{}) do event.connection.connection_id |> ProcessQuery.get_processes_on_connection() - |> Enum.each(&ProcessAction.kill(&1, :connection_closed)) + |> Enum.each(&ProcessFlow.signal(&1, :SIGCONND, event)) end end diff --git a/lib/process/event/process.ex b/lib/process/event/process.ex index dee6f900..1bc51348 100644 --- a/lib/process/event/process.ex +++ b/lib/process/event/process.ex @@ -4,45 +4,44 @@ defmodule Helix.Process.Event.Process do event Created do - alias Helix.Entity.Model.Entity alias Helix.Network.Model.Network alias Helix.Server.Model.Server alias Helix.Process.Model.Process @type t :: %__MODULE__{ process: Process.t, + confirmed: boolean, gateway_id: Server.id, target_id: Server.id, - gateway_entity_id: Entity.id, - target_entity_id: Entity.id, gateway_ip: Network.ip, target_ip: Network.ip } event_struct [ :process, + :confirmed, :gateway_id, :target_id, - :gateway_entity_id, - :target_entity_id, :gateway_ip, :target_ip ] - @spec new(Process.t, Network.ip, Entity.id, Network.ip) :: - t - def new(process = %Process{}, source_ip, target_entity_id, target_ip) do + # @spec new(Process.t, Network.ip, Network.ip, [optimistic: boolean]) :: + # t + def new(process = %Process{}, source_ip, target_ip, confirmed: confirmed) do %__MODULE__{ process: process, + confirmed: confirmed, gateway_id: process.gateway_id, - target_id: process.target_server_id, - gateway_entity_id: process.source_entity_id, - target_entity_id: target_entity_id, + target_id: process.target_id, gateway_ip: source_ip, target_ip: target_ip } end + def new(event = %__MODULE__{confirmed: false}), + do: %{event| confirmed: true} + notify do @event :process_created @@ -66,7 +65,7 @@ defmodule Helix.Process.Event.Process do process because of 1. Hence, this rule (3) only applies to third-parties connecting to the attack target. 
""" - def generate_payload(event, socket) do + def generate_payload(event = %_{confirmed: true}, socket) do gateway_id = socket.assigns.gateway.server_id destination_id = socket.assigns.destination.server_id @@ -95,6 +94,10 @@ defmodule Helix.Process.Event.Process do end end + # Internal event used for optimistic (asynchronous) processing + def generate_payload(%_{confirmed: false}, _), + do: :noreply + defp do_payload(event, _socket, opts \\ []) do file_id = event.process.file_id && to_string(event.process.file_id) connection_id = @@ -102,7 +105,7 @@ defmodule Helix.Process.Event.Process do data = %{ process_id: to_string(event.process.process_id), - type: to_string(event.process.process_type), + type: to_string(event.process.type), network_id: to_string(event.process.network_id), file_id: file_id, connection_id: connection_id, @@ -133,26 +136,25 @@ defmodule Helix.Process.Event.Process do event Completed do @moduledoc """ - This event is used solely to update the TOP display on the client. + `ProcessCompletedEvent` is fired after a process has met its objective, and + the corresponding `Processable.conclusion/2` callback was executed. + + It's used to notify the Client a process has finished. """ - alias Helix.Event - alias Helix.Server.Model.Server alias Helix.Process.Model.Process + event_struct [:process] + @type t :: %__MODULE__{ - gateway_id: Server.id, - target_id: Server.id + process: Process.t } - event_struct [:gateway_id, :target_id] - @spec new(Process.t) :: t def new(process = %Process{}) do %__MODULE__{ - gateway_id: process.gateway_id, - target_id: process.target_server_id + process: process } end @@ -162,14 +164,45 @@ defmodule Helix.Process.Event.Process do def generate_payload(event, _socket) do data = %{ - process_id: Event.get_process_id(event) + process_id: event.process.process_id } {:ok, data} end def whom_to_notify(event), - do: %{server: [event.gateway_id, event.target_id]} + do: %{server: [event.process.gateway_id, event.process.target_id]} + end + end + + event Signaled do + @moduledoc """ + `ProcessSignaledEvent` is fired when the process receives a signal. A signal + is an instruction to the process, which shall be handled by `Processable`. + If the process does not implement the corresponding handler, then the + signal's default action will be performed. + + This is the probably the single most important event of the TOP - and the + game - since all changes in a process, including its completion, are handled + by signals being delivered to it. + + Granted, `ProcessSignaledEvent` is emitted *after* the signal was delivered + and handled by the corresponding Processable implementation, but the actual + change to the process (defined at `action`) will be performed once this + event is emitted. + """ + + alias Helix.Process.Model.Process + + event_struct [:process, :action, :signal, :params] + + def new(signal, process = %Process{}, action, params) do + %__MODULE__{ + signal: signal, + process: process, + action: action, + params: params + } end end end diff --git a/lib/process/event/top.ex b/lib/process/event/top.ex new file mode 100644 index 00000000..f1651478 --- /dev/null +++ b/lib/process/event/top.ex @@ -0,0 +1,58 @@ +defmodule Helix.Process.Event.TOP do + + import Helix.Event + + event BringMeToLife do + + alias Helix.Process.Model.Process + + @type t :: term + + event_struct [:process_id] + + def new(process = %Process{}) do + # We do not store the process struct itself because it may be used several + # seconds later. 
By storing `process_id` directly, we force any subscriber + # to always fetch the most recent process information. + %__MODULE__{ + process_id: process.process_id + } + end + end + + event Recalcado do + + alias Helix.Server.Model.Server + + @type t :: term + + event_struct [:server_id] + + def new(server_id = %Server.ID{}) do + %__MODULE__{ + server_id: server_id + } + end + + notify do + @moduledoc """ + Notifies a client that the TOP has changed. Instead of sending a diff of + what has changed, we send the whole TOP, as the Client would receive if it + were logging in for the first time. + """ + + alias Helix.Process.Public.Index, as: ProcessIndex + + @event :top_recalcado + + def generate_payload(event, socket) do + data = ProcessIndex.index(event.server_id, socket.assigns.entity_id) + + {:ok, data} + end + + def whom_to_notify(event), + do: %{server: event.server_id} + end + end +end diff --git a/lib/process/executable.ex b/lib/process/executable.ex index 2e21e63f..4e942c66 100644 --- a/lib/process/executable.ex +++ b/lib/process/executable.ex @@ -39,20 +39,20 @@ defmodule Helix.Process.Executable do defp handlers(process) do quote do @spec get_process_data(params) :: - %{process_data: unquote(process).t} + %{data: unquote(process).t} docp """ Retrieves the `process_data`, according to how it was defined at the Process' `new/1`. Subset of the full process params. """ defp get_process_data(params) do data = call_process(:new, params) - %{process_data: data} + %{data: data} end @spec get_ownership(Server.t, Server.t, params, meta) :: %{ gateway_id: Server.id, - target_server_id: Server.id + target_id: Server.id } docp """ Infers ownership information about the process, which is a subset of the @@ -61,20 +61,20 @@ defmodule Helix.Process.Executable do defp get_ownership(gateway, target, params, meta) do %{ gateway_id: gateway.server_id, - target_server_id: target.server_id + target_id: target.server_id } end @spec get_process_type(term) :: - %{process_type: Process.type} + %{type: Process.type} docp """ Returns the `process_type` parameter, a subset of the full process params. """ - defp get_process_type(%{process_type: process_type}), - do: %{process_type: process_type |> to_string()} + defp get_process_type(%{type: process_type}), + do: %{type: process_type} defp get_process_type(_) do process_type = call_process(:get_process_type) - %{process_type: process_type} + %{type: process_type} end @spec get_network_id(term) :: @@ -230,7 +230,7 @@ defmodule Helix.Process.Executable do """ def execute(unquote_splicing(args)) do process_data = get_process_data(unquote(params)) - objective = get_objective(unquote_splicing(args)) + resources = get_resources(unquote_splicing(args)) file = get_file(unquote_splicing(args)) ownership = get_ownership(unquote_splicing(args)) process_type = get_process_type(unquote(meta)) @@ -239,7 +239,7 @@ defmodule Helix.Process.Executable do partial = %{} |> Map.merge(process_data) - |> Map.merge(objective) + |> Map.merge(resources) |> Map.merge(file) |> Map.merge(ownership) |> Map.merge(process_type) @@ -268,6 +268,9 @@ defmodule Helix.Process.Executable do {:error, %Ecto.Changeset{}} -> {:error, :internal} + + _ -> + {:error, :internal} end end end @@ -298,24 +301,25 @@ defmodule Helix.Process.Executable do end @doc """ - Returns the process' `objective`, calling the process' `new/1` with the - parameters defined on the `objective` section of the Process.Executable. 
+ Returns information about the resource usage of that process, including: + + - what is the process objective + - which resources can be allocated dynamically + - what are the statically allocated resources """ - defmacro objective(gateway, target, params, meta, do: block) do + defmacro resources(gateway, target, params, meta, do: block) do args = [gateway, target, params, meta] process = get_process(__CALLER__) quote do - @spec get_objective(term, term, term, term) :: - %{objective: unquote(process).objective} + @spec get_resources(term, term, term, term) :: + unquote(process).resources @doc false - defp get_objective(unquote_splicing(args)) do + defp get_resources(unquote_splicing(args)) do params = unquote(block) - objective = call_process(:objective, params) - - %{objective: objective} + call_process(:resources, params) end end diff --git a/lib/process/internal/process.ex b/lib/process/internal/process.ex index aab32c5a..d76a1e00 100644 --- a/lib/process/internal/process.ex +++ b/lib/process/internal/process.ex @@ -5,6 +5,15 @@ defmodule Helix.Process.Internal.Process do alias Helix.Process.Model.Process alias Helix.Process.Repo + @spec create(Process.creation_params) :: + {:ok, Process.t} + | {:error, Process.changeset} + def create(params) do + params + |> Process.create_changeset() + |> Repo.insert() + end + @spec fetch(Process.id) :: Process.t | nil @@ -14,43 +23,22 @@ defmodule Helix.Process.Internal.Process do end end - @spec get_running_processes_of_type_on_server(Server.idt, String.t) :: - [Process.t] - def get_running_processes_of_type_on_server(gateway_id, type) do - gateway_id - |> Process.Query.by_gateway() - |> Process.Query.by_type(type) - |> Process.Query.by_state(:running) - |> Repo.all() - |> Enum.map(&Process.format/1) - end - @spec get_processes_on_server(Server.idt) :: [Process.t] - def get_processes_on_server(gateway_id) do - gateway_id - |> Process.Query.by_gateway() + def get_processes_on_server(server_id) do + server_id + |> Process.Query.on_server() |> Repo.all() |> Enum.map(&Process.format/1) end - @spec get_processes_targeting_server(Server.idt) :: + @spec get_running_processes_of_type_on_server(Server.idt, Process.type) :: [Process.t] - def get_processes_targeting_server(gateway_id) do - gateway_id - |> Process.Query.by_target() - |> Process.Query.not_targeting_gateway() - |> Repo.all() - |> Enum.map(&Process.format/1) - end - - @spec get_processes_of_type_targeting_server(Server.idt, String.t) :: - [Process.t] - def get_processes_of_type_targeting_server(gateway_id, type) do + def get_running_processes_of_type_on_server(gateway_id, type) do gateway_id - |> Process.Query.by_target() - |> Process.Query.not_targeting_gateway() + |> Process.Query.by_gateway() |> Process.Query.by_type(type) + |> Process.Query.by_state(:running) |> Repo.all() |> Enum.map(&Process.format/1) end @@ -64,10 +52,31 @@ defmodule Helix.Process.Internal.Process do |> Enum.map(&Process.format/1) end + def batch_update(processes) do + # TODO: Transaction + Enum.each(processes, fn process -> + Repo.update(process) + end) + end + @spec delete(Process.t) :: :ok + @doc """ + Deletes a process. + + Using `Repo.delete_all/1` is a better idea than `Repo.delete/1`, since it may + happen that TOP would attempt to delete so-called "stale" Repo structs. + + This happens when the side-effect of a process would lead to itself being + deleted. Example: When completing a BankTransferProcess, the underlying + connection will be closed. 
But when a ConnectionClosedEvent is emitted, any + underlying Process with such connection would also be closed. This race + condition is "harmless" in our context. + """ def delete(process) do - Repo.delete(process) + process.process_id + |> Process.Query.by_id() + |> Repo.delete_all() :ok end diff --git a/lib/process/internal/top/allocator/plan.ex b/lib/process/internal/top/allocator/plan.ex deleted file mode 100644 index d07fd826..00000000 --- a/lib/process/internal/top/allocator/plan.ex +++ /dev/null @@ -1,213 +0,0 @@ -defmodule Helix.Process.Internal.TOP.Allocator.Plan do - - alias Ecto.Changeset - alias Helix.Network.Model.Network - alias Helix.Process.Internal.TOP.ServerResources, as: ServerResourcesTOP - alias Helix.Process.Model.Process - alias Helix.Process.Model.Process.Resources - - @type process :: Process.t | %Ecto.Changeset{data: Process.t} - - @type shares_plan :: %{ - cpu: non_neg_integer, - ram: non_neg_integer, - net: %{Network.id => %{dlk: non_neg_integer, ulk: non_neg_integer}} - } - - @type plan :: %{ - current_plan: %{ - fragment: ServerResourcesTOP.t, - processes: [{process, [:cpu | :ram | :dlk | :ulk]}] - }, - next_plan: %{ - shares: shares_plan, - processes: [{process, [:cpu | :ram | :dlk | :ulk]}] - }, - acc: [process] - } - - @spec allocate([process], ServerResourcesTOP.t) :: - [Changeset.t] - | {:error, :insufficient_resources} - def allocate(processes, resources) do - processes - |> plan(resources) - |> execute() - end - - # TODO: accumulate on the "plan" the amount of allocation rounds needed and - # return those changesets inside a map containing that debug info. That way - # we can metrify (?) the TOP and possibly optimize it - @spec plan([process], ServerResourcesTOP.t) :: - {plan, ServerResourcesTOP.t} - | {:error, any} - defp plan(processes, resources) do - plan = %{ - current_plan: %{fragment: %ServerResourcesTOP{}, processes: []}, - next_plan: %{shares: %{cpu: 0, ram: 0, net: %{}}, processes: []}, - acc: [] - } - - preallocate(processes, plan, resources) - end - - # REVIEW: I was pretty tired when i wrote this. 
It should be refactored later - @spec preallocate([process], plan, ServerResourcesTOP.t) :: - {plan, ServerResourcesTOP.t} - | {:error, {:resources, :lack, :cpu | :ram | {:net, Network.id}}} - defp preallocate([h| t], plan, resources) do - process = Process.allocate_minimum(h) - net_id = Changeset.get_field(process, :network_id) - - # REVIEW: TODO: the return format still seems odd and this code is getting - # complicated - case ServerResourcesTOP.sub_from_process(resources, process) do - {:ok, resources} -> - case allocable_resources(process, resources) do - [] -> - plan = Map.update!(plan, :acc, &([process| &1])) - - preallocate(t, plan, resources) - res_types -> - shares = Changeset.get_field(process, :priority) - - enqueue_process = &([{process, res_types}| &1]) - update_shares = &merge_share(&1, res_types, shares, net_id) - - plan = - plan - |> update_in([:next_plan, :processes], enqueue_process) - |> update_in([:next_plan, :shares], update_shares) - - preallocate(t, plan, resources) - end - error = {:error, {:resources, :lack, _}} -> - error - end - end - - defp preallocate([], plan, r), - do: {plan, r} - - @spec allocable_resources(process, ServerResourcesTOP.t) :: - [:cpu | :ram | :dlk | :ulk] - defp allocable_resources(process, %{cpu: cpu, ram: ram, net: networks}) do - net_id = Changeset.get_field(process, :network_id) - net = Map.get(networks, net_id, [dlk: 0, ulk: 0]) - - # Returns a list of resource types that the server can't allocate for the - # process so we can reject them from the resources the process ask - # This is done so we can avoid trying to allocate resources a process can't - # receive - shouldnt = - net - |> Enum.to_list() - |> Kernel.++([cpu: cpu, ram: ram]) - |> Enum.filter(fn {_, v} -> v == 0 end) - |> Enum.map(&elem(&1, 0)) - - Process.can_allocate(process) -- shouldnt - end - - @spec execute({plan, ServerResourcesTOP.t}) :: - [Changeset.t] - @spec execute({:error, any}) :: - {:error, :insufficient_resources} - defp execute({:error, _}), - # FIXME: Return the resource that was insufficient - do: {:error, :insufficient_resources} - defp execute({plan = %{}, resources = %{}}) do - plan - |> execute_step(resources) - |> Map.fetch!(:acc) - end - - @spec execute_step(plan, ServerResourcesTOP.t) :: - plan - defp execute_step(plan = %{current_plan: %{fragment: fragment, processes: [{process, required_resources}| t]}}, resources) do - shares = Changeset.get_field(process, :priority) - net_id = Changeset.get_field(process, :network_id) - - allocate = - fragment.net - |> Map.get(net_id, %{}) - # Prepare a map with resources that the process might want - |> Enum.into(%{cpu: fragment.cpu, ram: fragment.ram}) - # Filter out those that it didn't request - |> Map.take(required_resources) - |> Enum.map(fn {k, v} -> {k, v * shares} end) - |> :maps.from_list() - - allocated_proccess = Process.allocate(process, allocate) - - allocated_before = Changeset.get_field(process, :allocated) - allocated_after = Changeset.get_field(allocated_proccess, :allocated) - - if allocated_before == allocated_after do - # Nothing was changed, so this process won't receive any more allocation - plan - |> put_in([:current_plan, :processes], t) - |> Map.update!(:acc, &([process| &1])) - |> execute_step(resources) - else - can_allocate = Process.can_allocate(allocated_proccess) - - resources_diff = Resources.sub(allocated_after, allocated_before) - resources = ServerResourcesTOP.sub_from_resources( - resources, - resources_diff, - net_id) - - enqueue_process = &([{allocated_proccess, can_allocate}| 
&1]) - update_shares = &merge_share(&1, can_allocate, shares, net_id) - - plan - |> put_in([:current_plan, :processes], t) - |> update_in([:next_plan, :processes], enqueue_process) - |> update_in([:next_plan, :shares], update_shares) - |> execute_step(resources) - end - end - - defp execute_step(p = %{current_plan: %{processes: []}, next_plan: %{processes: []}}, _) do - p - end - - defp execute_step(plan = %{current_plan: %{processes: []}, next_plan: next_plan}, resources) do - %{processes: processes, shares: shares} = next_plan - - # TODO: Maybe, instead of storing the shares the process asks for, just - # store the process prio and what it is requesting, then, at this step, - # filter out the requests that can't be completed because the resource - # is all used (so we can save a few computations) and use the lowest common - # denominator of the priority of the rest of processes to allocate more - # properly the total amount of resources possible - - fragment = ServerResourcesTOP.part_from_shares(resources, shares) - - # Let's simply update it to avoid forgetting we should keep the acc :joy: - plan = %{plan| - current_plan: %{processes: processes, fragment: fragment}, - next_plan: %{shares: %{cpu: 0, ram: 0, net: %{}}, processes: []} - } - - execute_step(plan, resources) - end - - @spec merge_share(shares_plan, [:cpu | :ram | :dlk | :ulk], non_neg_integer, Network.id) :: shares_plan - defp merge_share(shares, requested_resources, priority, net_id) do - Enum.reduce(requested_resources, shares, fn - :cpu, acc = %{cpu: cpu} -> - %{acc| cpu: cpu + priority} - :ram, acc = %{ram: ram} -> - %{acc| ram: ram + priority} - link, acc = %{net: networks} when link in [:ulk, :dlk] -> - updated_networks = - networks - |> Map.put_new(net_id, %{dlk: 0, ulk: 0}) - |> update_in([net_id, link], &(&1 + priority)) - - %{acc| net: updated_networks} - end) - end -end diff --git a/lib/process/internal/top/server_resources.ex b/lib/process/internal/top/server_resources.ex deleted file mode 100644 index 465cc10b..00000000 --- a/lib/process/internal/top/server_resources.ex +++ /dev/null @@ -1,293 +0,0 @@ -defmodule Helix.Process.Internal.TOP.ServerResources do - - alias Ecto.Changeset - alias Helix.Network.Model.Network - alias Helix.Process.Model.Process - alias Helix.Process.Model.Process.Resources - - defstruct [cpu: 0, ram: 0, net: %{}] - - @type t :: %__MODULE__{ - cpu: non_neg_integer, - ram: non_neg_integer, - net: %{Network.id => %{dlk: non_neg_integer, ulk: non_neg_integer}} - } - - @type shares :: %{ - cpu: non_neg_integer, - ram: non_neg_integer, - net: %{Network.id => %{dlk: non_neg_integer, ulk: non_neg_integer}} - } - - # TODO: FIXME: change symbols and fun names to things that make sense - - @spec cast(map) :: t - def cast(params) do - server_resources = struct(__MODULE__, params) - - # FIXME: This is hard to read, it basicaly ensures that all networks are - # just maps %{dlk: term, ulk: term} - networks = - server_resources.net - |> Enum.map(fn - {k, v = %{dlk: _, ulk: _}} when map_size(v) == 2 -> - {Network.ID.cast!(k), v} - {k, v = %{}} -> - value = Map.merge(%{dlk: 0, ulk: 0}, Map.take(v, [:dlk, :ulk])) - {Network.ID.cast!(k), value} - end) - |> :maps.from_list() - - %{server_resources| net: networks} - end - - @spec replace_network_if_exists(t, Network.id, non_neg_integer, non_neg_integer) :: - t - def replace_network_if_exists(server_resources = %__MODULE__{}, net_id, dlk, ulk) when is_integer(dlk) and is_integer(ulk) do - case server_resources.net do - %{^net_id => _} -> - updated_net = 
Map.put( - server_resources.net, - net_id, - %{dlk: dlk, ulk: ulk}) - - %{server_resources| net: updated_net} - _ -> - server_resources - end - end - - @spec update_network_if_exists(t, Network.id, ((map) -> map)) :: - t - def update_network_if_exists(server_resources = %__MODULE__{}, net_id, fun) do - case server_resources.net do - %{^net_id => value} -> - case fun.(value) do - # This is to ensure that the returned value complies with our contract - # otherwise the error could happen later on the pipeline and just make - # it harder to debug why it happened - value = %{dlk: dlk, ulk: ulk} - when map_size(value) == 2 and is_integer(dlk) and is_integer(ulk) -> - updated_net = Map.put(server_resources.net, net_id, value) - %{server_resources| net: updated_net} - end - _ -> - server_resources - end - end - - @spec sub_from_process(t, Process.t | Changeset.t) :: - {:ok, t} - | {:error, {:resources, :lack, :cpu | :ram}} - | {:error, {:resources, :lack, {:net, :dlk | :ulk, Network.id}}} - def sub_from_process(server_resources = %__MODULE__{cpu: cpu, ram: ram, net: networks}, process) do - process = Changeset.change(process) - net_id = Changeset.get_field(process, :network_id) - - # If network doesn't exists and the process doesn't require network alloc, - # it'll be returned as 0, and it's not a problem :) - # If the network doesn't exists but the process requires it, the values - # will be obviously negative - rest = - networks - |> Map.get(net_id, %{}) - |> Map.merge(%{cpu: cpu, ram: ram}) - |> Resources.cast() - |> Resources.sub(Changeset.get_field(process, :allocated)) - |> Map.take([:cpu, :ram, :dlk, :ulk]) - - negative_resource = Enum.find(rest, fn {_, v} -> v < 0 end) - case negative_resource do - nil -> - server_resources = replace_network_if_exists( - %{server_resources| cpu: rest.cpu, ram: rest.ram}, - net_id, - rest.dlk, - rest.ulk) - {:ok, server_resources} - {:cpu, _} -> - {:error, {:resources, :lack, :cpu}} - {:ram, _} -> - {:error, {:resources, :lack, :ram}} - {resource_kind, _} when resource_kind in [:dlk, :ulk] -> - {:error, {:resources, :lack, {:net, resource_kind, net_id}}} - end - end - - @spec sub_from_resources(t, Resources.t, Network.id) :: - t - @doc """ - Subtracts `resources` from `server_resources`. 
- - If network `net_id` exists for server resources `server_resources`, subtracts `resources`'s dlk and - ulk from it - """ - def sub_from_resources(server_resources = %__MODULE__{}, resources = %Resources{}, net_id) do - %{server_resources| - cpu: server_resources.cpu - resources.cpu, - ram: server_resources.ram - resources.ram, - net: case server_resources.net do - networks = %{^net_id => %{dlk: dlk, ulk: ulk}} -> - val = %{dlk: dlk - resources.dlk, ulk: ulk - resources.ulk} - Map.put(networks, net_id, val) - networks -> - networks - end - } - end - - @spec part_from_shares(t, shares) :: - t - @doc """ - Divides `x` by `shares` including dropping networks not requested by `shares` - """ - def part_from_shares(server_resources = %__MODULE__{}, shares) do - %__MODULE__{ - cpu: shares.cpu > 0 && div(server_resources.cpu, shares.cpu) || 0, - ram: shares.ram > 0 && div(server_resources.ram, shares.ram) || 0, - net: - server_resources.net - |> Map.take(Map.keys(shares.net)) - |> Enum.map(fn {net_id, %{dlk: dlk, ulk: ulk}} -> - network_share = shares.net[net_id] - divide_dlk_by = network_share.dlk - divide_ulk_by = network_share.ulk - - updated_value = %{ - dlk: divide_dlk_by > 0 && div(dlk, divide_dlk_by) || 0, - ulk: divide_ulk_by > 0 && div(ulk, divide_ulk_by) || 0 - } - - {net_id, updated_value} - end) - |> :maps.from_list() - } - end - - def sum_process(server_resources, process) do - {allocated, network_id} = case process do - %Process{} -> - allocated = process.allocated - network_id = process.network_id - - {allocated, network_id} - %Changeset{} -> - allocated = Changeset.get_field(process, :allocated) - network_id = Changeset.get_field(process, :network_id) - - {allocated, network_id} - end - - do_sum_process(server_resources, allocated, network_id) - end - - defp do_sum_process(server_resources, allocated, network_id) do - net = if network_id do - net_alloc = Map.take(allocated, [:ulk, :dlk]) - if %{ulk: 0, dlk: 0} == net_alloc do - server_resources.net - else - sum_net_alloc = &Map.merge(&1, net_alloc, fn _, v1, v2 -> v1 + v2 end) - Map.update(server_resources.net, network_id, net_alloc, sum_net_alloc) - end - else - server_resources.net - end - - %{ - server_resources| - cpu: server_resources.cpu + allocated.cpu, - ram: server_resources.ram + allocated.ram, - net: net - } - end - - @spec sum(t, t) :: - t - def sum(resources_a, resources_b) do - net = Map.merge(resources_a.net, resources_b.net, fn - _k, v1, v2 -> - %{ - dlk: v1.dlk + v2.dlk, - ulk: v1.ulk + v2.ulk - } - end) - - %__MODULE__{ - cpu: resources_a.cpu + resources_b.cpu, - ram: resources_a.cpu + resources_b.cpu, - net: net - } - end - - @spec sub(t, t) :: - t - def sub(resources_a, resources_b) do - net = Map.merge(resources_a.net, resources_b.net, fn - _k, v1, v2 -> - %{ - dlk: v1.dlk - v2.dlk, - ulk: v1.ulk - v2.ulk - } - end) - - %__MODULE__{ - cpu: resources_a.cpu - resources_b.cpu, - ram: resources_a.cpu - resources_b.cpu, - net: net - } - end - - @spec negatives(t) :: - list - def negatives(resources) do - cpu = resources.cpu < 0 && [{:cpu, resources.cpu * -1}] || [] - ram = resources.ram < 0 && [{:ram, resources.ram * -1}] || [] - networks = Enum.reduce(resources.net, [], fn - {net_id, values}, acc -> - negative_net_res = - values - |> Enum.filter(fn {_, v} -> v < 0 end) - |> Enum.map(fn {k, v} -> {k, net_id, v * -1} end) - - negative_net_res ++ acc - end) - - cpu ++ ram ++ networks - end - - @spec exceeds?(t, t) :: - boolean - @doc """ - Checks if any resource on `a` exceeds a resource on `b` - - In practice, 
seeing `a` and `b` as sets, this function will check if `a` is - **not** a subset of `b` - - ## Examples - - iex> a = %{cpu: 10, ram: 10, net: %{"a::b" => %{ulk: 10, dlk: 10}}} - %{} - iex> b = %{cpu: 10, ram: 10, net: %{"a::b" => %{ulk: 10, dlk: 10}, "c::d" => %{ulk: 10, dlk: 10}}} - %{} - iex> exceeds?(a, b) - false - iex> exceeds?(%{a| cpu: 20}, b) - true - iex> exceeds?(%{a| net: %{"ffff::ffff" => %{ulk: 1, dlk: 1}}}, b) - true - """ - def exceeds?(a, b) do - a.cpu > b.cpu - or a.ram > b.ram - or not MapSet.subset?(MapSet.new(Map.keys(a)), MapSet.new(Map.keys(b))) - or Enum.any?(a.net, fn {net_id, a} -> - case b.net[net_id] do - nil -> - true - b -> - a.ulk > b.ulk or a.dlk > b.dlk - end - end) - end -end diff --git a/lib/process/model/process.ex b/lib/process/model/process.ex index 5f00952e..dff95291 100644 --- a/lib/process/model/process.ex +++ b/lib/process/model/process.ex @@ -1,4 +1,13 @@ defmodule Helix.Process.Model.Process do + @moduledoc """ + The Process model is responsible for persisting all in-game processes. + + Compared to other models within the Process service, this model is quite + simple and straightforward. Other than the usual model responsibilities (like + ensuring the data is stored correctly and providing ways to query the data), + it plays a major role when formatting the process before giving it back to + whoever asked for it. + """ use Ecto.Schema use HELL.ID, field: :process_id, meta: [0x0021] @@ -6,557 +15,449 @@ defmodule Helix.Process.Model.Process do import Ecto.Changeset alias Ecto.Changeset + alias HELL.Constant + alias HELL.MapUtils + alias HELL.NaiveStruct alias Helix.Entity.Model.Entity alias Helix.Network.Model.Connection alias Helix.Network.Model.Network alias Helix.Server.Model.Server alias Helix.Software.Model.File - alias Helix.Process.Model.Process.Limitations - alias Helix.Process.Model.Process.MapServerToProcess - alias Helix.Process.Model.Process.NaiveStruct - alias Helix.Process.Model.Process.Resources alias Helix.Process.Model.Processable - alias Helix.Process.Model.Process.State - - @type type :: String.t - - @type t :: %__MODULE__{ - process_id: id, - gateway_id: Server.id, - source_entity_id: Entity.id, - target_server_id: Server.id, - file_id: File.id | nil, - network_id: Network.id | nil, - connection_id: Connection.id | nil, - process_data: Processable.t, - process_type: type, - state: State.state, - limitations: Limitations.t, - objective: Resources.t, - processed: Resources.t, - allocated: Resources.t, - priority: 0..5, - minimum: map, - creation_time: DateTime.t, - updated_time: DateTime.t, - estimated_time: DateTime.t | nil - } + alias Helix.Process.Model.TOP + alias __MODULE__, as: Process + + @type t :: + %__MODULE__{ + process_id: id, + gateway_id: Server.id, + source_entity_id: Entity.id, + target_id: Server.id, + file_id: File.id | nil, + connection_id: Connection.id | nil, + network_id: Network.id | nil, + data: term, + type: type, + priority: term, + l_allocated: Process.Resources.t | nil, + r_allocated: Process.Resources.t | nil, + r_limit: limit, + l_limit: limit, + l_reserved: Process.Resources.t, + r_reserved: Process.Resources.t, + last_checkpoint_time: DateTime.t, + static: static, + l_dynamic: dynamic, + r_dynamic: dynamic, + local?: boolean | nil, + next_allocation: Process.Resources.t | nil, + state: state | nil, + creation_time: DateTime.t, + time_left: non_neg_integer | nil, + completion_date: DateTime.t | nil + } - @type process :: %__MODULE__{} | %Ecto.Changeset{data: %__MODULE__{}} - - @type create_params :: 
%{ - :gateway_id => Server.idtb, - :source_entity_id => Entity.idtb, - :target_server_id => Server.idtb, - :process_data => Processable.t, - :process_type => String.t, - optional(:file_id) => File.idtb, - optional(:network_id) => Network.idtb, - optional(:connection_id) => Connection.idtb, - optional(:objective) => map + @typep limit :: Process.Resources.t | %{} + + @type type :: + :file_upload + | :file_download + | :cracker_bruteforce + | :cracker_overflow + + @type signal :: + :SIGTERM + | :SIGKILL + | :SIGSTOP + | :SIGCONT + | :SIGPRIO + | :SIGCONND + | :SIGFILED + + @type signal_params :: + %{reason: kill_reason} + | %{priority: term} + | %{connection: Connection.t} + | %{file: File.t} + + @type kill_reason :: + :completed + | :killed + + @type changeset :: %Changeset{data: %__MODULE__{}} + + @type creation_params :: %{ + :gateway_id => Server.id, + :source_entity_id => Entity.id, + :target_id => Server.id, + :data => Processable.t, + :type => type, + :network_id => Network.id | nil, + :file_id => File.id | nil, + :connection_id => Connection.id | nil, + :objective => map, + :l_dynamic => dynamic, + :r_dynamic => dynamic, + :static => static, } - @type update_params :: %{ - optional(:state) => State.state, - optional(:priority) => 0..5, - optional(:creation_time) => DateTime.t, - optional(:updated_time) => DateTime.t, - optional(:estimated_time) => DateTime.t | nil, - optional(:limitations) => map, - optional(:objective) => map, - optional(:processed) => map, - optional(:allocated) => map, - optional(:minimum) => map, - optional(:process_data) => Processable.t - } + @creation_fields [ + :gateway_id, + :source_entity_id, + :target_id, + :file_id, + :network_id, + :connection_id, + :data, + :type, + :objective, + :static, + :l_dynamic, + :r_dynamic, + :l_limit, + :r_limit + ] + + @required_fields [ + :gateway_id, + :source_entity_id, + :target_id, + :data, + :type, + :objective, + :static, + :priority, + :l_dynamic, + :priority + ] + + @type state :: + :waiting_allocation + | :running + | :paused + + @type time_left :: float + + @type resource :: + :cpu + | :ram + | :dlk + | :ulk + + @type dynamic :: [resource] + + @type static :: + %{ + paused: static_usage, + running: static_usage + } - @creation_fields ~w/ - process_data - process_type - gateway_id - source_entity_id - target_server_id - file_id - network_id - connection_id/a - @update_fields ~w/state priority updated_time estimated_time minimum/a - - @required_fields ~w/ - gateway_id - target_server_id - process_data - process_type/a + @typep static_usage :: + %{ + cpu: non_neg_integer, + ram: non_neg_integer, + dlk: non_neg_integer, + ulk: non_neg_integer + } + # Similar to `task_struct` on `sched.h` ;-) + @primary_key false schema "processes" do field :process_id, ID, primary_key: true + ### Identifiers + # The gateway that started the process field :gateway_id, Server.ID + # The entity that started the process field :source_entity_id, Entity.ID + # The server where the target object of this process action is - field :target_server_id, Server.ID - # Which file (if any) contains the "executable" of this process + field :target_id, Server.ID + + ### Custom keys + + # Which file (if any) is the relevant target of this process field :file_id, File.ID - # Which network is this process bound to (if any) + + # Which network (if any) is this process bound to field :network_id, Network.ID - # Which connection is the transport method for this process (if any). - # Obviously if the connection is closed, the process will be killed. 
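For illustration, the sketch below assembles a creation_params map for a hypothetical file download under the new schema. All concrete values (IDs, sizes, static usage numbers) are invented; only the keys follow the creation_params/@creation_fields definitions above, and in the real flow the objective, static, l_dynamic and r_dynamic entries come from the process type's Resourceable callbacks (see get_resources further down) rather than being written by hand.

defmodule DownloadParamsSketch do
  # Sketch only: invented values, real keys (see creation_params above).
  # `data` would be the struct implementing Processable for this process type.
  def build(gateway_id, entity_id, target_id, net_id, file_id, file_size) do
    %{
      gateway_id: gateway_id,
      source_entity_id: entity_id,
      target_id: target_id,
      data: %{},                                  # placeholder Processable struct
      type: :file_download,
      network_id: net_id,
      file_id: file_id,
      connection_id: nil,
      objective: %{dlk: %{net_id => file_size}},  # download `file_size` units over `net_id`
      static: %{
        running: %{cpu: 10, ram: 50, dlk: 0, ulk: 0},
        paused: %{cpu: 0, ram: 20, dlk: 0, ulk: 0}
      },
      l_dynamic: [:dlk],                          # gateway dynamically allocates download link
      r_dynamic: [:ulk]                           # remote endpoint mirrors it as upload
    }
  end
end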
In the - # future it might make sense to have processes that might survive after a - # connection shutdown but right now, it's a kill + + # Which connection (if any) is the transport method for this process field :connection_id, Connection.ID - # Data that is used by the specific implementation of the process - # side-effects - field :process_data, NaiveStruct - - # The type of process that defines this process behaviour. - # This field might sound redundant when `:process_data` is a struct that - # might allow us to infer the type of process, but this field is included to - # allow filtering by process_type (and even blocking more than one process - # of certain process_type from running on a server) from the db - field :process_type, :string - - # Which state in the process FSM the process is currently on - field :state, State, - default: :running - # What is the process priority on the Table of Processes (only affects - # dynamic allocation) + ### Helix.Process required data + + # Data used by the specific implementation for side-effects generation + field :data, NaiveStruct + + # The process type identifier + field :type, Constant + + # Process priority. field :priority, :integer, default: 3 - embeds_one :objective, Resources, - on_replace: :delete - embeds_one :processed, Resources, - on_replace: :delete - embeds_one :allocated, Resources, - on_replace: :delete - embeds_one :limitations, Limitations, - on_replace: :delete - - # The minimum amount of resources this process requires (aka the static - # amount of resources this process uses) - field :minimum, :map, - default: %{}, - virtual: true + ### Resource usage information - field :creation_time, :utc_datetime - field :updated_time, :utc_datetime - field :estimated_time, :utc_datetime, + # Amount of resources required in order to consider the process completed. + field :objective, :map + + # Amount of resources that this process has already processed. + field :processed, :map + + # Amount of resources that this process has allocated to it + field :l_allocated, :map, + virtual: true + field :r_allocated, :map, virtual: true - # Pretend this doesn't exists. 
This is included on the vschema solely to - # ensure with ease that those entries will be inserted in the same - # transaction but only after the process is inserted - has_many :server_to_process_map, MapServerToProcess, - foreign_key: :process_id, - references: :process_id - end + # Limitations + field :r_limit, :map, + default: %{} + field :l_limit, :map, + default: %{} - @spec create_changeset(create_params) :: - Changeset.t - def create_changeset(params) do - now = DateTime.utc_now() + field :l_reserved, :map, + default: %{} + field :r_reserved, :map, + default: %{} - %__MODULE__{} - |> cast(params, @creation_fields) - |> put_change(:creation_time, now) - |> put_change(:updated_time, now) - |> validate_change(:process_data, fn :process_data, value -> - # Only accepts as input structs that implement protocol Processable to - # ensure that they will be properly processed - if Processable.impl_for(value), - do: [], - else: [process_data: "invalid value"] - end) - |> put_defaults() - |> changeset(params) - |> server_to_process_map() - |> Map.put(:action, :insert) - end + # Date when the process was last simulated during a `TOPAction.recalque/2` + field :last_checkpoint_time, :utc_datetime - @spec put_defaults(Changeset.t) :: - Changeset.t - defp put_defaults(changeset) do - cs = - get_change(changeset, :limitations) - && changeset - || put_embed(changeset, :limitations, %{}) - - cs - |> put_embed(:processed, %{}) - |> put_embed(:allocated, %{}) - end + # Static amount of resources used by the process + field :static, :map - @spec update_changeset(process, update_params) :: - Changeset.t - def update_changeset(process, params) do - process - |> cast(params, @update_fields) - |> cast_embed(:processed) - |> cast_embed(:allocated) - |> cast_embed(:limitations) - |> validate_process_data(params) - |> changeset(params) - |> Map.put(:action, :update) - end + # List of dynamically allocated resources + field :l_dynamic, {:array, Constant} + field :r_dynamic, {:array, Constant}, + default: [] - defp validate_process_data(changeset, params) do - process_data = get_field(changeset, :process_data) + ### Metadata - changeset - |> cast(params, [:process_data]) - |> validate_change(:process_data, fn :process_data, new_data -> - if process_data.__struct__ == new_data.__struct__, - do: [], - else: [process_data: "type changed"] - end) - end + # Used internally by Allocator to identify whether the process is local (it + # was started on this server) or remote (started somewhere else, and targets + # the current server). + field :local?, :boolean, + virtual: true - @spec changeset(process, map) :: - Changeset.t - @doc false - def changeset(struct, params) do - struct - |> cast(params, [:updated_time]) - |> cast_embed(:objective) - |> validate_required(@required_fields) - |> validate_inclusion(:priority, 0..5) - end + # Used by the Scheduler to accurately forecast the process, taking into + # consideration both the current allocation (`allocated`) and the next + # allocation, as defined by the Allocator. + field :next_allocation, :map, + virtual: true - @spec load_virtual_data(t) :: - t - @doc """ - Updates `minimum` and `estimated_time` into the process - """ - def load_virtual_data(process) do - minimum = Processable.minimum(process.process_data) + # Process state (`:running`, `:stopped`). 
Used internally for an easier + # abstraction over `priority` (which is used to define the process state) + field :state, Constant, + virtual: true - process - |> estimate_conclusion() - |> Map.put(:minimum, minimum) + field :creation_time, :utc_datetime + + # Estimated date of completion + field :time_left, :float, + virtual: true + + field :completion_date, :utc_datetime, + virtual: true + end + + @spec create_changeset(creation_params) :: + changeset + def create_changeset(params) do + %__MODULE__{} + |> cast(params, @creation_fields) + |> validate_required(@required_fields) + |> put_defaults() end - @spec format(t) :: + @spec format(raw_process :: t) :: t @doc """ Converts the retrieved process from the Database into TOP's internal format. - Notably, it: + - Adds virtual data (derived data not stored on DB). - Converts the Processable (defined at `process_data`) into Helix internal - format, by using the `after_read_hook/1` implemented by each Processable + format, by using the `after_read_hook/1` implemented by each Processable + - Converts all resources (objective, limit, reserved etc) into Helix format. + - Infers the actual process usage, based on what was reserved for it. + - Estimates the completion date and time left """ - def format(process) do - data = Processable.after_read_hook(process.process_data) + def format(process = %Process{}) do + formatted_data = Processable.after_read_hook(process.data) process - |> load_virtual_data() - |> Map.replace(:process_data, data) - end - - @spec complete?(process) :: boolean - def complete?(process = %Ecto.Changeset{}), - do: complete?(apply_changes(process)) - def complete?(p = %__MODULE__{}), - do: p.state == :complete or (p.objective && p.objective == p.processed) - - @spec kill(process, atom) :: - {[process] | process, [struct]} - def kill(process = %__MODULE__{}, reason), - do: Processable.kill(process.process_data, process, reason) - def kill(process = %Ecto.Changeset{}, reason), - do: Processable.kill(get_change(process, :process_data), process, reason) - - @spec allocate_minimum(process) :: - Changeset.t - def allocate_minimum(process) do - process = change(process) - - minimum = - process - |> get_field(:minimum) - |> Map.get(get_field(process, :state), %{}) - - update_changeset(process, %{allocated: minimum}) + |> Map.replace(:state, get_state(process)) + |> Map.replace(:data, formatted_data) + |> format_resources() + |> infer_usage() + |> estimate_duration() end - @spec allocate(process, Resources.resourceable) :: - Changeset.t - def allocate(process, amount) do - cs = change(process) - - # The amount we want to allocate - allocable = - cs - |> get_field(:allocated) - |> Resources.sum(amount) - - allocated = - cs - |> get_field(:limitations) - |> Limitations.to_list() - |> Enum.reduce(allocable, fn - {_, nil}, acc -> - acc - {field, value}, acc -> - # If there is a limit to certain resource, don't allow the allocation - # to exceed that limit - Map.update!(acc, field, &min(&1, value)) - end) - |> Map.from_struct() - - update_changeset(cs, %{allocated: allocated}) - end + @spec get_dynamic(t) :: + [resource] + def get_dynamic(%{local?: true, l_dynamic: dynamic}), + do: dynamic + def get_dynamic(%{local?: false, r_dynamic: dynamic}), + do: dynamic + + @spec get_limit(t) :: + limit + def get_limit(%{local?: true, l_limit: limit}), + do: limit + def get_limit(%{local?: false, r_limit: limit}), + do: limit + + @spec get_last_update(t) :: + DateTime.t + def get_last_update(p = %{last_checkpoint_time: nil}), + do: p.creation_time + 
def get_last_update(%{last_checkpoint_time: last_checkpoint_time}), + do: last_checkpoint_time + + @spec infer_usage(t) :: + t + def infer_usage(process) do + l_alloc = Process.Resources.min(process.l_limit, process.l_reserved) + r_alloc = Process.Resources.min(process.r_limit, process.r_reserved) + + {l_alloc, r_alloc} = + # Assumes that, if remote allocation was not defined, then the process is + # oblivious to the remote server's resources + if r_alloc == %{} do + {l_alloc, Process.Resources.initial()} + + # On the other hand, if there are remote allocations/limitations, we'll + # immediately mirror its resources and potentially limit the local + # allocation + else + mirrored_transfer_resources = + r_alloc + |> Process.Resources.mirror() + |> Map.drop([:cpu, :ram]) - @spec allocation_shares(process) :: - non_neg_integer - def allocation_shares(process) do - case process do - %__MODULE__{state: state, priority: priority} - when state in [:standby, :running] -> - can_allocate?(process) - && priority - || 0 - %__MODULE__{} -> - 0 - %Ecto.Changeset{} -> - process - |> apply_changes() - |> allocation_shares() - end - end + l_alloc = Process.Resources.min(mirrored_transfer_resources, l_alloc) - @spec pause(process) :: - {[t | Ecto.Changeset.t] | t | Ecto.Changeset.t, [struct]} - def pause(process) do - changeset = change(process) - state = get_field(changeset, :state) + {l_alloc, r_alloc} + end - if :paused == state do - {changeset, []} - else - changeset = - changeset - |> calculate_work(DateTime.utc_now()) - |> update_changeset(%{state: :paused, estimated_time: nil}) - |> allocate_minimum() - - changeset - |> get_field(:process_data) - |> Processable.state_change(changeset, state, :paused) - end + %{process| + l_allocated: l_alloc |> Process.Resources.prepare(), + r_allocated: r_alloc |> Process.Resources.prepare() + } end - @spec resume(process) :: - {[t | Ecto.Changeset.t] | t | Ecto.Changeset.t, [struct]} - def resume(process) do - changeset = change(process) - state = get_field(changeset, :state) - - if :paused == state do - # FIXME: state can be "standby" on some cases - changeset = - changeset - |> update_changeset(%{ - state: :running, - updated_time: DateTime.utc_now()}) - |> allocate_minimum() - |> estimate_conclusion() - - changeset - |> get_field(:process_data) - |> Processable.state_change(changeset, state, :running) - else - changeset - end - end + @spec estimate_duration(t) :: + t + defp estimate_duration(process = %Process{}) do + {_, time_left} = TOP.Scheduler.estimate_completion(process) - @spec calculate_work(elem, DateTime.t) :: - elem when elem: process - def calculate_work(p = %__MODULE__{}, now) do - p - |> change() - |> calculate_work(now) - |> apply_changes() - end + completion_date = + if time_left == :infinity do + nil + else + previous_update = get_last_update(process) - def calculate_work(process, now) do - if :running == get_field(process, :state) do - diff = - process - |> get_field(:updated_time) - |> diff_in_seconds(now) + ms_left = + time_left + |> Kernel.*(1000) # From second to millisecond + |> trunc() - processed = calculate_processed(process, diff) + previous_update + |> DateTime.to_unix(:millisecond) + |> Kernel.+(ms_left) + |> DateTime.from_unix!(:millisecond) + end - update_changeset(process, %{updated_time: now, processed: processed}) - else - process - end + process + |> Map.replace(:completion_date, completion_date) + |> Map.replace(:time_left, time_left) end - @spec estimate_conclusion(elem) :: - elem when elem: process - def 
estimate_conclusion(process = %__MODULE__{}) do + @spec format_resources(t) :: + t + defp format_resources(process = %Process{}) do process - |> change() - |> estimate_conclusion() - |> apply_changes() + |> format_objective() + |> format_processed() + |> format_static() + |> format_limits() + |> format_reserved() end - def estimate_conclusion(process) do - objective = get_field(process, :objective) - processed = get_field(process, :processed) - allocated = get_field(process, :allocated) - - conclusion = - if objective do - ttl = - objective - |> Resources.sub(processed) - |> Resources.div(allocated) - |> Resources.to_list() - # Returns a list of "seconds to fulfill resource" - |> Enum.filter(fn {_, x} -> x != 0 end) - |> Enum.map(&elem(&1, 1)) - |> Enum.reduce(0, &max/2) - - case ttl do - x when not x in [0, nil] -> - process - |> get_field(:updated_time) - |> Timex.shift(seconds: x) - _ -> - # Exceptional case when all resources are "0" (ie: nothing to do) - # Also includes the case of when a certain resource will never be - # completed - nil - end - else - nil - end + @spec format_objective(t) :: + t + defp format_objective(p = %{objective: objective}), + do: %{p| objective: Process.Resources.format(objective)} - update_changeset(process, %{estimated_time: conclusion}) - end + @spec format_processed(t) :: + t + defp format_processed(p = %{processed: nil}), + do: p + defp format_processed(p = %{processed: processed}), + do: %{p| processed: Process.Resources.format(processed)} - @spec seconds_to_change(process) :: - non_neg_integer - | :infinity - @doc """ - How many seconds until the `process` change state or frees some resource from - completing part of it's objective - """ - def seconds_to_change(p = %Changeset{}), - do: seconds_to_change(apply_changes(p)) - def seconds_to_change(%{objective: nil}), - do: :infinity - def seconds_to_change(process) do - process.objective - |> Resources.sub(process.processed) - |> Resources.div(process.allocated) - |> Resources.to_list() - |> Keyword.values() - |> Enum.filter(&(is_integer(&1) and &1 > 0)) - |> Enum.reduce(:infinity, &min/2) # Note that atom > int - end + @spec format_static(t) :: + t + defp format_static(p = %{static: static}) do + static = MapUtils.atomize_keys(static) - @spec can_allocate?(process, res | [res]) :: - boolean when res: (:cpu | :ram | :dlk | :ulk) - @doc """ - Checks if the `process` can allocate any of the specified `resources` - """ - def can_allocate?(process, resources \\ [:cpu, :ram, :dlk, :ulk]) do - resources = List.wrap(resources) - Enum.any?(can_allocate(process), &(&1 in resources)) + %{p| static: static} end - # TODO: rename this - @spec can_allocate(process) :: - [:cpu | :ram | :dlk | :ulk] - @doc """ - Returns a list with all resources that the `process` can allocate - """ - def can_allocate(process = %Changeset{}), - do: can_allocate(apply_changes(process)) - def can_allocate(process = %__MODULE__{}) do - dynamic_resources = Processable.dynamic_resources(process.process_data) - - allowed = - case process.objective do - nil -> - [] - objective -> - remaining = Resources.sub(objective, process.processed) - - Enum.filter(dynamic_resources, fn resource -> - remaining = Map.get(remaining, resource) - is_integer(remaining) and remaining > 0 - end) - end - - dynamic_resources - |> Enum.filter(fn resource -> - # Note that this is `nil` unless a value is specified. 
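The "nil is greater than any integer" trick used by the old can_allocate/1 here (and the matching "atom > int" note in seconds_to_change/1 above) relies on Erlang's term ordering, where numbers sort before atoms and nil is just an atom; for example:

iex> nil > 1_000_000
true
iex> min(3, :infinity)
3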
- # Also note that nil is greater than any integer :) - limitations = Map.get(process.limitations, resource) - allocated = Map.get(process.allocated, resource) - - resource in allowed and limitations > allocated - end) + @spec format_limits(t) :: + t + defp format_limits(p) do + l_limit = + p.l_limit + |> Process.Resources.format() + |> Process.Resources.reject_empty() + + r_limit = + p.r_limit + |> Process.Resources.format() + |> Process.Resources.reject_empty() + + %{p| + l_limit: l_limit, + r_limit: r_limit + } end - @spec server_to_process_map(Changeset.t) :: - Changeset.t - defp server_to_process_map(changeset) do - process_type = get_field(changeset, :process_type) - - params1 = %{ - server_id: get_field(changeset, :gateway_id), - process_type: process_type - } - params2 = %{ - server_id: get_field(changeset, :target_server_id), - process_type: process_type + @spec format_reserved(t) :: + t + defp format_reserved(p) do + %{p| + l_reserved: Process.Resources.format(p.l_reserved), + r_reserved: Process.Resources.format(p.r_reserved) } + end - # Should both records be identical, dedup will remove one of them - records = Enum.dedup([params1, params2]) - - put_assoc(changeset, :server_to_process_map, records) + @spec get_state(t) :: + state + defp get_state(%{priority: 0}), + do: :paused + defp get_state(process) do + if process.l_reserved == %{} do + :waiting_allocation + else + :running + end end - @spec diff_in_seconds(DateTime.t, DateTime.t) :: - non_neg_integer - | nil - # Returns the difference in seconds from `start` to `finish`. - # This assumes that both the inputs are using UTC. - defp diff_in_seconds(%DateTime{}, nil), - do: nil - defp diff_in_seconds(start = %DateTime{}, finish = %DateTime{}), - do: Timex.diff(finish, start, :seconds) - - @spec calculate_processed(process, non_neg_integer) :: - Resources.t - # Returns the value of resources processed by `process` after adding the - # amount processed in `seconds_passed` - defp calculate_processed(process, seconds_passed) do - cs = change(process) - - diff = - cs - |> get_field(:allocated) - |> Resources.mul(seconds_passed) - - cs - |> get_field(:processed) - |> Resources.sum(diff) - |> Resources.min(get_field(cs, :objective)) - |> Map.from_struct() + @spec put_defaults(changeset) :: + changeset + defp put_defaults(changeset) do + changeset + |> put_change(:creation_time, DateTime.utc_now()) end defmodule Query do + import Ecto.Query alias Ecto.Queryable @@ -565,23 +466,24 @@ defmodule Helix.Process.Model.Process do alias Helix.Network.Model.Network alias Helix.Server.Model.Server alias Helix.Process.Model.Process - alias Helix.Process.Model.Process.MapServerToProcess - alias Helix.Process.Model.Process.State @spec by_id(Queryable.t, Process.idtb) :: Queryable.t def by_id(query \\ Process, id), do: where(query, [p], p.process_id == ^id) - @spec from_type_list(Queryable.t, [String.t]) :: + @spec from_type_list(Queryable.t, [Process.type]) :: Queryable.t def from_type_list(query \\ Process, type_list), - do: where(query, [p], p.process_type in ^type_list) + do: where(query, [p], p.type in ^type_list) - @spec from_state_list(Queryable.t, [State.state]) :: + @spec on_server(Queryable.t, Server.idt) :: Queryable.t - def from_state_list(query \\ Process, state_list), - do: where(query, [p], p.state in ^state_list) + def on_server(query \\ Process, server_id) do + query + |> where([p], p.gateway_id == ^server_id) + |> or_where([p], p.target_id == ^server_id) + end @spec by_gateway(Queryable.t, Server.idtb) :: Queryable.t @@ -591,7 
+493,7 @@ defmodule Helix.Process.Model.Process do @spec by_target(Queryable.t, Server.idtb) :: Queryable.t def by_target(query \\ Process, id), - do: where(query, [p], p.target_server_id == ^id) + do: where(query, [p], p.target_id == ^id) @spec by_file(Queryable.t, File.idtb) :: Queryable.t @@ -608,34 +510,21 @@ defmodule Helix.Process.Model.Process do def by_connection(query \\ Process, id), do: where(query, [p], p.connection_id == ^id) - @spec by_type(Queryable.t, String.t) :: + @spec by_type(Queryable.t, Process.type) :: Queryable.t - def by_type(query \\ Process, process_type), - do: where(query, [p], p.process_type == ^process_type) + def by_type(query \\ Process, type), + do: where(query, [p], p.type == ^type) - @spec by_state(Queryable.t, State.state) :: + @spec by_state(Queryable.t, :running | :paused) :: Queryable.t - def by_state(query \\ Process, state), - do: where(query, [p], p.state == ^state) + def by_state(query, :running), + do: where(query, [p], p.priority > 1) + def by_state(query, :paused), + do: where(query, [p], p.priority == 0) @spec not_targeting_gateway(Queryable.t) :: Queryable.t def not_targeting_gateway(query \\ Process), - do: where(query, [p], p.gateway_id != p.target_server_id) - - @spec related_to_server(Queryable.t, Server.idtb) :: - Queryable.t - @doc """ - Filter processes that are running on `server_id` or affect it - """ - def related_to_server(query \\ Process, id) do - query - |> join( - :inner, - [p], - m in MapServerToProcess, - m.process_id == p.process_id) - |> where([p, ..., m], m.server_id == ^id) - end + do: where(query, [p], p.gateway_id != p.target_id) end end diff --git a/lib/process/model/process/limitations.ex b/lib/process/model/process/limitations.ex deleted file mode 100644 index 701922de..00000000 --- a/lib/process/model/process/limitations.ex +++ /dev/null @@ -1,37 +0,0 @@ -defmodule Helix.Process.Model.Process.Limitations do - - use Ecto.Schema - import Ecto.Changeset - - @type t :: %__MODULE__{ - cpu: non_neg_integer | nil, - ram: non_neg_integer | nil, - dlk: non_neg_integer | nil, - ulk: non_neg_integer | nil - } - - @fields ~w/cpu ram dlk ulk/a - - @primary_key false - embedded_schema do - field :cpu, :integer - field :ram, :integer - field :dlk, :integer - field :ulk, :integer - end - - def changeset(limitations, params) do - limitations - |> cast(params, [:cpu, :ram, :dlk, :ulk]) - |> validate_number(:cpu, greater_than_or_equal_to: 0) - |> validate_number(:ram, greater_than_or_equal_to: 0) - |> validate_number(:dlk, greater_than_or_equal_to: 0) - |> validate_number(:ulk, greater_than_or_equal_to: 0) - end - - def to_list(lim) do - lim - |> Map.take(@fields) - |> :maps.to_list() - end -end \ No newline at end of file diff --git a/lib/process/model/process/map_server_to_process.ex b/lib/process/model/process/map_server_to_process.ex deleted file mode 100644 index be76e852..00000000 --- a/lib/process/model/process/map_server_to_process.ex +++ /dev/null @@ -1,24 +0,0 @@ -defmodule Helix.Process.Model.Process.MapServerToProcess do - - use Ecto.Schema - - alias Helix.Server.Model.Server - alias Helix.Process.Model.Process - - import Ecto.Changeset - - @primary_key false - schema "process_servers" do - field :server_id, Server.ID, - primary_key: true - field :process_id, Process.ID, - primary_key: true - field :process_type, :string - end - - def create_changeset(params) do - %__MODULE__{} - |> cast(params, [:server_id, :process_type]) - |> validate_required([:server_id, :process_type]) - end -end diff --git 
a/lib/process/model/process/resources.ex b/lib/process/model/process/resources.ex index 95caef21..7d1e81e4 100644 --- a/lib/process/model/process/resources.ex +++ b/lib/process/model/process/resources.ex @@ -1,142 +1,54 @@ -defmodule Helix.Process.Model.Process.Resources do +defmodule Helix.Process.Model.Process.Resources.Utils do - use Ecto.Schema - import Ecto.Changeset + alias Helix.Network.Model.Network - @type t :: %__MODULE__{ - cpu: non_neg_integer, - ram: non_neg_integer, - dlk: non_neg_integer, - ulk: non_neg_integer - } - - @type resourceable :: t | %{ - optional(:cpu) => non_neg_integer, - optional(:ram) => non_neg_integer, - optional(:dlk) => non_neg_integer, - optional(:ulk) => non_neg_integer - } - - @fields ~w/cpu ram dlk ulk/a - - @primary_key false - embedded_schema do - field :cpu, :integer, default: 0 - field :ram, :integer, default: 0 - field :dlk, :integer, default: 0 - field :ulk, :integer, default: 0 - end - - @spec changeset(t | Ecto.Changeset.t, resourceable) :: Ecto.Changeset.t - def changeset(resource, params) do - resource - |> cast(params, [:cpu, :ram, :dlk, :ulk]) - |> validate_number(:cpu, greater_than_or_equal_to: 0) - |> validate_number(:ram, greater_than_or_equal_to: 0) - |> validate_number(:dlk, greater_than_or_equal_to: 0) - |> validate_number(:ulk, greater_than_or_equal_to: 0) - end - - @spec cast(resourceable) :: t - def cast(input) do - struct(__MODULE__, input) - end - - @spec sum(t, resourceable) :: t - def sum(res, params) do - Map.merge(res, Map.take(params, @fields), fn _, v1, v2 -> v1 + v2 end) - end - - @spec sub(t, resourceable) :: t - def sub(res, params) do - Map.merge(res, Map.take(params, @fields), fn _, v1, v2 -> v1 - v2 end) - end - - @spec div(t, t | non_neg_integer) :: t - @doc """ - Divides each resource from `x` by the value passed. - - Alternatively, another Resources struct can be passed as argument and the - value of each resource will be divided by that struct's resources. - - To simplify and avoid errors, on the case of div'ing one struct by another, if - the second value is 0 and the first is 0, 0 is set for that resource; if the - second value is 0 and the first is not, it will be set to nil. 
- - ## Examples - a = %Resources{cpu: 0} - b = %Resources{cpu: 0} + @spec format_network(Network.idtb, term) :: + {Network.id, term} + def format_network(key = %Network.ID{}, value), + do: {key, value} + def format_network(key, value), + do: {Network.ID.cast!(key), value} +end - div(a, b) - # %Resources{cpu: 0, ...} +import Helix.Process.Resources - c = %Resources{cpu: 100} +resources Helix.Process.Model.Process.Resources do - div(c, b) - # %Resources{cpu: nil, ...} + alias Helix.Network.Model.Network + alias Helix.Process.Model.Process.Resources.Utils, as: ResourcesUtils + alias Helix.Process.Resources.Behaviour - d = %Resources{cpu: 20} - div(c, d) - # %Resources{cpu: 5, ...} - """ - def div(dividend = %__MODULE__{}, divisor = %__MODULE__{}) do - %__MODULE__{dividend| - cpu: safe_div(dividend.cpu, divisor.cpu), - ram: safe_div(dividend.ram, divisor.ram), - dlk: safe_div(dividend.dlk, divisor.dlk), - ulk: safe_div(dividend.ulk, divisor.ulk) + @type t :: + %{ + ram: number, + cpu: number, + dlk: %{Network.id => number}, + ulk: %{Network.id => number} } - end - def div(dividend = %__MODULE__{}, divisor) when is_integer(divisor) do - %__MODULE__{dividend| - cpu: Kernel.div(dividend.cpu, divisor), - ram: Kernel.div(dividend.ram, divisor), - dlk: Kernel.div(dividend.dlk, divisor), - ulk: Kernel.div(dividend.ulk, divisor) + @type map_t(type) :: + %{ + ram: type, + cpu: type, + dlk: %{Network.id => type}, + ulk: %{Network.id => type} } - end - - @spec mul(t, non_neg_integer) :: t - def mul(res, val) do - %__MODULE__{res| - cpu: res.cpu * val, - ram: res.ram * val, - dlk: res.dlk * val, - ulk: res.ulk * val} - end - - @spec min(t | nil, t | nil) :: t | nil - def min(first = %__MODULE__{}, second = %__MODULE__{}) do - %__MODULE__{first| - cpu: Kernel.min(first.cpu, second.cpu), - ram: Kernel.min(first.ram, second.ram), - dlk: Kernel.min(first.dlk, second.dlk), - ulk: Kernel.min(first.ulk, second.ulk)} - end - def min(nil, second), - do: second - def min(first, nil), - do: first + resource RAM, + behaviour: Behaviour.Default - @spec to_list(resourceable) :: [{:cpu | :ram | :dlk | :ulk, non_neg_integer}] - def to_list(res) do - res - |> Map.take(@fields) - |> :maps.to_list() - end + resource CPU, + behaviour: Behaviour.Default - @spec compare(t, t) :: :eq | :lt | :gt | :divergent - def compare(_a, _b) do - # FIXME: this interface is pure garbage - :eq - end + resource DLK, + behaviour: Behaviour.KV, + key: :network_id, + formatter: &ResourcesUtils.format_network/2, + mirror: :ulk - defp safe_div(dividend, divisor) when is_integer(divisor) and divisor > 0, - do: trunc(Float.ceil(dividend / divisor)) - defp safe_div(0, 0), - do: 0 - defp safe_div(_, _), - do: nil + resource ULK, + behaviour: Behaviour.KV, + key: :network_id, + formatter: &ResourcesUtils.format_network/2, + mirror: :dlk end diff --git a/lib/process/model/process/state.ex b/lib/process/model/process/state.ex deleted file mode 100644 index b8855388..00000000 --- a/lib/process/model/process/state.ex +++ /dev/null @@ -1,38 +0,0 @@ -defmodule Helix.Process.Model.Process.State do - - @behaviour Ecto.Type - - @type state :: :standby | :paused | :running | :complete - - @mappings %{ - 0 => :standby, - 1 => :paused, - 2 => :running, - 3 => :complete - } - - def type, do: :integer - - for {_, v} <- @mappings do - def cast(unquote(v)), - do: {:ok, unquote(v)} - def cast(unquote(Atom.to_string(v))), - do: {:ok, unquote(v)} - end - - def cast(_), - do: :error - - for {k, v} <- @mappings do - def load(unquote(k)), - do: {:ok, unquote(v)} - end - - 
for {k, v} <- @mappings do - def dump(unquote(v)), - do: {:ok, unquote(k)} - end - - def dump(_), - do: :error -end \ No newline at end of file diff --git a/lib/process/model/processable.ex b/lib/process/model/processable.ex index 0434afd1..72be1a20 100644 --- a/lib/process/model/processable.ex +++ b/lib/process/model/processable.ex @@ -1,29 +1,29 @@ defprotocol Helix.Process.Model.Processable do - alias Helix.Process.Model.Process - alias Helix.Process.Model.Process.State - - @type resource :: :cpu | :ram | :dlk | :ulk + # @type resource :: :cpu | :ram | :dlk | :ulk - @spec dynamic_resources(t) :: - [resource] - def dynamic_resources(data) + alias Helix.Event + alias Helix.Network.Model.Connection + alias Helix.Process.Model.Process - @spec conclusion(t, Process.t | Ecto.Changeset.t) :: - {[Process.t | Ecto.Changeset.t] | Process.t | Ecto.Changeset.t, [struct]} - def conclusion(data, process) + @type action :: + :delete + | :pause + | :resume + | :renice + | :restart - @spec state_change(t, Ecto.Changeset.t, State.state, State.state) :: - {[Process.t | Ecto.Changeset.t] | Process.t | Ecto.Changeset.t, [struct]} - def state_change(data, process, from, to) + @spec complete(t, Process.t) :: + {action, [Event.t]} + def complete(data, process) - @spec kill(t, Process.t | Ecto.Changeset.t, atom) :: - {[Process.t | Ecto.Changeset.t] | Process.t | Ecto.Changeset.t, [struct]} + @spec kill(t, Process.t, Process.kill_reason) :: + {action, [Event.t]} def kill(data, process, reason) - @spec minimum(t) :: - %{optional(State.state) => %{resource => non_neg_integer}} - def minimum(data) + @spec connection_closed(t, Process.t, Connection.t) :: + {action, [Event.t]} + def connection_closed(data, process, connection) @spec after_read_hook(term) :: t diff --git a/lib/process/model/top/allocator.ex b/lib/process/model/top/allocator.ex new file mode 100644 index 00000000..eea6549e --- /dev/null +++ b/lib/process/model/top/allocator.ex @@ -0,0 +1,252 @@ +defmodule Helix.Process.Model.TOP.Allocator do + + alias Helix.Server.Model.Server + alias Helix.Process.Model.Process + + @type shares :: number + + @spec identify_origin(Server.id, [Process.t]) :: + [Process.t] + defp identify_origin(server_id, processes) do + Enum.map(processes, fn process -> + local? = + if process.target_id == server_id do + false + else + true + end + + %{process| local?: local?} + end) + end + + @type allocated_process :: {Process.t | term, Process.Resources.t | term} + + @type allocation_successful :: + %{ + dropped: [Process.t], + allocated: [Process.t] + } + + @type allocation_failed :: + {:error, :resources, [Process.t]} + + @type allocation_result :: + {:ok, allocation_successful} + | allocation_failed + + @spec allocate(Server.id, Process.Resources.t, [Process.t], opts :: term) :: + allocation_result + def allocate(server_id, total_resources, processes, _opts \\ []) do + # forced_allocation? = opts[:force] || false + + # Assign the process' "identity" from the TOP's point of view. It may either + # be local (gateway_id == server_id) or remote (target_id == server_id). + processes = identify_origin(server_id, processes) + + # Calculates the static resources that should be allocated to each process + {static_resources_usage, allocated_processes} = static_allocation(processes) + + # Subtracts the total server resources with the allocated resources above, + # on the `static_allocation` step. + remaining_resources = + Process.Resources.sub(total_resources, static_resources_usage) + + # Performs the dynamic allocation to every process. 
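Throughout this new Allocator (and the Scheduler that follows), calls like Process.Resources.sum/2 and Process.Resources.sub/2 operate on the nested resource maps defined earlier: cpu/ram as plain numbers, dlk/ulk as per-network maps. The real implementation is generated by the `resources` DSL; the self-contained sketch below only illustrates, with simplified semantics, the per-resource and per-network merging those calls perform.

# Simplified sketch, not the generated Helix.Process.Resources module:
defmodule ResourcesSketch do
  def sum(a, b), do: merge(a, b, &+/2)
  def sub(a, b), do: merge(a, b, &-/2)

  # Applies `fun` to each pair of scalar values, recursing one level into the
  # per-network dlk/ulk maps.
  defp merge(a, b, fun) do
    Map.merge(a, b, fn
      _key, v1, v2 when is_map(v1) and is_map(v2) ->
        Map.merge(v1, v2, fn _net_id, x, y -> fun.(x, y) end)

      _key, v1, v2 ->
        fun.(v1, v2)
    end)
  end
end

# ResourcesSketch.sub(
#   %{cpu: 1000, ram: 512, dlk: %{"net" => 100}, ulk: %{"net" => 50}},
#   %{cpu: 300,  ram: 128, dlk: %{"net" => 20},  ulk: %{"net" => 0}}
# )
# => %{cpu: 700, ram: 384, dlk: %{"net" => 80}, ulk: %{"net" => 50}}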
The result + # (`allocated_processes`) already has the full allocation ready, i.e. it + # contains both the static and the dynamic allocations. + {dynamic_resources_usage, allocated_processes} = + dynamic_allocation(remaining_resources, allocated_processes) + + # Subtracts the resources remaining (after the static allocation) with the + # newly allocated dynamic resources. + remaining_resources = + Process.Resources.sub(remaining_resources, dynamic_resources_usage) + + # Now we'll take another pass, in order to give a change for processes to + # claim unused resources. This may happen when a resource is reserved to a + # process, but the process does not allocate it due to upper limitations + {remaining_resources_usage, allocated_processes} = + remaining_allocation(remaining_resources, allocated_processes) + + # Subtract again. Now we should be very close to 100% utilization. We'll + # check to make sure it didn't overflow - if it didn't, the allocation was + # successful. + remaining_resources = + Process.Resources.sub(remaining_resources, remaining_resources_usage) + + case overflow?(remaining_resources, allocated_processes) do + # No overflow, we did it! + false -> + + # Modify the Process model to include the `next_allocation` + allocated_processes = merge_allocation(allocated_processes) + + result = + %{ + dropped: [], # TODO + allocated: allocated_processes + } + + {:ok, result} + + # Allocated more than we could handle :( + {true, heaviest} -> + {:error, :resources, heaviest} + + end + end + + @spec merge_allocation([allocated_process]) :: + [Process.t] + defp merge_allocation(allocated_processes) do + Enum.map(allocated_processes, fn {process, new_alloc} -> + %{process| next_allocation: new_alloc} + end) + end + + @spec overflow?(Process.Resources.t, [allocated_process]) :: + {true, heaviest :: [Process.t]} + | false + defp overflow?(remaining_resources, allocated_processes) do + # Checks whether any of the resources are in overflow (usage > available) + overflow? = + Process.Resources.overflow?(remaining_resources, allocated_processes) + + # The Enum below is used to detect that, in case more than one resource is + # overflowed, the corresponding `heaviest` process is accumulated. This is + # used to inform the top-level all the heaviest processes that should be + # removed (if the `force` flag was passed as argument to the Allocator). + {overflow?, heaviest} = + Enum.reduce(overflow?, {false, []}, fn {_res, result}, {status, heaviest} -> + case result do + false -> + {status, heaviest} + + {true, heavy} -> + {true, heaviest ++ [heavy]} + end + end) + + if overflow? 
do + uniq_heaviest = Enum.uniq_by(heaviest, &(&1.process_id)) + + {true, uniq_heaviest} + else + false + end + end + + @spec static_allocation([Process.t]) :: + {allocated :: Process.Resources.t, [allocated_process]} + def static_allocation(processes) do + initial = Process.Resources.initial() + + Enum.reduce(processes, {initial, []}, fn process, {allocated, acc} -> + + # Calculates how many resources should be allocated statically + proc_static_allocation = Process.Resources.allocate_static(process) + + # Accumulates total static resources allocated by all processes + allocated = Process.Resources.sum(allocated, proc_static_allocation) + + # This 2-tuple associates the process to its static allocation + proc_alloc_info = [{process, proc_static_allocation}] + + {allocated, acc ++ proc_alloc_info} + end) + end + + @spec dynamic_allocation(Process.Resources.t, [allocated_process]) :: + {Process.Resources.t, [allocated_process]} + def dynamic_allocation(available_resources, allocated_processes) do + initial = Process.Resources.initial() + + {total_shares, process_shares} = + Enum.reduce(allocated_processes, {initial, []}, fn allocated_process, {shares, acc} -> + {process, proc_static_allocation} = allocated_process + + # Calculates number of shares the process should receive + proc_shares = Process.Resources.get_shares(process) + + # Accumulates total shares in use by the system + shares = Process.Resources.sum(shares, proc_shares) + + # This 3-tuple represents what is the process, how many static resources + # are allocated to it, and how many (dynamic) shares it should receive + proc_share_info = [{process, proc_static_allocation, proc_shares}] + + {shares, acc ++ proc_share_info} + end) + + # Based on the total shares selected, figure out how much resource each + # share shall receive + resource_per_share = + Process.Resources.resource_per_share(available_resources, total_shares) + + Enum.reduce(process_shares, {initial, []}, fn allocated_shared_proc, {total_alloc, acc} -> + {process, proc_static_allocation, proc_shares} = allocated_shared_proc + + # Allocates dynamic resources. "Naive" because it has not taken into + # consideration the process limitations yet + naive_dynamic_alloc = + Process.Resources.allocate_dynamic( + proc_shares, resource_per_share, process + ) + + limit = Process.get_limit(process) + + # Now we take the naive allocated amount and apply the process limitations + proc_dynamic_alloc = Process.Resources.min(naive_dynamic_alloc, limit) + + # Sums static and dynamic allocation, resulting on the final allocation + proc_allocation = + Process.Resources.allocate(proc_dynamic_alloc, proc_static_allocation) + + # Accumulate total alloc, in order to know how many resources were used + total_alloc = Process.Resources.sum(total_alloc, proc_dynamic_alloc) + + {total_alloc, acc ++ [{process, proc_allocation}]} + end) + end + + @spec remaining_allocation(Process.Resources.t, [allocated_process]) :: + {Process.Resources.t, [allocated_process]} + def remaining_allocation(available_resources, allocated_processes) do + # Exclude processes that have limits + # Note that this is wrong: it's possible that a process with limits would + # benefit from a second pass, in the case that one of its resources were + # limited, but the others weren't. We can safely skip this for now since + # we don't have this scenario, but this could change in the future. 
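As a toy illustration of the share math described in dynamic_allocation/2, and of why the remaining-allocation pass below exists, here is a self-contained sketch reduced to a single scalar resource; the real code works on full resource maps, adds the static allocation on top, and derives shares from process priorities.

defmodule DynamicAllocSketch do
  # processes: list of {name, shares, limit_or_nil}
  def allocate(available, processes) do
    total_shares =
      processes
      |> Enum.map(fn {_name, shares, _limit} -> shares end)
      |> Enum.sum()

    resource_per_share =
      if total_shares > 0, do: available / total_shares, else: 0

    Enum.map(processes, fn {name, shares, limit} ->
      naive = shares * resource_per_share
      {name, apply_limit(naive, limit)}
    end)
  end

  defp apply_limit(naive, nil), do: naive
  defp apply_limit(naive, limit), do: min(naive, limit)
end

# DynamicAllocSketch.allocate(900, [{:a, 3, nil}, {:b, 3, 100}, {:c, 3, nil}])
# => [a: 300.0, b: 100, c: 300.0]
#
# Process :b is capped by its limit and leaves 200 units unused; that surplus
# is what remaining_allocation/2 redistributes among the unlimited processes.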
+ processes = + Enum.filter(allocated_processes, fn {process, _} -> + Process.get_limit(process) == %{} + end) + + skipped_processes = allocated_processes -- processes + + # Make a second pass on dynamic allocation, now only with processes that are + # not being limited. + {remaining_resources, processes} = + dynamic_allocation(available_resources, processes) + + # Reorder the processes in the same order they were passed originally + # Useful for tests, probably irrelevant to production code. Maybe macro it. + ordered_processes = + Enum.reduce(allocated_processes, [], fn allocated = {process, _}, acc -> + + if allocated in skipped_processes do + acc ++ [allocated] + else + proc = + Enum.find( + processes, fn {p, _} -> p.process_id == process.process_id end + ) + + acc ++ [proc] + end + end) + + {remaining_resources, ordered_processes} + end +end diff --git a/lib/process/model/top/scheduler.ex b/lib/process/model/top/scheduler.ex new file mode 100644 index 00000000..e264c38e --- /dev/null +++ b/lib/process/model/top/scheduler.ex @@ -0,0 +1,200 @@ +defmodule Helix.Process.Model.TOP.Scheduler do + + alias Ecto.Changeset + alias Helix.Process.Model.Process + + @type forecast :: + %{ + next: {Process.t, Process.time_left} | nil, + paused: [Process.t], + completed: [Process.t], + running: [Process.t] + } + + @spec simulate(Process.t) :: + {:completed, Process.t} + | {:running, Process.t} + | {:paused, Process.t} + def simulate(process = %{state: :paused}), + do: {:paused, process} + def simulate(process = %{state: :waiting_allocation}) do + processed = process.processed || Process.Resources.initial() + + process = + %{process| + processed: processed, + state: :running + } + + {:running, process} + end + + def simulate(process) do + # Based on the last checkpoint, figure out how long this simulation should + # run + simulation_duration = get_simulation_duration(process) + + # Create an empty resource map in case it never went through a checkpoint + processed = process.processed || Process.Resources.initial() + + # Convert allocation to millisecond + alloc = Process.Resources.map(process.l_allocated, &(&1 / 1000)) + + # Calculate how many resource units have been processed since the last + # checkpoint. This is the amount that should be added to the process. + moar_processed = + Process.Resources.map(alloc, &(&1 * simulation_duration)) + + # Sum the previous `processed` and the amount that has been processed since + # the last checkpoint + new_processed = Process.Resources.sum(processed, moar_processed) + + process = %{process| processed: new_processed} + + # If all resources in `new_processed` are equal or superior to their + # corresponding resource on the `objective`, then the process is finished. + completed? = Process.Resources.completed?(new_processed, process.objective) + + if completed? do + {:completed, process} + else + {:running, process} + end + end + + @spec forecast([Process.t]) :: + forecast + def forecast(processes) do + initial_acc = %{next: nil, paused: [], completed: [], running: []} + + processes + |> Enum.map(&estimate_completion/1) + |> Enum.reduce(initial_acc, fn {process, seconds_left}, acc -> + + case seconds_left do + # Process will never complete; (paused or hasn't got any allocation yet) + :infinity -> + %{acc| paused: acc.paused ++ [process]} + + # Process has already reached its objective; it's completed. + -1 -> + %{acc| completed: acc.completed ++ [process]} + + # Process would need to run for almost zero seconds... it's completed. 
+ 0.0 -> + %{acc| completed: acc.completed ++ [process]} + + # Add the process to the list of running processes, and maybe select it + # to be marked as `next`, depending on whether it would be completed + # first. + seconds -> + %{acc| + running: acc.running ++ [process], + next: sort_next_completion(acc.next, {process, seconds}) + } + end + end) + end + + @spec estimate_completion(Process.t) :: + {Process.t, Process.time_left | -1 | :infinity} + def estimate_completion(process) do + process + |> simulate() + |> seconds_for_completion() + end + + @spec checkpoint(Process.t) :: + {true, Process.changeset} + | false + def checkpoint(%{l_reserved: alloc, next_allocation: alloc, local?: true}), + do: false + def checkpoint(%{r_reserved: alloc, next_allocation: alloc, local?: false}), + do: false + def checkpoint(proc = %{next_allocation: next_allocation, local?: true}) do + changeset = + proc + |> Changeset.change() + |> Changeset.put_change(:l_reserved, next_allocation) + |> Changeset.put_change(:last_checkpoint_time, DateTime.utc_now()) + + changeset = + if proc.processed == Process.Resources.initial() do + changeset + else + Changeset.force_change(changeset, :processed, proc.processed) + end + + {true, changeset} + end + def checkpoint(proc = %{next_allocation: next_allocation, local?: false}) do + {_, proc} = simulate(proc) + + changeset = + proc + |> Changeset.change() + |> Changeset.put_change(:r_reserved, next_allocation) + |> Changeset.put_change(:last_checkpoint_time, DateTime.utc_now()) + |> Changeset.force_change(:processed, proc.processed) + + {true, changeset} + end + + @spec get_simulation_duration(Process.t) :: + pos_integer + defp get_simulation_duration(process) do + now = DateTime.utc_now() + last_update = Process.get_last_update(process) + + DateTime.diff(now, last_update, :millisecond) + end + + @spec seconds_for_completion({:paused | :completed | :running, Process.t}) :: + {Process.t, Process.time_left | -1 | :infinity} + defp seconds_for_completion({:paused, process}), + do: {process, :infinity} + defp seconds_for_completion({:completed, process}), + do: {process, -1} + defp seconds_for_completion({:running, process}) do + # This is the amount of work left for completion of the process + remaining_work = Process.Resources.sub(process.objective, process.processed) + + next_allocation = process.next_allocation || process.l_allocated + + # Convert allocation to millisecond + alloc = Process.Resources.map(next_allocation, &(&1 / 1000)) + + # Figure out the work left in order to complete each resource + work_left = Process.Resources.div(remaining_work, alloc) + + # Return a raw number (float) representing how many seconds it would need + # to complete the resource with more work left to do. + # So if this process needs 10 seconds to complete its CPU objective, and 30s + # to complete the DLK objective, it will return 30s. + estimated_seconds = + work_left + |> Process.Resources.max() + |> Kernel./(1000) # From millisecond to second + |> Float.round(1) + + {process, estimated_seconds} + end + + @spec sort_next_completion(nil | {Process.t, term}, {Process.t, term}) :: + {Process.t, term} + defp sort_next_completion(nil, {process, seconds}), + do: {process, seconds} + defp sort_next_completion(current, candidate) do + {_, cur_seconds} = current + {_, candidate_seconds} = candidate + + # If the currently selected process is bound to finish before the candidate, + # then we must keep the current selection and reject the candidate. 
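To make the arithmetic in simulate/1 and seconds_for_completion/1 concrete, here is a worked example with invented numbers (a single dlk resource, values chosen for easy math):

#   objective             = %{dlk: 6_000}     (units to process)
#   processed so far      = %{dlk: 1_000}
#   l_allocated           = %{dlk: 100}       (units per second)
#   last checkpoint       = 20_000 ms ago
#
# simulate/1:
#   alloc per millisecond = 100 / 1000        = 0.1
#   moar_processed        = 0.1 * 20_000      = 2_000
#   new_processed         = 1_000 + 2_000     = 3_000   (< 6_000, so still :running)
#
# seconds_for_completion/1, using the simulated process:
#   remaining_work        = 6_000 - 3_000     = 3_000
#   work_left (ms)        = 3_000 / 0.1       = 30_000
#   estimated_seconds     = 30_000 / 1_000    = 30.0
#
# forecast/1 would therefore keep this process under `running` and, unless
# another process finishes sooner, pick it as `next` with ~30 seconds left.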
+ # Otherwise, the candidate will finish first, and must be selected. + if cur_seconds < candidate_seconds do + current + else + candidate + end + end +end diff --git a/lib/process/process.ex b/lib/process/process.ex index 3b016dd5..e2710dc1 100644 --- a/lib/process/process.ex +++ b/lib/process/process.ex @@ -10,12 +10,12 @@ defmodule Helix.Process do # Imports all sub-modules that, together, will define the Process. import Helix.Process.Executable - import Helix.Process.Objective + import Helix.Process.Resourceable import Helix.Process.Processable import Helix.Process.Viewable # Static types - @type resource_usage :: Helix.Process.Objective.resource_usage + @type resource_usage :: Helix.Process.Resourceable.resource_usage # Custom types @type executable_error :: __MODULE__.Executable.executable_error @@ -34,21 +34,33 @@ defmodule Helix.Process do Returns the process type. """ def get_process_type, - do: @process_type |> to_string() + do: @process_type end end end @doc """ - `set_objective` will pass the given params to `Process.Objective.calculate/2`, - which will use its own flow to specify the required objectives the process - should need for each hardware resource. + `get_resources` will pass the given params to `Process.Resourceable`, which + will use its own flow to specify the required objectives the process should + need for each hardware resource, as well as static and dynamic resource + allocation usage. """ - defmacro set_objective(params) do + defmacro get_resources(params) do quote bind_quoted: [params: params] do - factors = __MODULE__.Objective.get_factors(params) - __MODULE__.Objective.calculate(params, factors) + factors = __MODULE__.Resourceable.get_factors(params) + + objective = __MODULE__.Resourceable.calculate(params, factors) + static = __MODULE__.Resourceable.static(params, factors) + l_dynamic = __MODULE__.Resourceable.l_dynamic(params, factors) + r_dynamic = __MODULE__.Resourceable.r_dynamic(params, factors) + + %{ + objective: objective, + static: static, + l_dynamic: l_dynamic, + r_dynamic: r_dynamic + } end end diff --git a/lib/process/processable.ex b/lib/process/processable.ex index 1136925a..53cb53b3 100644 --- a/lib/process/processable.ex +++ b/lib/process/processable.ex @@ -11,9 +11,7 @@ defmodule Helix.Process.Processable do side-effects. """ - import HELL.Macros - - alias Helix.Process.Model.Process + alias Helix.Event @doc """ Macro for implementation of the Processable protocol. @@ -27,74 +25,25 @@ defmodule Helix.Process.Processable do # Fallbacks - on_kill(_data, _reason) do - {:ok, []} + on_kill(_process, _data, _reason) do + {:delete, []} + end + + on_connection_closed(_process, _data, _connection) do + {:delete, []} end @doc false def after_read_hook(data), do: data - # Required by current TOP API - - @doc false - def state_change(_, process, _, _), - do: {process, []} - - @doc false - def conclusion(data, process), - do: state_change(data, process, :running, :complete) - # Utils - docp """ - Makes available, within the scope of all public Processable methods, the - variable `process`, which is of type `Process.t`. This is required since - because, for legacy reasons, TOP feeds the Changeset as argument, making - the handling of `process` a lot harder. - - Well, this function -- and the way it's called from this module's macros - -- ensures that a Processable method always deal with `Process.t`. as it - should be. 
- """ - defp unchange(process = %Process{}), - do: process - defp unchange(process = %Ecto.Changeset{}), - do: Ecto.Changeset.apply_changes(process) - - docp """ - Flags the process for deletion. - - Current TOP needs that we return a process changeset with `action` set - to `:delete`, so that's what we do here. - """ - defp delete(process = %Process{}) do - process - |> Ecto.Changeset.change() - |> delete() - end - defp delete(process = %Ecto.Changeset{}), - do: %{process| action: :delete} - - # Result handlers - - docp """ - Called when `on_completion` finishes. Currently it supports: - - - `{:ok, events :: [Event.t]}`: Process is flagged for deletion and the - corresponding events are emitted. - """ - defp handle_completion_result({:ok, events}, process), - do: {delete(process), events} + defp add_fingerprint({action, events}, %{process_id: process_id}) do + events = Enum.map(events, &(Event.set_process_id(&1, process_id))) - docp """ - Called when `on_kill` finishes. Currently it supports: - - - `{:ok, events :: [Event.t]}`: Process is flagged for deletion and the - corresponding events are emitted. - """ - defp handle_kill_result({:ok, events}, process), - do: {delete(process), events} + {action, events} + end end end @@ -103,17 +52,13 @@ defmodule Helix.Process.Processable do @doc """ Defines what happens should the process get killed. Reason is also passed as argument. - - The result will be interpreted by `handle_kill_result/2`. """ - defmacro on_kill(data, reason \\ quote(do: _), do: block) do + defmacro on_kill(process, data, reason \\ quote(do: _), do: block) do quote do - def kill(unquote(data), process, unquote(reason)) do - var!(process) = unchange(process) - + def kill(unquote(data), p = unquote(process), unquote(reason)) do unquote(block) - |> handle_kill_result(var!(process)) + |> add_fingerprint(p) end end @@ -121,17 +66,24 @@ defmodule Helix.Process.Processable do @doc """ Defines what should happen when the process completes (finishes). - - The result will be interpreted by `handle_completion_result/2`. """ - defmacro on_completion(data, do: block) do + defmacro on_completion(process, data, do: block) do quote do - def state_change(unquote(data), process, _, :complete) do - var!(process) = unchange(process) + def complete(unquote(data), p = unquote(process)) do + unquote(block) + |> add_fingerprint(p) + end + + end + end + + defmacro on_connection_closed(process, data, connection, do: block) do + quote do + def connection_closed(unquote(data), p = unquote(process), unquote(connection)) do unquote(block) - |> handle_completion_result(var!(process)) + |> add_fingerprint(p) end end diff --git a/lib/process/public/index.ex b/lib/process/public/index.ex index 158a470d..0180eff2 100644 --- a/lib/process/public/index.ex +++ b/lib/process/public/index.ex @@ -7,12 +7,12 @@ defmodule Helix.Process.Public.Index do @type index :: %{ - owned: owned_process, - targeting: targeting_process + local: [local_process], + remote: [remote_process] } - @type owned_process :: [map] - @type targeting_process :: [map] + @type local_process :: map + @type remote_process :: map @spec index(Server.id, Entity.id) :: index @@ -25,23 +25,23 @@ defmodule Helix.Process.Public.Index do the return of this function is already rendered and ready for the client. 
""" def index(server_id, entity_id) do - processes_on_server = ProcessQuery.get_processes_on_server(server_id) + processes = ProcessQuery.get_processes_on_server(server_id) - processes_targeting_server = - ProcessQuery.get_processes_targeting_server(server_id) + local_processes = Enum.filter(processes, &(&1.gateway_id == server_id)) + remote_processes = processes -- local_processes - rendered_processes_on_server = - Enum.map(processes_on_server, fn process -> - ProcessView.render(process.process_data, process, server_id, entity_id) + rendered_local_processes = + Enum.map(local_processes, fn process -> + ProcessView.render(process.data, process, server_id, entity_id) end) - rendered_processes_targeting_server = - Enum.map(processes_targeting_server, fn process -> - ProcessView.render(process.process_data, process, server_id, entity_id) + rendered_remote_processes = + Enum.map(remote_processes, fn process -> + ProcessView.render(process.data, process, server_id, entity_id) end) %{ - owned: rendered_processes_on_server, - targeting: rendered_processes_targeting_server + local: rendered_local_processes, + remote: rendered_remote_processes } end end diff --git a/lib/process/public/process.ex b/lib/process/public/process.ex new file mode 100644 index 00000000..07c2d014 --- /dev/null +++ b/lib/process/public/process.ex @@ -0,0 +1,8 @@ +defmodule Helix.Process.Public.Process do + + alias Helix.Process.Model.Process + alias Helix.Process.Action.Flow.Process, as: ProcessFlow + + def kill(process = %Process{}, reason), + do: ProcessFlow.signal(process, :SIGKILL, %{reason: reason}) +end diff --git a/lib/process/public/view/helper.ex b/lib/process/public/view/helper.ex index 05e15ce1..e7566ff4 100644 --- a/lib/process/public/view/helper.ex +++ b/lib/process/public/view/helper.ex @@ -77,7 +77,7 @@ defmodule Helix.Process.Public.View.Process.Helper do state: to_string(process.state), network_id: network_id, target_ip: target_ip, - type: to_string(process.process_type) + type: to_string(process.type) } end @@ -125,8 +125,8 @@ defmodule Helix.Process.Public.View.Process.Helper do ProcessView.progress defp build_progress(process = %Process{}) do completion_date = - if process.estimated_time do - ClientUtils.to_timestamp(process.estimated_time) + if process.completion_date do + ClientUtils.to_timestamp(process.completion_date) else nil end @@ -152,7 +152,7 @@ defmodule Helix.Process.Public.View.Process.Helper do @spec get_target_ip(Process.t) :: String.t defp get_target_ip(process = %Process{}) do - case CacheQuery.from_server_get_nips(process.target_server_id) do + case CacheQuery.from_server_get_nips(process.target_id) do {:ok, nips} -> nips |> Enum.find(&(&1.network_id == process.network_id)) diff --git a/lib/process/query/process.ex b/lib/process/query/process.ex index 234937c6..57f33a83 100644 --- a/lib/process/query/process.ex +++ b/lib/process/query/process.ex @@ -24,7 +24,7 @@ defmodule Helix.Process.Query.Process do defdelegate fetch(id), to: ProcessInternal - @spec get_running_processes_of_type_on_server(Server.idt, String.t) :: + @spec get_running_processes_of_type_on_server(Server.idt, Process.type) :: [Process.t] @doc """ Fetches processes running on `gateway` that are of `type` @@ -46,48 +46,13 @@ defmodule Helix.Process.Query.Process do @spec get_processes_on_server(Server.idt) :: [Process.t] @doc """ - Fetches processes running on `gateway` + Fetches *all* processes running on the given server. 
- ### Examples - - iex> get_processes_on_server("aa::bb") - [%Process{}, %Process{}, %Process{}, %Process{}, %Process{}] + Returns both local and remote processes. """ defdelegate get_processes_on_server(gateway_id), to: ProcessInternal - @spec get_processes_targeting_server(Server.idt) :: - [Process.t] - @doc """ - Fetches remote processes affecting `gateway` - - Note that this will **not** include processes running on `gateway` even if - they affect it - - ### Examples - - iex> get_processes_targeting_server("aa::bb") - [%Process{}] - """ - defdelegate get_processes_targeting_server(gateway_id), - to: ProcessInternal - - @spec get_processes_of_type_targeting_server(Server.idt, String.t) :: - [Process.t] - @doc """ - Fetches remote processes of type `type` affecting `gateway` - - Note that this will **not** include processes running on `gateway` even if - they affect it - - ### Examples - - iex> get_processes_of_type_targeting_server("aa::bb", "cracker") - [%Process{}, %Process{}] - """ - defdelegate get_processes_of_type_targeting_server(gateway_id, type), - to: ProcessInternal - @spec get_processes_on_connection(Connection.idt) :: [Process.t] @doc """ @@ -101,10 +66,10 @@ defmodule Helix.Process.Query.Process do defdelegate get_processes_on_connection(connection), to: ProcessInternal - get_custom "file_download", %{file_id: file_id}, + get_custom :file_download, %{file_id: file_id}, do: &(&1.file_id == file_id) - get_custom "file_upload", %{file_id: file_id}, + get_custom :file_upload, %{file_id: file_id}, do: &(&1.file_id == file_id) @spec get_custom(Process.type, Server.idt, meta :: map) :: @@ -120,7 +85,7 @@ defmodule Helix.Process.Query.Process do The generated code is something like: ``` - def get_custom(type = "process_type", server_id, %{file_id: file_id}) do + def get_custom(type = :process_type, server_id, %{file_id: file_id}) do server_id |> get_running_processes_of_type_on_server(type) |> Enum.fiter(&(&1.file_id == file_id)) diff --git a/lib/process/query/top.ex b/lib/process/query/top.ex new file mode 100644 index 00000000..cbd6a533 --- /dev/null +++ b/lib/process/query/top.ex @@ -0,0 +1,48 @@ +defmodule Helix.Process.Query.TOP do + + alias Helix.Hardware.Query.Motherboard, as: MotherboardQuery + alias Helix.Server.Model.Server + alias Helix.Server.Query.Server, as: ServerQuery + alias Helix.Process.Model.Process + + @spec load_top_resources(Server.idt) :: + Process.Resources.t + def load_top_resources(server = %Server{}) do + resources = + server.motherboard_id + |> MotherboardQuery.fetch() + |> MotherboardQuery.resources() + + {server_dlk, server_ulk} = + Enum.reduce( + resources.net, + {%{}, %{}}, + fn {network, %{downlink: dlk, uplink: ulk}}, {acc_dlk, acc_ulk} -> + + acc_dlk = + %{} + |> Map.put(network, dlk) + |> Map.merge(acc_dlk) + + acc_ulk = + %{} + |> Map.put(network, ulk) + |> Map.merge(acc_ulk) + + {acc_dlk, acc_ulk} + end) + + %{ + cpu: resources.cpu, + ram: resources.ram, + dlk: server_dlk, + ulk: server_ulk + } + end + + def load_top_resources(server_id = %Server.ID{}) do + server_id + |> ServerQuery.fetch() + |> load_top_resources() + end +end diff --git a/lib/process/objective.ex b/lib/process/resourceable.ex similarity index 55% rename from lib/process/objective.ex rename to lib/process/resourceable.ex index fd4d09db..f64b82e4 100644 --- a/lib/process/objective.ex +++ b/lib/process/resourceable.ex @@ -1,7 +1,16 @@ -defmodule Helix.Process.Objective do +defmodule Helix.Process.Resourceable do @moduledoc """ - `Process.Objective` is a DSL to calculate how 
many resources, for each type of - hardware resource, a process should use. + # Resourceable + + `Process.Resourceable` is a DSL to calculate how many resources, for each type + of hardware resource, a process should use. This usage involves: + + - Figuring out the process' objective, the total amount of work a process + should perform before being deemed completed. + - How many resources the project should allocate statically, whether it's + paused or running. + - What resources can be dynamically allocated, according to the server's total + available resources. It builds upon `Helix.Factor` and its `FactorClient` API, which will efficiently retrieve all data you need to figure out the correct resource @@ -9,6 +18,8 @@ defmodule Helix.Process.Objective do Once you have the factors, each resource will be called: + ### Objective + - cpu (Processor usage) - ram (Memory usage) - dlk (Downlink usage) @@ -20,15 +31,20 @@ defmodule Helix.Process.Objective do These resource blocks should return either `nil` or an integer that represents how much the process should work - its objectives. + ### Allocation: + + - static(params, factors) -- Specifies static resource allocation + - dynamic(params, factors) -- List of dynamically allocated resources + The resource blocks argument is the `params` specified at Process's top-level `objective/n`. On top of that, within the block scope you have access to the `f` variable, which is a map containing all factors returned from the `get_factors` function you defined beforehand. - ### Usage example + # Usage example ``` - process_objective do + resourceable do @type params :: %{type: :download | :upload} @@ -52,6 +68,21 @@ defmodule Helix.Process.Objective do # Safety fallbacks (see section below) dlk(%{type: :upload}) ulk(%{type: :download}) + + # Static allocation + + static do + %{paused: %{ram: 50}} + end + + # Dynamic allocation + dynamic(%{type: :download}) do + [:dlk] + end + + dynamic(%{type: :upload}) do + [:ulk] + end end ``` @@ -76,7 +107,7 @@ defmodule Helix.Process.Objective do @resources [:dlk, :ulk, :cpu, :ram] @doc """ - We have to `use` `Helix.Process.Objective` so we can verify + We have to `use` Resourceable so we can perform some compile-time checks. """ defmacro __using__(_args) do quote do @@ -107,28 +138,51 @@ defmodule Helix.Process.Objective do # Declares handlers for unused resources (only the ones who were not # defined; see "Safety Fallbacks" section on moduledoc for more info.) - for resource <- unhandled_resources do + fallback_objective = + for resource <- unhandled_resources do + quote do + + @doc false + def calculate(unquote(resource), _, _), + do: 0 + + end + end + + # Fallbacks in case the user did not specify static and dynamic allocations + fallback_allocations = quote do + @doc false + def static(_, _), + do: %{} @doc false - def calculate(unquote(resource), _, _), - do: 0 + def l_dynamic(_, _), + do: [] + @doc false + def r_dynamic(_, _), + do: [] + + @doc false + def set_network(_, _), + do: nil end - end + + [fallback_allocations, fallback_objective] end @doc """ - Top-level macro for `Process.Objective`. + Top-level macro for `Process.Resourceable`. Automatically imports `Helix.Factor.Client`; also defines the `calculate/2` flow which will be called from `Helix.Process`. 
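  As a rough sketch (hypothetical numbers; the actual values come from the
  `cpu`/`ram`/`dlk`/`ulk` blocks and the process' network), the objectives map
  returned by `calculate/2` looks like:

  ```
  %{
    cpu: 100,
    dlk: %{network_id => 5_000}
  }
  ```

  Resources whose usage is zero or whose network map is empty are dropped from
  the result.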
""" - defmacro process_objective(do: block) do + defmacro resourceable(do: block) do quote location: :keep do - defmodule Objective do + defmodule Resourceable do - use Helix.Process.Objective + use Helix.Process.Resourceable import Helix.Factor.Client @@ -136,7 +190,7 @@ defmodule Helix.Process.Objective do # because it could be defined multiple times, raising dialyzer's # overloaded contract warning. @spec calculate(atom, params, factors) :: - Helix.Process.Objective.resource_usage | term # elixir-lang 6426 + Helix.Process.Resourceable.resource_usage | term # elixir-lang 6426 @spec calculate(params, factors) :: objectives :: map @@ -147,13 +201,42 @@ defmodule Helix.Process.Objective do It removes any non-objective (when required resource usage is 0). """ def calculate(params, factors) do + network_id = set_network(params, factors) + + {dlk, ulk} = + if network_id do + dlk = calculate(:dlk, params, factors) + ulk = calculate(:ulk, params, factors) + + dlk = Map.put(%{}, network_id, dlk) + ulk = Map.put(%{}, network_id, ulk) + + dlk = + dlk + |> Enum.filter(fn {_net_id, val} -> + is_number(val) && val > 0 + end) + |> Map.new() + + ulk = + ulk + |> Enum.filter(fn {_net_id, val} -> + is_number(val) && val > 0 + end) + |> Map.new() + + {dlk, ulk} + else + {%{}, %{}} + end + %{ cpu: calculate(:cpu, params, factors), ram: calculate(:ram, params, factors), - dlk: calculate(:dlk, params, factors), - ulk: calculate(:ulk, params, factors) + dlk: dlk, + ulk: ulk } - |> Enum.reject(fn {_, total} -> total == nil end) # Test me + |> Enum.reject(fn {_, total} -> total == %{} end) |> Enum.reject(fn {_, total} -> total == 0 end) |> Map.new() end @@ -176,7 +259,22 @@ defmodule Helix.Process.Objective do do: set_resource(unquote(resource), fallback) end - # Actually generate the resources' macros + # Special macro used to determine the process' network + defmacro network(params, do: block) do + quote do + + def set_network(unquote(params), factors) do + # Special variable `f` holds previously calculated `factors` + var!(f) = factors + + var!(f) # Mark as used + + unquote(block) + end + + end + end + docp """ Generates the macros for each resource. 
@@ -200,4 +298,64 @@ defmodule Helix.Process.Objective do end end + + defmacro static(params, do: block), + do: set_static(params, block) + defmacro static(do: block), + do: set_static(quote(do: _params), block) + + defp set_static(params, block) do + quote do + + def static(unquote(params), factors) do + # Assigns variable `f` to caller's scope + var!(f) = factors + + var!(f) # Mark as used + + unquote(block) + end + + end + end + + defmacro r_dynamic(params, do: block), + do: set_r_dynamic(params, block) + defmacro r_dynamic(do: block), + do: set_r_dynamic(quote(do: _params), block) + + defp set_r_dynamic(params, block) do + quote do + + def r_dynamic(unquote(params), factors) do + # Assigns variable `f` to caller's scope + var!(f) = factors + + var!(f) # Mark as used + + unquote(block) + end + + end + end + + defmacro dynamic(params, do: block), + do: set_dynamic(params, block) + defmacro dynamic(do: block), + do: set_dynamic(quote(do: _params), block) + + defp set_dynamic(params, block) do + quote do + + def l_dynamic(unquote(params), factors) do + # Assigns variable `f` to caller's scope + var!(f) = factors + + var!(f) # Mark as used + + unquote(block) + end + + end + end end diff --git a/lib/process/resources.ex b/lib/process/resources.ex new file mode 100644 index 00000000..8ce9f1d8 --- /dev/null +++ b/lib/process/resources.ex @@ -0,0 +1,360 @@ +defmodule Helix.Process.Resources do + + alias HELL.Utils + + defmacro __using__(_) do + quote do + + import Helix.Process.Resources + + Module.register_attribute( + __MODULE__, + :resources, + accumulate: true, + persist: true + ) + + @before_compile unquote(__MODULE__) + + end + end + + defmacro __before_compile__(_) do + quote do + + alias Helix.Process.Model.Process + alias Helix.Process.Model.TOP + + # Maps the resource (name) to its module. + @res_modules ( + Enum.reduce(@resources, %{}, fn resource, acc -> + + %{} + |> Map.put(resource, get_resource_module(__MODULE__, resource)) + |> Map.merge(acc) + end) + ) + + @spec map(t, function) :: + map_t(term) + def map(res_a, fun), + do: dispatch(:map, res_a, [fun]) + + @spec reduce(t, term, function) :: + term + def reduce(resource, initial, function), + do: dispatch(:reduce, resource, [initial, function]) + + @spec initial :: + t + def initial, + do: dispatch_create :initial + + @spec format(t) :: + t + def format(resources) do + # Make sure our keys are valid, and all keys are defined on the resource + # (If any key is missing, it will be populated to its initial value) + resources = prepare(resources) + + # Now that we've prepared the resource, we can dispatch to each + # resource's own implementation, which will take care of formatting + # themselves. + dispatch(:format, resources) + end + + @spec reject_empty(t) :: + t | %{} + def reject_empty(resources) do + Enum.reject(resources, fn {res, val} -> + val == call_resource(res, :initial, []) + end) + |> Map.new() + end + + @spec prepare(t) :: + t + def prepare(resources) do + # First and foremost, we must ensure that all keys have been transformed + # into atoms. If they came from the DB, they will be a string. 
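# Note: `String.to_existing_atom/1` is used below on purpose, so an unknown
# key raises instead of leaking new atoms at runtime.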
+ resources = + Enum.reduce(resources, %{}, fn {key, val}, acc -> + valid_key = is_atom(key) && key || String.to_existing_atom(key) + + %{} + |> Map.put(valid_key, val) + |> Map.merge(acc) + end) + + # Fill up empty/undefined/missing resources with their initial values + missing_resources = @resources -- Map.keys(resources) + + Enum.reduce(missing_resources, resources, fn res, acc -> + initial = call_resource(res, :initial, []) + + Map.put(acc, res, initial) + end) + end + + @spec sum(t, t) :: + t + def sum(res_a, res_b), + do: dispatch_merge(:sum, res_a, res_b) + + @spec sub(t, t) :: + t + def sub(res_a, res_b), + do: dispatch_merge(:sub, res_a, res_b) + + @spec mul(t, t) :: + t + def mul(res_a, res_b), + do: dispatch_merge(:mul, res_a, res_b) + + @spec div(t, t) :: + t + def div(res_a, res_b), + do: dispatch_merge(:div, res_a, res_b) + + @spec get_shares(Process.t) :: + t + def get_shares(process), + do: dispatch_create :get_shares, [process] + + @spec resource_per_share(t, t) :: + t + def resource_per_share(resources, shares), + do: dispatch_merge :resource_per_share, resources, shares + + @spec allocate_static(Process.t) :: + t + def allocate_static(process), + do: dispatch_create :allocate_static, [process] + + @spec allocate_dynamic(t, t, Process.t) :: + t + def allocate_dynamic(shares, res_per_share, process), + do: dispatch_merge :allocate_dynamic, shares, res_per_share, [process] + + @spec allocate(t, t) :: + t + def allocate(dynamic_alloc, static_alloc), + do: dispatch_merge :allocate, dynamic_alloc, static_alloc + + @spec completed?(t, t) :: + boolean + def completed?(processed, objective) do + :completed? + |> dispatch_merge(processed, objective) + |> reduce(true, fn acc, v -> acc && v || false end) + |> Enum.all?(fn {_res, status} -> status == true end) + end + + @spec overflow?(t, [TOP.Allocator.allocated_process]) :: + map_t({true, Process.t} | false) + def overflow?(resources, processes), + do: dispatch(:overflow?, resources, [processes]) + + @spec mirror(t) :: + map_t(Process.resource) + def mirror(resources) do + Enum.reduce(resources, %{}, fn {res, val}, acc -> + mirror_res = call_resource(res, :mirror, []) + + %{} + |> Map.put(mirror_res, val) + |> Map.merge(acc) + end) + end + + @spec max(t) :: + number + def max(resources) do + resources + |> reduce(0, fn acc, v -> max(acc, v) end) + + # Select highest usage among all resource + |> Enum.sort_by(fn {_res, max} -> max end) + + # Make sure to return only the *usage* of the highest resource + |> List.last() + |> elem(1) + end + + @spec min(t, t) :: + t | %{} + def min(res1, res2) do + :op_map + |> dispatch_merge(res1, res2, [&min/2]) + |> Enum.reject(fn {res, val} -> + val == call_resource(res, :initial, []) + end) + |> Map.new() + end + end + end + + defmacro dispatch(method, resources, params \\ quote(do: [])) do + quote do + Enum.reduce(@resources, %{}, fn resource_name, acc -> + + resource = Map.fetch!(unquote(resources), resource_name) + params = [resource] ++ unquote(params) + + result = call_resource(resource_name, unquote(method), params) + + %{} + |> Map.put(resource_name, result) + |> Map.merge(acc) + end) + end + end + + defmacro dispatch_merge(method, res_a, res_b, params \\ quote(do: [])) do + quote do + unquote(res_a) + |> Map.merge(Map.take(unquote(res_b), @resources), fn resource, v1, v2 -> + call_resource(resource, unquote(method), [v1, v2] ++ unquote(params)) + end) + end + end + + defmacro dispatch_create(method, params \\ quote(do: [])) do + quote do + Enum.reduce(@resources, %{}, fn resource, acc -> + 
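# Delegate to each registered resource module's own implementation of
# `method`, accumulating the per-resource results into a single map.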
result = call_resource(resource, unquote(method), unquote(params)) + + %{} + |> Map.put(resource, result) + |> Map.merge(acc) + end) + end + end + + defmacro call_resource(resource, method, params) do + quote do + module = Map.get(@res_modules, unquote(resource)) + apply(module, unquote(method), unquote(params)) + end + end + + def get_resource_module(caller, resource) do + module_name = + resource + |> Atom.to_string() + |> String.upcase() + |> String.to_atom() + + Module.concat(caller, module_name) + end + + defmacro resources(name, do: block) do + quote location: :keep do + + defmodule unquote(name) do + + @name unquote(name) + + use Helix.Process.Resources + + unquote(block) + end + + end + end + + defmacro resource(name, opts) do + opts = Macro.expand(opts, __CALLER__) + + resource_name = get_resource_name(name) + + args = + if opts[:behaviour] do + behaviour_block = + opts[:behaviour] + |> Macro.expand(__CALLER__) + |> apply(:generate_behaviour, [resource_name, opts]) + + [behaviour: behaviour_block] + else + [do: opts[:do]] + end + + do_resource(name, resource_name, args) + end + + defp do_resource(module_name, resource_name, do: block) do + quote location: :keep do + Module.put_attribute(__MODULE__, :resources, unquote(resource_name)) + + defmodule unquote(module_name) do + + unquote(block) + end + + end + end + + defp do_resource(module_name, _resource_name, behaviour: behaviour_block) do + quote location: :keep do + + resource unquote(module_name) do + unquote(behaviour_block) + end + + end + end + + defp get_resource_name({_, _, [name]}), + do: name |> Atom.to_string() |> String.downcase() |> String.to_atom() + + @operations [:sum, :sub, :mul, :div] + @methods [ + {:get_shares, 1}, + {:resource_per_share, 2}, + {:allocate_static, 1}, + {:allocate_dynamic, 3}, + {:allocate, 2} + ] + + for op <- @operations do + + defmacro unquote(op)(a, b, do: block) do + op = unquote(op) + + quote location: :keep do + + + def unquote(op)(unquote(a), unquote(b)) do + unquote(block) + |> build() + end + + end + end + end + + for {method, arity} <- @methods do + + params = + 1..arity + |> Enum.map(fn i -> + name = Utils.concat_atom(:arg, Integer.to_string(i)) + Macro.var(name, nil) + end) + + defmacro unquote(method)(unquote_splicing(params), do: block) do + method = unquote(method) + params = unquote(params) + + quote location: :keep do + + def unquote(method)(unquote_splicing(params)) do + unquote(block) + |> build() + end + + end + end + end +end diff --git a/lib/process/resources/behaviour.ex b/lib/process/resources/behaviour.ex new file mode 100644 index 00000000..2e54d5a9 --- /dev/null +++ b/lib/process/resources/behaviour.ex @@ -0,0 +1,34 @@ +defmodule Helix.Process.Resources.Behaviour do + + # Would love to generate those automatically, based on resources that call + # the `resource/2` macro... 
+ @type resource :: term + + @type process :: term + + # Resource creation + @callback build(term) :: resource + @callback initial() :: resource + + # Operations + @callback sum(resource, resource) :: resource + @callback sub(resource, resource) :: resource + @callback mul(resource, resource) :: resource + @callback div(resource, resource) :: resource + + # Allocation logic + @callback get_shares(process) :: shares :: resource + @callback allocate_dynamic( + shares :: resource, + res_per_share :: resource, + process) + :: + resource + @callback allocate_static(process) :: resource + @callback allocate(dynamic :: resource, static :: resource) :: resource + + # Flow checks / verifications + @callback overflow?(resource, [{process, allocations :: resource}]) :: + {true, heaviest :: process} + | false +end diff --git a/lib/process/resources/behaviour/default.ex b/lib/process/resources/behaviour/default.ex new file mode 100644 index 00000000..d342647b --- /dev/null +++ b/lib/process/resources/behaviour/default.ex @@ -0,0 +1,190 @@ +defmodule Helix.Process.Resources.Behaviour.Default do + + import Helix.Process.Resources + + def generate_behaviour(name, args) do + quote location: :keep do + + alias Helix.Process.Model.Process + alias Helix.Process.Model.TOP + alias Helix.Process.Resources.Utils, as: ResourceUtils + + @behaviour Helix.Process.Resources.Behaviour + + @name unquote(name) + @formatter unquote(args)[:formatter] || &__MODULE__.default_formatter/1 + @mirror unquote(args)[:mirror] || @name + + @type t :: number + @type initial :: t + + # Generic data manipulation + + @spec reduce(t, term, function) :: + term + def reduce(resource, initial, function), + do: function.(initial, resource) + + @spec map(t, function) :: + term + def map(resource, function), + do: function.(resource) + + @spec op_map(t, t, function) :: + t + def op_map(a, b, function), + do: function.(a, b) + + # Creation & formatting of resource + + @spec build(number) :: + t + def build(value), + do: value |> ResourceUtils.ensure_float() + + @spec initial :: + initial + def initial, + do: build(0) + + @spec format(t) :: + t + def format(resource), + do: @formatter.(resource) + + @spec default_formatter(t) :: + t + def default_formatter(v), + do: v + + # Basic operations + + @spec sum(t, t) :: + t + sum(a, b) do + a + b + end + + @spec sub(t, t) :: + t + sub(a, b) do + a - b + end + + @spec div(t, t) :: + t + div(a, b) do + ResourceUtils.safe_div(a, b, &initial/0) + end + + @spec mul(t, t) :: + t + mul(a, b) do + a * b + end + + # Allocation logic + + @spec get_shares(Process.t) :: + t + get_shares(process = %{priority: priority}) do + dynamic_res = Process.get_dynamic(process) + + with \ + true <- @name in dynamic_res, + true <- can_allocate?(process) + do + priority + else + _ -> + initial() + end + end + + @spec mirror :: + Process.resource + def mirror, + do: @mirror + + @spec can_allocate?(Process.t) :: + boolean + defp can_allocate?(%{processed: nil}), + do: true + defp can_allocate?(%{processed: processed, objective: objective}), + do: Map.fetch!(objective, @name) >= Map.get(processed, @name, 0) + + @spec resource_per_share(t, t) :: + t + resource_per_share(resources, shares) do + res_per_share = __MODULE__.div(resources, shares) + + res_per_share >= 0 && res_per_share || 0.0 + end + + @spec allocate_static(Process.t) :: + t + allocate_static(%{local?: false}) do + initial() + end + + allocate_static(%{static: static, state: state}) do + state = + if state == :waiting_allocation do + :running + else + state + end + + 
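# Look up the static allocation defined for this (normalized) state, then
# for this particular resource, falling back to the initial (zero) value.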
static + |> Map.get(state, %{}) + |> Map.get(@name, initial()) + end + + @spec allocate_dynamic(t, t, Process.t) :: + t + allocate_dynamic(shares, res_per_share, process) do + dynamic = Process.get_dynamic(process) + + if @name in dynamic do + mul(shares, res_per_share) + else + initial() + end + end + + @spec allocate(t, t) :: + t + allocate(dynamic_alloc, static_alloc) do + sum(dynamic_alloc, static_alloc) + end + + @spec completed?(t, t) :: + boolean + def completed?(processed, objective), + do: processed >= objective + + @spec overflow?(t, [TOP.Allocator.allocated_process]) :: + {true, heaviest :: Process.t} + | false + def overflow?(res, allocated_processes) do + # Due to rounding errors, we may have a "valid overflow" of a few units + if res < -1 do + {true, find_heaviest(allocated_processes)} + else + false + end + end + + @spec find_heaviest([TOP.Allocator.allocated_process]) :: + Process.t + defp find_heaviest(allocated_processes) do + allocated_processes + |> Enum.sort_by(fn {process, resources} -> + Map.fetch!(resources, @name) + end) + |> List.last() + |> elem(0) + end + end + end +end diff --git a/lib/process/resources/behaviour/kv.ex b/lib/process/resources/behaviour/kv.ex new file mode 100644 index 00000000..1ab0b872 --- /dev/null +++ b/lib/process/resources/behaviour/kv.ex @@ -0,0 +1,308 @@ +defmodule Helix.Process.Resources.Behaviour.KV do + + import Helix.Process.Resources + + def generate_behaviour(name, args) do + quote location: :keep do + + formatter = unquote(args)[:formatter] + + alias Helix.Process.Model.Process + alias Helix.Process.Model.TOP + alias Helix.Process.Resources.Utils, as: ResourceUtils + + @behaviour Helix.Process.Resources.Behaviour + + @name unquote(name) + @key Keyword.fetch!(unquote(args), :key) + @formatter unquote(args)[:formatter] || &__MODULE__.default_formatter/2 + @mirror unquote(args)[:mirror] || @name + + @type key :: term + @type value :: number + + @type t :: %{key => value} | %{} + @type initial :: %{} + + @type map_t(type) :: %{key => type} | %{} + + # Generic data manipulation + + @spec map(t, function) :: + map_t(term) + def map(resource, function) do + Enum.reduce(resource, %{}, fn {key, value}, acc -> + new_value = function.(value) + + %{} + |> Map.put(key, new_value) + |> Map.merge(acc) + end) + end + + @spec reduce(t, term, function) :: + term + def reduce(resource, initial, function) do + Enum.reduce(resource, initial, fn {key, value}, acc -> + function.(acc, value) + end) + end + + @spec op_map(t, t, function) :: + t + def op_map(a, b, fun) do + keys = get_keys(a, b) + + Map.merge(a, Map.take(b, keys), fn _, v1, v2 -> + fun.(v1, v2) + end) + end + + # Creation & formatting of resource + + @spec build(t | [t]) :: + t + def build(entries) do + Enum.reduce(entries, %{}, fn {key, value}, acc -> + + %{} + |> Map.put(key, ResourceUtils.ensure_float(value)) + |> Map.merge(acc) + end) + end + + @spec initial :: + initial + def initial, + do: build([]) + + @spec format(map_t(term)) :: + t + def format(resource) do + Enum.reduce(resource, %{}, fn {key, value}, acc -> + {k, v} = @formatter.(key, value) + + %{} + |> Map.put(k, v) + |> Map.merge(acc) + end) + end + + @spec default_formatter(term, term) :: + {key, number} + def default_formatter(k, v), + do: {k, v} + + # Basic operations + + @spec sum(t, t) :: + t + sum(a, b) do + op_map(a, b, &Kernel.+/2) + end + + @spec sub(t, t) :: + t + sub(a, b) do + # Ensure missing elements (exist on `b` but not on `a`) are filled as 0. 
+ # a = fill_missing(a, b) + + op_map(a, b, &Kernel.-/2) + end + + @spec mul(t, t) :: + t + mul(a, b) do + op_map(a, b, &Kernel.*/2) + end + + @spec div(t, t) :: + t + div(a, b) do + op_map(a, b, fn a, b -> ResourceUtils.safe_div(a, b, &initial/0) end) + end + + # Allocation logic + + @spec fill_missing(t, t, value) :: + t + defp fill_missing(a, b, value \\ 0) do + Enum.reduce(get_keys(a, b), a, fn key, acc -> + Map.put_new(acc, key, value) + end) + end + + @spec get_shares(Process.t) :: + t + get_shares(process = %{priority: priority}) do + dynamic = Process.get_dynamic(process) + + with \ + true <- @name in dynamic, + key = get_key(process), + true <- key != nil, + true <- can_allocate?(process, key) + do + Map.put(%{}, key, priority) + else + _ -> + initial() + end + end + + @spec mirror :: + Process.resource + def mirror, + do: @mirror + + @spec can_allocate?(Process.t, key) :: + boolean + defp can_allocate?(%{processed: nil}, _), + do: true + defp can_allocate?(%{local?: false}, _), + do: true + defp can_allocate?(%{processed: processed, objective: objective}, key) do + value_objective = objective[@name][key] + value_processed = processed[@name][key] + + # Convert `nil` and `%{}` to `0.0` + value_objective = is_number(value_objective) && value_objective || 0.0 + value_processed = is_number(value_processed) && value_processed || 0.0 + + value_objective > value_processed + end + + @spec resource_per_share(t, t) :: + t + resource_per_share(resources, shares) do + # If there are fields defined on `resources` which are not on `shares`, + # then we must "fill" `shares` with zero, since this means that the + # allocator is not supposed to add any share to it. If we don't, once + # we call `div/2` both maps will be merged, and no div operation would + # be performed on `resources`. + # TL;DR: Make sure we multiply by zero when we have no shares. + shares = fill_missing(shares, resources) + + res_per_share = __MODULE__.div(resources, shares) + + # Ensure we do not return any negative or invalid number + map(res_per_share, fn v -> is_number(v) && v >= 0 && v || 0.0 end) + end + + # At least currently, only local process may allocate static resources + # This means that e.g. a FileDownload may consume RAM on the local server + # but none on the remote one. 
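# The `local?: false` clause below enforces this by returning `initial/0`
# (an empty allocation) for remote processes.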
+ @spec allocate_static(Process.t) :: + t + allocate_static(%{local?: false}) do + initial() + end + + allocate_static(process = %{static: static, state: state}) do + state = + if state == :waiting_allocation do + :running + else + state + end + + alloc = + static + |> Map.get(state, %{}) + |> Map.get(@name, 0) + + case get_key(process) do + nil -> + initial() + + key -> + Map.put(%{}, key, alloc) + end + end + + @spec allocate_dynamic(t, t, Process.t) :: + t + allocate_dynamic(shares, res_per_share, process) do + dynamic = Process.get_dynamic(process) + + shares = fill_missing(shares, res_per_share) + + if @name in dynamic do + mul(shares, res_per_share) + else + initial() + end + end + + @spec allocate(t, t) :: + t + allocate(dynamic_alloc, static_alloc) do + sum(dynamic_alloc, static_alloc) + end + + @spec completed?(t, t) :: + t + def completed?(processed, objective) do + Enum.reduce(processed, %{}, fn {key, value}, acc -> + # If the corresponding objective is `nil`, then by definition this + # resource is completed + result = + if objective[key] do + value >= objective[key] + else + true + end + + %{} + |> Map.put(key, result) + |> Map.merge(acc) + end) + end + + @spec overflow?(t, [TOP.Allocator.allocated_process]) :: + {true, heaviest :: Process.t} + | false + def overflow?(res, allocated_processes) do + overflowed? = + reduce(res, false, fn acc, val -> + # Slack for rounding errors + if val >= -1 do + acc + else + true + end + end) + + if overflowed? do + {true, find_heaviest(allocated_processes)} + else + false + end + end + + @spec find_heaviest([TOP.Allocator.allocated_process]) :: + Process.t + defp find_heaviest(allocated_processes) do + allocated_processes + |> Enum.sort_by(fn {process, resources} -> + resources + |> Map.fetch!(@name) + |> Map.fetch!(get_key(process)) + end) + |> List.last() + |> elem(0) + end + + @spec get_key(Process.t) :: + key + defp get_key(process), + do: Map.fetch!(process, @key) + + @spec get_keys(t, t) :: + [key] + defp get_keys(a, b) do + Enum.uniq(Map.keys(a) ++ Map.keys(b)) + end + end + end +end diff --git a/lib/process/resources/utils.ex b/lib/process/resources/utils.ex new file mode 100644 index 00000000..1bafdd67 --- /dev/null +++ b/lib/process/resources/utils.ex @@ -0,0 +1,19 @@ +defmodule Helix.Process.Resources.Utils do + + @spec ensure_float(term) :: + float + def ensure_float(i) when is_number(i), + do: i / 1 |> Float.round(3) + def ensure_float(map) when map_size(map) == 0, + do: 0.0 + + @spec safe_div(number, number, initial :: term) :: + number + | initial :: term + def safe_div(dividend, divisor, _initial) when divisor > 0, + do: dividend / divisor + def safe_div(_, 0.0, initial), + do: initial.() + def safe_div(_, 0, initial), + do: initial.() +end diff --git a/lib/process/state/top/domain.ex b/lib/process/state/top/domain.ex deleted file mode 100644 index 31c56379..00000000 --- a/lib/process/state/top/domain.ex +++ /dev/null @@ -1,541 +0,0 @@ -defmodule Helix.Process.State.TOP.Domain do - @moduledoc false - - alias Ecto.Changeset - alias Helix.Event - alias Helix.Server.Model.Server - alias Helix.Process.Event.Process.Completed, as: ProcessCompletedEvent - alias Helix.Process.Internal.TOP.Allocator.Plan, as: PlanTOP - alias Helix.Process.Internal.TOP.ServerResources, as: ServerResourcesTOP - alias Helix.Process.Model.Process - alias Helix.Process.Model.Processable - - @behaviour :gen_statem - - @typep t :: %__MODULE__{} - @type server_id :: Server.id - @type process_id :: Process.id - @type process :: Process.t | Changeset.t - @type 
resources :: ServerResourcesTOP.t - - # gen_statem instruction to execute procedure to flush instructions to the - # handler - @flush {:next_event, :internal, :flush} - - # gen_statem instruction to execute procedure to allocate resources into - # processes - @allocate {:next_event, :internal, :allocate} - - @enforce_keys [:gateway, :processes, :resources, :handler] - defstruct [ - gateway: nil, - processes: nil, - resources: nil, - last_minimum: nil, - instructions: [], - handler: nil - ] - - @spec start_link(server_id, [process], resources) :: - {:ok, pid} - | :ignore - | {:error, :badarg} - def start_link(gateway, processes, resources) do - handler = self() - init_options = {gateway, processes, resources, handler} - :gen_statem.start_link(__MODULE__, init_options, []) - end - - @spec may_create?(pid, process) :: - :ok - | {:error, :resources} - def may_create?(pid, process), - do: :gen_statem.call(pid, {:may_create?, process}) - - @spec create(pid, process) :: - :ok - def create(pid, process), - do: :gen_statem.cast(pid, {:create, process}) - - @spec priority(pid, process_id, 0..5) :: - :ok - def priority(pid, process, priority) when priority in 0..5, - do: :gen_statem.cast(pid, {:priority, process, priority}) - - @spec pause(pid, process_id) :: - :ok - def pause(pid, process), - do: :gen_statem.cast(pid, {:pause, process}) - - @spec resume(pid, process_id) :: - :ok - def resume(pid, process), - do: :gen_statem.cast(pid, {:resume, process}) - - @spec kill(pid, process_id) :: - :ok - def kill(pid, process), - do: :gen_statem.cast(pid, {:kill, process}) - - @spec reset_processes(pid, [process]) :: - :ok - def reset_processes(pid, processes), - do: :gen_statem.cast(pid, {:reset, :processes, processes}) - - @doc false - def init({gateway, processes, resources, handler}) do - data = %__MODULE__{ - gateway: gateway, - processes: processes, - resources: resources, - handler: handler - } - - actions = [@allocate] - - {:ok, :startup, data, actions} - end - - @doc false - def callback_mode, - do: :handle_event_function - - @doc false - def code_change(_vsn, state, data, _extra) do - {:ok, state, data} - end - - @doc false - def terminate(_reason, _state, _data) do - :top - end - - @doc false - def handle_event(event_type, event_msg, state, data) - - # This callback exists because i intend to potentially include additional - # operations on startup - def handle_event(:internal, :allocate, :startup, data) do - actions = [@allocate, @flush] - - {:next_state, :running, data, actions} - end - - def handle_event(:internal, :allocate, :running, data) do - now = DateTime.utc_now() - - {data, processes} = - Enum.reduce(data.processes, {data, []}, fn - p = %Changeset{action: :delete}, {data_acc, process_acc} -> - data_acc = store_processes(data_acc, [p]) - {data_acc, process_acc} - process, {data_acc, process_acc} -> - process = - process - |> Changeset.change() - |> Process.calculate_work(now) - - if Process.complete?(process) do - process_data = Changeset.get_field(process, :process_data) - {processes, events} = Processable.conclusion(process_data, process) - - # Add the process_id as metadata for both *ProcessedEvent and - # ProcessCompletedEvent - process_id = Changeset.get_field(process, :process_id) - - events = Enum.map(events, fn event -> - Event.set_process_id(event, process_id) - end) - - process_completed = - process - |> Changeset.apply_changes() - |> ProcessCompletedEvent.new() - |> Event.set_process_id(process_id) - - {delete, keep} = - processes - |> List.wrap() - |> Enum.split_with(fn - 
%Changeset{action: :delete} -> - true - _ -> - false - end) - - data_acc = - data_acc - |> store_processes(delete) - |> store_events([process_completed]) - |> store_events(events) - - {data_acc, keep ++ process_acc} - else - {data_acc, [process| process_acc]} - end - end) - - {processes, minimum} = allocate(processes, data.resources) - - data = %{data| processes: [], last_minimum: minimum} - newdata = store_processes(data, processes) - - {:keep_state, newdata} - end - - # Sends a set of instructions to the handler process to persist data - def handle_event(:internal, :flush, :running, data) do - with instructions = [_|_] <- :lists.reverse(data.instructions) do - send(data.handler, {:top, :instructions, instructions}) - end - - {:keep_state, %{data| instructions: []}, [{:next_event, :internal, :wait}]} - end - - def handle_event(:internal, :wait, :running, data) do - time = - data.processes - |> Enum.map(&Process.seconds_to_change/1) - |> Enum.reduce(:infinity, &min/2) - - time = if is_integer(time), - do: time * 1_000, - else: time - - notify_completed = {:timeout, time, :allocate} - actions = [notify_completed] - - {:keep_state, data, actions} - end - - def handle_event(:timeout, :allocate, :running, data) do - actions = [@allocate, @flush] - - {:keep_state, data, actions} - end - - # Changes the priority of a single process - def handle_event(:cast, {:priority, id, priority}, :running, data) do - processes = - data.processes - |> calculate_worked() - |> Enum.map(fn p -> - Changeset.get_field(p, :process_id) == id - && Process.update_changeset(p, %{priority: priority}) - || p - end) - - actions = [@allocate, @flush] - {:keep_state, %{data| processes: processes}, actions} - end - - # Pauses a single process - def handle_event(:cast, {:pause, id}, :running, data) do - case Enum.split_with(data.processes, &(&1.process_id == id)) do - {[], _} -> - {:keep_state, data} - {[process], processes} -> - {resulting_processes, events} = Process.pause(process) - - new_data = - %{data| processes: processes} - |> store_processes(List.wrap(resulting_processes)) - |> store_events(events) - - actions = [@allocate, @flush] - {:keep_state, new_data, actions} - end - end - - # Resumes a single process - def handle_event(:cast, {:resume, id}, :running, data) do - # TODO: block this action if it would trigger "resource overflow" - case Enum.split_with(data.processes, &(&1.process_id == id)) do - {[], _} -> - {:keep_state, data} - {[process], processes} -> - {resulting_processes, events} = Process.resume(process) - - new_data = - %{data| processes: processes} - |> store_processes(List.wrap(resulting_processes)) - |> store_events(events) - - actions = [@allocate, @flush] - {:keep_state, new_data, actions} - end - end - - # Kills a single process - def handle_event(:cast, {:kill, id}, :running, data) do - # Marks the process to be removed. 
It'll be included in the remove - # instructions after the allocation procedure - case Enum.split_with(data.processes, &(&1.process_id == id)) do - {[], _} -> - {:keep_state, data} - {[process], processes} -> - {resulting_processes, events} = Process.kill(process, :shutdown) - - new_data = - %{data| processes: processes} - |> store_processes(List.wrap(resulting_processes)) - |> store_events(events) - - actions = [@allocate, @flush] - {:keep_state, new_data, actions} - end - end - - # Resets the machine (useful as a recovery mechanism for when the persisted - # state is inconsistent with current state) - def handle_event(:cast, {:reset, :processes, processes}, :running, data) do - new_data = %{data| instructions: [], processes: processes} - - actions = [@allocate, @flush] - - {:next_state, :startup, new_data, actions} - end - - def handle_event(:cast, {:create, process}, :running, data) do - actions = [@allocate, @flush] - new_data = %{data| processes: [process| data.processes]} - {:keep_state, new_data, actions} - end - - # I don't like setting server-like callbacks like this on FSM but this will do - # for now - def handle_event({:call, from}, {:may_create?, process}, :running, data) do - minimum = data.last_minimum - maximum = data.resources - - process = Process.allocate_minimum(process) - foreseen = ServerResourcesTOP.sum_process(minimum, process) - - reply = if ServerResourcesTOP.exceeds?(foreseen, maximum) do - # Process would cause the server to overflow it's resources - {:error, :resources} - else - # Everything is okay, so we aggregate process into the TOP processes, - # reply the requesting client with an okay, allocate and tells the handler - # to update the processes on the database with new allocation values - :ok - end - - {:keep_state, data, [{:reply, from, reply}]} - end - - # Cleans processes so the in-game server can be shutdown gracefully - def handle_event({:call, from}, :shutdown, :running, data) do - # Deallocates resources completely, so the processes can be "frozen" while - # the server is shutdown - processes = - data.processes - |> calculate_worked() - |> Enum.map(&Process.update_changeset(&1, %{allocated: %{}})) - - new_data = store_processes(data, processes) - - reply = {:reply, from, new_data.instructions} - - # This timeout is executed if the handler for some reason fails to execute - # it's operation in a timely manner, effectively making the whole shutdown - # sequence to fail - kill_timer = {:timeout, 10_000, :timeout} - - # Will move the process state to the shutdown state so it can wait for the - # handler to properly - {:next_state, :shutdown, nil, [reply, kill_timer]} - end - - # This event means that for some reason the graceful shutdown didn't happen in - # a timely manner, so this will force a shutdown (and cause the supervisor to - # reset both this state machine and the handler) - def handle_event(:timeout, :timeout, :shutdown, _) do - {:stop, :shutdown_failed} - end - - @spec calculate_worked([Process.t | Changeset.t]) :: - [Changeset.t] - defp calculate_worked(processes) do - now = DateTime.utc_now() - - Enum.map(processes, fn process -> - process - |> Ecto.Changeset.change() - |> Process.calculate_work(now) - end) - end - - @spec allocate_minimum([Process.t]) :: - {[Changeset.t], resources} - defp allocate_minimum(processes) do - Enum.map_reduce(processes, %ServerResourcesTOP{}, fn - cs = %Changeset{action: :delete}, acc -> - {cs, acc} - process, acc -> - process = Process.allocate_minimum(process) - acc = ServerResourcesTOP.sum_process(acc, 
process) - - {process, acc} - end) - end - - @spec allocate([Process.t | Changeset.t], resources) :: - {[Changeset.t], resources} - defp allocate(processes, resources) do - {processes, reserved_resources} = allocate_minimum(processes) - - # Subtracts from the total resource pool the amount that was already - # reserved by the processes - remaining_resources = ServerResourcesTOP.sub(resources, reserved_resources) - - {processes, resources} = - case ServerResourcesTOP.negatives(remaining_resources) do - [] -> - {processes, remaining_resources} - negative_resources -> - # If the server doesn't have enough resources to keep the instantiated - # processes, run a procedure to free the minimum possible resources by - # killing the most consuming processes that are over-reserving those - # resources - {dropped, freed_resources} = - drop_processes_to_free_resources(processes, negative_resources) - - processes = Enum.map(processes, fn changeset -> - id = Changeset.get_field(changeset, :process_id) - - if MapSet.member?(dropped, id) do - %{changeset| action: :delete} - else - changeset - end - end) - - resources = ServerResourcesTOP.sum( - remaining_resources, - freed_resources) - {processes, resources} - end - - # This is necessary because our allocator is dumb and would allocate to - # deleted processes. This will be removed in the future tho - {remove, allocate} = Enum.split_with(processes, &(&1.action == :delete)) - - {remove ++ PlanTOP.allocate(allocate, resources), reserved_resources} - end - - @spec drop_processes_to_free_resources([Changeset.t], list) :: - {dropped_process_ids :: MapSet.t, freed_resources :: resources} - defp drop_processes_to_free_resources(processes, negative_resources) do - processes = - processes - |> Enum.filter(&(&1.action != :delete)) - |> Enum.map(&Changeset.apply_changes/1) - - free_resources( - processes, - MapSet.new(), - %ServerResourcesTOP{}, - negative_resources) - end - - @spec free_resources([Process.t], removed, freed, list) :: - {removed, freed} when removed: MapSet.t, freed: ServerResourcesTOP.t - # This looks a bit complex. I'll have to think another way to make it simpler - # without making it long and bothersome - # This function will, for each negative_resource, remove processes that - # consume the highest chunks of it. Then it frees the resources that - # the removed process reserved beforehand. 
- defp free_resources(processes, removed, freed, [negative_resource| t]) do - remove_process = fn process, removed, freed -> - removed = MapSet.put(removed, process.process_id) - freed = ServerResourcesTOP.sum_process(freed, process) - {removed, freed} - end - - # Since this function is called for every negative resource and a removed - # process might have freed the resource we're querying, we have to update it - negative_resource = {resource, _} = case negative_resource do - {net_resource, {network_id, value}} when net_resource in [:dlk, :ulk] -> - freed_resource = freed.net[network_id][net_resource] || 0 - {net_resource, {network_id, value - freed_resource}} - {resource, value} when resource in [:cpu, :ram] -> - freed_resource = freed[resource] - {resource, value - freed_resource} - end - - {processes, removed, freed, _} = - processes - |> Enum.sort_by(&(&1.allocated[resource]), &>=/2) - |> Enum.reduce({[], removed, freed, negative_resource}, fn - # Too much ulk or dlk used on network `n` and this is one of the main - # wasters - p = %{network_id: n}, {acc, removed, freed, {net_resource, {n, value}}} - when value > 0 and net_resource in [:ulk, :dlk] -> - {removed, freed} = remove_process.(p, removed, freed) - freed_value = p.allocated[net_resource] - {acc, removed, freed, {net_resource, n, value - freed_value}} - - # Too much ram or cpu consumed and this is one of the main wasters - p, {acc, removed, freed, {resource, value}} - when value > 0 and resource in [:cpu, :ram] -> - {removed, freed} = remove_process.(p, removed, freed) - freed_value = p.allocated[resource] - {acc, removed, freed, {resource, value - freed_value}} - - # Process is not consuming the resource or the lack was of the resource - # was already addressed - p, {acc, removed, freed, query} -> - {[p| acc], removed, freed, query} - end) - - free_resources(processes, removed, freed, t) - end - - defp free_resources(_, removed, freed, []) do - {removed, freed} - end - - @spec store_events(t, [struct]) :: - t - defp store_events(data, events) do - Enum.reduce(events, data, fn e, acc = %{instructions: i} -> - instruction = {:event, e} - - %{acc| instructions: [instruction| i]} - end) - end - - @spec store_processes(t, [Process.t | Changeset.t]) :: - t - defp store_processes(data, processes) do - Enum.reduce(processes, data, fn - e = %Changeset{action: :delete}, acc = %{instructions: i} -> - instruction = {:delete, e} - - %{acc| instructions: [instruction| i]} - e = %Changeset{action: :update}, acc = %{processes: p, instructions: i} -> - instruction = {:update, e} - e = Changeset.apply_changes(e) - - %{acc| processes: [e| p], instructions: [instruction| i]} - e = %Changeset{action: :insert}, acc = %{processes: p, instructions: i} -> - instruction = {:create, e} - e = Changeset.apply_changes(e) - - %{acc| processes: [e| p], instructions: [instruction| i]} - e = %Process{}, acc = %{processes: p, instructions: i} -> - # This case might happen if a protocol returns a new process from it's - # conclusion and we handle it. 
This is what happens (will happen) with - # virus installing - instruction = {:create, e} - - %{acc| processes: [e| p], instructions: [instruction| i]} - %Changeset{action: nil, changes: c}, acc - when map_size(c) == 0 -> - # For some reason this process was converted to a changeset but no - # change was reduced on it - acc - end) - end -end diff --git a/lib/process/state/top/manager.ex b/lib/process/state/top/manager.ex deleted file mode 100644 index 6106f79f..00000000 --- a/lib/process/state/top/manager.ex +++ /dev/null @@ -1,47 +0,0 @@ -defmodule Helix.Process.State.TOP.Manager do - @moduledoc false - - alias Helix.Server.Model.Server - alias Helix.Process.State.TOP.Supervisor, as: TOPSupervisor - - # TODO: Replace this with a distributed alternative. Maybe using PubSub - - @doc false - def start_link do - Registry.start_link(:unique, __MODULE__) - end - - @spec prepare_top(Server.id) :: - Supervisor.on_start_child - @doc """ - Fetches or starts a TOP process for `gateway` - """ - def prepare_top(gateway) do - pid = get(gateway) - - if pid do - {:ok, pid} - else - TOPSupervisor.start_top(gateway) - end - end - - @spec get(Server.id) :: - pid - | nil - @doc """ - Fetches the pid of the process running the TOP for the specified `gateway` - """ - def get(gateway) do - case Registry.lookup(__MODULE__, gateway) do - [{pid, _}] -> - pid - [] -> - nil - end - end - - @doc false - def register(gateway), - do: Registry.register(__MODULE__, gateway, []) -end diff --git a/lib/process/state/top/server.ex b/lib/process/state/top/server.ex deleted file mode 100644 index a9478eab..00000000 --- a/lib/process/state/top/server.ex +++ /dev/null @@ -1,240 +0,0 @@ -defmodule Helix.Process.State.TOP.Server do - @moduledoc false - - # This is the default adapter for the TOP. It will proxy requests from API and - # persist data - - use GenServer - - # FIXME: State cannot use Internal modules directly. 
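  # Usage sketch (illustrative only, assembled from the public functions below
  # and from Manager.prepare_top/1 above; return shapes assume the happy path):
  #
  #   {:ok, pid} = Helix.Process.State.TOP.Manager.prepare_top(gateway_id)
  #   {:ok, process} = Helix.Process.State.TOP.Server.create(pid, params)
  #   :ok = Helix.Process.State.TOP.Server.pause(pid, process)
  #
  # Callers first resolve (or start) the per-gateway TOP GenServer, then issue
  # commands against its pid; the GenServer forwards them to the Domain state
  # machine and later persists the instructions it receives back.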
- - alias Ecto.Changeset - alias Helix.Event - alias Helix.Hardware.Query.Motherboard, as: MotherboardQuery - alias Helix.Server.Model.Server - alias Helix.Server.Query.Server, as: ServerQuery - alias Helix.Process.Internal.TOP.ServerResources, as: ServerResourcesTOP - alias Helix.Process.Model.Process - alias Helix.Process.Query.Process, as: ProcessQuery - alias Helix.Process.State.TOP.Domain, as: DomainTOP - alias Helix.Process.State.TOP.Manager, as: ManagerTOP - alias Helix.Process.Repo - - require Logger - - @typep state :: %__MODULE__{} - - @enforce_keys [:gateway, :domain] - defstruct [:gateway, :domain] - - @spec start_link(Server.id) :: - GenServer.on_start - def start_link(gateway_id) do - GenServer.start_link(__MODULE__, gateway_id) - end - - @spec create(pid, Process.create_params) :: - {:ok, Process.t} - | {:error, Changeset.t} - | {:error, :resources} - def create(pid, params) do - GenServer.call(pid, {:create, params}) - end - - # REVIEW: Maybe make priority/3, pause/2, resume/2 and kill/2 synchronous - @spec priority(pid, Process.t, 0..5) :: - :ok - def priority(pid, process, priority) when priority in 0..5 do - GenServer.cast(pid, {:priority, process, priority}) - end - - @spec pause(pid, Process.t) :: - :ok - def pause(pid, process) do - GenServer.cast(pid, {:pause, process}) - end - - @spec resume(pid, Process.t) :: - :ok - def resume(pid, process) do - GenServer.cast(pid, {:resume, process}) - end - - @spec kill(pid, Process.t) :: - :ok - def kill(pid, process) do - GenServer.cast(pid, {:kill, process}) - end - - @spec reset_processes(pid, [Process.t]) :: - :ok - @doc false - def reset_processes(pid, processes) do - # The processes of a TOP server changed in a potentially unexpected way, so - # it's better to gracefully reset the domain machine - GenServer.cast(pid, {:reset, :processes, processes}) - end - - @spec init(Server.id) :: - {:ok, state} - | {:stop, reason :: term} - @doc false - def init(gateway) do - with \ - {:ok, resources} <- get_resources(gateway) - do - processes = ProcessQuery.get_processes_on_server(gateway) - {:ok, domain} = DomainTOP.start_link(gateway, processes, resources) - - ManagerTOP.register(gateway) - - state = %__MODULE__{ - gateway: gateway, - domain: domain - } - - {:ok, state} - else - reason -> - {:stop, reason} - end - end - - @spec handle_call({:create, Process.create_params}, GenServer.from, state) :: - {:reply, {:ok, Process.t}, state} - | {:reply, {:error, Changeset.t | :resources}, state} - @doc false - def handle_call({:create, params}, _from, state) do - reply = - with \ - changeset = Process.create_changeset(params), - true <- changeset.valid? 
|| changeset, - process = Process.load_virtual_data(Changeset.apply_changes(changeset)), - :ok <- DomainTOP.may_create?(state.domain, process), - {:ok, process} <- Repo.insert(changeset) - do - process = Process.load_virtual_data(process) - DomainTOP.create(state.domain, process) - - {:ok, process} - else - changeset = %Changeset{} -> - {:error, changeset} - {:error, :resources} -> - {:error, :resources} - end - - {:reply, reply, state} - end - - @spec handle_cast({:priority, Process.t, 0..5}, state) :: - {:noreply, state} - @doc false - def handle_cast({:priority, process, priority}, state) do - if belongs_to_the_server?(process, state) do - DomainTOP.priority(state.domain, process.process_id, priority) - end - - {:noreply, state} - end - - @spec handle_cast({:pause, Process.t}, state) :: - {:noreply, state} - def handle_cast({:pause, process}, state) do - if belongs_to_the_server?(process, state) do - DomainTOP.pause(state.domain, process.process_id) - end - - {:noreply, state} - end - - @spec handle_cast({:resume, Process.t}, state) :: - {:noreply, state} - def handle_cast({:resume, process}, state) do - if belongs_to_the_server?(process, state) do - DomainTOP.resume(state.domain, process.process_id) - end - - {:noreply, state} - end - - @spec handle_cast({:kill, Process.t}, state) :: - {:noreply, state} - def handle_cast({:kill, process}, state) do - if belongs_to_the_server?(process, state) do - DomainTOP.kill(state.domain, process.process_id) - end - - {:noreply, state} - end - - @spec handle_cast({:reset, :processes, [Process.t]}, state) :: - {:noreply, state} - def handle_cast({:reset, :processes, processes}, state) do - DomainTOP.reset_processes(state.domain, processes) - - {:noreply, state} - end - - @opaque top_instruction :: - {:create, Process.t} - | {:create, Changeset.t} - | {:delete, Changeset.t} - | {:update, Changeset.t} - | {:event, Event.t} - @spec handle_info({:top, :instructions, [top_instruction]}, state) :: - {:noreply, state} - @doc false - def handle_info({:top, :instructions, instructions}, state) do - # If the brick hits the fan, it's better to just crash and try again - {:ok, _} = Repo.transaction fn -> - Enum.each(instructions, &execute_repo_instruction/1) - end - - Enum.each(instructions, &execute_post_instruction/1) - - {:noreply, state} - end - - defp execute_repo_instruction({:delete, record}), - do: Repo.delete!(record) - defp execute_repo_instruction({:update, record}), - do: Repo.update!(record) - defp execute_repo_instruction({:create, record}), - do: Repo.insert!(record) - defp execute_repo_instruction(_), - do: :ok - - defp execute_post_instruction({:event, event}), - do: Event.emit(event) - defp execute_post_instruction(_), - do: :ok - - @spec belongs_to_the_server?(Process.t | Changeset.t, state) :: - boolean - defp belongs_to_the_server?(%Process{gateway_id: g}, %{gateway: g}), - do: true - defp belongs_to_the_server?(%Process{}, %{}), - do: false - defp belongs_to_the_server?(cs = %Changeset{}, %{gateway: g}), - do: Changeset.get_field(cs, :gateway_id) == g - - @spec get_resources(Server.id) :: - {:ok, ServerResourcesTOP.t} - | {:error, :server_not_found} - defp get_resources(gateway_id) do - with server = %{} <- ServerQuery.fetch(gateway_id) do - resources = - server.motherboard_id - |> MotherboardQuery.fetch() - |> MotherboardQuery.resources() - |> Map.delete(:hdd) - |> ServerResourcesTOP.cast() - - {:ok, resources} - else - _ -> - {:error, :server_not_found} - end - end -end diff --git a/lib/process/state/top/supervisor.ex 
b/lib/process/state/top/supervisor.ex deleted file mode 100644 index 71021550..00000000 --- a/lib/process/state/top/supervisor.ex +++ /dev/null @@ -1,63 +0,0 @@ -defmodule Helix.Process.State.TOP.Supervisor do - @moduledoc false - - use Supervisor - - alias Helix.Server.Model.Server - alias Helix.Process.State.TOP.Manager, as: TOPManager - alias Helix.Process.State.TOP.ChildrenSupervisor, as: TOPChildrenSupervisor - - @spec start_link() :: - Supervisor.on_start - @doc false - def start_link do - Supervisor.start_link(__MODULE__, [], name: __MODULE__) - end - - @spec start_top(Server.id) :: - Supervisor.on_start_child - def start_top(gateway) do - TOPChildrenSupervisor.start_child(gateway) - end - - @doc false - def init(_) do - children = [ - supervisor(TOPManager, []), - supervisor(TOPChildrenSupervisor, []) - ] - - supervise(children, strategy: :rest_for_one) - end -end - -defmodule Helix.Process.State.TOP.ChildrenSupervisor do - @moduledoc false - - use Supervisor - - alias Helix.Server.Model.Server - alias Helix.Process.State.TOP.Server, as: ServerTOP - - @spec start_link() :: - Supervisor.on_start - @doc false - def start_link do - Supervisor.start_link(__MODULE__, [], name: __MODULE__) - end - - @spec start_child(Server.id) :: - Supervisor.on_start_child - def start_child(gateway) do - Supervisor.start_child(__MODULE__, [gateway]) - end - - @doc false - def init(_) do - children = [ - worker(ServerTOP, [], restart: :transient) - ] - - supervise(children, strategy: :simple_one_for_one) - end -end diff --git a/lib/process/supervisor.ex b/lib/process/supervisor.ex index e5e89306..98fe04b7 100644 --- a/lib/process/supervisor.ex +++ b/lib/process/supervisor.ex @@ -4,7 +4,6 @@ defmodule Helix.Process.Supervisor do use Supervisor alias Helix.Process.Repo - alias Helix.Process.State.TOP.Supervisor, as: TOPSupervisor @doc false def start_link do @@ -14,8 +13,7 @@ defmodule Helix.Process.Supervisor do @doc false def init(_) do children = [ - worker(Repo, []), - supervisor(TOPSupervisor, []) + worker(Repo, []) ] supervise(children, strategy: :rest_for_one) diff --git a/lib/server/model/server.ex b/lib/server/model/server.ex index 74b3175a..13548d4d 100644 --- a/lib/server/model/server.ex +++ b/lib/server/model/server.ex @@ -9,6 +9,7 @@ defmodule Helix.Server.Model.Server do alias HELL.Constant alias HELL.Password alias Helix.Hardware.Model.Component + alias Helix.Hardware.Model.Motherboard alias Helix.Server.Model.ServerType @type password :: String.t @@ -22,6 +23,8 @@ defmodule Helix.Server.Model.Server do updated_at: NaiveDateTime.t } + @type resources :: Motherboard.resources + @type creation_params :: %{ :server_type => Constant.t, optional(:motherboard_id) => Component.idtb | nil diff --git a/lib/software/action/flow/file/transfer.ex b/lib/software/action/flow/file/transfer.ex index 368a4140..c5b245c2 100644 --- a/lib/software/action/flow/file/transfer.ex +++ b/lib/software/action/flow/file/transfer.ex @@ -68,7 +68,7 @@ defmodule Helix.Software.Action.Flow.File.Transfer do network_id: net.network_id, bounce: net.bounce_id, file: file, - process_type: process_type + type: process_type } FileTransferProcess.execute(gateway, endpoint, params, meta) @@ -78,9 +78,9 @@ defmodule Helix.Software.Action.Flow.File.Transfer do Given the transfer type, figure out all related types used by other services. 
""" defp get_type_info(:download), - do: {:ftp, "file_download", :download} + do: {:ftp, :file_download, :download} defp get_type_info(:upload), - do: {:ftp, "file_upload", :upload} + do: {:ftp, :file_upload, :upload} defp get_type_info(:pftp_download), - do: {:public_ftp, "file_download", :download} + do: {:public_ftp, :file_download, :download} end diff --git a/lib/software/action/flow/software/firewall.ex b/lib/software/action/flow/software/firewall.ex index 8e11f726..74405893 100644 --- a/lib/software/action/flow/software/firewall.ex +++ b/lib/software/action/flow/software/firewall.ex @@ -23,7 +23,7 @@ # params = %{ # gateway_id: server, -# target_server_id: server, +# target_id: server, # file_id: file.file_id, # process_data: process_data, # process_type: "firewall_passive" diff --git a/lib/software/action/flow/software/log_forger.ex b/lib/software/action/flow/software/log_forger.ex index 52cc359c..b9f05157 100644 --- a/lib/software/action/flow/software/log_forger.ex +++ b/lib/software/action/flow/software/log_forger.ex @@ -53,7 +53,7 @@ # process_params = %{ # gateway_id: server_id, -# target_server_id: log.server_id, +# target_id: log.server_id, # file_id: file.file_id, # objective: objective, # process_data: data, @@ -69,7 +69,7 @@ # process_params = %{ # gateway_id: server, -# target_server_id: data.target_server_id, +# target_id: data.target_id, # file_id: file.file_id, # objective: objective, # process_data: data, diff --git a/lib/software/event/cracker/bruteforce.ex b/lib/software/event/cracker/bruteforce.ex index c18d7593..458b310b 100644 --- a/lib/software/event/cracker/bruteforce.ex +++ b/lib/software/event/cracker/bruteforce.ex @@ -20,14 +20,14 @@ defmodule Helix.Software.Event.Cracker.Bruteforce do source_entity_id: Entity.id, network_id: Network.id, target_server_ip: IPv4.t, - target_server_id: Server.id, + target_id: Server.id, } event_struct [ :source_entity_id, :network_id, :target_server_ip, - :target_server_id + :target_id ] @spec new(Process.t, BruteforceProcess.t) :: @@ -36,7 +36,7 @@ defmodule Helix.Software.Event.Cracker.Bruteforce do %__MODULE__{ source_entity_id: process.source_entity_id, network_id: process.network_id, - target_server_id: process.target_server_id, + target_id: process.target_id, target_server_ip: data.target_server_ip } end diff --git a/lib/software/event/handler/cracker.ex b/lib/software/event/handler/cracker.ex index d6276538..12239176 100644 --- a/lib/software/event/handler/cracker.ex +++ b/lib/software/event/handler/cracker.ex @@ -38,7 +38,7 @@ defmodule Helix.Software.Event.Handler.Cracker do {:ok, password, events} <- ServerAction.crack( event.source_entity_id, - event.target_server_id, + event.target_id, event.network_id, event.target_server_ip ), @@ -72,8 +72,8 @@ defmodule Helix.Software.Event.Handler.Cracker do do process = ProcessQuery.fetch(event.target_process_id) - case process.process_type do - "wire_transfer" -> + case process.type do + :wire_transfer -> overflow_of_wire_transfer(process, event) end end @@ -103,7 +103,7 @@ defmodule Helix.Software.Event.Handler.Cracker do Emits: BankTokenAcquiredEvent.t """ defp overflow_of_wire_transfer(process, event) do - transfer_id = process.process_data.transfer_id + transfer_id = process.data.transfer_id connection_id = process.connection_id flowing do diff --git a/lib/software/event/handler/decryptor.ex b/lib/software/event/handler/decryptor.ex index 0513a7f5..8ce1c0bb 100644 --- a/lib/software/event/handler/decryptor.ex +++ b/lib/software/event/handler/decryptor.ex @@ -26,10 +26,10 @@ 
# def complete(event = %ProcessConclusionEvent{scope: :local}) do # storage = StorageQuery.fetch(event.storage_id) # target_file = FileQuery.fetch(event.target_file_id) -# target_server_id = event.target_server_id +# target_id = event.target_id # transaction = fn -> -# {:ok, _} = CryptoKeyAction.create(storage, target_server_id, target_file) +# {:ok, _} = CryptoKeyAction.create(storage, target_id, target_file) # end # {:ok, _} = Repo.transaction(transaction) diff --git a/lib/software/event/handler/encryptor.ex b/lib/software/event/handler/encryptor.ex index 90df0d3d..04d5f7f4 100644 --- a/lib/software/event/handler/encryptor.ex +++ b/lib/software/event/handler/encryptor.ex @@ -11,13 +11,13 @@ # def complete(event = %ProcessConclusionEvent{}) do # storage = StorageQuery.fetch(event.storage_id) # target_file = FileQuery.fetch(event.target_file_id) -# target_server_id = event.target_server_id +# target_id = event.target_id # transaction = fn -> # events = CryptoKeyAction.invalidate_keys_for_file(target_file) # {:ok, _} = FileAction.encrypt(target_file, event.version) -# {:ok, _} = CryptoKeyAction.create(storage, target_server_id, target_file) +# {:ok, _} = CryptoKeyAction.create(storage, target_id, target_file) # events # end diff --git a/lib/software/event/log_forge/log_create.ex b/lib/software/event/log_forge/log_create.ex index 92569e9e..438856ef 100644 --- a/lib/software/event/log_forge/log_create.ex +++ b/lib/software/event/log_forge/log_create.ex @@ -9,19 +9,19 @@ defmodule Helix.Software.Event.LogForge.LogCreate do alias Helix.Software.Model.SoftwareType.LogForge, as: LogForgeProcess @type t :: %__MODULE__{ - target_server_id: Server.id, + target_id: Server.id, entity_id: Entity.id, message: String.t, version: pos_integer } - event_struct [:target_server_id, :entity_id, :message, :version] + event_struct [:target_id, :entity_id, :message, :version] @spec new(LogForgeProcess.t) :: t def new(data = %LogForgeProcess{operation: :create}) do %__MODULE__{ - target_server_id: data.target_server_id, + target_id: data.target_id, entity_id: data.entity_id, message: data.message, version: data.version diff --git a/lib/software/internal/file.ex b/lib/software/internal/file.ex index 45b47707..a14dc94a 100644 --- a/lib/software/internal/file.ex +++ b/lib/software/internal/file.ex @@ -44,6 +44,7 @@ defmodule Helix.Software.Internal.File do storage |> File.Query.by_storage() |> Repo.all() + |> Enum.map(&File.format/1) end @spec create(File.creation_params, [File.module_params]) :: diff --git a/lib/software/model/crypto_key.ex b/lib/software/model/crypto_key.ex index 207423d3..c9fba783 100644 --- a/lib/software/model/crypto_key.ex +++ b/lib/software/model/crypto_key.ex @@ -12,7 +12,7 @@ # @type t :: %__MODULE__{ # file_id: File.id, # target_file_id: File.id | nil, -# target_server_id: Server.id, +# target_id: Server.id, # file: term, # target_file: term # } @@ -26,7 +26,7 @@ # primary_key: true # field :target_file_id, File.ID -# field :target_server_id, Server.ID +# field :target_id, Server.ID # belongs_to :file, File, # foreign_key: :file_id, @@ -51,10 +51,10 @@ # file = generate_file(storage) # %__MODULE__{} -# |> cast(%{target_server_id: server}, [:target_server_id]) +# |> cast(%{target_id: server}, [:target_id]) # |> put_assoc(:target_file, target_file, required: true) # |> put_assoc(:file, file, required: true) -# |> validate_required([:target_server_id]) +# |> validate_required([:target_id]) # end # defp generate_file(storage) do diff --git a/lib/software/model/file.ex 
b/lib/software/model/file.ex index d97fd19f..b99548a8 100644 --- a/lib/software/model/file.ex +++ b/lib/software/model/file.ex @@ -228,7 +228,7 @@ defmodule Helix.Software.Model.File do def by_file(query \\ File, id) do query |> where([f], f.file_id == ^id) - |> join_assoc_modules() + |> join_modules() |> preload_modules() end @@ -237,8 +237,12 @@ defmodule Helix.Software.Model.File do @doc """ Query by storage id. """ - def by_storage(query \\ File, id), - do: where(query, [f], f.storage_id == ^id) + def by_storage(query \\ File, id) do + query + |> where([f], f.storage_id == ^id) + |> join_modules() + |> preload_modules() + end @spec by_version(Queryable.t, Storage.idtb, File.Module.name) :: Queryable.t @@ -247,7 +251,7 @@ defmodule Helix.Software.Model.File do """ def by_version(query \\ File, storage, module) do query - |> by_storage(storage) + |> where([f], f.storage_id == ^storage) |> join_modules() |> by_module(module) |> order_by_version() @@ -282,17 +286,9 @@ defmodule Helix.Software.Model.File do @spec join_modules(Queryable.t) :: Queryable.t docp """ - Join File.Module. - """ - defp join_modules(query), - do: join(query, :left, [f], fm in File.Module, fm.file_id == f.file_id) - - @spec join_assoc_modules(Queryable.t) :: - Queryable.t - docp """ Join File.Module through Ecto Schema's association. """ - defp join_assoc_modules(query), + defp join_modules(query), do: join(query, :left, [f], fm in assoc(f, :modules)) @spec preload_modules(Queryable.t) :: diff --git a/lib/software/model/software_type/decryptor/process_type.ex b/lib/software/model/software_type/decryptor/process_type.ex index 7fbc8a09..ab9d4f06 100644 --- a/lib/software/model/software_type/decryptor/process_type.ex +++ b/lib/software/model/software_type/decryptor/process_type.ex @@ -35,7 +35,7 @@ # event = %ProcessConclusionEvent{ # target_file_id: data.target_file_id, -# target_server_id: Ecto.Changeset.get_field(process, :target_server_id), +# target_id: Ecto.Changeset.get_field(process, :target_id), # storage_id: data.storage_id, # scope: data.scope # } diff --git a/lib/software/model/software_type/encryptor/process_type.ex b/lib/software/model/software_type/encryptor/process_type.ex index e9763a1a..a74f7458 100644 --- a/lib/software/model/software_type/encryptor/process_type.ex +++ b/lib/software/model/software_type/encryptor/process_type.ex @@ -35,7 +35,7 @@ # event = %ProcessConclusionEvent{ # target_file_id: data.target_file_id, -# target_server_id: Ecto.Changeset.get_field(process, :target_server_id), +# target_id: Ecto.Changeset.get_field(process, :target_id), # storage_id: data.storage_id, # version: data.software_version # } diff --git a/lib/software/model/software_type/firewall/process_type.ex b/lib/software/model/software_type/firewall/process_type.ex index 3eeb1fa1..bc64b061 100644 --- a/lib/software/model/software_type/firewall/process_type.ex +++ b/lib/software/model/software_type/firewall/process_type.ex @@ -10,64 +10,30 @@ defmodule Helix.Software.Model.SoftwareType.Firewall.Passive do defimpl Helix.Process.Model.Processable do - alias Helix.Software.Event.Firewall.Started, as: FirewallStartedEvent alias Helix.Software.Event.Firewall.Stopped, as: FirewallStoppedEvent - @ram_base_factor 5 - @cpu_base_factor 2 - - def dynamic_resources(_), - do: [] - - def minimum(%{version: v}), - do: %{ - paused: %{ - ram: v * @ram_base_factor - }, - running: %{ - ram: v * @ram_base_factor, - cpu: v * @cpu_base_factor - } - } - def kill(data, process, _) do - process = - process - |> Ecto.Changeset.change() - |> 
Map.put(:action, :delete) - event = %FirewallStoppedEvent{ version: data.version, - gateway_id: Ecto.Changeset.get_field(process, :gateway_id) + gateway_id: process.gateway_id } - {process, [event]} + {:delete, [event]} end - def state_change(data, process, :running, :paused) do + def complete(data, process) do event = %FirewallStoppedEvent{ version: data.version, - gateway_id: Ecto.Changeset.get_field(process, :gateway_id) + gateway_id: process.gateway_id } - {process, [event]} + {:delete, [event]} end - def state_change(data, process, :paused, :running) do - event = %FirewallStartedEvent{ - version: data.version, - gateway_id: Ecto.Changeset.get_field(process, :gateway_id) - } - - {process, [event]} + def connection_closed(_, _, _) do + {:delete, []} end - def state_change(_, process, _, _), - do: {process, []} - - def conclusion(_, _), - do: raise "firewall(passive) process should not be 'completed'" - def after_read_hook(data), do: data end diff --git a/lib/software/model/software_type/log_forge/process_type.ex b/lib/software/model/software_type/log_forge/process_type.ex index 88e41618..7c3fc93b 100644 --- a/lib/software/model/software_type/log_forge/process_type.ex +++ b/lib/software/model/software_type/log_forge/process_type.ex @@ -15,7 +15,7 @@ defmodule Helix.Software.Model.SoftwareType.LogForge do # TODO: Remove `entity_id` and `version` when `Balance` module is implemented @type t :: %__MODULE__{ target_log_id: Log.id | nil, - target_server_id: Server.id | nil, + target_id: Server.id | nil, entity_id: Entity.id, operation: :edit | :create, message: String.t, @@ -27,7 +27,7 @@ defmodule Helix.Software.Model.SoftwareType.LogForge do :entity_id => Entity.idtb, :operation => :edit | :create, :message => String.t, - optional(:target_server_id) => Server.idtb, + optional(:target_id) => Server.idtb, optional(:target_log_id) => Log.idtb, optional(atom) => any } @@ -40,7 +40,7 @@ defmodule Helix.Software.Model.SoftwareType.LogForge do embedded_schema do field :target_log_id, Log.ID field :entity_id, Entity.ID - field :target_server_id, Server.ID + field :target_id, Server.ID field :operation, Constant @@ -87,8 +87,8 @@ defmodule Helix.Software.Model.SoftwareType.LogForge do :create -> changeset |> cast(%{version: file.modules.log_create.version}, [:version]) - |> cast(params, [:target_server_id]) - |> validate_required([:target_server_id, :version]) + |> cast(params, [:target_id]) + |> validate_required([:target_id, :version]) |> validate_number(:version, greater_than: 0) :edit -> changeset @@ -143,15 +143,17 @@ defmodule Helix.Software.Model.SoftwareType.LogForge do } } - def kill(_, process, _), - do: {%{Changeset.change(process)| action: :delete}, []} - - def state_change(data, process, _, :complete) do - process = %{Changeset.change(process)| action: :delete} + def kill(_, _, _), + do: {:delete, []} + def complete(data, _process) do event = conclusion_event(data) - {process, [event]} + {:delete, [event]} + end + + def connection_closed(_, _, _) do + {:delete, []} end def state_change(_, process, _, _), @@ -172,7 +174,7 @@ defmodule Helix.Software.Model.SoftwareType.LogForge do %LogForgeProcess{ entity_id: Entity.ID.cast!(data.entity_id), target_log_id: target_log_id, - target_server_id: Server.ID.cast!(data.target_server_id), + target_id: Server.ID.cast!(data.target_id), operation: String.to_existing_atom(data.operation), message: data.message, version: data.version diff --git a/lib/software/process/cracker/bruteforce.ex b/lib/software/process/cracker/bruteforce.ex index 
b583e29c..cf923539 100644 --- a/lib/software/process/cracker/bruteforce.ex +++ b/lib/software/process/cracker/bruteforce.ex @@ -4,7 +4,7 @@ process Helix.Software.Process.Cracker.Bruteforce do @moduledoc """ The BruteforceProcess is launched when a user wants to figure out the root password of the target server (identified by `target_server_ip` and - `target_server_id`). + `target_id`). """ alias Helix.Network.Model.Network @@ -26,7 +26,15 @@ process Helix.Software.Process.Cracker.Bruteforce do @type objective :: %{cpu: resource_usage} - @type objective_params :: + @type resources :: + %{ + objective: objective, + static: map, + l_dynamic: [:cpu], + r_dynamic: [] + } + + @type resources_params :: %{ cracker: File.t_of_type(:cracker), hasher: File.t_of_type(:hasher) | nil @@ -40,10 +48,10 @@ process Helix.Software.Process.Cracker.Bruteforce do } end - @spec objective(objective_params) :: - objective - def objective(params = %{cracker: %File{}, hasher: _}), - do: set_objective params + @spec resources(resources_params) :: + resources + def resources(params = %{cracker: %File{}, hasher: _}), + do: get_resources params processable do @moduledoc """ @@ -55,20 +63,10 @@ process Helix.Software.Process.Cracker.Bruteforce do alias Helix.Software.Event.Cracker.Bruteforce.Processed, as: BruteforceProcessedEvent - def dynamic_resources(_), - do: [:cpu] - - def minimum(_) do - %{ - paused: %{ram: 500}, - running: %{ram: 500} - } - end - - on_completion(data) do + on_completion(process, data) do event = BruteforceProcessedEvent.new(process, data) - {:ok, [event]} + {:delete, [event]} end def after_read_hook(data) do @@ -78,7 +76,7 @@ process Helix.Software.Process.Cracker.Bruteforce do end end - process_objective do + resourceable do @moduledoc """ Defines how long a BruteforceProcess should take, resource usage, etc. """ @@ -87,7 +85,7 @@ process Helix.Software.Process.Cracker.Bruteforce do alias Helix.Software.Model.File alias Helix.Software.Process.Cracker.Bruteforce, as: BruteforceProcess - @type params :: BruteforceProcess.objective_params + @type params :: BruteforceProcess.resources_params @type factors :: %{ @@ -110,7 +108,7 @@ process Helix.Software.Process.Cracker.Bruteforce do # Retrieves information about the target's hasher (if any) factor FileFactor, %{file: hasher}, if: not is_nil(hasher), - only: :size, + only: :version, as: :hasher end @@ -119,12 +117,23 @@ process Helix.Software.Process.Cracker.Bruteforce do BruteforceProcess only uses CPU. 
""" cpu(%{hasher: nil}) do - f.cracker.version.bruteforce + 10_000 - 100 * f.cracker.version.bruteforce end cpu(%{hasher: %File{}}) do f.cracker.version.bruteforce * f.hasher.version.password end + + static do + %{ + paused: %{ram: 20}, + running: %{ram: 50} + } + end + + dynamic do + [:cpu] + end end executable do @@ -143,7 +152,7 @@ process Helix.Software.Process.Cracker.Bruteforce do optional(atom) => term } - objective(_, target, _, %{cracker: cracker}) do + resources(_, target, _, %{cracker: cracker}) do hasher = FileQuery.fetch_best(target, :password) %{ diff --git a/lib/software/process/cracker/overflow.ex b/lib/software/process/cracker/overflow.ex index 28077553..83e1ecd4 100644 --- a/lib/software/process/cracker/overflow.ex +++ b/lib/software/process/cracker/overflow.ex @@ -23,7 +23,15 @@ process Helix.Software.Process.Cracker.Overflow do @type objective :: %{cpu: resource_usage} - @type objective_params :: + @type resources :: + %{ + objective: objective, + static: map, + l_dynamic: [:cpu], + r_dynamic: [] + } + + @type resources_params :: %{ cracker: File.t } @@ -44,10 +52,10 @@ process Helix.Software.Process.Cracker.Overflow do } end - @spec objective(objective_params) :: - objective - def objective(params = %{cracker: %File{}}), - do: set_objective params + @spec resources(resources_params) :: + resources + def resources(params = %{cracker: %File{}}), + do: get_resources params processable do @@ -58,20 +66,10 @@ process Helix.Software.Process.Cracker.Overflow do alias Helix.Software.Event.Cracker.Overflow.Processed, as: OverflowProcessedEvent - def dynamic_resources(_), - do: [:cpu] - - def minimum(_) do - %{ - paused: %{ram: 24}, - running: %{ram: 24} - } - end - - on_completion(data) do + on_completion(process, data) do event = OverflowProcessedEvent.new(process, data) - {:ok, [event]} + {:delete, [event]} end def after_read_hook(data = %{target_connection_id: nil}), @@ -87,13 +85,13 @@ process Helix.Software.Process.Cracker.Overflow do end end - process_objective do + resourceable do alias Helix.Software.Factor.File, as: FileFactor alias Helix.Software.Model.File alias Helix.Software.Process.Cracker.Overflow, as: OverflowProcess - @type params :: OverflowProcess.objective_params + @type params :: OverflowProcess.resources_params @type factors :: %{ cracker: %{version: FileFactor.fact_version} @@ -109,6 +107,17 @@ process Helix.Software.Process.Cracker.Overflow do cpu do f.cracker.version.overflow end + + dynamic do + [:cpu] + end + + static do + %{ + paused: %{ram: 100}, + running: %{ram: 200} + } + end end executable do @@ -123,7 +132,7 @@ process Helix.Software.Process.Cracker.Overflow do optional(atom) => term } - objective(_, _, _, %{cracker: cracker}) do + resources(_, _, _, %{cracker: cracker}) do %{cracker: cracker} end diff --git a/lib/software/process/file/transfer.ex b/lib/software/process/file/transfer.ex index c93a15f2..d9fda757 100644 --- a/lib/software/process/file/transfer.ex +++ b/lib/software/process/file/transfer.ex @@ -11,6 +11,7 @@ process Helix.Software.Process.File.Transfer do file is being transferred, is already present on the standard process data. 
""" + alias Helix.Network.Model.Network alias Helix.Software.Model.File alias Helix.Software.Model.Storage @@ -22,6 +23,14 @@ process Helix.Software.Process.File.Transfer do connection_type: connection_type } + @type resources :: + %{ + objective: objective, + l_dynamic: [:dlk] | [:ulk], + r_dynamic: [:ulk] | [:dlk], + static: map + } + @type objective :: %{dlk: resource_usage} | %{ulk: resource_usage} @@ -35,6 +44,12 @@ process Helix.Software.Process.File.Transfer do destination_storage_id: Storage.id } + @type resources_params :: %{ + type: transfer_type, + file: File.t, + network_id: Network.id + } + @spec new(creation_params) :: t def new(params = %{destination_storage_id: %Storage.ID{}}) do @@ -45,15 +60,10 @@ process Helix.Software.Process.File.Transfer do } end - @type objective_params :: %{ - type: transfer_type, - file: File.t - } - - @spec objective(objective_params) :: - objective - def objective(params = %{type: _, file: _}), - do: set_objective params + @spec resources(resources_params) :: + resources + def resources(params = %{type: _, file: _, network_id: _}), + do: get_resources params processable do @moduledoc """ @@ -75,44 +85,36 @@ process Helix.Software.Process.File.Transfer do alias Helix.Software.Event.File.Transfer.Processed, as: FileTransferProcessedEvent - def dynamic_resources(%{type: :download}), - do: [:dlk] - def dynamic_resources(%{type: :upload}), - do: [:ulk] - - def minimum(_), - do: %{} - @doc """ Emits `FileTransferAbortedEvent.t` when/if process gets killed. """ - on_kill(data, _reason) do + on_kill(process, data, _reason) do reason = :killed {from_id, to_id} = get_servers_context(data, process) event = FileTransferAbortedEvent.new(process, data, from_id, to_id, reason) - {:ok, [event]} + {:delete, [event]} end @doc """ Emits `FileTransferProcessedEvent.t` when process completes. 
""" - on_completion(data) do + on_completion(process, data) do {from_id, to_id} = get_servers_context(data, process) event = FileTransferProcessedEvent.new(process, data, from_id, to_id) - {:ok, [event]} + {:delete, [event]} end @spec get_servers_context(data :: term, process :: term) :: context :: {from_server :: Server.id, to_server :: Server.id} defp get_servers_context(%{type: :download}, process), - do: {process.target_server_id, process.gateway_id} + do: {process.target_id, process.gateway_id} defp get_servers_context(%{type: :upload}, process), - do: {process.gateway_id, process.target_server_id} + do: {process.gateway_id, process.target_id} def after_read_hook(data) do %FileTransferProcess{ @@ -123,18 +125,15 @@ process Helix.Software.Process.File.Transfer do end end - process_objective do + resourceable do @moduledoc """ Sets the objectives to FileTransferProcess """ + alias Helix.Software.Process.File.Transfer, as: FileTransferProcess alias Helix.Software.Factor.File, as: FileFactor - @type params :: - %{ - type: :download | :upload, - file: File.t - } + @type params :: FileTransferProcess.resources_params @type factors :: %{ @@ -162,9 +161,36 @@ process Helix.Software.Process.File.Transfer do f.file.size end + network(%{network_id: network_id}) do + network_id + end + # Safety fallbacks dlk(%{type: :upload}) ulk(%{type: :download}) + + dynamic(%{type: :download}) do + [:dlk] + end + + dynamic(%{type: :upload}) do + [:ulk] + end + + static do + %{ + paused: %{ram: 10}, + running: %{ram: 20} + } + end + + r_dynamic(%{type: :download}) do + [:ulk] + end + + r_dynamic(%{type: :upload}) do + [:dlk] + end end executable do @@ -182,10 +208,11 @@ process Helix.Software.Process.File.Transfer do optional(atom) => term } - objective(_, _, params, meta) do + resources(_, _, params, meta) do %{ type: params.type, - file: meta.file + file: meta.file, + network_id: meta.network_id } end diff --git a/lib/universe/bank/process/bank/account/password_reveal.ex b/lib/universe/bank/process/bank/account/password_reveal.ex index da6c72fd..dbb9a2f7 100644 --- a/lib/universe/bank/process/bank/account/password_reveal.ex +++ b/lib/universe/bank/process/bank/account/password_reveal.ex @@ -24,7 +24,15 @@ process Helix.Universe.Bank.Process.Bank.Account.RevealPassword do } @type objective :: %{cpu: resource_usage} - @type objective_params :: + + @type resources :: %{ + objective: objective, + static: map, + l_dynamic: [:cpu], + r_dynamic: [] + } + + @type resources_params :: %{ account: BankAccount.t } @@ -39,35 +47,29 @@ process Helix.Universe.Bank.Process.Bank.Account.RevealPassword do } end - @spec objective(objective_params) :: - objective - def objective(params = %{account: %BankAccount{}}), - do: set_objective params + @spec resources(resources_params) :: + resources + def resources(params = %{account: %BankAccount{}}), + do: get_resources params processable do alias Helix.Universe.Bank.Event.RevealPassword.Processed, as: RevealPasswordProcessedEvent - def dynamic_resources(_), - do: [:cpu] - - def minimum(_), - do: %{} - - on_completion(data) do + on_completion(process, data) do event = RevealPasswordProcessedEvent.new(process, data) - {:ok, [event]} + {:delete, [event]} end end - process_objective do + resourceable do alias Helix.Universe.Bank.Process.Bank.Account.RevealPassword, as: RevealPasswordProcess - @type params :: RevealPasswordProcess.objective_params + @type params :: RevealPasswordProcess.resources_params @type factors :: term # TODO proper balance @@ -76,6 +78,10 @@ process 
Helix.Universe.Bank.Process.Bank.Account.RevealPassword do cpu(_) do 1 end + + dynamic do + [:cpu] + end end executable do @@ -90,7 +96,7 @@ process Helix.Universe.Bank.Process.Bank.Account.RevealPassword do optional(atom) => term } - objective(_, _, %{account: account}, _) do + resources(_, _, %{account: account}, _) do %{account: account} end end diff --git a/lib/universe/bank/process/bank/transfer.ex b/lib/universe/bank/process/bank/transfer.ex index 65b77ed4..ed3f6f53 100644 --- a/lib/universe/bank/process/bank/transfer.ex +++ b/lib/universe/bank/process/bank/transfer.ex @@ -21,7 +21,14 @@ process Helix.Universe.Bank.Process.Bank.Transfer do @type objective :: %{cpu: resource_usage} - @type objective_params :: + @type resources :: %{ + objective: objective, + static: map, + l_dynamic: [], + r_dynamic: [] + } + + @type resources_params :: %{ transfer: BankTransfer.t } @@ -35,8 +42,10 @@ process Helix.Universe.Bank.Process.Bank.Transfer do } end - def objective(params = %{transfer: %BankTransfer{}}), - do: set_objective params + @spec resources(resources_params) :: + resources + def resources(params = %{transfer: %BankTransfer{}}), + do: get_resources params processable do @@ -45,43 +54,48 @@ process Helix.Universe.Bank.Process.Bank.Transfer do alias Helix.Universe.Bank.Event.Bank.Transfer.Processed, as: BankTransferProcessedEvent - def dynamic_resources(_), - do: [:cpu] - - # Review: Not exactly what I want. Where do I put limitations? - # TODO: Once TOP supports it, `minimum` should refer to raw time, not - # hardware resources like cpu - def minimum(_), - do: %{} - - on_kill(data, _reason) do + on_kill(process, data, _reason) do event = BankTransferAbortedEvent.new(process, data) - {:ok, [event]} + {:delete, [event]} end - on_completion(data) do + on_completion(process, data) do event = BankTransferProcessedEvent.new(process, data) - {:ok, [event]} + {:delete, [event]} end def after_read_hook(data), do: data end - process_objective do + resourceable do alias Helix.Universe.Bank.Process.Bank.Transfer, as: BankTransferProcess - @type params :: BankTransferProcess.objective_params + @type params :: BankTransferProcess.resources_params @type factors :: term get_factors(%{transfer: _}) do end + # TODO: Use Time, not CPU cpu(%{transfer: transfer}) do transfer.amount end + + dynamic do + [] + end + + # Review: Not exactly what I want. Where do I put limitations? + # TODO: Add ResourceTime; specify to the size of the transfer. + static do + %{ + paused: %{ram: 50}, + running: %{ram: 100} + } + end end executable do @@ -91,7 +105,7 @@ process Helix.Universe.Bank.Process.Bank.Transfer do @type params :: BankTransferProcess.creation_params @type meta :: term - objective(_gateway, _atm, %{transfer: transfer}, _meta) do + resources(_gateway, _atm, %{transfer: transfer}, _meta) do %{transfer: transfer} end diff --git a/lib/websocket/utils.ex b/lib/websocket/utils.ex index ab209f69..571eb2bb 100644 --- a/lib/websocket/utils.ex +++ b/lib/websocket/utils.ex @@ -26,7 +26,7 @@ defmodule Helix.Websocket.Utils do Helper that automatically renders the reply with the recently created process. 
""" def render_process(process = %Process{}, socket) do - process_data = process.process_data + process_data = process.data server_id = socket.assigns.gateway.server_id entity_id = socket.assigns.entity_id diff --git a/lib/websocket/websocket.ex b/lib/websocket/websocket.ex index 1b9da5cc..22030f35 100644 --- a/lib/websocket/websocket.ex +++ b/lib/websocket/websocket.ex @@ -64,6 +64,7 @@ defmodule Helix.Websocket do Generic request handler. It guides the request through the Requestable flow, replying the result back to the client. """ + # TODO: Adicionar ReqMeta aqui \/; passar diretamente p/ `handle_request/2` def handle_request(request, socket) do with \ {:ok, request} <- Requestable.check_params(request, socket), diff --git a/priv/repo/process/migrations/20171026190429_top_rewrite.exs b/priv/repo/process/migrations/20171026190429_top_rewrite.exs new file mode 100644 index 00000000..2663bdd0 --- /dev/null +++ b/priv/repo/process/migrations/20171026190429_top_rewrite.exs @@ -0,0 +1,61 @@ +defmodule Helix.Process.Repo.Migrations.TOPRewrite do + use Ecto.Migration + + def change do + + drop table(:process_servers) + drop table(:processes) + + create table(:processes, primary_key: false) do + add :process_id, :inet, primary_key: true + + # Identifiers + add :gateway_id, :inet, null: false + add :target_id, :inet, null: false + add :source_entity_id, :inet, null: false + + # Custom keys + add :file_id, :inet + add :network_id, :inet + add :connection_id, :inet + + # Helix.Process stuff + add :data, :jsonb, null: false + add :type, :string, null: false + add :priority, :integer, null: false + + # Resources + add :objective, :jsonb, null: false + add :processed, :jsonb + + add :l_reserved, :jsonb + add :r_reserved, :jsonb + + add :l_limit, :jsonb + add :r_limit, :jsonb + + add :l_dynamic, {:array, :string}, null: false + add :r_dynamic, {:array, :string} + + add :static, :jsonb, null: false + add :last_checkpoint_time, :utc_datetime + + # Metadata + add :creation_time, :utc_datetime, null: false + end + # Used to identify all processes of #{type} on #{server} + # Also used when fetching all processes on #{server} + create index(:processes, [:gateway_id, :type]) + + # Useful but currently unused. Uncomment me if you need me (no one does) + # create index(:processes, [:target_id]) + + # Used on e.g. FileDelete operations, where the underlying process should be + # killed if the file was modified. + create index(:processes, [:file_id]) + + # Used on e.g. 
ConnectionClosed operations, where the underlying process + # should be killed if the connection was terminated + create index(:processes, [:connection_id]) + end +end diff --git a/test/cache/action/cache_test.exs b/test/cache/action/cache_test.exs index ad0c9dc7..4aad7e06 100644 --- a/test/cache/action/cache_test.exs +++ b/test/cache/action/cache_test.exs @@ -319,7 +319,11 @@ defmodule Helix.Cache.Action.CacheTest do # Fresh entry from db web2 = assert_hit CacheInternal.direct_query({:web, :content}, nip) - assert web2.expiration_date > web1.expiration_date + diff = + DateTime.diff(web2.expiration_date, web1.expiration_date, :millisecond) + + assert diff > 0 + assert web2.content == web1.content end end diff --git a/test/core/listener/internal/listener_test.exs b/test/core/listener/internal/listener_test.exs index 6d475c42..7a5d3d10 100644 --- a/test/core/listener/internal/listener_test.exs +++ b/test/core/listener/internal/listener_test.exs @@ -27,8 +27,6 @@ defmodule Helix.Core.Listener.Internal.ListenerTest do refute listener.event == event # Make sure the Owner entry was created too - # assert OwnerInternal.fetch() # Parei aqui - assert %{owner: owner} = ListenerInternal.fetch_owner( owner_id, object_id, listener.event, subscriber diff --git a/test/entity/event/handler/database_test.exs b/test/entity/event/handler/database_test.exs index e46c7df0..61029931 100644 --- a/test/entity/event/handler/database_test.exs +++ b/test/entity/event/handler/database_test.exs @@ -32,9 +32,11 @@ defmodule Helix.Entity.Event.Handler.DatabaseTest do on_db = DatabaseQuery.fetch_bank_account(entry.entity_id, acc) assert on_db.password == password - assert on_db.last_update > entry.last_update refute on_db.token refute on_db.last_login_date + + diff = DateTime.diff(on_db.last_update, entry.last_update, :millisecond) + assert diff > 0 end test "a new entry is created in case it did not exist before" do @@ -54,8 +56,11 @@ defmodule Helix.Entity.Event.Handler.DatabaseTest do on_db = DatabaseQuery.fetch_bank_account(fake_entry.entity_id, acc) assert on_db.password == password - assert on_db.last_update > fake_entry.last_update refute on_db.last_login_date + + diff = + DateTime.diff(on_db.last_update, fake_entry.last_update, :millisecond) + assert diff > 0 end end @@ -71,9 +76,11 @@ defmodule Helix.Entity.Event.Handler.DatabaseTest do on_db = DatabaseQuery.fetch_bank_account(entry.entity_id, acc) assert on_db.token == token - assert on_db.last_update > entry.last_update refute on_db.password refute on_db.last_login_date + + diff = DateTime.diff(on_db.last_update, entry.last_update, :millisecond) + assert diff > 0 end test "a new entry is created in case it did not exist before" do @@ -89,9 +96,12 @@ defmodule Helix.Entity.Event.Handler.DatabaseTest do on_db = DatabaseQuery.fetch_bank_account(fake_entry.entity_id, acc) assert on_db.token == token - assert on_db.last_update > fake_entry.last_update refute on_db.password refute on_db.last_login_date + + diff = + DateTime.diff(on_db.last_update, fake_entry.last_update, :millisecond) + assert diff > 0 end end @@ -106,10 +116,12 @@ defmodule Helix.Entity.Event.Handler.DatabaseTest do on_db = DatabaseQuery.fetch_bank_account(entry.entity_id, acc) refute on_db.token - assert on_db.last_update > entry.last_update assert on_db.password == acc.password assert on_db.last_login_date assert on_db.known_balance == acc.balance + + diff = DateTime.diff(on_db.last_update, entry.last_update, :millisecond) + assert diff > 0 end test "a new entry is created in case it did not 
exist before" do @@ -124,10 +136,13 @@ defmodule Helix.Entity.Event.Handler.DatabaseTest do on_db = DatabaseQuery.fetch_bank_account(fake_entry.entity_id, acc) refute on_db.token - assert on_db.last_update > fake_entry.last_update assert on_db.password == acc.password assert on_db.last_login_date assert on_db.known_balance == acc.balance + + diff = + DateTime.diff(on_db.last_update, fake_entry.last_update, :millisecond) + assert diff > 0 end end end diff --git a/test/entity/model/database_bank_account_test.exs b/test/entity/model/database_bank_account_test.exs index a7b3af33..a43ab1fe 100644 --- a/test/entity/model/database_bank_account_test.exs +++ b/test/entity/model/database_bank_account_test.exs @@ -1,6 +1,6 @@ defmodule Helix.Entity.Model.DatabaseBankAccountTest do - use ExUnit.Case, async: true + use Helix.Test.Case.Integration alias Ecto.Changeset alias Helix.Entity.Model.DatabaseBankAccount diff --git a/test/event/event_test.exs b/test/event/event_test.exs new file mode 100644 index 00000000..39e4f893 --- /dev/null +++ b/test/event/event_test.exs @@ -0,0 +1,36 @@ +defmodule Helix.EventTest do + + use Helix.Test.Case.Integration + + alias Helix.Story.Model.Step + alias Helix.Story.Query.Story, as: StoryQuery + alias Helix.Event + + alias Helix.Test.Story.Setup, as: StorySetup + alias Helix.Test.Event.Setup, as: EventSetup + + describe "emit_after/2" do + test "event is emitted after the specified time" do + {_, %{entity_id: entity_id, step: cur_step}} = + StorySetup.story_step(name: :fake_steps@test_msg, meta: %{}) + + event = EventSetup.Story.reply_sent(cur_step, "reply_to_e3", "e3") + + # We've just asked to emit `event` within 100 ms + Event.emit_after([event], 50) + + # Meanwhile, let's make sure the current step on the DB hasn't changed. + assert %{object: step} = StoryQuery.fetch_current_step(entity_id) + assert step == cur_step + + # Wait for it... 
needs some extra time because async + :timer.sleep(80) + + assert %{object: new_step} = StoryQuery.fetch_current_step(entity_id) + + # DB state has changed + refute new_step == cur_step + assert new_step.name == Step.get_next_step(step) + end + end +end diff --git a/test/event/notification_handler_test.exs b/test/event/notification_handler_test.exs index 5402efcf..6143630a 100644 --- a/test/event/notification_handler_test.exs +++ b/test/event/notification_handler_test.exs @@ -29,10 +29,7 @@ defmodule Helix.Event.NotificationHandlerTest do {_socket, %{gateway: gateway}} = ChannelSetup.join_server([own_server: true]) - event = - EventSetup.process_created( - :single_server, - [gateway_id: gateway.server_id]) + event = EventSetup.Process.created(gateway.server_id) # Process happens on the same server assert event.gateway_id == event.target_id @@ -50,7 +47,7 @@ defmodule Helix.Event.NotificationHandlerTest do # Make sure all we need is on the process return assert_id notification.data.process_id, event.process.process_id - assert notification.data.type == event.process.process_type + assert notification.data.type == event.process.type |> to_string() assert_id notification.data.file_id, event.process.file_id assert_id notification.data.connection_id, event.process.connection_id assert_id notification.data.network_id, event.process.network_id @@ -66,21 +63,14 @@ defmodule Helix.Event.NotificationHandlerTest do end test "multi-server" do - {socket, %{gateway: gateway, destination: destination}} = + {_, %{gateway: gateway, destination: destination}} = ChannelSetup.join_server() # Filter out the usual `LogCreatedEvent` after remote server join assert_broadcast "event", _ - gateway_entity_id = socket.assigns.gateway.entity_id - destination_entity_id = socket.assigns.destination.entity_id - event = - EventSetup.process_created( - gateway.server_id, - destination.server_id, - gateway_entity_id, - destination_entity_id) + EventSetup.Process.created(gateway.server_id, destination.server_id) # Process happens on two different servers refute event.gateway_id == event.target_id @@ -98,7 +88,7 @@ defmodule Helix.Event.NotificationHandlerTest do # Make sure all we need is on the process return assert_id notification.data.process_id, event.process.process_id - assert notification.data.type == event.process.process_type + assert notification.data.type == event.process.type |> to_string() assert_id notification.data.file_id, event.process.file_id assert_id notification.data.connection_id, event.process.connection_id assert_id notification.data.network_id, event.process.network_id @@ -142,12 +132,13 @@ defmodule Helix.Event.NotificationHandlerTest do ref = push socket, "cracker.bruteforce", params # Wait for response - assert_reply ref, :ok, response + assert_reply ref, :ok, response, 300 # The response includes the Bruteforce process information assert response.data.process_id # Wait for generic ProcessCreatedEvent + assert_push "event", _top_recalcado_event assert_push "event", process_created_event assert process_created_event.event == "process_created" @@ -162,32 +153,34 @@ defmodule Helix.Event.NotificationHandlerTest do # Notificable protocol. 
# We are getting them here so we can inspect the actual metadata of # both `ProcessCompletedEvent` and `PasswordAcquiredEvent` - assert_broadcast "event", _process_created_event - assert_broadcast "event", process_completed_event + assert_broadcast "event", _top_recalcado_event + assert_broadcast "event", _process_created_t + assert_broadcast "event", _process_created_f assert_broadcast "event", server_password_acquired_event + assert_broadcast "event", process_completed_event # They have the process IDs! assert process_id == process_completed_event.__meta__.process_id assert process_id == server_password_acquired_event.__meta__.process_id + # We'll receive the PasswordAcquiredEvent + assert_push "event", password_acquired_event + assert password_acquired_event.event == "server_password_acquired" + + # Which has a valid `process_id` on the event metadata! + assert to_string(process_id) == password_acquired_event.meta.process_id + # And if `ServerPasswordAcquiredEvent` has the process_id, then # `BruteforceProcessedEvent` have it as well, and as such TOP should be # working for all kinds of events. - # We'll receive the generic ProcessCompletedEvent + # Soon we'll receive the generic ProcessCompletedEvent assert_push "event", process_conclusion_event assert process_conclusion_event.event == "process_completed" # As long as we are here, let's test that the metadata sent to the client # has been converted to JSON-friendly strings assert to_string(process_id) == process_conclusion_event.meta.process_id - - # And soon we'll receive the PasswordAcquiredEvent - assert_push "event", password_acquired_event - assert password_acquired_event.event == "server_password_acquired" - - # Which has a valid `process_id` on the event metadata! - assert to_string(process_id) == password_acquired_event.meta.process_id end end end diff --git a/test/event/state/timer_test.exs b/test/event/state/timer_test.exs new file mode 100644 index 00000000..ce4673ab --- /dev/null +++ b/test/event/state/timer_test.exs @@ -0,0 +1,36 @@ +defmodule Helix.Event.State.TimerTest do + + use Helix.Test.Case.Integration + + alias Helix.Story.Model.Step + alias Helix.Story.Query.Story, as: StoryQuery + alias Helix.Event.State.Timer, as: EventTimer + + alias Helix.Test.Story.Setup, as: StorySetup + alias Helix.Test.Event.Setup, as: EventSetup + + describe "emit_after/2" do + test "event is emitted after the specified time" do + {_, %{entity_id: entity_id, step: cur_step}} = + StorySetup.story_step(name: :fake_steps@test_msg, meta: %{}) + + event = EventSetup.Story.reply_sent(cur_step, "reply_to_e3", "e3") + + # We've just asked to emit `event` within 100 ms + EventTimer.emit_after(event, 50) + + # Meanwhile, let's make sure the current step on the DB hasn't changed. + assert %{object: step} = StoryQuery.fetch_current_step(entity_id) + assert step == cur_step + + # Wait for it... 
needs some extra time because async + :timer.sleep(80) + + assert %{object: new_step} = StoryQuery.fetch_current_step(entity_id) + + # DB state has changed + refute new_step == cur_step + assert new_step.name == Step.get_next_step(step) + end + end +end diff --git a/test/features/hack_test.exs b/test/features/hack_test.exs index bba6d83d..46a603e0 100644 --- a/test/features/hack_test.exs +++ b/test/features/hack_test.exs @@ -53,6 +53,7 @@ defmodule Helix.Test.Features.Hack do assert response.data.process_id # Wait for generic ProcessCreatedEvent + assert_push "event", _top_recalcado_event assert_push "event", process_created_event assert process_created_event.event == "process_created" @@ -64,10 +65,6 @@ defmodule Helix.Test.Features.Hack do # Let's cheat and finish the process right now TOPHelper.force_completion(process) - # We'll receive the generic ProcessCompletedEvent - assert_push "event", process_conclusion_event - assert process_conclusion_event.event == "process_completed" - # And soon we'll receive the PasswordAcquiredEvent assert_push "event", password_acquired_event assert password_acquired_event.event == "server_password_acquired" @@ -77,7 +74,9 @@ defmodule Helix.Test.Features.Hack do assert password_acquired_event.data.server_ip == target_nip.ip assert password_acquired_event.data.password - :timer.sleep(50) + # We'll receive the generic ProcessCompletedEvent + assert_push "event", process_conclusion_event + assert process_conclusion_event.event == "process_completed" db_server = DatabaseQuery.fetch_server( @@ -113,10 +112,6 @@ defmodule Helix.Test.Features.Hack do assert bootstrap.filesystem assert bootstrap.logs assert bootstrap.processes - - :timer.sleep(50) - - TOPHelper.top_stop(gateway.server_id) end end diff --git a/test/features/process/lifecycle_test.exs b/test/features/process/lifecycle_test.exs new file mode 100644 index 00000000..300d6e50 --- /dev/null +++ b/test/features/process/lifecycle_test.exs @@ -0,0 +1,114 @@ +defmodule Helix.Test.Features.Process.Lifecycle do + + use Helix.Test.Case.Integration + + import Phoenix.ChannelTest + import Helix.Test.Process.Macros + + alias Helix.Hardware.Query.Motherboard, as: MotherboardQuery + alias Helix.Software.Query.Storage, as: StorageQuery + alias Helix.Process.Model.Process + alias Helix.Process.Query.Process, as: ProcessQuery + + alias Helix.Test.Channel.Setup, as: ChannelSetup + alias Helix.Test.Network.Helper, as: NetworkHelper + alias Helix.Test.Software.Helper, as: SoftwareHelper + alias Helix.Test.Software.Setup, as: SoftwareSetup + + @moduletag :feature + + @internet_id NetworkHelper.internet_id() + + describe "process" do + + test "creation and allocation" do + {socket, %{gateway: gateway, destination: destination}} = + ChannelSetup.join_server() + + # Create the File that we'll downloaded + {file, _} = SoftwareSetup.file(server_id: destination.server_id) + + params = %{ + "file_id" => file.file_id |> to_string(), + } + + # Starts the file download + ref = push socket, "file.download", params + + assert_reply ref, :ok, response, 200 + + # The process was created + assert response.data.process_id + process_id = Process.ID.cast!(response.data.process_id) + + assert_push "event", top_recalcado + assert_push "event", process_created + + assert top_recalcado.event == "top_recalcado" + assert process_created.event == "process_created" + + # Let's fetch the process, just to make sure + process = ProcessQuery.fetch(process_id) + + # The process received allocation + refute Enum.empty?(process.l_reserved) + refute 
Enum.empty?(process.r_reserved) + + resources = + gateway.motherboard_id + |> MotherboardQuery.fetch() + |> MotherboardQuery.resources() + + server_dlk = resources.net[@internet_id].downlink + + # Process received no allocations of CPU/ULK (some RAM for static usage) + assert process.l_allocated.cpu == 0.0 + assert process.l_allocated.ulk[@internet_id] == 0.0 + assert process.l_allocated.ram > 0 + + # But received 100% of server DLK resources + assert_resource process.l_allocated.dlk[@internet_id], server_dlk + end + + # This is pretty much the same as the test above, but now we'll focus on the other + # half: completing the process. We want to avoid using `force_completion` + # from TOPHelper, so the completion is actually spontaneous. + # In order to do that, we create a very small process which needs to transfer + # a file of about 1kb, taking less than a second. + test "spontaneous completion" do + # TODO: Local socket for local TOPRecalcado event + {socket, %{gateway: gateway, destination: destination}} = + ChannelSetup.join_server() + + # Create the File that we'll download + {file, _} = SoftwareSetup.file(server_id: destination.server_id, size: 10) + + params = %{ + "file_id" => file.file_id |> to_string(), + } + + # Starts the file download + ref = push socket, "file.download", params + assert_reply ref, :ok, _ + + gateway_storage = SoftwareHelper.get_storage(gateway) + + # No files on gateway server. Download process started but not completed. + assert [] == StorageQuery.files_on_storage(gateway_storage) + + # Wait for process completion (Process itself takes about 100ms) + # Extra time is desired to let all "spawned" connections close + :timer.sleep(200) + + # I haz file!11 + assert [downloaded_file] = StorageQuery.files_on_storage(gateway_storage) + + # Same file... + assert downloaded_file.name == file.name + assert downloaded_file.modules == file.modules + + # Different ID + refute downloaded_file.file_id == file.file_id + end + end +end diff --git a/test/features/process/recalque_test.exs b/test/features/process/recalque_test.exs new file mode 100644 index 00000000..f8d34376 --- /dev/null +++ b/test/features/process/recalque_test.exs @@ -0,0 +1,162 @@ +# credo:disable-for-this-file Credo.Check.Readability.VariableNames +defmodule Helix.Test.Features.Process.Recalque do + + use Helix.Test.Case.Integration + + import Helix.Test.Macros + import Helix.Test.Process.Macros + + alias Helix.Process.Query.Process, as: ProcessQuery + alias Helix.Software.Public.File, as: FilePublic + + alias Helix.Test.Network.Helper, as: NetworkHelper + alias Helix.Test.Network.Setup, as: NetworkSetup + alias Helix.Test.Process.Helper, as: ProcessHelper + alias Helix.Test.Server.Helper, as: ServerHelper + alias Helix.Test.Server.Setup, as: ServerSetup + alias Helix.Test.Software.Helper, as: SoftwareHelper + alias Helix.Test.Software.Setup, as: SoftwareSetup + + @moduletag :feature + + @internet_id NetworkHelper.internet_id() + + describe "recalque" do + # In this scenario, we have two servers. We'll start a few processes on them + # and see how they behave, especially inter-TOP resource utilization (DLK and + # ULK).
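+ #
+ # A rough mental model for the assertions below (an inference from the
+ # numbers this test asserts, not an authoritative spec): a download consumes
+ # the gateway's DLK locally and the remote's ULK remotely, so its allocation
+ # should settle at the smaller of the two, e.g.:
+ #
+ #   min(resA.dlk, resB.ulk) == min(100, 30) == 30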
+ test "scenario one" do + {serverA, _} = ServerSetup.server() + {serverB, _} = ServerSetup.server() + {serverC, _} = ServerSetup.server() + + resA = %{cpu: 300, ram: 200, dlk: 100, ulk: 10} + resB = %{cpu: 250, ram: 150, dlk: 50, ulk: 30} + resC = %{cpu: 200, ram: 100, dlk: 30, ulk: 10} + + ServerHelper.update_server_specs(serverA, resA) + ServerHelper.update_server_specs(serverB, resB) + ServerHelper.update_server_specs(serverC, resC) + + storageA = SoftwareHelper.get_storage(serverA) + storageC = SoftwareHelper.get_storage(serverC) + + {tunnelAB, _} = + NetworkSetup.tunnel( + gateway_id: serverA.server_id, destination_id: serverB.server_id + ) + + {tunnelCB, _} = + NetworkSetup.tunnel( + gateway_id: serverC.server_id, destination_id: serverB.server_id + ) + + {dl_file, _} = SoftwareSetup.file(server_id: serverB.server_id) + + # Create a download process + assert {:ok, %{process_id: downloadA_id}} = + FilePublic.download(serverA, serverB, tunnelAB, storageA, dl_file) + + # Give some time for allocation + # :timer.sleep(50) + + # Let's fetch the Process, as this is the moment when the actual + # allocation is loaded. + downloadA = ProcessQuery.fetch(downloadA_id) + orig_downloadA = downloadA + + # The download uses 30 units of DLK of serverA + assert_resource downloadA.l_allocated.dlk[@internet_id], resB.ulk + + # And it uses 30 units of ULK of serverB + assert_resource downloadA.r_allocated.ulk[@internet_id], resB.ulk + + # All other resources are unused (except RAM, due to static allocations) + assert downloadA.l_allocated.ulk[@internet_id] == 0 + assert downloadA.l_allocated.cpu == 0 + assert downloadA.l_allocated.ram > 0 + + assert downloadA.r_allocated.dlk == %{} + assert downloadA.r_allocated.cpu == 0 + assert downloadA.r_allocated.ram == 0 + + # All good! Let's make this story more exciting. + + ### Chapter 2 ### + + # Now, serverA will start a local-only process, which should not affect + # the download's local or remote resources. + {cracker, _} = SoftwareSetup.cracker(server_id: serverA.server_id) + + ipB = ServerHelper.get_ip(serverB) + + # Start the Bruteforce attack + assert {:ok, %{process_id: bruteforce_id}} = + FilePublic.bruteforce(cracker, serverA, serverB, @internet_id, ipB, []) + + # Give some time for allocation + # :timer.sleep(50) + + bruteforce = ProcessQuery.fetch(bruteforce_id) + + # All CPU of serverA was assigned to the Bruteforce process + assert bruteforce.l_allocated.cpu == resA.cpu + + # Does not use other resources (except RAM due to static allocations) + assert bruteforce.l_allocated.dlk[@internet_id] == 0 + assert bruteforce.l_allocated.ulk[@internet_id] == 0 + assert bruteforce.l_allocated.ram > 0 + + assert bruteforce.r_allocated.ulk == %{} + assert bruteforce.r_allocated.dlk == %{} + assert bruteforce.r_allocated.cpu == 0 + assert bruteforce.r_allocated.ram == 0 + + downloadA2 = ProcessQuery.fetch(downloadA_id) + + # After recalque, the Download process remains unchanged + # (The time_left may have changed a little bit, but that's because some + # time has passed since it was created :) + # assert downloadA2 == downloadA + assert_map downloadA2, downloadA, skip: [:time_left, :completion_date] + + ### Chapter 3 ### + + # Now things get real. A new server, `C`, will also start a download on `B`. + # This will reduce B's ULK availability, and the previously started + # download process should be recalculated. + # Notice this is our first chain reaction: When C starts its download, TOP + # will be recalculated on C and B. Then, it should recalculate A.
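+ #
+ # Back-of-the-envelope expectation (assuming B's ULK is split evenly between
+ # the two downloads): each one should end up with resB.ulk / 2 == 15 units,
+ # which is what the assertions below verify.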
+ + assert {:ok, %{process_id: downloadC_id}} = + FilePublic.download(serverC, serverB, tunnelCB, storageC, dl_file) + + # :timer.sleep(50) + + downloadC = ProcessQuery.fetch(downloadC_id) + downloadA = ProcessQuery.fetch(downloadA_id) + + # DownloadC DLK allocation is exactly half of B's ULK + assert_resource downloadC.l_allocated.dlk[@internet_id], resB.ulk / 2 + assert_resource downloadC.r_allocated.ulk[@internet_id], resB.ulk / 2 + + # Now, downloadA is using half of B's ULK as well + assert_resource downloadA.l_allocated.dlk[@internet_id], resB.ulk / 2 + assert_resource downloadA.r_allocated.ulk[@internet_id], resB.ulk / 2 + + # The process duration has roughly doubled, since it's using half of the + # resources from before + assert_in_delta downloadA.time_left, orig_downloadA.time_left * 2, 1 + + # downloadA has processed a little bit during this time + refute downloadA.processed == orig_downloadA.processed + refute \ + downloadA.last_checkpoint_time == orig_downloadA.last_checkpoint_time + + # This `processed` information is actually saved on the DB + raw_downloadA = ProcessHelper.raw_get(downloadA_id) + assert raw_downloadA.processed["dlk"]["::"] > 0 + assert raw_downloadA.processed["ram"] > 0 + end + end +end diff --git a/test/hardware/action/motherboad_test.exs b/test/hardware/action/motherboard_test.exs similarity index 84% rename from test/hardware/action/motherboad_test.exs rename to test/hardware/action/motherboard_test.exs index ff9dab45..ec3eb690 100644 --- a/test/hardware/action/motherboad_test.exs +++ b/test/hardware/action/motherboard_test.exs @@ -57,20 +57,22 @@ defmodule Helix.Hardware.Action.MotherboardTest do end describe "unlink/1 is idempotent" do - slot = Factory.insert(:motherboard_slot) + test "unlinks slot" do + slot = Factory.insert(:motherboard_slot) - component = Factory.insert(slot.link_component_type) - {:ok, slot} = MotherboardAction.link(slot, component.component) + component = Factory.insert(slot.link_component_type) + {:ok, slot} = MotherboardAction.link(slot, component.component) - assert slot.link_component_id + assert slot.link_component_id - MotherboardAction.unlink(slot) - MotherboardAction.unlink(slot) + MotherboardAction.unlink(slot) + MotherboardAction.unlink(slot) - result = Repo.get(MotherboardSlot, slot.slot_id) - refute result.link_component_id + result = Repo.get(MotherboardSlot, slot.slot_id) + refute result.link_component_id - CacheHelper.sync_test() + CacheHelper.sync_test() + end end describe "delete/1" do diff --git a/test/log/event/handler/log_test.exs b/test/log/event/handler/log_test.exs index 23a927f0..e1f820b7 100644 --- a/test/log/event/handler/log_test.exs +++ b/test/log/event/handler/log_test.exs @@ -78,7 +78,7 @@ defmodule Helix.Log.Event.Handler.LogTest do event = %LogForgeCreateComplete{ entity_id: entity.entity_id, - target_server_id: server.server_id, + target_id: server.server_id, message: message, version: 456 } diff --git a/test/process/action/process_test.exs b/test/process/action/process_test.exs index 2806279b..047ae132 100644 --- a/test/process/action/process_test.exs +++ b/test/process/action/process_test.exs @@ -2,5 +2,38 @@ defmodule Helix.Process.Action.ProcessTest do use Helix.Test.Case.Integration - # TODO: tests + alias Helix.Process.Action.Process, as: ProcessAction + alias Helix.Process.Query.Process, as: ProcessQuery + + alias Helix.Test.Server.Setup, as: ServerSetup + alias Helix.Test.Process.Setup, as: ProcessSetup + alias Helix.Test.Process.TOPHelper + + describe "create/1" do + test "process is 
created; event is defined" do + + {server, %{entity: entity}} = ServerSetup.server() + {_, %{params: params}} = + ProcessSetup.fake_process( + gateway_id: server.server_id, + single_server: true, + entity_id: entity.entity_id + ) + + assert {:ok, process, [event]} = ProcessAction.create(params) + + # Created the process... + assert process.process_id + assert process.gateway_id == server.server_id + assert process.source_entity_id == entity.entity_id + + # And actually inserted it into the DB + assert ProcessQuery.fetch(process.process_id) + + # Process hasn't been confirmed (allocated) yet. + assert event.confirmed == false + + TOPHelper.top_stop() + end + end end diff --git a/test/process/action/top_test.exs b/test/process/action/top_test.exs new file mode 100644 index 00000000..a9fe14ae --- /dev/null +++ b/test/process/action/top_test.exs @@ -0,0 +1,183 @@ +defmodule Helix.Process.Action.TOPTest do + + use Helix.Test.Case.Integration + + import Helix.Test.Process.Macros + + alias Helix.Process.Action.TOP, as: TOPAction + alias Helix.Process.Model.Process + alias Helix.Process.Query.Process, as: ProcessQuery + + alias Helix.Test.Network.Helper, as: NetworkHelper + alias Helix.Test.Server.Helper, as: ServerHelper + alias Helix.Test.Server.Setup, as: ServerSetup + alias Helix.Test.Process.Helper, as: ProcessHelper + alias Helix.Test.Process.Setup, as: ProcessSetup + alias Helix.Test.Process.TOPHelper + + @internet_id NetworkHelper.internet_id() + + describe "complete/1" do + test "completes process when it has actually reached its objective" do + {proc, _} = ProcessSetup.fake_process() + + # Has processed everything it was supposed to; it's completed. + proc = + %{proc| + processed: proc.objective, + l_allocated: %{cpu: 1, ram: 1, ulk: %{}, dlk: %{}} + } + + assert {:ok, events} = TOPAction.complete(proc) + + # Two events; one is ProcessCompletedEvent, the other is the corresponding + # ProcessedEvent (e.g. FileTransferProcessedEvent). + assert length(events) == 2 + end + + test "fails if process hasn't actually finished yet" do + [proc] = + ProcessSetup.TOP.fake_process( + l_dynamic: [:ulk, :dlk, :ram, :cpu], + l_allocated: %{cpu: 1, ram: 1, dlk: %{}, ulk: %{}} + ) + + assert {:error, reason, []} = TOPAction.complete(proc) + assert reason == {:process, :running} + end + end + + describe "recalque/3" do + test "persists processed information" do + {gateway, _} = ServerSetup.server() + + {proc, _} = + ProcessSetup.process( + gateway_id: gateway.server_id, + type: :bruteforce, + static: %{}, + objective: %{cpu: 9999} + ) + + # At this moment, we have `proc` inserted on the Database, but it never + # went through any allocation/recalque. Let's fetch it now. + process = ProcessQuery.fetch(proc.process_id) + + # See? `waiting_allocation`, `l_reserved` never was touched, etc + assert process.state == :waiting_allocation + assert process.l_allocated == Process.Resources.initial() + assert process.l_reserved == Process.Resources.initial() + + # Now let's run recalque on this server. We'll ignore remote for now. + assert {:ok, [proc_recalque], _} = TOPAction.recalque(gateway.server_id) + + # The process state has been changed + assert proc_recalque.state == :running + + # Resources were reserved + assert proc_recalque.l_reserved.cpu > 0 + + # But it hasn't processed anything yet (it's the first allocation) + assert proc_recalque.processed == Process.Resources.initial() + + # OK, the returned value of the recalque is valid. How about whatever was + # persisted on the DB? 
Let's see + proc_db = ProcessQuery.fetch(proc.process_id) + + # At the very least, state and `l_reserved` must match + assert proc_db.state == :running + assert proc_db.l_reserved == proc_recalque.l_reserved + + # Still hasn't processed anything + # refute proc_db.processed + + # Some time_left was assigned + assert proc_db.time_left > 0 + + # Let's recalque the TOP again. Theoretically, nothing should change. + assert {:ok, [proc_recalque2], _} = TOPAction.recalque(gateway.server_id) + + # Allocation is the same... + assert proc_recalque2.next_allocation == proc_recalque.next_allocation + + # Now this is interesting. We'll detail this verification below because + # it *is* important (and it *does* fix a bug). + # See, `proc_recalque2`'s `processed` is different from `proc_recalque`'s. + # As you may remember, `proc_recalque` has never processed anything, while + # `proc_recalque2` has (even if just for a few milliseconds). + # This is correct! HOWEVER, the `processed` information is modified after + # the process was fetched from the DB, so the verification below does not + # guarantee that the `processed` field has been saved correctly in the DB. + refute proc_recalque2.processed == Process.Resources.initial() + + # Interestingly, `processed` is only updated on the DB when the current + # process' allocation has changed. So in the scenario above, even though + # `proc_recalque2` did process something, this information is not saved + # on the DB. Instead, it is derived from the process' current stats. + raw_proc = ProcessHelper.raw_get(process.process_id) + + # See? It's empty + refute raw_proc.processed + + # In order to test this, we'll need to make the process allocation change + # somehow. Let's cheat and reduce the server's total CPU. This should + # reduce the process allocation, which uses 100% of the available CPU. + ServerHelper.update_server_specs(gateway, %{cpu: 500}) + + # So, let's recalque again and see if something changed + assert {:ok, [proc_recalque3], _} = TOPAction.recalque(gateway.server_id) + + # Reserved/allocated CPU went down to 500 + refute proc_recalque3.next_allocation == proc_recalque2.next_allocation + refute proc_recalque3.l_reserved == proc_recalque2.l_reserved + assert_resource proc_recalque3.l_reserved.cpu, 500 + + # How about the processed (on DB)?
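+ # (Recall the mechanism described above: the allocation just changed, so the
+ # work accumulated so far should now have been checkpointed to the DB.)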
+ raw_proc = ProcessHelper.raw_get(process.process_id) + + # Yep, it's saved there + assert raw_proc.processed["cpu"] > 0 + + TOPHelper.top_stop() + end + + test "performs recalque of both gateway and target (for inter-top procs)" do + {gateway, _} = ServerSetup.server() + {target, _} = ServerSetup.server() + + {proc, _} = + ProcessSetup.process( + gateway_id: gateway.server_id, + target_id: target.server_id, + type: :file_download, + l_limit: %{dlk: %{"::" => 50}}, + r_limit: %{ulk: %{"::" => 20}}, + static: %{} + ) + + assert %{gateway: gateway, target: target} = TOPAction.recalque(proc) + + {:ok, [gateway_proc], _} = gateway + {:ok, [target_proc], _} = target + + # Remember, it's the same process + assert gateway_proc.process_id == target_proc.process_id + + # On the gateway, reserved 50 units of DLK + assert gateway_proc.l_reserved.dlk[@internet_id] == 50 + assert gateway_proc.l_reserved.ulk[@internet_id] == 0 + assert gateway_proc.l_reserved.cpu == 0 + assert gateway_proc.l_reserved.ram == 0 + + # On the target, reserved 20 units of ULK + assert target_proc.r_reserved.ulk[@internet_id] == 20 + assert target_proc.r_reserved.dlk == %{} + assert target_proc.r_reserved.cpu == 0 + assert target_proc.r_reserved.ram == 0 + + # More tests exploring edge-cases of Inter-TOP allocation at + # `test/features/process/*`. + TOPHelper.top_stop() + end + end +end diff --git a/test/process/event/handler/top_test.exs b/test/process/event/handler/top_test.exs index cc2d0ec1..c3d6be22 100644 --- a/test/process/event/handler/top_test.exs +++ b/test/process/event/handler/top_test.exs @@ -3,32 +3,47 @@ defmodule Helix.Process.Event.Handler.TOPTest do use Helix.Test.Case.Integration alias Helix.Process.Event.Handler.TOP, as: TOPHandler + alias Helix.Process.Internal.Process, as: ProcessInternal alias Helix.Process.Query.Process, as: ProcessQuery alias Helix.Test.Event.Setup, as: EventSetup alias Helix.Test.Network.Setup, as: NetworkSetup + alias Helix.Test.Process.FakeDefaultProcess alias Helix.Test.Process.Setup, as: ProcessSetup + alias Helix.Test.Process.TOPHelper alias Helix.Test.Server.Setup, as: ServerSetup test "process is killed when its connection is closed" do {connection, _} = NetworkSetup.fake_connection() {server, _} = ServerSetup.server() - {process, _} = - ProcessSetup.process( + # Create a FakeDefaultProcess, a process that we know will always use the + # default callbacks defined by Processable + {_, %{params: params}} = + ProcessSetup.fake_process( gateway_id: server.server_id, - connection_id: connection.connection_id + connection_id: connection.connection_id, ) + params = + params + |> Map.replace(:data, FakeDefaultProcess.new()) + |> Map.replace(:type, :fake_default_process) + + {:ok, process} = ProcessInternal.create(params) + + # Fake ConnectionClosedEvent event = EventSetup.Network.connection_closed(connection) + # Process exists assert ProcessQuery.fetch(process.process_id) + # Simulate emission of ConnectionClosedEvent TOPHandler.connection_closed(event) - # Give enough time for all the asynchronous stuff to happen - :timer.sleep(50) - + # Process no longer exists refute ProcessQuery.fetch(process.process_id) + + TOPHelper.top_stop() end end diff --git a/test/process/event/process_created_test.exs b/test/process/event/process_created_test.exs index c270c4c7..74934969 100644 --- a/test/process/event/process_created_test.exs +++ b/test/process/event/process_created_test.exs @@ -3,13 +3,14 @@ defmodule Helix.Process.Event.Process.CreatedTest do use Helix.Test.Case.Integration alias 
Helix.Event.Notificable + alias Helix.Server.Model.Server alias Helix.Test.Channel.Setup, as: ChannelSetup alias Helix.Test.Event.Setup, as: EventSetup describe "Notificable.whom_to_notify/1" do test "servers are listed correctly" do - event = EventSetup.process_created(:multi_server) + event = EventSetup.Process.created() assert %{server: [event.gateway_id, event.target_id]} == Notificable.whom_to_notify(event) @@ -20,14 +21,10 @@ defmodule Helix.Process.Event.Process.CreatedTest do test "single server process create (player AT action_server)" do socket = ChannelSetup.mock_server_socket([own_server: true]) - action_server = socket.assigns.gateway.server_id - player_entity_id = socket.assigns.gateway.entity_id + gateway_id = socket.assigns.gateway.server_id # Player doing an action on his own server - event = - EventSetup.process_created( - :single_server, - [gateway_id: action_server, gateway_entity_id: player_entity_id]) + event = EventSetup.Process.created(gateway_id) assert {:ok, data} = Notificable.generate_payload(event, socket) @@ -35,27 +32,17 @@ defmodule Helix.Process.Event.Process.CreatedTest do end test "multi server process create (attacker AT attack_source)" do - socket = ChannelSetup.mock_server_socket([own_server: true]) + socket = ChannelSetup.mock_server_socket() - attack_source = socket.assigns.gateway.server_id - attacker_entity_id = socket.assigns.gateway.entity_id + attack_source_id = socket.assigns.gateway.server_id - event = - EventSetup.process_created( - :multi_server, - [gateway_id: attack_source, gateway_entity_id: attacker_entity_id]) + event = EventSetup.Process.created(attack_source_id, Server.ID.generate()) # Event originated on attack_source - assert event.gateway_id == attack_source - - # Event attacker is attacker - assert event.gateway_entity_id == attacker_entity_id + assert event.gateway_id == attack_source_id # Action happens on a remote server - refute event.target_id == attack_source - - # Which belongs to a different player - refute event.target_entity_id == attacker_entity_id + refute event.target_id == attack_source_id # Attacker has full access to the output payload assert {:ok, data} = Notificable.generate_payload(event, socket) @@ -66,17 +53,10 @@ defmodule Helix.Process.Event.Process.CreatedTest do test "multi server process create (attacker AT attack_target)" do socket = ChannelSetup.mock_server_socket() - attack_source = socket.assigns.gateway.server_id - attack_target = socket.assigns.destination.server_id - attacker_entity_id = socket.assigns.gateway.entity_id - victim_entity_id = socket.assigns.destination.entity_id + attack_source_id = socket.assigns.gateway.server_id + attack_target_id = socket.assigns.destination.server_id - event = - EventSetup.process_created( - attack_source, - attack_target, - attacker_entity_id, - victim_entity_id) + event = EventSetup.Process.created(attack_source_id, attack_target_id) # Attacker has full access to the output payload assert {:ok, data} = Notificable.generate_payload(event, socket) @@ -84,66 +64,21 @@ defmodule Helix.Process.Event.Process.CreatedTest do assert_payload_full(data) end - test "multi server process create (victim AT attack_target)" do - socket = ChannelSetup.mock_server_socket([own_server: true]) - - attack_target = socket.assigns.gateway.server_id - victim_entity_id = socket.assigns.gateway.entity_id - - event = - EventSetup.process_created( - :multi_server, - [destination_id: attack_target, - destination_entity_id: victim_entity_id]) - - # Victim has full access to the output 
payload - assert {:ok, data} = Notificable.generate_payload(event, socket) - - assert_payload_full(data) - end - - test "multi server process create (victim AT attack_source)" do - socket = ChannelSetup.mock_server_socket() - - attack_target = socket.assigns.gateway.server_id - attack_source = socket.assigns.destination.server_id - victim_entity_id = socket.assigns.gateway.entity_id - attacker_entity_id = socket.assigns.destination.entity_id - - event = - EventSetup.process_created( - attack_source, - attack_target, - attacker_entity_id, - victim_entity_id) - - # Victim has full access to the output payload - assert {:ok, data} = Notificable.generate_payload(event, socket) - - assert_payload_full(data) - end - test "multi server process create (third AT attack_source)" do socket = ChannelSetup.mock_server_socket() - third_server = socket.assigns.gateway.server_id - third_entity_id = socket.assigns.gateway.entity_id - attack_source = socket.assigns.destination.server_id - attacker_entity_id = socket.assigns.destination.entity_id + third_server_id = socket.assigns.gateway.server_id + attack_source_id = socket.assigns.destination.server_id # Action from `attack_source` to `attack_target` - event = - EventSetup.process_created( - :multi_server, - [gateway_id: attack_source, gateway_entity_id: attacker_entity_id]) + event = EventSetup.Process.created(attack_source_id, Server.ID.generate()) # Attack originated on `attack_source`, owned by `attacker` - assert event.gateway_id == attack_source - refute third_server == attack_source + assert event.gateway_id == attack_source_id + refute third_server_id == attack_source_id # And it targets `attack_target`, totally unrelated to `third` - refute event.target_id == third_server - refute event.target_entity_id == third_entity_id + refute event.target_id == third_server_id # `third` sees everything assert {:ok, data} = Notificable.generate_payload(event, socket) @@ -155,15 +90,10 @@ defmodule Helix.Process.Event.Process.CreatedTest do test "multi server process create (third AT attack_target)" do socket = ChannelSetup.mock_server_socket() - attack_target = socket.assigns.destination.server_id - victim_entity_id = socket.assigns.destination.entity_id + target_id = socket.assigns.destination.server_id # Action from `attack_source` to `attack_target` - event = - EventSetup.process_created( - :multi_server, - [destination_id: attack_target, - destination_entity_id: victim_entity_id]) + event = EventSetup.Process.created(Server.ID.generate(), target_id) # `third` never gets the notification assert {:ok, data} = Notificable.generate_payload(event, socket) diff --git a/test/process/internal/process_test.exs b/test/process/internal/process_test.exs index 50bbc89b..4fc35b8d 100644 --- a/test/process/internal/process_test.exs +++ b/test/process/internal/process_test.exs @@ -5,22 +5,102 @@ defmodule Helix.Process.Internal.ProcessTest do alias Helix.Process.Internal.Process, as: ProcessInternal alias Helix.Process.Model.Process + alias Helix.Test.Process.Helper, as: ProcessHelper alias Helix.Test.Process.Setup, as: ProcessSetup + alias Helix.Test.Process.TOPHelper - describe "fetching" do - test "succeeds by id" do + describe "create/1" do + test "inserts the process on the database" do + {_, %{params: params}} = ProcessSetup.fake_process() + + assert {:ok, process} = ProcessInternal.create(params) + + # Required / input data is correct + assert process.gateway_id == params.gateway_id + assert process.source_entity_id == params.source_entity_id + assert 
process.target_id == params.target_id + assert process.file_id == params.file_id + assert process.network_id == params.network_id + assert process.connection_id == params.connection_id + assert process.data == params.data + assert process.type == params.type + assert process.objective == params.objective + assert process.static == params.static + assert process.l_dynamic == params.l_dynamic + assert process.r_dynamic == params.r_dynamic + + # Generated / default data is correct + assert process.creation_time + refute process.last_checkpoint_time + assert process.priority == 3 + + # Now we'll test the actual format the data was saved on the DB. + # We'll have some trouble with maps, which convert all atoms to strings.. + entry = ProcessHelper.raw_get(process) + + assert entry + + # All IDs are in the expected Helix format + assert entry.process_id == process.process_id + assert entry.gateway_id == process.gateway_id + assert entry.target_id == process.target_id + assert entry.source_entity_id == process.source_entity_id + assert entry.file_id == process.file_id + assert entry.network_id == process.network_id + assert entry.connection_id == process.connection_id + + # Atoms, or a list of them, are converted automatically back to atoms + assert entry.l_dynamic == process.l_dynamic + assert entry.r_dynamic == process.r_dynamic + assert entry.type == process.type + + # Because of NaiveStruct type, we have the Struct loaded + assert entry.data.__struct__ == params.data.__struct__ + + # However its values still need formatting + # Resource maps, too, need reformatting. + + # The conversion of the above values into our internal format (atoms, + # maps, structs) is done with `format/1`. All calls to `fetch/1` (among + # other `get_*` functions) have their result automatically `format`-ted. + # Notice this wasn't the case here because we've executed a "raw_query", + # which did not send our process to `format/1`. + + # Anyway, testing `format/1` is not our goal. See `Process.format/1`. 
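+
+ # For a rough idea of what `format/1` undoes (mirroring the Resources tests
+ # further down in this changeset): raw rows come back with string keys, e.g.
+ # %{"cpu" => 100, "ram" => 200}, and `format/1` reshapes them back into the
+ # internal format, %{cpu: 100, ram: 200}, restoring atoms, structs and
+ # Helix IDs.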
+ TOPHelper.top_stop() + end + end + + describe "fetch/1" do + test "returns the process, formatted" do {process, _} = ProcessSetup.process() + entry = ProcessInternal.fetch(process.process_id) - # Returned the correct entry - assert entry.process_id == process.process_id + assert entry - # Loaded/formatted the entry from DB (virtual data) - assert entry.minimum - assert Map.has_key?(entry, :estimated_time) + # The returned data was formatted, it's exactly the same as defined before + assert entry.data == process.data + + # Added some virtual data + assert entry.state + + # Resource data is identical + assert entry.objective == process.objective + assert entry.processed == process.processed + assert entry.static == process.static + + # Populated derived data + assert entry.l_allocated + assert entry.r_allocated + assert entry.state + assert entry.time_left + assert entry.completion_date + + TOPHelper.top_stop() end - test "fails when process doesn't exists" do + test "returns empty when process does not exist" do refute ProcessInternal.fetch(Process.ID.generate()) end end @@ -37,6 +117,8 @@ defmodule Helix.Process.Internal.ProcessTest do # No longer on DB refute ProcessInternal.fetch(process.process_id) + + TOPHelper.top_stop() end end end diff --git a/test/process/internal/top/allocator/plan_test.exs b/test/process/internal/top/allocator/plan_test.exs deleted file mode 100644 index f40d05fe..00000000 --- a/test/process/internal/top/allocator/plan_test.exs +++ /dev/null @@ -1,312 +0,0 @@ -defmodule Helix.Process.Internal.TOP.Allocator.PlanTest do - - use ExUnit.Case, async: true - - alias Helix.Server.Model.Server - alias Helix.Process.Model.Process - alias Helix.Process.Internal.TOP.Allocator.Plan - alias Helix.Process.Internal.TOP.ServerResources - alias Helix.Test.Process.ProcessableExample - alias Helix.Test.Process.StaticProcessableExample - - @moduletag :unit - - # Note that most tests assert that the value is inside a range. this is done - # because the allocation algorithm might not allocate 100% of the resources - # because it's allocation logic is naive (i might fix it or worsen it in the - # future) - - test "allocating to a static process doesn't affects it" do - params = %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %StaticProcessableExample{}, - objective: %{ - cpu: 100_000 - } - } - - params2 = %{ - state: :running, - allocated: %{ - cpu: 100, - ram: 100 - }, - minimum: %{ - running: %{ - cpu: 100, - ram: 100 - } - } - } - - process = - params - |> Process.create_changeset() - |> Process.update_changeset(params2) - |> Ecto.Changeset.apply_changes() - - resources = %ServerResources{ - cpu: 9_000, - ram: 9_000, - net: %{"::" => %{dlk: 9_000, ulk: 9_000}} - } - - [allocated_process] = - [process] - |> Plan.allocate(resources) - |> Enum.map(&Ecto.Changeset.apply_changes/1) - - # Static processes doesn't receive dynamic allocations. 
- # Note that with "static process" i mean a process whose process_type - # doesn't allows dynamic allocation to any resources (unlike dynamic - # processes that allow dynamic allocation to some or all of their resources) - assert process.allocated === allocated_process.allocated - end - - test "allocating to a dynamic process" do - params = %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %ProcessableExample{}, - objective: %{ - cpu: 100_000 - } - } - - params2 = %{ - state: :running, - allocated: %{ - cpu: 100, - ram: 100 - }, - minimum: %{ - running: %{ - cpu: 100, - ram: 100 - } - } - } - - process = - params - |> Process.create_changeset() - |> Process.update_changeset(params2) - |> Ecto.Changeset.apply_changes() - - resources = %ServerResources{ - cpu: 9_000, - ram: 9_000, - net: %{"::" => %{dlk: 9_000, ulk: 9_000}} - } - - [allocated_process] = - [process] - |> Plan.allocate(resources) - |> Enum.map(&Ecto.Changeset.apply_changes/1) - - assert allocated_process.allocated.cpu in 8_950..9_000 - assert 100 === allocated_process.allocated.ram - end - - test "resources are divided between different dynamic processes" do - params = %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %ProcessableExample{}, - objective: %{ - cpu: 100_000 - } - } - - params2 = %{ - state: :running, - allocated: %{ - cpu: 100, - ram: 100 - }, - minimum: %{ - running: %{ - cpu: 100, - ram: 100 - } - } - } - - process0 = - params - |> Process.create_changeset() - |> Process.update_changeset(params2) - |> Ecto.Changeset.apply_changes() - - process1 = %{process0| process_id: Process.ID.generate()} - - resources = %ServerResources{ - cpu: 9_000, - ram: 9_000, - net: %{"::" => %{dlk: 9_000, ulk: 9_000}} - } - - [allocated_process0, allocated_process1] = - [process0, process1] - |> Plan.allocate(resources) - |> Enum.map(&Ecto.Changeset.apply_changes/1) - - assert allocated_process0.allocated.cpu in 4_450..4_500 - assert allocated_process1.allocated.cpu in 4_450..4_500 - end - - test "processes with higher priority receive bigger shares" do - params = %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %ProcessableExample{}, - objective: %{ - cpu: 100_000 - } - } - - params2 = %{ - state: :running, - priority: 1, - allocated: %{ - ram: 100 - }, - minimum: %{ - running: %{ - ram: 100 - } - } - } - - process0 = - params - |> Process.create_changeset() - |> Process.update_changeset(params2) - |> Ecto.Changeset.apply_changes() - - process1 = - %{process0| process_id: Process.ID.generate()} - |> Process.update_changeset(%{priority: 4}) - |> Ecto.Changeset.apply_changes() - - resources = %ServerResources{ - cpu: 9_000, - ram: 9_000, - net: %{"::" => %{dlk: 9_000, ulk: 9_000}} - } - - processes = - [process0, process1] - |> Plan.allocate(resources) - |> Enum.map(&Ecto.Changeset.apply_changes/1) - |> Enum.map(&({&1.process_id, &1})) - |> :maps.from_list() - - # Process0 has priority 1, process1 has priority 4, thus the resources will - # be split in 5 parts, process0 receives 1/5 of the total resources and - # process1 receives 4/5 - assert processes[process0.process_id].allocated.cpu in 1_750..1_800 - assert processes[process1.process_id].allocated.cpu in 7_150..7_200 - end - - test "complex allocation using limits" do - params = %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %ProcessableExample{}, - objective: %{ - cpu: 100_000 - } - } - - 
params2 = %{ - state: :running, - allocated: %{ - ram: 100 - }, - minimum: %{ - running: %{ - ram: 100 - } - } - } - - process0 = - params - |> Process.create_changeset() - |> Process.update_changeset(params2) - |> Ecto.Changeset.apply_changes() - - process1 = %{process0| process_id: Process.ID.generate()} - - process2 = - %{process0| process_id: Process.ID.generate()} - |> Process.update_changeset(%{limitations: %{cpu: 500}}) - |> Ecto.Changeset.apply_changes() - - resources = %ServerResources{ - cpu: 9_000, - ram: 9_000, - net: %{"::" => %{dlk: 9_000, ulk: 9_000}} - } - - processes = - [process0, process1, process2] - |> Plan.allocate(resources) - |> Enum.map(&Ecto.Changeset.apply_changes/1) - |> Enum.map(&({&1.process_id, &1})) - |> :maps.from_list() - - # We expect the following to happen: 500 cpu to process2 because it has - # limit and (8500/2) to process0 and process1 because they receive the rest. - # So, we expect process0 and process1 to have aproximately 4250, but since - # there are several ways to execute the allocation algorithm, we should - # expect allocator to fail to allocate a part of the resources left - assert processes[process0.process_id].allocated.cpu in 4_200..4_250 - - cpu_allocated0 = processes[process0.process_id].allocated.cpu - cpu_allocated1 = processes[process1.process_id].allocated.cpu - assert cpu_allocated0 == cpu_allocated1 - - assert 500 === processes[process2.process_id].allocated.cpu - end - - test "returns error when resources can't handle processes at minimum" do - params = %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %ProcessableExample{}, - objective: %{ - cpu: 100_000 - } - } - - process0 = - params - |> Process.create_changeset() - |> Process.update_changeset(%{state: :running}) - |> Process.update_changeset(%{minimum: %{running: %{ram: 2_000}}}) - |> Ecto.Changeset.apply_changes() - - process1 = - %{params| gateway_id: Server.ID.generate()} - |> Process.create_changeset() - |> Process.update_changeset(%{state: :running}) - |> Process.update_changeset(%{minimum: %{running: %{ram: 2_000}}}) - |> Ecto.Changeset.apply_changes() - - resources = %ServerResources{ - cpu: 9_000, - # Note that the server only has 3k Ram total and the processes together - # requires a minimum of 4k ram - ram: 3_000, - net: %{"::" => %{dlk: 9_000, ulk: 9_000}} - } - - # TODO: return precise errors - # result = Plan.allocate([process0, process1], resources) - # assert {:error, {:resources, :lack, :ram}} == result - assert {:error, _} = Plan.allocate([process0, process1], resources) - end -end diff --git a/test/process/model/process/resources/kv_test.exs b/test/process/model/process/resources/kv_test.exs new file mode 100644 index 00000000..97c52761 --- /dev/null +++ b/test/process/model/process/resources/kv_test.exs @@ -0,0 +1,174 @@ +defmodule Helix.Process.Model.Process.Resources.DLKTest do + + use ExUnit.Case, async: true + + alias Helix.Process.Model.Process.Resources.DLK, as: ResourceDLK + + describe "build/1" do + test "builds correctly" do + # Empty resource (same as initial) + assert %{} == ResourceDLK.build([]) + + # With map + assert %{net_id: 100} == ResourceDLK.build(%{net_id: 100}) + assert %{net1: 1, net2: 2} == ResourceDLK.build(%{net1: 1, net2: 2}) + + # Created %{network_id => 100} + assert %{net_id: 100} == ResourceDLK.build([{:net_id, 100}]) + + # Created multiple networks + assert %{net1: 1, net2: 2} == ResourceDLK.build([{:net1, 1}, {:net2, 2}]) + end + end + + describe "sum/2" do + test "valid data" 
do + + a = %{net1: 100, net2: 200} + b = %{net1: 1, net2: 2} + + assert %{net1: 101, net2: 202} == ResourceDLK.sum(a, b) + end + + test "non-overlapping keys" do + + a = %{net1: 50, net2: 2} + b = %{net1: 50, net3: 3} + + assert %{net1: 100, net2: 2, net3: 3} == ResourceDLK.sum(a, b) + end + + test "empty keys" do + initial = ResourceDLK.initial() + assert %{} == ResourceDLK.sum(initial, initial) + + a = %{netA: 1} + b = %{netB: 2} + + assert %{netA: 1} == ResourceDLK.sum(a, initial) + assert %{netB: 2} == ResourceDLK.sum(initial, b) + end + end + + describe "mul/2" do + test "multiplies overlapping keys" do + a = %{net1: 3, net2: 2} + b = %{net1: 4, net2: 0} + + assert %{net1: 12, net2: 0} == ResourceDLK.mul(a, b) + end + + test "handles missing keys" do + a = %{net1: 2, net2: 2} + b = %{net1: 5, net3: 3} + + assert %{net1: 10.0, net2: 2.0, net3: 3.0} == ResourceDLK.mul(a, b) + end + end + + describe "div/2" do + test "divides" do + a = %{net1: 10, net2: 5} + b = %{net1: 2, net2: 5} + + assert %{net1: 5, net2: 1} == ResourceDLK.div(a, b) + end + end + + describe "allocate_static/1" do + test "returns the expected format" do + process = + %{ + static: %{running: %{dlk: 100}}, + state: :running, + network_id: :net_id + } + + assert %{net_id: 100} == ResourceDLK.allocate_static(process) + end + + test "ignores if resource is not requested statically" do + process = + %{ + static: %{running: %{ulk: 100}}, + state: :running, + network_id: :net_id + } + + assert %{net_id: 0} == ResourceDLK.allocate_static(process) + end + + test "ignores if resource is on different state" do + process = + %{ + static: %{running: %{ulk: 100}}, + state: :paused, + network_id: :net_id + } + + assert %{net_id: 0} == ResourceDLK.allocate_static(process) + end + + test "ignores if network_id is nil" do + process = + %{ + static: %{running: %{dlk: 100}}, + state: :running, + network_id: nil + } + + assert %{} == ResourceDLK.allocate_static(process) + end + end + + describe "completed?/2" do + test "true when all processed values are greater than their objectives" do + processed = %{net1: 200, net2: 1} + objective = %{net1: 101, net2: 10} + + result = ResourceDLK.completed?(processed, objective) + + assert result == %{net1: true, net2: false} + end + + test "true when there is no objective" do + processed = %{net: 100} + objective = %{} + + assert %{net: true} == ResourceDLK.completed?(processed, objective) + end + end + + describe "map/2" do + test "applies to each value" do + + res = %{net1: true, net2: false, net3: true} + + function = fn val -> not val end + + result = ResourceDLK.map(res, function) + + assert result == %{net1: false, net2: true, net3: false} + end + end + + describe "reduce/2" do + test "works" do + r1 = %{net1: 100, net2: 300} + f1 = fn acc, v -> acc + v end + i1 = 0 + + assert 400 == ResourceDLK.reduce(r1, i1, f1) + + r2 = %{net1: true, net2: true, net3: true} + f2 = fn acc, v -> acc && v || false end + i2 = true + + assert ResourceDLK.reduce(r2, i2, f2) + + r3 = %{net1: true, net2: true, net3: false} + + refute ResourceDLK.reduce(r3, i2, f2) + end + end +end diff --git a/test/process/model/process/resources_test.exs b/test/process/model/process/resources_test.exs new file mode 100644 index 00000000..3f54e0ec --- /dev/null +++ b/test/process/model/process/resources_test.exs @@ -0,0 +1,206 @@ +defmodule Helix.Process.Model.Process.ResourcesTest do + + use ExUnit.Case, async: true + + alias Helix.Network.Model.Network + alias Helix.Process.Model.Process + + alias Helix.Test.Process.Setup.TOP, as: 
TOPSetup + + defp gen_resource do + {res, _} = TOPSetup.Resources.resources() + res + end + + describe "sum/2" do + test "sums all resources" do + res1 = gen_resource() + res2 = gen_resource() + + sum = Process.Resources.sum(res1, res2) + + # Returned sum of each resource matches the sum of each resource. + assert sum.cpu == Process.Resources.CPU.sum(res1.cpu, res2.cpu) + assert sum.ram == Process.Resources.RAM.sum(res1.ram, res2.ram) + assert sum.dlk == Process.Resources.DLK.sum(res1.dlk, res2.dlk) + assert sum.ulk == Process.Resources.ULK.sum(res1.ulk, res2.ulk) + end + end + + describe "sub/2" do + test "subs all resources" do + res1 = gen_resource() + res2 = gen_resource() + + sub = Process.Resources.sub(res1, res2) + + # Returned sub of each resource matches the sub of each resource. + assert sub.cpu == Process.Resources.CPU.sub(res1.cpu, res2.cpu) + assert sub.ram == Process.Resources.RAM.sub(res1.ram, res2.ram) + assert sub.dlk == Process.Resources.DLK.sub(res1.dlk, res2.dlk) + assert sub.ulk == Process.Resources.ULK.sub(res1.ulk, res2.ulk) + end + end + + describe "div/2" do + test "divs all resources" do + res1 = gen_resource() + res2 = gen_resource() + + div = Process.Resources.div(res1, res2) + + # Returned div of each resource matches the div of each resource. + assert div.cpu == Process.Resources.CPU.div(res1.cpu, res2.cpu) + assert div.ram == Process.Resources.RAM.div(res1.ram, res2.ram) + assert div.dlk == Process.Resources.DLK.div(res1.dlk, res2.dlk) + assert div.ulk == Process.Resources.ULK.div(res1.ulk, res2.ulk) + end + end + + describe "mul/2" do + test "muls all resources" do + res1 = gen_resource() + res2 = gen_resource() + + mul = Process.Resources.mul(res1, res2) + + # Returned mul of each resource matches the mul of each resource. 
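+ # (For the KV-backed resources, DLK/ULK, this happens per network key; in the
+ # DLK KV tests earlier in this diff, mul(%{net1: 2, net2: 2}, %{net1: 5, net3: 3})
+ # yielded %{net1: 10.0, net2: 2.0, net3: 3.0}, i.e. missing keys apparently
+ # behave as the multiplicative identity.)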
+ assert mul.cpu == Process.Resources.CPU.mul(res1.cpu, res2.cpu) + assert mul.ram == Process.Resources.RAM.mul(res1.ram, res2.ram) + assert mul.dlk == Process.Resources.DLK.mul(res1.dlk, res2.dlk) + assert mul.ulk == Process.Resources.ULK.mul(res1.ulk, res2.ulk) + end + end + + describe "min/1" do + test "returns minimum value of resources" do + res1 = + %{ + cpu: 100, + ram: 30, + dlk: %{net: 150}, + ulk: %{net: 250} + } + + res2 = + %{ + cpu: 50, + ram: 90, + dlk: %{net: 240}, + ulk: %{net: 0} + } + + min = Process.Resources.min(res1, res2) + + assert min == %{cpu: 50, ram: 30, dlk: %{net: 150}, ulk: %{net: 0}} + end + + test "fills missing keys" do + res1 = + %{ + cpu: 100, + dlk: %{net: 150}, + ulk: %{net: 250} + } + + res2 = + %{ + cpu: 50, + ram: 90, + dlk: %{net: 240, net2: 500}, + ulk: %{} + } + + assert %{ + cpu: 50, + ram: 90, + dlk: %{net: 150, net2: 500}, + ulk: %{net: 250} + } == Process.Resources.min(res1, res2) + end + + test "handles empty resource" do + res1 = + %{ + cpu: 100, + dlk: %{net: 150}, + ulk: %{net: 250} + } + + res2 = %{} + + assert %{ + cpu: 100, + dlk: %{net: 150}, + ulk: %{net: 250} + } == Process.Resources.min(res1, res2) + end + + test "hello emptiness my old friend" do + assert %{} == Process.Resources.min(%{}, %{}) + end + end + + describe "initial/0" do + test "initializes all resources" do + initial = Process.Resources.initial() + + assert initial.cpu == Process.Resources.CPU.initial() + assert initial.ram == Process.Resources.RAM.initial() + assert initial.dlk == Process.Resources.DLK.initial() + assert initial.ulk == Process.Resources.ULK.initial() + end + end + + describe "format/1" do + test "converts string keys to atoms" do + res = + %{ + "cpu" => 100, + "ram" => 200, + "ulk" => %{}, + "dlk" => %{} + } + + assert %{ + cpu: 100, + ram: 200, + ulk: %{}, + dlk: %{} + } == Process.Resources.format(res) + end + + test "fills up undefined/missing resources with their initial values" do + res = + %{ + cpu: 100, + dlk: %{} + } + + assert %{ + cpu: 100, + ram: Process.Resources.RAM.initial(), + dlk: %{}, + ulk: Process.Resources.ULK.initial() + } == Process.Resources.format(res) + end + + test "network-related keys are converted to Helix.IDs" do + network_id = Network.ID.cast!("::") + res = + %{ + dlk: %{"::" => 100}, + ulk: Map.put(%{}, network_id, 200), + cpu: 0, + ram: 0 + } + + assert %{ + dlk: Map.put(%{}, network_id, 100), + ulk: Map.put(%{}, network_id, 200), + ram: 0, + cpu: 0 + } == Process.Resources.format(res) + end + end +end diff --git a/test/process/model/process_test.exs b/test/process/model/process_test.exs index 21e1b04a..36f7115c 100644 --- a/test/process/model/process_test.exs +++ b/test/process/model/process_test.exs @@ -1,368 +1,234 @@ defmodule Helix.Process.Model.ProcessTest do - use ExUnit.Case + use ExUnit.Case, async: true - alias Ecto.Changeset - alias Helix.Server.Model.Server alias Helix.Process.Model.Process - alias Helix.Process.Model.Process.Resources - alias Helix.Process.Model.Processable - alias Helix.Test.Process.ProcessableExample - alias Helix.Test.Process.StaticProcessableExample - - @moduletag :unit - - setup do - process = - %{ - gateway_id: Server.ID.generate(), - target_server_id: Server.ID.generate(), - process_data: %ProcessableExample{} - } - |> Process.create_changeset() - |> Changeset.apply_changes() - - {:ok, process: process} - end - - defp error_fields(changeset) do - changeset - |> Changeset.traverse_errors(&(&1)) - |> Map.keys() - end - - describe "process data" do - test "process data must be a 
struct" do - p = Process.create_changeset(%{process_data: %{foo: :bar}}) - - assert :process_data in error_fields(p) - end - - test "a struct is only valid if it implements Processable protocol" do - p = Process.create_changeset(%{process_data: %File.Stream{}}) - - assert :process_data in error_fields(p) - end - - test "works as long as the struct implements Processabe" do - params = %{process_data: %ProcessableExample{}} - p = Process.create_changeset(params) - - refute :process_data in error_fields(p) + alias Helix.Test.Network.Helper, as: NetworkHelper + alias Helix.Test.Process.FakeFileTransfer + alias Helix.Test.Process.Setup.TOP, as: TOPSetup + + @internet_id NetworkHelper.internet_id() + + describe "infer_usage/1" do + test "infers usage on remote process" do + [proc] = TOPSetup.fake_process() + + # Process below is probably a file transfer + # Locally, it uses DLK and has reserved 50 units of it + # On the remote, however, it is limited by the remote's ULK, at 20 units + # The actual allocated resources are 20 DLK for local, 20 ULK for remote + # (Plus the other unrelated stuff that was reserved before) + proc = + proc + |> Map.put(:l_limit, %{}) + |> Map.put(:l_reserved, %{cpu: 72, ram: 20, dlk: %{net: 50}, ulk: %{}}) + |> Map.put(:r_limit, %{ulk: %{net: 20}}) + |> Map.put(:r_reserved, %{dlk: %{}, cpu: 0, ram: 0, ulk: %{net: 20}}) + + process = Process.infer_usage(proc) + + assert process.l_allocated.cpu == 72 + assert process.l_allocated.ram == 20.0 + assert process.l_allocated.dlk.net == 20 + assert process.l_allocated.ulk == %{} + + assert process.r_allocated.ulk.net == 20 + assert process.r_allocated.cpu == 0 + assert process.r_allocated.ram == 0 + assert process.r_allocated.dlk == %{} + end + + test "ignores r_allocated when remote resources are not relevant" do + [proc] = TOPSetup.fake_process() + + # Process below has its own rules for limitations and allocation, but it's + # completely independent of the remote server's behaviour/resources. 
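+ # (With `r_limit` and `r_reserved` empty, the expectation below reduces to
+ # clamping each local resource by its limit: ram is min(20, 10) == 10, while
+ # cpu and dlk keep their reserved 72 and 50, and nothing is allocated on the
+ # remote side.)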
+ proc = + proc + |> Map.put(:l_limit, %{ram: 10}) + |> Map.put(:l_reserved, %{cpu: 72, ram: 20, dlk: %{net: 50}, ulk: %{}}) + |> Map.put(:r_limit, %{}) + |> Map.put(:r_reserved, %{}) + + process = Process.infer_usage(proc) + + assert process.l_allocated.cpu == 72 + assert process.l_allocated.ram == 10 + assert process.l_allocated.dlk.net == 50 + assert process.l_allocated.ulk == %{} + + assert process.r_allocated.ulk == %{} + assert process.r_allocated.cpu == 0 + assert process.r_allocated.ram == 0 + assert process.r_allocated.dlk == %{} + end + + test "mirrors DLK and ULK resources" do + # Notice that in `proc`, both DLK and ULK are being limited by remote + [proc] = + TOPSetup.fake_process( + l_limit: %{ram: 30}, + r_limit: %{dlk: %{net: 20}}, + l_reserved: %{cpu: 0, ram: 40, ulk: %{net: 30}, dlk: %{}}, + r_reserved: %{cpu: 10, ram: 20, ulk: %{net: 15}, dlk: %{net: 20}}, + ) + + process = Process.infer_usage(proc) + + assert process.l_allocated.cpu == 0 + assert process.l_allocated.ram == 30 + assert process.l_allocated.ulk.net == 20 + assert process.l_allocated.dlk.net == 15 + + assert process.r_allocated.cpu == 10 + assert process.r_allocated.ram == 20 + assert process.r_allocated.dlk.net == 20 + assert process.r_allocated.ulk.net == 15 + + # We'll now modify `proc` so that the `l_reserved` is the min value + # (this means DLK/ULK will be limited by local resources) + + proc = + %{proc| + l_limit: %{}, + r_limit: %{}, + l_reserved: %{cpu: 10, ram: 20, ulk: %{net: 15}, dlk: %{net: 20}}, + r_reserved: %{cpu: 0, ram: 40, ulk: %{net: 30}, dlk: %{}} + } + + process = Process.infer_usage(proc) + + assert process.l_allocated.cpu == 10 + assert process.l_allocated.ram == 20 + assert process.l_allocated.ulk.net == 15 + assert process.l_allocated.dlk.net == 20 + + assert process.r_allocated.cpu == 0 + assert process.r_allocated.ram == 40 + assert process.r_allocated.ulk.net == 30 + assert process.r_allocated.dlk == %{} end end - describe "objective" do - test "objective is optional" do - p = Process.create_changeset(%{}) - refute :objective in error_fields(p) - end + describe "format/1" do + test "formats the process data" do - test "objective is a map whose values are integers" do - p = Process.create_changeset(%{objective: %{cpu: :foo}}) - assert :objective in error_fields(p) + [proc] = + TOPSetup.fake_process( + l_limit: %{ram: 30}, + r_limit: %{dlk: %{"::" => 20}, cpu: 10}, + l_reserved: %{cpu: 0, ram: 20, ulk: %{"::" => 50}, dlk: %{}}, + r_reserved: %{cpu: 30, ram: 30, ulk: %{}, dlk: %{"::" => 20}}, + l_dynamic: [:ulk], + r_dynamic: [:dlk], + objective: %{cpu: 0, ram: 0, dlk: %{}, ulk: %{"::" => 9999}}, + network_id: "::", + data: FakeFileTransfer.new() + ) - p = Process.create_changeset(%{objective: :foo}) - assert :objective in error_fields(p) + process = Process.format(proc) - p = Process.create_changeset(%{objective: %{cpu: 0.5}}) - assert :objective in error_fields(p) + assert process.data == proc.data - p = Process.create_changeset(%{objective: %{cpu: 50}}) - refute :objective in error_fields(p) - end + # Never went through a checkpoint + refute process.last_checkpoint_time - test "objective values must be non-negative" do - p = Process.create_changeset(%{objective: %{cpu: -50}}) - assert :objective in error_fields(p) - end - end + ### Formatted resources - describe "ttl" do - test "seconds_to_change defaults to :infinity if nothing is going to change" do - now = DateTime.from_unix!(1_470_000_000) + # Objective - params = %{allocated: %{cpu: 0, dlk: 0}, updated_time: now} + assert 
process.objective.cpu == 0 + assert process.objective.ram == 0 + assert process.objective.dlk == %{} + assert process.objective.ulk[@internet_id] == 9999 - process = - %{objective: %{cpu: 50}} - |> Process.create_changeset() - |> Process.update_changeset(params) - |> Changeset.apply_changes() + # Limits - assert :infinity == Process.seconds_to_change(process) - end + assert process.l_limit.ram == 30 + refute Map.has_key?(process.l_limit, :cpu) + refute Map.has_key?(process.l_limit, :dlk) + refute Map.has_key?(process.l_limit, :ulk) - test "seconds_to_change returns amount of seconds to the next change on a process resource consumption" do - now = DateTime.from_unix!(1_470_000_000) + assert process.r_limit.cpu == 10 + assert process.r_limit.dlk[@internet_id] == 20 + refute Map.has_key?(process.r_limit, :ram) + refute Map.has_key?(process.r_limit, :ulk) - params = %{allocated: %{cpu: 10, dlk: 10}, updated_time: now} + # Reservation - process = - %{objective: %{cpu: 50, dlk: 100}} - |> Process.create_changeset() - |> Process.update_changeset(params) - |> Changeset.apply_changes() + assert process.l_reserved.cpu == 0 + assert process.l_reserved.ram == 20 + assert process.l_reserved.dlk == %{} + assert process.l_reserved.ulk[@internet_id] == 50 - assert 5 === Process.seconds_to_change(process) - end + assert process.r_reserved.cpu == 30 + assert process.r_reserved.ram == 30 + assert process.r_reserved.dlk[@internet_id] == 20 + assert process.r_reserved.ulk == %{} - test "estimate_conclusion is the value of the longest-to-complete objective (or nil if infinity)" do - now = DateTime.from_unix!(1_470_000_000) + ### Virtual data - p = - %{objective: %{cpu: 50, dlk: 100}} - |> Process.create_changeset() - |> Process.update_changeset(%{allocated: %{dlk: 10}, updated_time: now}) - |> Changeset.apply_changes() + # The process has reserved resources and it's not paused, so it's running + assert process.state == :running - p1 = Process.estimate_conclusion(p) + # Correct allocation (see more tests on `infer_usage/1`) + assert process.l_allocated.cpu == 0 + assert process.l_allocated.ram == 20 + assert process.l_allocated.dlk == %{} + assert process.l_allocated.ulk[@internet_id] == 20 - refute p1.estimated_time + assert process.r_allocated.cpu == 10 + assert process.r_allocated.ram == 30 + assert process.r_allocated.dlk[@internet_id] == 20 + assert process.r_allocated.ulk == %{} - p2 = - p - |> Process.update_changeset(%{allocated: %{cpu: 1, dlk: 10}}) - |> Changeset.apply_changes() - |> Process.estimate_conclusion() + # Estimated duration - future = DateTime.from_unix!(1_470_000_050) + assert_in_delta process.time_left, 500, 1 + assert process.completion_date - assert :eq === DateTime.compare(future, p2.estimated_time) - end - end - - describe "allocation_shares" do - test "returns 0 when paused", %{process: process} do - process = - process - |> Process.pause() - |> elem(0) - |> Changeset.apply_changes() + diff = + DateTime.diff(process.completion_date, Process.get_last_update(process)) - assert 0 === Process.allocation_shares(process) + assert_in_delta diff, 500, 1.1 end - test \ - "returns the priority value when the process still requires resources", - %{process: process} - do - priority = 2 - process = - process - |> Changeset.cast(%{priority: priority}, [:priority]) - |> Changeset.put_embed(:objective, %{cpu: 1_000}) - |> Changeset.apply_changes() - - assert 2 === Process.allocation_shares(process) - end - - test "can only allocate if the Processable allows", %{process: process} do - priority = 2 - 
process = - process - |> Changeset.cast(%{priority: priority}, [:priority]) - |> Changeset.put_embed(:objective, %{cpu: 1_000}) - |> Changeset.apply_changes() + test "gives :waiting_allocation state when process hasn't received alloc" do + [proc] = + TOPSetup.fake_process( + l_limit: %{ram: 30}, + r_limit: %{dlk: %{"::" => 20}, cpu: 10}, + l_reserved: %{}, + r_reserved: %{}, + l_dynamic: [:dlk], + r_dynamic: [:ulk], + network_id: "::", + objective: %{ulk: %{"::" => 999}}, + data: FakeFileTransfer.new() + ) - assert 2 === Process.allocation_shares(process) - p2 = %{process| process_data: %StaticProcessableExample{}} + process = Process.format(proc) - process_type = %StaticProcessableExample{} - assert [] === Processable.dynamic_resources(process_type) - assert 0 === Process.allocation_shares(p2) + assert process.state == :waiting_allocation end - end - describe "pause" do - test "pause changes the state of the process", %{process: process} do - process = - process - |> Process.update_changeset(%{state: :running}) - |> Changeset.apply_changes() - |> Process.pause() - |> elem(0) - |> Changeset.apply_changes() - - assert :paused === process.state - end - - test "on pause allocates minimum", %{process: process} do - params = %{ - objective: %{cpu: 1_000}, - allocated: %{cpu: 100, ram: 200}, - minimum: %{paused: %{ram: 155}} - } - - process = - process - |> Changeset.cast(params, [:minimum]) - |> Changeset.cast_embed(:objective) - |> Changeset.cast_embed(:allocated) - |> Changeset.apply_changes() - |> Process.pause() - |> elem(0) - |> Changeset.apply_changes() - - assert 0 === process.allocated.cpu - assert 155 === process.allocated.ram - end - end - - describe "completeness" do - test "is complete if state is :complete", %{process: process} do - process = - process - |> Process.update_changeset(%{state: :complete}) - |> Changeset.apply_changes() - - assert Process.complete?(process) - end + test "gives :paused state when process priority is 0" do + [proc] = + TOPSetup.fake_process( + priority: 0, + l_limit: %{ram: 30}, + r_limit: %{dlk: %{"::" => 20}, cpu: 10}, + l_reserved: %{cpu: 0, ram: 20, ulk: %{"::" => 50}, dlk: %{}}, + r_reserved: %{cpu: 30, ram: 30, ulk: %{}, dlk: %{"::" => 20}}, + r_dynamic: [:ulk], + network_id: "::", + data: FakeFileTransfer.new() + ) - test "is complete if objective has been reached", %{process: process} do - params = %{ - objective: %{cpu: 100, dlk: 20}, - processed: %{cpu: 100, dlk: 20} - } - - process = - process - |> Process.update_changeset(params) - |> Changeset.apply_changes() - - assert Process.complete?(process) - end - - test \ - "is not complete if state is not complete and objective not reached", - %{process: process} - do - params = %{ - state: :running, - processed: %{cpu: 10}, - objective: %{cpu: 500} - } - - process = - process - |> Process.update_changeset(params) - |> Changeset.apply_changes() - - refute Process.complete?(process) - end - end - - describe "minimum allocation" do - test \ - "defaults to 0 when a value is not specified for the state", - %{process: process} - do - resources = %Resources{cpu: 100} - - process = - process - |> Process.allocate(resources) - |> Process.update_changeset(%{minimum: %{}}) - |> Changeset.apply_changes() - - assert 100 === process.allocated.cpu - - process = - process - |> Process.allocate_minimum() - |> Changeset.apply_changes() - - assert 0 === process.allocated.cpu - end - - test "uses the values for each specified state", %{process: process} do - resources = %Resources{cpu: 900, ram: 600} - minimum = 
%{paused: %{ram: 300}, running: %{cpu: 100, ram: 600}} - - process = - process - |> Process.allocate(resources) - |> Process.update_changeset(%{state: :running, minimum: minimum}) - |> Changeset.apply_changes() - - assert 900 === process.allocated.cpu - assert 600 === process.allocated.ram - - process = - process - |> Process.allocate_minimum() - |> Changeset.apply_changes() - - assert 100 === process.allocated.cpu - assert 600 === process.allocated.ram - - process = - process - |> Process.update_changeset(%{state: :paused}) - |> Process.allocate_minimum() - |> Changeset.apply_changes() - - assert 0 === process.allocated.cpu - assert 300 === process.allocated.ram - - process = - process - |> Process.update_changeset(%{state: :complete}) - |> Process.allocate_minimum() - |> Changeset.apply_changes() - - # When a value is not specified for a certain state, it assumes that - # everything should be 0 - assert 0 === process.allocated.cpu - assert 0 === process.allocated.ram - end - end - - describe "resume" do - test "doesn't change when process is not paused", %{process: process} do - changeset = - process - |> Process.update_changeset(%{state: :running, allocated: %{cpu: 100}}) - |> Changeset.apply_changes() - |> Process.resume() - - # IE: no changes on the changeset - assert 0 === map_size(changeset.changes) - end + process = Process.format(proc) - test \ - "changes state and updated_time and allocates minimum", - %{process: process} - do - resources = %Resources{ram: 300} - minimum = %{running: %{ram: 600}} - last_updated = - {{2000, 01, 01}, {01, 01, 01}} - |> NaiveDateTime.from_erl!() - |> DateTime.from_naive!("Etc/UTC") - params = %{state: :paused, minimum: minimum, updated_time: last_updated} - now = DateTime.utc_now() - - process = - process - |> Process.allocate(resources) - |> Process.update_changeset(params) - |> Changeset.apply_changes() - - assert :paused === process.state - assert 300 === process.allocated.ram - assert 2000 === process.updated_time.year - - process = - process - |> Process.resume() - |> elem(0) - |> Changeset.apply_changes() - - assert :running === process.state - assert 600 === process.allocated.ram - assert now.year === process.updated_time.year + assert process.state == :paused end end end diff --git a/test/process/model/top/allocator_test.exs b/test/process/model/top/allocator_test.exs new file mode 100644 index 00000000..e6014a65 --- /dev/null +++ b/test/process/model/top/allocator_test.exs @@ -0,0 +1,611 @@ +defmodule Helix.Process.Model.Top.AllocatorTest do + + use ExUnit.Case, async: true + + import Helix.Test.Process.Macros + + alias Helix.Process.Model.Process + alias Helix.Process.Model.TOP.Allocator, as: TOPAllocator + + alias Helix.Test.Process.Setup.TOP, as: TOPSetup + + alias HELL.TestHelper.Random + + describe "allocate/2 (without limit)" do + test "one process; all resources" do + {total_resources, _} = TOPSetup.Resources.resources() + + [proc1] = + TOPSetup.fake_process( + total_resources: total_resources, + l_dynamic: [:cpu, :ram, :ulk, :dlk], + r_dynamic: [:cpu, :ram, :ulk, :dlk] + ) + + proc1 = Map.put(proc1, :id, 1) + + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc1]) + + assert p.id == proc1.id + + # Alloc of one process will receive all available resources + assert_resource p.next_allocation.cpu, total_resources.cpu + assert_resource p.next_allocation.ram, total_resources.ram + assert_resource p.next_allocation.dlk, total_resources.dlk + assert_resource p.next_allocation.ulk, 
total_resources.ulk + + # Same test on the remote counterpart + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:target, total_resources, [proc1]) + + assert p.id == proc1.id + + # Remember: `target` is a different server, its resources are independent + assert_resource p.next_allocation.cpu, total_resources.cpu + assert_resource p.next_allocation.ram, total_resources.ram + assert_resource p.next_allocation.dlk, total_resources.dlk + assert_resource p.next_allocation.ulk, total_resources.ulk + end + + test "two processes; non-overlapping dynamic; non-overlapping static" do + {total_resources, _} = TOPSetup.Resources.resources() + + [proc1, proc2, proc3, proc4] = + TOPSetup.fake_process(total_resources: total_resources, total: 4) + + # Proc1 has dynamic CPU resource and does not use any other static res + proc1 = + proc1 + |> Map.from_struct() + |> Map.replace(:l_dynamic, [:cpu]) + |> Map.put(:id, 1) + |> put_in([:static, :running, :ram], 0) + |> put_in([:static, :running, :ulk], 0) + |> put_in([:static, :running, :dlk], 0) + + # Proc2 has dynamic RAM resource and does not use any other static res + proc2 = + proc2 + |> Map.from_struct() + |> Map.replace(:l_dynamic, [:ram]) + |> Map.put(:id, 2) + |> put_in([:static, :running, :cpu], 0) + |> put_in([:static, :running, :ulk], 0) + |> put_in([:static, :running, :dlk], 0) + + # Proc3 has dynamic ULK resource and does not use any other static res + proc3 = + proc3 + |> Map.from_struct() + |> Map.replace(:l_dynamic, [:ulk]) + |> Map.put(:id, 3) + |> put_in([:static, :running, :cpu], 0) + |> put_in([:static, :running, :ram], 0) + |> put_in([:static, :running, :dlk], 0) + + # Proc4 has dynamic DLK resource and does not use any other static res + proc4 = + proc4 + |> Map.from_struct() + |> Map.replace(:l_dynamic, [:dlk]) + |> Map.put(:id, 4) + |> put_in([:static, :running, :cpu], 0) + |> put_in([:static, :running, :ram], 0) + |> put_in([:static, :running, :ulk], 0) + + procs = [proc1, proc2, proc3, proc4] + + assert {:ok, %{allocated: [p1, p2, p3, p4], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, procs) + + assert p1.id == proc1.id + assert p2.id == proc2.id + assert p3.id == proc3.id + assert p4.id == proc4.id + + # Allocated all available server resources + assert_resource p1.next_allocation.cpu, total_resources.cpu + assert_resource p1.next_allocation.ram, 0 + assert_resource p1.next_allocation.ulk, 0 + assert_resource p1.next_allocation.dlk, 0 + + assert_resource p2.next_allocation.cpu, 0 + assert_resource p2.next_allocation.ram, total_resources.ram + assert_resource p2.next_allocation.ulk, 0 + assert_resource p2.next_allocation.dlk, 0 + + assert_resource p3.next_allocation.cpu, 0 + assert_resource p3.next_allocation.ram, 0 + assert_resource p3.next_allocation.ulk, total_resources.ulk + assert_resource p3.next_allocation.dlk, 0 + + assert_resource p4.next_allocation.cpu, 0 + assert_resource p4.next_allocation.ram, 0 + assert_resource p4.next_allocation.ulk, 0 + assert_resource p4.next_allocation.dlk, total_resources.dlk + end + + test "two processes; non-overlapping dynamic; overlapping static" do + {total_resources, _} = TOPSetup.Resources.resources() + + # Note that, by default, all processes have *some* static res assigned to + # it (except dlk/ulk). 
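# (Aside on the vocabulary these tests rely on, as read from the setups below: `static` is a
# fixed per-state reservation, e.g. %{running: %{cpu: 10, ram: 20}}, granted as-is when it
# fits, while `l_dynamic`/`r_dynamic` name the resources that compete for whatever the server
# has left over; that split drives most of the scenarios that follow.)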
+ [proc1, proc2] = + TOPSetup.fake_process(total_resources: total_resources, total: 2) + + # `proc1` will be dynamic only on CPU; `proc2`, on RAM + proc1 = %{proc1| l_dynamic: [:cpu]} + proc2 = %{proc2| l_dynamic: [:ram]} + + # Put some identifiers + proc1 = Map.put(proc1, :id, 1) + proc2 = Map.put(proc2, :id, 2) + + procs = [proc1, proc2] + + assert {:ok, %{allocated: [p1, p2], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, procs) + + assert p1.id == proc1.id + assert p2.id == proc2.id + + alloc1 = p1.next_allocation + alloc2 = p2.next_allocation + + # Allocated all available server resources + assert_resource alloc1.cpu + alloc2.cpu, total_resources.cpu + assert_resource alloc1.ram + alloc2.ram, total_resources.ram + end + + test "two processes; overlapping dynamic and static resources" do + {total_resources, _} = TOPSetup.Resources.resources() + + procs = TOPSetup.fake_process( + total_resources: total_resources, total: 2, l_dynamic: [:cpu, :ram] + ) + + assert {:ok, %{allocated: [p1, p2], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, procs) + + alloc1 = p1.next_allocation + alloc2 = p2.next_allocation + + # Allocated all available server resources + assert_resource alloc1.cpu + alloc2.cpu, total_resources.cpu + assert_resource alloc1.ram + alloc2.ram, total_resources.ram + end + + test "n processes; overlapping everywhere" do + {total_resources, _} = TOPSetup.Resources.resources() + initial = Process.Resources.initial() + + # We'll simulate the allocation of 50..100 processes (it takes 3ms!) + n = Random.number(min: 50, max: 100) + + procs = + TOPSetup.fake_process( + total_resources: total_resources, total: n, l_dynamic: [:cpu, :ram] + ) + + # Allocates all `n` processes + assert {:ok, %{allocated: allocations, dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, procs) + + accumulated_resources = + Enum.reduce(allocations, initial, fn process, acc -> + Process.Resources.sum(acc, process.next_allocation) + end) + + # The accumulation (sum) of all processes' resources must be equal to the + # total server's resources. 
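# A minimal standalone sketch of the accumulation check above — plain maps and made-up
# numbers, not the Helix Process.Resources API (illustrative only):
total = %{cpu: 300, ram: 100}
allocations = [%{cpu: 100.0, ram: 40.0}, %{cpu: 150.0, ram: 35.0}, %{cpu: 50.0, ram: 25.0}]

sum =
  Enum.reduce(allocations, %{cpu: 0, ram: 0}, fn alloc, acc ->
    %{cpu: acc.cpu + alloc.cpu, ram: acc.ram + alloc.ram}
  end)

sum.cpu == total.cpu and sum.ram == total.ram
# => true: with fully dynamic processes every available unit ends up handed out, which is
# what the assert_resource checks below verify against the real Allocator.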
+ assert_resource accumulated_resources.cpu, total_resources.cpu + assert_resource accumulated_resources.ram, total_resources.ram + end + + test "rejects when there would be resource overflow (on static alloc)" do + initial = Process.Resources.initial() + + [proc] = TOPSetup.fake_process() + + assert {:error, reason, _} = + TOPAllocator.allocate(:gateway, initial, [proc]) + assert reason == :resources + end + + test "rejects when there would be overflow of DLK/ULK" do + {total_resources, _} = TOPSetup.Resources.resources(network_id: :net) + + [proc] = TOPSetup.fake_process(network_id: :net) + + total_resources = + total_resources + |> put_in([:dlk, :net], 0) + |> put_in([:ulk, :net], 0) + |> Map.replace(:cpu, proc.objective.cpu) + |> Map.replace(:ram, proc.objective.ram) + + assert {:error, reason, _} = + TOPAllocator.allocate(:gateway, total_resources, [proc]) + assert reason == :resources + end + + test "picks the heaviest process among multiple overflowing processes" do + initial = Process.Resources.initial() + [proc] = TOPSetup.fake_process() + + # One process which overflows on all resources + assert {:error, :resources, [heaviest]} = + TOPAllocator.allocate(:gateway, initial, [proc]) + + assert heaviest.process_id == proc.process_id + + # Let's increase the fun + + {total_resources, _} = TOPSetup.Resources.resources(network_id: :net) + [proc1, proc2] = + TOPSetup.fake_process( + total_resources: total_resources, network_id: :net, total: 2, + static_ulk: 0, static_dlk: 0 + ) + + # Both `proc1` and `proc2` are overflowing. Notice `proc2` requires more + # CPU power than `proc1`, hence it's the heaviest + # Also note that all other resources are NOT overflowed + proc1 = + proc1 + |> Map.from_struct() + |> put_in([:static, :running, :cpu], total_resources.cpu + 2) + + proc2 = + proc2 + |> Map.from_struct() + |> put_in([:static, :running, :cpu], total_resources.cpu + 3) + + assert {:error, :resources, [heaviest]} = + TOPAllocator.allocate(:gateway, total_resources, [proc1, proc2]) + + assert heaviest.process_id == proc2.process_id + + # More more fun + + # Now we'll make `proc1` overflow on RAM. On this new scenario, `proc1` + # is overflowing (RAM) and `proc2` is overflowing too (CPU) + proc1 = + proc1 + |> put_in([:static, :running, :ram], total_resources.ram) + + # Allocate will return both processes as heaviest, since each one + # overflows a different resource + assert {:error, :resources, heaviest} = + TOPAllocator.allocate(:gateway, total_resources, [proc1, proc2]) + + assert length(heaviest) == 2 + + # Now `proc1` consumes 1 MHz more than `proc2`, so it's the heaviest + # process on both RAM and CPU consumption + proc1 = + proc1 + |> put_in([:static, :running, :cpu], total_resources.cpu + 4) + + # Allocator only returns one process + assert {:error, :resources, [heaviest]} = + TOPAllocator.allocate(:gateway, total_resources, [proc1, proc2]) + + assert heaviest.process_id == proc1.process_id + + # MOARRRR FUN + # Now we'll overflow DLK and ULK resources, so we can test KV Behaviour + + # `proc1` overflows CPU and RAM (from above experiments), and now DLK too + proc1 = + proc1 + |> put_in([:static, :running, :dlk], total_resources.dlk.net + 2) + + # `proc2`, on the other hand, overflows on ULK + proc2 = + proc2 + |> put_in([:static, :running, :ulk], total_resources.ulk.net + 2) + + [proc3] = TOPSetup.fake_process(network_id: :net) + + # `proc3` is a valid process, which consumes no static resources but would + # like to receive dynamic shares (if any are available). 
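# (Reading of the scenario, not part of the patch: `proc3` reserves nothing itself, so it is
# never to blame for the overflow — only the heaviest offender per overflowed resource gets
# returned, which is why the assertion below still expects exactly two processes.)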
+ proc3 = + proc3 + |> Map.from_struct() + |> put_in([:static, :running, :cpu], 0) + |> put_in([:static, :running, :ram], 0) + |> put_in([:static, :running, :dlk], 0) + |> put_in([:static, :running, :ulk], 0) + + # Returned `proc1` and `proc2` as overflowers (overflowed?) + assert {:error, :resources, heaviest} = + TOPAllocator.allocate(:gateway, total_resources, [proc1, proc2, proc3]) + + assert length(heaviest) == 2 + end + + test "does not allocate dyn resources on partially completed objectives" do + {total_resources, _} = TOPSetup.Resources.resources(network_id: :net) + + [proc] = TOPSetup.fake_process(total_resources: total_resources) + + # The process has already processed enough RAM, but not enough CPU. + # Excpet for static resources (see test below), dynamic resources of + # already completed objectives must not be allocated, and instead should + # be routed to another process + proc = + %{proc| + processed: %{cpu: 0, ram: 100, ulk: %{net: 30}, dlk: %{}}, + objective: %{cpu: 50, ram: 99.9, ulk: %{net: 29.9}, dlk: %{net: 50}}, + l_dynamic: [:cpu, :ram, :dlk, :ulk], + static: %{}, + network_id: :net + } + + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc]) + + alloc = p.next_allocation + + # Allocated the expected CPU for the process + assert_resource alloc.cpu, total_resources.cpu + + # But did not allocate any ram to it, since it's been completed already + assert alloc.ram == 0.0 + + # Allocated all DLK in the world... + assert_resource alloc.dlk, total_resources.dlk + + # But did not allocate any ULK, since it's been completed already + assert alloc.ulk == %{net: 0.0} + + # `proc2` is a copy of `proc`, but it has never processed anything. + # So, if we try to allocate both `proc` and `proc2` at the same time, the + # Allocator should give 50% CPU to both, and 100% RAM to `proc2`. + # Same applies to DLK/ULK: both should receive half of DLK, while `proc2` + # has full ULK access. + proc2 = %{proc| processed: nil} + + # Put some identifiers + proc = Map.put(proc, :process_id, 1) + proc2 = Map.put(proc2, :process_id, 2) + + assert {:ok, %{allocated: [p1, p2], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc, proc2]) + + assert p1.process_id == proc.process_id + assert p2.process_id == proc2.process_id + + alloc1 = p1.next_allocation + alloc2 = p2.next_allocation + + # `proc` got 50% of CPU and no RAM + assert_resource alloc1.cpu, total_resources.cpu / 2 + assert alloc1.ram == 0.0 + + # `proc` got 50% of DLK and no ULK + assert_resource alloc1.dlk, total_resources.dlk.net / 2 + assert alloc1.ulk.net == 0.0 + + # `proc2` got 50% of CPU and 100% of RAM + assert_resource alloc2.cpu, total_resources.cpu / 2 + assert_resource alloc2.ram, total_resources.ram + + # `proc2` got 50% of DLK and 100% of ULK + assert_resource alloc2.dlk, total_resources.dlk.net / 2 + assert_resource alloc2.ulk, total_resources.ulk + end + + test "allocates static resources even on partially completed objectives" do + {total_resources, _} = TOPSetup.Resources.resources(network_id: :net) + + [proc] = TOPSetup.fake_process(total_resources: total_resources) + + # `proc` has already completed its RAM objective, but not the CPU one. 
+ # However, it is requested that `proc` has 20 units of RAM attached to it + # when it's running, so we'll obey even though that specific objective has + # been completed + # Likewise, it has processed enough DLK but not enough ULK, both of which + # require some static usage + proc = + %{proc| + processed: %{cpu: 0, ram: 100, dlk: %{net: 30}, ulk: %{}}, + objective: %{cpu: 50, ram: 99.9, dlk: %{net: 30}, ulk: %{net: 20}}, + static: %{running: %{cpu: 10, ram: 20, ulk: 30, dlk: 40}}, + network_id: :net, + l_dynamic: [:cpu, :ram, :dlk, :ulk] + } + + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc]) + + # Allocated all CPU and ULK to the process + assert_resource p.next_allocation.cpu, total_resources.cpu + assert_resource p.next_allocation.ulk.net, total_resources.ulk.net + + # Allocated only the required static resources on RAM and DLK + assert p.next_allocation.ram == 20 + assert p.next_allocation.dlk.net == 40 + end + + # Regression; similar to above but tests different paths + test "allocates static resources on 'waiting_allocation' processes" do + {total_resources, _} = TOPSetup.Resources.resources(network_id: :net) + + [proc] = TOPSetup.fake_process(total_resources: total_resources) + + # `proc` has already completed its RAM objective, but not the CPU one. + # However, it is requested that `proc` has 20 units of RAM attached to it + # when it's running, so we'll obey even though that specific objective has + # been completed + # Likewise, it has processed enough DLK but not enough ULK, both of which + # require some static usage + proc = + %{proc| + processed: %{cpu: 0, ram: 100, dlk: %{net: 30}, ulk: %{}}, + objective: %{cpu: 50, ram: 99.9, dlk: %{net: 30}, ulk: %{net: 20}}, + static: %{running: %{cpu: 10, ram: 20, ulk: 30, dlk: 40}}, + network_id: :net, + state: :waiting_allocation, + l_dynamic: [:cpu, :ram, :dlk, :ulk] + } + + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc]) + + # Allocated all CPU and ULK to the process + assert_resource p.next_allocation.cpu, total_resources.cpu + assert_resource p.next_allocation.ulk.net, total_resources.ulk.net + + # Allocated only the required static resources on RAM and DLK + assert p.next_allocation.ram == 20 + assert p.next_allocation.dlk.net == 40 + end + end + + describe "allocate/2 with resource limitation" do + test "limits resource usage" do + total_resources = + %{ + cpu: 100, + ram: 200, + dlk: %{net: 100}, + ulk: %{net: 100} + } + + # `proc` is limited on all resources. Some of these limits are greater to + # what the server can handle, so they are ignored. 
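# A rough standalone illustration of that clamping (plain maps and hypothetical numbers, not
# the Allocator API): with a single fully-dynamic process the grant per resource is roughly
# min(server_total, limit), so limits above the server's capacity are effectively no-ops.
server = %{cpu: 100, ram: 200}
limit = %{cpu: 500, ram: 50}

grant =
  Map.new(server, fn {res, total} -> {res, min(total, Map.get(limit, res, total))} end)

grant == %{cpu: 100, ram: 50}
# => true — the same cpu/ram values asserted for the local (:gateway) side below.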
+ [proc] = + TOPSetup.fake_process( + total_resources: total_resources, + l_dynamic: [:cpu, :ram, :dlk, :ulk], + l_limit: %{cpu: 500, ram: 50, dlk: %{net: 60}, ulk: %{net: 200}}, + r_dynamic: [:cpu], + r_limit: %{cpu: 10}, + static: %{}, + network_id: :net + ) + + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc]) + + assert_resource p.next_allocation.cpu, 100 + assert_resource p.next_allocation.ram, 50 + assert_resource p.next_allocation.dlk.net, 60 + assert_resource p.next_allocation.ulk.net, 100 + + # Same test, now on the remote TOP + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:target, total_resources, [proc]) + + assert_resource p.next_allocation.cpu, 10 + assert p.next_allocation.ram == 0 + assert p.next_allocation.dlk == %{} + assert p.next_allocation.ulk == %{} + end + + test "redistributes unclaimed resources to processes not bound to limits" do + total_resources = + %{ + cpu: 100, + ram: 200, + dlk: %{net: 100}, + ulk: %{net: 100} + } + + [proc] = + TOPSetup.fake_process( + total_resources: total_resources, + l_dynamic: [:cpu, :ram, :dlk, :ulk], + l_limit: %{ram: 50, dlk: %{net: 30}, ulk: %{}}, + r_dynamic: [:ulk], + r_limit: %{ulk: %{net: 20}}, + static: %{}, + network_id: :net + ) + + [proc2] = + TOPSetup.fake_process( + total_resources: total_resources, + l_dynamic: [:cpu, :ram, :dlk, :ulk], + r_dynamic: [:ulk], + static: %{}, + network_id: :net + ) + + assert {:ok, %{allocated: allocated, dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc, proc2]) + + [p1, p2] = allocated + + assert p1.process_id == proc.process_id + assert p2.process_id == proc2.process_id + + assert_resource p1.next_allocation.cpu, 50 + assert_resource p1.next_allocation.ram, 50 + assert_resource p1.next_allocation.dlk.net, 30 + assert_resource p1.next_allocation.ulk.net, 50 + + assert_resource p2.next_allocation.cpu, 50 + assert_resource p2.next_allocation.ram, 150 + assert_resource p2.next_allocation.dlk.net, 70 + assert_resource p2.next_allocation.ulk.net, 50 + + assert {:ok, %{allocated: allocated, dropped: []}} = + TOPAllocator.allocate(:target, total_resources, [proc, proc2]) + + [p1, p2] = allocated + + assert p1.process_id == proc.process_id + assert p2.process_id == proc2.process_id + + assert_resource p1.next_allocation.ulk.net, 20 + assert_resource p2.next_allocation.ulk.net, 80 + end + end + + describe "inter-top processes" do + test "different allocations and limitations for each TOP" do + {total_resources, _} = TOPSetup.Resources.resources(network_id: :net) + + [proc] = TOPSetup.fake_process(total_resources: total_resources) + + proc = + %{proc| + gateway_id: :gateway, + target_id: :target, + r_dynamic: [:ulk], + l_dynamic: [:ram, :cpu, :dlk], + static: %{}, + l_limit: %{dlk: %{net: 50}}, + r_limit: %{ulk: %{net: 20}}, + network_id: :net + } + + # First alloc, on gateway + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:gateway, total_resources, [proc]) + + assert p.local? + assert_resource p.next_allocation.cpu, total_resources.cpu + assert_resource p.next_allocation.ram, total_resources.ram + assert_resource p.next_allocation.dlk.net, 50 + assert p.next_allocation.ulk.net == 0 + + total_resources = put_in(total_resources, [:ulk, :net], 30) + + # Second alloc, on target + assert {:ok, %{allocated: [p], dropped: []}} = + TOPAllocator.allocate(:target, total_resources, [p]) + + refute p.local? 
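# (Aside: as read from this pair of assertions, `local?` simply tags which side the returned
# allocation refers to — true for the :gateway pass above, false for this :target pass.)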
+ + assert_resource p.next_allocation.ulk.net, 20 + assert p.next_allocation.dlk == %{} + assert p.next_allocation.cpu == 0 + assert p.next_allocation.ram == 0 + end + end +end diff --git a/test/process/model/top/scheduler_test.exs b/test/process/model/top/scheduler_test.exs new file mode 100644 index 00000000..808e903d --- /dev/null +++ b/test/process/model/top/scheduler_test.exs @@ -0,0 +1,499 @@ +defmodule Helix.Process.Model.TOP.SchedulerTest do + + use ExUnit.Case, async: true + + alias HELL.Utils + alias Helix.Process.Model.Process + alias Helix.Process.Model.TOP.Scheduler + + alias Helix.Test.Process.Setup.TOP, as: TOPSetup + + @slack 5 + + describe "simulate/1" do + test "simulates progress" do + # The process below has an objective of cpu: 100, ram: 100; and have + # allocated to it cpu: 1, ram: 5. The allocation part will be added to the + # process every second (but with a millisecond-grade precision). + process = + %{ + processed: nil, + objective: %{cpu: 100, ram: 100, dlk: %{}, ulk: %{}}, + l_allocated: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + last_checkpoint_time: nil, + creation_time: Utils.date_before(10), + state: :running + } + + # Notice that we've set the creation time to 10 seconds in the past, so + # the process should have some ~10 MHz allocated to the `processed`, and + # as such it should not be deemed completed. + assert {:running, p} = Scheduler.simulate(process) + + # As part of the simulation, the `processed` field of the process was + # updated, with the expected processed (accounting for delays, since this + # is a set of floating-point operations at millisecond scale). + assert_in_delta p.processed.cpu, 10, @slack + assert_in_delta p.processed.ram, 50, @slack + + # Now we'll update the original process to be marked as created 20 seconds + # ago. This means it should have ~125 processed RAM and ~25 CPU. + # That's enough RAM but not enough CPU. + process = %{process| creation_time: Utils.date_before(25)} + + assert {:running, p} = Scheduler.simulate(process) + assert_in_delta p.processed.cpu, 25, @slack + assert_in_delta p.processed.ram, 125, @slack + + # OK, 10 minutes should be enough + process = %{process| creation_time: Utils.date_before(600)} + assert {:completed, p} = Scheduler.simulate(process) + + assert p.processed.cpu > p.objective.cpu + assert p.processed.ram > p.objective.ram + end + + test "simulate progress (with DLK/ULK)" do + process = + %{ + processed: nil, + objective: %{cpu: 10, ram: 5, dlk: %{net: 100}, ulk: %{net: 50}}, + l_allocated: %{cpu: 1, ram: 5, dlk: %{net: 1}, ulk: %{net: 0.5}}, + last_checkpoint_time: nil, + creation_time: Utils.date_before(10), + state: :running + } + + # Ran for 10 seconds... Not enough + assert {:running, p} = Scheduler.simulate(process) + + # CPU and RAM are done + assert p.processed.cpu > p.objective.cpu + assert p.processed.ram > p.objective.ram + + # But there's still a long way to go for DLK and ULK + assert_in_delta p.processed.dlk.net, 10, @slack + assert_in_delta p.processed.ulk.net, 5, @slack + + # But given enough time... 
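# (Arithmetic for "enough time": at 1 unit/s the 100-unit DLK objective needs ~100 s, and at
# 0.5 unit/s the 50-unit ULK objective needs the same ~100 s, so backdating creation by 100 s
# pushes every resource past its objective.)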
+ process = %{process| creation_time: Utils.date_before(100)} + + assert {:completed, p} = Scheduler.simulate(process) + assert p.processed.cpu > p.objective.cpu + assert p.processed.ram > p.objective.ram + assert p.processed.dlk.net > p.objective.dlk.net + assert p.processed.ulk.net > p.objective.ulk.net + end + + test "simulate loading a process from DB (has `last_checkpoint_time`)" do + # See, the process below was started over a day ago, but that date will be + # ignored by the simulator, since we have a `last_checkpoint_time`. This + # means that the progress of the process was saved on the DB for some + # reason. It may be the case that the user paused her process. This would + # lead to an `last_checkpoint_time` being set, with the previous processed + # resources saved. If the process keeps paused for a year, it still + # should not be marked as completed. Capisce? + process = + %{ + processed: nil, + objective: %{cpu: 100, ram: 100, dlk: %{}, ulk: %{}}, + l_allocated: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + last_checkpoint_time: Utils.date_before(10), + creation_time: Utils.date_before(86_400), + state: :running + } + + # Still running + assert {:running, p} = Scheduler.simulate(process) + + assert_in_delta p.processed.cpu, 10, @slack + assert_in_delta p.processed.ram, 50, @slack + + # Give it enough time... + process = %{process| last_checkpoint_time: Utils.date_before(100)} + + # Aaaaand we are done. + assert {:completed, _} = Scheduler.simulate(process) + end + + test "ignores paused processes" do + process = %{state: :paused} + + assert {:paused, _p} = Scheduler.simulate(process) + end + end + + describe "estimate_completion/1" do + test "estimates completion time of running process" do + # Never processed anything; needs 100 MHz at 1 MHz per second. + process = + %{ + processed: nil, + objective: %{cpu: 100, ram: 100, dlk: %{}, ulk: %{}}, + l_allocated: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + next_allocation: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :running + } + + assert {p, estimation} = Scheduler.estimate_completion(process) + + # At this rate, it would take ~100 seconds to complete the process + assert_in_delta estimation, 100, 0.1 + + # The returned process has gone through a simulation + assert p.processed + + # Now this new process has already processed 95 MHz, so it would need 5s + # of CPU usage in order to complete it.. However, it still needs ~20s of + # RAM usage in order to be fully processed. + process = %{process| processed: %{cpu: 95, ram: 0, dlk: %{}, ulk: %{}}} + + assert {_p, estimation} = Scheduler.estimate_completion(process) + assert_in_delta estimation, 20, 0.1 + + process = + %{ + processed: nil, + objective: %{cpu: 1, ram: 10, dlk: %{net: 100}, ulk: %{net: 50}}, + l_allocated: %{cpu: 1, ram: 50, dlk: %{net: 20}, ulk: %{net: 25}}, + next_allocation: %{cpu: 1, ram: 50, dlk: %{net: 20}, ulk: %{net: 25}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :running + } + + # This is a variation of the above test in order to verify that this + # behaviour holds true for KV resources (DLK/ULK) + assert {_p, estimation} = Scheduler.estimate_completion(process) + + # ~5s to fill DLK up + assert_in_delta estimation, 5, 0.1 + end + + test "estimates completion time of completed processes" do + # This process is already completed!!11! 
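# (Every `processed` value below already exceeds its objective, so the estimation comes back
# as -1 — the convention these tests rely on for "nothing left to do".)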
+ process = + %{ + processed: %{cpu: 11, ram: 11, dlk: %{}, ulk: %{}}, + objective: %{cpu: 10, ram: 10, dlk: %{}, ulk: %{}}, + l_allocated: %{cpu: 1, ram: 1, dlk: %{}, ulk: %{}}, + next_allocation: %{cpu: 1, ram: 1, dlk: %{}, ulk: %{}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :running + } + + assert {_p, estimation} = Scheduler.estimate_completion(process) + assert estimation == -1 + end + + test "estimates completion time of paused processes" do + process = + %{ + processed: :i, + objective: :dont, + l_allocated: :care, + state: :paused + } + + assert {_p, estimation} = Scheduler.estimate_completion(process) + assert estimation == :infinity + end + end + + describe "forecast/1" do + test "figures out the next process that will be completed" do + # Note: we'll be comparing processes with their identifier keys (`id`) + # because, once a process goes through `forecast/1`, it will be simulated, + # which will change its model. + + # p1 would take ~10s to complete + p1 = p1() + + # p2 would take ~5s to complete + p2 = p2() + + # Forecasting only `p1`.. Obviously it's the next one to be completed + assert %{ + completed: [], + next: {next, time_left}, + running: running + } = + Scheduler.forecast([p1]) + + # P1 should take some 10 seconds to complete + assert_in_delta time_left, 10, 0.1 + assert next.id == p1.id + assert Enum.find(running, &(&1.id == p1.id)) + assert length(running) == 1 + + # Forecasting `p1` and `p2`. `p2` shall complete first, in 5 seconds + assert %{ + completed: [], + next: {next, time_left}, + running: running + } = Scheduler.forecast([p1, p2]) + + assert_in_delta time_left, 5, 0.1 + assert next.id == p2.id + assert length(running) == 2 + + # Forecasting repeated processes `p1` and `p2`. The Scheduler doesn't know + # they are the same (and it's not the Scheduler's job anyway). The catch + # here is that some process will have completion time identical to others, + # so we must make sure the Scheduler only picks one to be the `next`. 
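# A tiny standalone sketch of that tie-breaking idea (plain tuples, not the Scheduler API):
estimates = [{1, 10.0}, {2, 5.0}, {3, 5.0}]

Enum.min_by(estimates, fn {_id, time_left} -> time_left end)
# => {2, 5.0} — even with identical completion times exactly one entry is picked, which is
# what the repeated-process forecast below checks.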
+ assert %{ + completed: [], + next: {next, time_left}, + running: running + } = Scheduler.forecast([p1, p1, p2, p2]) + + assert_in_delta time_left, 5, 0.1 + assert next.id == p2.id + assert length(running) == 4 + end + + test "filters completed processes" do + # p1 would take ~10s to complete + p1 = p1() + # p2 would take ~5s to complete + p2 = p2() + # p3 has already processed what it was supposed to + p3 = p3() + + assert %{ + completed: [proc_completed], + next: {next, time_left}, + running: running + } = Scheduler.forecast([p1, p2, p3]) + + assert proc_completed.id == p3.id + assert next.id == p2.id + assert_in_delta time_left, 5, 0.1 + assert length(running) == 2 + end + + test "filters paused processes" do + # p1 will be completed in 10s + p1 = p1() + # p2 will be completed in 5s + p2 = p2() + # p3 is completed + p3 = p3() + # p4 is paused + p4 = p4() + + assert %{ + completed: [proc_completed], + next: {next, time_left}, + running: running, + paused: [proc_paused] + } = Scheduler.forecast([p1, p2, p3, p4]) + + assert proc_completed.id == p3.id + assert next.id == p2.id + assert_in_delta time_left, 5, 0.1 + assert length(running) == 2 + assert proc_paused.id == p4.id + end + + test "empty list" do + assert %{completed: [], next: nil, running: [], paused: []} = + Scheduler.forecast([]) + end + + test "with processes, but none of them will be completed `next`" do + # p3 is completed + p3 = p3() + # p4 is paused + p4 = p4() + + assert %{ + completed: completed, + paused: paused, + running: [], + next: nil + } = Scheduler.forecast([p3, p3, p3, p4, p4, p4]) + + assert length(completed) == 3 + assert length(paused) == 3 + end + + test "processes 'waiting_allocation' are accurately forecast" do + # Describe the problem + + # p5 represents a recently created process, going through the allocator + # (and forecast) for the very first time. + # Without using the `next_allocation` virtual field of process, we'd lose + # this iteration, and on the forecast the `p5` would be marked as `paused` + # (since its state is `waiting_allocation`). + # However, when it goes through the forecast, it has already gone through + # the Allocator, so it *must have received* the allocation, it simply + # wasn't saved yet to the process model. + # That's why we use `waiting_allocation`: With this field, the `forecast` + # method knows that it should estimate the completion of this process + # based on its `next_allocation`, even though `allocated` is nil. + p5 = p5() + + %{completed: [], next: {process, time_left}, paused: []} = + Scheduler.forecast([p5]) + + # ~3.3s to complete + assert_in_delta time_left, 3.3, 0.1 + assert process.state == :running + end + + test "tricky scenario" do + # Context: the process below has processed very little. At the current + # rate (presented on `allocated`), it would complete in ~4 seconds, being + # CPU the bottleneck. Since the last processed time was 2 seconds ago, + # the correct result would be two seconds left for completion.... + # HOWEVER, on the `next_allocation`, the CPU allocation would skyrocket to + # `80`, reducing the CPU completion to less than a second, but the DLK + # allocation would go down to 1 unit per second, meaning it would take + # ~9 seconds for completion. This is the correct result. + # (After simulation, DLK would go to 40; then, on forecast estimation, + # an extra 10 units would be needed. At 1 unit/s, it takes 10 seconds. + # However, one unit of DLK has already been processed before. Hence, 9s). 
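# Worked numbers for the comment above (plain arithmetic, not Scheduler code):
dlk_after_sim = 1 + 2 * 20         # 1 unit already processed + 2 s at the old 20 units/s => 41
dlk_remaining = 50 - dlk_after_sim # => 9
dlk_remaining / 1                  # next_allocation drops DLK to 1 unit/s => 9.0 s left

# CPU still needs 100 - (1 + 2 * 25) = 49 units, but at 80 units/s that is ~0.6 s, and RAM is
# already past its objective, so DLK is the bottleneck and the forecast lands on ~9 s.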
+ p = + %{ + processed: %{cpu: 1, ram: 1, dlk: %{net: 1}, ulk: %{}}, + objective: %{cpu: 100, ram: 20, dlk: %{net: 50}, ulk: %{}}, + l_allocated: %{cpu: 25, ram: 10, dlk: %{net: 20}, ulk: %{}}, + next_allocation: %{cpu: 80, ram: 5, dlk: %{net: 1}, ulk: %{}}, + last_checkpoint_time: Utils.date_before(2000, :millisecond), + creation_time: nil, + state: :running + } + + assert %{next: {_, time_left}} = Scheduler.forecast([p]) + + assert_in_delta time_left, 9, 0.1 + end + + defp p1 do + # P1 is running and takes about ~10 seconds to complete + %{ + id: 1, + processed: nil, + objective: %{cpu: 10, ram: 15, dlk: %{}, ulk: %{}}, + l_reserved: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + l_allocated: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + next_allocation: %{cpu: 1, ram: 5, dlk: %{}, ulk: %{}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :running + } + end + + defp p2 do + # P2 is running and takes about ~5 seconds to complete + %{ + id: 2, + processed: nil, + objective: %{cpu: 10, ram: 0, dlk: %{net: 10}, ulk: %{net: 10}}, + l_reserved: %{cpu: 5, ram: 0, dlk: %{net: 2}, ulk: %{net: 3}}, + l_allocated: %{cpu: 5, ram: 0, dlk: %{net: 2}, ulk: %{net: 3}}, + next_allocation: %{cpu: 5, ram: 0, dlk: %{net: 2}, ulk: %{net: 3}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :running + } + end + + defp p3 do + # P3 already processed what it was supposed to (it's completed!) + %{ + id: 3, + processed: %{cpu: 100, ram: 100, dlk: %{net: 100}, ulk: %{}}, + objective: %{cpu: 99, ram: 99, dlk: %{net: 99}, ulk: %{}}, + l_reserved: %{cpu: 10, ram: 10, dlk: %{}, ulk: %{}}, + l_allocated: %{cpu: 10, ram: 10, dlk: %{}, ulk: %{}}, + next_allocation: %{cpu: 10, ram: 10, dlk: %{}, ulk: %{}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :running + } + end + + defp p4 do + # P4 would complete in less than 1 second... if only it wasn't paused + %{ + id: 4, + processed: nil, + objective: %{cpu: 10, ram: 0, dlk: %{net: 10}, ulk: %{net: 10}}, + l_allocated: %{cpu: 9, ram: 0, dlk: %{net: 9}, ulk: %{net: 9}}, + l_reserved: %{cpu: 9, ram: 0, dlk: %{net: 9}, ulk: %{net: 9}}, + next_allocation: %{cpu: 9, ram: 0, dlk: %{net: 9}, ulk: %{net: 9}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :paused + } + end + + defp p5 do + # P5 was recently created and is going through the forecast/simulation for + # the very first time. + %{ + id: 5, + processed: nil, + objective: %{cpu: 10, ram: 0, dlk: %{net: 10}, ulk: %{net: 10}}, + l_reserved: %{}, + next_allocation: %{cpu: 5, ram: 0, dlk: %{net: 10}, ulk: %{net: 3}}, + last_checkpoint_time: nil, + creation_time: DateTime.utc_now(), + state: :waiting_allocation + } + end + end + + describe "checkpoint/1" do + test "sets allocation; last_checkpoint_time" do + # This is the new allocation of p1, given by the Allocator + next_allocation = %{cpu: 100, ram: 0, dlk: %{}, ulk: %{}} + + # P1 was never processed nor allocated before. It represents a recently + # created process. 
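# (As exercised below, `checkpoint/1` only persists the Allocator's decision — copying
# `next_allocation` into `l_reserved` and stamping `last_checkpoint_time`; it does not
# simulate work, which is why `processed` is still nil afterwards.)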
+ [p1] = + TOPSetup.fake_process(next_allocation: next_allocation, local?: true) + + refute p1.processed + assert p1.l_reserved == %{} + assert p1.l_allocated == Process.Resources.initial() + refute p1.last_checkpoint_time + assert p1.next_allocation + + # `checkpoint/2` is telling us that the process should be updated + assert {true, changeset} = Scheduler.checkpoint(p1) + + new_proc = Ecto.Changeset.apply_changes(changeset) + + # The given allocation was saved on the process + assert new_proc.l_reserved == next_allocation + + # Checkpoint time was set + assert new_proc.last_checkpoint_time + + # The given process was not simulated inside `checkpoint`. The `processed` + # entry remains unchanged + refute new_proc.processed + end + + test "does not change the model when the allocation remains unchanged" do + allocated = %{cpu: 100, ram: 0, dlk: %{}, ulk: %{}} + + [p1] = + TOPSetup.fake_process( + l_reserved: allocated, next_allocation: allocated, local?: true + ) + + # The process and the resulting allocation are the same + assert p1.l_reserved == allocated + + # Returns `false`, meaning "do not update" + refute Scheduler.checkpoint(p1) + end + end +end diff --git a/test/process/public/index_test.exs b/test/process/public/index_test.exs index 1455d633..91f6b87f 100644 --- a/test/process/public/index_test.exs +++ b/test/process/public/index_test.exs @@ -28,7 +28,7 @@ defmodule Helix.Process.Public.IndexTest do process2_opts = [ gateway_id: server.server_id, type: :file_download, - target_server_id: process2_destination + target_id: process2_destination ] {process2, _} = ProcessSetup.process(process2_opts) @@ -36,15 +36,18 @@ defmodule Helix.Process.Public.IndexTest do process3_gateway = remote.server_id process3_opts = [ gateway_id: process3_gateway, - target_server_id: server.server_id + target_id: server.server_id ] {process3, _} = ProcessSetup.process(process3_opts) index = ProcessIndex.index(server.server_id, entity.entity_id) - result_process1 = Enum.find(index.owned, &(find_by_id(&1, process1))) - result_process2 = Enum.find(index.owned, &(find_by_id(&1, process2))) - result_process3 = Enum.find(index.targeting, &(find_by_id(&1, process3))) + assert length(index.local) == 2 + assert length(index.remote) == 1 + + result_process1 = Enum.find(index.local, &(find_by_id(&1, process1))) + result_process2 = Enum.find(index.local, &(find_by_id(&1, process2))) + result_process3 = Enum.find(index.remote, &(find_by_id(&1, process3))) # Result comes in binary format assert is_binary(result_process1.process_id) diff --git a/test/process/query/process_test.exs b/test/process/query/process_test.exs index c91903ef..b653dc1c 100644 --- a/test/process/query/process_test.exs +++ b/test/process/query/process_test.exs @@ -2,104 +2,55 @@ defmodule Helix.Process.Query.ProcessTest do use Helix.Test.Case.Integration - alias Helix.Account.Action.Flow.Account, as: AccountFlow alias Helix.Server.Model.Server alias Helix.Software.Model.File - alias Helix.Software.Model.SoftwareType.Firewall.Passive, as: Firewall - alias Helix.Process.Action.Process, as: ProcessAction alias Helix.Process.Query.Process, as: ProcessQuery - alias Helix.Test.Cache.Helper, as: CacheHelper - alias Helix.Test.Account.Factory, as: AccountFactory - # alias Helix.Test.Server.Setup, as: ServerSetup + alias Helix.Test.Server.Setup, as: ServerSetup alias Helix.Test.Process.Setup, as: ProcessSetup alias Helix.Test.Process.TOPHelper - defp create_server do - account = AccountFactory.insert(:account) - {:ok, %{server: server}} = 
AccountFlow.setup_account(account) - - CacheHelper.sync_test() - - server - end - - describe "get_running_processes_of_type_on_server/2" do - test "returns what it should" do - server = create_server() - - firewall = %{ - gateway_id: server.server_id, - target_server_id: server.server_id, - file_id: File.ID.generate(), - process_data: %Firewall{version: 1}, - process_type: "firewall_passive" - } - - {:ok, firewall1, _} = ProcessAction.create(firewall) - {:ok, firewall2, _} = ProcessAction.create(firewall) - - expected = MapSet.new([firewall1, firewall2], &(&1.process_id)) - - result = - server - |> ProcessQuery.get_running_processes_of_type_on_server("firewall_passive") - |> MapSet.new(&(&1.process_id)) - - assert MapSet.equal?(expected, result) - - TOPHelper.top_stop(server) - end - end - describe "get_processes_on_server/1" do - test "returns all processes on server" do - server = create_server() + test "returns both local and remote servers" do + {server, _} = ServerSetup.server() + {remote, _} = ServerSetup.server() - firewall = %{ + # Process 1 affects player's own server; started by own player; has no + # file / connection + process1_opts = [ gateway_id: server.server_id, - target_server_id: server.server_id, - file_id: File.ID.generate(), - process_data: %Firewall{version: 1}, - process_type: "firewall_passive" - } - - {:ok, _, _} = ProcessAction.create(firewall) - {:ok, _, _} = ProcessAction.create(firewall) - - processes_on_server = - server - |> ProcessQuery.get_processes_on_server() - |> Enum.count() - - assert 2 == processes_on_server - - TOPHelper.top_stop(server) - end - end - - describe "get_processes_of_type_targeting_server" do - @tag :pending - test "returns expected external processes" - end + single_server: true, + type: :bruteforce + ] + {process1, _} = ProcessSetup.process(process1_opts) + + # Process 2 affects another server; started by own player, has file and + # connection + process2_destination = remote.server_id + process2_opts = [ + gateway_id: server.server_id, + type: :file_download, + target_id: process2_destination + ] + {process2, _} = ProcessSetup.process(process2_opts) - describe "get_processes_targeting_server/1" do - test "returns processes that are not running on the gateway" do - server1 = create_server() + # Process 3 affects player's own server, started by third-party. 
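# (Note on the semantics being tested here: a server now "sees" both the processes it runs
# locally — matched by gateway_id — and processes started elsewhere that target it — matched
# by target_id — which is why all three processes show up in the query below.)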
+ process3_gateway = remote.server_id + process3_opts = [ + gateway_id: process3_gateway, + target_id: server.server_id + ] + {process3, _} = ProcessSetup.process(process3_opts) - firewall = %{ - gateway_id: server1.server_id, - target_server_id: server1.server_id, - file_id: File.ID.generate(), - process_data: %Firewall{version: 1}, - process_type: "firewall_passive" - } + processes = ProcessQuery.get_processes_on_server(server.server_id) - {:ok, _, _} = ProcessAction.create(firewall) + assert length(processes) == 3 - [] = ProcessQuery.get_processes_targeting_server(server1.server_id) + assert Enum.find(processes, &(&1.process_id == process1.process_id)) + assert Enum.find(processes, &(&1.process_id == process2.process_id)) + assert Enum.find(processes, &(&1.process_id == process3.process_id)) - TOPHelper.top_stop(server1) + TOPHelper.top_stop() end end @@ -118,7 +69,7 @@ defmodule Helix.Process.Query.ProcessTest do # (one process of type `download` who is downloading that specific file) assert [process] = ProcessQuery.get_custom( - download1.process_type, + download1.type, gateway_id, %{file_id: download1.file_id} ) @@ -128,14 +79,16 @@ defmodule Helix.Process.Query.ProcessTest do # Cannot find that same process with random file refute \ ProcessQuery.get_custom( - download1.process_type, + download1.type, gateway_id, %{file_id: File.ID.generate()} ) + + TOPHelper.top_stop() end test "returns empty list if no process is found" do - refute ProcessQuery.get_custom("file_download", Server.ID.generate(), %{}) + refute ProcessQuery.get_custom(:file_download, Server.ID.generate(), %{}) end end end diff --git a/test/process/query/top_test.exs b/test/process/query/top_test.exs new file mode 100644 index 00000000..268a0955 --- /dev/null +++ b/test/process/query/top_test.exs @@ -0,0 +1,29 @@ +defmodule Helix.Process.Query.TOPTest do + + use Helix.Test.Case.Integration + + alias Helix.Process.Query.TOP, as: TOPQuery + + alias Helix.Test.Network.Helper, as: NetworkHelper + alias Helix.Test.Server.Setup, as: ServerSetup + + @internet_id NetworkHelper.internet_id() + + describe "load_top_resources/1" do + test "loads all resources on server" do + {server, _} = ServerSetup.server() + + resources = TOPQuery.load_top_resources(server.server_id) + + # Note: these assertions will fail once we modify the initial hardware, + # but that's on purpose. 
Once that happens, we'll probably have a proper + # API to fetch the total server resources, and we can use it to: + # - Make the assertions below dynamic (not hard-coded) + # - Create new tests with edge-cases on resource utilization + assert resources.cpu == 1333 + assert resources.ram == 1024 + assert resources.dlk[@internet_id] == 100 + assert resources.ulk[@internet_id] == 100 + end + end +end diff --git a/test/process/state/top/manager_test.exs b/test/process/state/top/manager_test.exs deleted file mode 100644 index 788232b7..00000000 --- a/test/process/state/top/manager_test.exs +++ /dev/null @@ -1,13 +0,0 @@ -defmodule Helix.Process.State.TOP.ManagerTest do - - use Helix.Test.Case.Integration - - alias Helix.Process.State.TOP.Manager - - test "returns the registered process when it exists" do - Manager.register("a:b::c:d") - - assert {:ok, self()} == Manager.prepare_top("a:b::c:d") - assert {:ok, self()} == Manager.prepare_top("a:b::c:d") - end -end diff --git a/test/process/state/top/server_test.exs b/test/process/state/top/server_test.exs deleted file mode 100644 index c9657bf2..00000000 --- a/test/process/state/top/server_test.exs +++ /dev/null @@ -1,134 +0,0 @@ -defmodule Helix.Process.State.TOP.TOPServerTest do - - use Helix.Test.Case.Integration - - alias Helix.Test.Process.StaticProcessableExample - alias Helix.Account.Action.Flow.Account, as: AccountFlow - alias Helix.Process.State.TOP.Server, as: TOPServer - alias Helix.Process.Query.Process, as: ProcessQuery - - alias Helix.Test.Cache.Helper, as: CacheHelper - alias Helix.Test.Account.Factory, as: AccountFactory - - defmodule ProcessThatCausesOverflow do - defstruct [] - defimpl Helix.Process.Model.Processable do - def dynamic_resources(_), - do: [] - def minimum(_), - do: %{running: %{cpu: 999_999_999}} - def kill(_, process, _), - do: {%{Ecto.Changeset.change(process)| action: :delete}, []} - def state_change(_, process, _, _), - do: {process, []} - def conclusion(data, process), - do: state_change(data, process, :running, :complete) - def after_read_hook(data), - do: data - end - end - - defp create_server do - account = AccountFactory.insert(:account) - {:ok, %{server: server}} = AccountFlow.setup_account(account) - - CacheHelper.sync_test() - - server - end - - defp start_process(top, server_id) do - params = %{ - gateway_id: server_id, - target_server_id: server_id, - process_data: %StaticProcessableExample{}, - process_type: "static_example_process", - objective: %{cpu: 9_999_999} - } - - TOPServer.create(top, params) - end - - setup do - server = create_server() - - {:ok, top} = TOPServer.start_link(server.server_id) - - {:ok, top: top, server: server} - end - - describe "create/2" do - test "succeeds with proper input", context do - params = %{ - gateway_id: context.server.server_id, - target_server_id: context.server.server_id, - process_data: %StaticProcessableExample{}, - process_type: "static_example_process", - objective: %{cpu: 9_999_999} - } - - assert {:ok, _} = TOPServer.create(context.top, params) - end - - test "fails if new process would cause resource overflow", context do - params = %{ - gateway_id: context.server.server_id, - target_server_id: context.server.server_id, - process_data: %ProcessThatCausesOverflow{}, - process_type: "overflow_example", - objective: %{cpu: 9_999_999} - } - - assert {:error, :resources} == TOPServer.create(context.top, params) - end - end - - describe "priority/3" do - test "changes the process priority", context do - {:ok, process} = start_process(context.top, 
context.server.server_id) - - TOPServer.priority(context.top, process, 5) - - :timer.sleep(100) - - assert 5 == ProcessQuery.fetch(process.process_id).priority - end - end - - describe "pause/2" do - test "changes state of process", context do - {:ok, process} = start_process(context.top, context.server.server_id) - - TOPServer.pause(context.top, process) - - :timer.sleep(100) - - assert :paused == ProcessQuery.fetch(process.process_id).state - end - end - - describe "resume/2" do - test "changes state of a paused process to running", context do - {:ok, process} = start_process(context.top, context.server.server_id) - - TOPServer.pause(context.top, process) - :timer.sleep(50) - TOPServer.resume(context.top, process) - :timer.sleep(50) - - assert :running == ProcessQuery.fetch(process.process_id).state - end - end - - describe "kill/2" do - test "removes a process from a server", context do - {:ok, process} = start_process(context.top, context.server.server_id) - - TOPServer.kill(context.top, process) - - :timer.sleep(100) - - refute ProcessQuery.fetch(process.process_id) - end - end -end diff --git a/test/server/public/index_test.exs b/test/server/public/index_test.exs index 45cb8962..2f5cf9e3 100644 --- a/test/server/public/index_test.exs +++ b/test/server/public/index_test.exs @@ -192,66 +192,72 @@ defmodule Helix.Server.Public.IndexTest do end describe "render_gateway/1" do - {server, %{entity: entity}} = ServerSetup.server() + test "renders the gateway index" do + {server, %{entity: entity}} = ServerSetup.server() - rendered = - server - |> ServerIndex.gateway(entity.entity_id) - |> ServerIndex.render_gateway() + rendered = + server + |> ServerIndex.gateway(entity.entity_id) + |> ServerIndex.render_gateway() - assert is_binary(rendered.name) - assert is_binary(rendered.password) + assert is_binary(rendered.name) + assert is_binary(rendered.password) - Enum.each(rendered.nips, fn [network_id, ip] -> - assert is_binary(network_id) - assert is_binary(ip) - end) + Enum.each(rendered.nips, fn [network_id, ip] -> + assert is_binary(network_id) + assert is_binary(ip) + end) - assert rendered.filesystem - assert rendered.logs - assert rendered.processes - assert rendered.tunnels + assert rendered.filesystem + assert rendered.logs + assert rendered.processes + assert rendered.tunnels + end end describe "remote/2" do - {server, _} = ServerSetup.server() - {entity, _} = EntitySetup.entity() - server_nips = ServerHelper.get_all_nips(server) + test "returns the remote server" do + {server, _} = ServerSetup.server() + {entity, _} = EntitySetup.entity() + server_nips = ServerHelper.get_all_nips(server) - remote = ServerIndex.remote(server, entity.entity_id) + remote = ServerIndex.remote(server, entity.entity_id) - # ServerIndex info - assert remote.nips == server_nips + # ServerIndex info + assert remote.nips == server_nips - # Info specific to gateway - refute Map.has_key?(remote, :password) - refute Map.has_key?(remote, :name) + # Info specific to gateway + refute Map.has_key?(remote, :password) + refute Map.has_key?(remote, :name) - # Info retrieved from sub-Indexes - assert remote.filesystem - assert remote.logs - assert remote.processes - assert remote.tunnels + # Info retrieved from sub-Indexes + assert remote.filesystem + assert remote.logs + assert remote.processes + assert remote.tunnels + end end describe "render_remote/1" do - {server, _} = ServerSetup.server() - {entity, _} = EntitySetup.entity() - - rendered = - server - |> ServerIndex.remote(entity.entity_id) - |> 
ServerIndex.render_remote() - - Enum.each(rendered.nips, fn [network_id, ip] -> - assert is_binary(network_id) - assert is_binary(ip) - end) - - assert rendered.filesystem - assert rendered.logs - assert rendered.processes - assert rendered.tunnels + test "renders the remote index" do + {server, _} = ServerSetup.server() + {entity, _} = EntitySetup.entity() + + rendered = + server + |> ServerIndex.remote(entity.entity_id) + |> ServerIndex.render_remote() + + Enum.each(rendered.nips, fn [network_id, ip] -> + assert is_binary(network_id) + assert is_binary(ip) + end) + + assert rendered.filesystem + assert rendered.logs + assert rendered.processes + assert rendered.tunnels + end end defp find_endpoint(endpoints, %{network_id: network_id, ip: ip}) do diff --git a/test/server/websocket/channel/server/requests/pftp_test.exs b/test/server/websocket/channel/server/requests/pftp_test.exs index 202cd7f8..7b3e7cb9 100644 --- a/test/server/websocket/channel/server/requests/pftp_test.exs +++ b/test/server/websocket/channel/server/requests/pftp_test.exs @@ -1,4 +1,4 @@ -defmodule Helix.Server.Websocket.Channel.Server.Requests.PFTP do +defmodule Helix.Server.Websocket.Channel.Server.Requests.PFTPTest do use Helix.Test.Case.Integration diff --git a/test/software/action/flow/file/transfer_test.exs b/test/software/action/flow/file/transfer_test.exs index 9bf42b07..66cbed98 100644 --- a/test/software/action/flow/file/transfer_test.exs +++ b/test/software/action/flow/file/transfer_test.exs @@ -27,11 +27,11 @@ defmodule Helix.Software.Action.Flow.File.TransferTest do ) # Generated process has the expected data - assert process.process_type == "file_download" + assert process.type == :file_download assert process.file_id == file.file_id - assert process.process_data.type == :download - assert process.process_data.connection_type == :ftp - assert process.process_data.destination_storage_id == + assert process.data.type == :download + assert process.data.connection_type == :ftp + assert process.data.destination_storage_id == destination_storage.storage_id # Generated connection is valid @@ -64,11 +64,11 @@ defmodule Helix.Software.Action.Flow.File.TransferTest do ) # Generated process has the expected data - assert process.process_type == "file_upload" + assert process.type == :file_upload assert process.file_id == file.file_id - assert process.process_data.type == :upload - assert process.process_data.connection_type == :ftp - assert process.process_data.destination_storage_id == + assert process.data.type == :upload + assert process.data.connection_type == :ftp + assert process.data.destination_storage_id == destination_storage.storage_id # Generated connection is valid @@ -101,11 +101,11 @@ defmodule Helix.Software.Action.Flow.File.TransferTest do ) # Generated process has the expected data - assert process.process_type == "file_download" + assert process.type == :file_download assert process.file_id == file.file_id - assert process.process_data.type == :download - assert process.process_data.connection_type == :public_ftp - assert process.process_data.destination_storage_id == + assert process.data.type == :download + assert process.data.connection_type == :public_ftp + assert process.data.destination_storage_id == destination_storage.storage_id # Generated connection is valid; tunnel was created diff --git a/test/software/action/flow/software/firewall_test.exs b/test/software/action/flow/software/firewall_test.exs index 56f68ab6..adf7cf9e 100644 --- a/test/software/action/flow/software/firewall_test.exs +++ 
b/test/software/action/flow/software/firewall_test.exs @@ -19,8 +19,8 @@ # result = FirewallFlow.execute(file, server.server_id, %{}) # assert {:ok, process} = result -# assert %FirewallPassive{} = process.process_data -# assert "firewall_passive" == process.process_type +# assert %FirewallPassive{} = process.data +# assert "firewall_passive" == process.type # TOPHelper.top_stop(server) # end diff --git a/test/software/action/flow/software/log_forger_test.exs b/test/software/action/flow/software/log_forger_test.exs index 84193f0f..733e4033 100644 --- a/test/software/action/flow/software/log_forger_test.exs +++ b/test/software/action/flow/software/log_forger_test.exs @@ -48,8 +48,8 @@ # result = LogForgerFlow.execute(file, server.server_id, params) # assert {:ok, process} = result -# assert %LogForge{} = process.process_data -# assert "log_forger" == process.process_type +# assert %LogForge{} = process.data +# assert "log_forger" == process.type # TOPHelper.top_stop(server) # end @@ -63,7 +63,7 @@ # {file, _} = SoftwareSetup.file(type: :log_forger, storage_id: storage_id) # params = %{ -# target_server_id: server, +# target_id: server, # message: "", # operation: :create, # entity_id: entity.entity_id @@ -71,8 +71,8 @@ # result = LogForgerFlow.execute(file, server.server_id, params) # assert {:ok, process} = result -# assert %LogForge{} = process.process_data -# assert "log_forger" == process.process_type +# assert %LogForge{} = process.data +# assert "log_forger" == process.type # TOPHelper.top_stop(server) # end diff --git a/test/software/event/handler/cracker_test.exs b/test/software/event/handler/cracker_test.exs index 6f635225..ac291a07 100644 --- a/test/software/event/handler/cracker_test.exs +++ b/test/software/event/handler/cracker_test.exs @@ -18,7 +18,7 @@ defmodule Helix.Software.Event.CrackerTest do test "life cycle for overflow attack against wire transfer connection" do {process, %{acc1: acc1, player: player}} = BankSetup.wire_transfer_flow() - transfer_id = process.process_data.transfer_id + transfer_id = process.data.transfer_id # Simulate completion of overflow process event = EventSetup.Software.overflow_conclusion(process) @@ -94,13 +94,11 @@ defmodule Helix.Software.Event.CrackerTest do describe "bank_transfer_aborted/1" do test "it stops all overflow attacks running on aborted transfer" do {process, _} = BankSetup.wire_transfer_flow() - # transfer_id = process.process_data.transfer_id + # transfer_id = process.data.transfer_id # Abort transfer ProcessAction.kill(process, :normal) - :timer.sleep(100) - TOPHelper.top_stop(process.gateway_id) end end diff --git a/test/software/event/handler/decryptor_test.exs b/test/software/event/handler/decryptor_test.exs index 8f6d4319..17cf28dd 100644 --- a/test/software/event/handler/decryptor_test.exs +++ b/test/software/event/handler/decryptor_test.exs @@ -22,7 +22,7 @@ # scope: :global, # target_file_id: target_file.file_id, # storage_id: Storage.ID.generate(), -# target_server_id: Server.ID.generate() +# target_id: Server.ID.generate() # } # Event.emit(event) @@ -37,7 +37,7 @@ # scope: :global, # target_file_id: target_file.file_id, # storage_id: Storage.ID.generate(), -# target_server_id: Server.ID.generate() +# target_id: Server.ID.generate() # } # # Create several keys for the file @@ -65,7 +65,7 @@ # event = %ProcessConclusionEvent{ # scope: :local, # target_file_id: target_file.file_id, -# target_server_id: server_id, +# target_id: server_id, # storage_id: storage.storage_id # } @@ -74,7 +74,7 @@ # [key] = 
CryptoKeyInternal.get_on_storage(storage) # assert target_file.file_id == key.target_file_id -# assert server_id == key.target_server_id +# assert server_id == key.target_id # end # end # end diff --git a/test/software/event/handler/encryptor_test.exs b/test/software/event/handler/encryptor_test.exs index be9854c5..18989773 100644 --- a/test/software/event/handler/encryptor_test.exs +++ b/test/software/event/handler/encryptor_test.exs @@ -19,7 +19,7 @@ # server_id = Server.ID.generate() # event = %ProcessConclusionEvent{ # target_file_id: target_file.file_id, -# target_server_id: server_id, +# target_id: server_id, # storage_id: storage.storage_id, # version: Enum.random(1..32) # } @@ -29,7 +29,7 @@ # [key] = CryptoKeyInternal.get_on_storage(storage) # assert target_file.file_id == key.target_file_id -# assert server_id == key.target_server_id +# assert server_id == key.target_id # end # test "changes the crypto version of the target file" do @@ -38,7 +38,7 @@ # server_id = Server.ID.generate() # event = %ProcessConclusionEvent{ # target_file_id: target_file.file_id, -# target_server_id: server_id, +# target_id: server_id, # storage_id: storage.storage_id, # version: Enum.random(1..32) # } @@ -55,7 +55,7 @@ # server_id = Server.ID.generate() # event = %ProcessConclusionEvent{ # target_file_id: target_file.file_id, -# target_server_id: server_id, +# target_id: server_id, # storage_id: storage.storage_id, # version: Enum.random(1..32) # } diff --git a/test/software/event/handler/transfer_test.exs b/test/software/event/handler/transfer_test.exs index 2e6cdb05..f62965fb 100644 --- a/test/software/event/handler/transfer_test.exs +++ b/test/software/event/handler/transfer_test.exs @@ -38,8 +38,8 @@ defmodule Helix.Software.Event.Handler.File.TransferTest do new_file = FileQuery.fetch(new_file.file_id) {:ok, server_id} = CacheQuery.from_storage_get_server(new_file.storage_id) - # The new file has been saved on `target_server_id` (it was uploaded) - assert server_id == process.target_server_id + # The new file has been saved on `target_id` (it was uploaded) + assert server_id == process.target_id TOPHelper.top_stop(process.gateway_id) end diff --git a/test/software/model/file_test.exs b/test/software/model/file_test.exs index 92b7c4b6..c7a6f2a0 100644 --- a/test/software/model/file_test.exs +++ b/test/software/model/file_test.exs @@ -1,6 +1,6 @@ defmodule Helix.Software.Model.FileTest do - use ExUnit.Case, async: true + use Helix.Test.Case.Integration alias Ecto.Changeset alias Helix.Software.Model.File @@ -10,7 +10,9 @@ defmodule Helix.Software.Model.FileTest do describe "set_crypto_version/2" do test "crypto_version is changed" do - {_, %{changeset: original_cs}} = SoftwareSetup.fake_file() + {_, %{changeset: original_cs}} = + SoftwareSetup.fake_file(fake_server: true) + version = 10 changeset = File.set_crypto_version(original_cs, version) diff --git a/test/software/model/software_type/log_forge/process_type_test.exs b/test/software/model/software_type/log_forge/process_type_test.exs index 1cb86a36..5f50312b 100644 --- a/test/software/model/software_type/log_forge/process_type_test.exs +++ b/test/software/model/software_type/log_forge/process_type_test.exs @@ -16,18 +16,20 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do alias Helix.Test.Process.View.Helper, as: ProcessViewHelper alias Helix.Test.Software.Setup, as: SoftwareSetup - @forger_file (SoftwareSetup.file!(type: :log_forger)) + defp forger_file do + SoftwareSetup.file!(type: :log_forger) + end describe "create/2" do test 
"returns changeset if invalid" do - assert {:error, changeset} = LogForge.create(@forger_file, %{}) + assert {:error, changeset} = LogForge.create(forger_file(), %{}) assert %Changeset{valid?: false} = changeset end test "requires operation and entity_id" do expected_errors = [:operation, :entity_id] - assert {:error, changeset} = LogForge.create(@forger_file, %{}) + assert {:error, changeset} = LogForge.create(forger_file(), %{}) errors = Keyword.keys(changeset.errors) assert Enum.sort(expected_errors) == Enum.sort(errors) end @@ -37,17 +39,17 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do expected_errors = [:target_log_id] - assert {:error, changeset} = LogForge.create(@forger_file, params) + assert {:error, changeset} = LogForge.create(forger_file(), params) errors = Keyword.keys(changeset.errors) assert Enum.all?(expected_errors, &(&1 in errors)) end - test "requires target_server_id when operation is create" do + test "requires target_id when operation is create" do params = %{message: "", operation: :create} - expected_errors = [:target_server_id] + expected_errors = [:target_id] - assert {:error, changeset} = LogForge.create(@forger_file, params) + assert {:error, changeset} = LogForge.create(forger_file(), params) errors = Keyword.keys(changeset.errors) assert Enum.all?(expected_errors, &(&1 in errors)) end @@ -60,14 +62,14 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do "entity_id" => to_string(Entity.ID.generate()) } params_create = %{ - "target_server_id" => to_string(Server.ID.generate()), + "target_id" => to_string(Server.ID.generate()), "message" => "A weapon to surpass Datal Gear", "operation" => :create, "entity_id" => to_string(Entity.ID.generate()) } - assert {:ok, %LogForge{}} = LogForge.create(@forger_file, params_edit) - assert {:ok, %LogForge{}} = LogForge.create(@forger_file, params_create) + assert {:ok, %LogForge{}} = LogForge.create(forger_file(), params_edit) + assert {:ok, %LogForge{}} = LogForge.create(forger_file(), params_create) end test "accepts native erlang term entries" do @@ -78,14 +80,14 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do entity_id: Entity.ID.generate() } params_create = %{ - target_server_id: Server.ID.generate(), + target_id: Server.ID.generate(), message: "Oh noes", operation: :create, entity_id: Entity.ID.generate() } - assert {:ok, %LogForge{}} = LogForge.create(@forger_file, params_edit) - assert {:ok, %LogForge{}} = LogForge.create(@forger_file, params_create) + assert {:ok, %LogForge{}} = LogForge.create(forger_file(), params_edit) + assert {:ok, %LogForge{}} = LogForge.create(forger_file(), params_create) end end @@ -171,7 +173,7 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do describe "create_objective/1" do test "returns a higher objective the higher the forger version is" do data = %LogForge{ - target_server_id: Server.ID.generate(), + target_id: Server.ID.generate(), entity_id: Entity.ID.generate(), operation: :create, message: "Digital style", @@ -192,9 +194,9 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do describe "ProcessView.render/4 for edit operation" do test "both partial and full processes returns target_log_id" do {process, %{target_entity_id: victim_entity}} = log_forger_process(:edit) - data = process.process_data + data = process.data - victim_server = process.target_server_id + victim_server = process.target_id attacker_entity = process.source_entity_id # Victim rendering Log process on her own server. Partial access. 
@@ -224,10 +226,10 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do describe "ProcessView.render/4 for create operation" do test "both partial and full process adds no complement" do {process, _} = log_forger_process(:create) - data = process.process_data + data = process.data attacker_server = process.gateway_id - victim_server = process.target_server_id + victim_server = process.target_id attacker_entity = process.source_entity_id third_entity = Entity.ID.generate() @@ -254,13 +256,13 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do db_create = ProcessHelper.raw_get(process_create.process_id) db_edit = ProcessHelper.raw_get(process_edit.process_id) - serialized_create = Processable.after_read_hook(db_create.process_data) - serialized_edit = Processable.after_read_hook(db_edit.process_data) + serialized_create = Processable.after_read_hook(db_create.data) + serialized_edit = Processable.after_read_hook(db_edit.data) # Create process has `target_log_id` equals nil refute serialized_create.target_log_id assert %Entity.ID{} = serialized_create.entity_id - assert %Server.ID{} = serialized_create.target_server_id + assert %Server.ID{} = serialized_create.target_id assert serialized_create.operation == :create assert serialized_create.message assert serialized_create.version @@ -268,13 +270,12 @@ defmodule Helix.Software.Model.SoftwareType.LogForgeTest do # Edit has valid `target_log_id` assert %Entity.ID{} = serialized_edit.entity_id assert %Log.ID{} = serialized_edit.target_log_id - assert %Server.ID{} = serialized_edit.target_server_id + assert %Server.ID{} = serialized_edit.target_id assert serialized_edit.operation == :edit assert serialized_edit.message assert serialized_edit.version - TOPHelper.top_stop(process_create.gateway_id) - TOPHelper.top_stop(process_edit.gateway_id) + TOPHelper.top_stop() end end diff --git a/test/software/process/cracker/bruteforce_test.exs b/test/software/process/cracker/bruteforce_test.exs index 4ce9fc7d..cb938d86 100644 --- a/test/software/process/cracker/bruteforce_test.exs +++ b/test/software/process/cracker/bruteforce_test.exs @@ -45,12 +45,12 @@ defmodule Helix.Software.Process.Cracker.BruteforceTest do # Process data is correct assert process.connection_id assert process.file_id == file.file_id - assert process.process_type == "cracker_bruteforce" + assert process.type == :cracker_bruteforce assert process.gateway_id == source_server.server_id assert process.source_entity_id == source_entity.entity_id - assert process.target_server_id == target_server.server_id + assert process.target_id == target_server.server_id assert process.network_id == target_nip.network_id - assert process.process_data.target_server_ip == target_nip.ip + assert process.data.target_server_ip == target_nip.ip # CrackerBruteforce connection is correct connection = TunnelQuery.fetch_connection(process.connection_id) @@ -64,7 +64,6 @@ defmodule Helix.Software.Process.Cracker.BruteforceTest do assert tunnel.destination_id == target_server.server_id assert tunnel.network_id == target_nip.network_id - # :timer.sleep(100) TOPHelper.top_stop(source_server) CacheHelper.sync_test() end @@ -74,7 +73,7 @@ defmodule Helix.Software.Process.Cracker.BruteforceTest do test "full process for any AT attack_source" do {process, meta} = ProcessSetup.process(fake_server: true, type: :bruteforce) - data = process.process_data + data = process.data server_id = process.gateway_id attacker_id = meta.source_entity_id @@ -91,27 +90,31 @@ defmodule 
Helix.Software.Process.Cracker.BruteforceTest do ProcessViewHelper.assert_keys(pview_attacker, :full) ProcessViewHelper.assert_keys(pview_victim, :full) ProcessViewHelper.assert_keys(pview_third, :full) + + TOPHelper.top_stop() end test "full process for attacker AT attack_target" do {process, %{source_entity_id: entity_id}} = ProcessSetup.process(fake_server: true, type: :bruteforce) - data = process.process_data - server_id = process.target_server_id + data = process.data + server_id = process.target_id # `entity` is the one who started the process, and is listing at the # victim server, so `entity` has full access to the process. rendered = ProcessView.render(data, process, server_id, entity_id) ProcessViewHelper.assert_keys(rendered, :full) + + TOPHelper.top_stop() end test "partial process for third AT attack_target" do {process, _} = ProcessSetup.process(fake_server: true, type: :bruteforce) - data = process.process_data - server_id = process.target_server_id + data = process.data + server_id = process.target_id entity_id = Entity.ID.generate() # `entity` is unrelated to the process, and it's being rendering on the @@ -119,14 +122,16 @@ defmodule Helix.Software.Process.Cracker.BruteforceTest do rendered = ProcessView.render(data, process, server_id, entity_id) ProcessViewHelper.assert_keys(rendered, :partial) + + TOPHelper.top_stop() end test "partial process for victim AT attack_target" do {process, %{target_entity_id: entity_id}} = ProcessSetup.process(fake_server: true, type: :bruteforce) - data = process.process_data - server_id = process.target_server_id + data = process.data + server_id = process.target_id # `entity` is the victim, owner of the server receiving the process. # She's rendering at her own server, but she did not start the process, @@ -134,6 +139,8 @@ defmodule Helix.Software.Process.Cracker.BruteforceTest do rendered = ProcessView.render(data, process, server_id, entity_id) ProcessViewHelper.assert_keys(rendered, :partial) + + TOPHelper.top_stop() end end @@ -143,9 +150,11 @@ defmodule Helix.Software.Process.Cracker.BruteforceTest do db_process = ProcessHelper.raw_get(process.process_id) - serialized = Processable.after_read_hook(db_process.process_data) + serialized = Processable.after_read_hook(db_process.data) assert serialized.target_server_ip + + TOPHelper.top_stop() end end end diff --git a/test/software/process/file/transfer_test.exs b/test/software/process/file/transfer_test.exs index 1eea28c3..14162b9b 100644 --- a/test/software/process/file/transfer_test.exs +++ b/test/software/process/file/transfer_test.exs @@ -2,19 +2,20 @@ defmodule Helix.Software.Process.File.TransferTest do use Helix.Test.Case.Integration - import Helix.Test.Process.Macros - alias Helix.Process.Model.Processable alias Helix.Software.Model.Storage alias Helix.Software.Process.File.Transfer, as: FileTransferProcess alias Helix.Software.Event.File.Transfer.Aborted, as: FileTransferAbortedEvent + alias Helix.Test.Network.Helper, as: NetworkHelper alias Helix.Test.Process.Helper, as: ProcessHelper alias Helix.Test.Process.Setup, as: ProcessSetup alias Helix.Test.Process.TOPHelper alias Helix.Test.Software.Setup, as: SoftwareSetup + @internet_id NetworkHelper.internet_id() + describe "Process Kill" do test "aborted event is emitted (download)" do {process, _} = SoftwareSetup.Flow.file_transfer(:download) @@ -24,7 +25,7 @@ defmodule Helix.Software.Process.File.TransferTest do assert %FileTransferAbortedEvent{} = event assert event.reason == :killed assert event.to_server_id == 
process.gateway_id - assert event.from_server_id == process.target_server_id + assert event.from_server_id == process.target_id TOPHelper.top_stop(process.gateway_id) end @@ -36,7 +37,7 @@ defmodule Helix.Software.Process.File.TransferTest do assert %FileTransferAbortedEvent{} = event assert event.reason == :killed - assert event.to_server_id == process.target_server_id + assert event.to_server_id == process.target_id assert event.from_server_id == process.gateway_id TOPHelper.top_stop(process.gateway_id) @@ -49,11 +50,13 @@ defmodule Helix.Software.Process.File.TransferTest do db_process = ProcessHelper.raw_get(process) - serialized = Processable.after_read_hook(db_process.process_data) + serialized = Processable.after_read_hook(db_process.data) assert %Storage.ID{} = serialized.destination_storage_id assert is_atom(serialized.connection_type) assert is_atom(serialized.type) + + TOPHelper.top_stop() end defp transfer_process do @@ -64,21 +67,47 @@ defmodule Helix.Software.Process.File.TransferTest do end end - describe "Process.Objective" do + describe "Process.Resourceable" do test "download uses dlk" do {file, _} = SoftwareSetup.file() - resources = FileTransferProcess.objective(%{type: :download, file: file}) + resources = + FileTransferProcess.resources( + %{type: :download, file: file, network_id: @internet_id} + ) + + # Uses DLK on gateway, ULK on remote + assert resources.l_dynamic == [:dlk] + assert resources.r_dynamic == [:ulk] - assert_objective resources, {:dlk, file.file_size} + # Objective depends on file size + assert resources.objective.dlk[@internet_id] == file.file_size + refute Map.has_key?(resources.objective, :ulk) + + # Uses RAM while paused and running + assert resources.static.running.ram + assert resources.static.paused.ram end test "upload uses ulk" do {file, _} = SoftwareSetup.file() - resources = FileTransferProcess.objective(%{type: :upload, file: file}) + resources = + FileTransferProcess.resources( + %{type: :upload, file: file, network_id: @internet_id} + ) + + # Uses ULK on gateway, DLK on remote + assert resources.l_dynamic == [:ulk] + assert resources.r_dynamic == [:dlk] + + # Objective depends on file size + assert resources.objective.ulk[@internet_id] == file.file_size + refute Map.has_key?(resources.objective, :dlk) - assert_objective resources, {:ulk, file.file_size} + # Uses RAM while paused and running + assert resources.static.running.ram + assert resources.static.paused.ram end end diff --git a/test/software/public/pftp_test.exs b/test/software/public/pftp_test.exs index 5aac262f..a084426a 100644 --- a/test/software/public/pftp_test.exs +++ b/test/software/public/pftp_test.exs @@ -23,10 +23,10 @@ defmodule Helix.Software.Public.PFTPTest do PFTPPublic.download(gateway, destination, storage, file) assert process.gateway_id == gateway.server_id - assert process.target_server_id == pftp.server_id + assert process.target_id == pftp.server_id assert process.file_id == file.file_id - assert process.process_type == "file_download" - assert process.process_data.connection_type == :public_ftp + assert process.type == :file_download + assert process.data.connection_type == :public_ftp TOPHelper.top_stop(gateway) end diff --git a/test/software/public/software_test.exs b/test/software/public/software_test.exs index 96ccfb07..ea17e62c 100644 --- a/test/software/public/software_test.exs +++ b/test/software/public/software_test.exs @@ -36,11 +36,11 @@ defmodule Helix.Software.Public.FileTest do assert process.connection_id assert process.gateway_id == 
source_server.server_id - assert process.target_server_id == target_server.server_id + assert process.target_id == target_server.server_id assert process.network_id == target_nip.network_id assert process.file_id == cracker.file_id assert process.source_entity_id == source_entity.entity_id - assert process.process_data.target_server_ip == target_nip.ip + assert process.data.target_server_ip == target_nip.ip # :timer.sleep(100) TOPHelper.top_stop(source_server.server_id) @@ -66,9 +66,9 @@ defmodule Helix.Software.Public.FileTest do FilePublic.download(gateway, destination, tunnel, storage, file) assert process.file_id == file.file_id - assert process.process_type == "file_download" - assert process.process_data.connection_type == :ftp - assert process.process_data.type == :download + assert process.type == :file_download + assert process.data.connection_type == :ftp + assert process.data.type == :download TOPHelper.top_stop(gateway) end diff --git a/test/software/websocket/requests/file/download_test.exs b/test/software/websocket/requests/file/download_test.exs index 95bc0b9c..daf550b9 100644 --- a/test/software/websocket/requests/file/download_test.exs +++ b/test/software/websocket/requests/file/download_test.exs @@ -132,7 +132,7 @@ defmodule Helix.Software.Websocket.Requests.File.DownloadTest do assert request.meta.process.process_id assert request.meta.process.file_id == file.file_id assert request.meta.process.gateway_id == gateway.server_id - assert request.meta.process.target_server_id == destination.server_id + assert request.meta.process.target_id == destination.server_id TOPHelper.top_stop(gateway) end diff --git a/test/software/websocket/requests/pftp/file/download_test.exs b/test/software/websocket/requests/pftp/file/download_test.exs index 833260e7..1ab74136 100644 --- a/test/software/websocket/requests/pftp/file/download_test.exs +++ b/test/software/websocket/requests/pftp/file/download_test.exs @@ -150,7 +150,7 @@ defmodule Helix.Software.Websocket.Requests.PFTP.File.DownloadTest do process = request.meta.process assert process.file_id == file.file_id assert process.gateway_id == gateway.server_id - assert process.target_server_id == destination.server_id + assert process.target_id == destination.server_id TOPHelper.top_stop(gateway) end diff --git a/test/support/case/integration.ex b/test/support/case/integration.ex index e30c1ba5..b3d3e31f 100644 --- a/test/support/case/integration.ex +++ b/test/support/case/integration.ex @@ -7,6 +7,8 @@ defmodule Helix.Test.Case.Integration do @moduletag :integration setup do + # IO.inspect(__ENV__.module) + # IO.inspect(self()) repos = Application.get_env(:helix, :ecto_repos) Enum.each(repos, fn repo -> :ok = Ecto.Adapters.SQL.Sandbox.checkout(repo) diff --git a/test/support/event/setup.ex b/test/support/event/setup.ex index f1c31426..ea51fe35 100644 --- a/test/support/event/setup.ex +++ b/test/support/event/setup.ex @@ -1,63 +1,3 @@ defmodule Helix.Test.Event.Setup do - - alias Helix.Entity.Model.Entity - alias Helix.Server.Model.Server - - alias Helix.Process.Event.Process.Created, as: ProcessCreatedEvent - - alias HELL.TestHelper.Random - alias Helix.Test.Process.Setup, as: ProcessSetup - - ############################################################################## - # Process events - ############################################################################## - - @doc """ - Accepts: - - - (gateway :: Server.ID, target :: Server.ID, gateway_entity :: Entity.ID \ - target_entity_id :: Entity.ID), in which case a fake process with 
random ID - is generated - """ - def process_created(gateway_id, target_id, gateway_entity, target_entity) do - # Generates a random process on the given server(s) - process_opts = [gateway_id: gateway_id, target_id: target_id] - {process, _} = ProcessSetup.fake_process(process_opts) - - %ProcessCreatedEvent{ - process: process, - gateway_id: gateway_id, - target_id: target_id, - gateway_entity_id: gateway_entity, - target_entity_id: target_entity, - gateway_ip: Random.ipv4(), - target_ip: Random.ipv4() - } - end - - @doc """ - Opts: - - gateway_id: Specify the gateway id. - - target_id: Specify the target id. - - gateway_entity_id: Specify the gateway entity id. - - target_entity_id: Specify the target entity id. - - Note the generated process is fake (does not exist on DB). - """ - def process_created(type, opts \\ []) - def process_created(:single_server, opts) do - gateway_id = Access.get(opts, :gateway_id, Server.ID.generate()) - gateway_entity = Access.get(opts, :gateway_entity_id, Entity.ID.generate()) - - process_created(gateway_id, gateway_id, gateway_entity, gateway_entity) - end - def process_created(:multi_server, opts) do - gateway_id = Access.get(opts, :gateway_id, Server.ID.generate()) - gateway_entity = Access.get(opts, :gateway_entity_id, Entity.ID.generate()) - - target_id = Access.get(opts, :target_id, Server.ID.generate()) - target_entity = Access.get(opts, :target_entity_id, Entity.ID.generate()) - - process_created(gateway_id, target_id, gateway_entity, target_entity) - end + # Placeholder for child setups end diff --git a/test/support/event/setup/process.ex b/test/support/event/setup/process.ex new file mode 100644 index 00000000..17753f4d --- /dev/null +++ b/test/support/event/setup/process.ex @@ -0,0 +1,29 @@ +defmodule Helix.Test.Event.Setup.Process do + + alias Helix.Process.Event.Process.Created, as: ProcessCreatedEvent + alias Helix.Server.Model.Server + + alias HELL.TestHelper.Random + alias Helix.Test.Process.Setup, as: ProcessSetup + + def created, + do: created(Server.ID.generate(), Server.ID.generate()) + + def created(gateway_id), + do: created(gateway_id, gateway_id) + + def created(gateway_id, target_id) do + # Generates a random process on the given server(s) + process_opts = [gateway_id: gateway_id, target_id: target_id] + {process, _} = ProcessSetup.fake_process(process_opts) + + %ProcessCreatedEvent{ + confirmed: true, + process: process, + gateway_id: gateway_id, + target_id: target_id, + gateway_ip: Random.ipv4(), + target_ip: Random.ipv4() + } + end +end diff --git a/test/support/event/setup/software.ex b/test/support/event/setup/software.ex index 47007156..f3829ef5 100644 --- a/test/support/event/setup/software.ex +++ b/test/support/event/setup/software.ex @@ -46,12 +46,12 @@ defmodule Helix.Test.Event.Setup.Software do end def bruteforce_conclusion(process = %Process{}), - do: BruteforceProcessedEvent.new(process, process.process_data) + do: BruteforceProcessedEvent.new(process, process.data) def bruteforce_conclusion do %BruteforceProcessedEvent{ source_entity_id: Entity.ID.generate(), network_id: @internet, - target_server_id: Server.ID.generate(), + target_id: Server.ID.generate(), target_server_ip: Random.ipv4() } end @@ -112,7 +112,7 @@ defmodule Helix.Test.Event.Setup.Software do TOPHelper.top_stop(process.gateway_id) new_file = - copy_file(process.file_id, process.process_data.destination_storage_id) + copy_file(process.file_id, process.data.destination_storage_id) event = generate_event(event, type, {:completed, new_file}) {event, %{}} 
diff --git a/test/support/macros.ex b/test/support/macros.ex new file mode 100644 index 00000000..6aeddc63 --- /dev/null +++ b/test/support/macros.ex @@ -0,0 +1,9 @@ +defmodule Helix.Test.Macros do + + defmacro assert_map(a, b, skip: skip) do + skip = is_list(skip) && skip || [skip] + quote bind_quoted: binding() do + assert Map.drop(a, skip) == Map.drop(b, skip) + end + end +end diff --git a/test/support/process/fake_process.ex b/test/support/process/fake_process.ex new file mode 100644 index 00000000..8dfb1752 --- /dev/null +++ b/test/support/process/fake_process.ex @@ -0,0 +1,114 @@ +defmodule Helix.Test.Process do + + import Helix.Process + + process FakeFileTransfer do + + alias HELL.TestHelper.Random + + process_struct [:file_id] + + def new do + %__MODULE__{ + file_id: Random.number() + } + end + + def new(%{file_id: file_id}) do + %__MODULE__{ + file_id: file_id + } + end + + processable do + on_completion(_process, _data) do + {:delete, []} + end + end + + resourceable do + + @type params :: term + @type factors :: term + + get_factors(_) do + :noop + end + + dlk(%{type: :download}) do + 100 + end + + ulk(%{type: :upload}) do + 100 + end + + dlk(%{type: :upload}) + ulk(%{type: :download}) + + def dynamic(%{type: :download}) do + [:dlk] + end + + def dynamic(%{type: :upload}) do + [:ulk] + end + end + + executable do + + @type params :: term + @type meta :: term + + resources(_, _, _, _) do + %{} + end + end + end + + process FakeDefaultProcess do + + process_struct [:foo] + + def new do + %__MODULE__{ + foo: :bar + } + end + + # Inherits default Processable callbacks + processable do + on_completion(_process, _data) do + {:delete, []} + end + end + + resourceable do + + @type params :: term + @type factors :: term + + get_factors(_) do + :noop + end + + cpu(_) do + 5000 + end + + def dynamic(_) do + [:cpu] + end + end + + executable do + + @type params :: term + @type meta :: term + + resources(_, _, _, _) do + %{} + end + end + end +end diff --git a/test/support/process/helper/top.ex b/test/support/process/helper/top.ex new file mode 100644 index 00000000..a373315f --- /dev/null +++ b/test/support/process/helper/top.ex @@ -0,0 +1,54 @@ +defmodule Helix.Test.Process.TOPHelper do + + alias Ecto.Changeset + alias Helix.Process.Model.Process + alias Helix.Process.Model.Processable + alias Helix.Process.Query.Process, as: ProcessQuery + alias Helix.Process.Repo, as: ProcessRepo + + alias Helix.Process.Action.TOP, as: TOPAction + + @doc """ + Stops the TOP of a server. + """ + def top_stop(_), + do: top_stop() + def top_stop, + do: GenServer.stop(:event_timer) + + @doc """ + Completes the process, emitting the related events and removing from the db. + """ + def force_completion(process_id = %Process.ID{}) do + process_id + |> ProcessQuery.fetch() + |> force_completion() + end + def force_completion(process = %Process{}) do + # Update the DB process entry, now it has magically reached its objective + process + |> Changeset.change() + |> Changeset.put_change(:allocated, %{}) # Avoids `:waiting_alloc` status + |> Changeset.put_change(:processed, process.objective) + |> ProcessRepo.update() + + # Force a recalque on the server + TOPAction.recalque(process) + end + + @doc """ + Runs the logic that would be run if the process was completed, but does not + actually modify the database, nor emit the conclusion events.
+ """ + def soft_complete(process = %Process{}) do + Processable.complete(process.data, process) + end + + @doc """ + Simulates the `kill` of a process, executing the `Processable` relevant code. + It won't update the status on DB, nor emit events about the kill. + """ + def soft_kill(process = %Process{}, reason \\ :normal) do + Processable.kill(process.data, process, reason) + end +end diff --git a/test/support/process/helper/top/resources.ex b/test/support/process/helper/top/resources.ex new file mode 100644 index 00000000..b7826c2e --- /dev/null +++ b/test/support/process/helper/top/resources.ex @@ -0,0 +1,76 @@ +defmodule Helix.Test.Process.Helper.TOP.Resources do + + alias HELL.TestHelper.Random + + def split_usage(total_resources, num_procs, network_id \\ nil) do + if total_resources do + ulk = Map.get(total_resources.ulk, network_id) + dlk = Map.get(total_resources.dlk, network_id) + + { + div(total_resources.cpu, num_procs + 1), + div(total_resources.ram, num_procs + 1), + ulk && div(ulk, num_procs + 1) || 0, + dlk && div(dlk, num_procs + 1) || 0, + } + else + {10_000, 10_000, 10_000, 10_000} + end + end + + def calculate_static(opts, {max_cpu, max_ram, max_ulk, max_dlk}) do + static_cpu = Keyword.get(opts, :static_cpu, Random.number(0..max_cpu)) + static_ram = Keyword.get(opts, :static_ram, Random.number(0..max_ram)) + static_ulk = Keyword.get(opts, :static_ulk, Random.number(0..max_ulk)) + static_dlk = Keyword.get(opts, :static_dlk, Random.number(0..max_dlk)) + + paused_static = + if Random.number(0..1) == 1 do + %{ + cpu: Random.number(0..1) == 1 && div(static_cpu, 2) || 0, + ram: Random.number(0..1) == 1 && div(static_ram, 2) || 0, + dlk: Random.number(0..1) == 1 && div(static_dlk, 2) || 0, + ulk: Random.number(0..1) == 1 && div(static_ulk, 2) || 0 + } + else + %{} + end + + running_static = %{ + cpu: static_cpu, + ram: static_ram, + dlk: static_dlk, + ulk: static_ulk + } + + %{ + running: running_static, + paused: paused_static + } + end + + def objective(opts \\ []) do + if not is_nil(opts[:dlk]) or not is_nil(opts[:ulk]) do + opts[:network_id] || raise "I need a network_id too!" 
+ end + + # %{} (empty) if not defined + ulk = opts[:ulk] && Map.put(%{}, opts[:network_id], opts[:ulk]) || %{} + dlk = opts[:dlk] && Map.put(%{}, opts[:network_id], opts[:dlk]) || %{} + + %{ + cpu: opts[:cpu] || 999_999, + ram: opts[:ram] || 999_999, + dlk: dlk, + ulk: ulk + } + end + + def random_static(_opts \\ []) do + # Guaranteed to be random + %{ + paused: %{ram: 10}, + running: %{ram: 20} + } + end +end diff --git a/test/support/process/view_helper.ex b/test/support/process/helper/view.ex similarity index 100% rename from test/support/process/view_helper.ex rename to test/support/process/helper/view.ex diff --git a/test/support/process/macros.ex b/test/support/process/macros.ex index 8a0750a8..7b2275a6 100644 --- a/test/support/process/macros.ex +++ b/test/support/process/macros.ex @@ -1,22 +1,30 @@ defmodule Helix.Test.Process.Macros do - defmacro assert_objective(objective, resources) do - quote do - resources = - if is_tuple(unquote(resources)) do - [unquote(resources)] - else - unquote(resources) - end + defmacro assert_resource(res1, res2) do + quote bind_quoted: binding() do + if is_map(res1) do + Enum.each(res1, fn {key, v1} -> - acc_objective = - Enum.reduce(resources, %{}, fn {resource, usage}, acc -> - assert Map.get(unquote(objective), resource) == usage + v2 = is_map(res2) && Map.get(res2, key) || res2 - Map.put(acc, resource, usage) + assert_in_delta v1, v2, 2 end) + else + assert_in_delta res1, res2, 2 + end - assert acc_objective == unquote(objective) + # res1 = is_map(res1) && res1.total || res1 + # res2 = is_map(res2) && res2.total || res2 + + end + end + + defmacro refute_resource(res1, res2) do + quote bind_quoted: binding() do + res1 = is_map(res1) && res1.total || res1 + res2 = is_map(res2) && res2.total || res2 + + refute_in_delta res1, res2, 2 end end end diff --git a/test/support/process/process_type_example.ex b/test/support/process/process_type_example.ex deleted file mode 100644 index 641cec8a..00000000 --- a/test/support/process/process_type_example.ex +++ /dev/null @@ -1,56 +0,0 @@ -# TODO: Delete this ? Yes please. 
- -defmodule Helix.Test.Process.ProcessableExample do - - defstruct [] - - defimpl Helix.Process.Model.Processable do - def dynamic_resources(_), - do: [:cpu, :dlk, :ulk] - def minimum(_), - do: %{} - def kill(_, process, _), - do: {%{Ecto.Changeset.change(process)| action: :delete}, []} - def state_change(_, process, _, :complete) do - process = - process - |> Ecto.Changeset.change() - |> Map.put(:action, :delete) - - {process, []} - end - def state_change(_, process, _, _), - do: {process, []} - def conclusion(data, process), - do: state_change(data, process, :running, :complete) - def after_read_hook(data), - do: data - end -end - -defmodule Helix.Test.Process.StaticProcessableExample do - - defstruct [] - - defimpl Helix.Process.Model.Processable do - def dynamic_resources(_), - do: [] - def minimum(_), - do: %{} - def kill(_, process, _), - do: {%{Ecto.Changeset.change(process)| action: :delete}, []} - def state_change(_, process, _, :complete) do - process = - process - |> Ecto.Changeset.change() - |> Map.put(:action, :delete) - - {process, []} - end - def state_change(_, process, _, _), - do: {process, []} - def conclusion(data, process), - do: state_change(data, process, :running, :complete) - def after_read_hook(data), - do: data end -end diff --git a/test/support/process/setup.ex b/test/support/process/setup.ex index c721d9cf..f023fd42 100644 --- a/test/support/process/setup.ex +++ b/test/support/process/setup.ex @@ -1,7 +1,7 @@ defmodule Helix.Test.Process.Setup do + alias Helix.Process.Internal.Process, as: ProcessInternal alias Helix.Process.Model.Process - alias Helix.Process.Repo, as: ProcessRepo alias Helix.Test.Entity.Setup, as: EntitySetup alias Helix.Test.Network.Helper, as: NetworkHelper @@ -11,8 +11,8 @@ defmodule Helix.Test.Process.Setup do @internet NetworkHelper.internet_id() def process(opts \\ []) do - {process, related} = fake_process(opts) - {:ok, inserted} = ProcessRepo.insert(process) + {_, related = %{params: params}} = fake_process(opts) + {:ok, inserted} = ProcessInternal.create(params) {inserted, related} end @@ -23,7 +23,8 @@ defmodule Helix.Test.Process.Setup do Opts: - gateway_id: - - target_server_id: + - target_id: + - entity_id: source entity id. 
- file_id: - network_id: - connection_id: @@ -34,48 +35,61 @@ defmodule Helix.Test.Process.Setup do Related: source_entity_id :: Entity.id, target_entity_id :: Entity.id """ def fake_process(opts \\ []) do - gateway_id = Access.get(opts, :gateway_id, ServerSetup.id()) - source_entity_id = Access.get(opts, :entity_id, EntitySetup.id()) - {target_server_id, target_entity_id} = + gateway_id = Keyword.get(opts, :gateway_id, ServerSetup.id()) + source_entity_id = Keyword.get(opts, :entity_id, EntitySetup.id()) + {target_id, target_entity_id} = cond do opts[:single_server] -> {gateway_id, source_entity_id} - opts[:target_server_id] -> - {opts[:target_server_id], nil} + opts[:target_id] -> + {opts[:target_id], nil} true -> {ServerSetup.id(), EntitySetup.id()} end - file_id = Access.get(opts, :file_id, nil) - connection_id = Access.get(opts, :connection_id, nil) - network_id = Access.get(opts, :network_id, @internet) + file_id = Keyword.get(opts, :file_id, nil) + connection_id = Keyword.get(opts, :connection_id, nil) + network_id = Keyword.get(opts, :network_id, @internet) meta = %{ source_entity_id: source_entity_id, gateway_id: gateway_id, target_entity_id: target_entity_id, - target_server_id: target_server_id, + target_id: target_id, file_id: file_id, connection_id: connection_id, network_id: network_id } - {process_type, process_data, meta} = + {process_type, process_data, meta, resources} = if opts[:type] do ProcessDataSetup.custom(opts[:type], opts[:data] || [], meta) else ProcessDataSetup.random(meta) end + l_limit = Keyword.get(opts, :l_limit, %{}) + r_limit = Keyword.get(opts, :r_limit, %{}) + + static = Keyword.get(opts, :static, resources.static) + + objective = Keyword.get(opts, :objective, resources.objective) + params = %{ - process_data: process_data, - process_type: process_type, + data: process_data, + type: process_type, gateway_id: meta.gateway_id, source_entity_id: meta.source_entity_id, - target_server_id: meta.target_server_id, + target_id: meta.target_id, file_id: meta.file_id, network_id: meta.network_id, - connection_id: meta.connection_id + connection_id: meta.connection_id, + static: static, + l_limit: l_limit, + r_limit: r_limit, + l_dynamic: resources.l_dynamic, + r_dynamic: resources.r_dynamic, + objective: objective } process = @@ -86,7 +100,8 @@ defmodule Helix.Test.Process.Setup do related = %{ source_entity_id: source_entity_id, - target_entity_id: target_entity_id + target_entity_id: target_entity_id, + params: params } {process, related} diff --git a/test/support/process/data_setup.ex b/test/support/process/setup/data.ex similarity index 75% rename from test/support/process/data_setup.ex rename to test/support/process/setup/data.ex index 3bf587d8..eca12215 100644 --- a/test/support/process/data_setup.ex +++ b/test/support/process/setup/data.ex @@ -28,6 +28,7 @@ defmodule Helix.Test.Process.Data.Setup do alias HELL.TestHelper.Random alias Helix.Test.Log.Helper, as: LogHelper + alias Helix.Test.Process.Helper.TOP, as: TOPHelper @doc """ Chooses a random implementation and uses it. 
Beware that `data_opts`, used by @@ -47,8 +48,8 @@ defmodule Helix.Test.Process.Data.Setup do """ def custom(:file_download, data_opts, meta) do meta = - if meta.gateway_id == meta.target_server_id do - %{meta| target_server_id: Server.ID.generate()} + if meta.gateway_id == meta.target_id do + %{meta| target_id: Server.ID.generate()} else meta end @@ -67,7 +68,18 @@ defmodule Helix.Test.Process.Data.Setup do meta = %{meta| file_id: file_id, connection_id: connection_id} - {"file_download", data, meta} + objective = + TOPHelper.Resources.objective(dlk: 500, network_id: meta.network_id) + + resources = + %{ + l_dynamic: [:dlk], + r_dynamic: [:ulk], + static: TOPHelper.Resources.random_static(), + objective: objective + } + + {:file_download, data, meta, resources} end @doc """ @@ -75,7 +87,7 @@ defmodule Helix.Test.Process.Data.Setup do - storage_id: Set storage_id. """ def custom(:file_upload, data_opts, meta) do - target_id = meta.gateway_id == meta.target_server_id || Server.ID.generate() + target_id = meta.gateway_id == meta.target_id || Server.ID.generate() connection_id = meta.connection_id || Connection.ID.generate() file_id = meta.file_id || File.ID.generate() @@ -91,10 +103,21 @@ defmodule Helix.Test.Process.Data.Setup do %{meta| file_id: file_id, connection_id: connection_id, - target_server_id: target_id + target_id: target_id } - {"file_upload", data, meta} + objective = + TOPHelper.Resources.objective(ulk: 500, network_id: meta.network_id) + + resources = + %{ + l_dynamic: [:ulk], + r_dynamic: [:dlk], + objective: objective, + static: TOPHelper.Resources.random_static() + } + + {:file_upload, data, meta, resources} end @doc """ @@ -117,7 +140,15 @@ defmodule Helix.Test.Process.Data.Setup do data = CrackerBruteforce.new(%{target_server_ip: target_server_ip}) - {"cracker_bruteforce", data, meta} + resources = + %{ + l_dynamic: [:cpu], + r_dynamic: [], + static: TOPHelper.Resources.random_static(), + objective: TOPHelper.Resources.objective(cpu: 500) + } + + {:cracker_bruteforce, data, meta, resources} end @doc """ @@ -129,7 +160,7 @@ defmodule Helix.Test.Process.Data.Setup do All others are automatically derived from process meta data. 
""" def custom(:forge, data_opts, meta) do - target_server_id = meta.target_server_id + target_id = meta.target_id target_log_id = Keyword.get(data_opts, :target_log_id, Log.ID.generate()) entity_id = meta.source_entity_id operation = Keyword.get(data_opts, :operation, :edit) @@ -138,7 +169,7 @@ defmodule Helix.Test.Process.Data.Setup do data = %LogForge{ - target_server_id: target_server_id, + target_id: target_id, entity_id: entity_id, operation: operation, message: message, @@ -152,7 +183,15 @@ defmodule Helix.Test.Process.Data.Setup do data end - {"log_forger", data, meta} + resources = + %{ + l_dynamic: [:cpu], + r_dynamic: [], + static: TOPHelper.Resources.random_static(), + objective: TOPHelper.Resources.objective(cpu: 500) + } + + {:log_forger, data, meta, resources} end defp custom_implementations do diff --git a/test/support/process/setup/top.ex b/test/support/process/setup/top.ex new file mode 100644 index 00000000..772b701c --- /dev/null +++ b/test/support/process/setup/top.ex @@ -0,0 +1,90 @@ +defmodule Helix.Test.Process.Setup.TOP do + + alias Helix.Process.Model.Process + + alias HELL.TestHelper.Random + alias Helix.Test.Network.Helper, as: NetworkHelper + alias Helix.Test.Process.Helper.TOP, as: TOPHelper + + @internet_id NetworkHelper.internet_id() + + def fake_process(opts \\ []) do + num_procs = Keyword.get(opts, :total, 1) + network_id = Keyword.get(opts, :network_id, @internet_id) + + res_usage = + TOPHelper.Resources.split_usage( + opts[:total_resources], num_procs, network_id + ) + + 1..num_procs + |> Enum.map(fn _ -> + gen_fake_process(opts, res_usage) + end) + end + + defp gen_fake_process(opts, res_usage) do + priority = Keyword.get(opts, :priority, 3) + state = Keyword.get(opts, :state, :running) + + network_id = Keyword.get(opts, :network_id, @internet_id) + + l_dynamic = Keyword.get(opts, :l_dynamic, [:cpu, :ram]) + r_dynamic = Keyword.get(opts, :r_dynamic, []) + + static = + if opts[:static] do + opts[:static] + else + TOPHelper.Resources.calculate_static(opts, res_usage) + end + + processed = Keyword.get(opts, :processed, nil) + objective = Keyword.get(opts, :objective, TOPHelper.Resources.objective()) + next_allocation = Keyword.get(opts, :next_allocation, nil) + + l_limit = Keyword.get(opts, :l_limit, %{}) + r_limit = Keyword.get(opts, :r_limit, %{}) + + l_reserved = Keyword.get(opts, :l_reserved, %{}) + r_reserved = Keyword.get(opts, :r_reserved, %{}) + + creation_time = Keyword.get(opts, :creation_time, DateTime.utc_now()) + last_checkpoint_time = Keyword.get(opts, :last_checkpoint_time, nil) + + gateway_id = Keyword.get(opts, :gateway_id, :gateway) + target_id = Keyword.get(opts, :target_id, :target) + local? = Keyword.get(opts, :local?, nil) + + initial = Process.Resources.initial() + l_allocated = Keyword.get(opts, :l_allocated, initial) + r_allocated = Keyword.get(opts, :r_allocated, initial) + + data = Keyword.get(opts, :data, nil) + + %Process{ + process_id: Random.number(), + gateway_id: gateway_id, + target_id: target_id, + data: data, + objective: objective, + processed: processed, + next_allocation: next_allocation, + priority: priority, + state: state, + static: static, + l_dynamic: l_dynamic, + r_dynamic: r_dynamic, + l_limit: l_limit, + r_limit: r_limit, + l_reserved: l_reserved, + r_reserved: r_reserved, + l_allocated: l_allocated, + r_allocated: r_allocated, + network_id: network_id, + creation_time: creation_time, + last_checkpoint_time: last_checkpoint_time, + local?: local? 
+ } + end +end diff --git a/test/support/process/setup/top/resources.ex b/test/support/process/setup/top/resources.ex new file mode 100644 index 00000000..60c2f254 --- /dev/null +++ b/test/support/process/setup/top/resources.ex @@ -0,0 +1,25 @@ +defmodule Helix.Test.Process.Setup.TOP.Resources do + + alias HELL.TestHelper.Random + alias Helix.Test.Network.Helper, as: NetworkHelper + + @internet_id NetworkHelper.internet_id() + + def resources(opts \\ []) do + network_id = Keyword.get(opts, :network_id, @internet_id) + + cpu = Random.number(min: 100, max: 20_000) + ram = Random.number(min: 100, max: 20_000) + dlk = Random.number(min: 100, max: 20_000) + ulk = Random.number(min: 100, max: 20_000) + + total = %{ + cpu: cpu, + ram: ram, + ulk: Map.put(%{}, network_id, ulk), + dlk: Map.put(%{}, network_id, dlk) + } + + {total, %{}} + end +end diff --git a/test/support/process/top_helper.ex b/test/support/process/top_helper.ex deleted file mode 100644 index fd9ac589..00000000 --- a/test/support/process/top_helper.ex +++ /dev/null @@ -1,73 +0,0 @@ -defmodule Helix.Test.Process.TOPHelper do - - alias Ecto.Changeset - alias Helix.Server.Model.Server - alias Helix.Process.Model.Process - alias Helix.Process.Model.Processable - alias Helix.Process.Query.Process, as: ProcessQuery - alias Helix.Process.Repo, as: ProcessRepo - alias Helix.Process.State.TOP.Manager, as: TOPManager - alias Helix.Process.State.TOP.Server, as: TOPServer - - @doc """ - Stops the TOP of a server. - """ - def top_stop(server = %Server{}), - do: top_stop(server.server_id) - def top_stop(server_id = %Server.ID{}) do - server_id - |> TOPManager.get() - |> its_time_to_stop() - - # Sync TOP events. Required after apply. - :timer.sleep(50) - end - - defp its_time_to_stop(nil), - do: :ok - defp its_time_to_stop(pid), - do: GenServer.stop(pid) - - @doc """ - Completes the process, emitting the related events and removing from the db. - """ - def force_completion(process_id = %Process.ID{}) do - process_id - |> ProcessQuery.fetch() - |> force_completion() - end - def force_completion(process = %Process{}) do - finished_process = mark_as_finished(process) - - process.gateway_id - |> TOPManager.get() - |> TOPServer.reset_processes([finished_process]) - - # Sync TOP events. Required after apply. - :timer.sleep(50) - end - - @doc """ - Runs the logic that would be ran if the process was completed, but does not - actually modify the database, nor emit the conclusion events. - """ - def soft_complete(process = %Process{}) do - cs = Changeset.change(process) - Processable.state_change(process.process_data, cs, :running, :complete) - end - - @doc """ - Simulates the `kill` of a process, executing the `Processable` relevant code. - It won't update the status on DB, nor emit events about the kill. 
- """ - def soft_kill(process = %Process{}, reason \\ :normal) do - cs = Changeset.change(process) - Processable.kill(process.process_data, cs, reason) - end - - defp mark_as_finished(process) do - %{process| processed: process.objective} - |> Ecto.Changeset.change() - |> ProcessRepo.update!() - end -end diff --git a/test/support/server/helper.ex b/test/support/server/helper.ex index fb7482eb..35f54760 100644 --- a/test/support/server/helper.ex +++ b/test/support/server/helper.ex @@ -27,4 +27,59 @@ defmodule Helix.Test.Server.Helper do do: get_all_nips(server.server_id) def get_all_nips(server_id = %Server.ID{}), do: CacheQuery.from_server_get_nips!(server_id) + + # HACK + # This is a giant hack because the current Hardware service lacks the proper + # API. It will probably be my next PR.... + def update_server_specs(server = %Server{}, new_specs) do + + alias Helix.Hardware.Query.Motherboard, as: MotherboardQuery + alias Helix.Hardware.Internal.Motherboard, as: MotherboardInternal + alias Helix.Hardware.Repo, as: HardwareRepo + + components = + server.motherboard_id + |> MotherboardQuery.fetch() + |> MotherboardInternal.get_components_ids() + + [comp_cpu] = MotherboardInternal.get_cpus_from_ids(components) + [comp_ram] = MotherboardInternal.get_rams_from_ids(components) + [comp_nic] = MotherboardInternal.get_nics_from_ids(components) + + if new_specs[:cpu] do + comp_cpu + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_change(:clock, new_specs[:cpu]) + |> HardwareRepo.update() + end + + if new_specs[:ram] do + comp_ram + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_change(:ram_size, new_specs[:ram]) + |> HardwareRepo.update() + end + + nc = comp_nic.network_connection + + if not is_nil(new_specs[:dlk]) or not is_nil(new_specs[:ulk]) do + cs = Ecto.Changeset.change(nc) + + cs = + if new_specs[:dlk] do + Ecto.Changeset.put_change(cs, :downlink, new_specs[:dlk]) + else + cs + end + + cs = + if new_specs[:ulk] do + Ecto.Changeset.put_change(cs, :uplink, new_specs[:ulk]) + else + cs + end + + HardwareRepo.update(cs) + end + end end diff --git a/test/support/software/flow.ex b/test/support/software/flow.ex index 24a91192..8e0cadd1 100644 --- a/test/support/software/flow.ex +++ b/test/support/software/flow.ex @@ -38,7 +38,7 @@ defmodule Helix.Test.Software.Setup.Flow do network_id: NetworkHelper.internet_id(), bounce: [], file: file, - process_type: process_type + type: process_type } {:ok, process} = @@ -48,11 +48,11 @@ defmodule Helix.Test.Software.Setup.Flow do end defp get_type_info(:download), - do: {:ftp, "file_download", :download} + do: {:ftp, :file_download, :download} defp get_type_info(:upload), - do: {:ftp, "file_upload", :upload} + do: {:ftp, :file_upload, :upload} defp get_type_info(:pftp_download), - do: {:public_ftp, "file_download", :download} + do: {:public_ftp, :file_download, :download} @doc """ Starts a BruteforceProcess. 
diff --git a/test/universe/bank/action/flow/bank_account_test.exs b/test/universe/bank/action/flow/bank_account_test.exs index f8aba700..60392894 100644 --- a/test/universe/bank/action/flow/bank_account_test.exs +++ b/test/universe/bank/action/flow/bank_account_test.exs @@ -38,10 +38,10 @@ defmodule Helix.Universe.Bank.Action.Flow.BankAccountTest do # Ensure process is valid assert process.gateway_id == gateway.server_id - assert process.target_server_id == acc.atm_id - assert process.process_data.token_id == token.token_id - assert process.process_data.atm_id == acc.atm_id - assert process.process_data.account_number == acc.account_number + assert process.target_id == acc.atm_id + assert process.data.token_id == token.token_id + assert process.data.atm_id == acc.atm_id + assert process.data.account_number == acc.account_number TOPHelper.force_completion(process) diff --git a/test/universe/bank/action/flow/bank_transfer_test.exs b/test/universe/bank/action/flow/bank_transfer_test.exs index 87ad12c7..687cbf79 100644 --- a/test/universe/bank/action/flow/bank_transfer_test.exs +++ b/test/universe/bank/action/flow/bank_transfer_test.exs @@ -26,7 +26,7 @@ defmodule Helix.Universe.Bank.Action.Flow.BankTransferTest do # They see me flowin', they hatin' {:ok, process} = BankTransferFlow.start(acc1, acc2, amount, player, gateway, net) - transfer_id = process.process_data.transfer_id + transfer_id = process.data.transfer_id # Transfer was added to the DB assert BankTransferInternal.fetch(transfer_id) diff --git a/test/universe/bank/event/handler/bank/transfer_test.exs b/test/universe/bank/event/handler/bank/transfer_test.exs index d6568cba..6bce8e4a 100644 --- a/test/universe/bank/event/handler/bank/transfer_test.exs +++ b/test/universe/bank/event/handler/bank/transfer_test.exs @@ -2,7 +2,7 @@ defmodule Helix.Universe.Bank.Event.Handler.Bank.TransferTest do use Helix.Test.Case.Integration - alias Helix.Process.Action.Process, as: ProcessAction + alias Helix.Process.Public.Process, as: ProcessPublic alias Helix.Process.Query.Process, as: ProcessQuery alias Helix.Universe.Bank.Action.Flow.BankTransfer, as: BankTransferFlow alias Helix.Universe.Bank.Internal.BankAccount, as: BankAccountInternal @@ -24,7 +24,7 @@ defmodule Helix.Universe.Bank.Event.Handler.Bank.TransferTest do {:ok, process} = BankTransferFlow.start(acc1, acc2, amount, player, gateway, net) - transfer_id = process.process_data.transfer_id + transfer_id = process.data.transfer_id assert ProcessQuery.fetch(process) assert BankTransferInternal.fetch(transfer_id) @@ -32,9 +32,7 @@ defmodule Helix.Universe.Bank.Event.Handler.Bank.TransferTest do assert 0 == BankAccountInternal.get_balance(acc2) # Kill (abort) - ProcessAction.kill(process, :porquesim) - - :timer.sleep(100) + ProcessPublic.kill(process, :porquesim) # Ensure bank data is consistent refute BankTransferInternal.fetch(transfer_id)