From 83e6b9c3ab2e82c25ed9f730bf812b2acdc3b86e Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 7 Oct 2021 15:55:11 +0200 Subject: [PATCH 01/97] QQ: introduce new machine version (2) This commit is just for the new machine version files and doesn't contain any actual changes to the state machine. --- deps/rabbit/src/rabbit_fifo.erl | 17 +- deps/rabbit/src/rabbit_fifo_client.erl | 2 +- deps/rabbit/src/rabbit_fifo_v1.erl | 2208 ++++++++++++++++++++++++ deps/rabbit/src/rabbit_fifo_v1.hrl | 210 +++ 4 files changed, 2432 insertions(+), 5 deletions(-) create mode 100644 deps/rabbit/src/rabbit_fifo_v1.erl create mode 100644 deps/rabbit/src/rabbit_fifo_v1.hrl diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 89b8141a62b4..e434c8e86dce 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -517,8 +517,8 @@ apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> apply(#{index := Idx} = Meta, #update_config{config = Conf}, State0) -> {State, Reply, Effects} = checkout(Meta, State0, update_config(Conf, State0), []), update_smallest_raft_index(Idx, Reply, State, Effects); -apply(_Meta, {machine_version, 0, 1}, V0State) -> - State = convert_v0_to_v1(V0State), +apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> + State = convert(FromVersion, ToVersion, V0State), {State, ok, []}; apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully @@ -764,10 +764,11 @@ get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> end. -spec version() -> pos_integer(). -version() -> 1. +version() -> 2. which_module(0) -> rabbit_fifo_v0; -which_module(1) -> ?MODULE. +which_module(1) -> rabbit_fifo_v1; +which_module(2) -> ?MODULE. -record(aux_gc, {last_raft_idx = 0 :: ra:index()}). -record(aux, {name :: atom(), @@ -2212,3 +2213,11 @@ notify_decorators_effect(#?MODULE{cfg = #cfg{resource = QName}} = State) -> notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> {mod_call, rabbit_quorum_queue, spawn_notify_decorators, [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. + +convert(To, To, State0) -> + State0; +convert(0, To, State0) -> + convert(1, To, convert_v0_to_v1(State0)); +convert(1, To, State0) -> + %% no conversion yet + convert(2, To, State0). diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 7f2cd55accc1..3f5315de08b2 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -151,7 +151,7 @@ enqueue(Correlation, Msg, 0 -> %% the leader is running the old version enqueue(Correlation, Msg, State0#state{queue_status = go}); - 1 -> + N when is_integer(N) -> %% were running the new version on the leader do sync initialisation %% of enqueuer session Reg = rabbit_fifo:make_register_enqueuer(self()), diff --git a/deps/rabbit/src/rabbit_fifo_v1.erl b/deps/rabbit/src/rabbit_fifo_v1.erl new file mode 100644 index 000000000000..fc585cf6ea39 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_v1.erl @@ -0,0 +1,2208 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. +%% + +-module(rabbit_fifo_v1). + +-behaviour(ra_machine). + +-compile(inline_list_funcs). +-compile(inline). +-compile({no_auto_import, [apply/3]}). + +-include("rabbit_fifo_v1.hrl"). 
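+
+%% Note: this module captures the machine version 1 implementation under its
+%% own name so that rabbit_fifo:which_module/1 can keep dispatching version 1
+%% commands to it, while rabbit_fifo itself now reports version() -> 2.
+%% An illustrative sketch (not code in this module) of how the convert/3
+%% clauses added to rabbit_fifo in this patch chain snapshot upgrades from a
+%% v0 state, where V1 = convert_v0_to_v1(V0):
+%%
+%%   convert(0, 2, V0) -> convert(1, 2, V1)
+%%                     -> convert(2, 2, V1)    %% v1 -> v2: no conversion yet
+%%                     -> V1
+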
+-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([ + init/1, + apply/3, + state_enter/2, + tick/2, + overview/1, + get_checked_out/4, + %% versioning + version/0, + which_module/1, + %% aux + init_aux/1, + handle_aux/6, + % queries + query_messages_ready/1, + query_messages_checked_out/1, + query_messages_total/1, + query_processes/1, + query_ra_indexes/1, + query_consumer_count/1, + query_consumers/1, + query_stat/1, + query_single_active_consumer/1, + query_in_memory_usage/1, + query_peek/2, + query_notify_decorators_info/1, + usage/1, + + zero/1, + + %% misc + dehydrate_state/1, + normalize/1, + + %% protocol helpers + make_enqueue/3, + make_register_enqueuer/1, + make_checkout/3, + make_settle/2, + make_return/2, + make_discard/2, + make_credit/4, + make_purge/0, + make_purge_nodes/1, + make_update_config/1, + make_garbage_collection/0 + ]). + +%% command records representing all the protocol actions that are supported +-record(enqueue, {pid :: option(pid()), + seq :: option(msg_seqno()), + msg :: raw_msg()}). +-record(register_enqueuer, {pid :: pid()}). +-record(checkout, {consumer_id :: consumer_id(), + spec :: checkout_spec(), + meta :: consumer_meta()}). +-record(settle, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(return, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(discard, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(credit, {consumer_id :: consumer_id(), + credit :: non_neg_integer(), + delivery_count :: non_neg_integer(), + drain :: boolean()}). +-record(purge, {}). +-record(purge_nodes, {nodes :: [node()]}). +-record(update_config, {config :: config()}). +-record(garbage_collection, {}). + +-opaque protocol() :: + #enqueue{} | + #register_enqueuer{} | + #checkout{} | + #settle{} | + #return{} | + #discard{} | + #credit{} | + #purge{} | + #purge_nodes{} | + #update_config{} | + #garbage_collection{}. + +-type command() :: protocol() | ra_machine:builtin_command(). +%% all the command types supported by ra fifo + +-type client_msg() :: delivery(). +%% the messages `rabbit_fifo' can send to consumers. + +-opaque state() :: #?MODULE{}. + +-export_type([protocol/0, + delivery/0, + command/0, + credit_mode/0, + consumer_tag/0, + consumer_meta/0, + consumer_id/0, + client_msg/0, + msg/0, + msg_id/0, + msg_seqno/0, + delivery_msg/0, + state/0, + config/0]). + +-spec init(config()) -> state(). +init(#{name := Name, + queue_resource := Resource} = Conf) -> + update_config(Conf, #?MODULE{cfg = #cfg{name = Name, + resource = Resource}}). 
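+
+%% Illustrative example (a sketch; the values below are invented): a config
+%% map accepted by init/1 above and update_config/2 below might look like
+%%
+%%   #{name => Name,
+%%     queue_resource => QResource,
+%%     release_cursor_interval => ?RELEASE_CURSOR_EVERY,
+%%     overflow_strategy => drop_head,        %% or reject_publish
+%%     single_active_consumer_on => false,
+%%     max_length => 10000,
+%%     delivery_limit => 10}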
+ +update_config(Conf, State) -> + DLH = maps:get(dead_letter_handler, Conf, undefined), + BLH = maps:get(become_leader_handler, Conf, undefined), + RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY), + Overflow = maps:get(overflow_strategy, Conf, drop_head), + MaxLength = maps:get(max_length, Conf, undefined), + MaxBytes = maps:get(max_bytes, Conf, undefined), + MaxMemoryLength = maps:get(max_in_memory_length, Conf, undefined), + MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined), + DeliveryLimit = maps:get(delivery_limit, Conf, undefined), + Expires = maps:get(expires, Conf, undefined), + ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of + true -> + single_active; + false -> + competing + end, + Cfg = State#?MODULE.cfg, + RCISpec = {RCI, RCI}, + + LastActive = maps:get(created, Conf, undefined), + State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, + dead_letter_handler = DLH, + become_leader_handler = BLH, + overflow_strategy = Overflow, + max_length = MaxLength, + max_bytes = MaxBytes, + max_in_memory_length = MaxMemoryLength, + max_in_memory_bytes = MaxMemoryBytes, + consumer_strategy = ConsumerStrategy, + delivery_limit = DeliveryLimit, + expires = Expires}, + last_active = LastActive}. + +zero(_) -> + 0. + +% msg_ids are scoped per consumer +% ra_indexes holds all raft indexes for enqueues currently on queue +-spec apply(ra_machine:command_meta_data(), command(), state()) -> + {state(), Reply :: term(), ra_machine:effects()} | + {state(), Reply :: term()}. +apply(Meta, #enqueue{pid = From, seq = Seq, + msg = RawMsg}, State00) -> + apply_enqueue(Meta, From, Seq, RawMsg, State00); +apply(_Meta, #register_enqueuer{pid = Pid}, + #?MODULE{enqueuers = Enqueuers0, + cfg = #cfg{overflow_strategy = Overflow}} = State0) -> + + State = case maps:is_key(Pid, Enqueuers0) of + true -> + %% if the enqueuer exits just echo the overflow state + State0; + false -> + State0#?MODULE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} + end, + Res = case is_over_limit(State) of + true when Overflow == reject_publish -> + reject_publish; + _ -> + ok + end, + {State, Res, [{monitor, process, Pid}]}; +apply(Meta, + #settle{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?MODULE{consumers = Cons0} = State) -> + case Cons0 of + #{ConsumerId := Con0} -> + % need to increment metrics before completing as any snapshot + % states taken need to include them + complete_and_checkout(Meta, MsgIds, ConsumerId, + Con0, [], State); + _ -> + {State, ok} + + end; +apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?MODULE{consumers = Cons0} = State0) -> + case Cons0 of + #{ConsumerId := Con0} -> + Discarded = maps:with(MsgIds, Con0#consumer.checked_out), + Effects = dead_letter_effects(rejected, Discarded, State0, []), + complete_and_checkout(Meta, MsgIds, ConsumerId, Con0, + Effects, State0); + _ -> + {State0, ok} + end; +apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?MODULE{consumers = Cons0} = State) -> + case Cons0 of + #{ConsumerId := #consumer{checked_out = Checked0}} -> + Returned = maps:with(MsgIds, Checked0), + return(Meta, ConsumerId, Returned, [], State); + _ -> + {State, ok} + end; +apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, + drain = Drain, consumer_id = ConsumerId}, + #?MODULE{consumers = Cons0, + service_queue = ServiceQueue0, + waiting_consumers = Waiting0} = State0) -> + case Cons0 of + #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} -> + %% this can go below 0 when 
credit is reduced + C = max(0, RemoteDelCnt + NewCredit - DelCnt), + %% grant the credit + Con1 = Con0#consumer{credit = C}, + ServiceQueue = maybe_queue_consumer(ConsumerId, Con1, + ServiceQueue0), + Cons = maps:put(ConsumerId, Con1, Cons0), + {State1, ok, Effects} = + checkout(Meta, State0, + State0#?MODULE{service_queue = ServiceQueue, + consumers = Cons}, [], false), + Response = {send_credit_reply, messages_ready(State1)}, + %% by this point all checkouts for the updated credit value + %% should be processed so we can evaluate the drain + case Drain of + false -> + %% just return the result of the checkout + {State1, Response, Effects}; + true -> + Con = #consumer{credit = PostCred} = + maps:get(ConsumerId, State1#?MODULE.consumers), + %% add the outstanding credit to the delivery count + DeliveryCount = Con#consumer.delivery_count + PostCred, + Consumers = maps:put(ConsumerId, + Con#consumer{delivery_count = DeliveryCount, + credit = 0}, + State1#?MODULE.consumers), + Drained = Con#consumer.credit, + {CTag, _} = ConsumerId, + {State1#?MODULE{consumers = Consumers}, + %% returning a multi response with two client actions + %% for the channel to execute + {multi, [Response, {send_drained, {CTag, Drained}}]}, + Effects} + end; + _ when Waiting0 /= [] -> + %% there are waiting consuemrs + case lists:keytake(ConsumerId, 1, Waiting0) of + {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} -> + %% the consumer is a waiting one + %% grant the credit + C = max(0, RemoteDelCnt + NewCredit - DelCnt), + Con = Con0#consumer{credit = C}, + State = State0#?MODULE{waiting_consumers = + [{ConsumerId, Con} | Waiting]}, + {State, {send_credit_reply, messages_ready(State)}}; + false -> + {State0, ok} + end; + _ -> + %% credit for unknown consumer - just ignore + {State0, ok} + end; +apply(_, #checkout{spec = {dequeue, _}}, + #?MODULE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> + {State0, {error, {unsupported, single_active_consumer}}}; +apply(#{index := Index, + system_time := Ts, + from := From} = Meta, #checkout{spec = {dequeue, Settlement}, + meta = ConsumerMeta, + consumer_id = ConsumerId}, + #?MODULE{consumers = Consumers} = State00) -> + %% dequeue always updates last_active + State0 = State00#?MODULE{last_active = Ts}, + %% all dequeue operations result in keeping the queue from expiring + Exists = maps:is_key(ConsumerId, Consumers), + case messages_ready(State0) of + 0 -> + update_smallest_raft_index(Index, {dequeue, empty}, State0, + [notify_decorators_effect(State0)]); + _ when Exists -> + %% a dequeue using the same consumer_id isn't possible at this point + {State0, {dequeue, empty}}; + Ready -> + State1 = update_consumer(ConsumerId, ConsumerMeta, + {once, 1, simple_prefetch}, 0, + State0), + {success, _, MsgId, Msg, State2} = checkout_one(Meta, State1), + {State4, Effects1} = case Settlement of + unsettled -> + {_, Pid} = ConsumerId, + {State2, [{monitor, process, Pid}]}; + settled -> + %% immediately settle the checkout + {State3, _, Effects0} = + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), + {State3, Effects0} + end, + {Reply, Effects2} = + case Msg of + {RaftIdx, {Header, empty}} -> + %% TODO add here new log effect with reply + {'$ra_no_reply', + [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | + Effects1]}; + _ -> + {{dequeue, {MsgId, Msg}, Ready-1}, Effects1} + + end, + NotifyEffect = notify_decorators_effect(State4), + case evaluate_limit(Index, false, State0, State4, [NotifyEffect | Effects2]) of + {State, true, Effects} -> + 
update_smallest_raft_index(Index, Reply, State, Effects); + {State, false, Effects} -> + {State, Reply, Effects} + end + end; +apply(#{index := Idx} = Meta, + #checkout{spec = cancel, + consumer_id = ConsumerId}, State0) -> + {State1, Effects1} = cancel_consumer(Meta, ConsumerId, State0, [], + consumer_cancel), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, + consumer_id = {_, Pid} = ConsumerId}, + State0) -> + Priority = get_priority_from_args(ConsumerMeta), + State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, Priority, State0), + checkout(Meta, State0, State1, [{monitor, process, Pid}]); +apply(#{index := Index}, #purge{}, + #?MODULE{ra_indexes = Indexes0, + returns = Returns, + messages = Messages} = State0) -> + Total = messages_ready(State0), + Indexes1 = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0, + [I || {_, {I, _}} <- lqueue:to_list(Messages)]), + Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1, + [I || {_, {I, _}} <- lqueue:to_list(Returns)]), + + State1 = State0#?MODULE{ra_indexes = Indexes, + messages = lqueue:new(), + returns = lqueue:new(), + msg_bytes_enqueue = 0, + prefix_msgs = {0, [], 0, []}, + msg_bytes_in_memory = 0, + msgs_ready_in_memory = 0}, + Effects0 = [garbage_collection], + Reply = {purge, Total}, + {State, _, Effects} = evaluate_limit(Index, false, State0, + State1, Effects0), + update_smallest_raft_index(Index, Reply, State, Effects); +apply(#{index := Idx}, #garbage_collection{}, State) -> + update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); +apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, + #?MODULE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0, + enqueuers = Enqs0} = State0) -> + Node = node(Pid), + %% if the pid refers to an active or cancelled consumer, + %% mark it as suspected and return it to the waiting queue + {State1, Effects0} = + maps:fold(fun({_, P} = Cid, C0, {S0, E0}) + when node(P) =:= Node -> + %% the consumer should be returned to waiting + %% and checked out messages should be returned + Effs = consumer_update_active_effects( + S0, Cid, C0, false, suspected_down, E0), + Checked = C0#consumer.checked_out, + Credit = increase_credit(C0, maps:size(Checked)), + {St, Effs1} = return_all(Meta, S0, Effs, + Cid, C0#consumer{credit = Credit}), + %% if the consumer was cancelled there is a chance it got + %% removed when returning hence we need to be defensive here + Waiting = case St#?MODULE.consumers of + #{Cid := C} -> + Waiting0 ++ [{Cid, C}]; + _ -> + Waiting0 + end, + {St#?MODULE{consumers = maps:remove(Cid, St#?MODULE.consumers), + waiting_consumers = Waiting, + last_active = Ts}, + Effs1}; + (_, _, S) -> + S + end, {State0, []}, Cons0), + WaitingConsumers = update_waiting_consumer_status(Node, State1, + suspected_down), + + %% select a new consumer from the waiting queue and run a checkout + State2 = State1#?MODULE{waiting_consumers = WaitingConsumers}, + {State, Effects1} = activate_next_consumer(State2, Effects0), + + %% mark any enquers as suspected + Enqs = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = suspected_down}; + (_, E) -> E + end, Enqs0), + Effects = [{monitor, node, Node} | Effects1], + checkout(Meta, State0, State#?MODULE{enqueuers = Enqs}, Effects); +apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, + #?MODULE{consumers = Cons0, + enqueuers = 
Enqs0} = State0) -> + %% A node has been disconnected. This doesn't necessarily mean that + %% any processes on this node are down, they _may_ come back so here + %% we just mark them as suspected (effectively deactivated) + %% and return all checked out messages to the main queue for delivery to any + %% live consumers + %% + %% all pids for the disconnected node will be marked as suspected not just + %% the one we got the `down' command for + Node = node(Pid), + + {State, Effects1} = + maps:fold( + fun({_, P} = Cid, #consumer{checked_out = Checked0, + status = up} = C0, + {St0, Eff}) when node(P) =:= Node -> + Credit = increase_credit(C0, map_size(Checked0)), + C = C0#consumer{status = suspected_down, + credit = Credit}, + {St, Eff0} = return_all(Meta, St0, Eff, Cid, C), + Eff1 = consumer_update_active_effects(St, Cid, C, false, + suspected_down, Eff0), + {St, Eff1}; + (_, _, {St, Eff}) -> + {St, Eff} + end, {State0, []}, Cons0), + Enqs = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = suspected_down}; + (_, E) -> E + end, Enqs0), + + % Monitor the node so that we can "unsuspect" these processes when the node + % comes back, then re-issue all monitors and discover the final fate of + % these processes + + Effects = case maps:size(State#?MODULE.consumers) of + 0 -> + [{aux, inactive}, {monitor, node, Node}]; + _ -> + [{monitor, node, Node}] + end ++ Effects1, + checkout(Meta, State0, State#?MODULE{enqueuers = Enqs, + last_active = Ts}, Effects); +apply(Meta, {down, Pid, _Info}, State0) -> + {State, Effects} = handle_down(Meta, Pid, State0), + checkout(Meta, State0, State, Effects); +apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, + enqueuers = Enqs0, + service_queue = _SQ0} = State0) -> + %% A node we are monitoring has come back. 
+ %% If we have suspected any processes of being + %% down we should now re-issue the monitors for them to detect if they're + %% actually down or not + Monitors = [{monitor, process, P} + || P <- suspected_pids_for(Node, State0)], + + Enqs1 = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = up}; + (_, E) -> E + end, Enqs0), + ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0), + %% mark all consumers as up + {State1, Effects1} = + maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc}) + when (node(P) =:= Node) and + (C#consumer.status =/= cancelled) -> + EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId, + C, true, up, EAcc), + {update_or_remove_sub(Meta, ConsumerId, + C#consumer{status = up}, + SAcc), EAcc1}; + (_, _, Acc) -> + Acc + end, {State0, Monitors}, Cons0), + Waiting = update_waiting_consumer_status(Node, State1, up), + State2 = State1#?MODULE{ + enqueuers = Enqs1, + waiting_consumers = Waiting}, + {State, Effects} = activate_next_consumer(State2, Effects1), + checkout(Meta, State0, State, Effects); +apply(_, {nodedown, _Node}, State) -> + {State, ok}; +apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> + {State, Effects} = lists:foldl(fun(Node, {S, E}) -> + purge_node(Meta, Node, S, E) + end, {State0, []}, Nodes), + update_smallest_raft_index(Idx, ok, State, Effects); +apply(#{index := Idx} = Meta, #update_config{config = Conf}, State0) -> + {State, Reply, Effects} = checkout(Meta, State0, update_config(Conf, State0), []), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(_Meta, {machine_version, 0, 1}, V0State) -> + State = convert_v0_to_v1(V0State), + {State, ok, []}; +apply(_Meta, Cmd, State) -> + %% handle unhandled commands gracefully + rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), + {State, ok, []}. + +convert_v0_to_v1(V0State0) -> + V0State = rabbit_fifo_v0:normalize_for_v1(V0State0), + V0Msgs = rabbit_fifo_v0:get_field(messages, V0State), + V1Msgs = lqueue:from_list(lists:sort(maps:to_list(V0Msgs))), + V0Enqs = rabbit_fifo_v0:get_field(enqueuers, V0State), + V1Enqs = maps:map( + fun (_EPid, E) -> + #enqueuer{next_seqno = element(2, E), + pending = element(3, E), + status = element(4, E)} + end, V0Enqs), + V0Cons = rabbit_fifo_v0:get_field(consumers, V0State), + V1Cons = maps:map( + fun (_CId, C0) -> + %% add the priority field + list_to_tuple(tuple_to_list(C0) ++ [0]) + end, V0Cons), + V0SQ = rabbit_fifo_v0:get_field(service_queue, V0State), + V1SQ = priority_queue:from_list([{0, C} || C <- queue:to_list(V0SQ)]), + Cfg = #cfg{name = rabbit_fifo_v0:get_cfg_field(name, V0State), + resource = rabbit_fifo_v0:get_cfg_field(resource, V0State), + release_cursor_interval = rabbit_fifo_v0:get_cfg_field(release_cursor_interval, V0State), + dead_letter_handler = rabbit_fifo_v0:get_cfg_field(dead_letter_handler, V0State), + become_leader_handler = rabbit_fifo_v0:get_cfg_field(become_leader_handler, V0State), + %% TODO: what if policy enabling reject_publish was applied before conversion? 
+ overflow_strategy = drop_head, + max_length = rabbit_fifo_v0:get_cfg_field(max_length, V0State), + max_bytes = rabbit_fifo_v0:get_cfg_field(max_bytes, V0State), + consumer_strategy = rabbit_fifo_v0:get_cfg_field(consumer_strategy, V0State), + delivery_limit = rabbit_fifo_v0:get_cfg_field(delivery_limit, V0State), + max_in_memory_length = rabbit_fifo_v0:get_cfg_field(max_in_memory_length, V0State), + max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State) + }, + + #?MODULE{cfg = Cfg, + messages = V1Msgs, + next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State), + returns = rabbit_fifo_v0:get_field(returns, V0State), + enqueue_count = rabbit_fifo_v0:get_field(enqueue_count, V0State), + enqueuers = V1Enqs, + ra_indexes = rabbit_fifo_v0:get_field(ra_indexes, V0State), + release_cursors = rabbit_fifo_v0:get_field(release_cursors, V0State), + consumers = V1Cons, + service_queue = V1SQ, + prefix_msgs = rabbit_fifo_v0:get_field(prefix_msgs, V0State), + msg_bytes_enqueue = rabbit_fifo_v0:get_field(msg_bytes_enqueue, V0State), + msg_bytes_checkout = rabbit_fifo_v0:get_field(msg_bytes_checkout, V0State), + waiting_consumers = rabbit_fifo_v0:get_field(waiting_consumers, V0State), + msg_bytes_in_memory = rabbit_fifo_v0:get_field(msg_bytes_in_memory, V0State), + msgs_ready_in_memory = rabbit_fifo_v0:get_field(msgs_ready_in_memory, V0State) + }. + +purge_node(Meta, Node, State, Effects) -> + lists:foldl(fun(Pid, {S0, E0}) -> + {S, E} = handle_down(Meta, Pid, S0), + {S, E0 ++ E} + end, {State, Effects}, all_pids_for(Node, State)). + +%% any downs that re not noconnection +handle_down(Meta, Pid, #?MODULE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> + % Remove any enqueuer for the same pid and enqueue any pending messages + % This should be ok as we won't see any more enqueues from this pid + State1 = case maps:take(Pid, Enqs0) of + {#enqueuer{pending = Pend}, Enqs} -> + lists:foldl(fun ({_, RIdx, RawMsg}, S) -> + enqueue(RIdx, RawMsg, S) + end, State0#?MODULE{enqueuers = Enqs}, Pend); + error -> + State0 + end, + {Effects1, State2} = handle_waiting_consumer_down(Pid, State1), + % return checked out messages to main queue + % Find the consumers for the down pid + DownConsumers = maps:keys( + maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)), + lists:foldl(fun(ConsumerId, {S, E}) -> + cancel_consumer(Meta, ConsumerId, S, E, down) + end, {State2, Effects1}, DownConsumers). + +consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = competing}}) -> + fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) -> + consumer_update_active_effects(State, ConsumerId, Consumer, Active, + ActivityStatus, Effects) + end; +consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = single_active}}) -> + fun(_, _, _, _, _, Effects) -> + Effects + end. 
+ +handle_waiting_consumer_down(_Pid, + #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State) -> + {[], State}; +handle_waiting_consumer_down(_Pid, + #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State) -> + {[], State}; +handle_waiting_consumer_down(Pid, + #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = WaitingConsumers0} = State0) -> + % get cancel effects for down waiting consumers + Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end, + WaitingConsumers0), + Effects = lists:foldl(fun ({ConsumerId, _}, Effects) -> + cancel_consumer_effects(ConsumerId, State0, + Effects) + end, [], Down), + % update state to have only up waiting consumers + StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end, + WaitingConsumers0), + State = State0#?MODULE{waiting_consumers = StillUp}, + {Effects, State}. + +update_waiting_consumer_status(Node, + #?MODULE{waiting_consumers = WaitingConsumers}, + Status) -> + [begin + case node(Pid) of + Node -> + {ConsumerId, Consumer#consumer{status = Status}}; + _ -> + {ConsumerId, Consumer} + end + end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers, + Consumer#consumer.status =/= cancelled]. + +-spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). +state_enter(leader, #?MODULE{consumers = Cons, + enqueuers = Enqs, + waiting_consumers = WaitingConsumers, + cfg = #cfg{name = Name, + resource = Resource, + become_leader_handler = BLH}, + prefix_msgs = {0, [], 0, []} + }) -> + % return effects to monitor all current consumers and enqueuers + Pids = lists:usort(maps:keys(Enqs) + ++ [P || {_, P} <- maps:keys(Cons)] + ++ [P || {{_, P}, _} <- WaitingConsumers]), + Mons = [{monitor, process, P} || P <- Pids], + Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], + NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), + FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}], + Effects = Mons ++ Nots ++ NodeMons ++ FHReservation, + case BLH of + undefined -> + Effects; + {Mod, Fun, Args} -> + [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] + end; +state_enter(eol, #?MODULE{enqueuers = Enqs, + consumers = Custs0, + waiting_consumers = WaitingConsumers0}) -> + Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), + WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end, + #{}, WaitingConsumers0), + AllConsumers = maps:merge(Custs, WaitingConsumers1), + [{send_msg, P, eol, ra_event} + || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ + [{aux, eol}, + {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}]; +state_enter(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> + FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, + [FHReservation]; + state_enter(_, _) -> + %% catch all as not handling all states + []. + + +-spec tick(non_neg_integer(), state()) -> ra_machine:effects(). 
+tick(Ts, #?MODULE{cfg = #cfg{name = Name, + resource = QName}, + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes} = State) -> + case is_expired(Ts, State) of + true -> + [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; + false -> + Metrics = {Name, + messages_ready(State), + num_checked_out(State), % checked out + messages_total(State), + query_consumer_count(State), % Consumers + EnqueueBytes, + CheckoutBytes}, + [{mod_call, rabbit_quorum_queue, + handle_tick, [QName, Metrics, all_nodes(State)]}] + end. + +-spec overview(state()) -> map(). +overview(#?MODULE{consumers = Cons, + enqueuers = Enqs, + release_cursors = Cursors, + enqueue_count = EnqCount, + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes, + ra_indexes = Indexes, + cfg = Cfg} = State) -> + Conf = #{name => Cfg#cfg.name, + resource => Cfg#cfg.resource, + release_cursor_interval => Cfg#cfg.release_cursor_interval, + dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler, + max_length => Cfg#cfg.max_length, + max_bytes => Cfg#cfg.max_bytes, + consumer_strategy => Cfg#cfg.consumer_strategy, + max_in_memory_length => Cfg#cfg.max_in_memory_length, + max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes, + expires => Cfg#cfg.expires, + delivery_limit => Cfg#cfg.delivery_limit + }, + Smallest = rabbit_fifo_index:smallest(Indexes), + #{type => ?MODULE, + config => Conf, + num_consumers => maps:size(Cons), + num_checked_out => num_checked_out(State), + num_enqueuers => maps:size(Enqs), + num_ready_messages => messages_ready(State), + num_messages => messages_total(State), + num_release_cursors => lqueue:len(Cursors), + release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], + release_cursor_enqueue_counter => EnqCount, + enqueue_message_bytes => EnqueueBytes, + checkout_message_bytes => CheckoutBytes, + smallest_raft_index => Smallest}. + +-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> + [delivery_msg()]. +get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> + case Consumers of + #{Cid := #consumer{checked_out = Checked}} -> + [{K, snd(snd(maps:get(K, Checked)))} + || K <- lists:seq(From, To), + maps:is_key(K, Checked)]; + _ -> + [] + end. + +-spec version() -> pos_integer(). +version() -> 1. + +which_module(0) -> rabbit_fifo_v0; +which_module(1) -> ?MODULE. + +-record(aux_gc, {last_raft_idx = 0 :: ra:index()}). +-record(aux, {name :: atom(), + capacity :: term(), + gc = #aux_gc{} :: #aux_gc{}}). + +init_aux(Name) when is_atom(Name) -> + %% TODO: catch specific exception throw if table already exists + ok = ra_machine_ets:create_table(rabbit_fifo_usage, + [named_table, set, public, + {write_concurrency, true}]), + Now = erlang:monotonic_time(micro_seconds), + #aux{name = Name, + capacity = {inactive, Now, 1, 1.0}}. 
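+
+%% Note (a summary of the aux handlers below, not additional behaviour): the
+%% `tick' aux command writes {Name, capacity(Use)} into the `rabbit_fifo_usage'
+%% ETS table created in init_aux/1 above, usage/1 reads that entry back, and
+%% the `eol' aux command deletes it when the queue goes away.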
+ +handle_aux(leader, _, garbage_collection, State, Log, MacState) -> + % ra_log_wal:force_roll_over(ra_log_wal), + {no_reply, force_eval_gc(Log, MacState, State), Log}; +handle_aux(follower, _, garbage_collection, State, Log, MacState) -> + % ra_log_wal:force_roll_over(ra_log_wal), + {no_reply, force_eval_gc(Log, MacState, State), Log}; +handle_aux(_RaState, cast, eval, Aux0, Log, _MacState) -> + {no_reply, Aux0, Log}; +handle_aux(_RaState, cast, Cmd, #aux{capacity = Use0} = Aux0, + Log, _MacState) + when Cmd == active orelse Cmd == inactive -> + {no_reply, Aux0#aux{capacity = update_use(Use0, Cmd)}, Log}; +handle_aux(_RaState, cast, tick, #aux{name = Name, + capacity = Use0} = State0, + Log, MacState) -> + true = ets:insert(rabbit_fifo_usage, + {Name, capacity(Use0)}), + Aux = eval_gc(Log, MacState, State0), + {no_reply, Aux, Log}; +handle_aux(_RaState, cast, eol, #aux{name = Name} = Aux, Log, _) -> + ets:delete(rabbit_fifo_usage, Name), + {no_reply, Aux, Log}; +handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, + Log, #?MODULE{ra_indexes = Indexes}) -> + Ts = case rabbit_fifo_index:smallest(Indexes) of + %% if there are no entries, we return current timestamp + %% so that any previously obtained entries are considered older than this + undefined -> + erlang:system_time(millisecond); + Idx when is_integer(Idx) -> + {{_, _, {_, Meta, _, _}}, _Log1} = ra_log:fetch(Idx, Log), + #{ts := Timestamp} = Meta, + Timestamp + end, + {reply, {ok, Ts}, Aux, Log}; +handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, + Log0, MacState) -> + case rabbit_fifo:query_peek(Pos, MacState) of + {ok, {Idx, {Header, empty}}} -> + %% need to re-hydrate from the log + {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), + #enqueue{msg = Msg} = Cmd, + {reply, {ok, {Header, Msg}}, Aux0, Log}; + {ok, {_Idx, {Header, Msg}}} -> + {reply, {ok, {Header, Msg}}, Aux0, Log0}; + Err -> + {reply, Err, Aux0, Log0} + end. + + +eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, + #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + {Idx, _} = ra_log:last_index_term(Log), + {memory, Mem} = erlang:process_info(self(), memory), + case messages_total(MacState) of + 0 when Idx > LastGcIdx andalso + Mem > ?GC_MEM_LIMIT_B -> + garbage_collect(), + {memory, MemAfter} = erlang:process_info(self(), memory), + rabbit_log:debug("~s: full GC sweep complete. " + "Process memory changed from ~.2fMB to ~.2fMB.", + [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), + AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; + _ -> + AuxState + end. + +force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, + #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + {Idx, _} = ra_log:last_index_term(Log), + {memory, Mem} = erlang:process_info(self(), memory), + case Idx > LastGcIdx of + true -> + garbage_collect(), + {memory, MemAfter} = erlang:process_info(self(), memory), + rabbit_log:debug("~s: full GC sweep complete. " + "Process memory changed from ~.2fMB to ~.2fMB.", + [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), + AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; + false -> + AuxState + end. + +%%% Queries + +query_messages_ready(State) -> + messages_ready(State). + +query_messages_checked_out(#?MODULE{consumers = Consumers}) -> + maps:fold(fun (_, #consumer{checked_out = C}, S) -> + maps:size(C) + S + end, 0, Consumers). + +query_messages_total(State) -> + messages_total(State). 
+ +query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> + Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0), + maps:keys(maps:merge(Enqs, Cons)). + + +query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> + RaIndexes. + +query_consumer_count(#?MODULE{consumers = Consumers, + waiting_consumers = WaitingConsumers}) -> + Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> + Status =/= suspected_down + end, Consumers), + maps:size(Up) + length(WaitingConsumers). + +query_consumers(#?MODULE{consumers = Consumers, + waiting_consumers = WaitingConsumers, + cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) -> + ActiveActivityStatusFun = + case ConsumerStrategy of + competing -> + fun(_ConsumerId, + #consumer{status = Status}) -> + case Status of + suspected_down -> + {false, Status}; + _ -> + {true, Status} + end + end; + single_active -> + SingleActiveConsumer = query_single_active_consumer(State), + fun({Tag, Pid} = _Consumer, _) -> + case SingleActiveConsumer of + {value, {Tag, Pid}} -> + {true, single_active}; + _ -> + {false, waiting} + end + end + end, + FromConsumers = + maps:fold(fun (_, #consumer{status = cancelled}, Acc) -> + Acc; + ({Tag, Pid}, #consumer{meta = Meta} = Consumer, Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun({Tag, Pid}, Consumer), + maps:put({Tag, Pid}, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, Consumers), + FromWaitingConsumers = + lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) -> + Acc; + ({{Tag, Pid}, #consumer{meta = Meta} = Consumer}, Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun({Tag, Pid}, Consumer), + maps:put({Tag, Pid}, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, WaitingConsumers), + maps:merge(FromConsumers, FromWaitingConsumers). + + +query_single_active_consumer(#?MODULE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> + case maps:size(Consumers) of + 0 -> + {error, no_value}; + 1 -> + {value, lists:nth(1, maps:keys(Consumers))}; + _ + -> + {error, illegal_size} + end ; +query_single_active_consumer(_) -> + disabled. + +query_stat(#?MODULE{consumers = Consumers} = State) -> + {messages_ready(State), maps:size(Consumers)}. + +query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes, + msgs_ready_in_memory = Length}) -> + {Length, Bytes}. + +query_peek(Pos, State0) when Pos > 0 -> + case take_next_msg(State0) of + empty -> + {error, no_message_at_pos}; + {{_Seq, IdxMsg}, _State} + when Pos == 1 -> + {ok, IdxMsg}; + {_Msg, State} -> + query_peek(Pos-1, State) + end. + +query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> + MaxActivePriority = maps:fold(fun(_, #consumer{credit = C, + status = up, + priority = P0}, MaxP) when C > 0 -> + P = -P0, + case MaxP of + empty -> P; + MaxP when MaxP > P -> MaxP; + _ -> P + end; + (_, _, MaxP) -> + MaxP + end, empty, Consumers), + IsEmpty = (messages_ready(State) == 0), + {MaxActivePriority, IsEmpty}. + +-spec usage(atom()) -> float(). +usage(Name) when is_atom(Name) -> + case ets:lookup(rabbit_fifo_usage, Name) of + [] -> 0.0; + [{_, Use}] -> Use + end. 
+ +%%% Internal + +messages_ready(#?MODULE{messages = M, + prefix_msgs = {RCnt, _R, PCnt, _P}, + returns = R}) -> + %% prefix messages will rarely have anything in them during normal + %% operations so length/1 is fine here + lqueue:len(M) + lqueue:len(R) + RCnt + PCnt. + +messages_total(#?MODULE{ra_indexes = I, + prefix_msgs = {RCnt, _R, PCnt, _P}}) -> + rabbit_fifo_index:size(I) + RCnt + PCnt. + +update_use({inactive, _, _, _} = CUInfo, inactive) -> + CUInfo; +update_use({active, _, _} = CUInfo, active) -> + CUInfo; +update_use({active, Since, Avg}, inactive) -> + Now = erlang:monotonic_time(micro_seconds), + {inactive, Now, Now - Since, Avg}; +update_use({inactive, Since, Active, Avg}, active) -> + Now = erlang:monotonic_time(micro_seconds), + {active, Now, use_avg(Active, Now - Since, Avg)}. + +capacity({active, Since, Avg}) -> + use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg); +capacity({inactive, _, 1, 1.0}) -> + 1.0; +capacity({inactive, Since, Active, Avg}) -> + use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg). + +use_avg(0, 0, Avg) -> + Avg; +use_avg(Active, Inactive, Avg) -> + Time = Inactive + Active, + moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg). + +moving_average(_Time, _, Next, undefined) -> + Next; +moving_average(Time, HalfLife, Next, Current) -> + Weight = math:exp(Time * math:log(0.5) / HalfLife), + Next * (1 - Weight) + Current * Weight. + +num_checked_out(#?MODULE{consumers = Cons}) -> + maps:fold(fun (_, #consumer{checked_out = C}, Acc) -> + maps:size(C) + Acc + end, 0, Cons). + +cancel_consumer(Meta, ConsumerId, + #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State, + Effects, Reason) -> + cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); +cancel_consumer(Meta, ConsumerId, + #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State, + Effects, Reason) -> + %% single active consumer on, no consumers are waiting + cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); +cancel_consumer(Meta, ConsumerId, + #?MODULE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0} = State0, + Effects0, Reason) -> + %% single active consumer on, consumers are waiting + case maps:is_key(ConsumerId, Cons0) of + true -> + % The active consumer is to be removed + {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0, + Effects0, Reason), + activate_next_consumer(State1, Effects1); + false -> + % The cancelled consumer is not active or cancelled + % Just remove it from idle_consumers + Waiting = lists:keydelete(ConsumerId, 1, Waiting0), + Effects = cancel_consumer_effects(ConsumerId, State0, Effects0), + % A waiting consumer isn't supposed to have any checked out messages, + % so nothing special to do here + {State0#?MODULE{waiting_consumers = Waiting}, Effects} + end. + +consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}}, + ConsumerId, #consumer{meta = Meta}, + Active, ActivityStatus, + Effects) -> + Ack = maps:get(ack, Meta, undefined), + Prefetch = maps:get(prefetch, Meta, undefined), + Args = maps:get(args, Meta, []), + [{mod_call, rabbit_quorum_queue, update_consumer_handler, + [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]} + | Effects]. 
+ +cancel_consumer0(Meta, ConsumerId, + #?MODULE{consumers = C0} = S0, Effects0, Reason) -> + case C0 of + #{ConsumerId := Consumer} -> + {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer, + S0, Effects0, Reason), + + %% The effects are emitted before the consumer is actually removed + %% if the consumer has unacked messages. This is a bit weird but + %% in line with what classic queues do (from an external point of + %% view) + Effects = cancel_consumer_effects(ConsumerId, S, Effects2), + + case maps:size(S#?MODULE.consumers) of + 0 -> + {S, [{aux, inactive} | Effects]}; + _ -> + {S, Effects} + end; + _ -> + %% already removed: do nothing + {S0, Effects0} + end. + +activate_next_consumer(#?MODULE{consumers = Cons, + waiting_consumers = Waiting0} = State0, + Effects0) -> + case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of + Up when map_size(Up) == 0 -> + %% there are no active consumer in the consumer map + case lists:filter(fun ({_, #consumer{status = Status}}) -> + Status == up + end, Waiting0) of + [{NextConsumerId, NextConsumer} | _] -> + %% there is a potential next active consumer + Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), + #?MODULE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextConsumerId, + NextConsumer, + ServiceQueue), + State = State0#?MODULE{consumers = Cons#{NextConsumerId => NextConsumer}, + service_queue = ServiceQueue1, + waiting_consumers = Remaining}, + Effects = consumer_update_active_effects(State, NextConsumerId, + NextConsumer, true, + single_active, Effects0), + {State, Effects}; + [] -> + {State0, [{aux, inactive} | Effects0]} + end; + _ -> + {State0, Effects0} + end. + + + +maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0, Reason) -> + case Reason of + consumer_cancel -> + {update_or_remove_sub(Meta, ConsumerId, + Consumer#consumer{lifetime = once, + credit = 0, + status = cancelled}, + S0), Effects0}; + down -> + {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), + {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers), + last_active = Ts}, + Effects1} + end. + +apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) -> + case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of + {ok, State1, Effects1} -> + State2 = append_to_master_index(RaftIdx, State1), + {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), + {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects}; + {duplicate, State, Effects} -> + {State, ok, Effects} + end. + +drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> + case take_next_msg(State0) of + {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}}, + State1} -> + Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0), + State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}), + State = case Msg of + 'empty' -> State2; + _ -> subtract_in_memory_counts(Header, State2) + end, + Effects = dead_letter_effects(maxlen, #{none => FullMsg}, + State, Effects0), + {State, Effects}; + {{'$prefix_msg', Header}, State1} -> + State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)), + {State2, Effects0}; + {{'$empty_msg', Header}, State1} -> + State2 = add_bytes_drop(Header, State1), + {State2, Effects0}; + empty -> + {State0, Effects0} + end. 
+ +enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages, + next_msg_num = NextMsgNum} = State0) -> + %% the initial header is an integer only - it will get expanded to a map + %% when the next required key is added + Header = message_size(RawMsg), + {State1, Msg} = + case evaluate_memory_limit(Header, State0) of + true -> + % indexed message with header map + {State0, + {RaftIdx, {Header, 'empty'}}}; + false -> + {add_in_memory_counts(Header, State0), + {RaftIdx, {Header, RawMsg}}} % indexed message with header map + end, + State = add_bytes_enqueue(Header, State1), + State#?MODULE{messages = lqueue:in({NextMsgNum, Msg}, Messages), + next_msg_num = NextMsgNum + 1}. + +append_to_master_index(RaftIdx, + #?MODULE{ra_indexes = Indexes0} = State0) -> + State = incr_enqueue_count(State0), + Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), + State#?MODULE{ra_indexes = Indexes}. + + +incr_enqueue_count(#?MODULE{enqueue_count = EC, + cfg = #cfg{release_cursor_interval = {_Base, C}} + } = State0) when EC >= C-> + %% this will trigger a dehydrated version of the state to be stored + %% at this raft index for potential future snapshot generation + %% Q: Why don't we just stash the release cursor here? + %% A: Because it needs to be the very last thing we do and we + %% first needs to run the checkout logic. + State0#?MODULE{enqueue_count = 0}; +incr_enqueue_count(#?MODULE{enqueue_count = C} = State) -> + State#?MODULE{enqueue_count = C + 1}. + +maybe_store_dehydrated_state(RaftIdx, + #?MODULE{cfg = + #cfg{release_cursor_interval = {Base, _}} + = Cfg, + ra_indexes = Indexes, + enqueue_count = 0, + release_cursors = Cursors0} = State0) -> + case rabbit_fifo_index:exists(RaftIdx, Indexes) of + false -> + %% the incoming enqueue must already have been dropped + State0; + true -> + Interval = case Base of + 0 -> 0; + _ -> + Total = messages_total(State0), + min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) + end, + State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = + {Base, Interval}}}, + Dehydrated = dehydrate_state(State), + Cursor = {release_cursor, RaftIdx, Dehydrated}, + Cursors = lqueue:in(Cursor, Cursors0), + State#?MODULE{release_cursors = Cursors} + end; +maybe_store_dehydrated_state(_RaftIdx, State) -> + State. + +enqueue_pending(From, + #enqueuer{next_seqno = Next, + pending = [{Next, RaftIdx, RawMsg} | Pending]} = Enq0, + State0) -> + State = enqueue(RaftIdx, RawMsg, State0), + Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending}, + enqueue_pending(From, Enq, State); +enqueue_pending(From, Enq, #?MODULE{enqueuers = Enqueuers0} = State) -> + State#?MODULE{enqueuers = Enqueuers0#{From => Enq}}. 
+ +maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) -> + % direct enqueue without tracking + State = enqueue(RaftIdx, RawMsg, State0), + {ok, State, Effects}; +maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, + #?MODULE{enqueuers = Enqueuers0} = State0) -> + case maps:get(From, Enqueuers0, undefined) of + undefined -> + State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, + {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo, + RawMsg, Effects0, State1), + {ok, State, [{monitor, process, From} | Effects]}; + #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> + % it is the next expected seqno + State1 = enqueue(RaftIdx, RawMsg, State0), + Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, + State = enqueue_pending(From, Enq, State1), + {ok, State, Effects0}; + #enqueuer{next_seqno = Next, + pending = Pending0} = Enq0 + when MsgSeqNo > Next -> + % out of order delivery + Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0], + Enq = Enq0#enqueuer{pending = lists:sort(Pending)}, + {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, Effects0}; + #enqueuer{next_seqno = Next} when MsgSeqNo =< Next -> + % duplicate delivery - remove the raft index from the ra_indexes + % map as it was added earlier + {duplicate, State0, Effects0} + end. + +snd(T) -> + element(2, T). + +return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, + Effects0, State0) -> + {State1, Effects1} = maps:fold( + fun(MsgId, {Tag, _} = Msg, {S0, E0}) + when Tag == '$prefix_msg'; + Tag == '$empty_msg'-> + return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); + (MsgId, {MsgNum, Msg}, {S0, E0}) -> + return_one(Meta, MsgId, MsgNum, Msg, S0, E0, + ConsumerId) + end, {State0, Effects0}, Returned), + State2 = + case State1#?MODULE.consumers of + #{ConsumerId := Con0} -> + Con = Con0#consumer{credit = increase_credit(Con0, + map_size(Returned))}, + update_or_remove_sub(Meta, ConsumerId, Con, State1); + _ -> + State1 + end, + {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), + update_smallest_raft_index(IncomingRaftIdx, State, Effects). + +% used to processes messages that are finished +complete(Meta, ConsumerId, Discarded, + #consumer{checked_out = Checked} = Con0, Effects, + #?MODULE{ra_indexes = Indexes0} = State0) -> + %% TODO optimise use of Discarded map here + MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)], + %% credit_mode = simple_prefetch should automatically top-up credit + %% as messages are simple_prefetch or otherwise returned + Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked), + credit = increase_credit(Con0, map_size(Discarded))}, + State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), + Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0, + MsgRaftIdxs), + %% TODO: use maps:fold instead + State2 = lists:foldl(fun({_, {_, {Header, _}}}, Acc) -> + add_bytes_settle(Header, Acc); + ({'$prefix_msg', Header}, Acc) -> + add_bytes_settle(Header, Acc); + ({'$empty_msg', Header}, Acc) -> + add_bytes_settle(Header, Acc) + end, State1, maps:values(Discarded)), + {State2#?MODULE{ra_indexes = Indexes}, Effects}. + +increase_credit(#consumer{lifetime = once, + credit = Credit}, _) -> + %% once consumers cannot increment credit + Credit; +increase_credit(#consumer{lifetime = auto, + credit_mode = credited, + credit = Credit}, _) -> + %% credit_mode: credit also doesn't automatically increment credit + Credit; +increase_credit(#consumer{credit = Current}, Credit) -> + Current + Credit. 
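+
+%% Illustrative trace for maybe_enqueue/6 above (a sketch): with an enqueuer
+%% at next_seqno = 3, an enqueue with seqno 5 is parked in `pending'; a
+%% subsequent enqueue with seqno 3 is applied and next_seqno is bumped, after
+%% which enqueue_pending/3 drains any pending entries that have become
+%% contiguous; an enqueue with seqno 2 matches the `MsgSeqNo =< Next' clause
+%% and is treated as a duplicate.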
+ +complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, + #consumer{checked_out = Checked0} = Con0, + Effects0, State0) -> + Discarded = maps:with(MsgIds, Checked0), + {State1, Effects1} = complete(Meta, ConsumerId, Discarded, Con0, + Effects0, State0), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects1, false), + update_smallest_raft_index(IncomingRaftIdx, State, Effects). + +dead_letter_effects(_Reason, _Discarded, + #?MODULE{cfg = #cfg{dead_letter_handler = undefined}}, + Effects) -> + Effects; +dead_letter_effects(Reason, Discarded, + #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}}, + Effects) -> + RaftIdxs = maps:fold( + fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) -> + [RaftIdx | Acc]; + (_, _, Acc) -> + Acc + end, [], Discarded), + [{log, RaftIdxs, + fun (Log) -> + Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), + DeadLetters = maps:fold( + fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) -> + {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), + [{Reason, Msg} | Acc]; + (_, {_, {_, {_Header, Msg}}}, Acc) -> + [{Reason, Msg} | Acc]; + (_, _, Acc) -> + Acc + end, [], Discarded), + [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] + end} | Effects]. + +cancel_consumer_effects(ConsumerId, + #?MODULE{cfg = #cfg{resource = QName}} = State, Effects) -> + [{mod_call, rabbit_quorum_queue, + cancel_consumer_handler, [QName, ConsumerId]}, + notify_decorators_effect(State) | Effects]. + +update_smallest_raft_index(Idx, State, Effects) -> + update_smallest_raft_index(Idx, ok, State, Effects). + +update_smallest_raft_index(IncomingRaftIdx, Reply, + #?MODULE{cfg = Cfg, + ra_indexes = Indexes, + release_cursors = Cursors0} = State0, + Effects) -> + case rabbit_fifo_index:size(Indexes) of + 0 -> + % there are no messages on queue anymore and no pending enqueues + % we can forward release_cursor all the way until + % the last received command, hooray + %% reset the release cursor interval + #cfg{release_cursor_interval = {Base, _}} = Cfg, + RCI = {Base, Base}, + State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCI}, + release_cursors = lqueue:new(), + enqueue_count = 0}, + {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; + _ -> + Smallest = rabbit_fifo_index:smallest(Indexes), + case find_next_cursor(Smallest, Cursors0) of + {empty, Cursors} -> + {State0#?MODULE{release_cursors = Cursors}, Reply, Effects}; + {Cursor, Cursors} -> + %% we can emit a release cursor when we've passed the smallest + %% release cursor available. + {State0#?MODULE{release_cursors = Cursors}, Reply, + Effects ++ [Cursor]} + end + end. + +find_next_cursor(Idx, Cursors) -> + find_next_cursor(Idx, Cursors, empty). + +find_next_cursor(Smallest, Cursors0, Potential) -> + case lqueue:out(Cursors0) of + {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest -> + %% we found one but it may not be the largest one + find_next_cursor(Smallest, Cursors, Cursor); + _ -> + {Potential, Cursors0} + end. + +update_header(Key, UpdateFun, Default, Header) + when is_integer(Header) -> + update_header(Key, UpdateFun, Default, #{size => Header}); +update_header(Key, UpdateFun, Default, Header) -> + maps:update_with(Key, UpdateFun, Default, Header). 
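+
+%% Illustrative example for update_header/4 above (a sketch): a header starts
+%% out as a bare integer size and is promoted to a map on its first update, so
+%%   update_header(delivery_count, fun(C) -> C + 1 end, 1, 12)
+%% yields #{size => 12, delivery_count => 1}, and applying the same update to
+%% that map yields #{size => 12, delivery_count => 2}.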
+ + +return_one(Meta, MsgId, 0, {Tag, Header0}, + #?MODULE{returns = Returns, + consumers = Consumers, + cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, + Effects0, ConsumerId) + when Tag == '$prefix_msg'; Tag == '$empty_msg' -> + #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), + Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0), + Msg0 = {Tag, Header}, + case maps:get(delivery_count, Header) of + DeliveryCount when DeliveryCount > DeliveryLimit -> + complete(Meta, ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0); + _ -> + %% this should not affect the release cursor in any way + Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, + {Msg, State1} = case Tag of + '$empty_msg' -> + {Msg0, State0}; + _ -> case evaluate_memory_limit(Header, State0) of + true -> + {{'$empty_msg', Header}, State0}; + false -> + {Msg0, add_in_memory_counts(Header, State0)} + end + end, + {add_bytes_return( + Header, + State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, + returns = lqueue:in(Msg, Returns)}), + Effects0} + end; +return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}}, + #?MODULE{returns = Returns, + consumers = Consumers, + cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, + Effects0, ConsumerId) -> + #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), + Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0), + Msg0 = {RaftId, {Header, RawMsg}}, + case maps:get(delivery_count, Header) of + DeliveryCount when DeliveryCount > DeliveryLimit -> + DlMsg = {MsgNum, Msg0}, + Effects = dead_letter_effects(delivery_limit, #{none => DlMsg}, + State0, Effects0), + complete(Meta, ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0); + _ -> + Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, + %% this should not affect the release cursor in any way + {Msg, State1} = case RawMsg of + 'empty' -> + {Msg0, State0}; + _ -> + case evaluate_memory_limit(Header, State0) of + true -> + {{RaftId, {Header, 'empty'}}, State0}; + false -> + {Msg0, add_in_memory_counts(Header, State0)} + end + end, + {add_bytes_return( + Header, + State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, + returns = lqueue:in({MsgNum, Msg}, Returns)}), + Effects0} + end. + +return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, + #consumer{checked_out = Checked0} = Con) -> + %% need to sort the list so that we return messages in the order + %% they were checked out + Checked = lists:sort(maps:to_list(Checked0)), + State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, + lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) -> + return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); + ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) -> + return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); + ({MsgId, {MsgNum, Msg}}, {S, E}) -> + return_one(Meta, MsgId, MsgNum, Msg, S, E, ConsumerId) + end, {State, Effects0}, Checked). + +%% checkout new messages to consumers +checkout(Meta, OldState, State, Effects) -> + checkout(Meta, OldState, State, Effects, true). 
+
+checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0,
+         Effects0, HandleConsumerChanges) ->
+    {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0),
+                                            Effects0, #{}),
+    case evaluate_limit(Index, false, OldState, State1, Effects1) of
+        {State, true, Effects} ->
+            case maybe_notify_decorators(State, HandleConsumerChanges) of
+                {true, {MaxActivePriority, IsEmpty}} ->
+                    NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty),
+                    update_smallest_raft_index(Index, State, [NotifyEffect | Effects]);
+                false ->
+                    update_smallest_raft_index(Index, State, Effects)
+            end;
+        {State, false, Effects} ->
+            case maybe_notify_decorators(State, HandleConsumerChanges) of
+                {true, {MaxActivePriority, IsEmpty}} ->
+                    NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty),
+                    {State, ok, [NotifyEffect | Effects]};
+                false ->
+                    {State, ok, Effects}
+            end
+    end.
+
+checkout0(Meta, {success, ConsumerId, MsgId, {RaftIdx, {Header, 'empty'}}, State},
+          Effects, SendAcc0) ->
+    DelMsg = {RaftIdx, {MsgId, Header}},
+    SendAcc = maps:update_with(ConsumerId,
+                               fun ({InMem, LogMsgs}) ->
+                                       {InMem, [DelMsg | LogMsgs]}
+                               end, {[], [DelMsg]}, SendAcc0),
+    checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc);
+checkout0(Meta, {success, ConsumerId, MsgId, Msg, State}, Effects,
+          SendAcc0) ->
+    DelMsg = {MsgId, Msg},
+    SendAcc = maps:update_with(ConsumerId,
+                               fun ({InMem, LogMsgs}) ->
+                                       {[DelMsg | InMem], LogMsgs}
+                               end, {[DelMsg], []}, SendAcc0),
+    checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc);
+checkout0(_Meta, {Activity, State0}, Effects0, SendAcc) ->
+    Effects1 = case Activity of
+                   nochange ->
+                       append_delivery_effects(Effects0, SendAcc);
+                   inactive ->
+                       [{aux, inactive}
+                        | append_delivery_effects(Effects0, SendAcc)]
+               end,
+    {State0, ok, lists:reverse(Effects1)}.
+
+evaluate_limit(_Index, Result, _BeforeState,
+               #?MODULE{cfg = #cfg{max_length = undefined,
+                                   max_bytes = undefined}} = State,
+               Effects) ->
+    {State, Result, Effects};
+evaluate_limit(Index, Result, BeforeState,
+               #?MODULE{cfg = #cfg{overflow_strategy = Strategy},
+                        enqueuers = Enqs0} = State0,
+               Effects0) ->
+    case is_over_limit(State0) of
+        true when Strategy == drop_head ->
+            {State, Effects} = drop_head(State0, Effects0),
+            evaluate_limit(Index, true, BeforeState, State, Effects);
+        true when Strategy == reject_publish ->
+            %% generate send_msg effect for each enqueuer to let them know
+            %% they need to block
+            {Enqs, Effects} =
+                maps:fold(
+                  fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) ->
+                          E = E0#enqueuer{blocked = Index},
+                          {Enqs#{P => E},
+                           [{send_msg, P, {queue_status, reject_publish},
+                             [ra_event]} | Acc]};
+                      (_P, _E, Acc) ->
+                          Acc
+                  end, {Enqs0, Effects0}, Enqs0),
+            {State0#?MODULE{enqueuers = Enqs}, Result, Effects};
+        false when Strategy == reject_publish ->
+            %% TODO: optimise as this case gets called for every command
+            %% pretty much
+            Before = is_below_soft_limit(BeforeState),
+            case {Before, is_below_soft_limit(State0)} of
+                {false, true} ->
+                    %% we have moved below the lower limit, so unblock any
+                    %% blocked enqueuers
+                    {Enqs, Effects} =
+                        maps:fold(
+                          fun (P, #enqueuer{} = E0, {Enqs, Acc}) ->
+                                  E = E0#enqueuer{blocked = undefined},
+                                  {Enqs#{P => E},
+                                   [{send_msg, P, {queue_status, go}, [ra_event]}
+                                    | Acc]};
+                              (_P, _E, Acc) ->
+                                  Acc
+                          end, {Enqs0, Effects0}, Enqs0),
+                    {State0#?MODULE{enqueuers = Enqs}, Result, Effects};
+                _ ->
+                    {State0, Result, Effects0}
+            end;
+        false ->
+            {State0, Result, Effects0}
+    end.
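+
+%% Note on the flow-control handshake implemented by evaluate_limit/5 above
+%% (a summary, not additional behaviour): with overflow_strategy
+%% reject_publish, going over the limit sends each unblocked enqueuer
+%% {queue_status, reject_publish} and records the raft index in its `blocked'
+%% field; dropping back below the soft limit sends {queue_status, go} and
+%% clears `blocked', which is the status rabbit_fifo_client tracks in its
+%% queue_status field.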
+ +evaluate_memory_limit(_Header, + #?MODULE{cfg = #cfg{max_in_memory_length = undefined, + max_in_memory_bytes = undefined}}) -> + false; +evaluate_memory_limit(#{size := Size}, State) -> + evaluate_memory_limit(Size, State); +evaluate_memory_limit(Size, + #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength, + max_in_memory_bytes = MaxBytes}, + msg_bytes_in_memory = Bytes, + msgs_ready_in_memory = Length}) + when is_integer(Size) -> + (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes). + +append_delivery_effects(Effects0, AccMap) when map_size(AccMap) == 0 -> + %% does this ever happen? + Effects0; +append_delivery_effects(Effects0, AccMap) -> + [{aux, active} | + maps:fold(fun (C, {InMemMsgs, LogMsgs}, Ef) -> + [delivery_effect(C, lists:reverse(LogMsgs), InMemMsgs) | Ef] + end, Effects0, AccMap)]. + +%% next message is determined as follows: +%% First we check if there are are prefex returns +%% Then we check if there are current returns +%% then we check prefix msgs +%% then we check current messages +%% +%% When we return it is always done to the current return queue +%% for both prefix messages and current messages +take_next_msg(#?MODULE{prefix_msgs = {R, P}} = State) -> + %% conversion + take_next_msg(State#?MODULE{prefix_msgs = {length(R), R, length(P), P}}); +take_next_msg(#?MODULE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem], + NumP, P}} = State) -> + %% there are prefix returns, these should be served first + {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; +take_next_msg(#?MODULE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) -> + %% there are prefix returns, these should be served first + {{'$prefix_msg', Header}, + State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; +take_next_msg(#?MODULE{returns = Returns, + messages = Messages0, + prefix_msgs = {NumR, R, NumP, P}} = State) -> + %% use peek rather than out there as the most likely case is an empty + %% queue + case lqueue:peek(Returns) of + {value, NextMsg} -> + {NextMsg, + State#?MODULE{returns = lqueue:drop(Returns)}}; + empty when P == [] -> + case lqueue:out(Messages0) of + {empty, _} -> + empty; + {{value, {_, _} = SeqMsg}, Messages} -> + {SeqMsg, State#?MODULE{messages = Messages }} + end; + empty -> + [Msg | Rem] = P, + case Msg of + {Header, 'empty'} -> + %% There are prefix msgs + {{'$empty_msg', Header}, + State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; + Header -> + {{'$prefix_msg', Header}, + State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} + end + end. + +delivery_effect({CTag, CPid}, [], InMemMsgs) -> + {send_msg, CPid, {delivery, CTag, lists:reverse(InMemMsgs)}, + [local, ra_event]}; +delivery_effect({CTag, CPid}, IdxMsgs, InMemMsgs) -> + {RaftIdxs, Data} = lists:unzip(IdxMsgs), + {log, RaftIdxs, + fun(Log) -> + Msgs0 = lists:zipwith(fun ({enqueue, _, _, Msg}, {MsgId, Header}) -> + {MsgId, {Header, Msg}} + end, Log, Data), + Msgs = case InMemMsgs of + [] -> + Msgs0; + _ -> + lists:sort(InMemMsgs ++ Msgs0) + end, + [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}] + end, + {local, node(CPid)}}. + +reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> + {log, [RaftIdx], + fun([{enqueue, _, _, Msg}]) -> + [{reply, From, {wrap_reply, + {dequeue, {MsgId, {Header, Msg}}, Ready}}}] + end}. 
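delivery_effect/3 above defers to a 'log' effect so that messages kept only on disk can be read back from the Raft log before delivery; the term the consumer process eventually receives looks roughly like this (tag, message ids, headers and payloads are made-up values):

    DeliveryEffect =
        {send_msg, ConsumerPid,
         {delivery, <<"ctag-1">>, [{0, {#{size => 12}, Payload0}},
                                   {1, {#{size => 34}, Payload1}}]},
         [local, ra_event]}.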
+ +checkout_one(Meta, #?MODULE{service_queue = SQ0, + messages = Messages0, + consumers = Cons0} = InitState) -> + case priority_queue:out(SQ0) of + {{value, ConsumerId}, SQ1} + when is_map_key(ConsumerId, Cons0) -> + case take_next_msg(InitState) of + {ConsumerMsg, State0} -> + %% there are consumers waiting to be serviced + %% process consumer checkout + case maps:get(ConsumerId, Cons0) of + #consumer{credit = 0} -> + %% no credit but was still on queue + %% can happen when draining + %% recurse without consumer on queue + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + #consumer{status = cancelled} -> + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + #consumer{status = suspected_down} -> + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + #consumer{checked_out = Checked0, + next_msg_id = Next, + credit = Credit, + delivery_count = DelCnt} = Con0 -> + Checked = maps:put(Next, ConsumerMsg, Checked0), + Con = Con0#consumer{checked_out = Checked, + next_msg_id = Next + 1, + credit = Credit - 1, + delivery_count = DelCnt + 1}, + State1 = update_or_remove_sub(Meta, + ConsumerId, Con, + State0#?MODULE{service_queue = SQ1}), + {State, Msg} = + case ConsumerMsg of + {'$prefix_msg', Header} -> + {subtract_in_memory_counts( + Header, add_bytes_checkout(Header, State1)), + ConsumerMsg}; + {'$empty_msg', Header} -> + {add_bytes_checkout(Header, State1), + ConsumerMsg}; + {_, {_, {Header, 'empty'}} = M} -> + {add_bytes_checkout(Header, State1), + M}; + {_, {_, {Header, _} = M}} -> + {subtract_in_memory_counts( + Header, + add_bytes_checkout(Header, State1)), + M} + end, + {success, ConsumerId, Next, Msg, State} + end; + empty -> + {nochange, InitState} + end; + {{value, _ConsumerId}, SQ1} -> + %% consumer did not exist but was queued, recurse + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + {empty, _} -> + case lqueue:len(Messages0) of + 0 -> {nochange, InitState}; + _ -> {inactive, InitState} + end + end. + +update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto, + credit = 0} = Con, + #?MODULE{consumers = Cons} = State) -> + State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)}; +update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto} = Con, + #?MODULE{consumers = Cons, + service_queue = ServiceQueue} = State) -> + State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), + service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}; +update_or_remove_sub(#{system_time := Ts}, + ConsumerId, #consumer{lifetime = once, + checked_out = Checked, + credit = 0} = Con, + #?MODULE{consumers = Cons} = State) -> + case maps:size(Checked) of + 0 -> + % we're done with this consumer + State#?MODULE{consumers = maps:remove(ConsumerId, Cons), + last_active = Ts}; + _ -> + % there are unsettled items so need to keep around + State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)} + end; +update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = once} = Con, + #?MODULE{consumers = Cons, + service_queue = ServiceQueue} = State) -> + State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), + service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. + +uniq_queue_in(Key, #consumer{priority = P}, Queue) -> + % TODO: queue:member could surely be quite expensive, however the practical + % number of unique consumers may not be large enough for it to matter + case priority_queue:member(Key, Queue) of + true -> + Queue; + false -> + priority_queue:in(Key, P, Queue) + end. 
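A small usage sketch of the service-queue helpers above (made-up consumer id; it relies on the priority_queue semantics used here, where member/2 prevents duplicate entries and out/1 is expected to yield higher-priority consumers first):

    SQ0 = priority_queue:new(),
    SQ1 = uniq_queue_in(ConsumerId, #consumer{priority = 1}, SQ0),
    SQ1 = uniq_queue_in(ConsumerId, #consumer{priority = 1}, SQ1), %% already a member: unchanged
    {{value, ConsumerId}, _SQ} = priority_queue:out(SQ1).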
+
+update_consumer(ConsumerId, Meta, Spec, Priority,
+                #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0) ->
+    %% general case, single active consumer off
+    update_consumer0(ConsumerId, Meta, Spec, Priority, State0);
+update_consumer(ConsumerId, Meta, Spec, Priority,
+                #?MODULE{consumers = Cons0,
+                         cfg = #cfg{consumer_strategy = single_active}} = State0)
+  when map_size(Cons0) == 0 ->
+    %% single active consumer on, no one is consuming yet
+    update_consumer0(ConsumerId, Meta, Spec, Priority, State0);
+update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority,
+                #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+                         waiting_consumers = WaitingConsumers0} = State0) ->
+    %% single active consumer on and one active consumer already present:
+    %% add the new consumer to the waiting list
+    Consumer = #consumer{lifetime = Life, meta = Meta,
+                         priority = Priority,
+                         credit = Credit, credit_mode = Mode},
+    WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}],
+    State0#?MODULE{waiting_consumers = WaitingConsumers1}.
+
+update_consumer0(ConsumerId, Meta, {Life, Credit, Mode}, Priority,
+                 #?MODULE{consumers = Cons0,
+                          service_queue = ServiceQueue0} = State0) ->
+    %% TODO: this logic may not be correct for updating a pre-existing consumer
+    Init = #consumer{lifetime = Life, meta = Meta,
+                     priority = Priority,
+                     credit = Credit, credit_mode = Mode},
+    Cons = maps:update_with(ConsumerId,
+                            fun(S) ->
+                                %% remove any in-flight messages from
+                                %% the credit update
+                                N = maps:size(S#consumer.checked_out),
+                                C = max(0, Credit - N),
+                                S#consumer{lifetime = Life, credit = C}
+                            end, Init, Cons0),
+    ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons),
+                                        ServiceQueue0),
+    State0#?MODULE{consumers = Cons, service_queue = ServiceQueue}.
+
+maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con,
+                     ServiceQueue0) ->
+    case Credit > 0 of
+        true ->
+            % consumer needs service - check if already on service queue
+            uniq_queue_in(ConsumerId, Con, ServiceQueue0);
+        false ->
+            ServiceQueue0
+    end.
+
+%% creates a dehydrated version of the current state to be cached and
+%% potentially used for a snapshot at a later point
+dehydrate_state(#?MODULE{messages = Messages,
+                         consumers = Consumers,
+                         returns = Returns,
+                         prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0},
+                         waiting_consumers = Waiting0} = State) ->
+    RCnt = lqueue:len(Returns),
+    %% TODO: optimise this function as far as possible
+    PrefRet1 = lists:foldr(fun ({'$prefix_msg', Header}, Acc) ->
+                                   [Header | Acc];
+                               ({'$empty_msg', _} = Msg, Acc) ->
+                                   [Msg | Acc];
+                               ({_, {_, {Header, 'empty'}}}, Acc) ->
+                                   [{'$empty_msg', Header} | Acc];
+                               ({_, {_, {Header, _}}}, Acc) ->
+                                   [Header | Acc]
+                           end,
+                           [],
+                           lqueue:to_list(Returns)),
+    PrefRet = PrefRet0 ++ PrefRet1,
+    PrefMsgsSuff = dehydrate_messages(Messages, []),
+    %% prefix messages are not populated during normal operation, only after
+    %% recovering from a snapshot
+    PrefMsgs = PrefMsg0 ++ PrefMsgsSuff,
+    Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0],
+    State#?MODULE{messages = lqueue:new(),
+                  ra_indexes = rabbit_fifo_index:empty(),
+                  release_cursors = lqueue:new(),
+                  consumers = maps:map(fun (_, C) ->
+                                               dehydrate_consumer(C)
+                                       end, Consumers),
+                  returns = lqueue:new(),
+                  prefix_msgs = {PRCnt + RCnt, PrefRet,
+                                 PPCnt + lqueue:len(Messages), PrefMsgs},
+                  waiting_consumers = Waiting}.
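Worked example of the credit top-up inside update_consumer0/5 above: if a consumer re-issues a checkout asking for credit 10 while 4 of its deliveries are still unsettled, the in-flight messages are subtracted so it ends up with 6 (and never goes negative):

    Credit = 10,
    N = 4,                   %% maps:size(S#consumer.checked_out)
    6 = max(0, Credit - N).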
+ +%% TODO make body recursive to avoid allocating lists:reverse call +dehydrate_messages(Msgs0, Acc0) -> + {OutRes, Msgs} = lqueue:out(Msgs0), + case OutRes of + {value, {_MsgId, {_RaftId, {_, 'empty'} = Msg}}} -> + dehydrate_messages(Msgs, [Msg | Acc0]); + {value, {_MsgId, {_RaftId, {Header, _}}}} -> + dehydrate_messages(Msgs, [Header | Acc0]); + empty -> + lists:reverse(Acc0) + end. + +dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> + Checked = maps:map(fun (_, {'$prefix_msg', _} = M) -> + M; + (_, {'$empty_msg', _} = M) -> + M; + (_, {_, {_, {Header, 'empty'}}}) -> + {'$empty_msg', Header}; + (_, {_, {_, {Header, _}}}) -> + {'$prefix_msg', Header} + end, Checked0), + Con#consumer{checked_out = Checked}. + +%% make the state suitable for equality comparison +normalize(#?MODULE{messages = Messages, + release_cursors = Cursors} = State) -> + State#?MODULE{messages = lqueue:from_list(lqueue:to_list(Messages)), + release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}. + +is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}) -> + false; +is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, + max_bytes = MaxBytes}, + msg_bytes_enqueue = BytesEnq} = State) -> + messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes). + +is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}) -> + false; +is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, + max_bytes = MaxBytes}, + msg_bytes_enqueue = BytesEnq} = State) -> + is_below(MaxLength, messages_ready(State)) andalso + is_below(MaxBytes, BytesEnq). + +is_below(undefined, _Num) -> + true; +is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) -> + Num =< trunc(Val * ?LOW_LIMIT). + +-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol(). +make_enqueue(Pid, Seq, Msg) -> + #enqueue{pid = Pid, seq = Seq, msg = Msg}. + +-spec make_register_enqueuer(pid()) -> protocol(). +make_register_enqueuer(Pid) -> + #register_enqueuer{pid = Pid}. + +-spec make_checkout(consumer_id(), + checkout_spec(), consumer_meta()) -> protocol(). +make_checkout(ConsumerId, Spec, Meta) -> + #checkout{consumer_id = ConsumerId, + spec = Spec, meta = Meta}. + +-spec make_settle(consumer_id(), [msg_id()]) -> protocol(). +make_settle(ConsumerId, MsgIds) when is_list(MsgIds) -> + #settle{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_return(consumer_id(), [msg_id()]) -> protocol(). +make_return(ConsumerId, MsgIds) -> + #return{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_discard(consumer_id(), [msg_id()]) -> protocol(). +make_discard(ConsumerId, MsgIds) -> + #discard{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(), + boolean()) -> protocol(). +make_credit(ConsumerId, Credit, DeliveryCount, Drain) -> + #credit{consumer_id = ConsumerId, + credit = Credit, + delivery_count = DeliveryCount, + drain = Drain}. + +-spec make_purge() -> protocol(). +make_purge() -> #purge{}. + +-spec make_garbage_collection() -> protocol(). +make_garbage_collection() -> #garbage_collection{}. + +-spec make_purge_nodes([node()]) -> protocol(). +make_purge_nodes(Nodes) -> + #purge_nodes{nodes = Nodes}. + +-spec make_update_config(config()) -> protocol(). +make_update_config(Config) -> + #update_config{config = Config}. 
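For orientation, this is roughly how a client module (such as rabbit_fifo_client) would use the protocol helpers above to build commands before submitting them to the Ra cluster; the consumer tag, sequence number and payload are made-up values:

    Enqueue  = make_enqueue(self(), 1, <<"payload">>),
    Checkout = make_checkout({<<"ctag-1">>, self()}, {auto, 10, simple_prefetch}, #{}),
    Settle   = make_settle({<<"ctag-1">>, self()}, [0, 1, 2]).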
+ +add_bytes_enqueue(Bytes, + #?MODULE{msg_bytes_enqueue = Enqueue} = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_enqueue = Enqueue + Bytes}; +add_bytes_enqueue(#{size := Bytes}, State) -> + add_bytes_enqueue(Bytes, State). + +add_bytes_drop(Bytes, + #?MODULE{msg_bytes_enqueue = Enqueue} = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_enqueue = Enqueue - Bytes}; +add_bytes_drop(#{size := Bytes}, State) -> + add_bytes_drop(Bytes, State). + +add_bytes_checkout(Bytes, + #?MODULE{msg_bytes_checkout = Checkout, + msg_bytes_enqueue = Enqueue } = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_checkout = Checkout + Bytes, + msg_bytes_enqueue = Enqueue - Bytes}; +add_bytes_checkout(#{size := Bytes}, State) -> + add_bytes_checkout(Bytes, State). + +add_bytes_settle(Bytes, + #?MODULE{msg_bytes_checkout = Checkout} = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_checkout = Checkout - Bytes}; +add_bytes_settle(#{size := Bytes}, State) -> + add_bytes_settle(Bytes, State). + +add_bytes_return(Bytes, + #?MODULE{msg_bytes_checkout = Checkout, + msg_bytes_enqueue = Enqueue} = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_checkout = Checkout - Bytes, + msg_bytes_enqueue = Enqueue + Bytes}; +add_bytes_return(#{size := Bytes}, State) -> + add_bytes_return(Bytes, State). + +add_in_memory_counts(Bytes, + #?MODULE{msg_bytes_in_memory = InMemoryBytes, + msgs_ready_in_memory = InMemoryCount} = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Bytes, + msgs_ready_in_memory = InMemoryCount + 1}; +add_in_memory_counts(#{size := Bytes}, State) -> + add_in_memory_counts(Bytes, State). + +subtract_in_memory_counts(Bytes, + #?MODULE{msg_bytes_in_memory = InMemoryBytes, + msgs_ready_in_memory = InMemoryCount} = State) + when is_integer(Bytes) -> + State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Bytes, + msgs_ready_in_memory = InMemoryCount - 1}; +subtract_in_memory_counts(#{size := Bytes}, State) -> + subtract_in_memory_counts(Bytes, State). + +message_size(#basic_message{content = Content}) -> + #content{payload_fragments_rev = PFR} = Content, + iolist_size(PFR); +message_size({'$prefix_msg', H}) -> + get_size_from_header(H); +message_size({'$empty_msg', H}) -> + get_size_from_header(H); +message_size(B) when is_binary(B) -> + byte_size(B); +message_size(Msg) -> + %% probably only hit this for testing so ok to use erts_debug + erts_debug:size(Msg). + +get_size_from_header(Size) when is_integer(Size) -> + Size; +get_size_from_header(#{size := B}) -> + B. + + +all_nodes(#?MODULE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Nodes0 = maps:fold(fun({_, P}, _, Acc) -> + Acc#{node(P) => ok} + end, #{}, Cons0), + Nodes1 = maps:fold(fun(P, _, Acc) -> + Acc#{node(P) => ok} + end, Nodes0, Enqs0), + maps:keys( + lists:foldl(fun({{_, P}, _}, Acc) -> + Acc#{node(P) => ok} + end, Nodes1, WaitingConsumers0)). + +all_pids_for(Node, #?MODULE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun({_, P}, _, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, [], Cons0), + Enqs = maps:fold(fun(P, _, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, Cons, Enqs0), + lists:foldl(fun({{_, P}, _}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, Acc) -> Acc + end, Enqs, WaitingConsumers0). 
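The byte counters maintained by the helpers above mirror a message's life cycle; a hypothetical trace for a single 10-byte message, starting from some state S0:

    S1 = add_bytes_enqueue(10, S0),   %% msg_bytes_enqueue  +10 (message ready)
    S2 = add_bytes_checkout(10, S1),  %% enqueue -10, checkout +10 (delivered, unsettled)
    S3 = add_bytes_settle(10, S2),    %% msg_bytes_checkout -10 (acknowledged)
    %% a return instead of a settle would have moved the bytes back the other way:
    %% add_bytes_return(10, S2)       %% checkout -10, enqueue +10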
+ +suspected_pids_for(Node, #?MODULE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, [], Cons0), + Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, Cons, Enqs0), + lists:foldl(fun({{_, P}, + #consumer{status = suspected_down}}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, Acc) -> Acc + end, Enqs, WaitingConsumers0). + +is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires}, + last_active = LastActive, + consumers = Consumers}) + when is_number(LastActive) andalso is_number(Expires) -> + %% TODO: should it be active consumers? + Active = maps:filter(fun (_, #consumer{status = suspected_down}) -> + false; + (_, _) -> + true + end, Consumers), + + Ts > (LastActive + Expires) andalso maps:size(Active) == 0; +is_expired(_Ts, _State) -> + false. + +get_priority_from_args(#{args := Args}) -> + case rabbit_misc:table_lookup(Args, <<"x-priority">>) of + {_Key, Value} -> + Value; + _ -> 0 + end; +get_priority_from_args(_) -> + 0. + +maybe_notify_decorators(_, false) -> + false; +maybe_notify_decorators(State, _) -> + {true, query_notify_decorators_info(State)}. + +notify_decorators_effect(#?MODULE{cfg = #cfg{resource = QName}} = State) -> + {MaxActivePriority, IsEmpty} = query_notify_decorators_info(State), + notify_decorators_effect(QName, MaxActivePriority, IsEmpty). + +notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> + {mod_call, rabbit_quorum_queue, spawn_notify_decorators, + [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. diff --git a/deps/rabbit/src/rabbit_fifo_v1.hrl b/deps/rabbit/src/rabbit_fifo_v1.hrl new file mode 100644 index 000000000000..3df988344596 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_v1.hrl @@ -0,0 +1,210 @@ + +-type option(T) :: undefined | T. + +-type raw_msg() :: term(). +%% The raw message. It is opaque to rabbit_fifo. + +-type msg_in_id() :: non_neg_integer(). +% a queue scoped monotonically incrementing integer used to enforce order +% in the unassigned messages map + +-type msg_id() :: non_neg_integer(). +%% A consumer-scoped monotonically incrementing integer included with a +%% {@link delivery/0.}. Used to settle deliveries using +%% {@link rabbit_fifo_client:settle/3.} + +-type msg_seqno() :: non_neg_integer(). +%% A sender process scoped monotonically incrementing integer included +%% in enqueue messages. Used to ensure ordering of messages send from the +%% same process + +-type msg_header() :: msg_size() | + #{size := msg_size(), + delivery_count => non_neg_integer()}. +%% The message header: +%% delivery_count: the number of unsuccessful delivery attempts. +%% A non-zero value indicates a previous attempt. +%% If it only contains the size it can be condensed to an integer only + +-type msg() :: {msg_header(), raw_msg()}. +%% message with a header map. + +-type msg_size() :: non_neg_integer(). +%% the size in bytes of the msg payload + +-type indexed_msg() :: {ra:index(), msg()}. + +-type prefix_msg() :: {'$prefix_msg', msg_header()}. + +-type delivery_msg() :: {msg_id(), msg()}. +%% A tuple consisting of the message id and the headered message. + +-type consumer_tag() :: binary(). +%% An arbitrary binary tag used to distinguish between different consumers +%% set up by the same process. 
See: {@link rabbit_fifo_client:checkout/3.}
+
+-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}.
+%% Represents the delivery of one or more rabbit_fifo messages.
+
+-type consumer_id() :: {consumer_tag(), pid()}.
+%% The entity that receives messages. Uniquely identifies a consumer.
+
+-type credit_mode() :: simple_prefetch | credited.
+%% determines how credit is replenished
+
+-type checkout_spec() :: {once | auto, Num :: non_neg_integer(),
+                          credit_mode()} |
+                         {dequeue, settled | unsettled} |
+                         cancel.
+
+-type consumer_meta() :: #{ack => boolean(),
+                           username => binary(),
+                           prefetch => non_neg_integer(),
+                           args => list()}.
+%% static meta data associated with a consumer
+
+
+-type applied_mfa() :: {module(), atom(), list()}.
+% represents a partially applied module call
+
+-define(RELEASE_CURSOR_EVERY, 2048).
+-define(RELEASE_CURSOR_EVERY_MAX, 3200000).
+-define(USE_AVG_HALF_LIFE, 10000.0).
+%% an average QQ without any message uses about 100KB so setting this limit
+%% to ~10 times that should be relatively safe.
+-define(GC_MEM_LIMIT_B, 2000000).
+
+-define(MB, 1048576).
+-define(LOW_LIMIT, 0.8).
+
+-record(consumer,
+        {meta = #{} :: consumer_meta(),
+         checked_out = #{} :: #{msg_id() => {msg_in_id(), indexed_msg()}},
+         next_msg_id = 0 :: msg_id(), % part of snapshot data
+         %% max number of messages that can be sent
+         %% decremented for each delivery
+         credit = 0 :: non_neg_integer(),
+         %% total number of checked out messages - ever
+         %% incremented for each delivery
+         delivery_count = 0 :: non_neg_integer(),
+         %% the mode of how credit is incremented
+         %% simple_prefetch: credit is re-filled as deliveries are settled
+         %% or returned.
+         %% credited: credit can only be changed by receiving a consumer_credit
+         %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}'
+         credit_mode = simple_prefetch :: credit_mode(), % part of snapshot data
+         lifetime = once :: once | auto,
+         status = up :: up | suspected_down | cancelled,
+         priority = 0 :: non_neg_integer()
+        }).
+
+-type consumer() :: #consumer{}.
+
+-type consumer_strategy() :: competing | single_active.
+
+-type milliseconds() :: non_neg_integer().
+
+-record(enqueuer,
+        {next_seqno = 1 :: msg_seqno(),
+         % out of order enqueues - sorted list
+         pending = [] :: [{msg_seqno(), ra:index(), raw_msg()}],
+         status = up :: up |
+                        suspected_down,
+         %% it is useful to have a record of when this was blocked
+         %% so that we can retry sending the block effect if
+         %% the publisher did not receive the initial one
+         blocked :: undefined | ra:index(),
+         unused_1,
+         unused_2
+        }).
+
+-record(cfg,
+        {name :: atom(),
+         resource :: rabbit_types:r('queue'),
+         release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}),
+         dead_letter_handler :: option(applied_mfa()),
+         become_leader_handler :: option(applied_mfa()),
+         overflow_strategy = drop_head :: drop_head | reject_publish,
+         max_length :: option(non_neg_integer()),
+         max_bytes :: option(non_neg_integer()),
+         %% whether single active consumer is on or not for this queue
+         consumer_strategy = competing :: consumer_strategy(),
+         %% the maximum number of unsuccessful delivery attempts permitted
+         delivery_limit :: option(non_neg_integer()),
+         max_in_memory_length :: option(non_neg_integer()),
+         max_in_memory_bytes :: option(non_neg_integer()),
+         expires :: undefined | milliseconds(),
+         unused_1,
+         unused_2
+        }).
+
+-type prefix_msgs() :: {list(), list()} |
+                       {non_neg_integer(), list(),
+                        non_neg_integer(), list()}.
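An example of the second, counted form of prefix_msgs() as it is populated after recovering from a dehydrated snapshot; the entries are headers only, matching the shapes produced by dehydrate_state/1 and consumed by take_next_msg/1 earlier in this file (values made up):

    PrefixMsgs = {1,                                     %% number of prefix returns
                  [#{size => 12, delivery_count => 1}],  %% header of a previously returned message
                  2,                                     %% number of prefix messages
                  [34,                                   %% header of a message that was in memory
                   {56, 'empty'}]}.                      %% header of a message whose body is on disk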
+ +-record(rabbit_fifo_v1, + {cfg :: #cfg{}, + % unassigned messages + messages = lqueue:new() :: lqueue:lqueue({msg_in_id(), indexed_msg()}), + % defines the next message id + next_msg_num = 1 :: msg_in_id(), + % queue of returned msg_in_ids - when checking out it picks from + returns = lqueue:new() :: lqueue:lqueue(prefix_msg() | + {msg_in_id(), indexed_msg()}), + % a counter of enqueues - used to trigger shadow copy points + enqueue_count = 0 :: non_neg_integer(), + % a map containing all the live processes that have ever enqueued + % a message to this queue as well as a cached value of the smallest + % ra_index of all pending enqueues + enqueuers = #{} :: #{pid() => #enqueuer{}}, + % master index of all enqueue raft indexes including pending + % enqueues + % rabbit_fifo_index can be slow when calculating the smallest + % index when there are large gaps but should be faster than gb_trees + % for normal appending operations as it's backed by a map + ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), + release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, + ra:index(), #rabbit_fifo_v1{}}), + % consumers need to reflect consumer state at time of snapshot + % needs to be part of snapshot + consumers = #{} :: #{consumer_id() => #consumer{}}, + % consumers that require further service are queued here + % needs to be part of snapshot + service_queue = priority_queue:new() :: priority_queue:q(), + %% This is a special field that is only used for snapshots + %% It represents the queued messages at the time the + %% dehydrated snapshot state was cached. + %% As release_cursors are only emitted for raft indexes where all + %% prior messages no longer contribute to the current state we can + %% replace all message payloads with their sizes (to be used for + %% overflow calculations). + %% This is done so that consumers are still served in a deterministic + %% order on recovery. + prefix_msgs = {0, [], 0, []} :: prefix_msgs(), + msg_bytes_enqueue = 0 :: non_neg_integer(), + msg_bytes_checkout = 0 :: non_neg_integer(), + %% waiting consumers, one is picked active consumer is cancelled or dies + %% used only when single active consumer is on + waiting_consumers = [] :: [{consumer_id(), consumer()}], + msg_bytes_in_memory = 0 :: non_neg_integer(), + msgs_ready_in_memory = 0 :: non_neg_integer(), + last_active :: undefined | non_neg_integer(), + unused_1, + unused_2 + }). + +-type config() :: #{name := atom(), + queue_resource := rabbit_types:r('queue'), + dead_letter_handler => applied_mfa(), + become_leader_handler => applied_mfa(), + release_cursor_interval => non_neg_integer(), + max_length => non_neg_integer(), + max_bytes => non_neg_integer(), + max_in_memory_length => non_neg_integer(), + max_in_memory_bytes => non_neg_integer(), + overflow_strategy => drop_head | reject_publish, + single_active_consumer_on => boolean(), + delivery_limit => non_neg_integer(), + expires => non_neg_integer(), + created => non_neg_integer() + }. From 9bab3c34d12cff553b349ceac3ceb2fbdab0dd17 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Fri, 18 Jun 2021 17:09:21 +0100 Subject: [PATCH 02/97] rabbit_fifo: code readability make the various nested tuple message formats clearer by introducing and using a set of macros with better descriptive names. 
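For example, a checked-out, disk-resident message that the hunks below currently match as a raw nested tuple reads as follows once the macros are in place; MsgId, RaftIdx and Header are placeholders and the underlying term is unchanged:

    Entry = {MsgId, {RaftIdx, {Header, empty}}},              %% how it reads today
    {MsgId, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))} = Entry.  %% how it reads with the macros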
--- deps/rabbit/src/rabbit_fifo.erl | 105 +++++++++++++++++--------------- 1 file changed, 55 insertions(+), 50 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index e434c8e86dce..3968decfa29a 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -123,6 +123,12 @@ state/0, config/0]). +-define(MSG(Header, RawMsg), {Header, RawMsg}). +-define(DISK_MSG(Header), {Header, empty}). +-define(INDEX_MSG(Index, Msg), {Index, Msg}). +-define(PREFIX_DISK_MSG(Header), {'$empty_msg', Header}). +-define(PREFIX_MEM_MSG(Header), {'$prefix_msg', Header}). + -spec init(config()) -> state(). init(#{name := Name, queue_resource := Resource} = Conf) -> @@ -1190,21 +1196,21 @@ apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) -> drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> case take_next_msg(State0) of - {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}}, + {FullMsg = {_MsgId, ?INDEX_MSG(RaftIdxToDrop, ?MSG(Header, _) = Msg)}, State1} -> Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0), State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}), State = case Msg of - 'empty' -> State2; + ?DISK_MSG(_) -> State2; _ -> subtract_in_memory_counts(Header, State2) end, Effects = dead_letter_effects(maxlen, #{none => FullMsg}, State, Effects0), {State, Effects}; - {{'$prefix_msg', Header}, State1} -> + {?PREFIX_MEM_MSG(Header), State1} -> State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)), {State2, Effects0}; - {{'$empty_msg', Header}, State1} -> + {?PREFIX_DISK_MSG(Header), State1} -> State2 = add_bytes_drop(Header, State1), {State2, Effects0}; empty -> @@ -1221,12 +1227,13 @@ enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages, true -> % indexed message with header map {State0, - {RaftIdx, {Header, 'empty'}}}; + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}; false -> {add_in_memory_counts(Header, State0), - {RaftIdx, {Header, RawMsg}}} % indexed message with header map + ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} end, State = add_bytes_enqueue(Header, State1), + %% TODO: msg num isn't needed State#?MODULE{messages = lqueue:in({NextMsgNum, Msg}, Messages), next_msg_num = NextMsgNum + 1}. 
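The enqueue/3 hunk above makes the in-memory decision explicit: depending on evaluate_memory_limit/2 (that is, the queue's max_in_memory_length/max_in_memory_bytes settings) one of two message shapes is built before being queued. With this patch's tuple-based macros and made-up values they expand to:

    DiskOnly = ?INDEX_MSG(1001, ?DISK_MSG(#{size => 12})),            %% {1001, {#{size => 12}, empty}}: body stays in the log
    InMemory = ?INDEX_MSG(1001, ?MSG(#{size => 12}, <<"payload">>)).  %% {1001, {#{size => 12}, <<"payload">>}}: body kept in memory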
@@ -1324,9 +1331,9 @@ snd(T) -> return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, Effects0, State0) -> {State1, Effects1} = maps:fold( - fun(MsgId, {Tag, _} = Msg, {S0, E0}) - when Tag == '$prefix_msg'; - Tag == '$empty_msg'-> + fun(MsgId, ?PREFIX_MEM_MSG(_) = Msg, {S0, E0}) -> + return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); + (MsgId, ?PREFIX_DISK_MSG(_) = Msg, {S0, E0}) -> return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); (MsgId, {MsgNum, Msg}, {S0, E0}) -> return_one(Meta, MsgId, MsgNum, Msg, S0, E0, @@ -1349,7 +1356,7 @@ complete(Meta, ConsumerId, Discarded, #consumer{checked_out = Checked} = Con0, Effects, #?MODULE{ra_indexes = Indexes0} = State0) -> %% TODO optimise use of Discarded map here - MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)], + MsgRaftIdxs = [RIdx || {_, ?INDEX_MSG(RIdx, _)} <- maps:values(Discarded)], %% credit_mode = simple_prefetch should automatically top-up credit %% as messages are simple_prefetch or otherwise returned Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked), @@ -1358,11 +1365,11 @@ complete(Meta, ConsumerId, Discarded, Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0, MsgRaftIdxs), %% TODO: use maps:fold instead - State2 = lists:foldl(fun({_, {_, {Header, _}}}, Acc) -> + State2 = lists:foldl(fun({_, ?INDEX_MSG(_, ?MSG(Header, _))}, Acc) -> add_bytes_settle(Header, Acc); - ({'$prefix_msg', Header}, Acc) -> + (?PREFIX_MEM_MSG(Header), Acc) -> add_bytes_settle(Header, Acc); - ({'$empty_msg', Header}, Acc) -> + (?PREFIX_DISK_MSG(Header), Acc) -> add_bytes_settle(Header, Acc) end, State1, maps:values(Discarded)), {State2#?MODULE{ra_indexes = Indexes}, Effects}. @@ -1494,7 +1501,7 @@ return_one(Meta, MsgId, 0, {Tag, Header0}, {Msg0, State0}; _ -> case evaluate_memory_limit(Header, State0) of true -> - {{'$empty_msg', Header}, State0}; + {?PREFIX_DISK_MSG(Header), State0}; false -> {Msg0, add_in_memory_counts(Header, State0)} end @@ -1505,17 +1512,17 @@ return_one(Meta, MsgId, 0, {Tag, Header0}, returns = lqueue:in(Msg, Returns)}), Effects0} end; -return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}}, +return_one(Meta, MsgId, MsgNum, ?INDEX_MSG(RaftId, ?MSG(Header0, RawMsg)), #?MODULE{returns = Returns, consumers = Consumers, cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0), - Msg0 = {RaftId, {Header, RawMsg}}, + IdxMsg0 = ?INDEX_MSG(RaftId, ?MSG(Header, RawMsg)), case maps:get(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> - DlMsg = {MsgNum, Msg0}, + DlMsg = {MsgNum, IdxMsg0}, Effects = dead_letter_effects(delivery_limit, #{none => DlMsg}, State0, Effects0), complete(Meta, ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0); @@ -1524,13 +1531,13 @@ return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}}, %% this should not affect the release cursor in any way {Msg, State1} = case RawMsg of 'empty' -> - {Msg0, State0}; + {IdxMsg0, State0}; _ -> case evaluate_memory_limit(Header, State0) of true -> - {{RaftId, {Header, 'empty'}}, State0}; + {?INDEX_MSG(RaftId, ?DISK_MSG(Header)), State0}; false -> - {Msg0, add_in_memory_counts(Header, State0)} + {IdxMsg0, add_in_memory_counts(Header, State0)} end end, {add_bytes_return( @@ -1546,9 +1553,9 @@ return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, %% they were checked out Checked = 
lists:sort(maps:to_list(Checked0)), State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, - lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) -> + lists:foldl(fun ({MsgId, ?PREFIX_MEM_MSG(_) = Msg}, {S, E}) -> return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); - ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) -> + ({MsgId, ?PREFIX_DISK_MSG(_) = Msg}, {S, E}) -> return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); ({MsgId, {MsgNum, Msg}}, {S, E}) -> return_one(Meta, MsgId, MsgNum, Msg, S, E, ConsumerId) @@ -1693,13 +1700,13 @@ append_delivery_effects(Effects0, AccMap) -> take_next_msg(#?MODULE{prefix_msgs = {R, P}} = State) -> %% conversion take_next_msg(State#?MODULE{prefix_msgs = {length(R), R, length(P), P}}); -take_next_msg(#?MODULE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem], +take_next_msg(#?MODULE{prefix_msgs = {NumR, [?PREFIX_DISK_MSG(_) = Msg | Rem], NumP, P}} = State) -> %% there are prefix returns, these should be served first {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; take_next_msg(#?MODULE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) -> %% there are prefix returns, these should be served first - {{'$prefix_msg', Header}, + {?PREFIX_MEM_MSG(Header), State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; take_next_msg(#?MODULE{returns = Returns, messages = Messages0, @@ -1712,14 +1719,14 @@ take_next_msg(#?MODULE{returns = Returns, {empty, _} -> empty; {{value, {_, _} = SeqMsg}, Messages} -> - {SeqMsg, State#?MODULE{messages = Messages }} + {SeqMsg, State#?MODULE{messages = Messages}} end; empty -> [Msg | Rem] = P, case Msg of - {Header, 'empty'} -> + ?DISK_MSG(Header) -> %% There are prefix msgs - {{'$empty_msg', Header}, + {?PREFIX_DISK_MSG(Header), State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; Header -> {{'$prefix_msg', Header}, @@ -1791,11 +1798,11 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, State0#?MODULE{service_queue = SQ1}), {State, Msg} = case ConsumerMsg of - {'$prefix_msg', Header} -> + ?PREFIX_MEM_MSG(Header) -> {subtract_in_memory_counts( Header, add_bytes_checkout(Header, State1)), ConsumerMsg}; - {'$empty_msg', Header} -> + ?PREFIX_DISK_MSG(Header) -> {add_bytes_checkout(Header, State1), ConsumerMsg}; {_, {_, {Header, 'empty'}} = M} -> @@ -1920,17 +1927,15 @@ dehydrate_state(#?MODULE{messages = Messages, waiting_consumers = Waiting0} = State) -> RCnt = lqueue:len(Returns), %% TODO: optimise this function as far as possible - PrefRet1 = lists:foldr(fun ({'$prefix_msg', Header}, Acc) -> - [Header | Acc]; - ({'$empty_msg', _} = Msg, Acc) -> - [Msg | Acc]; - ({_, {_, {Header, 'empty'}}}, Acc) -> - [{'$empty_msg', Header} | Acc]; - ({_, {_, {Header, _}}}, Acc) -> - [Header | Acc] - end, - [], - lqueue:to_list(Returns)), + PrefRet1 = lists:foldr(fun (?PREFIX_MEM_MSG(Header), Acc) -> + [Header | Acc]; + (?PREFIX_DISK_MSG(_) = Msg, Acc) -> + [Msg | Acc]; + ({_, ?INDEX_MSG(_, ?DISK_MSG(Header))}, Acc) -> + [?PREFIX_DISK_MSG(Header) | Acc]; + ({_, ?INDEX_MSG(_, ?MSG(Header, _))}, Acc) -> + [Header | Acc] + end, [], lqueue:to_list(Returns)), PrefRet = PrefRet0 ++ PrefRet1, PrefMsgsSuff = dehydrate_messages(Messages, []), %% prefix messages are not populated in normal operation only after @@ -1952,23 +1957,23 @@ dehydrate_state(#?MODULE{messages = Messages, dehydrate_messages(Msgs0, Acc0) -> {OutRes, Msgs} = lqueue:out(Msgs0), case OutRes of - {value, {_MsgId, {_RaftId, {_, 'empty'} = Msg}}} -> + {value, {_MsgId, ?INDEX_MSG(_, ?DISK_MSG(_) = Msg)}} -> dehydrate_messages(Msgs, [Msg | Acc0]); - {value, {_MsgId, 
{_RaftId, {Header, _}}}} -> + {value, {_MsgId, ?INDEX_MSG(_, ?MSG(Header, _))}} -> dehydrate_messages(Msgs, [Header | Acc0]); empty -> lists:reverse(Acc0) end. dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> - Checked = maps:map(fun (_, {'$prefix_msg', _} = M) -> + Checked = maps:map(fun (_, ?PREFIX_MEM_MSG(_) = M) -> M; - (_, {'$empty_msg', _} = M) -> + (_, ?PREFIX_DISK_MSG(_) = M) -> M; - (_, {_, {_, {Header, 'empty'}}}) -> - {'$empty_msg', Header}; - (_, {_, {_, {Header, _}}}) -> - {'$prefix_msg', Header} + (_, {_, ?INDEX_MSG(_, ?DISK_MSG(Header))}) -> + ?PREFIX_DISK_MSG(Header); + (_, {_, ?INDEX_MSG(_, ?MSG(Header, _))}) -> + ?PREFIX_MEM_MSG(Header) end, Checked0), Con#consumer{checked_out = Checked}. @@ -2108,9 +2113,9 @@ subtract_in_memory_counts(#{size := Bytes}, State) -> message_size(#basic_message{content = Content}) -> #content{payload_fragments_rev = PFR} = Content, iolist_size(PFR); -message_size({'$prefix_msg', H}) -> +message_size(?PREFIX_MEM_MSG(H)) -> get_size_from_header(H); -message_size({'$empty_msg', H}) -> +message_size(?PREFIX_DISK_MSG(H)) -> get_size_from_header(H); message_size(B) when is_binary(B) -> byte_size(B); From 7c92a535095da7220db3641a9e3db8ca945c6d76 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 21 Jun 2021 09:29:58 +0100 Subject: [PATCH 03/97] Replace nested tuples with improper lists to save memory. Also remove tuple that is not needed any more. total per msg memory saving: 5 words --- deps/rabbit/src/rabbit_fifo.erl | 53 +++++++++++++++++---------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 3968decfa29a..c4a7dade318c 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -4,6 +4,8 @@ %% %% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. %% +%% +%% before post gc 1M msg: 203MB, after recovery + gc: 203MB -module(rabbit_fifo). @@ -123,11 +125,12 @@ state/0, config/0]). --define(MSG(Header, RawMsg), {Header, RawMsg}). --define(DISK_MSG(Header), {Header, empty}). --define(INDEX_MSG(Index, Msg), {Index, Msg}). --define(PREFIX_DISK_MSG(Header), {'$empty_msg', Header}). --define(PREFIX_MEM_MSG(Header), {'$prefix_msg', Header}). +-define(TUPLE(A, B), [A | B]). +-define(MSG(Header, RawMsg), [Header | RawMsg]). +-define(DISK_MSG(Header), [Header | empty]). +-define(INDEX_MSG(Index, Msg), [Index | Msg]). +-define(PREFIX_DISK_MSG(Header), ['$empty_msg' | Header]). +-define(PREFIX_MEM_MSG(Header), ['$prefix_msg' | Header]). -spec init(config()) -> state(). init(#{name := Name, @@ -1196,7 +1199,7 @@ apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) -> drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> case take_next_msg(State0) of - {FullMsg = {_MsgId, ?INDEX_MSG(RaftIdxToDrop, ?MSG(Header, _) = Msg)}, + {FullMsg = ?INDEX_MSG(RaftIdxToDrop, ?MSG(Header, _) = Msg), State1} -> Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0), State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}), @@ -1217,8 +1220,7 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> {State0, Effects0} end. 
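The 5-word figure in this commit message can be accounted for on a 64-bit emulator, where a two-element tuple costs 3 heap words (header plus two elements) while a cons cell costs only 2: ?INDEX_MSG and ?MSG each save one word by becoming cons cells, and dropping the {MsgNum, Msg} wrapper (removed from enqueue/3 in the hunks that follow) saves the remaining three. As a worked sum over the per-message containers only (the header and payload terms themselves are unchanged):

    %% {MsgNum, {RaftIdx, {Header, RawMsg}}} -> 3 + 3 + 3 = 9 container words
    %% [RaftIdx | [Header | RawMsg]]         -> 2 + 2     = 4 container words
    5 = 9 - 4.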
-enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages, - next_msg_num = NextMsgNum} = State0) -> +enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages} = State0) -> %% the initial header is an integer only - it will get expanded to a map %% when the next required key is added Header = message_size(RawMsg), @@ -1234,8 +1236,7 @@ enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages, end, State = add_bytes_enqueue(Header, State1), %% TODO: msg num isn't needed - State#?MODULE{messages = lqueue:in({NextMsgNum, Msg}, Messages), - next_msg_num = NextMsgNum + 1}. + State#?MODULE{messages = lqueue:in(Msg, Messages)}. append_to_master_index(RaftIdx, #?MODULE{ra_indexes = Indexes0} = State0) -> @@ -1335,8 +1336,8 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); (MsgId, ?PREFIX_DISK_MSG(_) = Msg, {S0, E0}) -> return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); - (MsgId, {MsgNum, Msg}, {S0, E0}) -> - return_one(Meta, MsgId, MsgNum, Msg, S0, E0, + (MsgId, Msg, {S0, E0}) -> + return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId) end, {State0, Effects0}, Returned), State2 = @@ -1481,7 +1482,7 @@ update_header(Key, UpdateFun, Default, Header) -> maps:update_with(Key, UpdateFun, Default, Header). -return_one(Meta, MsgId, 0, {Tag, Header0}, +return_one(Meta, MsgId, 0, ?TUPLE(Tag, Header0), #?MODULE{returns = Returns, consumers = Consumers, cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, @@ -1489,7 +1490,7 @@ return_one(Meta, MsgId, 0, {Tag, Header0}, when Tag == '$prefix_msg'; Tag == '$empty_msg' -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0), - Msg0 = {Tag, Header}, + Msg0 = ?TUPLE(Tag, Header), case maps:get(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> complete(Meta, ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0); @@ -1512,17 +1513,17 @@ return_one(Meta, MsgId, 0, {Tag, Header0}, returns = lqueue:in(Msg, Returns)}), Effects0} end; -return_one(Meta, MsgId, MsgNum, ?INDEX_MSG(RaftId, ?MSG(Header0, RawMsg)), +return_one(Meta, MsgId, _MsgNum, ?INDEX_MSG(RaftId, ?MSG(Header0, RawMsg)), #?MODULE{returns = Returns, consumers = Consumers, cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), - Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0), + Header = update_header(delivery_count, fun (C) -> C + 1 end, 1, Header0), IdxMsg0 = ?INDEX_MSG(RaftId, ?MSG(Header, RawMsg)), case maps:get(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> - DlMsg = {MsgNum, IdxMsg0}, + DlMsg = IdxMsg0, Effects = dead_letter_effects(delivery_limit, #{none => DlMsg}, State0, Effects0), complete(Meta, ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0); @@ -1543,7 +1544,7 @@ return_one(Meta, MsgId, MsgNum, ?INDEX_MSG(RaftId, ?MSG(Header0, RawMsg)), {add_bytes_return( Header, State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, - returns = lqueue:in({MsgNum, Msg}, Returns)}), + returns = lqueue:in(Msg, Returns)}), Effects0} end. 
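After this change an indexed in-memory message is literally an improper list. Pattern matches still read naturally through the macros, which is also why a later patch in this series adds -dialyzer(no_improper_lists) to the module; with made-up values and a size-only header:

    Msg = [1001 | [12 | <<"payload">>]],          %% ?INDEX_MSG(1001, ?MSG(12, <<"payload">>))
    ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg)) = Msg.
    %% RaftIdx = 1001, Header = 12, RawMsg = <<"payload">>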
@@ -1557,8 +1558,8 @@ return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); ({MsgId, ?PREFIX_DISK_MSG(_) = Msg}, {S, E}) -> return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); - ({MsgId, {MsgNum, Msg}}, {S, E}) -> - return_one(Meta, MsgId, MsgNum, Msg, S, E, ConsumerId) + ({MsgId, Msg}, {S, E}) -> + return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId) end, {State, Effects0}, Checked). %% checkout new messages to consumers @@ -1931,9 +1932,9 @@ dehydrate_state(#?MODULE{messages = Messages, [Header | Acc]; (?PREFIX_DISK_MSG(_) = Msg, Acc) -> [Msg | Acc]; - ({_, ?INDEX_MSG(_, ?DISK_MSG(Header))}, Acc) -> + (?INDEX_MSG(_, ?DISK_MSG(Header)), Acc) -> [?PREFIX_DISK_MSG(Header) | Acc]; - ({_, ?INDEX_MSG(_, ?MSG(Header, _))}, Acc) -> + (?INDEX_MSG(_, ?MSG(Header, _)), Acc) -> [Header | Acc] end, [], lqueue:to_list(Returns)), PrefRet = PrefRet0 ++ PrefRet1, @@ -1957,9 +1958,9 @@ dehydrate_state(#?MODULE{messages = Messages, dehydrate_messages(Msgs0, Acc0) -> {OutRes, Msgs} = lqueue:out(Msgs0), case OutRes of - {value, {_MsgId, ?INDEX_MSG(_, ?DISK_MSG(_) = Msg)}} -> + {value, ?INDEX_MSG(_, ?DISK_MSG(_) = Msg)} -> dehydrate_messages(Msgs, [Msg | Acc0]); - {value, {_MsgId, ?INDEX_MSG(_, ?MSG(Header, _))}} -> + {value, ?INDEX_MSG(_, ?MSG(Header, _))} -> dehydrate_messages(Msgs, [Header | Acc0]); empty -> lists:reverse(Acc0) @@ -1970,9 +1971,9 @@ dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> M; (_, ?PREFIX_DISK_MSG(_) = M) -> M; - (_, {_, ?INDEX_MSG(_, ?DISK_MSG(Header))}) -> + (_, ?INDEX_MSG(_, ?DISK_MSG(Header))) -> ?PREFIX_DISK_MSG(Header); - (_, {_, ?INDEX_MSG(_, ?MSG(Header, _))}) -> + (_, ?INDEX_MSG(_, ?MSG(Header, _))) -> ?PREFIX_MEM_MSG(Header) end, Checked0), Con#consumer{checked_out = Checked}. From 1c4f563db6920f4b6b153184fa18e43b22c8e1ab Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 21 Jun 2021 13:17:48 +0100 Subject: [PATCH 04/97] QQ memory use improvement The rabbit_fifo_index is now only used for messages that have moved from the messages queue to consumers/return queue. This reduces memory used per queued message. --- deps/rabbit/src/oqueue.erl | 178 +++++ deps/rabbit/src/rabbit_fifo.erl | 821 +++++++++++--------- deps/rabbit/src/rabbit_fifo.hrl | 32 +- deps/rabbit/src/rabbit_fifo_index.erl | 15 +- deps/rabbit/src/rabbit_fifo_v1.erl | 27 + deps/rabbit/src/rabbit_looking_glass.erl | 23 +- deps/rabbit/test/oqueue_SUITE.erl | 158 ++++ deps/rabbit/test/rabbit_fifo_SUITE.erl | 23 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 511 +++++++++++- 9 files changed, 1365 insertions(+), 423 deletions(-) create mode 100644 deps/rabbit/src/oqueue.erl create mode 100644 deps/rabbit/test/oqueue_SUITE.erl diff --git a/deps/rabbit/src/oqueue.erl b/deps/rabbit/src/oqueue.erl new file mode 100644 index 000000000000..7d6f559b8f35 --- /dev/null +++ b/deps/rabbit/src/oqueue.erl @@ -0,0 +1,178 @@ +-module(oqueue). + +-export([new/0, + %% O(1) when item is larger than largest item inserted + %% worst O(n) + in/2, + %% O(1) (amortised) + out/1, + %% fast when deleting in the order of insertion + %% worst O(n) + delete/2, + %% O(1) (amortised) + peek/1, + %% O(1) + len/1, + to_list/1, + from_list/1 + ]). + +-record(oqueue, {length = 0 :: non_neg_integer(), + rear = [] :: list(), + rear_deletes = #{} :: map(), + front = [] :: list(), + last_front_item :: undefined | term()}). + +-opaque oqueue() :: #oqueue{}. + +-export_type([oqueue/0]). + +-spec new() -> oqueue(). +new() -> #oqueue{}. 
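Before the function bodies below, a quick usage sketch of the oqueue API exported above; it is an ordered queue, so items come out in term order with the smallest first:

    Q0 = oqueue:new(),
    Q1 = oqueue:in(3, oqueue:in(1, oqueue:in(2, Q0))),
    {value, 1, _} = oqueue:peek(Q1),          %% smallest item is served first
    {{value, 1}, Q2} = oqueue:out(Q1),
    Q3 = oqueue:delete(3, Q2),
    [2] = oqueue:to_list(Q3),
    1 = oqueue:len(Q3).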
+ +-spec in(term(), oqueue()) -> oqueue(). +in(Item, #oqueue{length = Len, + front = [_ | _] = Front, + last_front_item = LastFrontItem} = Q) + when Item < LastFrontItem -> + Q#oqueue{length = Len + 1, + front = enqueue_front(Item, Front)}; +in(Item, #oqueue{length = Len, + rear = Rear} = Q) -> + Q#oqueue{length = Len + 1, + rear = enqueue_rear(Item, Rear)}. + +-spec out(oqueue()) -> + {empty | {value, term()}, oqueue()}. +out(#oqueue{length = Len, rear_deletes = Dels} = Q) + when Len - map_size(Dels) == 0 -> + {empty, Q}; +out(#oqueue{front = [Item], length = Len} = Q) -> + {{value, Item}, Q#oqueue{front = [], + last_front_item = undefined, + length = Len - 1}}; +out(#oqueue{front = [Item | Rem], length = Len} = Q) -> + {{value, Item}, Q#oqueue{front = Rem, + length = Len - 1}}; +out(#oqueue{front = []} = Q) -> + out(maybe_reverse(Q)). + +-spec delete(term(), oqueue()) -> + oqueue() | {error, not_found}. +delete(Item, #oqueue{length = Len, + last_front_item = LFI, + front = [_ | _] = Front0, + rear = Rear0, + rear_deletes = Dels0} = Q) -> + %% TODO: check if item is out of range to avoid scan + case Item > LFI of + true when map_size(Dels0) == 31 -> + Rear = Rear0 -- maps:keys(Dels0#{Item => Item}), + %% TODO we don't know all were actually deleted + Q#oqueue{rear = Rear, + rear_deletes = #{}, + length = Len - 32}; + %% item is not in front, scan rear list + %% TODO: this will walk the rear list in the least effective order + %% assuming most deletes will be from the front + % case catch remove(Item, Rear0) of + % not_found -> + % {error, not_found}; + % Rear -> + % Q#oqueue{rear = Rear, + % length = Len - 1} + % end; + true -> + %% cache delete + Q#oqueue{rear_deletes = Dels0#{Item => Item}}; + false -> + case catch remove(Item, Front0) of + not_found -> + {error, not_found}; + [] -> + maybe_reverse(Q#oqueue{front = [], + last_front_item = undefined, + length = Len - 1}); + Front when LFI == Item -> + %% the last item of the front list was removed but we still have + %% items in the front list, inefficient to take last but this should + %% be a moderately rare case given the use case of the oqueue + Q#oqueue{front = Front, + last_front_item = lists:last(Front), + length = Len - 1}; + Front -> + Q#oqueue{front = Front, + length = Len - 1} + end + end; +delete(_Item, #oqueue{front = [], rear = []}) -> + {error, not_found}; +delete(Item, #oqueue{front = []} = Q) -> + delete(Item, maybe_reverse(Q)). + +-spec peek(oqueue()) -> + empty | {value, term(), oqueue()}. +peek(#oqueue{front = [H | _]} = Q) -> + {value, H, Q}; +peek(#oqueue{rear = [_|_]} = Q) -> + %% the front is empty, reverse rear now + %% so that future peek ops are cheap + peek(maybe_reverse(Q)); +peek(_) -> + empty. + +-spec len(oqueue()) -> non_neg_integer(). +len(#oqueue{rear_deletes = Dels, length = Len}) -> + Len - map_size(Dels). + +-spec to_list(oqueue()) -> list(). +to_list(#oqueue{rear = Rear0, rear_deletes = Dels, front = Front}) -> + Rear = Rear0 -- maps:keys(Dels), + Front ++ lists:reverse(Rear). + +-spec from_list(list()) -> oqueue(). +from_list(List) -> + lists:foldl(fun in/2, new(), List). + +%% internal + +remove(_Item, []) -> + throw(not_found); +remove(Item, [Item | Tail]) -> + Tail; +remove(Item, [H | Tail]) -> + [H | remove(Item, Tail)]. + +% remove_all(Items, []) -> +% throw({empty_list, Items}); +% remove_all([], Tail) -> +% Tail; +% remove_all([Item | RemItems], [Item | Tail]) -> +% %% how to record an item was deleted? 
+% remove_all(RemItems, Tail); +% remove_all([Item | Items], [H | Tail]) when Item < H -> +% [H | remove_all(Item, Tail)]. + +enqueue_rear(Item, [H | T]) when Item < H-> + [H | enqueue_rear(Item, T)]; +enqueue_rear(Item, List) -> + [Item | List]. + +enqueue_front(Item, [H | T]) when Item > H-> + [H | enqueue_front(Item, T)]; +enqueue_front(Item, List) -> + [Item | List]. + +maybe_reverse(#oqueue{front = [], + length = Len, + rear_deletes = Dels, + rear = [_|_] = Rear0} = Q) -> + Rear = Rear0 -- maps:keys(Dels), + Q#oqueue{front = lists:reverse(Rear), + rear_deletes = #{}, + length = Len - map_size(Dels), + rear = [], + last_front_item = hd(Rear)}; +maybe_reverse(Q) -> + Q. + diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index c4a7dade318c..dce0e23a8215 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -14,6 +14,7 @@ -compile(inline_list_funcs). -compile(inline). -compile({no_auto_import, [apply/3]}). +-dialyzer(no_improper_lists). -include("rabbit_fifo.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -125,13 +126,6 @@ state/0, config/0]). --define(TUPLE(A, B), [A | B]). --define(MSG(Header, RawMsg), [Header | RawMsg]). --define(DISK_MSG(Header), [Header | empty]). --define(INDEX_MSG(Index, Msg), [Index | Msg]). --define(PREFIX_DISK_MSG(Header), ['$empty_msg' | Header]). --define(PREFIX_MEM_MSG(Header), ['$prefix_msg' | Header]). - -spec init(config()) -> state(). init(#{name := Name, queue_resource := Resource} = Conf) -> @@ -206,8 +200,6 @@ apply(Meta, #?MODULE{consumers = Cons0} = State) -> case Cons0 of #{ConsumerId := Con0} -> - % need to increment metrics before completing as any snapshot - % states taken need to include them complete_and_checkout(Meta, MsgIds, ConsumerId, Con0, [], State); _ -> @@ -332,13 +324,12 @@ apply(#{index := Index, end, {Reply, Effects2} = case Msg of - {RaftIdx, {Header, empty}} -> - %% TODO add here new log effect with reply + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)) -> {'$ra_no_reply', [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | Effects1]}; - _ -> - {{dequeue, {MsgId, Msg}, Ready-1}, Effects1} + ?INDEX_MSG(_, ?MSG(Header, Body)) -> + {{dequeue, {MsgId, {Header, Body}}, Ready-1}, Effects1} end, NotifyEffect = notify_decorators_effect(State4), @@ -363,17 +354,27 @@ apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, Priority, State0), checkout(Meta, State0, State1, [{monitor, process, Pid}]); apply(#{index := Index}, #purge{}, - #?MODULE{ra_indexes = Indexes0, + #?MODULE{messages_total = Tot, returns = Returns, - messages = Messages} = State0) -> + messages = Messages, + ra_indexes = Indexes0} = State0) -> Total = messages_ready(State0), - Indexes1 = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0, - [I || {_, {I, _}} <- lqueue:to_list(Messages)]), - Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1, - [I || {_, {I, _}} <- lqueue:to_list(Returns)]), + %% TODO: add an optimised version of oqueue:delete that takes a list + %% of items + Indexes1 = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc0); + (_, Acc) -> + Acc + end, Indexes0, lqueue:to_list(Returns)), + Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc0); + (_, Acc) -> + Acc + end, Indexes1, lqueue:to_list(Messages)), State1 = State0#?MODULE{ra_indexes = Indexes, messages = lqueue:new(), + messages_total = Tot - Total, returns 
= lqueue:new(), msg_bytes_enqueue = 0, prefix_msgs = {0, [], 0, []}, @@ -534,57 +535,128 @@ apply(_Meta, Cmd, State) -> rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), {State, ok, []}. -convert_v0_to_v1(V0State0) -> - V0State = rabbit_fifo_v0:normalize_for_v1(V0State0), - V0Msgs = rabbit_fifo_v0:get_field(messages, V0State), - V1Msgs = lqueue:from_list(lists:sort(maps:to_list(V0Msgs))), - V0Enqs = rabbit_fifo_v0:get_field(enqueuers, V0State), - V1Enqs = maps:map( - fun (_EPid, E) -> - #enqueuer{next_seqno = element(2, E), - pending = element(3, E), - status = element(4, E)} - end, V0Enqs), - V0Cons = rabbit_fifo_v0:get_field(consumers, V0State), - V1Cons = maps:map( - fun (_CId, C0) -> - %% add the priority field - list_to_tuple(tuple_to_list(C0) ++ [0]) - end, V0Cons), - V0SQ = rabbit_fifo_v0:get_field(service_queue, V0State), - V1SQ = priority_queue:from_list([{0, C} || C <- queue:to_list(V0SQ)]), - Cfg = #cfg{name = rabbit_fifo_v0:get_cfg_field(name, V0State), - resource = rabbit_fifo_v0:get_cfg_field(resource, V0State), - release_cursor_interval = rabbit_fifo_v0:get_cfg_field(release_cursor_interval, V0State), - dead_letter_handler = rabbit_fifo_v0:get_cfg_field(dead_letter_handler, V0State), - become_leader_handler = rabbit_fifo_v0:get_cfg_field(become_leader_handler, V0State), +% convert_v0_to_v1(V0State0) -> +% V0State = rabbit_fifo_v0:normalize_for_v1(V0State0), +% V0Msgs = rabbit_fifo_v0:get_field(messages, V0State), +% V1Msgs = lqueue:from_list(lists:sort(maps:to_list(V0Msgs))), +% V0Enqs = rabbit_fifo_v0:get_field(enqueuers, V0State), +% V1Enqs = maps:map( +% fun (_EPid, E) -> +% #enqueuer{next_seqno = element(2, E), +% pending = element(3, E), +% status = element(4, E)} +% end, V0Enqs), +% V0Cons = rabbit_fifo_v0:get_field(consumers, V0State), +% V1Cons = maps:map( +% fun (_CId, C0) -> +% %% add the priority field +% list_to_tuple(tuple_to_list(C0) ++ [0]) +% end, V0Cons), +% V0SQ = rabbit_fifo_v0:get_field(service_queue, V0State), +% V1SQ = priority_queue:from_list([{0, C} || C <- queue:to_list(V0SQ)]), +% Cfg = #cfg{name = rabbit_fifo_v0:get_cfg_field(name, V0State), +% resource = rabbit_fifo_v0:get_cfg_field(resource, V0State), +% release_cursor_interval = rabbit_fifo_v0:get_cfg_field(release_cursor_interval, V0State), +% dead_letter_handler = rabbit_fifo_v0:get_cfg_field(dead_letter_handler, V0State), +% become_leader_handler = rabbit_fifo_v0:get_cfg_field(become_leader_handler, V0State), +% %% TODO: what if policy enabling reject_publish was applied before conversion? 
+% overflow_strategy = drop_head, +% max_length = rabbit_fifo_v0:get_cfg_field(max_length, V0State), +% max_bytes = rabbit_fifo_v0:get_cfg_field(max_bytes, V0State), +% consumer_strategy = rabbit_fifo_v0:get_cfg_field(consumer_strategy, V0State), +% delivery_limit = rabbit_fifo_v0:get_cfg_field(delivery_limit, V0State), +% max_in_memory_length = rabbit_fifo_v0:get_cfg_field(max_in_memory_length, V0State), +% max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State) +% }, + +% #?MODULE{cfg = Cfg, +% messages = V1Msgs, +% next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State), +% returns = rabbit_fifo_v0:get_field(returns, V0State), +% enqueue_count = rabbit_fifo_v0:get_field(enqueue_count, V0State), +% enqueuers = V1Enqs, +% ra_indexes = rabbit_fifo_v0:get_field(ra_indexes, V0State), +% release_cursors = rabbit_fifo_v0:get_field(release_cursors, V0State), +% consumers = V1Cons, +% service_queue = V1SQ, +% prefix_msgs = rabbit_fifo_v0:get_field(prefix_msgs, V0State), +% msg_bytes_enqueue = rabbit_fifo_v0:get_field(msg_bytes_enqueue, V0State), +% msg_bytes_checkout = rabbit_fifo_v0:get_field(msg_bytes_checkout, V0State), +% waiting_consumers = rabbit_fifo_v0:get_field(waiting_consumers, V0State), +% msg_bytes_in_memory = rabbit_fifo_v0:get_field(msg_bytes_in_memory, V0State), +% msgs_ready_in_memory = rabbit_fifo_v0:get_field(msgs_ready_in_memory, V0State) +% }. + +convert_msg({RaftIdx, {Header, empty}}) -> + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); +convert_msg({RaftIdx, {Header, Msg}}) when is_integer(RaftIdx) -> + ?INDEX_MSG(RaftIdx, ?TUPLE(Header, Msg)); +convert_msg({'$empty_msg', Header}) -> + ?DISK_MSG(Header); +convert_msg({'$prefix_msg', Header}) -> + ?PREFIX_MEM_MSG(Header). + + +convert_v1_to_v2(V1State) -> + IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), + ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), + MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), + % EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), + ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), + %% remove all raft idx in messages from index + MessagesV2 = lqueue:foldl(fun ({_, IdxMsg}, Acc) -> + lqueue:in(convert_msg(IdxMsg), Acc) + end, lqueue:new(), MessagesV1), + ReturnsV2 = lqueue:foldl(fun ({_SeqId, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) + end, lqueue:new(), ReturnsV1), + + ConsumersV2 = maps:map( + fun (_, #consumer{checked_out = Ch} = C) -> + C#consumer{ + checked_out = maps:map( + fun (_, {_SeqId, IdxMsg}) -> + convert_msg(IdxMsg) + end, Ch)} + end, ConsumersV1), + + %% Then add all pending messages back into the index + Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), + resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), + release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), + dead_letter_handler = rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State), + become_leader_handler = rabbit_fifo_v1:get_cfg_field(become_leader_handler, V1State), %% TODO: what if policy enabling reject_publish was applied before conversion? 
- overflow_strategy = drop_head, - max_length = rabbit_fifo_v0:get_cfg_field(max_length, V0State), - max_bytes = rabbit_fifo_v0:get_cfg_field(max_bytes, V0State), - consumer_strategy = rabbit_fifo_v0:get_cfg_field(consumer_strategy, V0State), - delivery_limit = rabbit_fifo_v0:get_cfg_field(delivery_limit, V0State), - max_in_memory_length = rabbit_fifo_v0:get_cfg_field(max_in_memory_length, V0State), - max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State) + overflow_strategy = rabbit_fifo_v1:get_cfg_field(overflow_strategy, V1State), + max_length = rabbit_fifo_v1:get_cfg_field(max_length, V1State), + max_bytes = rabbit_fifo_v1:get_cfg_field(max_bytes, V1State), + consumer_strategy = rabbit_fifo_v1:get_cfg_field(consumer_strategy, V1State), + delivery_limit = rabbit_fifo_v1:get_cfg_field(delivery_limit, V1State), + max_in_memory_length = rabbit_fifo_v1:get_cfg_field(max_in_memory_length, V1State), + max_in_memory_bytes = rabbit_fifo_v1:get_cfg_field(max_in_memory_bytes, V1State), + expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) }, - - #?MODULE{cfg = Cfg, - messages = V1Msgs, - next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State), - returns = rabbit_fifo_v0:get_field(returns, V0State), - enqueue_count = rabbit_fifo_v0:get_field(enqueue_count, V0State), - enqueuers = V1Enqs, - ra_indexes = rabbit_fifo_v0:get_field(ra_indexes, V0State), - release_cursors = rabbit_fifo_v0:get_field(release_cursors, V0State), - consumers = V1Cons, - service_queue = V1SQ, - prefix_msgs = rabbit_fifo_v0:get_field(prefix_msgs, V0State), - msg_bytes_enqueue = rabbit_fifo_v0:get_field(msg_bytes_enqueue, V0State), - msg_bytes_checkout = rabbit_fifo_v0:get_field(msg_bytes_checkout, V0State), - waiting_consumers = rabbit_fifo_v0:get_field(waiting_consumers, V0State), - msg_bytes_in_memory = rabbit_fifo_v0:get_field(msg_bytes_in_memory, V0State), - msgs_ready_in_memory = rabbit_fifo_v0:get_field(msgs_ready_in_memory, V0State) - }. + #?MODULE{ + cfg = Cfg, + messages = MessagesV2, + messages_total = rabbit_fifo_v1:query_messages_total(V1State), + returns = ReturnsV2, + enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), + enqueuers = rabbit_fifo_v1:get_field(enqueuers, V1State), + ra_indexes = IndexesV1, + release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), + consumers = ConsumersV2, + service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), + prefix_msgs = rabbit_fifo_v1:get_field(prefix_msgs, V1State), + %% this is wrong + % returns = oqueue:from_list(lqueue:to_list(ReturnsV1)) + msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), + msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), + waiting_consumers = rabbit_fifo_v1:get_field(waiting_consumers, V1State), + msg_bytes_in_memory = rabbit_fifo_v1:get_field(msg_bytes_in_memory, V1State), + msgs_ready_in_memory = rabbit_fifo_v1:get_field(msgs_ready_in_memory, V1State), + last_active = rabbit_fifo_v1:get_field(last_active, V1State) + }. 
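Editor's sketch: the v1 to v2 conversion above is a field-by-field mapping. Every message container (messages, returns, each consumer's checked_out map) is walked through convert_msg/1, the cfg fields are copied across, and the new messages_total counter is seeded from the v1 totals query. Below is a minimal sketch of that shape using only the stdlib queue and maps modules, with made-up record names rather than the real rabbit_fifo state records.

-module(fifo_convert_sketch).
-export([convert/1]).

%% hypothetical state shapes, loosely modelled on the patch: v1 keeps
%% {RaftIdx, {Header, Body}} pairs, v2 keeps a compact per-message term
%% plus an explicit total counter
-record(v1, {messages = queue:new(), checked_out = #{}}).
-record(v2, {messages = queue:new(), checked_out = #{}, total = 0}).

convert(#v1{messages = Msgs1, checked_out = Checked1}) ->
    Msgs2 = queue_map(fun convert_msg/1, Msgs1),
    Checked2 = maps:map(fun (_MsgId, Msg) -> convert_msg(Msg) end, Checked1),
    #v2{messages = Msgs2,
        checked_out = Checked2,
        total = queue:len(Msgs2) + map_size(Checked2)}.

%% per-message conversion: keep only what the new version needs
convert_msg({RaftIdx, {Header, empty}}) -> {disk, RaftIdx, Header};
convert_msg({RaftIdx, {Header, Body}})  -> {mem, RaftIdx, Header, Body}.

%% map over a queue by round-tripping through a list
queue_map(F, Q) ->
    queue:from_list([F(Item) || Item <- queue:to_list(Q)]).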
purge_node(Meta, Node, State, Effects) -> lists:foldl(fun(Pid, {S0, E0}) -> @@ -730,7 +802,6 @@ overview(#?MODULE{consumers = Cons, enqueue_count = EnqCount, msg_bytes_enqueue = EnqueueBytes, msg_bytes_checkout = CheckoutBytes, - ra_indexes = Indexes, cfg = Cfg} = State) -> Conf = #{name => Cfg#cfg.name, resource => Cfg#cfg.resource, @@ -744,7 +815,7 @@ overview(#?MODULE{consumers = Cons, expires => Cfg#cfg.expires, delivery_limit => Cfg#cfg.delivery_limit }, - Smallest = rabbit_fifo_index:smallest(Indexes), + {Smallest, _} = smallest_raft_index(State), #{type => ?MODULE, config => Conf, num_consumers => maps:size(Cons), @@ -754,7 +825,7 @@ overview(#?MODULE{consumers = Cons, num_pending_messages => messages_pending(State), num_messages => messages_total(State), num_release_cursors => lqueue:len(Cursors), - release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], + release_cursors => [{I, messages_total(S)} || {_, I, S} <- lqueue:to_list(Cursors)], release_cursor_enqueue_counter => EnqCount, enqueue_message_bytes => EnqueueBytes, checkout_message_bytes => CheckoutBytes, @@ -765,9 +836,10 @@ overview(#?MODULE{consumers = Cons, get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> case Consumers of #{Cid := #consumer{checked_out = Checked}} -> - [{K, snd(snd(maps:get(K, Checked)))} - || K <- lists:seq(From, To), - maps:is_key(K, Checked)]; + [begin + ?INDEX_MSG(_, ?MSG(H, M)) = maps:get(K, Checked), + {K, {H, M}} + end || K <- lists:seq(From, To), maps:is_key(K, Checked)]; _ -> [] end. @@ -831,12 +903,12 @@ handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, Log0, MacState) -> case rabbit_fifo:query_peek(Pos, MacState) of - {ok, {Idx, {Header, empty}}} -> + {ok, ?INDEX_MSG(Idx, ?DISK_MSG(Header))} -> %% need to re-hydrate from the log {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), #enqueue{msg = Msg} = Cmd, {reply, {ok, {Header, Msg}}, Aux0, Log}; - {ok, {_Idx, {Header, Msg}}} -> + {ok, ?INDEX_MSG(_Idx, ?MSG(Header, Msg))} -> {reply, {ok, {Header, Msg}}, Aux0, Log0}; Err -> {reply, Err, Aux0, Log0} @@ -896,6 +968,8 @@ query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> RaIndexes. + % rabbit_fifo_index:append(Key, Arg2) + % oqueue:to_list(RaIndexes). query_consumer_count(#?MODULE{consumers = Consumers, waiting_consumers = WaitingConsumers}) -> @@ -990,7 +1064,7 @@ query_peek(Pos, State0) when Pos > 0 -> case take_next_msg(State0) of empty -> {error, no_message_at_pos}; - {{_Seq, IdxMsg}, _State} + {IdxMsg, _State} when Pos == 1 -> {ok, IdxMsg}; {_Msg, State} -> @@ -1030,13 +1104,14 @@ messages_pending(#?MODULE{enqueuers = Enqs}) -> messages_ready(#?MODULE{messages = M, prefix_msgs = {RCnt, _R, PCnt, _P}, returns = R}) -> - %% prefix messages will rarely have anything in them during normal - %% operations so length/1 is fine here lqueue:len(M) + lqueue:len(R) + RCnt + PCnt. -messages_total(#?MODULE{ra_indexes = I, - prefix_msgs = {RCnt, _R, PCnt, _P}}) -> - rabbit_fifo_index:size(I) + RCnt + PCnt. +messages_total(#?MODULE{messages = _M, + messages_total = Total, + ra_indexes = _Indexes, + prefix_msgs = {_RCnt, _R, _PCnt, _P}}) -> + Total. + % lqueue:len(M) + rabbit_fifo_index:size(Indexes) + RCnt + PCnt. 
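Editor's sketch: messages_total/1 now just reads a counter. The state carries a messages_total field that is incremented on enqueue and decremented as messages leave the queue, instead of being recomputed from rabbit_fifo_index:size/1 plus the prefix counts. A generic sketch of that counter discipline, with a hypothetical record rather than the real state:

-module(counted_queue_sketch).
-export([new/0, enq/2, deq/1, total/1]).

-record(q, {messages = queue:new() :: queue:queue(term()),
            total = 0 :: non_neg_integer()}).

new() -> #q{}.

enq(Msg, #q{messages = M, total = T} = Q) ->
    Q#q{messages = queue:in(Msg, M), total = T + 1}.

deq(#q{messages = M0, total = T} = Q) ->
    case queue:out(M0) of
        {{value, Msg}, M} -> {Msg, Q#q{messages = M, total = T - 1}};
        {empty, _}        -> empty
    end.

%% O(1); never recomputed by walking the queue or an index structure
total(#q{total = T}) -> T.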
update_use({inactive, _, _, _} = CUInfo, inactive) -> CUInfo; @@ -1190,32 +1265,39 @@ maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0 apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) -> case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of {ok, State1, Effects1} -> - State2 = append_to_master_index(RaftIdx, State1), + State2 = incr_enqueue_count(incr_total(State1)), {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects}; {duplicate, State, Effects} -> {State, ok, Effects} end. +decr_total(#?MODULE{messages_total = Tot} = State) -> + State#?MODULE{messages_total = Tot - 1}. + +incr_total(#?MODULE{messages_total = Tot} = State) -> + State#?MODULE{messages_total = Tot + 1}. + drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> case take_next_msg(State0) of - {FullMsg = ?INDEX_MSG(RaftIdxToDrop, ?MSG(Header, _) = Msg), - State1} -> - Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0), - State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}), + {?PREFIX_MEM_MSG(Header), State1} -> + State2 = subtract_in_memory_counts(Header, + add_bytes_drop(Header, State1)), + {decr_total(State2), Effects0}; + {?DISK_MSG(Header), State1} -> + State2 = add_bytes_drop(Header, State1), + {decr_total(State2), Effects0}; + {?INDEX_MSG(Idx, ?MSG(Header, _) = Msg) = FullMsg, State1} -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State2 = decr_total(add_bytes_drop(Header, State1)), State = case Msg of ?DISK_MSG(_) -> State2; - _ -> subtract_in_memory_counts(Header, State2) + _ -> + subtract_in_memory_counts(Header, State2) end, Effects = dead_letter_effects(maxlen, #{none => FullMsg}, State, Effects0), - {State, Effects}; - {?PREFIX_MEM_MSG(Header), State1} -> - State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)), - {State2, Effects0}; - {?PREFIX_DISK_MSG(Header), State1} -> - State2 = add_bytes_drop(Header, State1), - {State2, Effects0}; + {State#?MODULE{ra_indexes = Indexes}, Effects}; empty -> {State0, Effects0} end. @@ -1235,19 +1317,11 @@ enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages} = State0) -> ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} end, State = add_bytes_enqueue(Header, State1), - %% TODO: msg num isn't needed State#?MODULE{messages = lqueue:in(Msg, Messages)}. -append_to_master_index(RaftIdx, - #?MODULE{ra_indexes = Indexes0} = State0) -> - State = incr_enqueue_count(State0), - Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), - State#?MODULE{ra_indexes = Indexes}. - - incr_enqueue_count(#?MODULE{enqueue_count = EC, cfg = #cfg{release_cursor_interval = {_Base, C}} - } = State0) when EC >= C-> + } = State0) when EC >= C -> %% this will trigger a dehydrated version of the state to be stored %% at this raft index for potential future snapshot generation %% Q: Why don't we just stash the release cursor here? 
@@ -1261,18 +1335,17 @@ maybe_store_dehydrated_state(RaftIdx, #?MODULE{cfg = #cfg{release_cursor_interval = {Base, _}} = Cfg, - ra_indexes = Indexes, + ra_indexes = _Indexes, enqueue_count = 0, release_cursors = Cursors0} = State0) -> - case rabbit_fifo_index:exists(RaftIdx, Indexes) of - false -> - %% the incoming enqueue must already have been dropped + case messages_total(State0) of + 0 -> + %% message must have been immediately dropped State0; - true -> + Total -> Interval = case Base of 0 -> 0; _ -> - Total = messages_total(State0), min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) end, State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = @@ -1300,7 +1373,9 @@ maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) -> State = enqueue(RaftIdx, RawMsg, State0), {ok, State, Effects}; maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, - #?MODULE{enqueuers = Enqueuers0} = State0) -> + #?MODULE{enqueuers = Enqueuers0, + ra_indexes = Indexes0} = State0) -> + case maps:get(From, Enqueuers0, undefined) of undefined -> State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, @@ -1319,27 +1394,23 @@ maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, % out of order enqueue Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0], Enq = Enq0#enqueuer{pending = lists:sort(Pending)}, - {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, Effects0}; + %% if the enqueue it out of order we need to mark it in the + %% index + Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), + {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}, + ra_indexes = Indexes}, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo =< Next -> % duplicate delivery - remove the raft index from the ra_indexes % map as it was added earlier {duplicate, State0, Effects0} end. -snd(T) -> - element(2, T). - return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, Effects0, State0) -> {State1, Effects1} = maps:fold( - fun(MsgId, ?PREFIX_MEM_MSG(_) = Msg, {S0, E0}) -> - return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); - (MsgId, ?PREFIX_DISK_MSG(_) = Msg, {S0, E0}) -> - return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId); - (MsgId, Msg, {S0, E0}) -> - return_one(Meta, MsgId, 0, Msg, S0, E0, - ConsumerId) - end, {State0, Effects0}, Returned), + fun(MsgId, Msg, {S0, E0}) -> + return_one(Meta, MsgId, Msg, S0, E0, ConsumerId) + end, {State0, Effects0}, Returned), State2 = case State1#?MODULE.consumers of #{ConsumerId := Con0} -> @@ -1353,27 +1424,28 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, update_smallest_raft_index(IncomingRaftIdx, State, Effects). 
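Editor's sketch: maybe_enqueue/6 parks an enqueue whose seqno is ahead of the enqueuer's next_seqno in a sorted pending list, and with this patch it also appends the raft index of such an out-of-order enqueue to ra_indexes. Below is a simplified, self-contained model of just the seqno reordering; it uses a hypothetical record instead of the real #enqueuer{} and leaves out the raft index and effects handling, which differ in detail in the real code.

-module(seqno_reorder_sketch).
-export([new/0, handle/3]).

-record(enq, {next_seqno = 1 :: pos_integer(),
              pending = [] :: [{pos_integer(), term()}]}).

new() -> #enq{}.

%% returns {MsgsReleasedInOrder, NewEnqueuerState}
handle(Seq, Msg, #enq{next_seqno = Seq} = E) ->
    %% expected seqno: release it and any parked follow-ups
    flush(E#enq{next_seqno = Seq + 1}, [Msg]);
handle(Seq, Msg, #enq{next_seqno = Next, pending = Pending} = E)
  when Seq > Next ->
    %% ahead of the expected seqno: park it, keeping pending sorted
    {[], E#enq{pending = lists:sort([{Seq, Msg} | Pending])}};
handle(_Seq, _Msg, E) ->
    %% below next_seqno: duplicate delivery, drop it
    {[], E}.

flush(#enq{next_seqno = Next, pending = [{Next, Msg} | Rem]} = E, Acc) ->
    flush(E#enq{next_seqno = Next + 1, pending = Rem}, [Msg | Acc]);
flush(E, Acc) ->
    {lists:reverse(Acc), E}.

For example, after seqno 1 has been handled, handling seqno 3 parks its message, and the later seqno 2 releases both messages in order.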
% used to processes messages that are finished -complete(Meta, ConsumerId, Discarded, +complete(Meta, ConsumerId, DiscardedMsgIds, #consumer{checked_out = Checked} = Con0, Effects, - #?MODULE{ra_indexes = Indexes0} = State0) -> - %% TODO optimise use of Discarded map here - MsgRaftIdxs = [RIdx || {_, ?INDEX_MSG(RIdx, _)} <- maps:values(Discarded)], + #?MODULE{messages_total = Tot, + ra_indexes = Indexes0} = State0) -> %% credit_mode = simple_prefetch should automatically top-up credit %% as messages are simple_prefetch or otherwise returned - Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked), + Discarded = maps:with(DiscardedMsgIds, Checked), + Con = Con0#consumer{checked_out = maps:without(DiscardedMsgIds, Checked), credit = increase_credit(Con0, map_size(Discarded))}, State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0, - MsgRaftIdxs), - %% TODO: use maps:fold instead - State2 = lists:foldl(fun({_, ?INDEX_MSG(_, ?MSG(Header, _))}, Acc) -> - add_bytes_settle(Header, Acc); - (?PREFIX_MEM_MSG(Header), Acc) -> - add_bytes_settle(Header, Acc); - (?PREFIX_DISK_MSG(Header), Acc) -> - add_bytes_settle(Header, Acc) - end, State1, maps:values(Discarded)), - {State2#?MODULE{ra_indexes = Indexes}, Effects}. + %% TODO: optimise by passing a list to rabbit_fifo_index + Indexes = maps:fold(fun (_, ?INDEX_MSG(I, _), Acc0) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc0); + (_, _, Acc) -> + Acc + end, Indexes0, Discarded), + State = maps:fold(fun(_, Msg, Acc) -> + add_bytes_settle( + get_msg_header(Msg), Acc) + end, State1, Discarded), + {State#?MODULE{messages_total = Tot - length(DiscardedMsgIds), + ra_indexes = Indexes}, Effects}. increase_credit(#consumer{lifetime = once, credit = Credit}, _) -> @@ -1388,10 +1460,9 @@ increase_credit(#consumer{credit = Current}, Credit) -> Current + Credit. complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, - #consumer{checked_out = Checked0} = Con0, + #consumer{} = Con0, Effects0, State0) -> - Discarded = maps:with(MsgIds, Checked0), - {State1, Effects1} = complete(Meta, ConsumerId, Discarded, Con0, + {State1, Effects1} = complete(Meta, ConsumerId, MsgIds, Con0, Effects0, State0), {State, ok, Effects} = checkout(Meta, State0, State1, Effects1, false), update_smallest_raft_index(IncomingRaftIdx, State, Effects). 
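Editor's note: complete/6 now takes the settled message ids and derives the discarded messages itself, using maps:with/2 to pick the settled entries out of the consumer's checked_out map (for byte accounting and index cleanup) and maps:without/2 to leave the rest checked out. In the shell the split looks like this (the message ids and atom placeholders are made up):

1> Checked = #{0 => msg_a, 1 => msg_b, 2 => msg_c}.
#{0 => msg_a,1 => msg_b,2 => msg_c}
2> Settled = [0, 2].
[0,2]
3> maps:with(Settled, Checked).    %% what complete/6 folds over
#{0 => msg_a,2 => msg_c}
4> maps:without(Settled, Checked). %% what stays checked out
#{1 => msg_b}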
@@ -1404,7 +1475,7 @@ dead_letter_effects(Reason, Discarded, #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}}, Effects) -> RaftIdxs = maps:fold( - fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) -> + fun (_, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header)), Acc) -> [RaftIdx | Acc]; (_, _, Acc) -> Acc @@ -1413,10 +1484,10 @@ dead_letter_effects(Reason, Discarded, fun (Log) -> Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), DeadLetters = maps:fold( - fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) -> + fun (_, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header)), Acc) -> {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), [{Reason, Msg} | Acc]; - (_, {_, {_, {_Header, Msg}}}, Acc) -> + (_, ?INDEX_MSG(_, ?MSG(_Header, Msg)), Acc) -> [{Reason, Msg} | Acc]; (_, _, Acc) -> Acc @@ -1435,11 +1506,13 @@ update_smallest_raft_index(Idx, State, Effects) -> update_smallest_raft_index(IncomingRaftIdx, Reply, #?MODULE{cfg = Cfg, - ra_indexes = Indexes, - release_cursors = Cursors0} = State0, + release_cursors = Cursors0} = State00, Effects) -> - case rabbit_fifo_index:size(Indexes) of - 0 -> + %% TODO: optimise + {Smallest, State0} = smallest_raft_index(State00), + Total = messages_total(State0), + case Smallest of + undefined when Total == 0 -> % there are no messages on queue anymore and no pending enqueues % we can forward release_cursor all the way until % the last received command, hooray @@ -1450,11 +1523,12 @@ update_smallest_raft_index(IncomingRaftIdx, Reply, release_cursors = lqueue:new(), enqueue_count = 0}, {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; + undefined -> + {State0, Reply, Effects}; _ -> - Smallest = rabbit_fifo_index:smallest(Indexes), case find_next_cursor(Smallest, Cursors0) of - {empty, Cursors} -> - {State0#?MODULE{release_cursors = Cursors}, Reply, Effects}; + empty -> + {State0, Reply, Effects}; {Cursor, Cursors} -> %% we can emit a release cursor when we've passed the smallest %% release cursor available. @@ -1471,103 +1545,109 @@ find_next_cursor(Smallest, Cursors0, Potential) -> {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest -> %% we found one but it may not be the largest one find_next_cursor(Smallest, Cursors, Cursor); + _ when Potential == empty -> + empty; _ -> {Potential, Cursors0} end. +update_msg_header(Key, Fun, Def, ?INDEX_MSG(Idx, ?MSG(Header, Body))) -> + ?INDEX_MSG(Idx, ?MSG(update_header(Key, Fun, Def, Header), Body)); +update_msg_header(Key, Fun, Def, ?DISK_MSG(Header)) -> + ?DISK_MSG(update_header(Key, Fun, Def, Header)); +update_msg_header(Key, Fun, Def, ?PREFIX_MEM_MSG(Header)) -> + ?PREFIX_MEM_MSG(update_header(Key, Fun, Def, Header)). + update_header(Key, UpdateFun, Default, Header) when is_integer(Header) -> update_header(Key, UpdateFun, Default, #{size => Header}); update_header(Key, UpdateFun, Default, Header) -> maps:update_with(Key, UpdateFun, Default, Header). +% get_msg_header(Key, ?INDEX_MSG(_Idx, ?MSG(Header, _Body))) -> +% get_header(Key, Header); +% get_msg_header(Key, ?DISK_MSG(Header)) -> +% get_header(Key, Header); +% get_msg_header(Key, ?PREFIX_MEM_MSG(Header)) -> +% get_header(Key, Header). + +get_msg_header(?INDEX_MSG(_Idx, ?MSG(Header, _Body))) -> + Header; +get_msg_header(?DISK_MSG(Header)) -> + Header; +get_msg_header(?PREFIX_MEM_MSG(Header)) -> + Header. + +get_header(size, Header) + when is_integer(Header) -> + Header; +get_header(_Key, Header) when is_integer(Header) -> + undefined; +get_header(Key, Header) when is_map(Header) -> + maps:get(Key, Header, undefined). 
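Editor's note: the header helpers above keep the common case cheap. A header that only carries the payload size stays a bare integer, and the first time any other key is needed (delivery_count here) update_header/4 promotes it to a map. Calling them directly shows the semantics (they are module-internal, so this shell session is purely illustrative):

1> H0 = 12.                          %% condensed header: size only
12
2> H1 = update_header(delivery_count, fun (C) -> C + 1 end, 1, H0).
#{delivery_count => 1,size => 12}
3> get_header(size, H0).
12
4> get_header(delivery_count, H0).   %% absent on a condensed header
undefined
5> get_header(delivery_count, H1).
1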
-return_one(Meta, MsgId, 0, ?TUPLE(Tag, Header0), - #?MODULE{returns = Returns, - consumers = Consumers, - cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, - Effects0, ConsumerId) - when Tag == '$prefix_msg'; Tag == '$empty_msg' -> - #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), - Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0), - Msg0 = ?TUPLE(Tag, Header), - case maps:get(delivery_count, Header) of - DeliveryCount when DeliveryCount > DeliveryLimit -> - complete(Meta, ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0); - _ -> - %% this should not affect the release cursor in any way - Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, - {Msg, State1} = case Tag of - '$empty_msg' -> - {Msg0, State0}; - _ -> case evaluate_memory_limit(Header, State0) of - true -> - {?PREFIX_DISK_MSG(Header), State0}; - false -> - {Msg0, add_in_memory_counts(Header, State0)} - end - end, - {add_bytes_return( - Header, - State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, - returns = lqueue:in(Msg, Returns)}), - Effects0} - end; -return_one(Meta, MsgId, _MsgNum, ?INDEX_MSG(RaftId, ?MSG(Header0, RawMsg)), +return_one(Meta, MsgId, Msg0, #?MODULE{returns = Returns, consumers = Consumers, cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), - Header = update_header(delivery_count, fun (C) -> C + 1 end, 1, Header0), - IdxMsg0 = ?INDEX_MSG(RaftId, ?MSG(Header, RawMsg)), - case maps:get(delivery_count, Header) of + Msg = update_msg_header(delivery_count, fun (C) -> C + 1 end, 1, Msg0), + Header = get_msg_header(Msg), + case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> - DlMsg = IdxMsg0, - Effects = dead_letter_effects(delivery_limit, #{none => DlMsg}, + %% TODO: don't do for prefix msgs + Effects = dead_letter_effects(delivery_limit, #{none => Msg}, State0, Effects0), - complete(Meta, ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0); + complete(Meta, ConsumerId, [MsgId], Con0, Effects, State0); _ -> Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, - %% this should not affect the release cursor in any way - {Msg, State1} = case RawMsg of - 'empty' -> - {IdxMsg0, State0}; - _ -> - case evaluate_memory_limit(Header, State0) of - true -> - {?INDEX_MSG(RaftId, ?DISK_MSG(Header)), State0}; - false -> - {IdxMsg0, add_in_memory_counts(Header, State0)} - end - end, + + {RtnMsg, State1} = case is_disk_msg(Msg) of + true -> + {Msg, State0}; + false -> + case evaluate_memory_limit(Header, State0) of + true -> + {to_disk_msg(Msg), State0}; + false -> + {Msg, add_in_memory_counts(Header, State0)} + end + end, {add_bytes_return( Header, State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, - returns = lqueue:in(Msg, Returns)}), + returns = lqueue:in(RtnMsg, Returns)}), Effects0} end. +is_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_))) when is_integer(RaftIdx) -> + true; +is_disk_msg(?DISK_MSG(_)) -> + true; +is_disk_msg(_) -> + false. + +to_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_)) = Msg) when is_integer(RaftIdx) -> + Msg; +to_disk_msg(?INDEX_MSG(RaftIdx, ?MSG(Header, _))) when is_integer(RaftIdx) -> + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); +to_disk_msg(?PREFIX_MEM_MSG(Header)) -> + ?DISK_MSG(Header). 
+ return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, - #consumer{checked_out = Checked0} = Con) -> - %% need to sort the list so that we return messages in the order - %% they were checked out - Checked = lists:sort(maps:to_list(Checked0)), + #consumer{checked_out = Checked} = Con) -> State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, - lists:foldl(fun ({MsgId, ?PREFIX_MEM_MSG(_) = Msg}, {S, E}) -> - return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); - ({MsgId, ?PREFIX_DISK_MSG(_) = Msg}, {S, E}) -> - return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); - ({MsgId, Msg}, {S, E}) -> - return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId) - end, {State, Effects0}, Checked). + lists:foldl(fun ({MsgId, Msg}, {S, E}) -> + return_one(Meta, MsgId, Msg, S, E, ConsumerId) + end, {State, Effects0}, lists:sort(maps:to_list(Checked))). %% checkout new messages to consumers checkout(Meta, OldState, State, Effects) -> checkout(Meta, OldState, State, Effects, true). -checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, - Effects0, HandleConsumerChanges) -> +checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, + State0, Effects0, HandleConsumerChanges) -> {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0), Effects0, #{}), case evaluate_limit(Index, false, OldState, State1, Effects1) of @@ -1589,22 +1669,27 @@ checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldS end end. -checkout0(Meta, {success, ConsumerId, MsgId, {RaftIdx, {Header, 'empty'}}, State}, - Effects, SendAcc0) -> +checkout0(Meta, {success, ConsumerId, MsgId, + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), State}, + Effects, SendAcc0) when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {MsgId, Header}}, SendAcc = maps:update_with(ConsumerId, fun ({InMem, LogMsgs}) -> {InMem, [DelMsg | LogMsgs]} end, {[], [DelMsg]}, SendAcc0), checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc); -checkout0(Meta, {success, ConsumerId, MsgId, Msg, State}, Effects, - SendAcc0) -> - DelMsg = {MsgId, Msg}, +checkout0(Meta, {success, ConsumerId, MsgId, + ?INDEX_MSG(Idx, ?MSG(Header, Msg)), State}, Effects, + SendAcc0) when is_integer(Idx) -> + DelMsg = {MsgId, {Header, Msg}}, SendAcc = maps:update_with(ConsumerId, fun ({InMem, LogMsgs}) -> {[DelMsg | InMem], LogMsgs} end, {[DelMsg], []}, SendAcc0), checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc); +checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), State}, Effects, + SendAcc) -> + checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc); checkout0(_Meta, {Activity, State0}, Effects0, SendAcc) -> Effects1 = case Activity of nochange -> @@ -1673,12 +1758,12 @@ evaluate_memory_limit(_Header, false; evaluate_memory_limit(#{size := Size}, State) -> evaluate_memory_limit(Size, State); -evaluate_memory_limit(Size, +evaluate_memory_limit(Header, #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength, max_in_memory_bytes = MaxBytes}, msg_bytes_in_memory = Bytes, - msgs_ready_in_memory = Length}) - when is_integer(Size) -> + msgs_ready_in_memory = Length}) -> + Size = get_header(size, Header), (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes). append_delivery_effects(Effects0, AccMap) when map_size(AccMap) == 0 -> @@ -1688,7 +1773,8 @@ append_delivery_effects(Effects0, AccMap) -> [{aux, active} | maps:fold(fun (C, {InMemMsgs, LogMsgs}, Ef) -> [delivery_effect(C, lists:reverse(LogMsgs), InMemMsgs) | Ef] - end, Effects0, AccMap)]. 
+ end, Effects0, AccMap) + ]. %% next message is determined as follows: %% First we check if there are are prefex returns @@ -1698,44 +1784,34 @@ append_delivery_effects(Effects0, AccMap) -> %% %% When we return it is always done to the current return queue %% for both prefix messages and current messages -take_next_msg(#?MODULE{prefix_msgs = {R, P}} = State) -> - %% conversion - take_next_msg(State#?MODULE{prefix_msgs = {length(R), R, length(P), P}}); -take_next_msg(#?MODULE{prefix_msgs = {NumR, [?PREFIX_DISK_MSG(_) = Msg | Rem], +take_next_msg(#?MODULE{prefix_msgs = {NumR, [Msg | Rem], NumP, P}} = State) -> %% there are prefix returns, these should be served first {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; -take_next_msg(#?MODULE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) -> - %% there are prefix returns, these should be served first - {?PREFIX_MEM_MSG(Header), - State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; -take_next_msg(#?MODULE{returns = Returns, +take_next_msg(#?MODULE{returns = Returns0, messages = Messages0, + ra_indexes = Indexes0, prefix_msgs = {NumR, R, NumP, P}} = State) -> - %% use peek rather than out there as the most likely case is an empty - %% queue - case lqueue:get(Returns, empty) of - empty when P == [] -> + case lqueue:out(Returns0) of + {{value, NextMsg}, Returns} -> + {NextMsg, State#?MODULE{returns = Returns}}; + {empty, _} when P == [] -> case lqueue:out(Messages0) of {empty, _} -> empty; - {{value, {_, _} = SeqMsg}, Messages} -> - {SeqMsg, State#?MODULE{messages = Messages}} - end; - empty -> - [Msg | Rem] = P, - case Msg of - ?DISK_MSG(Header) -> - %% There are prefix msgs - {?PREFIX_DISK_MSG(Header), - State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; - Header -> - {{'$prefix_msg', Header}, - State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} + {{value, ?INDEX_MSG(RaftIdx, _) = IndexMsg}, Messages} -> + %% add index here + Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), + {IndexMsg, State#?MODULE{messages = Messages, + ra_indexes = Indexes}} end; - NextMsg -> - {NextMsg, - State#?MODULE{returns = lqueue:drop(Returns)}} + {empty, _} -> + case P of + [?PREFIX_MEM_MSG(_Header) = Msg | Rem] -> + {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; + [?DISK_MSG(_Header) = Msg | Rem] -> + {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} + end end. 
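Editor's sketch: take_next_msg/1 now serves the returns queue ahead of the main messages queue (appending the raft index as a message leaves messages), with prefix returns handled before both and prefix messages only when everything else is empty. A stripped-down sketch of just the returns-before-messages precedence, using the stdlib queue module and a plain map for state, with no prefix or index handling:

-module(take_next_sketch).
-export([take_next/1]).

%% hypothetical two-queue state: returned messages win over fresh ones
take_next(#{returns := R0, messages := M0} = State) ->
    case queue:out(R0) of
        {{value, Msg}, R} ->
            {Msg, State#{returns := R}};
        {empty, _} ->
            case queue:out(M0) of
                {{value, Msg}, M} ->
                    {Msg, State#{messages := M}};
                {empty, _} ->
                    empty
            end
    end.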
delivery_effect({CTag, CPid}, [], InMemMsgs) -> @@ -1794,28 +1870,21 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, next_msg_id = Next + 1, credit = Credit - 1, delivery_count = DelCnt + 1}, - State1 = update_or_remove_sub(Meta, - ConsumerId, Con, + State1 = update_or_remove_sub( + Meta, ConsumerId, Con, State0#?MODULE{service_queue = SQ1}), - {State, Msg} = - case ConsumerMsg of - ?PREFIX_MEM_MSG(Header) -> - {subtract_in_memory_counts( - Header, add_bytes_checkout(Header, State1)), - ConsumerMsg}; - ?PREFIX_DISK_MSG(Header) -> - {add_bytes_checkout(Header, State1), - ConsumerMsg}; - {_, {_, {Header, 'empty'}} = M} -> - {add_bytes_checkout(Header, State1), - M}; - {_, {_, {Header, _} = M}} -> - {subtract_in_memory_counts( - Header, - add_bytes_checkout(Header, State1)), - M} - end, - {success, ConsumerId, Next, Msg, State} + Header = get_msg_header(ConsumerMsg), + State = case is_disk_msg(ConsumerMsg) of + true -> + add_bytes_checkout(Header, State1); + false -> + subtract_in_memory_counts( + Header, add_bytes_checkout(Header, State1)) + end, + {success, ConsumerId, Next, ConsumerMsg, State}; + error -> + %% consumer did not exist but was queued, recurse + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}) end; empty -> {nochange, InitState} @@ -1876,8 +1945,10 @@ update_consumer(ConsumerId, Meta, Spec, Priority, update_consumer(ConsumerId, Meta, Spec, Priority, #?MODULE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}} = State0) - when map_size(Cons0) == 0 -> - %% single active consumer on, no one is consuming yet + when map_size(Cons0) == 0 orelse + is_map_key(ConsumerId, Cons0) -> + %% single active consumer on, no one is consuming yet or + %% the currently active consumer is the same update_consumer0(ConsumerId, Meta, Spec, Priority, State0); update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority, #?MODULE{cfg = #cfg{consumer_strategy = single_active}, @@ -1921,6 +1992,17 @@ maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, %% creates a dehydrated version of the current state to be cached and %% potentially used to for a snaphot at a later point +dehydrate_state(#?MODULE{msg_bytes_in_memory = 0, + cfg = #cfg{max_length = 0}, + consumers = Consumers} = State) -> + %% no messages are kept in memory, no need to + %% overly mutate the current state apart from removing indexes and cursors + State#?MODULE{ + ra_indexes = rabbit_fifo_index:empty(), + consumers = maps:map(fun (_, C) -> + dehydrate_consumer(C) + end, Consumers), + release_cursors = lqueue:new()}; dehydrate_state(#?MODULE{messages = Messages, consumers = Consumers, returns = Returns, @@ -1928,17 +2010,11 @@ dehydrate_state(#?MODULE{messages = Messages, waiting_consumers = Waiting0} = State) -> RCnt = lqueue:len(Returns), %% TODO: optimise this function as far as possible - PrefRet1 = lists:foldr(fun (?PREFIX_MEM_MSG(Header), Acc) -> - [Header | Acc]; - (?PREFIX_DISK_MSG(_) = Msg, Acc) -> - [Msg | Acc]; - (?INDEX_MSG(_, ?DISK_MSG(Header)), Acc) -> - [?PREFIX_DISK_MSG(Header) | Acc]; - (?INDEX_MSG(_, ?MSG(Header, _)), Acc) -> - [Header | Acc] + PrefRet1 = lists:foldr(fun (M, Acc) -> + [dehydrate_message(M) | Acc] end, [], lqueue:to_list(Returns)), PrefRet = PrefRet0 ++ PrefRet1, - PrefMsgsSuff = dehydrate_messages(Messages, []), + PrefMsgsSuff = dehydrate_messages(Messages), %% prefix messages are not populated in normal operation only after %% recovering from a snapshot PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, @@ -1954,35 +2030,41 @@ dehydrate_state(#?MODULE{messages = 
Messages, PPCnt + lqueue:len(Messages), PrefMsgs}, waiting_consumers = Waiting}. -%% TODO make body recursive to avoid allocating lists:reverse call -dehydrate_messages(Msgs0, Acc0) -> +dehydrate_messages(Msgs0) -> {OutRes, Msgs} = lqueue:out(Msgs0), case OutRes of - {value, ?INDEX_MSG(_, ?DISK_MSG(_) = Msg)} -> - dehydrate_messages(Msgs, [Msg | Acc0]); - {value, ?INDEX_MSG(_, ?MSG(Header, _))} -> - dehydrate_messages(Msgs, [Header | Acc0]); + {value, Msg} -> + [dehydrate_message(Msg) | dehydrate_messages(Msgs)]; empty -> - lists:reverse(Acc0) + [] end. dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> - Checked = maps:map(fun (_, ?PREFIX_MEM_MSG(_) = M) -> - M; - (_, ?PREFIX_DISK_MSG(_) = M) -> - M; - (_, ?INDEX_MSG(_, ?DISK_MSG(Header))) -> - ?PREFIX_DISK_MSG(Header); - (_, ?INDEX_MSG(_, ?MSG(Header, _))) -> - ?PREFIX_MEM_MSG(Header) + Checked = maps:map(fun (_, M) -> + dehydrate_message(M) end, Checked0), Con#consumer{checked_out = Checked}. +dehydrate_message(?PREFIX_MEM_MSG(_) = M) -> + M; +dehydrate_message(?DISK_MSG(_) = M) -> + M; +dehydrate_message(?INDEX_MSG(_Idx, ?DISK_MSG(_Header) = Msg)) -> + %% use disk msgs directly as prefix messages + Msg; +dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> + ?PREFIX_MEM_MSG(Header). + %% make the state suitable for equality comparison -normalize(#?MODULE{messages = Messages, +normalize(#?MODULE{ra_indexes = _Indexes, + returns = Returns, + messages = Messages, release_cursors = Cursors} = State) -> - State#?MODULE{messages = lqueue:from_list(lqueue:to_list(Messages)), - release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}. + State#?MODULE{ + % ra_indexes = oqueue:from_list(oqueue:to_list(Indexes)), + returns = lqueue:from_list(lqueue:to_list(Returns)), + messages = lqueue:from_list(lqueue:to_list(Messages)), + release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}. is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> @@ -2054,81 +2136,63 @@ make_purge_nodes(Nodes) -> make_update_config(Config) -> #update_config{config = Config}. -add_bytes_enqueue(Bytes, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_enqueue = Enqueue + Bytes}; -add_bytes_enqueue(#{size := Bytes}, State) -> - add_bytes_enqueue(Bytes, State). - -add_bytes_drop(Bytes, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_enqueue = Enqueue - Bytes}; -add_bytes_drop(#{size := Bytes}, State) -> - add_bytes_drop(Bytes, State). - -add_bytes_checkout(Bytes, +add_bytes_enqueue(Header, + #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_enqueue = Enqueue + Size}. + +add_bytes_drop(Header, + #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_enqueue = Enqueue - Size}. + + +add_bytes_checkout(Header, #?MODULE{msg_bytes_checkout = Checkout, - msg_bytes_enqueue = Enqueue } = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_checkout = Checkout + Bytes, - msg_bytes_enqueue = Enqueue - Bytes}; -add_bytes_checkout(#{size := Bytes}, State) -> - add_bytes_checkout(Bytes, State). - -add_bytes_settle(Bytes, - #?MODULE{msg_bytes_checkout = Checkout} = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_checkout = Checkout - Bytes}; -add_bytes_settle(#{size := Bytes}, State) -> - add_bytes_settle(Bytes, State). 
- -add_bytes_return(Bytes, + msg_bytes_enqueue = Enqueue } = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_checkout = Checkout + Size, + msg_bytes_enqueue = Enqueue - Size}. + +add_bytes_settle(Header, + #?MODULE{msg_bytes_checkout = Checkout} = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_checkout = Checkout - Size}. + +add_bytes_return(Header, #?MODULE{msg_bytes_checkout = Checkout, - msg_bytes_enqueue = Enqueue} = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_checkout = Checkout - Bytes, - msg_bytes_enqueue = Enqueue + Bytes}; -add_bytes_return(#{size := Bytes}, State) -> - add_bytes_return(Bytes, State). - -add_in_memory_counts(Bytes, + msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_checkout = Checkout - Size, + msg_bytes_enqueue = Enqueue + Size}. + +add_in_memory_counts(Header, #?MODULE{msg_bytes_in_memory = InMemoryBytes, - msgs_ready_in_memory = InMemoryCount} = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Bytes, - msgs_ready_in_memory = InMemoryCount + 1}; -add_in_memory_counts(#{size := Bytes}, State) -> - add_in_memory_counts(Bytes, State). - -subtract_in_memory_counts(Bytes, + msgs_ready_in_memory = InMemoryCount} = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Size, + msgs_ready_in_memory = InMemoryCount + 1}. + +subtract_in_memory_counts(Header, #?MODULE{msg_bytes_in_memory = InMemoryBytes, - msgs_ready_in_memory = InMemoryCount} = State) - when is_integer(Bytes) -> - State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Bytes, - msgs_ready_in_memory = InMemoryCount - 1}; -subtract_in_memory_counts(#{size := Bytes}, State) -> - subtract_in_memory_counts(Bytes, State). + msgs_ready_in_memory = InMemoryCount} = State) -> + Size = get_header(size, Header), + State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Size, + msgs_ready_in_memory = InMemoryCount - 1}. message_size(#basic_message{content = Content}) -> #content{payload_fragments_rev = PFR} = Content, iolist_size(PFR); -message_size(?PREFIX_MEM_MSG(H)) -> - get_size_from_header(H); -message_size(?PREFIX_DISK_MSG(H)) -> - get_size_from_header(H); +message_size(?PREFIX_MEM_MSG(Header)) -> + get_header(size, Header); +message_size(?DISK_MSG(Header)) -> + get_header(size, Header); message_size(B) when is_binary(B) -> byte_size(B); message_size(Msg) -> %% probably only hit this for testing so ok to use erts_debug erts_debug:size(Msg). -get_size_from_header(Size) when is_integer(Size) -> - Size; -get_size_from_header(#{size := B}) -> - B. - all_nodes(#?MODULE{consumers = Cons0, enqueuers = Enqs0, @@ -2223,7 +2287,28 @@ notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> convert(To, To, State0) -> State0; convert(0, To, State0) -> - convert(1, To, convert_v0_to_v1(State0)); + convert(1, To, rabbit_fifo_v1:convert_v0_to_v1(State0)); convert(1, To, State0) -> - %% no conversion yet - convert(2, To, State0). + convert(2, To, convert_v1_to_v2(State0)). 
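Editor's sketch: the updated convert/3 above always moves one machine version at a time, so a version 0 snapshot reaches version 2 via rabbit_fifo_v1:convert_v0_to_v1/1 followed by convert_v1_to_v2/1. The same chain can be read as a fold over per-step converters; here is a sketch with placeholder step functions (the identity functions below stand in for the real conversions):

-module(version_chain_sketch).
-export([convert/3]).

%% placeholder per-version steps; the real ones are
%% rabbit_fifo_v1:convert_v0_to_v1/1 and convert_v1_to_v2/1
v0_to_v1(State) -> State.
v1_to_v2(State) -> State.

%% same effect as the recursive convert/3 in the patch: apply the
%% adjacent-version converters between From and To, one step at a time
convert(From, To, State) when From =< To ->
    Steps = lists:sublist([fun v0_to_v1/1, fun v1_to_v2/1],
                          From + 1, To - From),
    lists:foldl(fun (Step, S) -> Step(S) end, State, Steps).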
+ +smallest_raft_index(#?MODULE{cfg = _Cfg, + messages = Messages, + ra_indexes = Indexes0 + } = State) -> + case rabbit_fifo_index:smallest(Indexes0) of + I when is_integer(I) -> + case lqueue:peek(Messages) of + {value, ?INDEX_MSG(Idx, _)} -> + {min(I, Idx), State}; + _ -> + {I, State} + end; + _ -> + %% TODO: could be inefficent if there is no front list + case lqueue:peek(Messages) of + {value, ?INDEX_MSG(I, _)} -> + {I, State}; + _ -> + {undefined, State} + end + end. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index a63483becda7..c797c9d9bd07 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -1,10 +1,23 @@ +%% macros for memory optimised tuple structures +-define(TUPLE(A, B), [A | B]). + +-define(DISK_MSG_TAG, '$disk'). +% -define(PREFIX_DISK_MSG_TAG, '$prefix_disk'). +-define(PREFIX_MEM_MSG_TAG, '$prefix_inmem'). + +-define(DISK_MSG(Header), [Header | ?DISK_MSG_TAG]). +-define(MSG(Header, RawMsg), [Header | RawMsg]). +-define(INDEX_MSG(Index, Msg), [Index | Msg]). +% -define(PREFIX_DISK_MSG(Header), [?PREFIX_DISK_MSG_TAG | Header]). +% -define(PREFIX_DISK_MSG(Header), ?DISK_MSG(Header)). +-define(PREFIX_MEM_MSG(Header), [?PREFIX_MEM_MSG_TAG | Header]). + -type option(T) :: undefined | T. -type raw_msg() :: term(). %% The raw message. It is opaque to rabbit_fifo. --type msg_in_id() :: non_neg_integer(). % a queue scoped monotonically incrementing integer used to enforce order % in the unassigned messages map @@ -26,17 +39,19 @@ %% A non-zero value indicates a previous attempt. %% If it only contains the size it can be condensed to an integer only --type msg() :: {msg_header(), raw_msg()}. +-type msg() :: ?MSG(msg_header(), raw_msg()) | + ?DISK_MSG(msg_header()) | + ?PREFIX_MEM_MSG(msg_header()). %% message with a header map. -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type indexed_msg() :: {ra:index(), msg()}. +-type indexed_msg() :: ?INDEX_MSG(ra:index(), msg()). -type prefix_msg() :: {'$prefix_msg', msg_header()}. --type delivery_msg() :: {msg_id(), msg()}. +-type delivery_msg() :: {msg_id(), {msg_header(), term()}}. %% A tuple consisting of the message id and the headered message. -type consumer_tag() :: binary(). 
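Editor's note: the new macros at the top of rabbit_fifo.hrl build message terms out of improper lists ([A | B]) instead of 2-tuples. A cons cell takes two heap words where a 2-tuple takes three (one header word plus two elements), which matters when millions of these terms sit in a queue's state. erts_debug:flat_size/1 (size in words, atom elements assumed) shows the saving:

1> erts_debug:flat_size({header, body}).  %% tuple: header word + 2 elements
3
2> erts_debug:flat_size([header | body]). %% improper list: a single cons cell
2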
@@ -79,7 +94,7 @@ -record(consumer, {meta = #{} :: consumer_meta(), - checked_out = #{} :: #{msg_id() => {msg_in_id(), indexed_msg()}}, + checked_out = #{} :: #{msg_id() => indexed_msg()}, next_msg_id = 0 :: msg_id(), % part of snapshot data %% max number of messages that can be sent %% decremented for each delivery @@ -145,12 +160,11 @@ -record(rabbit_fifo, {cfg :: #cfg{}, % unassigned messages - messages = lqueue:new() :: lqueue:lqueue({msg_in_id(), indexed_msg()}), + messages = lqueue:new() :: lqueue:lqueue(indexed_msg()), % defines the next message id - next_msg_num = 1 :: msg_in_id(), + messages_total = 0 :: non_neg_integer(), % queue of returned msg_in_ids - when checking out it picks from - returns = lqueue:new() :: lqueue:lqueue(prefix_msg() | - {msg_in_id(), indexed_msg()}), + returns = lqueue:new() :: lqueue:lqueue(term()), % a counter of enqueues - used to trigger shadow copy points enqueue_count = 0 :: non_neg_integer(), % a map containing all the live processes that have ever enqueued diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl index 14ac89faff62..9dc92acb8252 100644 --- a/deps/rabbit/src/rabbit_fifo_index.erl +++ b/deps/rabbit/src/rabbit_fifo_index.erl @@ -14,7 +14,7 @@ %% the empty atom is a lot smaller (4 bytes) than e.g. `undefined` (13 bytes). %% This matters as the data map gets persisted as part of the snapshot --define(NIL, ''). +-define(NIL, []). -record(?MODULE, {data = #{} :: #{integer() => ?NIL}, smallest :: undefined | non_neg_integer(), @@ -40,10 +40,19 @@ append(Key, #?MODULE{data = Data, smallest = Smallest, largest = Largest} = State) - when Key > Largest orelse Largest =:= undefined -> + when Key > Largest orelse + Largest =:= undefined -> State#?MODULE{data = maps:put(Key, ?NIL, Data), smallest = ra_lib:default(Smallest, Key), - largest = Key}. + largest = Key}; +append(Key, + #?MODULE{data = Data, + largest = Largest, + smallest = Smallest} = State) -> + State#?MODULE{data = maps:put(Key, ?NIL, Data), + smallest = min(Key, ra_lib:default(Smallest, Key)), + largest = max(Key, ra_lib:default(Largest, Key)) + }. -spec delete(Index :: integer(), state()) -> state(). delete(Smallest, #?MODULE{data = Data0, diff --git a/deps/rabbit/src/rabbit_fifo_v1.erl b/deps/rabbit/src/rabbit_fifo_v1.erl index fc585cf6ea39..a59a5c9250ae 100644 --- a/deps/rabbit/src/rabbit_fifo_v1.erl +++ b/deps/rabbit/src/rabbit_fifo_v1.erl @@ -50,6 +50,11 @@ dehydrate_state/1, normalize/1, + + %% getters for coversions + get_field/2, + get_cfg_field/2, + %% protocol helpers make_enqueue/3, make_register_enqueuer/1, @@ -64,6 +69,8 @@ make_garbage_collection/0 ]). +-export([convert_v0_to_v1/1]). + %% command records representing all the protocol actions that are supported -record(enqueue, {pid :: option(pid()), seq :: option(msg_seqno()), @@ -2206,3 +2213,23 @@ notify_decorators_effect(#?MODULE{cfg = #cfg{resource = QName}} = State) -> notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> {mod_call, rabbit_quorum_queue, spawn_notify_decorators, [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. + +get_field(Field, State) -> + Fields = record_info(fields, ?MODULE), + Index = record_index_of(Field, Fields), + element(Index, State). + +get_cfg_field(Field, #?MODULE{cfg = Cfg} ) -> + Fields = record_info(fields, cfg), + Index = record_index_of(Field, Fields), + element(Index, Cfg). + +record_index_of(F, Fields) -> + index_of(2, F, Fields). 
+ +index_of(_, F, []) -> + exit({field_not_found, F}); +index_of(N, F, [F | _]) -> + N; +index_of(N, F, [_ | T]) -> + index_of(N+1, F, T). diff --git a/deps/rabbit/src/rabbit_looking_glass.erl b/deps/rabbit/src/rabbit_looking_glass.erl index 855d0adf4905..97cf8a7ff892 100644 --- a/deps/rabbit/src/rabbit_looking_glass.erl +++ b/deps/rabbit/src/rabbit_looking_glass.erl @@ -14,7 +14,7 @@ -ignore_xref([{maps, from_list, 1}]). -export([boot/0]). --export([trace/1, profile/0, profile/1]). +-export([trace/1, trace_qq/0, profile/0, profile/1]). -export([connections/0]). boot() -> @@ -59,6 +59,27 @@ trace(Input) -> {send, true}] )). +trace_qq() -> + dbg:stop_clear(), + lg:trace([ra_server, + ra_server_proc, + rabbit_fifo, + queue, + rabbit_fifo_index + ], + lg_file_tracer, + "traces.lz4", + maps:from_list([ + {mode, profile} + % {process_dump, true}, + % {running, true}, + % {send, true} + ] + )), + timer:sleep(10000), + lg:stop(), + profile(). + profile() -> profile("callgrind.out"). diff --git a/deps/rabbit/test/oqueue_SUITE.erl b/deps/rabbit/test/oqueue_SUITE.erl new file mode 100644 index 000000000000..406dbf49c21c --- /dev/null +++ b/deps/rabbit/test/oqueue_SUITE.erl @@ -0,0 +1,158 @@ +-module(oqueue_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-export([ + ]). + +-include_lib("proper/include/proper.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +%%%=================================================================== +%%% Common Test callbacks +%%%=================================================================== + +all() -> + [ + {group, tests} + ]. + + +all_tests() -> + [ + basics, + delete, + delete_front, + order + ]. + +groups() -> + [ + {tests, [], all_tests()} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + ok. + +%%%=================================================================== +%%% Test cases +%%%=================================================================== + +basics(_Confg) -> + Q0 = oqueue:new(), + ?assertMatch({empty, _}, oqueue:out(Q0)), + Q1 = oqueue:in(1, Q0), + ?assertEqual(1, oqueue:len(Q1)), + ?assertMatch({{value, 1}, _}, oqueue:out(Q1)), + Q2 = oqueue:in(0, Q1), + ?assertEqual(2, oqueue:len(Q2)), + {V2, Q3} = oqueue:out(Q2), + ?assertMatch({value, 0}, V2), + ?assertMatch({{value, 1}, _}, oqueue:out(Q3)), + Q4 = oqueue:in(0, Q3), + ?assertMatch({{value, 0}, _}, oqueue:out(Q4)), + ok. + + +delete(_Config) -> + Q0 = enq_list([1,2,3], oqueue:new()), + Q1 = oqueue:delete(2, Q0), + {error, not_found} = oqueue:delete(4, Q0), + ?assertEqual(2, oqueue:len(Q1)), + ?assertEqual([1,3], oqueue:to_list(Q1)), + ok. + +delete_front(_Config) -> + Q0 = enq_list([1,2,3,4], oqueue:new()), + %% this ensures there is a front + {_, Q1} = oqueue:out(Q0), + {error, not_found} = oqueue:delete(1, Q1), + Q2 = oqueue:delete(3, Q1), + ?assertEqual(2, oqueue:len(Q2)), + ?assertEqual([2,4], oqueue:to_list(Q2)), + + Q3 = oqueue:in(5, Q1), + Q4 = oqueue:delete(3, Q3), + ?assertEqual(3, oqueue:len(Q4)), + ?assertEqual([2,4, 5], oqueue:to_list(Q4)), + ok. + +order(_Config) -> + run_proper( + fun () -> + ?FORALL(Ops, list( + frequency([ + {5, non_neg_integer()}, + {1, deq}, + {2, {del, non_neg_integer()}} + ]) + ), + order_prop(Ops)) + end, [], 20000). 
+ +order_prop(Ops0) -> + % ct:pal("Ops ~w", [Ops0]), + OutQ = enq_list(Ops0, oqueue:new()), + Expected = run_queue(Ops0, []), + OQList = oqueue:to_list(OutQ), + Expected == OQList andalso + oqueue:len(OutQ) == length(Expected). + +enq_list([], Q) -> + Q; +enq_list([deq | T], Q0) -> + {_, Q} = oqueue:out(Q0), + enq_list(T, Q); +enq_list([{del, I} | T], Q0) -> + case oqueue:delete(I, Q0) of + {error, not_found} -> + enq_list(T, Q0); + Q -> + enq_list(T, Q) + end; +enq_list([H | T], Q) -> + enq_list(T, oqueue:in(H, Q)). + +run_proper(Fun, Args, NumTests) -> + ?assertEqual( + true, + proper:counterexample( + erlang:apply(Fun, Args), + [{numtests, NumTests}, + {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines + (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A) + end}])). + +run_queue([], Q) -> + Q; +run_queue([deq | T], Q) -> + run_queue(T, drop_head(Q)); +run_queue([{del, I} | T], Q) -> + run_queue(T, lists:delete(I, Q)); +run_queue([I | T], Q) -> + run_queue(T, insert(I, Q)). + +drop_head([]) -> []; +drop_head([_ | T]) -> T. + +insert(Item, [H | T]) when Item > H-> + [H | insert(Item, T)]; +insert(Item, List) -> + [Item | List]. diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index cf853bca98d5..9d9fd4e3d591 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -2,6 +2,7 @@ %% rabbit_fifo unit tests suite +-compile(nowarn_export_all). -compile(export_all). -compile({no_auto_import, [apply/3]}). @@ -88,6 +89,7 @@ enq_enq_checkout_test(_) -> Cid = {<<"enq_enq_checkout_test">>, self()}, {State1, _} = enq(1, 1, first, test_init(test)), {State2, _} = enq(2, 2, second, State1), + ?assertEqual(2, rabbit_fifo:query_messages_total(State2)), {_State3, _, Effects} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, #{}), @@ -201,9 +203,10 @@ enq_enq_checkout_get_settled_test(_) -> Cid = {?FUNCTION_NAME, self()}, {State1, _} = enq(1, 1, first, test_init(test)), % get returns a reply value - {_State2, {dequeue, {0, {_, first}}, _}, _Effs} = + {State2, {dequeue, {0, {_, first}}, _}, _Effs} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1), + ?assertEqual(0, rabbit_fifo:query_messages_total(State2)), ok. checkout_get_empty_test(_) -> @@ -240,6 +243,7 @@ checkout_enq_settle_test(_) -> {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _} | _]} = check(Cid, 1, test_init(test)), {State2, Effects0} = enq(2, 1, first, State1), + ct:pal("Effects0 ~p", [Effects0]), ?ASSERT_EFF({send_msg, _, {delivery, ?FUNCTION_NAME, [{0, {_, first}}]}, _}, @@ -362,9 +366,9 @@ return_checked_out_limit_test(_) -> {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}, {aux, active}]} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), - {#rabbit_fifo{ra_indexes = RaIdxs}, ok, [_ReleaseEff]} = + {#rabbit_fifo{} = State, ok, [_ReleaseEff]} = apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId2]), State2), - ?assertEqual(0, rabbit_fifo_index:size(RaIdxs)), + ?assertEqual(0, rabbit_fifo:query_messages_total(State)), ok. 
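Editor's sketch: order_prop/1 above is a model-based property, running the same random operation sequence against oqueue and against a plain ordered list (run_queue/insert) and comparing the results. For reference, the same pattern applied to the stdlib queue module with a plain list as the model (the module and property names are made up for illustration):

-module(queue_model_sketch).
-include_lib("proper/include/proper.hrl").
-export([prop_queue_vs_list/0]).

%% drive the real structure and a trivial list model with the same ops,
%% then compare the observable contents
prop_queue_vs_list() ->
    ?FORALL(Ops, list(oneof([{in, integer()}, out])),
            begin
                {Q, Model} = lists:foldl(fun apply_op/2,
                                         {queue:new(), []}, Ops),
                queue:to_list(Q) =:= Model
            end).

apply_op({in, I}, {Q, Model}) ->
    {queue:in(I, Q), Model ++ [I]};
apply_op(out, {Q0, Model}) ->
    {_, Q} = queue:out(Q0),
    {Q, case Model of [] -> []; [_ | T] -> T end}.

proper:quickcheck(queue_model_sketch:prop_queue_vs_list()) exercises it from the shell.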
return_auto_checked_out_test(_) -> @@ -583,9 +587,8 @@ pending_enqueue_is_enqueued_on_down_test(_) -> duplicate_delivery_test(_) -> {State0, _} = enq(1, 1, first, test_init(test)), - {#rabbit_fifo{ra_indexes = RaIdxs, - messages = Messages}, _} = enq(2, 1, first, State0), - ?assertEqual(1, rabbit_fifo_index:size(RaIdxs)), + {#rabbit_fifo{messages = Messages} = State, _} = enq(2, 1, first, State0), + ?assertEqual(1, rabbit_fifo:query_messages_total(State)), ?assertEqual(1, lqueue:len(Messages)), ok. @@ -658,7 +661,7 @@ purge_with_checkout_test(_) -> {State3, {purge, 1}, _} = apply(meta(2), rabbit_fifo:make_purge(), State2), ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0), ?assertEqual(0, State3#rabbit_fifo.msg_bytes_enqueue), - ?assertEqual(1, rabbit_fifo_index:size(State3#rabbit_fifo.ra_indexes)), + ?assertEqual(1, rabbit_fifo:query_messages_total(State3)), #consumer{checked_out = Checked} = maps:get(Cid, State3#rabbit_fifo.consumers), ?assertEqual(1, maps:size(Checked)), ok. @@ -1697,10 +1700,10 @@ query_peek_test(_) -> ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(1, State0)), {State1, _} = enq(1, 1, first, State0), {State2, _} = enq(2, 2, second, State1), - ?assertMatch({ok, {_, {_, first}}}, rabbit_fifo:query_peek(1, State1)), + ?assertMatch({ok, [_, _ | first]}, rabbit_fifo:query_peek(1, State1)), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(2, State1)), - ?assertMatch({ok, {_, {_, first}}}, rabbit_fifo:query_peek(1, State2)), - ?assertMatch({ok, {_, {_, second}}}, rabbit_fifo:query_peek(2, State2)), + ?assertMatch({ok, [_, _ | first]}, rabbit_fifo:query_peek(1, State2)), + ?assertMatch({ok, [_, _ | second]}, rabbit_fifo:query_peek(2, State2)), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(3, State2)), ok. diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 85881f68cddc..a22b0a286eb4 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1,5 +1,6 @@ -module(rabbit_fifo_prop_SUITE). +-compile(nowarn_export_all). -compile(export_all). -export([ @@ -8,7 +9,7 @@ -include_lib("proper/include/proper.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --include("src/rabbit_fifo.hrl"). +-include_lib("rabbit/src/rabbit_fifo.hrl"). 
%%%=================================================================== %%% Common Test callbacks @@ -38,6 +39,7 @@ all_tests() -> scenario12, scenario13, scenario14, + scenario14b, scenario15, scenario16, scenario17, @@ -46,10 +48,23 @@ all_tests() -> scenario20, scenario21, scenario22, + scenario23, + scenario24, + scenario25, + scenario26, + scenario27, + scenario28, + scenario29, + scenario30, + scenario31, + scenario32, + upgrade, + messages_total, single_active, single_active_01, single_active_02, single_active_03, + single_active_04, single_active_ordering, single_active_ordering_01, single_active_ordering_03, @@ -217,13 +232,13 @@ scenario11(_Config) -> C1 = {<<>>, c:pid(0,215,0)}, E = c:pid(0,217,0), Commands = [ - make_enqueue(E,1,<<>>), - make_checkout(C1, {auto,1,simple_prefetch}), - make_checkout(C1, cancel), - make_enqueue(E,2,<<>>), - make_checkout(C1, {auto,1,simple_prefetch}), - make_settle(C1, [0]), - make_checkout(C1, cancel) + make_enqueue(E,1,<<"1">>), % 1 + make_checkout(C1, {auto,1,simple_prefetch}), % 2 + make_checkout(C1, cancel), % 3 + make_enqueue(E,2,<<"22">>), % 4 + make_checkout(C1, {auto,1,simple_prefetch}), % 5 + make_settle(C1, [0]), % 6 + make_checkout(C1, cancel) % 7 ], run_snapshot_test(#{name => ?FUNCTION_NAME, max_length => 2}, Commands), @@ -256,6 +271,16 @@ scenario14(_Config) -> max_bytes => 1}, Commands), ok. +scenario14b(_Config) -> + E = c:pid(0,217,0), + Commands = [ + make_enqueue(E,1,<<0>>), + make_enqueue(E,2,<<0>>) + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + max_bytes => 1}, Commands), + ok. + scenario15(_Config) -> C1 = {<<>>, c:pid(0,179,1)}, E = c:pid(0,176,1), @@ -345,7 +370,7 @@ scenario20(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), Commands = [make_enqueue(E,1,<<>>), - make_enqueue(E,2,<<>>), + make_enqueue(E,2,<<1>>), make_checkout(C1, {auto,2,simple_prefetch}), {down, C1Pid, noconnection}, make_enqueue(E,3,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>), @@ -355,7 +380,8 @@ scenario20(_Config) -> make_enqueue(E,7,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0>>) ], run_snapshot_test(#{name => ?FUNCTION_NAME, - max_bytes => 97, + max_length => 4, + % max_bytes => 97, max_in_memory_length => 1}, Commands), ok. @@ -395,6 +421,274 @@ scenario22(_Config) -> Commands), ok. +scenario24(_Config) -> + C1Pid = c:pid(0,242,0), + C1 = {<<>>, C1Pid}, + C2 = {<<0>>, C1Pid}, + E = c:pid(0,240,0), + Commands = [ + make_checkout(C1, {auto,2,simple_prefetch}), %% 1 + make_checkout(C2, {auto,1,simple_prefetch}), %% 2 + make_enqueue(E,1,<<"1">>), %% 3 + make_enqueue(E,2,<<"2b">>), %% 4 + make_enqueue(E,3,<<"3">>), %% 5 + make_enqueue(E,4,<<"4">>), %% 6 + {down, E, noconnection} %% 7 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + deliver_limit => undefined, + max_length => 3, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. 
+ +scenario25(_Config) -> + C1Pid = c:pid(0,282,0), + C2Pid = c:pid(0,281,0), + C1 = {<<>>, C1Pid}, + C2 = {<<>>, C2Pid}, + E = c:pid(0,280,0), + Commands = [ + make_checkout(C1, {auto,2,simple_prefetch}), %% 1 + make_enqueue(E,1,<<0>>), %% 2 + make_checkout(C2, {auto,1,simple_prefetch}), %% 3 + make_enqueue(E,2,<<>>), %% 4 + make_enqueue(E,3,<<>>), %% 5 + {down, C1Pid, noproc}, %% 6 + make_enqueue(E,4,<<>>), %% 7 + rabbit_fifo:make_purge() %% 8 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + max_bytes => undefined, + release_cursor_interval => 0, + deliver_limit => undefined, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. + +scenario26(_Config) -> + C1Pid = c:pid(0,242,0), + C1 = {<<>>, C1Pid}, + E1 = c:pid(0,436,0), + E2 = c:pid(0,435,0), + Commands = [ + make_enqueue(E1,2,<<>>), %% 1 + make_enqueue(E1,3,<<>>), %% 2 + make_enqueue(E2,1,<<>>), %% 3 + make_enqueue(E2,2,<<>>), %% 4 + make_enqueue(E1,4,<<>>), %% 5 + make_enqueue(E1,5,<<>>), %% 6 + make_enqueue(E1,6,<<>>), %% 7 + make_enqueue(E1,7,<<>>), %% 8 + make_enqueue(E1,1,<<>>), %% 9 + make_checkout(C1, {auto,5,simple_prefetch}), %% 1 + make_enqueue(E1,8,<<>>), %% 2 + make_enqueue(E1,9,<<>>), %% 2 + make_enqueue(E1,10,<<>>), %% 2 + {down, C1Pid, noconnection} + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + deliver_limit => undefined, + max_length => 8, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. + +scenario28(_Config) -> + E = c:pid(0,151,0), + Conf = #{dead_letter_handler => {rabbit_fifo_prop_SUITE,banana,[]}, + delivery_limit => undefined, + max_in_memory_bytes => undefined, + max_length => 1,name => ?FUNCTION_NAME,overflow_strategy => drop_head, + release_cursor_interval => 100,single_active_consumer_on => false}, + Commands = [ + make_enqueue(E,2, <<>>), + make_enqueue(E,3, <<>>), + make_enqueue(E,1, <<>>) + ], + ?assert(single_active_prop(Conf, Commands, false)), + ok. + +scenario27(_Config) -> + C1Pid = test_util:fake_pid(fakenode@fake), + % C2Pid = c:pid(0,281,0), + C1 = {<<>>, C1Pid}, + C2 = {<<>>, C1Pid}, + E = c:pid(0,151,0), + E2 = c:pid(0,152,0), + Commands = [ + make_enqueue(E,1,<<>>), + make_enqueue(E2,1,<<28,202>>), + make_enqueue(E,2,<<"Î2">>), + {down, E, noproc}, + make_enqueue(E2,2,<<"ê">>), + {nodeup,fakenode@fake}, + make_enqueue(E2,3,<<>>), + make_enqueue(E2,4,<<>>), + make_enqueue(E2,5,<<>>), + make_enqueue(E2,6,<<>>), + make_enqueue(E2,7,<<>>), + make_enqueue(E2,8,<<>>), + make_enqueue(E2,9,<<>>), + {purge}, + make_enqueue(E2,10,<<>>), + make_enqueue(E2,11,<<>>), + make_enqueue(E2,12,<<>>), + make_enqueue(E2,13,<<>>), + make_enqueue(E2,14,<<>>), + make_enqueue(E2,15,<<>>), + make_enqueue(E2,16,<<>>), + make_enqueue(E2,17,<<>>), + make_enqueue(E2,18,<<>>), + {nodeup,fakenode@fake}, + make_enqueue(E2,19,<<>>), + make_checkout(C1, {auto,77,simple_prefetch}), + make_enqueue(E2,20,<<>>), + make_enqueue(E2,21,<<>>), + make_enqueue(E2,22,<<>>), + make_enqueue(E2,23,<<"Ýý">>), + make_checkout(C2, {auto,66,simple_prefetch}), + {purge}, + make_enqueue(E2,24,<<>>) + ], + ?assert( + single_active_prop(#{name => ?FUNCTION_NAME, + max_bytes => undefined, + release_cursor_interval => 100, + deliver_limit => 1, + max_length => 1, + max_in_memory_length => 8, + max_in_memory_bytes => 691, + overflow_strategy => drop_head, + single_active_consumer_on => true, + dead_letter_handler => {?MODULE, banana, []} + }, Commands, false)), + ok. 
+ +scenario30(_Config) -> + C1Pid = c:pid(0,242,0), + C1 = {<<>>, C1Pid}, + E = c:pid(0,240,0), + Commands = [ + make_enqueue(E,1,<<>>), %% 1 + make_enqueue(E,2,<<1>>), %% 2 + make_checkout(C1, {auto,1,simple_prefetch}), %% 3 + {down, C1Pid, noconnection}, %% 4 + make_enqueue(E,3,<<>>) %% 5 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + deliver_limit => undefined, + max_length => 1, + max_in_memory_length => 1, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []}, + single_active_consumer_on => true + }, + Commands), + ok. + +scenario31(_Config) -> + C1Pid = c:pid(0,242,0), + C1 = {<<>>, C1Pid}, + E1 = c:pid(0,314,0), + E2 = c:pid(0,339,0), + Commands = [ + % [{1,{enqueue,<0.314.0>,1,<<>>}}, + % {2,{enqueue,<0.339.0>,2,<<>>}}, + % {3, + % {checkout,{<<>>,<10689.342.0>}, + % {auto,1,simple_prefetch}, + % #{ack => true,args => [],prefetch => 1,username => <<"user">>}}}, + % {4,{purge}}] + make_enqueue(E1,1,<<>>), %% 1 + make_enqueue(E2,2,<<1>>), %% 2 + make_checkout(C1, {auto,1,simple_prefetch}), %% 3 + {purge} %% 4 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + deliver_limit => undefined, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. + +scenario32(_Config) -> + E1 = c:pid(0,314,0), + Commands = [ + make_enqueue(E1,1,<<0>>), %% 1 + make_enqueue(E1,2,<<0,0>>), %% 2 + make_enqueue(E1,4,<<0,0,0,0>>), %% 3 + make_enqueue(E1,3,<<0,0,0>>) %% 4 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + max_length => 3, + deliver_limit => undefined, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. + +scenario29(_Config) -> + C1Pid = c:pid(0,242,0), + C1 = {<<>>, C1Pid}, + E = c:pid(0,240,0), + Commands = [ + make_enqueue(E,1,<<>>), %% 1 + make_enqueue(E,2,<<>>), %% 2 + make_checkout(C1, {auto,2,simple_prefetch}), %% 2 + make_enqueue(E,3,<<>>), %% 3 + make_enqueue(E,4,<<>>), %% 4 + make_enqueue(E,5,<<>>), %% 5 + make_enqueue(E,6,<<>>), %% 6 + make_enqueue(E,7,<<>>), %% 7 + {down, E, noconnection} %% 8 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + deliver_limit => undefined, + max_length => 5, + max_in_memory_length => 1, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []}, + single_active_consumer_on => true + }, + Commands), + ok. +scenario23(_Config) -> + C1Pid = c:pid(0,242,0), + C1 = {<<>>, C1Pid}, + E = c:pid(0,240,0), + Commands = [ + make_enqueue(E,1,<<>>), %% 1 + make_checkout(C1, {auto,2,simple_prefetch}), %% 2 + make_enqueue(E,2,<<>>), %% 3 + make_enqueue(E,3,<<>>), %% 4 + {down, E, noconnection}, %% 5 + make_enqueue(E,4,<<>>) %% 6 + ], + run_snapshot_test(#{name => ?FUNCTION_NAME, + release_cursor_interval => 0, + deliver_limit => undefined, + max_length => 2, + overflow_strategy => drop_head, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. + single_active_01(_Config) -> C1Pid = test_util:fake_pid(rabbit@fake_node1), C1 = {<<0>>, C1Pid}, @@ -450,6 +744,27 @@ single_active_03(_Config) -> ?assert(single_active_prop(Conf, Commands, true)), ok. 
+single_active_04(_Config) -> + % C1Pid = test_util:fake_pid(node()), + % C1 = {<<0>>, C1Pid}, + % C2Pid = test_util:fake_pid(rabbit@fake_node2), + % C2 = {<<>>, C2Pid}, + % Pid = test_util:fake_pid(node()), + E = test_util:fake_pid(rabbit@fake_node2), + Commands = [ + + % make_checkout(C1, {auto,2,simple_prefetch}), + make_enqueue(E, 1, <<>>), + make_enqueue(E, 2, <<>>), + make_enqueue(E, 3, <<>>), + make_enqueue(E, 4, <<>>) + % {down, Pid, noconnection}, + % {nodeup, node()} + ], + Conf = config(?FUNCTION_NAME, 3, 587, true, 3, 7, undefined), + ?assert(single_active_prop(Conf, Commands, true)), + ok. + test_run_log(_Config) -> Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end}, run_proper( @@ -507,7 +822,7 @@ snapshots(_Config) -> end, [], 1000). single_active(_Config) -> - Size = 2000, + Size = 300, run_proper( fun () -> ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, InMemoryBytes}, @@ -520,18 +835,71 @@ single_active(_Config) -> }}]), begin Config = config(?FUNCTION_NAME, - Length, - Bytes, - true, - DeliveryLimit, - InMemoryLength, - InMemoryBytes), + Length, + Bytes, + true, + DeliveryLimit, + InMemoryLength, + InMemoryBytes), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, single_active_prop(Config, O, false))) end) end, [], Size). + +upgrade(_Config) -> + Size = 500, + run_proper( + fun () -> + ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, + frequency([{5, {undefined, undefined, undefined, undefined, false}}, + {5, {oneof([range(1, 10), undefined]), + oneof([range(1, 1000), undefined]), + oneof([range(1, 3), undefined]), + oneof([range(1, 10), 0, undefined]), + oneof([true, false]) + }}]), + begin + Config = config(?FUNCTION_NAME, + Length, + Bytes, + SingleActive, + DeliveryLimit, + InMemoryLength, + undefined), + ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), + collect({log_size, length(O)}, + upgrade_prop(Config, O))) + end) + end, [], Size). + +messages_total(_Config) -> + Size = 1000, + run_proper( + fun () -> + ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, + frequency([{5, {undefined, undefined, undefined, undefined, false}}, + {5, {oneof([range(1, 10), undefined]), + oneof([range(1, 1000), undefined]), + oneof([range(1, 3), undefined]), + oneof([range(1, 10), 0, undefined]), + oneof([true, false]) + }}]), + begin + Config = config(?FUNCTION_NAME, + Length, + Bytes, + SingleActive, + DeliveryLimit, + InMemoryLength, + undefined), + ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), + collect({log_size, length(O)}, + messages_total_prop(Config, O))) + end) + end, [], Size). + single_active_ordering(_Config) -> Size = 2000, Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end}, @@ -550,11 +918,6 @@ single_active_ordering(_Config) -> end, [], Size). 
single_active_ordering_01(_Config) -> -% [{enqueue,<0.145.0>,1,0}, -% {enqueue,<0.145.0>,1,1}, -% {checkout,{<<>>,<0.148.0>},{auto,1,simple_prefetch},#{ack => true,args => [],prefetch => 1,username => <<117,115,101,114>>}} -% {enqueue,<0.140.0>,1,2}, -% {settle,{<<>>,<0.148.0>},[0]}] C1Pid = test_util:fake_pid(node()), C1 = {<<0>>, C1Pid}, E = test_util:fake_pid(rabbit@fake_node2), @@ -733,7 +1096,6 @@ max_length_prop(Conf0, Commands) -> Entries = lists:zip(Indexes, Commands), Invariant = fun (#rabbit_fifo{cfg = #cfg{max_length = MaxLen}} = S) -> #{num_ready_messages := MsgReady} = rabbit_fifo:overview(S), - % ct:pal("msg Ready ~w ~w", [MsgReady, MaxLen]), MsgReady =< MaxLen end, try run_log(test_init(Conf), Entries, Invariant) of @@ -771,10 +1133,9 @@ single_active_prop(Conf0, Commands, ValidateOrder) -> end, Consumers), map_size(Up) =< 1 end, + try run_log(test_init(Conf), Entries, Invariant) of {_State, Effects} when ValidateOrder -> - % ct:pal("Effects: ~p~n", [Effects]), - % ct:pal("State: ~p~n", [State]), %% validate message ordering lists:foldl(fun ({send_msg, Pid, {delivery, Tag, Msgs}, ra_event}, Acc) -> @@ -792,6 +1153,76 @@ single_active_prop(Conf0, Commands, ValidateOrder) -> false end. +messages_total_prop(Conf0, Commands) -> + Conf = Conf0#{release_cursor_interval => 100}, + Indexes = lists:seq(1, length(Commands)), + Entries = lists:zip(Indexes, Commands), + InitState = test_init(Conf), + run_log(InitState, Entries, messages_total_invariant()), + true. + +messages_total_invariant() -> + fun(#rabbit_fifo{messages = M, + consumers = C, + enqueuers = E, + prefix_msgs = {PTot, _, RTot, _}, + returns = R} = S) -> + Base = lqueue:len(M) + lqueue:len(R) + PTot + RTot, + CTot = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> + Acc + map_size(Ch) + end, Base, C), + Tot = maps:fold(fun (_, #enqueuer{pending = P}, Acc) -> + Acc + length(P) + end, CTot, E), + QTot = rabbit_fifo:query_messages_total(S), + case Tot == QTot of + true -> true; + false -> + ct:pal("message invariant failed Expected ~b Got ~b", + [Tot, QTot]), + false + end + end. + +upgrade_prop(Conf0, Commands) -> + Conf = Conf0#{release_cursor_interval => 1}, + Indexes = lists:seq(1, length(Commands)), + Entries = lists:zip(Indexes, Commands), + InitState = test_init_v1(Conf), + [begin + {PreEntries, PostEntries} = lists:split(SplitPos, Entries), + %% run log v1 + V1 = lists:foldl( + fun ({Idx, E}, Acc0) -> + element(1, rabbit_fifo_v1:apply(meta(Idx), E, Acc0)) + end, InitState, PreEntries), + + %% perform conversion + V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), + {machine_version, 1, 2}, V1)), + %% assert invariants + Fields = [num_messages, + num_ready_messages, + smallest_raft_index, + num_enqueuers, + num_consumers, + enqueue_message_bytes, + checkout_message_bytes + ], + V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(V1)), + V2Overview = maps:with(Fields, rabbit_fifo:overview(V2)), + case V1Overview == V2Overview of + true -> ok; + false -> + ct:pal("upgrade_prop failed expected~n~p~nGot:~n~p", + [V1Overview, V2Overview]), + ?assertEqual(V1Overview, V2Overview) + end, + %% check we can run the post entries from the converted state + run_log(V2, PostEntries) + end || SplitPos <- lists:seq(1, length(Entries))], + true. + %% single active consumer ordering invariant: %% only redelivered messages can go backwards validate_msg_order(_, [], S) -> @@ -821,7 +1252,7 @@ dump_generated(Conf, Commands) -> true. 
snapshots_prop(Conf, Commands) -> - try run_snapshot_test(Conf, Commands) of + try run_snapshot_test(Conf, Commands, messages_total_invariant()) of _ -> true catch Err -> @@ -1119,28 +1550,35 @@ run_proper(Fun, Args, NumTests) -> end}])). run_snapshot_test(Conf, Commands) -> + run_snapshot_test(Conf, Commands, fun (_) -> true end). + +run_snapshot_test(Conf, Commands, Invariant) -> %% create every incremental permutation of the commands lists %% and run the snapshot tests against that ct:pal("running snapshot test with ~b commands using config ~p", [length(Commands), Conf]), [begin - % ?debugFmt("~w running command to ~w~n", [?FUNCTION_NAME, lists:last(C)]), - run_snapshot_test0(Conf, C) + % ct:pal("~w running commands to ~w~n", [?FUNCTION_NAME, lists:last(C)]), + run_snapshot_test0(Conf, C, Invariant) end || C <- prefixes(Commands, 1, [])]. run_snapshot_test0(Conf, Commands) -> + run_snapshot_test0(Conf, Commands, fun (_) -> true end). + +run_snapshot_test0(Conf, Commands, Invariant) -> Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), - {State0, Effects} = run_log(test_init(Conf), Entries), + {State0, Effects} = run_log(test_init(Conf), Entries, Invariant), State = rabbit_fifo:normalize(State0), + Cursors = [ C || {release_cursor, _, _} = C <- Effects], [begin - % ct:pal("release_cursor: ~b~n", [SnapIdx]), %% drop all entries below and including the snapshot Filtered = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; (_) -> false end, Entries), - {S0, _} = run_log(SnapState, Filtered), + % ct:pal("release_cursor: ~b from ~w~n", [SnapIdx, element(1, hd_or(Filtered))]), + {S0, _} = run_log(SnapState, Filtered, Invariant), S = rabbit_fifo:normalize(S0), % assert log can be restored from any release cursor index case S of @@ -1153,9 +1591,12 @@ run_snapshot_test0(Conf, Commands) -> ct:pal("Expected~n~p~nGot:~n~p", [State, S]), ?assertEqual(State, S) end - end || {release_cursor, SnapIdx, SnapState} <- Effects], + end || {release_cursor, SnapIdx, SnapState} <- Cursors], ok. +hd_or([H | _]) -> H; +hd_or(_) -> {undefined}. + %% transforms [1,2,3] into [[1,2,3], [1,2], [1]] prefixes(Source, N, Acc) when N > length(Source) -> lists:reverse(Acc); @@ -1195,6 +1636,12 @@ test_init(Conf) -> metrics_handler => {?MODULE, metrics_handler, []}}, rabbit_fifo:init(maps:merge(Default, Conf)). +test_init_v1(Conf) -> + Default = #{queue_resource => blah, + release_cursor_interval => 0, + metrics_handler => {?MODULE, metrics_handler, []}}, + rabbit_fifo_v1:init(maps:merge(Default, Conf)). + meta(Idx) -> #{index => Idx, term => 1, system_time => 0}. From aec42ecf0b50ea9768b99d7295f715e4163b4447 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 7 Oct 2021 18:46:55 +0200 Subject: [PATCH 05/97] Delete oqueue --- deps/rabbit/src/oqueue.erl | 178 ------------------------------ deps/rabbit/src/rabbit_fifo.erl | 7 -- deps/rabbit/test/oqueue_SUITE.erl | 158 -------------------------- 3 files changed, 343 deletions(-) delete mode 100644 deps/rabbit/src/oqueue.erl delete mode 100644 deps/rabbit/test/oqueue_SUITE.erl diff --git a/deps/rabbit/src/oqueue.erl b/deps/rabbit/src/oqueue.erl deleted file mode 100644 index 7d6f559b8f35..000000000000 --- a/deps/rabbit/src/oqueue.erl +++ /dev/null @@ -1,178 +0,0 @@ --module(oqueue). 
- --export([new/0, - %% O(1) when item is larger than largest item inserted - %% worst O(n) - in/2, - %% O(1) (amortised) - out/1, - %% fast when deleting in the order of insertion - %% worst O(n) - delete/2, - %% O(1) (amortised) - peek/1, - %% O(1) - len/1, - to_list/1, - from_list/1 - ]). - --record(oqueue, {length = 0 :: non_neg_integer(), - rear = [] :: list(), - rear_deletes = #{} :: map(), - front = [] :: list(), - last_front_item :: undefined | term()}). - --opaque oqueue() :: #oqueue{}. - --export_type([oqueue/0]). - --spec new() -> oqueue(). -new() -> #oqueue{}. - --spec in(term(), oqueue()) -> oqueue(). -in(Item, #oqueue{length = Len, - front = [_ | _] = Front, - last_front_item = LastFrontItem} = Q) - when Item < LastFrontItem -> - Q#oqueue{length = Len + 1, - front = enqueue_front(Item, Front)}; -in(Item, #oqueue{length = Len, - rear = Rear} = Q) -> - Q#oqueue{length = Len + 1, - rear = enqueue_rear(Item, Rear)}. - --spec out(oqueue()) -> - {empty | {value, term()}, oqueue()}. -out(#oqueue{length = Len, rear_deletes = Dels} = Q) - when Len - map_size(Dels) == 0 -> - {empty, Q}; -out(#oqueue{front = [Item], length = Len} = Q) -> - {{value, Item}, Q#oqueue{front = [], - last_front_item = undefined, - length = Len - 1}}; -out(#oqueue{front = [Item | Rem], length = Len} = Q) -> - {{value, Item}, Q#oqueue{front = Rem, - length = Len - 1}}; -out(#oqueue{front = []} = Q) -> - out(maybe_reverse(Q)). - --spec delete(term(), oqueue()) -> - oqueue() | {error, not_found}. -delete(Item, #oqueue{length = Len, - last_front_item = LFI, - front = [_ | _] = Front0, - rear = Rear0, - rear_deletes = Dels0} = Q) -> - %% TODO: check if item is out of range to avoid scan - case Item > LFI of - true when map_size(Dels0) == 31 -> - Rear = Rear0 -- maps:keys(Dels0#{Item => Item}), - %% TODO we don't know all were actually deleted - Q#oqueue{rear = Rear, - rear_deletes = #{}, - length = Len - 32}; - %% item is not in front, scan rear list - %% TODO: this will walk the rear list in the least effective order - %% assuming most deletes will be from the front - % case catch remove(Item, Rear0) of - % not_found -> - % {error, not_found}; - % Rear -> - % Q#oqueue{rear = Rear, - % length = Len - 1} - % end; - true -> - %% cache delete - Q#oqueue{rear_deletes = Dels0#{Item => Item}}; - false -> - case catch remove(Item, Front0) of - not_found -> - {error, not_found}; - [] -> - maybe_reverse(Q#oqueue{front = [], - last_front_item = undefined, - length = Len - 1}); - Front when LFI == Item -> - %% the last item of the front list was removed but we still have - %% items in the front list, inefficient to take last but this should - %% be a moderately rare case given the use case of the oqueue - Q#oqueue{front = Front, - last_front_item = lists:last(Front), - length = Len - 1}; - Front -> - Q#oqueue{front = Front, - length = Len - 1} - end - end; -delete(_Item, #oqueue{front = [], rear = []}) -> - {error, not_found}; -delete(Item, #oqueue{front = []} = Q) -> - delete(Item, maybe_reverse(Q)). - --spec peek(oqueue()) -> - empty | {value, term(), oqueue()}. -peek(#oqueue{front = [H | _]} = Q) -> - {value, H, Q}; -peek(#oqueue{rear = [_|_]} = Q) -> - %% the front is empty, reverse rear now - %% so that future peek ops are cheap - peek(maybe_reverse(Q)); -peek(_) -> - empty. - --spec len(oqueue()) -> non_neg_integer(). -len(#oqueue{rear_deletes = Dels, length = Len}) -> - Len - map_size(Dels). - --spec to_list(oqueue()) -> list(). 
-to_list(#oqueue{rear = Rear0, rear_deletes = Dels, front = Front}) -> - Rear = Rear0 -- maps:keys(Dels), - Front ++ lists:reverse(Rear). - --spec from_list(list()) -> oqueue(). -from_list(List) -> - lists:foldl(fun in/2, new(), List). - -%% internal - -remove(_Item, []) -> - throw(not_found); -remove(Item, [Item | Tail]) -> - Tail; -remove(Item, [H | Tail]) -> - [H | remove(Item, Tail)]. - -% remove_all(Items, []) -> -% throw({empty_list, Items}); -% remove_all([], Tail) -> -% Tail; -% remove_all([Item | RemItems], [Item | Tail]) -> -% %% how to record an item was deleted? -% remove_all(RemItems, Tail); -% remove_all([Item | Items], [H | Tail]) when Item < H -> -% [H | remove_all(Item, Tail)]. - -enqueue_rear(Item, [H | T]) when Item < H-> - [H | enqueue_rear(Item, T)]; -enqueue_rear(Item, List) -> - [Item | List]. - -enqueue_front(Item, [H | T]) when Item > H-> - [H | enqueue_front(Item, T)]; -enqueue_front(Item, List) -> - [Item | List]. - -maybe_reverse(#oqueue{front = [], - length = Len, - rear_deletes = Dels, - rear = [_|_] = Rear0} = Q) -> - Rear = Rear0 -- maps:keys(Dels), - Q#oqueue{front = lists:reverse(Rear), - rear_deletes = #{}, - length = Len - map_size(Dels), - rear = [], - last_front_item = hd(Rear)}; -maybe_reverse(Q) -> - Q. - diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index dce0e23a8215..5f64cec10f94 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -359,8 +359,6 @@ apply(#{index := Index}, #purge{}, messages = Messages, ra_indexes = Indexes0} = State0) -> Total = messages_ready(State0), - %% TODO: add an optimised version of oqueue:delete that takes a list - %% of items Indexes1 = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> @@ -648,8 +646,6 @@ convert_v1_to_v2(V1State) -> consumers = ConsumersV2, service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), prefix_msgs = rabbit_fifo_v1:get_field(prefix_msgs, V1State), - %% this is wrong - % returns = oqueue:from_list(lqueue:to_list(ReturnsV1)) msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), waiting_consumers = rabbit_fifo_v1:get_field(waiting_consumers, V1State), @@ -968,8 +964,6 @@ query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> RaIndexes. - % rabbit_fifo_index:append(Key, Arg2) - % oqueue:to_list(RaIndexes). query_consumer_count(#?MODULE{consumers = Consumers, waiting_consumers = WaitingConsumers}) -> @@ -2061,7 +2055,6 @@ normalize(#?MODULE{ra_indexes = _Indexes, messages = Messages, release_cursors = Cursors} = State) -> State#?MODULE{ - % ra_indexes = oqueue:from_list(oqueue:to_list(Indexes)), returns = lqueue:from_list(lqueue:to_list(Returns)), messages = lqueue:from_list(lqueue:to_list(Messages)), release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}. diff --git a/deps/rabbit/test/oqueue_SUITE.erl b/deps/rabbit/test/oqueue_SUITE.erl deleted file mode 100644 index 406dbf49c21c..000000000000 --- a/deps/rabbit/test/oqueue_SUITE.erl +++ /dev/null @@ -1,158 +0,0 @@ --module(oqueue_SUITE). - --compile(nowarn_export_all). --compile(export_all). - --export([ - ]). - --include_lib("proper/include/proper.hrl"). --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). 
- -%%%=================================================================== -%%% Common Test callbacks -%%%=================================================================== - -all() -> - [ - {group, tests} - ]. - - -all_tests() -> - [ - basics, - delete, - delete_front, - order - ]. - -groups() -> - [ - {tests, [], all_tests()} - ]. - -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. - -init_per_group(_Group, Config) -> - Config. - -end_per_group(_Group, _Config) -> - ok. - -init_per_testcase(_TestCase, Config) -> - Config. - -end_per_testcase(_TestCase, _Config) -> - ok. - -%%%=================================================================== -%%% Test cases -%%%=================================================================== - -basics(_Confg) -> - Q0 = oqueue:new(), - ?assertMatch({empty, _}, oqueue:out(Q0)), - Q1 = oqueue:in(1, Q0), - ?assertEqual(1, oqueue:len(Q1)), - ?assertMatch({{value, 1}, _}, oqueue:out(Q1)), - Q2 = oqueue:in(0, Q1), - ?assertEqual(2, oqueue:len(Q2)), - {V2, Q3} = oqueue:out(Q2), - ?assertMatch({value, 0}, V2), - ?assertMatch({{value, 1}, _}, oqueue:out(Q3)), - Q4 = oqueue:in(0, Q3), - ?assertMatch({{value, 0}, _}, oqueue:out(Q4)), - ok. - - -delete(_Config) -> - Q0 = enq_list([1,2,3], oqueue:new()), - Q1 = oqueue:delete(2, Q0), - {error, not_found} = oqueue:delete(4, Q0), - ?assertEqual(2, oqueue:len(Q1)), - ?assertEqual([1,3], oqueue:to_list(Q1)), - ok. - -delete_front(_Config) -> - Q0 = enq_list([1,2,3,4], oqueue:new()), - %% this ensures there is a front - {_, Q1} = oqueue:out(Q0), - {error, not_found} = oqueue:delete(1, Q1), - Q2 = oqueue:delete(3, Q1), - ?assertEqual(2, oqueue:len(Q2)), - ?assertEqual([2,4], oqueue:to_list(Q2)), - - Q3 = oqueue:in(5, Q1), - Q4 = oqueue:delete(3, Q3), - ?assertEqual(3, oqueue:len(Q4)), - ?assertEqual([2,4, 5], oqueue:to_list(Q4)), - ok. - -order(_Config) -> - run_proper( - fun () -> - ?FORALL(Ops, list( - frequency([ - {5, non_neg_integer()}, - {1, deq}, - {2, {del, non_neg_integer()}} - ]) - ), - order_prop(Ops)) - end, [], 20000). - -order_prop(Ops0) -> - % ct:pal("Ops ~w", [Ops0]), - OutQ = enq_list(Ops0, oqueue:new()), - Expected = run_queue(Ops0, []), - OQList = oqueue:to_list(OutQ), - Expected == OQList andalso - oqueue:len(OutQ) == length(Expected). - -enq_list([], Q) -> - Q; -enq_list([deq | T], Q0) -> - {_, Q} = oqueue:out(Q0), - enq_list(T, Q); -enq_list([{del, I} | T], Q0) -> - case oqueue:delete(I, Q0) of - {error, not_found} -> - enq_list(T, Q0); - Q -> - enq_list(T, Q) - end; -enq_list([H | T], Q) -> - enq_list(T, oqueue:in(H, Q)). - -run_proper(Fun, Args, NumTests) -> - ?assertEqual( - true, - proper:counterexample( - erlang:apply(Fun, Args), - [{numtests, NumTests}, - {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines - (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A) - end}])). - -run_queue([], Q) -> - Q; -run_queue([deq | T], Q) -> - run_queue(T, drop_head(Q)); -run_queue([{del, I} | T], Q) -> - run_queue(T, lists:delete(I, Q)); -run_queue([I | T], Q) -> - run_queue(T, insert(I, Q)). - -drop_head([]) -> []; -drop_head([_ | T]) -> T. - -insert(Item, [H | T]) when Item > H-> - [H | insert(Item, T)]; -insert(Item, List) -> - [Item | List]. 
From e3ccefbf39403e03c732cf0e46ba9dc7d8099a9e Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 28 Oct 2021 16:26:11 +0200 Subject: [PATCH 06/97] Maintain order when dead-lettering rejected messages in quorum queues Before this commit, when a client consumes from a quorum queue and rejects many messages, the order in which the messages got dead-lettered is not the same as the order in which the messages got rejected. Classic queues already maintain the order. --- deps/rabbit/src/rabbit_fifo.erl | 45 +++++++++++-------- deps/rabbit/test/dead_lettering_SUITE.erl | 31 +++++++++++++ .../src/rabbit_ct_client_helpers.erl | 4 +- 3 files changed, 60 insertions(+), 20 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 5f64cec10f94..cb2fe7fd7819 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -209,8 +209,17 @@ apply(Meta, apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, #?MODULE{consumers = Cons0} = State0) -> case Cons0 of - #{ConsumerId := Con0} -> - Discarded = maps:with(MsgIds, Con0#consumer.checked_out), + #{ConsumerId := #consumer{checked_out = Checked} = Con0} -> + % Discarded maintains same order as MsgIds (so that publishing to + % dead-letter exchange will be in same order as messages got rejected) + Discarded = lists:filtermap(fun(Id) -> + case maps:find(Id, Checked) of + {ok, Msg} -> + {true, Msg}; + error -> + false + end + end, MsgIds), Effects = dead_letter_effects(rejected, Discarded, State0, []), complete_and_checkout(Meta, MsgIds, ConsumerId, Con0, Effects, State0); @@ -1289,7 +1298,7 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> _ -> subtract_in_memory_counts(Header, State2) end, - Effects = dead_letter_effects(maxlen, #{none => FullMsg}, + Effects = dead_letter_effects(maxlen, [FullMsg], State, Effects0), {State#?MODULE{ra_indexes = Indexes}, Effects}; empty -> @@ -1468,24 +1477,24 @@ dead_letter_effects(_Reason, _Discarded, dead_letter_effects(Reason, Discarded, #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}}, Effects) -> - RaftIdxs = maps:fold( - fun (_, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header)), Acc) -> - [RaftIdx | Acc]; - (_, _, Acc) -> - Acc - end, [], Discarded), + RaftIdxs = lists:filtermap( + fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> + {true, RaftIdx}; + (_) -> + false + end, Discarded), [{log, RaftIdxs, fun (Log) -> Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), - DeadLetters = maps:fold( - fun (_, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header)), Acc) -> + DeadLetters = lists:filtermap( + fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), - [{Reason, Msg} | Acc]; - (_, ?INDEX_MSG(_, ?MSG(_Header, Msg)), Acc) -> - [{Reason, Msg} | Acc]; - (_, _, Acc) -> - Acc - end, [], Discarded), + {true, {Reason, Msg}}; + (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> + {true, {Reason, Msg}}; + (_) -> + false + end, Discarded), [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] end} | Effects]. 
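The hunks above carry the point of this commit: `Discarded` is now built by walking the MsgIds list, i.e. the order in which the client rejected the deliveries, rather than by taking a sub-map of the consumer's checked_out map, whose iteration order has nothing to do with rejection order. A minimal sketch of the idea, not taken from the patch (the function name and variables are invented for illustration):

%% Sketch only. Checked is the per-consumer map #{MsgId => Msg}; MsgIds is the
%% list of ids in the order the client rejected them. Folding over MsgIds keeps
%% that order; maps:with/2 followed by maps:fold/3 would not.
discarded_in_rejection_order(MsgIds, Checked) ->
    lists:filtermap(fun(Id) ->
                            case maps:find(Id, Checked) of
                                {ok, Msg} -> {true, Msg};
                                error     -> false
                            end
                    end, MsgIds).

The same switch from maps:fold/3 to lists:filtermap/2 is what allows dead_letter_effects/4 above to hand the {Reason, Msg} pairs to the dead-letter handler in rejection order.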
@@ -1591,7 +1600,7 @@ return_one(Meta, MsgId, Msg0, case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> %% TODO: don't do for prefix msgs - Effects = dead_letter_effects(delivery_limit, #{none => Msg}, + Effects = dead_letter_effects(delivery_limit, [Msg], State0, Effects0), complete(Meta, ConsumerId, [MsgId], Con0, Effects, State0); _ -> diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 189080d83637..4c7e7968f9cb 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -28,6 +28,7 @@ groups() -> dead_letter_nack_requeue, dead_letter_nack_requeue_multiple, dead_letter_reject, + dead_letter_reject_many, dead_letter_reject_requeue, dead_letter_max_length_drop_head, dead_letter_missing_exchange, @@ -315,6 +316,36 @@ dead_letter_reject(Config) -> _ = consume(Ch, QName, [P2, P3]), consume_empty(Ch, QName). +%% 1) Many messages are rejected. They get dead-lettered in correct order. +dead_letter_reject_many(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + DLXQName = ?config(queue_name_dlx, Config), + declare_dead_letter_queues(Ch, Config, QName, DLXQName), + + %% Publish 100 messages + Payloads = lists:map(fun erlang:integer_to_binary/1, lists:seq(1, 100)), + publish(Ch, QName, Payloads), + wait_for_messages(Config, [[QName, <<"100">>, <<"100">>, <<"0">>]]), + + %% Reject all messages using same consumer + amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName}, self()), + CTag = receive #'basic.consume_ok'{consumer_tag = C} -> C end, + [begin + receive {#'basic.deliver'{consumer_tag = CTag, delivery_tag = DTag}, #amqp_msg{payload = P}} -> + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag, requeue = false}) + after 5000 -> + amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), + exit(timeout) + end + end || P <- Payloads], + amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), + + %% Consume all messages from dead letter queue in correct order (i.e. from payload <<1>> to <<100>>) + wait_for_messages(Config, [[DLXQName, <<"100">>, <<"100">>, <<"0">>]]), + _ = consume(Ch, DLXQName, Payloads), + consume_empty(Ch, DLXQName). + %% 1) Message is rejected with basic.reject, requeue=true. Dead-lettering does not take place. 
dead_letter_reject_requeue(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), diff --git a/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl b/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl index d08433718926..57a0f769bf0a 100644 --- a/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl +++ b/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl @@ -266,7 +266,7 @@ consume(Ch, QName, Count) -> self()), CTag = receive #'basic.consume_ok'{consumer_tag = C} -> C end, [begin - Exp = list_to_binary(integer_to_list(I)), + Exp = integer_to_binary(I), receive {#'basic.deliver'{consumer_tag = CTag}, #amqp_msg{payload = Exp}} -> ok @@ -287,7 +287,7 @@ accumulate_without_acknowledging(Ch, CTag, Remaining, Acc) when Remaining =:= 0 amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), lists:reverse(Acc); accumulate_without_acknowledging(Ch, CTag, Remaining, Acc) -> - receive {#'basic.deliver'{consumer_tag = CTag, delivery_tag = DTag}, _MSg} -> + receive {#'basic.deliver'{consumer_tag = CTag, delivery_tag = DTag}, _Msg} -> accumulate_without_acknowledging(Ch, CTag, Remaining - 1, [DTag | Acc]) after 5000 -> amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), From 1c17773c9181e09e5495f061674e01e221f679e9 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 8 Oct 2021 17:02:19 +0200 Subject: [PATCH 07/97] Add at-least once dead-lettering for quorum queues and message TTL --- deps/rabbit/src/rabbit_amqqueue.erl | 25 +- deps/rabbit/src/rabbit_basic.erl | 3 +- deps/rabbit/src/rabbit_classic_queue.erl | 6 +- deps/rabbit/src/rabbit_dead_letter.erl | 6 +- deps/rabbit/src/rabbit_fifo.erl | 616 +++++++++++---- deps/rabbit/src/rabbit_fifo.hrl | 42 +- deps/rabbit/src/rabbit_fifo_client.erl | 2 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 324 ++++++++ deps/rabbit/src/rabbit_fifo_dlx.hrl | 30 + deps/rabbit/src/rabbit_fifo_dlx_client.erl | 93 +++ deps/rabbit/src/rabbit_fifo_dlx_sup.erl | 37 + deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 571 ++++++++++++++ deps/rabbit/src/rabbit_fifo_v1.erl | 2 + deps/rabbit/src/rabbit_policies.erl | 8 + deps/rabbit/src/rabbit_quorum_queue.erl | 158 ++-- deps/rabbit/src/rabbit_stream_queue.erl | 4 +- deps/rabbit/test/dead_lettering_SUITE.erl | 8 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 735 +++++++++++++----- deps/rabbit_common/include/rabbit.hrl | 2 +- .../rabbitmq_management/priv/www/js/global.js | 10 +- .../priv/www/js/tmpl/policies.ejs | 13 +- .../priv/www/js/tmpl/queue.ejs | 11 + .../priv/www/js/tmpl/queues.ejs | 11 +- 23 files changed, 2276 insertions(+), 441 deletions(-) create mode 100644 deps/rabbit/src/rabbit_fifo_dlx.erl create mode 100644 deps/rabbit/src/rabbit_fifo_dlx.hrl create mode 100644 deps/rabbit/src/rabbit_fifo_dlx_client.erl create mode 100644 deps/rabbit/src/rabbit_fifo_dlx_sup.erl create mode 100644 deps/rabbit/src/rabbit_fifo_dlx_worker.erl diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl index 05dbc7137891..ec73770faaa7 100644 --- a/deps/rabbit/src/rabbit_amqqueue.erl +++ b/deps/rabbit/src/rabbit_amqqueue.erl @@ -778,6 +778,7 @@ declare_args() -> {<<"x-message-ttl">>, fun check_message_ttl_arg/2}, {<<"x-dead-letter-exchange">>, fun check_dlxname_arg/2}, {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2}, + {<<"x-dead-letter-strategy">>, fun check_dlxstrategy_arg/2}, {<<"x-max-length">>, fun check_non_neg_int_arg/2}, {<<"x-max-length-bytes">>, fun check_non_neg_int_arg/2}, 
{<<"x-max-in-memory-length">>, fun check_non_neg_int_arg/2}, @@ -946,6 +947,22 @@ check_dlxrk_arg(Val, Args) when is_binary(Val) -> check_dlxrk_arg(_Val, _Args) -> {error, {unacceptable_type, "expected a string"}}. +-define(KNOWN_DLX_STRATEGIES, [<<"at-most-once">>, <<"at-least-once">>]). +check_dlxstrategy_arg({longstr, Val}, _Args) -> + case lists:member(Val, ?KNOWN_DLX_STRATEGIES) of + true -> ok; + false -> {error, invalid_dlx_strategy} + end; +check_dlxstrategy_arg({Type, _}, _Args) -> + {error, {unacceptable_type, Type}}; +check_dlxstrategy_arg(Val, _Args) when is_binary(Val) -> + case lists:member(Val, ?KNOWN_DLX_STRATEGIES) of + true -> ok; + false -> {error, invalid_dlx_strategy} + end; +check_dlxstrategy_arg(_Val, _Args) -> + {error, invalid_dlx_strategy}. + -define(KNOWN_OVERFLOW_MODES, [<<"drop-head">>, <<"reject-publish">>, <<"reject-publish-dlx">>]). check_overflow({longstr, Val}, _Args) -> case lists:member(Val, ?KNOWN_OVERFLOW_MODES) of @@ -1657,8 +1674,8 @@ credit(Q, CTag, Credit, Drain, QStates) -> {'ok', non_neg_integer(), qmsg(), rabbit_queue_type:state()} | {'empty', rabbit_queue_type:state()} | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. -basic_get(Q, NoAck, LimiterPid, CTag, QStates0) -> - rabbit_queue_type:dequeue(Q, NoAck, LimiterPid, CTag, QStates0). +basic_get(Q, NoAck, LimiterPid, CTag, QStates) -> + rabbit_queue_type:dequeue(Q, NoAck, LimiterPid, CTag, QStates). -spec basic_consume(amqqueue:amqqueue(), boolean(), pid(), pid(), boolean(), @@ -1670,7 +1687,7 @@ basic_get(Q, NoAck, LimiterPid, CTag, QStates0) -> {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. basic_consume(Q, NoAck, ChPid, LimiterPid, LimiterActive, ConsumerPrefetchCount, ConsumerTag, - ExclusiveConsume, Args, OkMsg, ActingUser, Contexts) -> + ExclusiveConsume, Args, OkMsg, ActingUser, QStates) -> QName = amqqueue:get_name(Q), %% first phase argument validation @@ -1686,7 +1703,7 @@ basic_consume(Q, NoAck, ChPid, LimiterPid, args => Args, ok_msg => OkMsg, acting_user => ActingUser}, - rabbit_queue_type:consume(Q, Spec, Contexts). + rabbit_queue_type:consume(Q, Spec, QStates). -spec basic_cancel(amqqueue:amqqueue(), rabbit_types:ctag(), any(), rabbit_types:username(), diff --git a/deps/rabbit/src/rabbit_basic.erl b/deps/rabbit/src/rabbit_basic.erl index cc7c00047e63..b42e832f71eb 100644 --- a/deps/rabbit/src/rabbit_basic.erl +++ b/deps/rabbit/src/rabbit_basic.erl @@ -12,7 +12,8 @@ -export([publish/4, publish/5, publish/1, message/3, message/4, properties/1, prepend_table_header/3, extract_headers/1, extract_timestamp/1, map_headers/2, delivery/4, - header_routes/1, parse_expiration/1, header/2, header/3]). + header_routes/1, parse_expiration/1, header/2, header/3, + is_message_persistent/1]). -export([build_content/2, from_content/1, msg_size/1, maybe_gc_large_msg/1, maybe_gc_large_msg/2]). 
-export([add_header/4, diff --git a/deps/rabbit/src/rabbit_classic_queue.erl b/deps/rabbit/src/rabbit_classic_queue.erl index 20edb7872d4b..f4c52d44d6b4 100644 --- a/deps/rabbit/src/rabbit_classic_queue.erl +++ b/deps/rabbit/src/rabbit_classic_queue.erl @@ -445,8 +445,10 @@ recover_durable_queues(QueuesAndRecoveryTerms) -> capabilities() -> #{unsupported_policies => [ %% Stream policies - <<"max-age">>, <<"stream-max-segment-size-bytes">>, - <<"queue-leader-locator">>, <<"initial-cluster-size">>], + <<"max-age">>, <<"stream-max-segment-size-bytes">>, + <<"queue-leader-locator">>, <<"initial-cluster-size">>, + %% Quorum policies + <<"dead-letter-strategy">>], queue_arguments => [<<"x-expires">>, <<"x-message-ttl">>, <<"x-dead-letter-exchange">>, <<"x-dead-letter-routing-key">>, <<"x-max-length">>, <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>, diff --git a/deps/rabbit/src/rabbit_dead_letter.erl b/deps/rabbit/src/rabbit_dead_letter.erl index f13b409dce85..c3865d31b696 100644 --- a/deps/rabbit/src/rabbit_dead_letter.erl +++ b/deps/rabbit/src/rabbit_dead_letter.erl @@ -7,7 +7,9 @@ -module(rabbit_dead_letter). --export([publish/5]). +-export([publish/5, + make_msg/5, + detect_cycles/3]). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). @@ -39,7 +41,7 @@ make_msg(Msg = #basic_message{content = Content, undefined -> {RoutingKeys, fun (H) -> H end}; _ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end} end, - ReasonBin = list_to_binary(atom_to_list(Reason)), + ReasonBin = atom_to_binary(Reason), TimeSec = os:system_time(seconds), PerMsgTTL = per_msg_ttl_header(Content#content.properties), HeadersFun2 = diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index cb2fe7fd7819..827d62dccdb0 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -20,11 +20,13 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -export([ + %% ra_machine callbacks init/1, apply/3, state_enter/2, tick/2, overview/1, + get_checked_out/4, %% versioning version/0, @@ -41,6 +43,7 @@ query_consumer_count/1, query_consumers/1, query_stat/1, + query_stat_dlx/1, query_single_active_consumer/1, query_in_memory_usage/1, query_peek/2, @@ -51,7 +54,10 @@ %% misc dehydrate_state/1, + dehydrate_message/1, normalize/1, + get_msg_header/1, + get_header/2, %% protocol helpers make_enqueue/3, @@ -103,7 +109,7 @@ #update_config{} | #garbage_collection{}. --type command() :: protocol() | ra_machine:builtin_command(). +-type command() :: protocol() | rabbit_fifo_dlx:protocol() | ra_machine:builtin_command(). %% all the command types supported by ra fifo -type client_msg() :: delivery(). @@ -126,6 +132,8 @@ state/0, config/0]). +%% This function is never called since only rabbit_fifo_v0:init/1 is called. +%% See https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 -spec init(config()) -> state(). 
init(#{name := Name, queue_resource := Resource} = Conf) -> @@ -143,6 +151,7 @@ update_config(Conf, State) -> MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined), DeliveryLimit = maps:get(delivery_limit, Conf, undefined), Expires = maps:get(expires, Conf, undefined), + MsgTTL = maps:get(msg_ttl, Conf, undefined), ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of true -> single_active; @@ -153,6 +162,7 @@ update_config(Conf, State) -> RCISpec = {RCI, RCI}, LastActive = maps:get(created, Conf, undefined), + MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined), State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, dead_letter_handler = DLH, become_leader_handler = BLH, @@ -163,8 +173,9 @@ update_config(Conf, State) -> max_in_memory_bytes = MaxMemoryBytes, consumer_strategy = ConsumerStrategy, delivery_limit = DeliveryLimit, - expires = Expires}, - last_active = LastActive}. + expires = Expires, + msg_ttl = MsgTTL}, + last_active = LastActive}. zero(_) -> 0. @@ -201,30 +212,46 @@ apply(Meta, case Cons0 of #{ConsumerId := Con0} -> complete_and_checkout(Meta, MsgIds, ConsumerId, - Con0, [], State); + Con0, [], State, true); _ -> {State, ok} end; apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State0) -> - case Cons0 of - #{ConsumerId := #consumer{checked_out = Checked} = Con0} -> - % Discarded maintains same order as MsgIds (so that publishing to - % dead-letter exchange will be in same order as messages got rejected) - Discarded = lists:filtermap(fun(Id) -> - case maps:find(Id, Checked) of - {ok, Msg} -> - {true, Msg}; - error -> - false - end - end, MsgIds), - Effects = dead_letter_effects(rejected, Discarded, State0, []), - complete_and_checkout(Meta, MsgIds, ConsumerId, Con0, - Effects, State0); + #?MODULE{consumers = Cons, + dlx = DlxState0, + cfg = #cfg{dead_letter_handler = DLH}} = State) -> + case Cons of + #{ConsumerId := #consumer{checked_out = Checked} = Con} -> + case DLH of + at_least_once -> + DlxState = lists:foldl(fun(MsgId, S) -> + case maps:find(MsgId, Checked) of + {ok, Msg} -> + rabbit_fifo_dlx:discard(Msg, rejected, S); + error -> + S + end + end, DlxState0, MsgIds), + complete_and_checkout(Meta, MsgIds, ConsumerId, Con, + [], State#?MODULE{dlx = DlxState}, false); + _ -> + % Discarded maintains same order as MsgIds (so that publishing to + % dead-letter exchange will be in same order as messages got rejected) + Discarded = lists:filtermap(fun(Id) -> + case maps:find(Id, Checked) of + {ok, Msg} -> + {true, Msg}; + error -> + false + end + end, MsgIds), + Effects = dead_letter_effects(rejected, Discarded, State, []), + complete_and_checkout(Meta, MsgIds, ConsumerId, Con, + Effects, State, true) + end; _ -> - {State0, ok} + {State, ok} end; apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, #?MODULE{consumers = Cons0} = State) -> @@ -319,17 +346,17 @@ apply(#{index := Index, State1 = update_consumer(ConsumerId, ConsumerMeta, {once, 1, simple_prefetch}, 0, State0), - {success, _, MsgId, Msg, State2} = checkout_one(Meta, State1), + {success, _, MsgId, Msg, State2, Effects0} = checkout_one(Meta, State1, []), {State4, Effects1} = case Settlement of unsettled -> {_, Pid} = ConsumerId, - {State2, [{monitor, process, Pid}]}; + {State2, [{monitor, process, Pid} | Effects0]}; settled -> %% immediately settle the checkout - {State3, _, Effects0} = + {State3, _, SettleEffects} = apply(Meta, make_settle(ConsumerId, [MsgId]), State2), - {State3, Effects0} + {State3, 
SettleEffects ++ Effects0} end, {Reply, Effects2} = case Msg of @@ -366,34 +393,45 @@ apply(#{index := Index}, #purge{}, #?MODULE{messages_total = Tot, returns = Returns, messages = Messages, - ra_indexes = Indexes0} = State0) -> - Total = messages_ready(State0), - Indexes1 = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> + ra_indexes = Indexes0, + dlx = DlxState0} = State0) -> + NumReady = messages_ready(State0), + Indexes1 = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> Acc end, Indexes0, lqueue:to_list(Returns)), - Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> + Indexes2 = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> Acc end, Indexes1, lqueue:to_list(Messages)), + {DlxState, DiscardMsgs} = rabbit_fifo_dlx:purge(DlxState0), + Indexes = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc0); + (_, Acc) -> + Acc + end, Indexes2, DiscardMsgs), + NumPurged = NumReady + length(DiscardMsgs), State1 = State0#?MODULE{ra_indexes = Indexes, messages = lqueue:new(), - messages_total = Tot - Total, + messages_total = Tot - NumPurged, returns = lqueue:new(), + dlx = DlxState, msg_bytes_enqueue = 0, prefix_msgs = {0, [], 0, []}, msg_bytes_in_memory = 0, msgs_ready_in_memory = 0}, Effects0 = [garbage_collection], - Reply = {purge, Total}, + Reply = {purge, NumPurged}, {State, _, Effects} = evaluate_limit(Index, false, State0, State1, Effects0), update_smallest_raft_index(Index, Reply, State, Effects); apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); +apply(Meta, {timeout, expire_msgs}, State) -> + checkout(Meta, State, State, [], false); apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, #?MODULE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}, @@ -531,12 +569,73 @@ apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> purge_node(Meta, Node, S, E) end, {State0, []}, Nodes), update_smallest_raft_index(Idx, ok, State, Effects); -apply(#{index := Idx} = Meta, #update_config{config = Conf}, State0) -> - {State, Reply, Effects} = checkout(Meta, State0, update_config(Conf, State0), []), +apply(#{index := Idx} = Meta, #update_config{config = Conf}, + #?MODULE{cfg = #cfg{dead_letter_handler = Old_DLH}} = State0) -> + #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState, + ra_indexes = Indexes0, + messages_total = Tot} = State1 = update_config(Conf, State0), + %%TODO return aux effect here and move logic over to handle_aux/6 which can return effects as last arguments. + {State4, Effects1} = case DLH of + at_least_once -> + case rabbit_fifo_dlx:consumer_pid(DlxState) of + undefined -> + %% Policy changed from at-most-once to at-least-once. + %% Therefore, start rabbit_fifo_dlx_worker on leader. + {State1, [{aux, start_dlx_worker}]}; + DlxWorkerPid -> + %% Leader already exists. + %% Notify leader of new policy. + Effect = {send_msg, DlxWorkerPid, lookup_topology, ra_event}, + {State1, [Effect]} + end; + _ when Old_DLH =:= at_least_once -> + %% Cleanup any remaining messages stored by rabbit_fifo_dlx + %% by either dropping or at-most-once dead-lettering. 
+ ReasonMsgs = rabbit_fifo_dlx:cleanup(DlxState), + Len = length(ReasonMsgs), + rabbit_log:debug("Cleaning up ~b dead-lettered messages " + "since dead_letter_handler changed from ~s to ~p", + [Len, Old_DLH, DLH]), + Effects0 = dead_letter_effects(undefined, ReasonMsgs, State1, []), + {_, Msgs} = lists:unzip(ReasonMsgs), + Indexes = delete_indexes(Msgs, Indexes0), + State2 = subtract_in_memory(Msgs, State1), + State3 = State2#?MODULE{dlx = rabbit_fifo_dlx:init(), + ra_indexes = Indexes, + messages_total = Tot - Len}, + {State3, Effects0}; + _ -> + {State1, []} + end, + {State, Reply, Effects} = checkout(Meta, State0, State4, Effects1), update_smallest_raft_index(Idx, Reply, State, Effects); apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> State = convert(FromVersion, ToVersion, V0State), - {State, ok, []}; + {State, ok, [{aux, start_dlx_worker}]}; +%%TODO are there better approach to +%% 1. matching against opaque rabbit_fifo_dlx:protocol / record (without exposing all the protocol details), and +%% 2. Separate the logic running in rabbit_fifo and rabbit_fifo_dlx when dead-letter messages is acked? +apply(#{index := IncomingRaftIdx} = Meta, {dlx, Cmd}, + #?MODULE{dlx = DlxState0, + messages_total = Total0, + ra_indexes = Indexes0} = State0) when element(1, Cmd) =:= settle -> + {DlxState, AckedMsgs} = rabbit_fifo_dlx:apply(Cmd, DlxState0), + Indexes = delete_indexes(AckedMsgs, Indexes0), + Total = Total0 - length(AckedMsgs), + State1 = subtract_in_memory(AckedMsgs, State0), + State2 = State1#?MODULE{dlx = DlxState, + messages_total = Total, + ra_indexes = Indexes}, + {State, ok, Effects} = checkout(Meta, State0, State2, [], false), + update_smallest_raft_index(IncomingRaftIdx, State, Effects); +apply(Meta, {dlx, Cmd}, + #?MODULE{dlx = DlxState0} = State0) -> + {DlxState, ok} = rabbit_fifo_dlx:apply(Cmd, DlxState0), + State1 = State0#?MODULE{dlx = DlxState}, + %% Run a checkout so that a new DLX consumer will be delivered discarded messages + %% directly after it subscribes. + checkout(Meta, State0, State1, [], false); apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), @@ -627,11 +726,31 @@ convert_v1_to_v2(V1State) -> end, Ch)} end, ConsumersV1), + %% The (old) format of dead_letter_handler in RMQ < v3.10 is: + %% {Module, Function, Args} + %% The (new) format of dead_letter_handler in RMQ >= v3.10 is: + %% undefined | {at_most_once, {Module, Function, Args}} | at_least_once + %% + %% Note that the conversion must convert both from old format to new format + %% as well as from new format to new format. 
The latter is because quorum queues + %% created in RMQ >= v3.10 are still initialised with rabbit_fifo_v0 as described in + %% https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 + DLH = case rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State) of + {_M, _F, _A = [_DLX = undefined|_]} -> + %% queue was declared in RMQ < v3.10 and no DLX configured + undefined; + {_M, _F, _A} = MFA -> + %% queue was declared in RMQ < v3.10 and DLX configured + {at_most_once, MFA}; + Other -> + Other + end, + %% Then add all pending messages back into the index Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), - dead_letter_handler = rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State), + dead_letter_handler = DLH, become_leader_handler = rabbit_fifo_v1:get_cfg_field(become_leader_handler, V1State), %% TODO: what if policy enabling reject_publish was applied before conversion? overflow_strategy = rabbit_fifo_v1:get_cfg_field(overflow_strategy, V1State), @@ -670,14 +789,20 @@ purge_node(Meta, Node, State, Effects) -> end, {State, Effects}, all_pids_for(Node, State)). %% any downs that re not noconnection -handle_down(Meta, Pid, #?MODULE{consumers = Cons0, - enqueuers = Enqs0} = State0) -> +handle_down(#{system_time := DownTs} = Meta, Pid, #?MODULE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> % Remove any enqueuer for the same pid and enqueue any pending messages % This should be ok as we won't see any more enqueues from this pid State1 = case maps:take(Pid, Enqs0) of {#enqueuer{pending = Pend}, Enqs} -> - lists:foldl(fun ({_, RIdx, RawMsg}, S) -> - enqueue(RIdx, RawMsg, S) + lists:foldl(fun ({_, RIdx, Ts, RawMsg}, S) -> + enqueue(RIdx, Ts, RawMsg, S); + ({_, RIdx, RawMsg}, S) -> + %% This is an edge case: It is an out-of-order delivery + %% from machine version 1. + %% If message TTL is configured, expiration will be delayed + %% for the time the message has been pending. + enqueue(RIdx, DownTs, RawMsg, S) end, State0#?MODULE{enqueuers = Enqs}, Pend); error -> State0 @@ -738,7 +863,16 @@ update_waiting_consumer_status(Node, Consumer#consumer.status =/= cancelled]. -spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). -state_enter(leader, #?MODULE{consumers = Cons, +state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = at_least_once, + resource = QRef, + name = QName}, + dlx = DlxState} = State) -> + rabbit_fifo_dlx:state_enter(RaState, QRef, QName, DlxState), + state_enter0(RaState, State); +state_enter(RaState, State) -> + state_enter0(RaState, State). 
+ +state_enter0(leader, #?MODULE{consumers = Cons, enqueuers = Enqs, waiting_consumers = WaitingConsumers, cfg = #cfg{name = Name, @@ -753,6 +887,7 @@ state_enter(leader, #?MODULE{consumers = Cons, Mons = [{monitor, process, P} || P <- Pids], Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), + %% TODO reissue timer effect if head of message queue has expiry header set FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}], Effects = Mons ++ Nots ++ NodeMons ++ FHReservation, case BLH of @@ -761,7 +896,7 @@ state_enter(leader, #?MODULE{consumers = Cons, {Mod, Fun, Args} -> [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] end; -state_enter(eol, #?MODULE{enqueuers = Enqs, +state_enter0(eol, #?MODULE{enqueuers = Enqs, consumers = Custs0, waiting_consumers = WaitingConsumers0}) -> Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), @@ -772,30 +907,32 @@ state_enter(eol, #?MODULE{enqueuers = Enqs, || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ [{aux, eol}, {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}]; -state_enter(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> +state_enter0(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, [FHReservation]; - state_enter(_, _) -> +state_enter0(_, _) -> %% catch all as not handling all states []. - -spec tick(non_neg_integer(), state()) -> ra_machine:effects(). tick(Ts, #?MODULE{cfg = #cfg{name = Name, resource = QName}, msg_bytes_enqueue = EnqueueBytes, - msg_bytes_checkout = CheckoutBytes} = State) -> + msg_bytes_checkout = CheckoutBytes, + dlx = DlxState} = State) -> case is_expired(Ts, State) of true -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; false -> + {_, MsgBytesDiscard} = rabbit_fifo_dlx:stat(DlxState), Metrics = {Name, messages_ready(State), num_checked_out(State), % checked out messages_total(State), query_consumer_count(State), % Consumers EnqueueBytes, - CheckoutBytes}, + CheckoutBytes, + MsgBytesDiscard}, [{mod_call, rabbit_quorum_queue, handle_tick, [QName, Metrics, all_nodes(State)]}] end. @@ -805,6 +942,9 @@ overview(#?MODULE{consumers = Cons, enqueuers = Enqs, release_cursors = Cursors, enqueue_count = EnqCount, + dlx = DlxState, + msgs_ready_in_memory = InMemReady, + msg_bytes_in_memory = InMemBytes, msg_bytes_enqueue = EnqueueBytes, msg_bytes_checkout = CheckoutBytes, cfg = Cfg} = State) -> @@ -818,23 +958,28 @@ overview(#?MODULE{consumers = Cons, max_in_memory_length => Cfg#cfg.max_in_memory_length, max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes, expires => Cfg#cfg.expires, + msg_ttl => Cfg#cfg.msg_ttl, delivery_limit => Cfg#cfg.delivery_limit - }, + }, {Smallest, _} = smallest_raft_index(State), - #{type => ?MODULE, - config => Conf, - num_consumers => maps:size(Cons), - num_checked_out => num_checked_out(State), - num_enqueuers => maps:size(Enqs), - num_ready_messages => messages_ready(State), - num_pending_messages => messages_pending(State), - num_messages => messages_total(State), - num_release_cursors => lqueue:len(Cursors), - release_cursors => [{I, messages_total(S)} || {_, I, S} <- lqueue:to_list(Cursors)], - release_cursor_enqueue_counter => EnqCount, - enqueue_message_bytes => EnqueueBytes, - checkout_message_bytes => CheckoutBytes, - smallest_raft_index => Smallest}. 
+ Overview = #{type => ?MODULE, + config => Conf, + num_consumers => maps:size(Cons), + num_checked_out => num_checked_out(State), + num_enqueuers => maps:size(Enqs), + num_ready_messages => messages_ready(State), + num_in_memory_ready_messages => InMemReady, + num_pending_messages => messages_pending(State), + num_messages => messages_total(State), + num_release_cursors => lqueue:len(Cursors), + release_cursors => [{I, messages_total(S)} || {_, I, S} <- lqueue:to_list(Cursors)], + release_cursor_enqueue_counter => EnqCount, + enqueue_message_bytes => EnqueueBytes, + checkout_message_bytes => CheckoutBytes, + in_memory_message_bytes => InMemBytes, + smallest_raft_index => Smallest}, + DlxOverview = rabbit_fifo_dlx:overview(DlxState), + maps:merge(Overview, DlxOverview). -spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> [delivery_msg()]. @@ -917,8 +1062,15 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, {reply, {ok, {Header, Msg}}, Aux0, Log0}; Err -> {reply, Err, Aux0, Log0} - end. - + end; +handle_aux(leader, _, start_dlx_worker, Aux, Log, + #?MODULE{cfg = #cfg{resource = QRef, + name = QName, + dead_letter_handler = at_least_once}}) -> + rabbit_fifo_dlx:start_worker(QRef, QName), + {no_reply, Aux, Log}; +handle_aux(_, _, start_dlx_worker, Aux, Log, _) -> + {no_reply, Aux, Log}. eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> @@ -1063,6 +1215,9 @@ query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes, msgs_ready_in_memory = Length}) -> {Length, Bytes}. +query_stat_dlx(#?MODULE{dlx = DlxState}) -> + rabbit_fifo_dlx:stat(DlxState). + query_peek(Pos, State0) when Pos > 0 -> case take_next_msg(State0) of empty -> @@ -1113,8 +1268,15 @@ messages_total(#?MODULE{messages = _M, messages_total = Total, ra_indexes = _Indexes, prefix_msgs = {_RCnt, _R, _PCnt, _P}}) -> - Total. % lqueue:len(M) + rabbit_fifo_index:size(Indexes) + RCnt + PCnt. + Total; +%% release cursors might be old state (e.g. after recent upgrade) +messages_total(State) + when element(1, State) =:= rabbit_fifo_v1 -> + rabbit_fifo_v1:query_messages_total(State); +messages_total(State) + when element(1, State) =:= rabbit_fifo_v0 -> + rabbit_fifo_v0:query_messages_total(State). update_use({inactive, _, _, _} = CUInfo, inactive) -> CUInfo; @@ -1265,8 +1427,9 @@ maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0 Effects1} end. -apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) -> - case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of +apply_enqueue(#{index := RaftIdx, + system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> + case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of {ok, State1, Effects1} -> State2 = incr_enqueue_count(incr_total(State1)), {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), @@ -1305,10 +1468,11 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> {State0, Effects0} end. 
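The new messages_total/1 clauses in the hunk above have to cope with release cursors that still hold a state record from an older machine version. Because Erlang records are tagged tuples, element(1, State) exposes the record name, which here doubles as the machine-version module. An illustrative sketch of that dispatch, not taken from the patch (total/1 is an invented name; the query_messages_total/1 functions are the ones exported by the respective modules):

%% Illustrative only: a record is a tuple whose first element is the record name,
%% so the tuple tag selects the module whose query function understands the state.
total(State) when element(1, State) =:= rabbit_fifo    -> rabbit_fifo:query_messages_total(State);
total(State) when element(1, State) =:= rabbit_fifo_v1 -> rabbit_fifo_v1:query_messages_total(State);
total(State) when element(1, State) =:= rabbit_fifo_v0 -> rabbit_fifo_v0:query_messages_total(State).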
-enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages} = State0) -> +enqueue(RaftIdx, Ts, RawMsg, #?MODULE{messages = Messages} = State0) -> %% the initial header is an integer only - it will get expanded to a map %% when the next required key is added - Header = message_size(RawMsg), + Header0 = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Header0, State0), {State1, Msg} = case evaluate_memory_limit(Header, State0) of true -> @@ -1322,6 +1486,39 @@ enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages} = State0) -> State = add_bytes_enqueue(Header, State1), State#?MODULE{messages = lqueue:in(Msg, Messages)}. +maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, + _, Header, + #?MODULE{cfg = #cfg{msg_ttl = undefined}}) -> + Header; +maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, + RaCmdTs, Header, + #?MODULE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> + update_expiry_header(RaCmdTs, PerQueueMsgTTL, Header); +maybe_set_msg_ttl(#basic_message{content = #content{properties = Props}}, + RaCmdTs, Header, + #?MODULE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> + %% rabbit_quorum_queue will leave the properties decoded if and only if + %% per message message TTL is set. + %% We already check in the channel that expiration must be valid. + {ok, PerMsgMsgTTL} = rabbit_basic:parse_expiration(Props), + TTL = min(PerMsgMsgTTL, PerQueueMsgTTL), + update_expiry_header(RaCmdTs, TTL, Header). + +update_expiry_header(_, undefined, Header) -> + Header; +update_expiry_header(RaCmdTs, 0, Header) -> + %% We do not comply exactly with the "TTL=0 models AMQP immediate flag" semantics + %% as done for classic queues where the message is discarded if it cannot be + %% consumed immediately. + %% Instead, we discard the message if it cannot be consumed within the same millisecond + %% when it got enqueued. This behaviour should be good enough. + update_expiry_header(RaCmdTs + 1, Header); +update_expiry_header(RaCmdTs, TTL, Header) -> + update_expiry_header(RaCmdTs + TTL, Header). + +update_expiry_header(ExpiryTs, Header) -> + update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). + incr_enqueue_count(#?MODULE{enqueue_count = EC, cfg = #cfg{release_cursor_interval = {_Base, C}} } = State0) when EC >= C -> @@ -1363,39 +1560,39 @@ maybe_store_dehydrated_state(_RaftIdx, State) -> enqueue_pending(From, #enqueuer{next_seqno = Next, - pending = [{Next, RaftIdx, RawMsg} | Pending]} = Enq0, + pending = [{Next, RaftIdx, Ts, RawMsg} | Pending]} = Enq0, State0) -> - State = enqueue(RaftIdx, RawMsg, State0), + State = enqueue(RaftIdx, Ts, RawMsg, State0), Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending}, enqueue_pending(From, Enq, State); enqueue_pending(From, Enq, #?MODULE{enqueuers = Enqueuers0} = State) -> State#?MODULE{enqueuers = Enqueuers0#{From => Enq}}. 
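The maybe_set_msg_ttl/4 and update_expiry_header/3 clauses added above resolve the effective TTL and stamp an absolute expiry timestamp into the message header. The per-message expiration (if any) and the per-queue msg_ttl are combined with min/2, which also works when one side is the atom 'undefined' because Erlang's term order sorts numbers before atoms. A condensed sketch of the resulting expiry value, not part of the patch (expiry_ts/2 is an invented name):

%% Sketch only. RaCmdTs is the Ra command's system time; TTLs are in milliseconds
%% or the atom 'undefined'. min(5000, undefined) =:= 5000 since numbers compare
%% smaller than atoms, so the numeric TTL wins whenever one is configured.
expiry_ts(_RaCmdTs, undefined) -> undefined;   %% no expiry header is written
expiry_ts(RaCmdTs, 0)          -> RaCmdTs + 1; %% TTL=0: expires within the same millisecond
expiry_ts(RaCmdTs, TTL)        -> RaCmdTs + TTL.

%% effective expiry as computed above: expiry_ts(RaCmdTs, min(PerMsgTTL, PerQueueTTL))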
-maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) -> +maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, State0) -> % direct enqueue without tracking - State = enqueue(RaftIdx, RawMsg, State0), + State = enqueue(RaftIdx, Ts, RawMsg, State0), {ok, State, Effects}; -maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, +maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, #?MODULE{enqueuers = Enqueuers0, ra_indexes = Indexes0} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, - {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo, + {ok, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, State1), {ok, State, [{monitor, process, From} | Effects]}; #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> % it is the next expected seqno - State1 = enqueue(RaftIdx, RawMsg, State0), + State1 = enqueue(RaftIdx, Ts, RawMsg, State0), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, State = enqueue_pending(From, Enq, State1), {ok, State, Effects0}; #enqueuer{next_seqno = Next, pending = Pending0} = Enq0 when MsgSeqNo > Next -> - % out of order enqueue - Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0], + % out of order delivery + Pending = [{MsgSeqNo, RaftIdx, Ts, RawMsg} | Pending0], Enq = Enq0#enqueuer{pending = lists:sort(Pending)}, %% if the enqueue it out of order we need to mark it in the %% index @@ -1426,29 +1623,39 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), update_smallest_raft_index(IncomingRaftIdx, State, Effects). -% used to processes messages that are finished +% used to process messages that are finished complete(Meta, ConsumerId, DiscardedMsgIds, - #consumer{checked_out = Checked} = Con0, Effects, + #consumer{checked_out = Checked} = Con0, #?MODULE{messages_total = Tot, - ra_indexes = Indexes0} = State0) -> + ra_indexes = Indexes0} = State0, Delete) -> %% credit_mode = simple_prefetch should automatically top-up credit %% as messages are simple_prefetch or otherwise returned Discarded = maps:with(DiscardedMsgIds, Checked), + DiscardedMsgs = maps:values(Discarded), + Len = length(DiscardedMsgs), Con = Con0#consumer{checked_out = maps:without(DiscardedMsgIds, Checked), - credit = increase_credit(Con0, map_size(Discarded))}, + credit = increase_credit(Con0, Len)}, State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), + State = lists:foldl(fun(Msg, Acc) -> + add_bytes_settle( + get_msg_header(Msg), Acc) + end, State1, DiscardedMsgs), + case Delete of + true -> + Indexes = delete_indexes(DiscardedMsgs, Indexes0), + State#?MODULE{messages_total = Tot - Len, + ra_indexes = Indexes}; + false -> + State + end. + +delete_indexes(Msgs, Indexes) -> %% TODO: optimise by passing a list to rabbit_fifo_index - Indexes = maps:fold(fun (_, ?INDEX_MSG(I, _), Acc0) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc0); - (_, _, Acc) -> - Acc - end, Indexes0, Discarded), - State = maps:fold(fun(_, Msg, Acc) -> - add_bytes_settle( - get_msg_header(Msg), Acc) - end, State1, Discarded), - {State#?MODULE{messages_total = Tot - length(DiscardedMsgIds), - ra_indexes = Indexes}, Effects}. + lists:foldl(fun (?INDEX_MSG(I, ?MSG(_,_)), Acc) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc); + (_, Acc) -> + Acc + end, Indexes, Msgs). 
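The pending-list handling in maybe_enqueue/7 above is the classic resequencing pattern: each enqueuer carries the next expected seqno, commands that arrive ahead of it are parked in a sorted pending list, and once the gap closes the parked entries are drained in order. The same pattern in isolation (record and function names are illustrative; the real #enqueuer{} also stores the Raft index and timestamp per pending entry):

-record(enq, {next = 1, pending = []}).

track(Seq, Msg, #enq{next = Seq} = E) ->
    %% the expected seqno: accept it, then drain any buffered successors
    drain([{Seq, Msg}], E#enq{next = Seq + 1});
track(Seq, Msg, #enq{next = Next, pending = Pend} = E) when Seq > Next ->
    {[], E#enq{pending = lists:sort([{Seq, Msg} | Pend])}};  %% ahead: buffer it
track(_Seq, _Msg, E) ->
    {[], E}.                                                 %% duplicate: drop

drain(Acc, #enq{next = Next, pending = [{Next, Msg} | Rest]} = E) ->
    drain([{Next, Msg} | Acc], E#enq{next = Next + 1, pending = Rest});
drain(Acc, E) ->
    {lists:reverse(Acc), E}.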
increase_credit(#consumer{lifetime = once, credit = Credit}, _) -> @@ -1464,10 +1671,9 @@ increase_credit(#consumer{credit = Current}, Credit) -> complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, #consumer{} = Con0, - Effects0, State0) -> - {State1, Effects1} = complete(Meta, ConsumerId, MsgIds, Con0, - Effects0, State0), - {State, ok, Effects} = checkout(Meta, State0, State1, Effects1, false), + Effects0, State0, Delete) -> + State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0, Delete), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0, false), update_smallest_raft_index(IncomingRaftIdx, State, Effects). dead_letter_effects(_Reason, _Discarded, @@ -1475,12 +1681,14 @@ dead_letter_effects(_Reason, _Discarded, Effects) -> Effects; dead_letter_effects(Reason, Discarded, - #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}}, + #?MODULE{cfg = #cfg{dead_letter_handler = {at_most_once, {Mod, Fun, Args}}}}, Effects) -> RaftIdxs = lists:filtermap( fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> {true, RaftIdx}; - (_) -> + ({_PerMsgReason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))}) when Reason =:= undefined -> + {true, RaftIdx}; + (_IgnorePrefixMessage) -> false end, Discarded), [{log, RaftIdxs, @@ -1492,7 +1700,12 @@ dead_letter_effects(Reason, Discarded, {true, {Reason, Msg}}; (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> {true, {Reason, Msg}}; - (_) -> + ({PerMsgReason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))}) when Reason =:= undefined -> + {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), + {true, {PerMsgReason, Msg}}; + ({PerMsgReason, ?INDEX_MSG(_, ?MSG(_Header, Msg))}) when Reason =:= undefined -> + {true, {PerMsgReason, Msg}}; + (_IgnorePrefixMessage) -> false end, Discarded), [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] @@ -1592,7 +1805,9 @@ get_header(Key, Header) when is_map(Header) -> return_one(Meta, MsgId, Msg0, #?MODULE{returns = Returns, consumers = Consumers, - cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, + dlx = DlxState0, + cfg = #cfg{delivery_limit = DeliveryLimit, + dead_letter_handler = DLH}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), Msg = update_msg_header(delivery_count, fun (C) -> C + 1 end, 1, Msg0), @@ -1600,9 +1815,17 @@ return_one(Meta, MsgId, Msg0, case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> %% TODO: don't do for prefix msgs - Effects = dead_letter_effects(delivery_limit, [Msg], - State0, Effects0), - complete(Meta, ConsumerId, [MsgId], Con0, Effects, State0); + case DLH of + at_least_once -> + DlxState = rabbit_fifo_dlx:discard(Msg, delivery_limit, DlxState0), + State = complete(Meta, ConsumerId, [MsgId], Con0, State0#?MODULE{dlx = DlxState}, false), + {State, Effects0}; + _ -> + Effects = dead_letter_effects(delivery_limit, [Msg], + State0, Effects0), + State = complete(Meta, ConsumerId, [MsgId], Con0, State0, true), + {State, Effects} + end; _ -> Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, @@ -1649,11 +1872,15 @@ return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, checkout(Meta, OldState, State, Effects) -> checkout(Meta, OldState, State, Effects, true). 
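For messages whose bodies live only in the Raft log (?DISK_MSG), dead_letter_effects/4 above cannot hand the payload to the at-most-once handler directly; it emits a log effect so that Ra first reads the original enqueue commands back by index and only then issues the mod_call to the configured handler. The shape of that effect, reduced to its essentials (helper name is illustrative; the {log, Idxs, Fun} effect and the {enqueue, Pid, Seq, Msg} command shape are the ones used above):

dead_letter_log_effect(RaftIdxs, Reason, {Mod, Fun, Args}) ->
    {log, RaftIdxs,
     fun(EnqueueCommands) ->
             %% recover the raw message bodies from the original commands
             DeadLetters = [{Reason, Msg}
                            || {enqueue, _Pid, _Seq, Msg} <- EnqueueCommands],
             [{mod_call, Mod, Fun, Args ++ [DeadLetters]}]
     end}.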
-checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, +checkout(#{index := Index} = Meta, + #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, Effects0, HandleConsumerChanges) -> - {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0), - Effects0, #{}), - case evaluate_limit(Index, false, OldState, State1, Effects1) of + {#?MODULE{dlx = DlxState0} = State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), + %%TODO For now we checkout the discards queue here. Move it to a better place + {DlxState1, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DlxState0), + State2 = State1#?MODULE{dlx = DlxState1}, + Effects2 = DlxDeliveryEffects ++ Effects1, + case evaluate_limit(Index, false, OldState, State2, Effects2) of {State, true, Effects} -> case maybe_notify_decorators(State, HandleConsumerChanges) of {true, {MaxActivePriority, IsEmpty}} -> @@ -1673,27 +1900,31 @@ checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldS end. checkout0(Meta, {success, ConsumerId, MsgId, - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), State}, - Effects, SendAcc0) when is_integer(RaftIdx) -> + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), State, Effects}, + SendAcc0) when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {MsgId, Header}}, SendAcc = maps:update_with(ConsumerId, - fun ({InMem, LogMsgs}) -> - {InMem, [DelMsg | LogMsgs]} - end, {[], [DelMsg]}, SendAcc0), - checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc); + fun ({InMem, LogMsgs}) -> + {InMem, [DelMsg | LogMsgs]} + end, {[], [DelMsg]}, SendAcc0), + checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); checkout0(Meta, {success, ConsumerId, MsgId, - ?INDEX_MSG(Idx, ?MSG(Header, Msg)), State}, Effects, + ?INDEX_MSG(Idx, ?MSG(Header, Msg)), State, Effects}, SendAcc0) when is_integer(Idx) -> DelMsg = {MsgId, {Header, Msg}}, SendAcc = maps:update_with(ConsumerId, - fun ({InMem, LogMsgs}) -> - {[DelMsg | InMem], LogMsgs} - end, {[DelMsg], []}, SendAcc0), - checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc); -checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), State}, Effects, + fun ({InMem, LogMsgs}) -> + {[DelMsg | InMem], LogMsgs} + end, {[DelMsg], []}, SendAcc0), + checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); +checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), State, Effects}, SendAcc) -> - checkout0(Meta, checkout_one(Meta, State), Effects, SendAcc); -checkout0(_Meta, {Activity, State0}, Effects0, SendAcc) -> + %% Do not append delivery effect for prefix messages. + %% Prefix messages do not exist anymore, but they still go through the + %% normal checkout flow to derive correct consumer states + %% after recovery and will still be settled or discarded later on. + checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); +checkout0(_Meta, {Activity, State0, Effects0}, SendAcc) -> Effects1 = case Activity of nochange -> append_delivery_effects(Effects0, SendAcc); @@ -1844,9 +2075,12 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> {dequeue, {MsgId, {Header, Msg}}, Ready}}}] end}. -checkout_one(Meta, #?MODULE{service_queue = SQ0, - messages = Messages0, - consumers = Cons0} = InitState) -> +checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> + %% Before checking out any messsage to any consumer, + %% first remove all expired messages from the head of the queue. 
+ {#?MODULE{service_queue = SQ0, + messages = Messages0, + consumers = Cons0} = InitState, Effects1} = expire_msgs(Ts, InitState0, Effects0), case priority_queue:out(SQ0) of {{value, ConsumerId}, SQ1} when is_map_key(ConsumerId, Cons0) -> @@ -1859,11 +2093,11 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, %% no credit but was still on queue %% can happen when draining %% recurse without consumer on queue - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = cancelled} -> - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = suspected_down} -> - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{checked_out = Checked0, next_msg_id = Next, credit = Credit, @@ -1881,27 +2115,101 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, true -> add_bytes_checkout(Header, State1); false -> + %% TODO do not subtract from memory here since + %% messages are still in memory when checked out subtract_in_memory_counts( Header, add_bytes_checkout(Header, State1)) end, - {success, ConsumerId, Next, ConsumerMsg, State}; + {success, ConsumerId, Next, ConsumerMsg, State, Effects1}; error -> %% consumer did not exist but was queued, recurse - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}) + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1) end; empty -> - {nochange, InitState} + {nochange, InitState, Effects1} end; {{value, _ConsumerId}, SQ1} -> %% consumer did not exist but was queued, recurse - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); {empty, _} -> + Effects = timer_effect(Ts, InitState, Effects1), case lqueue:len(Messages0) of - 0 -> {nochange, InitState}; - _ -> {inactive, InitState} + 0 -> + {nochange, InitState, Effects}; + _ -> + {inactive, InitState, Effects} end end. +%% dequeue all expired messages +expire_msgs(RaCmdTs, State0, Effects0) -> + case take_next_msg(State0) of + {?INDEX_MSG(Idx, ?MSG(#{expiry := Expiry} = Header, _) = Msg) = FullMsg, State1} + when RaCmdTs >= Expiry -> + #?MODULE{dlx = DlxState0, + cfg = #cfg{dead_letter_handler = DLH}, + ra_indexes = Indexes0} = State2 = add_bytes_drop(Header, State1), + case DLH of + at_least_once -> + DlxState = rabbit_fifo_dlx:discard(FullMsg, expired, DlxState0), + State = State2#?MODULE{dlx = DlxState}, + expire_msgs(RaCmdTs, State, Effects0); + _ -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State3 = decr_total(State2), + State4 = case Msg of + ?DISK_MSG(_) -> + State3; + _ -> + subtract_in_memory_counts(Header, State3) + end, + Effects = dead_letter_effects(expired, [FullMsg], + State4, Effects0), + State = State4#?MODULE{ra_indexes = Indexes}, + expire_msgs(RaCmdTs, State, Effects) + end; + {?PREFIX_MEM_MSG(#{expiry := Expiry} = Header) = Msg, State1} + when RaCmdTs >= Expiry -> + State2 = expire_prefix_msg(Msg, Header, State1), + expire_msgs(RaCmdTs, State2, Effects0); + {?DISK_MSG(#{expiry := Expiry} = Header) = Msg, State1} + when RaCmdTs >= Expiry -> + State2 = expire_prefix_msg(Msg, Header, State1), + expire_msgs(RaCmdTs, State2, Effects0); + _ -> + {State0, Effects0} + end. 
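expire_msgs/3 above only ever inspects the head of the queue: it keeps taking messages while the Ra command timestamp is at or past their expiry header and stops at the first head message that has not expired (or carries no expiry), which is also why a single timer armed for the head message (see timer_effect/3 below) is sufficient. The loop reduced to a generic form, with an illustrative element shape:

%% Queue elements are assumed to be {ExpiryTsOrUndefined, Item} pairs here.
expire_head(Now, Q0, DropFun, Acc0) ->
    case lqueue:out(Q0) of
        {{value, {Expiry, Item}}, Q} when is_integer(Expiry), Now >= Expiry ->
            expire_head(Now, Q, DropFun, DropFun(Item, Acc0));
        _ ->
            %% head not expired (or queue empty): stop, keep the queue as it was
            {Q0, Acc0}
    end.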
+ +expire_prefix_msg(Msg, Header, State0) -> + #?MODULE{dlx = DlxState0, + cfg = #cfg{dead_letter_handler = DLH}} = State1 = add_bytes_drop(Header, State0), + case DLH of + at_least_once -> + DlxState = rabbit_fifo_dlx:discard(Msg, expired, DlxState0), + State1#?MODULE{dlx = DlxState}; + _ -> + State2 = case Msg of + ?DISK_MSG(_) -> + State1; + _ -> + subtract_in_memory_counts(Header, State1) + end, + decr_total(State2) + end. + +timer_effect(RaCmdTs, State, Effects) -> + T = case take_next_msg(State) of + {?INDEX_MSG(_, ?MSG(#{expiry := Expiry}, _)), _} when is_number(Expiry) -> + %% Next message contains 'expiry' header. + %% (Re)set timer so that mesage will be dropped or dead-lettered on time. + Expiry - RaCmdTs; + _ -> + %% Next message does not contain 'expiry' header. + %% Therefore, do not set timer or cancel timer if it was set. + infinity + end, + [{timer, expire_msgs, T} | Effects]. + update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto, credit = 0} = Con, #?MODULE{consumers = Cons} = State) -> @@ -1996,21 +2304,22 @@ maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, %% creates a dehydrated version of the current state to be cached and %% potentially used to for a snaphot at a later point dehydrate_state(#?MODULE{msg_bytes_in_memory = 0, - cfg = #cfg{max_length = 0}, + cfg = #cfg{max_in_memory_length = 0}, consumers = Consumers} = State) -> - %% no messages are kept in memory, no need to - %% overly mutate the current state apart from removing indexes and cursors + % no messages are kept in memory, no need to + % overly mutate the current state apart from removing indexes and cursors State#?MODULE{ - ra_indexes = rabbit_fifo_index:empty(), - consumers = maps:map(fun (_, C) -> - dehydrate_consumer(C) - end, Consumers), - release_cursors = lqueue:new()}; + ra_indexes = rabbit_fifo_index:empty(), + consumers = maps:map(fun (_, C) -> + dehydrate_consumer(C) + end, Consumers), + release_cursors = lqueue:new()}; dehydrate_state(#?MODULE{messages = Messages, consumers = Consumers, returns = Returns, prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, - waiting_consumers = Waiting0} = State) -> + waiting_consumers = Waiting0, + dlx = DlxState} = State) -> RCnt = lqueue:len(Returns), %% TODO: optimise this function as far as possible PrefRet1 = lists:foldr(fun (M, Acc) -> @@ -2031,7 +2340,8 @@ dehydrate_state(#?MODULE{messages = Messages, returns = lqueue:new(), prefix_msgs = {PRCnt + RCnt, PrefRet, PPCnt + lqueue:len(Messages), PrefMsgs}, - waiting_consumers = Waiting}. + waiting_consumers = Waiting, + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. dehydrate_messages(Msgs0) -> {OutRes, Msgs} = lqueue:out(Msgs0), @@ -2053,7 +2363,8 @@ dehydrate_message(?PREFIX_MEM_MSG(_) = M) -> dehydrate_message(?DISK_MSG(_) = M) -> M; dehydrate_message(?INDEX_MSG(_Idx, ?DISK_MSG(_Header) = Msg)) -> - %% use disk msgs directly as prefix messages + %% Use disk msgs directly as prefix messages. + %% This avoids memory allocation since we do not convert. Msg; dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> ?PREFIX_MEM_MSG(Header). 
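timer_effect/3 above turns the head message's expiry into a single named Ra timer: when the head carries an expiry header the timer is (re)armed with the remaining time, otherwise it is set to infinity, which cancels a previously set timer. Both cases in isolation (helper name is illustrative):

%% Expiry is the head message's absolute 'expiry' header in ms, or undefined.
timer_for_head(_Now, undefined) ->
    {timer, expire_msgs, infinity};        %% no expiry at the head: cancel
timer_for_head(Now, Expiry) when is_integer(Expiry) ->
    {timer, expire_msgs, Expiry - Now}.    %% non-negative: expired heads were just dropped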
@@ -2062,11 +2373,13 @@ dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> normalize(#?MODULE{ra_indexes = _Indexes, returns = Returns, messages = Messages, - release_cursors = Cursors} = State) -> + release_cursors = Cursors, + dlx = DlxState} = State) -> State#?MODULE{ returns = lqueue:from_list(lqueue:to_list(Returns)), messages = lqueue:from_list(lqueue:to_list(Messages)), - release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}. + release_cursors = lqueue:from_list(lqueue:to_list(Cursors)), + dlx = rabbit_fifo_dlx:normalize(DlxState)}. is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> @@ -2314,3 +2627,14 @@ smallest_raft_index(#?MODULE{cfg = _Cfg, {undefined, State} end end. + +subtract_in_memory(Msgs, State) -> + lists:foldl(fun(?INDEX_MSG(_, ?DISK_MSG(_)), S) -> + S; + (?INDEX_MSG(_, ?MSG(H, _)), S) -> + subtract_in_memory_counts(H, S); + (?DISK_MSG(_), S) -> + S; + (?PREFIX_MEM_MSG(H), S) -> + subtract_in_memory_counts(H, S) + end, State, Msgs). diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index c797c9d9bd07..ca37fbca7981 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -2,16 +2,21 @@ %% macros for memory optimised tuple structures -define(TUPLE(A, B), [A | B]). --define(DISK_MSG_TAG, '$disk'). -% -define(PREFIX_DISK_MSG_TAG, '$prefix_disk'). --define(PREFIX_MEM_MSG_TAG, '$prefix_inmem'). +%% We want short atoms since their binary representations will get +%% persisted in a snapshot for every message. +%% '$d' stand for 'disk'. +-define(DISK_MSG_TAG, '$d'). +%% '$m' stand for 'memory'. +-define(PREFIX_MEM_MSG_TAG, '$m'). -define(DISK_MSG(Header), [Header | ?DISK_MSG_TAG]). -define(MSG(Header, RawMsg), [Header | RawMsg]). -define(INDEX_MSG(Index, Msg), [Index | Msg]). +-define(PREFIX_MEM_MSG(Header), [Header | ?PREFIX_MEM_MSG_TAG]). + +% -define(PREFIX_DISK_MSG_TAG, '$prefix_disk'). % -define(PREFIX_DISK_MSG(Header), [?PREFIX_DISK_MSG_TAG | Header]). % -define(PREFIX_DISK_MSG(Header), ?DISK_MSG(Header)). --define(PREFIX_MEM_MSG(Header), [?PREFIX_MEM_MSG_TAG | Header]). -type option(T) :: undefined | T. @@ -32,11 +37,14 @@ %% same process -type msg_header() :: msg_size() | - #{size := msg_size(), - delivery_count => non_neg_integer()}. +#{size := msg_size(), + delivery_count => non_neg_integer(), + expiry => milliseconds()}. %% The message header: %% delivery_count: the number of unsuccessful delivery attempts. %% A non-zero value indicates a previous attempt. +%% expiry: Epoch time in ms when a message expires. Set during enqueue. +%% Value is determined by per-queue or per-message message TTL. 
%% If it only contains the size it can be condensed to an integer only -type msg() :: ?MSG(msg_header(), raw_msg()) | @@ -122,7 +130,7 @@ -record(enqueuer, {next_seqno = 1 :: msg_seqno(), % out of order enqueues - sorted list - pending = [] :: [{msg_seqno(), ra:index(), raw_msg()}], + pending = [] :: [{msg_seqno(), ra:index(), milliseconds(), raw_msg()}], status = up :: up | suspected_down, %% it is useful to have a record of when this was blocked @@ -137,7 +145,7 @@ {name :: atom(), resource :: rabbit_types:r('queue'), release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}), - dead_letter_handler :: option(applied_mfa()), + dead_letter_handler :: option({at_most_once, applied_mfa()} | at_least_once), become_leader_handler :: option(applied_mfa()), overflow_strategy = drop_head :: drop_head | reject_publish, max_length :: option(non_neg_integer()), @@ -149,6 +157,7 @@ max_in_memory_length :: option(non_neg_integer()), max_in_memory_bytes :: option(non_neg_integer()), expires :: undefined | milliseconds(), + msg_ttl :: undefined | milliseconds(), unused_1, unused_2 }). @@ -166,6 +175,7 @@ % queue of returned msg_in_ids - when checking out it picks from returns = lqueue:new() :: lqueue:lqueue(term()), % a counter of enqueues - used to trigger shadow copy points + % reset to 0 when release_cursor gets stored enqueue_count = 0 :: non_neg_integer(), % a map containing all the live processes that have ever enqueued % a message to this queue as well as a cached value of the smallest @@ -177,11 +187,19 @@ % index when there are large gaps but should be faster than gb_trees % for normal appending operations as it's backed by a map ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), + %% A release cursor is essentially a snapshot without message bodies + %% (aka. "dehydrated state") taken at time T in order to truncate + %% the log at some point in the future when all messages that were enqueued + %% up to time T have been removed (e.g. consumed, dead-lettered, or dropped). + %% This concept enables snapshots to not contain any message bodies. + %% Advantage: Smaller snapshots are sent between Ra nodes. + %% Working assumption: Messages are consumed in a FIFO-ish order because + %% the log is truncated only until the oldest message. release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, ra:index(), #rabbit_fifo{}}), % consumers need to reflect consumer state at time of snapshot % needs to be part of snapshot - consumers = #{} :: #{consumer_id() => #consumer{}}, + consumers = #{} :: #{consumer_id() => consumer()}, % consumers that require further service are queued here % needs to be part of snapshot service_queue = priority_queue:new() :: priority_queue:q(), @@ -194,7 +212,10 @@ %% overflow calculations). %% This is done so that consumers are still served in a deterministic %% order on recovery. + %% TODO Remove this field and store prefix messages in-place. This will + %% simplify the checkout logic. 
prefix_msgs = {0, [], 0, []} :: prefix_msgs(), + dlx = rabbit_fifo_dlx:init() :: rabbit_fifo_dlx:state(), msg_bytes_enqueue = 0 :: non_neg_integer(), msg_bytes_checkout = 0 :: non_neg_integer(), %% waiting consumers, one is picked active consumer is cancelled or dies @@ -209,7 +230,7 @@ -type config() :: #{name := atom(), queue_resource := rabbit_types:r('queue'), - dead_letter_handler => applied_mfa(), + dead_letter_handler => option({at_most_once, applied_mfa()} | at_least_once), become_leader_handler => applied_mfa(), release_cursor_interval => non_neg_integer(), max_length => non_neg_integer(), @@ -220,5 +241,6 @@ single_active_consumer_on => boolean(), delivery_limit => non_neg_integer(), expires => non_neg_integer(), + msg_ttl => non_neg_integer(), created => non_neg_integer() }. diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 3f5315de08b2..0faf32fd300d 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -531,7 +531,7 @@ update_machine_state(Server, Conf) -> %% `{internal, AppliedCorrelations, State}' if the event contained an internally %% handled event such as a notification and a correlation was included with %% the command (e.g. in a call to `enqueue/3' the correlation terms are returned -%% here. +%% here). %% %% `{RaFifoEvent, State}' if the event contained a client message generated by %% the `rabbit_fifo' state machine such as a delivery. diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl new file mode 100644 index 000000000000..cc41733151b7 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -0,0 +1,324 @@ +-module(rabbit_fifo_dlx). + +-include("rabbit_fifo_dlx.hrl"). +-include("rabbit_fifo.hrl"). + +% client API, e.g. for rabbit_fifo_dlx_client +-export([make_checkout/2, + make_settle/1]). + +% called by rabbit_fifo delegating DLX handling to this module +-export([init/0, apply/2, discard/3, overview/1, + checkout/1, state_enter/4, + start_worker/2, terminate_worker/1, cleanup/1, purge/1, + consumer_pid/1, dehydrate/1, normalize/1, + stat/1]). + +%% This module handles the dead letter (DLX) part of the rabbit_fifo state machine. +%% This is a separate module to better unit test and provide separation of concerns. +%% This module maintains its own state: +%% a queue of DLX messages, a single node local DLX consumer, and some stats. +%% The state of this module is included into rabbit_fifo state because there can only by one Ra state machine. +%% The rabbit_fifo module forwards all DLX commands to this module where we then update the DLX specific state only: +%% e.g. DLX consumer subscribed, adding / removing discarded messages, stats +%% +%% It also runs its own checkout logic sending DLX messages to the DLX consumer. + +-record(checkout,{ + consumer :: atom(), + prefetch :: non_neg_integer() + }). +-record(settle, {msg_ids :: [msg_id()]}). +-opaque protocol() :: {dlx, #checkout{} | #settle{}}. +-opaque state() :: #?MODULE{}. +-export_type([state/0, protocol/0, reason/0]). + +init() -> + #?MODULE{}. + +make_checkout(RegName, NumUnsettled) -> + {dlx, #checkout{consumer = RegName, + prefetch = NumUnsettled + }}. + +make_settle(MessageIds) when is_list(MessageIds) -> + {dlx, #settle{msg_ids = MessageIds}}. 
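The module comment above describes the split: rabbit_fifo owns the Ra machine and keeps a rabbit_fifo_dlx:state() in its dlx field, while the {dlx, ...} protocol commands built by make_checkout/2 and make_settle/1 are unwrapped by rabbit_fifo and handed to rabbit_fifo_dlx:apply/2. The forwarding clause itself is not part of this hunk; a hypothetical sketch of what such a clause might look like in rabbit_fifo (settled messages would additionally have to be completed in the main state, which is omitted here):

%% hypothetical clause, not from the patch
apply(_Meta, {dlx, Cmd}, #rabbit_fifo{dlx = DlxState0} = State0) ->
    {DlxState, Reply} = rabbit_fifo_dlx:apply(Cmd, DlxState0),
    {State0#rabbit_fifo{dlx = DlxState}, Reply, []}.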
+ +overview(#?MODULE{consumer = undefined, + msg_bytes = MsgBytes, + msg_bytes_checkout = 0, + discards = Discards}) -> + overview0(Discards, #{}, MsgBytes, 0); +overview(#?MODULE{consumer = #dlx_consumer{checked_out = Checked}, + msg_bytes = MsgBytes, + msg_bytes_checkout = MsgBytesCheckout, + discards = Discards}) -> + overview0(Discards, Checked, MsgBytes, MsgBytesCheckout). + +overview0(Discards, Checked, MsgBytes, MsgBytesCheckout) -> + #{num_discarded => lqueue:len(Discards), + num_discard_checked_out => map_size(Checked), + discard_message_bytes => MsgBytes, + discard_checkout_message_bytes => MsgBytesCheckout}. + +stat(#?MODULE{consumer = Con, + discards = Discards, + msg_bytes = MsgBytes, + msg_bytes_checkout = MsgBytesCheckout}) -> + Num0 = lqueue:len(Discards), + Num = case Con of + undefined -> + Num0; + #dlx_consumer{checked_out = Checked} -> + Num0 + map_size(Checked) + end, + Bytes = MsgBytes + MsgBytesCheckout, + {Num, Bytes}. + +apply(#checkout{consumer = RegName, + prefetch = Prefetch}, + #?MODULE{consumer = undefined} = State0) -> + State = State0#?MODULE{consumer = #dlx_consumer{registered_name = RegName, + prefetch = Prefetch}}, + {State, ok}; +apply(#checkout{consumer = RegName, + prefetch = Prefetch}, + #?MODULE{consumer = #dlx_consumer{checked_out = CheckedOutOldConsumer}, + discards = Discards0, + msg_bytes = Bytes, + msg_bytes_checkout = BytesCheckout} = State0) -> + %% Since we allow only a single consumer, the new consumer replaces the old consumer. + %% All checked out messages to the old consumer need to be returned to the discards queue + %% such that these messages can be (eventually) re-delivered to the new consumer. + %% When inserting back into the discards queue, we respect the original order in which messages + %% were discarded. + Checked0 = maps:to_list(CheckedOutOldConsumer), + Checked1 = lists:keysort(1, Checked0), + {Discards, BytesMoved} = lists:foldr(fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> + {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} + end, {Discards0, 0}, Checked1), + State = State0#?MODULE{consumer = #dlx_consumer{registered_name = RegName, + prefetch = Prefetch}, + discards = Discards, + msg_bytes = Bytes + BytesMoved, + msg_bytes_checkout = BytesCheckout - BytesMoved}, + {State, ok}; +apply(#settle{msg_ids = MsgIds}, + #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, + msg_bytes_checkout = BytesCheckout} = State0) -> + Acked = maps:with(MsgIds, Checked), + AckedRsnMsgs = maps:values(Acked), + AckedMsgs = lists:map(fun({_Reason, Msg}) -> Msg end, AckedRsnMsgs), + AckedBytes = lists:foldl(fun(Msg, Bytes) -> + Bytes + size_in_bytes(Msg) + end, 0, AckedMsgs), + Unacked = maps:without(MsgIds, Checked), + State = State0#?MODULE{consumer = C#dlx_consumer{checked_out = Unacked}, + msg_bytes_checkout = BytesCheckout - AckedBytes}, + {State, AckedMsgs}. + +%%TODO delete delivery_count header to save space? +%% It's not needed anymore. +discard(Msg, Reason, #?MODULE{discards = Discards0, + msg_bytes = MsgBytes0} = State) -> + Discards = lqueue:in({Reason, Msg}, Discards0), + MsgBytes = MsgBytes0 + size_in_bytes(Msg), + State#?MODULE{discards = Discards, + msg_bytes = MsgBytes}. + +checkout(#?MODULE{consumer = undefined, + discards = Discards} = State) -> + case lqueue:is_empty(Discards) of + true -> + ok; + false -> + rabbit_log:warning("there are dead-letter messages but no dead-letter consumer") + end, + {State, []}; +checkout(State) -> + checkout0(checkout_one(State), {[],[]}). 
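The consumer-replacement clause of apply/2 above relies on two properties: checked-out entries are keyed by monotonically increasing msg_id, and lqueue:in_r/2 pushes to the front, so sorting by key and folding from the right re-inserts the old consumer's messages ahead of the not-yet-delivered discards in their original discard order. The same idea in isolation (names are illustrative):

%% Checked is #{MsgId => Msg} with ascending MsgIds; Discards0 is the lqueue of
%% not-yet-delivered dead-lettered messages.
requeue_checked_out(Checked, Discards0) ->
    lists:foldr(fun({_MsgId, Msg}, Q) -> lqueue:in_r(Msg, Q) end,
                Discards0,
                lists:keysort(1, maps:to_list(Checked))).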
+ +checkout0({success, MsgId, {Reason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) when is_integer(RaftIdx) -> + DelMsg = {RaftIdx, {Reason, MsgId, Header}}, + SendAcc = {InMemMsgs, [DelMsg|LogMsgs]}, + checkout0(checkout_one(State ), SendAcc); +checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) when is_integer(Idx) -> + DelMsg = {MsgId, {Reason, Header, Msg}}, + SendAcc = {[DelMsg|InMemMsgs], LogMsgs}, + checkout0(checkout_one(State), SendAcc); +checkout0({success, _MsgId, {_Reason, ?TUPLE(_, _)}, State}, SendAcc) -> + %% This is a prefix message which means we are recovering from a snapshot. + %% We know: + %% 1. This message was already delivered in the past, and + %% 2. The recovery Raft log ahead of this Raft command will defintely settle this message. + %% Therefore, here, we just check this message out to the consumer but do not re-deliver this message + %% so that we will end up with the correct and deterministic state once the whole recovery log replay is completed. + checkout0(checkout_one(State), SendAcc); +checkout0(#?MODULE{consumer = #dlx_consumer{registered_name = RegName}} = State, SendAcc) -> + Effects = delivery_effects(whereis(RegName), SendAcc), + {State, Effects}. + +checkout_one(#?MODULE{consumer = #dlx_consumer{checked_out = Checked, + prefetch = Prefetch}} = State) when map_size(Checked) >= Prefetch -> + State; +checkout_one(#?MODULE{consumer = #dlx_consumer{checked_out = Checked0, + next_msg_id = Next} = Con0} = State0) -> + case take_next_msg(State0) of + {{_, Msg} = ReasonMsg, State1} -> + Checked = maps:put(Next, ReasonMsg, Checked0), + State2 = State1#?MODULE{consumer = Con0#dlx_consumer{checked_out = Checked, + next_msg_id = Next + 1}}, + Bytes = size_in_bytes(Msg), + State = add_bytes_checkout(Bytes, State2), + {success, Next, ReasonMsg, State}; + empty -> + State0 + end. + +take_next_msg(#?MODULE{discards = Discards0} = State) -> + case lqueue:out(Discards0) of + {empty, _} -> + empty; + {{value, ReasonMsg}, Discards} -> + {ReasonMsg, State#?MODULE{discards = Discards}} + end. + +add_bytes_checkout(Size, #?MODULE{msg_bytes = Bytes, + msg_bytes_checkout = BytesCheckout} = State) -> + State#?MODULE{msg_bytes = Bytes - Size, + msg_bytes_checkout = BytesCheckout + Size}. + +size_in_bytes(Msg) -> + Header = rabbit_fifo:get_msg_header(Msg), + rabbit_fifo:get_header(size, Header). + +%% returns at most one delivery effect because there is only one consumer +delivery_effects(_CPid, {[], []}) -> + []; +delivery_effects(CPid, {InMemMsgs, []}) -> + [{send_msg, CPid, {dlx_delivery, lists:reverse(InMemMsgs)}, [ra_event]}]; +delivery_effects(CPid, {InMemMsgs, IdxMsgs0}) -> + IdxMsgs = lists:reverse(IdxMsgs0), + {RaftIdxs, Data} = lists:unzip(IdxMsgs), + [{log, RaftIdxs, + fun(Log) -> + Msgs0 = lists:zipwith(fun ({enqueue, _, _, Msg}, {Reason, MsgId, Header}) -> + {MsgId, {Reason, Header, Msg}} + end, Log, Data), + Msgs = case InMemMsgs of + [] -> + Msgs0; + _ -> + lists:sort(InMemMsgs ++ Msgs0) + end, + [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] + end}]. + +state_enter(leader, QRef, QName, _State) -> + start_worker(QRef, QName); +state_enter(_, _, _, State) -> + terminate_worker(State). + +start_worker(QRef, QName) -> + RegName = registered_name(QName), + %% We must ensure that starting the rabbit_fifo_dlx_worker succeeds. + %% Therefore, we don't use an effect. 
+ %% Also therefore, if starting the rabbit_fifo_dlx_worker fails, let the whole Ra server process crash + %% in which case another Ra node will become leader. + %% supervisor:start_child/2 blocks until rabbit_fifo_dlx_worker:init/1 returns (TODO check if this is correct). + %% That's okay since rabbit_fifo_dlx_worker:init/1 returns immediately by delegating + %% initial setup to handle_continue/2. + case whereis(RegName) of + undefined -> + {ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef, RegName]), + rabbit_log:debug("started rabbit_fifo_dlx_worker (~s ~p)", [RegName, Pid]); + Pid -> + rabbit_log:debug("rabbit_fifo_dlx_worker (~s ~p) already started", [RegName, Pid]) + end. + +terminate_worker(#?MODULE{consumer = #dlx_consumer{registered_name = RegName}}) -> + case whereis(RegName) of + undefined -> + ok; + Pid -> + %% Note that we can't return a mod_call effect here because mod_call is executed on the leader only. + ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid), + rabbit_log:debug("terminated rabbit_fifo_dlx_worker (~s ~p)", [RegName, Pid]) + end; +terminate_worker(_) -> + ok. + +%% TODO consider not registering the worker name at all +%% because if there is a new worker process, it will always subscribe and tell us its new pid +registered_name(QName) when is_atom(QName) -> + list_to_atom(atom_to_list(QName) ++ "_dlx"). + +consumer_pid(#?MODULE{consumer = #dlx_consumer{registered_name = Name}}) -> + whereis(Name); +consumer_pid(_) -> + undefined. + +%% called when switching from at-least-once to at-most-once +cleanup(#?MODULE{consumer = Consumer, + discards = Discards} = State) -> + terminate_worker(State), + %% Return messages in the order they got discarded originally + %% for the final at-most-once dead-lettering. + CheckedReasonMsgs = case Consumer of + #dlx_consumer{checked_out = Checked} when is_map(Checked) -> + L0 = maps:to_list(Checked), + L1 = lists:keysort(1, L0), + {_, L2} = lists:unzip(L1), + L2; + _ -> + [] + end, + DiscardReasonMsgs = lqueue:to_list(Discards), + CheckedReasonMsgs ++ DiscardReasonMsgs. + +purge(#?MODULE{consumer = Con0, + discards = Discards} = State0) -> + {Con, CheckedMsgs} = case Con0 of + #dlx_consumer{checked_out = Checked} when is_map(Checked) -> + L = maps:to_list(Checked), + {_, CheckedReasonMsgs} = lists:unzip(L), + {_, Msgs} = lists:unzip(CheckedReasonMsgs), + C = Con0#dlx_consumer{checked_out = #{}}, + {C, Msgs}; + _ -> + {Con0, []} + end, + DiscardReasonMsgs = lqueue:to_list(Discards), + {_, DiscardMsgs} = lists:unzip(DiscardReasonMsgs), + PurgedMsgs = CheckedMsgs ++ DiscardMsgs, + State = State0#?MODULE{consumer = Con, + discards = lqueue:new(), + msg_bytes = 0, + msg_bytes_checkout = 0 + }, + {State, PurgedMsgs}. + +%% TODO Consider alternative to not dehydrate at all +%% by putting messages to disk before enqueueing them in discards queue. +dehydrate(#?MODULE{discards = Discards, + consumer = Con} = State) -> + State#?MODULE{discards = dehydrate_messages(Discards), + consumer = dehydrate_consumer(Con)}. + +dehydrate_messages(Discards) -> + L0 = lqueue:to_list(Discards), + L1 = lists:map(fun({_Reason, Msg}) -> + {?NIL, rabbit_fifo:dehydrate_message(Msg)} + end, L0), + lqueue:from_list(L1). + +dehydrate_consumer(#dlx_consumer{checked_out = Checked0} = Con) -> + Checked = maps:map(fun (_, {_, Msg}) -> + {?NIL, rabbit_fifo:dehydrate_message(Msg)} + end, Checked0), + Con#dlx_consumer{checked_out = Checked}; +dehydrate_consumer(undefined) -> + undefined. 
+
+normalize(#?MODULE{discards = Discards} = State) ->
+    State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards))}.
diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl
new file mode 100644
index 000000000000..5d8c023f9e8c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl
@@ -0,0 +1,30 @@
+-define(NIL, []).
+
+%% At-least-once dead-lettering does not support reason 'maxlen'.
+%% The reason for prefix messages is [] because the message will not
+%% actually be delivered, and storing 2 bytes in the persisted snapshot
+%% takes less space than the reason atom.
+-type reason() :: 'expired' | 'rejected' | delivery_limit | ?NIL.
+
+% See snapshot scenarios in rabbit_fifo_prop_SUITE. Add dlx dehydrate tests.
+-record(dlx_consumer,{
+                      %% We don't require a consumer tag because a consumer tag is a means to distinguish
+                      %% multiple consumers in the same channel. The rabbit_fifo_dlx_worker channel-like process, however,
+                      %% creates only a single consumer to this quorum queue's discards queue.
+                      registered_name :: atom(),
+                      prefetch :: non_neg_integer(),
+                      checked_out = #{} :: #{msg_id() => {reason(), indexed_msg()}},
+                      next_msg_id = 0 :: msg_id() % part of snapshot data
+                      % total number of checked out messages - ever
+                      % incremented for each delivery
+                      % delivery_count = 0 :: non_neg_integer(),
+                      % status = up :: up | suspected_down | cancelled
+                     }).
+
+-record(rabbit_fifo_dlx,{
+                         consumer = undefined :: #dlx_consumer{} | undefined,
+                         %% Queue of dead-lettered messages.
+                         discards = lqueue:new() :: lqueue:lqueue({reason(), indexed_msg()}),
+                         msg_bytes = 0 :: non_neg_integer(),
+                         msg_bytes_checkout = 0 :: non_neg_integer()
+                        }).
diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl
new file mode 100644
index 000000000000..4b9733b769bc
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl
@@ -0,0 +1,93 @@
+-module(rabbit_fifo_dlx_client).
+
+-export([checkout/4, settle/2, handle_ra_event/3,
+         overview/1]).
+
+-record(state,{
+          queue_resource :: rabbit_types:r(queue),
+          leader :: ra:server_id(),
+          last_msg_id :: non_neg_integer() | -1
+         }).
+-opaque state() :: #state{}.
+-export_type([state/0]).
+
+checkout(RegName, QResource, Leader, NumUnsettled) ->
+    Cmd = rabbit_fifo_dlx:make_checkout(RegName, NumUnsettled),
+    State = #state{queue_resource = QResource,
+                   leader = Leader,
+                   last_msg_id = -1},
+    process_command(Cmd, State, 5).
+
+settle(MsgIds, State) when is_list(MsgIds) ->
+    Cmd = rabbit_fifo_dlx:make_settle(MsgIds),
+    %%TODO use pipeline_command without correlation ID, i.e. without notification
+    process_command(Cmd, State, 2).
+
+process_command(_Cmd, _State, 0) ->
+    {error, ra_command_failed};
+process_command(Cmd, #state{leader = Leader} = State, Tries) ->
+    case ra:process_command(Leader, Cmd, 60_000) of
+        {ok, ok, Leader} ->
+            {ok, State#state{leader = Leader}};
+        {ok, ok, L} ->
+            rabbit_log:warning("Failed to process command ~p on quorum queue leader ~p because the actual leader is ~p.",
+                               [Cmd, Leader, L]),
+            {error, ra_command_failed};
+        Err ->
+            rabbit_log:warning("Failed to process command ~p on quorum queue leader ~p: ~p~n"
+                               "Trying ~b more time(s)...",
+                               [Cmd, Leader, Err, Tries - 1]),
+            process_command(Cmd, State, Tries - 1)
+    end.
+
+handle_ra_event(Leader, {machine, {dlx_delivery, _} = Del}, #state{leader = Leader} = State) ->
+    handle_delivery(Del, State);
+handle_ra_event(_From, Evt, State) ->
+    rabbit_log:warning("~s received unknown ra event: ~p", [?MODULE, Evt]),
+    {ok, State, []}.
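rabbit_fifo_dlx_client above is a thin wrapper: checkout/4 registers the single DLX consumer with the quorum queue's Ra leader and settle/2 acks message ids back, with process_command/3 retrying a bounded number of times while tracking the leader. A hedged usage sketch, assuming a running quorum queue whose Ra cluster name is qq_cluster; the resource name, registered consumer name, cluster name and message ids below are made up for illustration:

example() ->
    QRef = rabbit_misc:r(<<"/">>, queue, <<"my-qq">>),
    Leader = {qq_cluster, node()},
    {ok, C0} = rabbit_fifo_dlx_client:checkout(my_qq_dlx, QRef, Leader, 32),
    %% ...later, after a {dlx_delivery, ...} Ra event has been handled...
    {ok, _C} = rabbit_fifo_dlx_client:settle([0, 1, 2], C0).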
+ +handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs}, + #state{queue_resource = QRes, + last_msg_id = Prev} = State0) -> + %% format as a deliver action + {LastId, _} = lists:last(IdMsgs), + Del = {deliver, transform_msgs(QRes, IdMsgs)}, + case Prev of + Prev when FstId =:= Prev+1 -> + %% expected message ID(s) got delivered + State = State0#state{last_msg_id = LastId}, + {ok, State, [Del]}; + Prev when FstId > Prev+1 -> + %% messages ID(s) are missing, therefore fetch all checked-out discarded messages + %% TODO implement as done in + %% https://github.com/rabbitmq/rabbitmq-server/blob/b4eb5e2cfd7f85a1681617dc489dd347fa9aac72/deps/rabbit/src/rabbit_fifo_client.erl#L732-L744 + %% A: not needed because of local guarantees, let it crash + exit(not_implemented); + Prev when FstId =< Prev -> + rabbit_log:debug("dropping messages with duplicate IDs (~b to ~b) consumed from ~s", + [FstId, Prev, rabbit_misc:rs(QRes)]), + case lists:dropwhile(fun({Id, _}) -> Id =< Prev end, IdMsgs) of + [] -> + {ok, State0, []}; + IdMsgs2 -> + handle_delivery({dlx_delivery, IdMsgs2}, State0) + end; + _ when FstId =:= 0 -> + % the very first delivery + % TODO We init last_msg_id with -1. So, why would we ever run into this branch? + % A: can be a leftover + rabbit_log:debug("very first delivery consumed from ~s", [rabbit_misc:rs(QRes)]), + State = State0#state{last_msg_id = 0}, + {ok, State, [Del]} + end. + +transform_msgs(QRes, Msgs) -> + lists:map( + fun({MsgId, {Reason, _MsgHeader, Msg}}) -> + {QRes, MsgId, Msg, Reason} + end, Msgs). + +overview(#state{leader = Leader, + last_msg_id = LastMsgId}) -> + #{leader => Leader, + last_msg_id => LastMsgId}. diff --git a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl new file mode 100644 index 000000000000..29043eec3f06 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl @@ -0,0 +1,37 @@ +-module(rabbit_fifo_dlx_sup). + +-behaviour(supervisor). + +-rabbit_boot_step({?MODULE, + [{description, "supervisor of quorum queue dead-letter workers"}, + {mfa, {rabbit_sup, start_supervisor_child, [?MODULE]}}, + {requires, kernel_ready}, + {enables, core_initialized}]}). + +%% supervisor callback +-export([init/1]). +%% client API +-export([start_link/0]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init([]) -> + FeatureFlag = quorum_queue, + %%TODO rabbit_feature_flags:is_enabled(FeatureFlag) ? + case rabbit_ff_registry:is_enabled(FeatureFlag) of + true -> + SupFlags = #{strategy => simple_one_for_one, + intensity => 1, + period => 5}, + Worker = rabbit_fifo_dlx_worker, + ChildSpec = #{id => Worker, + start => {Worker, start_link, []}, + type => worker, + modules => [Worker]}, + {ok, {SupFlags, [ChildSpec]}}; + false -> + rabbit_log:info("not starting supervisor ~s because feature flag ~s is disabled", + [?MODULE, FeatureFlag]), + ignore + end. diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl new file mode 100644 index 000000000000..89c53533dcb7 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -0,0 +1,571 @@ +%% This module consumes from a single quroum queue's discards queue (containing dead-letttered messages) +%% and forwards the DLX messages at least once to every target queue. +%% +%% Some parts of this module resemble the channel process in the sense that it needs to keep track what messages +%% are consumed but not acked yet and what messages are published but not confirmed yet. 
+%% Compared to the channel process, this module is protocol independent since it doesn't deal with AMQP clients. +%% +%% This module consumes directly from the rabbit_fifo_dlx_client bypassing the rabbit_queue_type interface, +%% but publishes via the rabbit_queue_type interface. +%% While consuming via rabbit_queue_type interface would have worked in practice (by using a special consumer argument, +%% e.g. {<<"x-internal-queue">>, longstr, <<"discards">>} ) using the rabbit_fifo_dlx_client directly provides +%% separation of concerns making things much easier to test, to debug, and to understand. + +-module(rabbit_fifo_dlx_worker). + +-include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). + +-behaviour(gen_server2). + +-export([start_link/2]). +%% gen_server2 callbacks +-export([init/1, terminate/2, handle_continue/2, + handle_cast/2, handle_call/3, handle_info/2, + code_change/3, format_status/2]). + +%%TODO make configurable or leave at 0 which means 2000 as in +%% https://github.com/rabbitmq/rabbitmq-server/blob/1e7df8c436174735b1d167673afd3f1642da5cdc/deps/rabbit/src/rabbit_quorum_queue.erl#L726-L729 +-define(CONSUMER_PREFETCH_COUNT, 100). +-define(HIBERNATE_AFTER, 180_000). +%% If no publisher confirm was received for at least SETTLE_TIMEOUT, message will be redelivered. +%% To prevent duplicates in the target queue and to ensure message will eventually be acked to the source queue, +%% set this value higher than the maximum time it takes for a queue to settle a message. +-define(SETTLE_TIMEOUT, 120_000). + +-record(pending, { + %% consumed_msg_id is not to be confused with consumer delivery tag. + %% The latter represents a means for AMQP clients to (multi-)ack to a channel process. + %% However, queues are not aware of delivery tags. + %% This rabbit_fifo_dlx_worker does not have the concept of delivery tags because it settles (acks) + %% message IDs directly back to the queue (and there is no AMQP consumer). + consumed_msg_id :: non_neg_integer(), + content :: rabbit_types:decoded_content(), + %% TODO Reason is already stored in first x-death header of #content.properties.#'P_basic'.headers + %% So, we could remove this convenience field and lookup the 1st header when redelivering. + reason :: rabbit_fifo_dlx:reason(), + %% + %%TODO instead of using 'unsettled' and 'settled' fields, use rabbit_confirms because it handles many to one logic + %% in a generic way. Its API might need to be modified though if it is targeted only towards channel. + %% + %% target queues for which publisher confirm has not been received yet + unsettled = [] :: [rabbit_amqqueue:name()], + %% target queues for which publisher confirm was received + settled = [] :: [rabbit_amqqueue:name()], + %% Number of times the message was published (i.e. rabbit_queue_type:deliver/3 invoked). + %% Can be 0 if the message was never published (for example no route exists). + publish_count = 0 :: non_neg_integer(), + %% Epoch time in milliseconds when the message was last published (i.e. rabbit_queue_type:deliver/3 invoked). + %% It can be 'undefined' if the message was never published (for example no route exists). + last_published_at :: undefined | integer(), + %% Epoch time in milliseconds when the message was consumed from the source quorum queue. + %% This value never changes. + %% It's mainly informational and meant for debugging to understand for how long the message + %% is sitting around without having received all publisher confirms. 
+ consumed_at :: integer() + }). + +-record(state, { + registered_name :: atom(), + %% There is one rabbit_fifo_dlx_worker per source quorum queue + %% (if dead-letter-strategy at-least-once is used). + queue_ref :: rabbit_amqqueue:name(), + %% configured (x-)dead-letter-exchange of source queue + exchange_ref, + %% configured (x-)dead-letter-routing-key of source queue + routing_key, + dlx_client_state :: rabbit_fifo_dlx_client:state(), + queue_type_state :: rabbit_queue_type:state(), + %% Consumed messages for which we have not received all publisher confirms yet. + %% Therefore, they have not been ACKed yet to the consumer queue. + %% This buffer contains at most CONSUMER_PREFETCH_COUNT pending messages at any given point in time. + pendings = #{} :: #{OutSeq :: non_neg_integer() => #pending{}}, + %% next publisher confirm delivery tag sequence number + next_out_seq = 1, + %% Timer firing every SETTLE_TIMEOUT milliseconds + %% redelivering messages for which not all publisher confirms were received. + %% If there are no pending messages, this timer will eventually be cancelled to allow + %% this worker to hibernate. + timer :: undefined | reference() + }). + +% -type state() :: #state{}. + +%%TODO add metrics like global counters for messages routed, delivered, etc. + +start_link(QRef, RegName) -> + gen_server:start_link({local, RegName}, + ?MODULE, {QRef, RegName}, + [{hibernate_after, ?HIBERNATE_AFTER}]). + +-spec init({rabbit_amqqueue:name(), atom()}) -> {ok, undefined, {continue, {rabbit_amqqueue:name(), atom()}}}. +init(Arg) -> + {ok, undefined, {continue, Arg}}. + +handle_continue({QRef, RegName}, undefined) -> + State = lookup_topology(#state{queue_ref = QRef}), + {ok, Q} = rabbit_amqqueue:lookup(QRef), + {ClusterName, _MaybeOldLeaderNode} = amqqueue:get_pid(Q), + {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(RegName, + QRef, + {ClusterName, node()}, + ?CONSUMER_PREFETCH_COUNT), + {noreply, State#state{registered_name = RegName, + dlx_client_state = ConsumerState, + queue_type_state = rabbit_queue_type:init()}}. + +terminate(_Reason, _State) -> + %%TODO cancel timer? + ok. + +handle_call(Request, From, State) -> + rabbit_log:warning("~s received unhandled call from ~p: ~p", [?MODULE, From, Request]), + {noreply, State}. + +handle_cast({queue_event, QRef, {_From, {machine, lookup_topology}}}, + #state{queue_ref = QRef} = State0) -> + State = lookup_topology(State0), + redeliver_and_ack(State); +handle_cast({queue_event, QRef, {From, Evt}}, + #state{queue_ref = QRef, + dlx_client_state = DlxState0} = State0) -> + %% received dead-letter messsage from source queue + % rabbit_log:debug("~s received queue event: ~p", [rabbit_misc:rs(QRef), E]), + {ok, DlxState, Actions} = rabbit_fifo_dlx_client:handle_ra_event(From, Evt, DlxState0), + State1 = State0#state{dlx_client_state = DlxState}, + State = handle_queue_actions(Actions, State1), + {noreply, State}; +handle_cast({queue_event, QRef, Evt}, + #state{queue_type_state = QTypeState0} = State0) -> + %% received e.g. 
confirm from target queue + case rabbit_queue_type:handle_event(QRef, Evt, QTypeState0) of + {ok, QTypeState1, Actions} -> + State1 = State0#state{queue_type_state = QTypeState1}, + State = handle_queue_actions(Actions, State1), + {noreply, State}; + %% TODO handle as done in + %% https://github.com/rabbitmq/rabbitmq-server/blob/9cf18e83f279408e20430b55428a2b19156c90d7/deps/rabbit/src/rabbit_channel.erl#L771-L783 + eol -> + {noreply, State0}; + {protocol_error, _Type, _Reason, _ReasonArgs} -> + {noreply, State0} + end; +handle_cast(settle_timeout, State0) -> + State = State0#state{timer = undefined}, + redeliver_and_ack(State); +handle_cast(Request, State) -> + rabbit_log:warning("~s received unhandled cast ~p", [?MODULE, Request]), + {noreply, State}. + +redeliver_and_ack(State0) -> + State1 = redeliver_messsages(State0), + %% Routes could have been changed dynamically. + %% If a publisher confirm timed out for a target queue to which we now don't route anymore, ack the message. + State2 = maybe_ack(State1), + State = maybe_set_timer(State2), + {noreply, State}. + +%%TODO monitor source quorum queue upon init / handle_continue and terminate ourself if source quorum queue is DOWN +%% since new leader will re-create a worker +handle_info({'DOWN', _MRef, process, QPid, Reason}, + #state{queue_type_state = QTypeState0} = State0) -> + %% received from target classic queue + State = case rabbit_queue_type:handle_down(QPid, Reason, QTypeState0) of + {ok, QTypeState, Actions} -> + State1 = State0#state{queue_type_state = QTypeState}, + handle_queue_actions(Actions, State1); + {eol, QTypeState1, QRef} -> + QTypeState = rabbit_queue_type:remove(QRef, QTypeState1), + State0#state{queue_type_state = QTypeState} + end, + {noreply, State}; +handle_info(Info, State) -> + rabbit_log:warning("~s received unhandled info ~p", [?MODULE, Info]), + {noreply, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +lookup_topology(#state{queue_ref = {resource, Vhost, queue, _} = QRef} = State) -> + {ok, Q} = rabbit_amqqueue:lookup(QRef), + DLRKey = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-routing-key">>, fun(_Pol, QArg) -> QArg end, Q), + DLX = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-exchange">>, fun(_Pol, QArg) -> QArg end, Q), + DLXRef = rabbit_misc:r(Vhost, exchange, DLX), + State#state{exchange_ref = DLXRef, + routing_key = DLRKey}. + +%% https://github.com/rabbitmq/rabbitmq-server/blob/9cf18e83f279408e20430b55428a2b19156c90d7/deps/rabbit/src/rabbit_channel.erl#L2855-L2888 +handle_queue_actions(Actions, State0) -> + lists:foldl( + fun ({deliver, Msgs}, S0) -> + S1 = handle_deliver(Msgs, S0), + maybe_set_timer(S1); + ({settled, QRef, MsgSeqs}, S0) -> + S1 = handle_settled(QRef, MsgSeqs, S0), + S2 = maybe_ack(S1), + maybe_cancel_timer(S2); + ({rejected, QRef, MsgSeqNos}, S0) -> + rabbit_log:debug("Ignoring rejected messages ~p from ~s", [MsgSeqNos, rabbit_misc:rs(QRef)]), + S0; + ({queue_down, QRef}, S0) -> + %% target classic queue is down, but not deleted + rabbit_log:debug("Ignoring DOWN from ~s", [rabbit_misc:rs(QRef)]), + S0 + end, State0, Actions). + +handle_deliver(Msgs, #state{queue_ref = QRef} = State) when is_list(Msgs) -> + DLX = lookup_dlx(State), + lists:foldl(fun({_QRef, MsgId, Msg, Reason}, S) -> + forward(Msg, MsgId, QRef, DLX, Reason, S) + end, State, Msgs). 
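The #pending{} record and handle_queue_actions/2 above implement the at-least-once core: every forwarded message tracks which target queues have and have not confirmed it, and only when the unsettled list is empty (and at least one queue confirmed) may the message be acked back to the source queue's discards queue. That invariant in miniature (record and function names are illustrative; the real check lives in maybe_ack/1 further below):

-record(pend, {consumed_msg_id, unsettled = [], settled = []}).

confirm(TargetQRef, #pend{unsettled = U, settled = S} = P) ->
    P#pend{unsettled = lists:delete(TargetQRef, U),
           settled = [TargetQRef | S]}.

%% ack to the source queue only if the message was routed somewhere at all
%% and every routed target queue has confirmed it
can_ack(#pend{unsettled = [], settled = [_ | _]}) -> true;
can_ack(#pend{})                                  -> false.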
+
+lookup_dlx(#state{exchange_ref = DLXRef,
+                  queue_ref = QRef}) ->
+    case rabbit_exchange:lookup(DLXRef) of
+        {error, not_found} ->
+            rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~s because its configured "
+                               "dead-letter-exchange ~s does not exist. "
+                               "Either create the configured dead-letter-exchange or re-configure "
+                               "the dead-letter-exchange policy for the source quorum queue to prevent "
+                               "dead-lettered messages from piling up in the source quorum queue.",
+                               [rabbit_misc:rs(QRef), rabbit_misc:rs(DLXRef)]),
+            not_found;
+        {ok, X} ->
+            X
+    end.
+
+forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason,
+        #state{next_out_seq = OutSeq,
+               pendings = Pendings,
+               exchange_ref = DLXRef,
+               routing_key = RKey} = State0) ->
+    #basic_message{content = Content, routing_keys = RKeys} = Msg =
+        rabbit_dead_letter:make_msg(ConsumedMsg, Reason, DLXRef, RKey, ConsumedQRef),
+    %% Field 'mandatory' is set to false because our module checks on its own whether the message is routable.
+    Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, Msg, OutSeq),
+    TargetQs = case DLX of
+                   not_found ->
+                       [];
+                   _ ->
+                       RouteToQs = rabbit_exchange:route(DLX, Delivery),
+                       case rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs) of
+                           {[], []} ->
+                               rabbit_log:warning("Cannot deliver message with sequence number ~b "
+                                                  "(for consumed message sequence number ~b) "
+                                                  "because no queue is bound to dead-letter ~s with routing keys ~p.",
+                                                  [OutSeq, ConsumedMsgId, rabbit_misc:rs(DLXRef), RKeys]),
+                               [];
+                           {Qs, []} ->
+                               %% the "normal" case, i.e. no dead-letter-topology misconfiguration
+                               Qs;
+                           {[], Cycles} ->
+                               %%TODO introduce structured logging in rabbit_log by using type logger:report
+                               rabbit_log:warning("Cannot route to any queues. Detected dead-letter queue cycles. "
+                                                  "Fix the dead-letter routing topology to prevent dead-letter messages from "
+                                                  "piling up in source quorum queue. "
+                                                  "outgoing_sequence_number=~b "
+                                                  "consumed_message_sequence_number=~b "
+                                                  "consumed_queue=~s "
+                                                  "dead_letter_exchange=~s "
+                                                  "effective_dead_letter_routing_keys=~p "
+                                                  "routed_to_queues=~s "
+                                                  "dead_letter_queue_cycles=~p",
+                                                  [OutSeq, ConsumedMsgId, rabbit_misc:rs(ConsumedQRef),
+                                                   rabbit_misc:rs(DLXRef), RKeys, strings(RouteToQs), Cycles]),
+                               [];
+                           {Qs, Cycles} ->
+                               rabbit_log:warning("Detected dead-letter queue cycles. "
+                                                  "Fix the dead-letter routing topology. "
+                                                  "outgoing_sequence_number=~b "
+                                                  "consumed_message_sequence_number=~b "
+                                                  "consumed_queue=~s "
+                                                  "dead_letter_exchange=~s "
+                                                  "effective_dead_letter_routing_keys=~p "
+                                                  "routed_to_queues_desired=~s "
+                                                  "routed_to_queues_effective=~s "
+                                                  "dead_letter_queue_cycles=~p",
+                                                  [OutSeq, ConsumedMsgId, rabbit_misc:rs(ConsumedQRef),
+                                                   rabbit_misc:rs(DLXRef), RKeys, strings(RouteToQs), strings(Qs), Cycles]),
+                               %% Ignore the target queues resulting in cycles.
+                               %% We decide it's good enough to deliver to only routable target queues.
+                               Qs
+                       end
+               end,
+    Now = os:system_time(millisecond),
+    State1 = State0#state{next_out_seq = OutSeq + 1},
+    Pend0 = #pending{
+              consumed_msg_id = ConsumedMsgId,
+              consumed_at = Now,
+              content = Content,
+              reason = Reason
+             },
+    case TargetQs of
+        [] ->
+            %% We can't deliver this message since there is no target queue we can route to.
+            %% Under no circumstances should we drop a message with dead-letter-strategy at-least-once.
+            %% We buffer this message and retry sending it every SETTLE_TIMEOUT milliseconds
+            %% (until the user has fixed the dead-letter routing topology).
+ State1#state{pendings = maps:put(OutSeq, Pend0, Pendings)}; + _ -> + Pend = Pend0#pending{publish_count = 1, + last_published_at = Now, + unsettled = TargetQs}, + State = State1#state{pendings = maps:put(OutSeq, Pend, Pendings)}, + deliver_to_queues(Delivery, TargetQs, State) + end. + +deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0} = State0) -> + Qs = rabbit_amqqueue:lookup(RouteToQNames), + {ok, QTypeState1, Actions} = rabbit_queue_type:deliver(Qs, Delivery, QTypeState0), + State = State0#state{queue_type_state = QTypeState1}, + handle_queue_actions(Actions, State). + +handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0} = State) -> + Pendings = lists:foldl(fun (MsgSeq, P0) -> + handle_settled0(QRef, MsgSeq, P0) + end, Pendings0, MsgSeqs), + State#state{pendings = Pendings}. + +handle_settled0(QRef, MsgSeq, Pendings) -> + case maps:find(MsgSeq, Pendings) of + {ok, #pending{unsettled = Unset0, settled = Set0} = Pend0} -> + Unset = lists:delete(QRef, Unset0), + Set = [QRef | Set0], + Pend = Pend0#pending{unsettled = Unset, settled = Set}, + maps:update(MsgSeq, Pend, Pendings); + error -> + rabbit_log:warning("Ignoring publisher confirm for sequence number ~b " + "from target dead letter ~s after settle timeout of ~bms. " + "Troubleshoot why that queue confirms so slowly.", + [MsgSeq, rabbit_misc:rs(QRef), ?SETTLE_TIMEOUT]), + Pendings + end. + +maybe_ack(#state{pendings = Pendings0, + dlx_client_state = DlxState0} = State) -> + Settled = maps:filter(fun(_OutSeq, #pending{unsettled = [], settled = [_|_]}) -> + %% Ack because there is at least one target queue and all + %% target queues settled (i.e. combining publisher confirm + %% and mandatory flag semantics). + true; + (_, _) -> + false + end, Pendings0), + case maps:size(Settled) of + 0 -> + %% nothing to ack + State; + _ -> + Ids = lists:map(fun(#pending{consumed_msg_id = Id}) -> Id end, maps:values(Settled)), + case rabbit_fifo_dlx_client:settle(Ids, DlxState0) of + {ok, DlxState} -> + SettledOutSeqs = maps:keys(Settled), + Pendings = maps:without(SettledOutSeqs, Pendings0), + State#state{pendings = Pendings, + dlx_client_state = DlxState}; + {error, _Reason} -> + %% Failed to ack. Ack will be retried in the next maybe_ack/1 + State + end + end. + +%% Re-deliver messages that timed out waiting on publisher confirm and +%% messages that got never sent due to routing topology misconfiguration. +redeliver_messsages(#state{pendings = Pendings} = State) -> + case lookup_dlx(State) of + not_found -> + %% Configured dead-letter-exchange does (still) not exist. + %% Warning got already logged. + %% Keep the same Pendings in our state until user creates or re-configures the dead-letter-exchange. + State; + DLX -> + Now = os:system_time(millisecond), + maps:fold(fun(OutSeq, #pending{last_published_at = LastPub} = Pend, S0) + when LastPub + ?SETTLE_TIMEOUT =< Now -> + %% Publisher confirm timed out. + redeliver(Pend, DLX, OutSeq, S0); + (OutSeq, #pending{last_published_at = undefined} = Pend, S0) -> + %% Message was never published due to dead-letter routing topology misconfiguration. + redeliver(Pend, DLX, OutSeq, S0); + (_OutSeq, _Pending, S) -> + %% Publisher confirm did not time out. + S + end, State, Pendings) + end. + +redeliver(#pending{content = Content} = Pend, DLX, OldOutSeq, + #state{routing_key = undefined} = State) -> + %% No dead-letter-routing-key defined for source quorum queue. + %% Therefore use all of messages's original routing keys (which can include CC and BCC recipients). 
+    %% This complies with the behaviour of the rabbit_dead_letter module.
+    %% We stored these original routing keys in the 1st (i.e. most recent) x-death entry.
+    #content{properties = #'P_basic'{headers = Headers}} =
+        rabbit_binary_parser:ensure_content_decoded(Content),
+    {array, [{table, MostRecentDeath}|_]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+    {<<"routing-keys">>, array, Routes0} = lists:keyfind(<<"routing-keys">>, 1, MostRecentDeath),
+    Routes = [Route || {longstr, Route} <- Routes0],
+    redeliver0(Pend, DLX, Routes, OldOutSeq, State);
+redeliver(Pend, DLX, OldOutSeq, #state{routing_key = DLRKey} = State) ->
+    redeliver0(Pend, DLX, [DLRKey], OldOutSeq, State).
+
+%% Quorum queues maintain their own Raft sequence number mapping to the message sequence number (= Raft correlation ID).
+%% So, they would just send us a 'settled' queue action containing the correct message sequence number.
+%%
+%% Classic queues however maintain their state by mapping the message sequence number to pending and confirmed queues.
+%% While re-using the same message sequence number could work there as well, it just gets unnecessarily complicated when
+%% different target queues settle two separate deliveries referring to the same message sequence number (and same basic message).
+%%
+%% Therefore, to keep things simple, create a brand new delivery, store it in our state and forget about the old delivery and
+%% sequence number.
+%%
+%% If a sequence number gets settled after SETTLE_TIMEOUT, we can't map it anymore to the #pending{}. Hence, we ignore it.
+%%
+%% This can lead to issues when SETTLE_TIMEOUT is too low and time to settle takes too long.
+%% For example, if SETTLE_TIMEOUT is set to only 10 seconds, but settling a message always takes longer than 10 seconds
+%% (e.g. due to extremely slow hypervisor disks that ran out of credit), we will re-deliver the same message all over again
+%% leading to many duplicates in the target queue without ever acking the message back to the source discards queue.
+%%
+%% Therefore, set SETTLE_TIMEOUT reasonably high (e.g. 2 minutes).
+%%
+%% TODO do not log per message?
+redeliver0(#pending{consumed_msg_id = ConsumedMsgId,
+                    content = Content,
+                    unsettled = Unsettled,
+                    settled = Settled,
+                    publish_count = PublishCount,
+                    reason = Reason} = Pend0,
+           DLX, DLRKeys, OldOutSeq,
+           #state{next_out_seq = OutSeq,
+                  queue_ref = QRef,
+                  pendings = Pendings0,
+                  exchange_ref = DLXRef} = State0) when is_list(DLRKeys) ->
+    BasicMsg = #basic_message{exchange_name = DLXRef,
+                              routing_keys = DLRKeys,
+                              %% BCC Header was already stripped previously
+                              content = Content,
+                              id = rabbit_guid:gen(),
+                              is_persistent = rabbit_basic:is_message_persistent(Content)
+                             },
+    %% Field 'mandatory' is set to false because our module checks on its own whether the message is routable.
+    Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, BasicMsg, OutSeq),
+    RouteToQs0 = rabbit_exchange:route(DLX, Delivery),
+    %% Do not re-deliver to queues for which we already received a publisher confirm.
+    RouteToQs1 = RouteToQs0 -- Settled,
+    {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1),
+    Prefix = io_lib:format("Message has not received required publisher confirm(s). "
+                           "Received confirm from: [~s]. "
+                           "Did not receive confirm from: [~s]. 
" + "timeout=~bms " + "message_sequence_number=~b " + "consumed_message_sequence_number=~b " + "publish_count=~b.", + [strings(Settled), strings(Unsettled), ?SETTLE_TIMEOUT, + OldOutSeq, ConsumedMsgId, PublishCount]), + case {RouteToQs, Cycles, Settled} of + {[], [], []} -> + rabbit_log:warning("~s Failed to re-deliver this message because no queue is bound " + "to dead-letter ~s with routing keys ~p.", + [Prefix, rabbit_misc:rs(DLXRef), DLRKeys]), + State0; + {[], [], [_|_]} -> + rabbit_log:debug("~s Routes changed dynamically so that this message does not need to be routed " + "to any queue anymore. This message will be acknowledged to the source ~s.", + [Prefix, rabbit_misc:rs(QRef)]), + State0; + {[], [_|_], []} -> + rabbit_log:warning("~s Failed to re-deliver this message because dead-letter queue cycles " + "got detected: ~p", + [Prefix, Cycles]), + State0; + {[], [_|_], [_|_]} -> + rabbit_log:warning("~s Dead-letter queue cycles detected: ~p. " + "This message will nevertheless be acknowledged to the source ~s " + "because it received at least one publisher confirm.", + [Prefix, Cycles, rabbit_misc:rs(QRef)]), + State0; + _ -> + case Cycles of + [] -> + rabbit_log:debug("~s Re-delivering this message to ~s", + [Prefix, strings(RouteToQs)]); + [_|_] -> + rabbit_log:warning("~s Dead-letter queue cycles detected: ~p. " + "Re-delivering this message only to ~s", + [Prefix, Cycles, strings(RouteToQs)]) + end, + Pend = Pend0#pending{publish_count = PublishCount + 1, + last_published_at = os:system_time(millisecond), + %% override 'unsettled' because topology could have changed + unsettled = RouteToQs}, + Pendings1 = maps:remove(OldOutSeq, Pendings0), + Pendings = maps:put(OutSeq, Pend, Pendings1), + State = State0#state{next_out_seq = OutSeq + 1, + pendings = Pendings}, + deliver_to_queues(Delivery, RouteToQs, State) + end. + +strings(QRefs) when is_list(QRefs) -> + L0 = lists:map(fun rabbit_misc:rs/1, QRefs), + L1 = lists:join(", ", L0), + lists:flatten(L1). + +maybe_set_timer(#state{timer = TRef} = State) when is_reference(TRef) -> + State; +maybe_set_timer(#state{timer = undefined, + pendings = Pendings} = State) when map_size(Pendings) =:= 0 -> + State; +maybe_set_timer(#state{timer = undefined} = State) -> + TRef = erlang:send_after(?SETTLE_TIMEOUT, self(), {'$gen_cast', settle_timeout}), + % rabbit_log:debug("set timer"), + State#state{timer = TRef}. + +maybe_cancel_timer(#state{timer = undefined} = State) -> + State; +maybe_cancel_timer(#state{timer = TRef, + pendings = Pendings} = State) -> + case maps:size(Pendings) of + 0 -> + erlang:cancel_timer(TRef, [{async, true}, {info, false}]), + % rabbit_log:debug("cancelled timer"), + State#state{timer = undefined}; + _ -> + State + end. + +%% Avoids large message contents being logged. +format_status(_Opt, [_PDict, #state{ + registered_name = RegisteredName, + queue_ref = QueueRef, + exchange_ref = ExchangeRef, + routing_key = RoutingKey, + dlx_client_state = DlxClientState, + queue_type_state = QueueTypeState, + pendings = Pendings, + next_out_seq = NextOutSeq, + timer = Timer + }]) -> + S = #{registered_name => RegisteredName, + queue_ref => QueueRef, + exchange_ref => ExchangeRef, + routing_key => RoutingKey, + dlx_client_state => rabbit_fifo_dlx_client:overview(DlxClientState), + queue_type_state => QueueTypeState, + pendings => maps:map(fun(_, P) -> format_pending(P) end, Pendings), + next_out_seq => NextOutSeq, + timer_is_active => Timer =/= undefined}, + [{data, [{"State", S}]}]. 
+ +format_pending(#pending{consumed_msg_id = ConsumedMsgId, + reason = Reason, + unsettled = Unsettled, + settled = Settled, + publish_count = PublishCount, + last_published_at = LastPublishedAt, + consumed_at = ConsumedAt}) -> + #{consumed_msg_id => ConsumedMsgId, + reason => Reason, + unsettled => Unsettled, + settled => Settled, + publish_count => PublishCount, + last_published_at => LastPublishedAt, + consumed_at => ConsumedAt}. diff --git a/deps/rabbit/src/rabbit_fifo_v1.erl b/deps/rabbit/src/rabbit_fifo_v1.erl index a59a5c9250ae..51150b0f7089 100644 --- a/deps/rabbit/src/rabbit_fifo_v1.erl +++ b/deps/rabbit/src/rabbit_fifo_v1.erl @@ -130,6 +130,8 @@ state/0, config/0]). +%% This function is never called since only rabbit_fifo_v0:init/1 is called. +%% See https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 -spec init(config()) -> state(). init(#{name := Name, queue_resource := Resource} = Conf) -> diff --git a/deps/rabbit/src/rabbit_policies.erl b/deps/rabbit/src/rabbit_policies.erl index 37a467ac7550..2e8684e14523 100644 --- a/deps/rabbit/src/rabbit_policies.erl +++ b/deps/rabbit/src/rabbit_policies.erl @@ -30,6 +30,7 @@ register() -> {Class, Name} <- [{policy_validator, <<"alternate-exchange">>}, {policy_validator, <<"dead-letter-exchange">>}, {policy_validator, <<"dead-letter-routing-key">>}, + {policy_validator, <<"dead-letter-strategy">>}, {policy_validator, <<"message-ttl">>}, {policy_validator, <<"expires">>}, {policy_validator, <<"max-length">>}, @@ -85,6 +86,13 @@ validate_policy0(<<"dead-letter-routing-key">>, Value) validate_policy0(<<"dead-letter-routing-key">>, Value) -> {error, "~p is not a valid dead letter routing key", [Value]}; +validate_policy0(<<"dead-letter-strategy">>, <<"at-most-once">>) -> + ok; +validate_policy0(<<"dead-letter-strategy">>, <<"at-least-once">>) -> + ok; +validate_policy0(<<"dead-letter-strategy">>, Value) -> + {error, "~p is not a valid dead letter strategy", [Value]}; + validate_policy0(<<"message-ttl">>, Value) when is_integer(Value), Value >= 0 -> ok; diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index a4c6d5dd5f46..d95f8e8b69fd 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -71,6 +71,7 @@ -include_lib("stdlib/include/qlc.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). -include("amqqueue.hrl"). -type msg_id() :: non_neg_integer(). @@ -94,7 +95,9 @@ single_active_consumer_pid, single_active_consumer_ctag, messages_ram, - message_bytes_ram + message_bytes_ram, + messages_dlx, + message_bytes_dlx ]). 
-define(INFO_KEYS, [name, durable, auto_delete, arguments, pid, messages, messages_ready, @@ -227,18 +230,17 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> {Name, _} = amqqueue:get_pid(Q), %% take the minimum value of the policy and the queue arg if present MaxLength = args_policy_lookup(<<"max-length">>, fun min/2, Q), - %% prefer the policy defined strategy if available - Overflow = args_policy_lookup(<<"overflow">>, fun (A, _B) -> A end , Q), + OverflowBin = args_policy_lookup(<<"overflow">>, fun policyHasPrecedence/2, Q), + Overflow = overflow(OverflowBin, drop_head, QName), MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q), MaxMemoryLength = args_policy_lookup(<<"max-in-memory-length">>, fun min/2, Q), MaxMemoryBytes = args_policy_lookup(<<"max-in-memory-bytes">>, fun min/2, Q), DeliveryLimit = args_policy_lookup(<<"delivery-limit">>, fun min/2, Q), - Expires = args_policy_lookup(<<"expires">>, - fun (A, _B) -> A end, - Q), + Expires = args_policy_lookup(<<"expires">>, fun policyHasPrecedence/2, Q), + MsgTTL = args_policy_lookup(<<"message-ttl">>, fun min/2, Q), #{name => Name, queue_resource => QName, - dead_letter_handler => dlx_mfa(Q), + dead_letter_handler => dead_letter_handler(Q, Overflow), become_leader_handler => {?MODULE, become_leader, [QName]}, max_length => MaxLength, max_bytes => MaxBytes, @@ -246,11 +248,17 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> max_in_memory_bytes => MaxMemoryBytes, single_active_consumer_on => single_active_consumer_on(Q), delivery_limit => DeliveryLimit, - overflow_strategy => overflow(Overflow, drop_head, QName), + overflow_strategy => Overflow, created => erlang:system_time(millisecond), - expires => Expires + expires => Expires, + msg_ttl => MsgTTL }. +policyHasPrecedence(Policy, _QueueArg) -> + Policy. +queueArgHasPrecedence(_Policy, QueueArg) -> + QueueArg. 
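
%% The two combinators above are only meaningful together with
%% args_policy_lookup/3. A small illustrative check (hypothetical, not part of
%% the patch) of how they resolve a key that is set both via a policy and via
%% a queue argument:
precedence_example() ->
    Policy = <<"reject-publish">>,
    QueueArg = <<"drop-head">>,
    %% 'overflow' and 'expires' now resolve in favour of the policy ...
    <<"reject-publish">> = policyHasPrecedence(Policy, QueueArg),
    %% ... while the dead-letter exchange and routing key keep favouring the
    %% queue argument (see dead_letter_handler/2 further down).
    <<"drop-head">> = queueArgHasPrecedence(Policy, QueueArg),
    ok.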
+ single_active_consumer_on(Q) -> QArguments = amqqueue:get_arguments(Q), case rabbit_misc:table_lookup(QArguments, <<"x-single-active-consumer">>) of @@ -293,7 +301,7 @@ become_leader(QName, Name) -> end, %% as this function is called synchronously when a ra node becomes leader %% we need to ensure there is no chance of blocking as else the ra node - %% may not be able to establish it's leadership + %% may not be able to establish its leadership spawn(fun() -> rabbit_misc:execute_mnesia_transaction( fun() -> @@ -377,19 +385,20 @@ filter_quorum_critical(Queues, ReplicaStates) -> capabilities() -> #{unsupported_policies => [ %% Classic policies - <<"message-ttl">>, <<"max-priority">>, <<"queue-mode">>, + <<"max-priority">>, <<"queue-mode">>, <<"single-active-consumer">>, <<"ha-mode">>, <<"ha-params">>, <<"ha-sync-mode">>, <<"ha-promote-on-shutdown">>, <<"ha-promote-on-failure">>, <<"queue-master-locator">>, %% Stream policies <<"max-age">>, <<"stream-max-segment-size-bytes">>, <<"queue-leader-locator">>, <<"initial-cluster-size">>], - queue_arguments => [<<"x-expires">>, <<"x-dead-letter-exchange">>, - <<"x-dead-letter-routing-key">>, <<"x-max-length">>, - <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>, - <<"x-max-in-memory-bytes">>, <<"x-overflow">>, - <<"x-single-active-consumer">>, <<"x-queue-type">>, - <<"x-quorum-initial-group-size">>, <<"x-delivery-limit">>], + queue_arguments => [<<"x-dead-letter-exchange">>, <<"x-dead-letter-routing-key">>, + <<"x-dead-letter-strategy">>, <<"x-expires">>, <<"x-max-length">>, + <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>, + <<"x-max-in-memory-bytes">>, <<"x-overflow">>, + <<"x-single-active-consumer">>, <<"x-queue-type">>, + <<"x-quorum-initial-group-size">>, <<"x-delivery-limit">>, + <<"x-message-ttl">>], consumer_arguments => [<<"x-priority">>, <<"x-credit">>], server_named => false}. @@ -410,7 +419,7 @@ spawn_notify_decorators(QName, Fun, Args) -> end). handle_tick(QName, - {Name, MR, MU, M, C, MsgBytesReady, MsgBytesUnack}, + {Name, MR, MU, M, C, MsgBytesReady, MsgBytesUnack, MsgBytesDiscard}, Nodes) -> %% this makes calls to remote processes so cannot be run inside the %% ra server @@ -429,8 +438,8 @@ handle_tick(QName, {consumer_utilisation, Util}, {message_bytes_ready, MsgBytesReady}, {message_bytes_unacknowledged, MsgBytesUnack}, - {message_bytes, MsgBytesReady + MsgBytesUnack}, - {message_bytes_persistent, MsgBytesReady + MsgBytesUnack}, + {message_bytes, MsgBytesReady + MsgBytesUnack + MsgBytesDiscard}, + {message_bytes_persistent, MsgBytesReady + MsgBytesUnack + MsgBytesDiscard}, {messages_persistent, M} | infos(QName, ?STATISTICS_KEYS -- [consumers])], @@ -839,8 +848,11 @@ deliver(true, Delivery, QState0) -> rabbit_fifo_client:enqueue(Delivery#delivery.msg_seq_no, Delivery#delivery.message, QState0). -deliver(QSs, #delivery{confirm = Confirm} = Delivery0) -> - Delivery = clean_delivery(Delivery0), +deliver(QSs, #delivery{message = #basic_message{content = Content0} = Msg, + confirm = Confirm} = Delivery0) -> + %% TODO: we could also consider clearing out the message id here + Content = prepare_content(Content0), + Delivery = Delivery0#delivery{message = Msg#basic_message{content = Content}}, lists:foldl( fun({Q, stateless}, {Qs, Actions}) -> QRef = amqqueue:get_pid(Q), @@ -1253,20 +1265,46 @@ reclaim_memory(Vhost, QueueName) -> ra_log_wal:force_roll_over({?RA_WAL_NAME, Node}). 
%%---------------------------------------------------------------------------- -dlx_mfa(Q) -> - DLX = init_dlx(args_policy_lookup(<<"dead-letter-exchange">>, - fun res_arg/2, Q), Q), - DLXRKey = args_policy_lookup(<<"dead-letter-routing-key">>, - fun res_arg/2, Q), - {?MODULE, dead_letter_publish, [DLX, DLXRKey, amqqueue:get_name(Q)]}. - -init_dlx(undefined, _Q) -> - undefined; -init_dlx(DLX, Q) when ?is_amqqueue(Q) -> +dead_letter_handler(Q, Overflow) -> + %% Queue arg continues to take precedence to not break existing configurations + %% for queues upgraded from =v3.10 + Exchange = args_policy_lookup(<<"dead-letter-exchange">>, fun queueArgHasPrecedence/2, Q), + RoutingKey = args_policy_lookup(<<"dead-letter-routing-key">>, fun queueArgHasPrecedence/2, Q), + %% Policy takes precedence because it's a new key introduced in v3.10 and we want + %% users to use policies instead of queue args allowing dynamic reconfiguration. + %% TODO change to queueArgHasPrecedence for dead-letter-strategy + Strategy = args_policy_lookup(<<"dead-letter-strategy">>, fun policyHasPrecedence/2, Q), QName = amqqueue:get_name(Q), - rabbit_misc:r(QName, exchange, DLX). + dlh(Exchange, RoutingKey, Strategy, Overflow, QName). -res_arg(_PolVal, ArgVal) -> ArgVal. +dlh(undefined, undefined, undefined, _, _) -> + undefined; +dlh(undefined, RoutingKey, undefined, _, QName) -> + rabbit_log:warning("Disabling dead-lettering for ~s despite configured dead-letter-routing-key '~s' " + "because dead-letter-exchange is not configured.", + [rabbit_misc:rs(QName), RoutingKey]), + undefined; +dlh(undefined, _, Strategy, _, QName) -> + rabbit_log:warning("Disabling dead-lettering for ~s despite configured dead-letter-strategy '~s' " + "because dead-letter-exchange is not configured.", + [rabbit_misc:rs(QName), Strategy]), + undefined; +dlh(_, _, <<"at-least-once">>, reject_publish, _) -> + at_least_once; +dlh(Exchange, RoutingKey, <<"at-least-once">>, drop_head, QName) -> + rabbit_log:warning("Falling back to dead-letter-strategy at-most-once for ~s " + "because configured dead-letter-strategy at-least-once is incompatible with " + "effective overflow strategy drop-head. To enable dead-letter-strategy " + "at-least-once, set overflow strategy to reject-publish.", + [rabbit_misc:rs(QName)]), + dlh_at_most_once(Exchange, RoutingKey, QName); +dlh(Exchange, RoutingKey, _, _, QName) -> + dlh_at_most_once(Exchange, RoutingKey, QName). + +dlh_at_most_once(Exchange, RoutingKey, QName) -> + DLX = rabbit_misc:r(QName, exchange, Exchange), + MFA = {?MODULE, dead_letter_publish, [DLX, RoutingKey, QName]}, + {at_most_once, MFA}. dead_letter_publish(undefined, _, _, _) -> ok; @@ -1438,6 +1476,28 @@ i(message_bytes_ram, Q) when ?is_amqqueue(Q) -> {timeout, _} -> 0 end; +i(messages_dlx, Q) when ?is_amqqueue(Q) -> + QPid = amqqueue:get_pid(Q), + case ra:local_query(QPid, + fun rabbit_fifo:query_stat_dlx/1) of + {ok, {_, {Num, _}}, _} -> + Num; + {error, _} -> + 0; + {timeout, _} -> + 0 + end; +i(message_bytes_dlx, Q) when ?is_amqqueue(Q) -> + QPid = amqqueue:get_pid(Q), + case ra:local_query(QPid, + fun rabbit_fifo:query_stat_dlx/1) of + {ok, {_, {_, Bytes}}, _} -> + Bytes; + {error, _} -> + 0; + {timeout, _} -> + 0 + end; i(_K, _Q) -> ''. 
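
%% For the dlh/5 clauses above to resolve to 'at_least_once', a queue needs a
%% dead-letter exchange, the at-least-once strategy and the reject-publish
%% overflow strategy; with drop-head it falls back to at-most-once and logs a
%% warning. A minimal client-side sketch using the Erlang amqp_client (channel
%% and resource names are made up for illustration; assumes
%% amqp_client/include/amqp_client.hrl is included for the records):
declare_at_least_once_queue(Ch) ->
    #'queue.declare_ok'{} =
        amqp_channel:call(
          Ch, #'queue.declare'{
                 queue     = <<"source-qq">>,
                 durable   = true,
                 arguments = [{<<"x-queue-type">>,           longstr, <<"quorum">>},
                              {<<"x-overflow">>,             longstr, <<"reject-publish">>},
                              {<<"x-dead-letter-exchange">>, longstr, <<"my-dlx">>},
                              {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}]}),
    ok.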
open_files(Name) -> @@ -1582,7 +1642,7 @@ overflow(undefined, Def, _QName) -> Def; overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish; overflow(<<"drop-head">>, _Def, _QName) -> drop_head; overflow(<<"reject-publish-dlx">> = V, Def, QName) -> - rabbit_log:warning("Invalid overflow strategy ~p for quorum queue: ~p", + rabbit_log:warning("Invalid overflow strategy ~p for quorum queue: ~s", [V, rabbit_misc:rs(QName)]), Def. @@ -1626,19 +1686,15 @@ notify_decorators(QName, F, A) -> end. %% remove any data that a quorum queue doesn't need -clean_delivery(#delivery{message = - #basic_message{content = Content0} = Msg} = Delivery) -> - Content = case Content0 of - #content{properties = none} -> - Content0; - #content{protocol = none} -> - Content0; - #content{properties = Props, - protocol = Proto} -> - Content0#content{properties = none, - properties_bin = Proto:encode_properties(Props)} - end, - - %% TODO: we could also consider clearing out the message id here - Delivery#delivery{message = Msg#basic_message{content = Content}}. - +prepare_content(#content{properties = none} = Content) -> + Content; +prepare_content(#content{protocol = none} = Content) -> + Content; +prepare_content(#content{properties = #'P_basic'{expiration = undefined} = Props, + protocol = Proto} = Content) -> + Content#content{properties = none, + properties_bin = Proto:encode_properties(Props)}; +prepare_content(Content) -> + %% expiration is set. Therefore, leave properties decoded so that + %% rabbit_fifo can directly parse it without having to decode again. + Content. diff --git a/deps/rabbit/src/rabbit_stream_queue.erl b/deps/rabbit/src/rabbit_stream_queue.erl index 7ae66bfd9fd7..1717556e0b72 100644 --- a/deps/rabbit/src/rabbit_stream_queue.erl +++ b/deps/rabbit/src/rabbit_stream_queue.erl @@ -957,7 +957,9 @@ capabilities() -> <<"single-active-consumer">>, <<"delivery-limit">>, <<"ha-mode">>, <<"ha-params">>, <<"ha-sync-mode">>, <<"ha-promote-on-shutdown">>, <<"ha-promote-on-failure">>, - <<"queue-master-locator">>], + <<"queue-master-locator">>, + %% Quorum policies + <<"dead-letter-strategy">>], queue_arguments => [<<"x-dead-letter-exchange">>, <<"x-dead-letter-routing-key">>, <<"x-max-length">>, <<"x-max-length-bytes">>, <<"x-single-active-consumer">>, <<"x-queue-type">>, diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 4c7e7968f9cb..ebd04bf72002 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -93,7 +93,9 @@ init_per_group(quorum_queue, Config) -> ok -> rabbit_ct_helpers:set_config( Config, - [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, + [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + %%TODO add at-least-once tests + {<<"x-dead-letter-strategy">>, longstr, <<"at-most-once">>}]}, {queue_durable, true}]); Skip -> Skip @@ -708,7 +710,9 @@ dead_letter_policy(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), QName = ?config(queue_name, Config), DLXQName = ?config(queue_name_dlx, Config), - Args = ?config(queue_args, Config), + Args0 = ?config(queue_args, Config), + %% declaring a quorum queue with x-dead-letter-strategy without defining a DLX will fail + Args = proplists:delete(<<"x-dead-letter-strategy">>, Args0), Durable = ?config(queue_durable, Config), DLXExchange = ?config(dlx_exchange, Config), diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 
a22b0a286eb4..d4a061c1d591 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -10,6 +10,11 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). +-include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). + +-define(record_info(T,R),lists:zip(record_info(fields,T),tl(tuple_to_list(R)))). %%%=================================================================== %%% Common Test callbacks @@ -25,7 +30,6 @@ all_tests() -> [ test_run_log, snapshots, - scenario1, scenario2, scenario3, scenario4, @@ -69,7 +73,16 @@ all_tests() -> single_active_ordering_01, single_active_ordering_03, in_memory_limit, - max_length + max_length, + snapshots_dlx, + dlx_01, + dlx_02, + dlx_03, + dlx_04, + dlx_05, + dlx_06, + dlx_07, + dlx_08 % single_active_ordering_02 ]. @@ -103,32 +116,14 @@ end_per_testcase(_TestCase, _Config) -> % -type log_op() :: % {enqueue, pid(), maybe(msg_seqno()), Msg :: raw_msg()}. -scenario1(_Config) -> - C1 = {<<>>, c:pid(0,6723,1)}, - C2 = {<<0>>,c:pid(0,6723,1)}, - E = c:pid(0,6720,1), - - Commands = [ - make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E,1,msg1), - make_enqueue(E,2,msg2), - make_checkout(C1, cancel), %% both on returns queue - make_checkout(C2, {auto,1,simple_prefetch}), - make_return(C2, [0]), %% E1 in returns, E2 with C2 - make_return(C2, [1]), %% E2 in returns E1 with C2 - make_settle(C2, [2]) %% E2 with C2 - ], - run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands), - ok. - scenario2(_Config) -> C1 = {<<>>, c:pid(0,346,1)}, C2 = {<<>>,c:pid(0,379,1)}, E = c:pid(0,327,1), Commands = [make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,1,msg1), + make_enqueue(E,1,msg(<<"msg1">>)), make_checkout(C1, cancel), - make_enqueue(E,2,msg2), + make_enqueue(E,2,msg(<<"msg2">>)), make_checkout(C2, {auto,1,simple_prefetch}), make_settle(C1, [0]), make_settle(C2, [0]) @@ -140,10 +135,10 @@ scenario3(_Config) -> C1 = {<<>>, c:pid(0,179,1)}, E = c:pid(0,176,1), Commands = [make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E,1,msg1), + make_enqueue(E,1,msg(<<"msg1">>)), make_return(C1, [0]), - make_enqueue(E,2,msg2), - make_enqueue(E,3,msg3), + make_enqueue(E,2,msg(<<"msg2">>)), + make_enqueue(E,3,msg(<<"msg3">>)), make_settle(C1, [1]), make_settle(C1, [2]) ], @@ -154,7 +149,7 @@ scenario4(_Config) -> C1 = {<<>>, c:pid(0,179,1)}, E = c:pid(0,176,1), Commands = [make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,1,msg), + make_enqueue(E,1,msg(<<"msg">>)), make_settle(C1, [0]) ], run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands), @@ -163,17 +158,17 @@ scenario4(_Config) -> scenario5(_Config) -> C1 = {<<>>, c:pid(0,505,0)}, E = c:pid(0,465,9), - Commands = [make_enqueue(E,1,<<0>>), + Commands = [make_enqueue(E,1,msg(<<0>>)), make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,2,<<>>), + make_enqueue(E,2,msg(<<>>)), make_settle(C1,[0])], run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands), ok. scenario6(_Config) -> E = c:pid(0,465,9), - Commands = [make_enqueue(E,1,<<>>), %% 1 msg on queue - snap: prefix 1 - make_enqueue(E,2,<<>>) %% 1. msg on queue - snap: prefix 1 + Commands = [make_enqueue(E,1,msg(<<>>)), %% 1 msg on queue - snap: prefix 1 + make_enqueue(E,2,msg(<<>>)) %% 1. 
msg on queue - snap: prefix 1 ], run_snapshot_test(#{name => ?FUNCTION_NAME, max_length => 1}, Commands), @@ -183,10 +178,10 @@ scenario7(_Config) -> C1 = {<<>>, c:pid(0,208,0)}, E = c:pid(0,188,0), Commands = [ - make_enqueue(E,1,<<>>), + make_enqueue(E,1,msg(<<>>)), make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,2,<<>>), - make_enqueue(E,3,<<>>), + make_enqueue(E,2,msg(<<>>)), + make_enqueue(E,3,msg(<<>>)), make_settle(C1,[0])], run_snapshot_test(#{name => ?FUNCTION_NAME, max_length => 1}, Commands), @@ -196,8 +191,8 @@ scenario8(_Config) -> C1 = {<<>>, c:pid(0,208,0)}, E = c:pid(0,188,0), Commands = [ - make_enqueue(E,1,<<>>), - make_enqueue(E,2,<<>>), + make_enqueue(E,1,msg(<<>>)), + make_enqueue(E,2,msg(<<>>)), make_checkout(C1, {auto,1,simple_prefetch}), % make_checkout(C1, cancel), {down, E, noconnection}, @@ -209,9 +204,9 @@ scenario8(_Config) -> scenario9(_Config) -> E = c:pid(0,188,0), Commands = [ - make_enqueue(E,1,<<>>), - make_enqueue(E,2,<<>>), - make_enqueue(E,3,<<>>)], + make_enqueue(E,1,msg(<<>>)), + make_enqueue(E,2,msg(<<>>)), + make_enqueue(E,3,msg(<<>>))], run_snapshot_test(#{name => ?FUNCTION_NAME, max_length => 1}, Commands), ok. @@ -221,7 +216,7 @@ scenario10(_Config) -> E = c:pid(0,188,0), Commands = [ make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,1,<<>>), + make_enqueue(E,1,msg(<<>>)), make_settle(C1, [0]) ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -232,10 +227,10 @@ scenario11(_Config) -> C1 = {<<>>, c:pid(0,215,0)}, E = c:pid(0,217,0), Commands = [ - make_enqueue(E,1,<<"1">>), % 1 + make_enqueue(E,1,msg(<<"1">>)), % 1 make_checkout(C1, {auto,1,simple_prefetch}), % 2 make_checkout(C1, cancel), % 3 - make_enqueue(E,2,<<"22">>), % 4 + make_enqueue(E,2,msg(<<"22">>)), % 4 make_checkout(C1, {auto,1,simple_prefetch}), % 5 make_settle(C1, [0]), % 6 make_checkout(C1, cancel) % 7 @@ -246,19 +241,19 @@ scenario11(_Config) -> scenario12(_Config) -> E = c:pid(0,217,0), - Commands = [make_enqueue(E,1,<<0>>), - make_enqueue(E,2,<<0>>), - make_enqueue(E,3,<<0>>)], + Commands = [make_enqueue(E,1,msg(<<0>>)), + make_enqueue(E,2,msg(<<0>>)), + make_enqueue(E,3,msg(<<0>>))], run_snapshot_test(#{name => ?FUNCTION_NAME, max_bytes => 2}, Commands), ok. scenario13(_Config) -> E = c:pid(0,217,0), - Commands = [make_enqueue(E,1,<<0>>), - make_enqueue(E,2,<<>>), - make_enqueue(E,3,<<>>), - make_enqueue(E,4,<<>>) + Commands = [make_enqueue(E,1,msg(<<0>>)), + make_enqueue(E,2,msg(<<>>)), + make_enqueue(E,3,msg(<<>>)), + make_enqueue(E,4,msg(<<>>)) ], run_snapshot_test(#{name => ?FUNCTION_NAME, max_length => 2}, Commands), @@ -266,7 +261,7 @@ scenario13(_Config) -> scenario14(_Config) -> E = c:pid(0,217,0), - Commands = [make_enqueue(E,1,<<0,0>>)], + Commands = [make_enqueue(E,1,msg(<<0,0>>))], run_snapshot_test(#{name => ?FUNCTION_NAME, max_bytes => 1}, Commands), ok. 
@@ -274,8 +269,8 @@ scenario14(_Config) -> scenario14b(_Config) -> E = c:pid(0,217,0), Commands = [ - make_enqueue(E,1,<<0>>), - make_enqueue(E,2,<<0>>) + make_enqueue(E,1,msg(<<0>>)), + make_enqueue(E,2,msg(<<0>>)) ], run_snapshot_test(#{name => ?FUNCTION_NAME, max_bytes => 1}, Commands), @@ -285,8 +280,8 @@ scenario15(_Config) -> C1 = {<<>>, c:pid(0,179,1)}, E = c:pid(0,176,1), Commands = [make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E, 1, msg1), - make_enqueue(E, 2, msg2), + make_enqueue(E, 1, msg(<<"msg1">>)), + make_enqueue(E, 2, msg(<<"msg2">>)), make_return(C1, [0]), make_return(C1, [2]), make_settle(C1, [1]) @@ -302,11 +297,11 @@ scenario16(_Config) -> E = c:pid(0,176,1), Commands = [ make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E, 1, msg1), + make_enqueue(E, 1, msg(<<"msg1">>)), make_checkout(C2, {auto,1,simple_prefetch}), {down, C1Pid, noproc}, %% msg1 allocated to C2 make_return(C2, [0]), %% msg1 returned - make_enqueue(E, 2, <<>>), + make_enqueue(E, 2, msg(<<>>)), make_settle(C2, [0]) ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -321,11 +316,11 @@ scenario17(_Config) -> E = test_util:fake_pid(rabbit@fake_node2), Commands = [ make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,1,<<"one">>), + make_enqueue(E,1,msg(<<"one">>)), make_checkout(C2, {auto,1,simple_prefetch}), {down, C1Pid, noconnection}, make_checkout(C2, cancel), - make_enqueue(E,2,<<"two">>), + make_enqueue(E,2,msg(<<"two">>)), {nodeup,rabbit@fake_node1}, %% this has no effect as was returned make_settle(C1, [0]), @@ -339,11 +334,11 @@ scenario17(_Config) -> scenario18(_Config) -> E = c:pid(0,176,1), - Commands = [make_enqueue(E,1,<<"1">>), - make_enqueue(E,2,<<"2">>), - make_enqueue(E,3,<<"3">>), - make_enqueue(E,4,<<"4">>), - make_enqueue(E,5,<<"5">>) + Commands = [make_enqueue(E,1,msg(<<"1">>)), + make_enqueue(E,2,msg(<<"2">>)), + make_enqueue(E,3,msg(<<"3">>)), + make_enqueue(E,4,msg(<<"4">>)), + make_enqueue(E,5,msg(<<"5">>)) ], run_snapshot_test(#{name => ?FUNCTION_NAME, %% max_length => 3, @@ -354,10 +349,10 @@ scenario19(_Config) -> C1Pid = c:pid(0,883,1), C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), - Commands = [make_enqueue(E,1,<<"1">>), - make_enqueue(E,2,<<"2">>), + Commands = [make_enqueue(E,1,msg(<<"1">>)), + make_enqueue(E,2,msg(<<"2">>)), make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E,3,<<"3">>), + make_enqueue(E,3,msg(<<"3">>)), make_settle(C1, [0, 1]) ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -369,15 +364,15 @@ scenario20(_Config) -> C1Pid = c:pid(0,883,1), C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), - Commands = [make_enqueue(E,1,<<>>), - make_enqueue(E,2,<<1>>), + Commands = [make_enqueue(E,1,msg(<<>>)), + make_enqueue(E,2,msg(<<1>>)), make_checkout(C1, {auto,2,simple_prefetch}), {down, C1Pid, noconnection}, - make_enqueue(E,3,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>), - make_enqueue(E,4,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>), - make_enqueue(E,5,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>), - make_enqueue(E,6,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>), - make_enqueue(E,7,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0>>) + make_enqueue(E,3,msg(<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)), + make_enqueue(E,4,msg(<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)), + make_enqueue(E,5,msg(<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)), + make_enqueue(E,6,msg(<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)), + make_enqueue(E,7,msg(<<0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)) ], run_snapshot_test(#{name => ?FUNCTION_NAME, max_length => 4, @@ -391,15 
+386,15 @@ scenario21(_Config) -> E = c:pid(0,176,1), Commands = [ make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E,1,<<"1">>), - make_enqueue(E,2,<<"2">>), - make_enqueue(E,3,<<"3">>), + make_enqueue(E,1,msg(<<"1">>)), + make_enqueue(E,2,msg(<<"2">>)), + make_enqueue(E,3,msg(<<"3">>)), rabbit_fifo:make_discard(C1, [0]), rabbit_fifo:make_settle(C1, [1]) ], run_snapshot_test(#{name => ?FUNCTION_NAME, release_cursor_interval => 1, - dead_letter_handler => {?MODULE, banana, []}}, + dead_letter_handler => {at_most_once, {?MODULE, banana, []}}}, Commands), ok. @@ -408,16 +403,16 @@ scenario22(_Config) -> % C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), Commands = [ - make_enqueue(E,1,<<"1">>), - make_enqueue(E,2,<<"2">>), - make_enqueue(E,3,<<"3">>), - make_enqueue(E,4,<<"4">>), - make_enqueue(E,5,<<"5">>) + make_enqueue(E,1,msg(<<"1">>)), + make_enqueue(E,2,msg(<<"2">>)), + make_enqueue(E,3,msg(<<"3">>)), + make_enqueue(E,4,msg(<<"4">>)), + make_enqueue(E,5,msg(<<"5">>)) ], run_snapshot_test(#{name => ?FUNCTION_NAME, release_cursor_interval => 1, max_length => 3, - dead_letter_handler => {?MODULE, banana, []}}, + dead_letter_handler => {at_most_once, {?MODULE, banana, []}}}, Commands), ok. @@ -429,10 +424,10 @@ scenario24(_Config) -> Commands = [ make_checkout(C1, {auto,2,simple_prefetch}), %% 1 make_checkout(C2, {auto,1,simple_prefetch}), %% 2 - make_enqueue(E,1,<<"1">>), %% 3 - make_enqueue(E,2,<<"2b">>), %% 4 - make_enqueue(E,3,<<"3">>), %% 5 - make_enqueue(E,4,<<"4">>), %% 6 + make_enqueue(E,1,msg(<<"1">>)), %% 3 + make_enqueue(E,2,msg(<<"2b">>)), %% 4 + make_enqueue(E,3,msg(<<"3">>)), %% 5 + make_enqueue(E,4,msg(<<"4">>)), %% 6 {down, E, noconnection} %% 7 ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -440,7 +435,7 @@ scenario24(_Config) -> deliver_limit => undefined, max_length => 3, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands), ok. @@ -453,12 +448,12 @@ scenario25(_Config) -> E = c:pid(0,280,0), Commands = [ make_checkout(C1, {auto,2,simple_prefetch}), %% 1 - make_enqueue(E,1,<<0>>), %% 2 + make_enqueue(E,1,msg(<<0>>)), %% 2 make_checkout(C2, {auto,1,simple_prefetch}), %% 3 - make_enqueue(E,2,<<>>), %% 4 - make_enqueue(E,3,<<>>), %% 5 + make_enqueue(E,2,msg(<<>>)), %% 4 + make_enqueue(E,3,msg(<<>>)), %% 5 {down, C1Pid, noproc}, %% 6 - make_enqueue(E,4,<<>>), %% 7 + make_enqueue(E,4,msg(<<>>)), %% 7 rabbit_fifo:make_purge() %% 8 ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -466,7 +461,7 @@ scenario25(_Config) -> release_cursor_interval => 0, deliver_limit => undefined, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands), ok. 
@@ -477,19 +472,19 @@ scenario26(_Config) -> E1 = c:pid(0,436,0), E2 = c:pid(0,435,0), Commands = [ - make_enqueue(E1,2,<<>>), %% 1 - make_enqueue(E1,3,<<>>), %% 2 - make_enqueue(E2,1,<<>>), %% 3 - make_enqueue(E2,2,<<>>), %% 4 - make_enqueue(E1,4,<<>>), %% 5 - make_enqueue(E1,5,<<>>), %% 6 - make_enqueue(E1,6,<<>>), %% 7 - make_enqueue(E1,7,<<>>), %% 8 - make_enqueue(E1,1,<<>>), %% 9 + make_enqueue(E1,2,msg(<<>>)), %% 1 + make_enqueue(E1,3,msg(<<>>)), %% 2 + make_enqueue(E2,1,msg(<<>>)), %% 3 + make_enqueue(E2,2,msg(<<>>)), %% 4 + make_enqueue(E1,4,msg(<<>>)), %% 5 + make_enqueue(E1,5,msg(<<>>)), %% 6 + make_enqueue(E1,6,msg(<<>>)), %% 7 + make_enqueue(E1,7,msg(<<>>)), %% 8 + make_enqueue(E1,1,msg(<<>>)), %% 9 make_checkout(C1, {auto,5,simple_prefetch}), %% 1 - make_enqueue(E1,8,<<>>), %% 2 - make_enqueue(E1,9,<<>>), %% 2 - make_enqueue(E1,10,<<>>), %% 2 + make_enqueue(E1,8,msg(<<>>)), %% 2 + make_enqueue(E1,9,msg(<<>>)), %% 2 + make_enqueue(E1,10,msg(<<>>)), %% 2 {down, C1Pid, noconnection} ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -497,22 +492,22 @@ scenario26(_Config) -> deliver_limit => undefined, max_length => 8, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands), ok. scenario28(_Config) -> E = c:pid(0,151,0), - Conf = #{dead_letter_handler => {rabbit_fifo_prop_SUITE,banana,[]}, + Conf = #{dead_letter_handler => {at_most_once, {rabbit_fifo_prop_SUITE,banana,[]}}, delivery_limit => undefined, max_in_memory_bytes => undefined, max_length => 1,name => ?FUNCTION_NAME,overflow_strategy => drop_head, release_cursor_interval => 100,single_active_consumer_on => false}, Commands = [ - make_enqueue(E,2, <<>>), - make_enqueue(E,3, <<>>), - make_enqueue(E,1, <<>>) + make_enqueue(E,2,msg( <<>>)), + make_enqueue(E,3,msg( <<>>)), + make_enqueue(E,1,msg( <<>>)) ], ?assert(single_active_prop(Conf, Commands, false)), ok. 
@@ -525,39 +520,39 @@ scenario27(_Config) -> E = c:pid(0,151,0), E2 = c:pid(0,152,0), Commands = [ - make_enqueue(E,1,<<>>), - make_enqueue(E2,1,<<28,202>>), - make_enqueue(E,2,<<"Î2">>), + make_enqueue(E,1,msg(<<>>)), + make_enqueue(E2,1,msg(<<28,202>>)), + make_enqueue(E,2,msg(<<"Î2">>)), {down, E, noproc}, - make_enqueue(E2,2,<<"ê">>), + make_enqueue(E2,2,msg(<<"ê">>)), {nodeup,fakenode@fake}, - make_enqueue(E2,3,<<>>), - make_enqueue(E2,4,<<>>), - make_enqueue(E2,5,<<>>), - make_enqueue(E2,6,<<>>), - make_enqueue(E2,7,<<>>), - make_enqueue(E2,8,<<>>), - make_enqueue(E2,9,<<>>), + make_enqueue(E2,3,msg(<<>>)), + make_enqueue(E2,4,msg(<<>>)), + make_enqueue(E2,5,msg(<<>>)), + make_enqueue(E2,6,msg(<<>>)), + make_enqueue(E2,7,msg(<<>>)), + make_enqueue(E2,8,msg(<<>>)), + make_enqueue(E2,9,msg(<<>>)), {purge}, - make_enqueue(E2,10,<<>>), - make_enqueue(E2,11,<<>>), - make_enqueue(E2,12,<<>>), - make_enqueue(E2,13,<<>>), - make_enqueue(E2,14,<<>>), - make_enqueue(E2,15,<<>>), - make_enqueue(E2,16,<<>>), - make_enqueue(E2,17,<<>>), - make_enqueue(E2,18,<<>>), + make_enqueue(E2,10,msg(<<>>)), + make_enqueue(E2,11,msg(<<>>)), + make_enqueue(E2,12,msg(<<>>)), + make_enqueue(E2,13,msg(<<>>)), + make_enqueue(E2,14,msg(<<>>)), + make_enqueue(E2,15,msg(<<>>)), + make_enqueue(E2,16,msg(<<>>)), + make_enqueue(E2,17,msg(<<>>)), + make_enqueue(E2,18,msg(<<>>)), {nodeup,fakenode@fake}, - make_enqueue(E2,19,<<>>), + make_enqueue(E2,19,msg(<<>>)), make_checkout(C1, {auto,77,simple_prefetch}), - make_enqueue(E2,20,<<>>), - make_enqueue(E2,21,<<>>), - make_enqueue(E2,22,<<>>), - make_enqueue(E2,23,<<"Ýý">>), + make_enqueue(E2,20,msg(<<>>)), + make_enqueue(E2,21,msg(<<>>)), + make_enqueue(E2,22,msg(<<>>)), + make_enqueue(E2,23,msg(<<"Ýý">>)), make_checkout(C2, {auto,66,simple_prefetch}), {purge}, - make_enqueue(E2,24,<<>>) + make_enqueue(E2,24,msg(<<>>)) ], ?assert( single_active_prop(#{name => ?FUNCTION_NAME, @@ -569,7 +564,7 @@ scenario27(_Config) -> max_in_memory_bytes => 691, overflow_strategy => drop_head, single_active_consumer_on => true, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands, false)), ok. 
@@ -578,11 +573,11 @@ scenario30(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,240,0), Commands = [ - make_enqueue(E,1,<<>>), %% 1 - make_enqueue(E,2,<<1>>), %% 2 + make_enqueue(E,1,msg(<<>>)), %% 1 + make_enqueue(E,2,msg(<<1>>)), %% 2 make_checkout(C1, {auto,1,simple_prefetch}), %% 3 {down, C1Pid, noconnection}, %% 4 - make_enqueue(E,3,<<>>) %% 5 + make_enqueue(E,3,msg(<<>>)) %% 5 ], run_snapshot_test(#{name => ?FUNCTION_NAME, release_cursor_interval => 0, @@ -590,7 +585,7 @@ scenario30(_Config) -> max_length => 1, max_in_memory_length => 1, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []}, + dead_letter_handler => {at_most_once, {?MODULE, banana, []}}, single_active_consumer_on => true }, Commands), @@ -609,8 +604,8 @@ scenario31(_Config) -> % {auto,1,simple_prefetch}, % #{ack => true,args => [],prefetch => 1,username => <<"user">>}}}, % {4,{purge}}] - make_enqueue(E1,1,<<>>), %% 1 - make_enqueue(E2,2,<<1>>), %% 2 + make_enqueue(E1,1,msg(<<>>)), %% 1 + make_enqueue(E2,2,msg(<<1>>)), %% 2 make_checkout(C1, {auto,1,simple_prefetch}), %% 3 {purge} %% 4 ], @@ -618,7 +613,7 @@ scenario31(_Config) -> release_cursor_interval => 0, deliver_limit => undefined, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands), ok. @@ -626,17 +621,17 @@ scenario31(_Config) -> scenario32(_Config) -> E1 = c:pid(0,314,0), Commands = [ - make_enqueue(E1,1,<<0>>), %% 1 - make_enqueue(E1,2,<<0,0>>), %% 2 - make_enqueue(E1,4,<<0,0,0,0>>), %% 3 - make_enqueue(E1,3,<<0,0,0>>) %% 4 + make_enqueue(E1,1,msg(<<0>>)), %% 1 + make_enqueue(E1,2,msg(<<0,0>>)), %% 2 + make_enqueue(E1,4,msg(<<0,0,0,0>>)), %% 3 + make_enqueue(E1,3,msg(<<0,0,0>>)) %% 4 ], run_snapshot_test(#{name => ?FUNCTION_NAME, release_cursor_interval => 0, max_length => 3, deliver_limit => undefined, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands), ok. 
@@ -646,14 +641,14 @@ scenario29(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,240,0), Commands = [ - make_enqueue(E,1,<<>>), %% 1 - make_enqueue(E,2,<<>>), %% 2 + make_enqueue(E,1,msg(<<>>)), %% 1 + make_enqueue(E,2,msg(<<>>)), %% 2 make_checkout(C1, {auto,2,simple_prefetch}), %% 2 - make_enqueue(E,3,<<>>), %% 3 - make_enqueue(E,4,<<>>), %% 4 - make_enqueue(E,5,<<>>), %% 5 - make_enqueue(E,6,<<>>), %% 6 - make_enqueue(E,7,<<>>), %% 7 + make_enqueue(E,3,msg(<<>>)), %% 3 + make_enqueue(E,4,msg(<<>>)), %% 4 + make_enqueue(E,5,msg(<<>>)), %% 5 + make_enqueue(E,6,msg(<<>>)), %% 6 + make_enqueue(E,7,msg(<<>>)), %% 7 {down, E, noconnection} %% 8 ], run_snapshot_test(#{name => ?FUNCTION_NAME, @@ -662,7 +657,7 @@ scenario29(_Config) -> max_length => 5, max_in_memory_length => 1, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []}, + dead_letter_handler => {at_most_once, {?MODULE, banana, []}}, single_active_consumer_on => true }, Commands), @@ -672,19 +667,19 @@ scenario23(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,240,0), Commands = [ - make_enqueue(E,1,<<>>), %% 1 + make_enqueue(E,1,msg(<<>>)), %% 1 make_checkout(C1, {auto,2,simple_prefetch}), %% 2 - make_enqueue(E,2,<<>>), %% 3 - make_enqueue(E,3,<<>>), %% 4 + make_enqueue(E,2,msg(<<>>)), %% 3 + make_enqueue(E,3,msg(<<>>)), %% 4 {down, E, noconnection}, %% 5 - make_enqueue(E,4,<<>>) %% 6 + make_enqueue(E,4,msg(<<>>)) %% 6 ], run_snapshot_test(#{name => ?FUNCTION_NAME, release_cursor_interval => 0, deliver_limit => undefined, max_length => 2, overflow_strategy => drop_head, - dead_letter_handler => {?MODULE, banana, []} + dead_letter_handler => {at_most_once, {?MODULE, banana, []}} }, Commands), ok. @@ -697,7 +692,7 @@ single_active_01(_Config) -> E = test_util:fake_pid(rabbit@fake_node2), Commands = [ make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,1,<<"one">>), + make_enqueue(E,1,msg(<<"one">>)), make_checkout(C2, {auto,1,simple_prefetch}), make_checkout(C1, cancel), {nodeup,rabbit@fake_node1} @@ -716,7 +711,7 @@ single_active_02(_Config) -> E = test_util:fake_pid(node()), Commands = [ make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E,1,<<"one">>), + make_enqueue(E,1,msg(<<"one">>)), {down,E,noconnection}, make_checkout(C2, {auto,1,simple_prefetch}), make_checkout(C2, cancel), @@ -735,8 +730,8 @@ single_active_03(_Config) -> E = test_util:fake_pid(rabbit@fake_node2), Commands = [ make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E, 1, 0), - make_enqueue(E, 2, 1), + make_enqueue(E, 1, msg(<<0>>)), + make_enqueue(E, 2, msg(<<1>>)), {down, Pid, noconnection}, {nodeup, node()} ], @@ -754,10 +749,10 @@ single_active_04(_Config) -> Commands = [ % make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E, 1, <<>>), - make_enqueue(E, 2, <<>>), - make_enqueue(E, 3, <<>>), - make_enqueue(E, 4, <<>>) + make_enqueue(E, 1, msg(<<>>)), + make_enqueue(E, 2, msg(<<>>)), + make_enqueue(E, 3, msg(<<>>)), + make_enqueue(E, 4, msg(<<>>)) % {down, Pid, noconnection}, % {nodeup, node()} ], @@ -796,15 +791,16 @@ snapshots(_Config) -> fun () -> ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit, InMemoryLength, InMemoryBytes, - Overflow}, - frequency([{10, {0, 0, false, 0, 0, 0, drop_head}}, + Overflow, DeadLetterHandler}, + frequency([{10, {0, 0, false, 0, 0, 0, drop_head, undefined}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), oneof([range(1, 3), undefined]), oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), - oneof([drop_head, 
reject_publish]) + oneof([drop_head, reject_publish]), + oneof([undefined, {at_most_once, {?MODULE, banana, []}}]) }}]), begin Config = config(?FUNCTION_NAME, @@ -814,13 +810,43 @@ snapshots(_Config) -> DeliveryLimit, InMemoryLength, InMemoryBytes, - Overflow), + Overflow, + DeadLetterHandler), ?FORALL(O, ?LET(Ops, log_gen(256), expand(Ops, Config)), collect({log_size, length(O)}, snapshots_prop(Config, O))) end) end, [], 1000). +snapshots_dlx(_Config) -> + run_proper( + fun () -> + ?FORALL({Length, Bytes, SingleActiveConsumer, + DeliveryLimit, InMemoryLength, InMemoryBytes}, + frequency([{10, {0, 0, false, 0, 0, 0}}, + {5, {oneof([range(1, 10), undefined]), + oneof([range(1, 1000), undefined]), + boolean(), + oneof([range(1, 3), undefined]), + oneof([range(1, 10), undefined]), + oneof([range(1, 1000), undefined]) + }}]), + begin + Config = config(?FUNCTION_NAME, + Length, + Bytes, + SingleActiveConsumer, + DeliveryLimit, + InMemoryLength, + InMemoryBytes, + reject_publish, + at_least_once), + ?FORALL(O, ?LET(Ops, log_gen_dlx(256), expand(Ops, Config)), + collect({log_size, length(O)}, + snapshots_prop(Config, O))) + end) + end, [], 1000). + single_active(_Config) -> Size = 300, run_proper( @@ -867,7 +893,10 @@ upgrade(_Config) -> SingleActive, DeliveryLimit, InMemoryLength, - undefined), + undefined, + drop_head, + {?MODULE, banana, []} + ), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, upgrade_prop(Config, O))) @@ -923,10 +952,10 @@ single_active_ordering_01(_Config) -> E = test_util:fake_pid(rabbit@fake_node2), E2 = test_util:fake_pid(rabbit@fake_node2), Commands = [ - make_enqueue(E, 1, 0), - make_enqueue(E, 2, 1), + make_enqueue(E, 1, msg(<<"0">>)), + make_enqueue(E, 2, msg(<<"1">>)), make_checkout(C1, {auto,2,simple_prefetch}), - make_enqueue(E2, 1, 2), + make_enqueue(E2, 1, msg(<<"2">>)), make_settle(C1, [0]) ], Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), @@ -945,7 +974,7 @@ single_active_ordering_02(_Config) -> E = test_util:fake_pid(node()), Commands = [ make_checkout(C1, {auto,1,simple_prefetch}), - make_enqueue(E, 2, 1), + make_enqueue(E, 2, msg(<<"1">>)), %% CANNOT HAPPEN {down,E,noproc}, make_settle(C1, [0]) @@ -961,9 +990,9 @@ single_active_ordering_03(_Config) -> C2 = {<<2>>, C2Pid}, E = test_util:fake_pid(rabbit@fake_node2), Commands = [ - make_enqueue(E, 1, 0), - make_enqueue(E, 2, 1), - make_enqueue(E, 3, 2), + make_enqueue(E, 1, msg(<<"0">>)), + make_enqueue(E, 2, msg(<<"1">>)), + make_enqueue(E, 3, msg(<<"2">>)), make_checkout(C1, {auto,1,simple_prefetch}), make_checkout(C2, {auto,1,simple_prefetch}), make_settle(C1, [0]), @@ -1045,17 +1074,232 @@ max_length(_Config) -> end) end, [], Size). -config(Name, Length, Bytes, SingleActive, DeliveryLimit, - InMemoryLength, InMemoryBytes) -> -config(Name, Length, Bytes, SingleActive, DeliveryLimit, - InMemoryLength, InMemoryBytes, drop_head). +%% Test that rabbit_fifo_dlx can check out a prefix message. +dlx_01(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + make_checkout(C1, {auto,1,simple_prefetch}), + make_enqueue(E,1,msg(<<"1">>)), + make_enqueue(E,2,msg(<<"2">>)), + rabbit_fifo:make_discard(C1, [0]), + rabbit_fifo_dlx:make_settle([0]), + rabbit_fifo:make_discard(C1, [1]), + rabbit_fifo_dlx:make_settle([1]) + ], + Config = config(?FUNCTION_NAME, 8, undefined, false, 2, 5, 100, reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. 
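
%% A condensed, hand-written variant of dlx_01 above (illustration only, not
%% wired into all_tests(); 'my_dlx_worker' follows the naming of the cases
%% above) spelling out the minimal at-least-once round trip: the consumer
%% discards a message, the dlx worker checks it out and finally settles it,
%% which removes it from the dlx state.
dlx_roundtrip_example(_Config) ->
    C = {<<"ctag">>, c:pid(0,100,0)},
    E = c:pid(0,101,0),
    Commands = [
                make_enqueue(E, 1, msg(<<"m1">>)),
                make_checkout(C, {auto,1,simple_prefetch}),
                rabbit_fifo:make_discard(C, [0]),
                rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1),
                rabbit_fifo_dlx:make_settle([0])
               ],
    Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined,
                    undefined, undefined, reject_publish, at_least_once),
    ?assert(snapshots_prop(Config, Commands)),
    ok.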
+ +%% Test that dehydrating dlx_consumer works. +dlx_02(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + make_checkout(C1, {auto,1,simple_prefetch}), + make_enqueue(E,1,msg(<<"1">>)), + %% State contains release cursor A. + rabbit_fifo:make_discard(C1, [0]), + make_enqueue(E,2,msg(<<"2">>)), + %% State contains release cursor B + %% with the 1st msg being checked out to dlx_consumer and + %% being dehydrated. + rabbit_fifo_dlx:make_settle([0]) + %% Release cursor A got emitted. + ], + Config = config(?FUNCTION_NAME, 10, undefined, false, 5, 5, 100, reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + +%% Test that dehydrating discards queue works. +dlx_03(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + make_enqueue(E,1,msg(<<"1">>)), + %% State contains release cursor A. + make_checkout(C1, {auto,1,simple_prefetch}), + rabbit_fifo:make_discard(C1, [0]), + make_enqueue(E,2,msg(<<"2">>)), + %% State contains release cursor B. + %% 1st message sitting in discards queue got dehydrated. + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_settle([0]) + %% Release cursor A got emitted. + ], + Config = config(?FUNCTION_NAME, 10, undefined, false, 5, 5, 100, reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + +dlx_04(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 3), + make_enqueue(E,1,msg(<<>>)), + make_enqueue(E,2,msg(<<>>)), + make_enqueue(E,3,msg(<<>>)), + make_enqueue(E,4,msg(<<>>)), + make_enqueue(E,5,msg(<<>>)), + make_enqueue(E,6,msg(<<>>)), + make_checkout(C1, {auto,6,simple_prefetch}), + rabbit_fifo:make_discard(C1, [0,1,2,3,4,5]), + rabbit_fifo_dlx:make_settle([0,1,2]) + ], + Config = config(?FUNCTION_NAME, undefined, undefined, true, 1, 5, 136, reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + +%% Test that discards queue gets dehydrated with 1 message that has empty message body. +dlx_05(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + make_enqueue(E,1,msg(<<>>)), + make_enqueue(E,2,msg(<<"msg2">>)), + %% 0,1 in messages + make_checkout(C1, {auto,1,simple_prefetch}), + rabbit_fifo:make_discard(C1, [0]), + %% 0 in discards, 1 in checkout + make_enqueue(E,3,msg(<<"msg3">>)), + %% 0 in discards (rabbit_fifo_dlx msg_bytes is still 0 because body of msg 0 is empty), + %% 1 in checkout, 2 in messages + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + %% 0 in dlx_checkout, 1 in checkout, 2 in messages + make_settle(C1, [1]), + %% 0 in dlx_checkout, 2 in checkout + rabbit_fifo_dlx:make_settle([0]) + %% 2 in checkout + ], + Config = config(?FUNCTION_NAME, 0, 0, false, 0, 0, 0, reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + +% Test that after recovery we can differentiate between index messge and (prefix) disk message +dlx_06(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + make_enqueue(E,1,msg(<<>>)), + %% The following message has 3 bytes. 
+ %% If we cannot differentiate between disk message and prefix disk message, + %% rabbit_fifo:delete_indexes/2 will not know whether it's a disk message or + %% prefix disk message and it will therefore falsely think that 3 is an index + %% instead of a size header resulting in message 3 being deleted from the index + %% after recovery. + make_enqueue(E,2,msg(<<"111">>)), + make_enqueue(E,3,msg(<<>>)), + %% 0,1,2 in messages + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 2), + make_checkout(C1, {auto,3,simple_prefetch}), + %% 0,1,2 in checkout + rabbit_fifo:make_discard(C1, [0,1,2]), + %% 0,1 in dlx_checkout, 3 in discards + rabbit_fifo_dlx:make_settle([0,1]) + %% 3 in dlx_checkout + ], + Config = config(?FUNCTION_NAME, undefined, 749, false, 1, 1, 131, reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + +dlx_07(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + make_checkout(C1, {auto,1,simple_prefetch}), + make_enqueue(E,1,msg(<<"12">>)), + %% 0 in checkout + rabbit_fifo:make_discard(C1, [0]), + %% 0 in discard + make_enqueue(E,2,msg(<<"1234567">>)), + %% 0 in discard, 1 in checkout + rabbit_fifo:make_discard(C1, [1]), + %% 0, 1 in discard + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + %% 0 in dlx_checkout, 1 in discard + make_enqueue(E,3,msg(<<"123">>)), + %% 0 in dlx_checkout, 1 in discard, 2 in checkout + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 2), + %% 0,1 in dlx_checkout, 2 in checkout + rabbit_fifo_dlx:make_settle([0]), + %% 1 in dlx_checkout, 2 in checkout + make_settle(C1, [2]), + %% 1 in dlx_checkout + make_enqueue(E,4,msg(<<>>)), + %% 1 in dlx_checkout, 3 in checkout + rabbit_fifo_dlx:make_settle([0,1]) + %% 3 in checkout + ], + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + +%% This test fails if discards queue is not normalized for comparison. +dlx_08(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + make_enqueue(E,1,msg(<<>>)), + %% 0 in messages + make_checkout(C1, {auto,1,simple_prefetch}), + %% 0 in checkout + make_enqueue(E,2,msg(<<>>)), + %% 1 in messages, 0 in checkout + rabbit_fifo:make_discard(C1, [0]), + %% 1 in checkout, 0 in discards + make_enqueue(E,3,msg(<<>>)), + %% 2 in messages, 1 in checkout, 0 in discards + rabbit_fifo:make_discard(C1, [1]), + %% 2 in checkout, 0,1 in discards + rabbit_fifo:make_discard(C1, [2]), + %% 0,1,2 in discards + make_enqueue(E,4,msg(<<>>)), + %% 3 in checkout, 0,1,2 in discards + %% last command emitted this release cursor + make_settle(C1, [3]), + make_enqueue(E,5,msg(<<>>)), + make_enqueue(E,6,msg(<<>>)), + rabbit_fifo:make_discard(C1, [4]), + rabbit_fifo:make_discard(C1, [5]), + make_enqueue(E,7,msg(<<>>)), + make_enqueue(E,8,msg(<<>>)), + make_enqueue(E,9,msg(<<>>)), + rabbit_fifo:make_discard(C1, [6]), + rabbit_fifo:make_discard(C1, [7]), + rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + make_enqueue(E,10,msg(<<>>)), + rabbit_fifo:make_discard(C1, [8]), + rabbit_fifo_dlx:make_settle([0]), + rabbit_fifo:make_discard(C1, [9]), + rabbit_fifo_dlx:make_settle([1]), + rabbit_fifo_dlx:make_settle([2]) + ], + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. 
+ +config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes) -> +config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes, + drop_head, {at_most_once, {?MODULE, banana, []}}). config(Name, Length, Bytes, SingleActive, DeliveryLimit, - InMemoryLength, InMemoryBytes, Overflow) -> + InMemoryLength, InMemoryBytes, Overflow, DeadLetterHandler) -> #{name => Name, max_length => map_max(Length), max_bytes => map_max(Bytes), - dead_letter_handler => {?MODULE, banana, []}, + dead_letter_handler => DeadLetterHandler, single_active_consumer_on => SingleActive, delivery_limit => map_max(DeliveryLimit), max_in_memory_length => map_max(InMemoryLength), @@ -1121,6 +1365,16 @@ validate_idx_order(Idxs, ReleaseCursorIdx) -> ok end. +%%TODO write separate generator for dlx using single_active_prop() or +%% messages_total_prop() as base template. +%% +%% E.g. enqueue few messages and have a consumer rejecting those. +%% The invariant could be: Delivery effects to dlx_worker must match the number of dead-lettered messages. +%% +%% Other invariants could be: +%% * if new consumer subscribes, messages are checked out to new consumer +%% * if dlx_worker fails receiving DOWN, messages are still in state. + single_active_prop(Conf0, Commands, ValidateOrder) -> Conf = Conf0#{release_cursor_interval => 100}, Indexes = lists:seq(1, length(Commands)), @@ -1166,14 +1420,23 @@ messages_total_invariant() -> consumers = C, enqueuers = E, prefix_msgs = {PTot, _, RTot, _}, - returns = R} = S) -> + returns = R, + dlx = #rabbit_fifo_dlx{discards = D, + consumer = DlxCon}} = S) -> Base = lqueue:len(M) + lqueue:len(R) + PTot + RTot, CTot = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> Acc + map_size(Ch) end, Base, C), - Tot = maps:fold(fun (_, #enqueuer{pending = P}, Acc) -> + Tot0 = maps:fold(fun (_, #enqueuer{pending = P}, Acc) -> Acc + length(P) end, CTot, E), + Tot1 = Tot0 + lqueue:len(D), + Tot = case DlxCon of + undefined -> + Tot1; + #dlx_consumer{checked_out = DlxChecked} -> + Tot1 + map_size(DlxChecked) + end, QTot = rabbit_fifo:query_messages_total(S), case Tot == QTot of true -> true; @@ -1262,9 +1525,6 @@ snapshots_prop(Conf, Commands) -> end. log_gen(Size) -> - log_gen(Size, binary()). - -log_gen(Size, _Body) -> Nodes = [node(), fakenode@fake, fakenode@fake2 @@ -1287,6 +1547,35 @@ log_gen(Size, _Body) -> {1, purge} ]))))). +log_gen_dlx(Size) -> + Nodes = [node(), + fakenode@fake, + fakenode@fake2 + ], + ?LET(EPids, vector(2, pid_gen(Nodes)), + ?LET(CPids, vector(2, pid_gen(Nodes)), + resize(Size, + list( + frequency( + [{20, enqueue_gen(oneof(EPids))}, + {40, {input_event, + frequency([{1, settle}, + {1, return}, + %% dead-letter many messages + {5, discard}, + {1, requeue}])}}, + {2, checkout_gen(oneof(CPids))}, + {1, checkout_cancel_gen(oneof(CPids))}, + {1, down_gen(oneof(EPids ++ CPids))}, + {1, nodeup_gen(Nodes)}, + {1, purge}, + %% same dlx_worker can subscribe multiple times, + %% e.g. after it dlx_worker crashed + %% "last subscriber wins" + {2, {checkout_dlx, choose(1,10)}} + ]))))). + + log_gen_config(Size) -> Nodes = [node(), fakenode@fake, @@ -1359,7 +1648,18 @@ enqueue_gen(Pid, Enq, Del) -> ?LET(E, {enqueue, Pid, frequency([{Enq, enqueue}, {Del, delay}]), - binary()}, E). + msg_gen()}, E). + +%% It's fair to assume that every message enqueued is a #basic_message. +%% That's what the channel expects and what rabbit_quorum_queue invokes rabbit_fifo_client with. 
+msg_gen() -> + ?LET(Bin, binary(), + #basic_message{content = #content{payload_fragments_rev = [Bin], + properties = none}}). + +msg(Bin) when is_binary(Bin) -> + #basic_message{content = #content{payload_fragments_rev = [Bin], + properties = none}}. checkout_cancel_gen(Pid) -> {checkout, Pid, cancel}. @@ -1368,11 +1668,7 @@ checkout_gen(Pid) -> %% pid, tag, prefetch ?LET(C, {checkout, {binary(), Pid}, choose(1, 100)}, C). - --record(t, {state = rabbit_fifo:init(#{name => proper, - queue_resource => blah, - release_cursor_interval => 1}) - :: rabbit_fifo:state(), +-record(t, {state :: rabbit_fifo:state(), index = 1 :: non_neg_integer(), %% raft index enqueuers = #{} :: #{pid() => term()}, consumers = #{} :: #{{binary(), pid()} => term()}, @@ -1387,20 +1683,34 @@ checkout_gen(Pid) -> expand(Ops, Config) -> expand(Ops, Config, {undefined, fun ra_lib:id/1}). +%% generates a sequence of Raft commands expand(Ops, Config, EnqFun) -> %% execute each command against a rabbit_fifo state and capture all relevant %% effects - T = #t{enq_body_fun = EnqFun, + InitConfig0 = #{name => proper, + queue_resource => blah, + release_cursor_interval => 1}, + InitConfig = case Config of + #{dead_letter_handler := at_least_once} -> + %% Configure rabbit_fifo config with at_least_once so that + %% rabbit_fifo_dlx outputs dlx_delivery effects + %% which we are going to settle immediately in enq_effs/2. + %% Therefore the final generated Raft commands will include + %% {dlx, {checkout, ...}} and {dlx, {settle, ...}} Raft commands. + maps:put(dead_letter_handler, at_least_once, InitConfig0); + _ -> + InitConfig0 + end, + T = #t{state = rabbit_fifo:init(InitConfig), + enq_body_fun = EnqFun, config = Config}, #t{effects = Effs} = T1 = lists:foldl(fun handle_op/2, T, Ops), %% process the remaining effect #t{log = Log} = lists:foldl(fun do_apply/2, T1#t{effects = queue:new()}, queue:to_list(Effs)), - lists:reverse(Log). - handle_op({enqueue, Pid, When, Data}, #t{enqueuers = Enqs0, enq_body_fun = {EnqSt0, Fun}, @@ -1493,6 +1803,9 @@ handle_op({input_event, Settlement}, #t{effects = Effs, false -> do_apply(Cmd, T#t{effects = Q}) end; + {{value, {dlx, {settle, MsgIds}}}, Q} -> + Cmd = rabbit_fifo_dlx:make_settle(MsgIds), + do_apply(Cmd, T#t{effects = Q}); _ -> T end; @@ -1500,7 +1813,10 @@ handle_op(purge, T) -> do_apply(rabbit_fifo:make_purge(), T); handle_op({update_config, Changes}, #t{config = Conf} = T) -> Config = maps:merge(Conf, Changes), - do_apply(rabbit_fifo:make_update_config(Config), T). + do_apply(rabbit_fifo:make_update_config(Config), T); +handle_op({checkout_dlx, Prefetch}, #t{config = #{dead_letter_handler := at_least_once}} = T) -> + Cmd = rabbit_fifo_dlx:make_checkout(proper_dlx_worker, Prefetch), + do_apply(Cmd, T). do_apply(Cmd, #t{effects = Effs, @@ -1534,14 +1850,17 @@ enq_effs([{send_msg, P, {delivery, CTag, Msgs}, _Opts} | Rem], Q) -> %% they can be changed depending on the input event later Cmd = rabbit_fifo:make_settle({CTag, P}, MsgIds), enq_effs(Rem, queue:in(Cmd, Q)); +enq_effs([{send_msg, _, {dlx_delivery, Msgs}, _Opts} | Rem], Q) -> + MsgIds = [I || {I, _} <- Msgs], + Cmd = rabbit_fifo_dlx:make_settle(MsgIds), + enq_effs(Rem, queue:in(Cmd, Q)); enq_effs([_ | Rem], Q) -> enq_effs(Rem, Q). 
%% Utility run_proper(Fun, Args, NumTests) -> - ?assertEqual( - true, + ?assert( proper:counterexample( erlang:apply(Fun, Args), [{numtests, NumTests}, @@ -1585,10 +1904,12 @@ run_snapshot_test0(Conf, Commands, Invariant) -> State -> ok; _ -> ct:pal("Snapshot tests failed run log:~n" - "~p~n from ~n~p~n Entries~n~p~n" + "~p~n from snapshot index ~b " + "with snapshot state~n~p~n Entries~n~p~n" "Config: ~p~n", - [Filtered, SnapState, Entries, Conf]), - ct:pal("Expected~n~p~nGot:~n~p", [State, S]), + [Filtered, SnapIdx, SnapState, Entries, Conf]), + ct:pal("Expected~n~p~nGot:~n~p~n", [?record_info(rabbit_fifo, State), + ?record_info(rabbit_fifo, S)]), ?assertEqual(State, S) end end || {release_cursor, SnapIdx, SnapState} <- Cursors], diff --git a/deps/rabbit_common/include/rabbit.hrl b/deps/rabbit_common/include/rabbit.hrl index f40b92a24b88..41a5f3045865 100644 --- a/deps/rabbit_common/include/rabbit.hrl +++ b/deps/rabbit_common/include/rabbit.hrl @@ -112,7 +112,7 @@ -record(basic_message, {exchange_name, %% The exchange where the message was received routing_keys = [], %% Routing keys used during publish - content, %% The message content + content, %% The message #content record id, %% A `rabbit_guid:gen()` generated id is_persistent}). %% Whether the message was published as persistent diff --git a/deps/rabbitmq_management/priv/www/js/global.js b/deps/rabbitmq_management/priv/www/js/global.js index 1981d9439c08..dbdedc2ab7ea 100644 --- a/deps/rabbitmq_management/priv/www/js/global.js +++ b/deps/rabbitmq_management/priv/www/js/global.js @@ -174,7 +174,7 @@ const QUEUE_EXTRA_CONTENT_REQUESTS = []; // All help ? popups var HELP = { 'delivery-limit': - 'The number of allowed unsuccessful delivery attempts. Once a message has been delivered unsuccessfully this many times it will be dropped or dead-lettered, depending on the queue configuration.', + 'The number of allowed unsuccessful delivery attempts. Once a message has been delivered unsuccessfully more than this many times it will be dropped or dead-lettered, depending on the queue configuration.', 'exchange-auto-delete': 'If yes, the exchange will delete itself after at least one queue or exchange has been bound to this one, and then all queues or exchanges have been unbound.', @@ -218,6 +218,9 @@ var HELP = { 'queue-dead-letter-routing-key': 'Optional replacement routing key to use when a message is dead-lettered. If this is not set, the message\'s original routing key will be used.
(Sets the "x-dead-letter-routing-key" argument.)', + 'queue-dead-letter-strategy': + 'Valid values are at-most-once or at-least-once. It defaults to at-most-once. This setting is understood only by quorum queues. If at-least-once is set, Overflow behaviour must be set to reject-publish. Otherwise, dead letter strategy will fall back to at-most-once.', + 'queue-single-active-consumer': 'If set, makes sure only one consumer at a time consumes from the queue and fails over to another registered consumer in case the active one is cancelled or dies.
(Sets the "x-single-active-consumer" argument.)', @@ -246,11 +249,14 @@ var HELP = { 'Set the queue initial cluster size.', 'queue-type': - 'Set the queue type, determining the type of queue to use: raft-based high availability or classic queue. Valid values are quorum or classic. It defaults to classic.
', + 'Set the queue type, determining the type of queue to use: raft-based high availability or classic queue. Valid values are quorum or classic. It defaults to classic.
', 'queue-messages': '

Message counts.

Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.

', + 'queue-dead-lettered': + 'Applies to messages dead-lettered with dead-letter-strategy at-least-once.', + 'queue-message-body-bytes': '

The sum total of the sizes of the message bodies in this queue. This only counts message bodies; it does not include message properties (including headers) or metadata used by the queue.

Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.

If a message is routed to multiple queues on publication, its body will be stored only once (in memory and on disk) and shared between queues. The value shown here does not take account of this effect.

', diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs index 3b93c210acb0..716e7bc83fba 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs @@ -103,7 +103,8 @@ Overflow behaviour | Auto expire
Dead letter exchange | - Dead letter routing key
+ Dead letter routing key
+ Message TTL
Queues [Classic] @@ -114,7 +115,6 @@ HA mirror promotion on shutdown | HA mirror promotion on failure
- Message TTL | Lazy mode | Version | Master Locator
@@ -128,7 +128,9 @@ Max in memory bytes | Delivery limit - +
+ Dead letter strategy + @@ -271,13 +273,14 @@ Max length | Max length bytes | Overflow behaviour - +
+ Message TTL + Queues [Classic] - Message TTL | Auto expire diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs index c2022f73bb42..c9d7319bb4ff 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs @@ -167,6 +167,9 @@ Unacked <% if (is_quorum(queue)) { %> In memory ready + Dead-lettered + + <% } %> <% if (is_classic(queue)) { %> In memory @@ -192,6 +195,9 @@ <%= fmt_num_thousands(queue.messages_ram) %> + + <%= fmt_num_thousands(queue.messages_dlx) %> + <% } %> <% if (is_classic(queue)) { %> @@ -224,6 +230,11 @@ <%= fmt_bytes(queue.message_bytes_ram) %> <% } %> + <% if (is_quorum(queue)) { %> + + <%= fmt_bytes(queue.message_bytes_dlx) %> + + <% } %> <% if (is_classic(queue)) { %> <%= fmt_bytes(queue.message_bytes_persistent) %> diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs index 8a14fd2dd917..0e4f2779a22f 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs @@ -312,13 +312,11 @@ Add - <% if (queue_type == "classic") { %> - Message TTL | - <% } %> <% if (queue_type != "stream") { %> Auto expire | - Overflow behaviour | - Single active consumer
+ Message TTL | + Overflow behaviour
+ Single active consumer | Dead letter exchange | Dead letter routing key
Max length | @@ -334,7 +332,8 @@ Delivery limit | Max in memory length | Max in memory bytes - | Initial cluster size
+ | Initial cluster size
+ Dead letter strategy
<% } %> <% if (queue_type == "stream") { %> Max time retention From 8dc7332a4aaaffdba791121bff3c02a57312aaa8 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 7 Dec 2021 15:00:55 +0100 Subject: [PATCH 08/97] Add integration tests for at-least-once dead-lettering --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 65 ++-- deps/rabbit/test/quorum_queue_SUITE.erl | 1 - .../rabbit_fifo_dlx_integration_SUITE.erl | 340 ++++++++++++++++++ 3 files changed, 378 insertions(+), 28 deletions(-) create mode 100644 deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 89c53533dcb7..ccf4fd1067ad 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -24,14 +24,10 @@ handle_cast/2, handle_call/3, handle_info/2, code_change/3, format_status/2]). -%%TODO make configurable or leave at 0 which means 2000 as in -%% https://github.com/rabbitmq/rabbitmq-server/blob/1e7df8c436174735b1d167673afd3f1642da5cdc/deps/rabbit/src/rabbit_quorum_queue.erl#L726-L729 --define(CONSUMER_PREFETCH_COUNT, 100). +%%TODO make configurable via cuttlefish? +-define(DEFAULT_PREFETCH, 100). +-define(DEFAULT_SETTLE_TIMEOUT, 120_000). -define(HIBERNATE_AFTER, 180_000). -%% If no publisher confirm was received for at least SETTLE_TIMEOUT, message will be redelivered. -%% To prevent duplicates in the target queue and to ensure message will eventually be acked to the source queue, -%% set this value higher than the maximum time it takes for a queue to settle a message. --define(SETTLE_TIMEOUT, 120_000). -record(pending, { %% consumed_msg_id is not to be confused with consumer delivery tag. @@ -78,11 +74,15 @@ queue_type_state :: rabbit_queue_type:state(), %% Consumed messages for which we have not received all publisher confirms yet. %% Therefore, they have not been ACKed yet to the consumer queue. - %% This buffer contains at most CONSUMER_PREFETCH_COUNT pending messages at any given point in time. + %% This buffer contains at most PREFETCH pending messages at any given point in time. pendings = #{} :: #{OutSeq :: non_neg_integer() => #pending{}}, %% next publisher confirm delivery tag sequence number next_out_seq = 1, - %% Timer firing every SETTLE_TIMEOUT milliseconds + %% If no publisher confirm was received for at least settle_timeout milliseconds, message will be redelivered. + %% To prevent duplicates in the target queue and to ensure message will eventually be acked to the source queue, + %% set this value higher than the maximum time it takes for a queue to settle a message. + settle_timeout :: non_neg_integer(), + %% Timer firing every settle_timeout milliseconds %% redelivering messages for which not all publisher confirms were received. %% If there are no pending messages, this timer will eventually be cancelled to allow %% this worker to hibernate. @@ -103,16 +103,23 @@ init(Arg) -> {ok, undefined, {continue, Arg}}. 
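%% The handle_continue/2 clause below now reads both values from the 'rabbit'
%% application environment rather than the old compile-time macros. As a
%% hedged usage sketch (the key names come from this patch, the values are
%% purely illustrative), an operator could override the defaults via
%% advanced.config:
%%
%%   [
%%    {rabbit, [{dead_letter_worker_consumer_prefetch, 32},
%%              {dead_letter_worker_publisher_confirm_timeout_ms, 60000}]}
%%   ].
%%
%% The integration suite added in this patch sets the same keys through
%% rabbit_ct_helpers:merge_app_env/2 in init_per_suite/1.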
handle_continue({QRef, RegName}, undefined) -> + Prefetch = application:get_env(rabbit, + dead_letter_worker_consumer_prefetch, + ?DEFAULT_PREFETCH), + SettleTimeout = application:get_env(rabbit, + dead_letter_worker_publisher_confirm_timeout_ms, + ?DEFAULT_SETTLE_TIMEOUT), State = lookup_topology(#state{queue_ref = QRef}), {ok, Q} = rabbit_amqqueue:lookup(QRef), {ClusterName, _MaybeOldLeaderNode} = amqqueue:get_pid(Q), {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(RegName, QRef, {ClusterName, node()}, - ?CONSUMER_PREFETCH_COUNT), + Prefetch), {noreply, State#state{registered_name = RegName, dlx_client_state = ConsumerState, - queue_type_state = rabbit_queue_type:init()}}. + queue_type_state = rabbit_queue_type:init(), + settle_timeout = SettleTimeout}}. terminate(_Reason, _State) -> %%TODO cancel timer? @@ -303,7 +310,7 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, [] -> %% We can't deliver this message since there is no target queue we can route to. %% Under no circumstances should we drop a message with dead-letter-strategy at-least-once. - %% We buffer this message and retry to send every SETTLE_TIMEOUT milliseonds + %% We buffer this message and retry to send every settle_timeout milliseonds %% (until the user has fixed the dead-letter routing topology). State1#state{pendings = maps:put(OutSeq, Pend0, Pendings)}; _ -> @@ -320,13 +327,14 @@ deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0 State = State0#state{queue_type_state = QTypeState1}, handle_queue_actions(Actions, State). -handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0} = State) -> +handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0, + settle_timeout = SettleTimeout} = State) -> Pendings = lists:foldl(fun (MsgSeq, P0) -> - handle_settled0(QRef, MsgSeq, P0) - end, Pendings0, MsgSeqs), + handle_settled0(QRef, MsgSeq, SettleTimeout, P0) + end, Pendings0, MsgSeqs), State#state{pendings = Pendings}. -handle_settled0(QRef, MsgSeq, Pendings) -> +handle_settled0(QRef, MsgSeq, SettleTimeout, Pendings) -> case maps:find(MsgSeq, Pendings) of {ok, #pending{unsettled = Unset0, settled = Set0} = Pend0} -> Unset = lists:delete(QRef, Unset0), @@ -337,7 +345,7 @@ handle_settled0(QRef, MsgSeq, Pendings) -> rabbit_log:warning("Ignoring publisher confirm for sequence number ~b " "from target dead letter ~s after settle timeout of ~bms. " "Troubleshoot why that queue confirms so slowly.", - [MsgSeq, rabbit_misc:rs(QRef), ?SETTLE_TIMEOUT]), + [MsgSeq, rabbit_misc:rs(QRef), SettleTimeout]), Pendings end. @@ -371,7 +379,8 @@ maybe_ack(#state{pendings = Pendings0, %% Re-deliver messages that timed out waiting on publisher confirm and %% messages that got never sent due to routing topology misconfiguration. -redeliver_messsages(#state{pendings = Pendings} = State) -> +redeliver_messsages(#state{pendings = Pendings, + settle_timeout = SettleTimeout} = State) -> case lookup_dlx(State) of not_found -> %% Configured dead-letter-exchange does (still) not exist. @@ -381,7 +390,7 @@ redeliver_messsages(#state{pendings = Pendings} = State) -> DLX -> Now = os:system_time(millisecond), maps:fold(fun(OutSeq, #pending{last_published_at = LastPub} = Pend, S0) - when LastPub + ?SETTLE_TIMEOUT =< Now -> + when LastPub + SettleTimeout =< Now -> %% Publisher confirm timed out. 
redeliver(Pend, DLX, OutSeq, S0); (OutSeq, #pending{last_published_at = undefined} = Pend, S0) -> @@ -418,14 +427,14 @@ redeliver(Pend, DLX, OldOutSeq, #state{routing_key = DLRKey} = State) -> %% Therefore, to keep things simple, create a brand new delivery, store it in our state and forget about the old delivery and %% sequence number. %% -%% If a sequene number gets settled after SETTLE_TIMEOUT, we can't map it anymore to the #pending{}. Hence, we ignore it. +%% If a sequene number gets settled after settle_timeout, we can't map it anymore to the #pending{}. Hence, we ignore it. %% -%% This can lead to issues when SETTLE_TIMEOUT is too low and time to settle takes too long. -%% For example, if SETTLE_TIMEOUT is set to only 10 seconds, but settling a message takes always longer than 10 seconds +%% This can lead to issues when settle_timeout is too low and time to settle takes too long. +%% For example, if settle_timeout is set to only 10 seconds, but settling a message takes always longer than 10 seconds %% (e.g. due to extremly slow hypervisor disks that ran out of credit), we will re-deliver the same message all over again %% leading to many duplicates in the target queue without ever acking the message back to the source discards queue. %% -%% Therefore, set SETTLE_TIMEOUT reasonably high (e.g. 2 minutes). +%% Therefore, set settle_timeout reasonably high (e.g. 2 minutes). %% %% TODO do not log per message? redeliver0(#pending{consumed_msg_id = ConsumedMsgId, @@ -438,7 +447,8 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, #state{next_out_seq = OutSeq, queue_ref = QRef, pendings = Pendings0, - exchange_ref = DLXRef} = State0) when is_list(DLRKeys) -> + exchange_ref = DLXRef, + settle_timeout = SettleTimeout} = State0) when is_list(DLRKeys) -> BasicMsg = #basic_message{exchange_name = DLXRef, routing_keys = DLRKeys, %% BCC Header was already stripped previously @@ -459,7 +469,7 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, "message_sequence_number=~b " "consumed_message_sequence_number=~b " "publish_count=~b.", - [strings(Settled), strings(Unsettled), ?SETTLE_TIMEOUT, + [strings(Settled), strings(Unsettled), SettleTimeout, OldOutSeq, ConsumedMsgId, PublishCount]), case {RouteToQs, Cycles, Settled} of {[], [], []} -> @@ -514,8 +524,9 @@ maybe_set_timer(#state{timer = TRef} = State) when is_reference(TRef) -> maybe_set_timer(#state{timer = undefined, pendings = Pendings} = State) when map_size(Pendings) =:= 0 -> State; -maybe_set_timer(#state{timer = undefined} = State) -> - TRef = erlang:send_after(?SETTLE_TIMEOUT, self(), {'$gen_cast', settle_timeout}), +maybe_set_timer(#state{timer = undefined, + settle_timeout = SettleTimeout} = State) -> + TRef = erlang:send_after(SettleTimeout, self(), {'$gen_cast', settle_timeout}), % rabbit_log:debug("set timer"), State#state{timer = TRef}. diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index ab271a169b4a..668295caece6 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -20,7 +20,6 @@ ra_name/1]). -compile([nowarn_export_all, export_all]). --compile(export_all). -define(DEFAULT_AWAIT, 10000). diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl new file mode 100644 index 000000000000..1b1aa2d76b73 --- /dev/null +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -0,0 +1,340 @@ +-module(rabbit_fifo_dlx_integration_SUITE). 
+ +%% Integration tests for at-least-once dead-lettering comprising mainly +%% rabbit_fifo_dlx, rabbit_fifo_dlx_worker, rabbit_fifo_dlx_client +%% rabbit_quorum_queue, rabbit_fifo. + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). + +-import(quorum_queue_utils, [wait_for_messages_ready/3, + dirty_query/3, + ra_name/1]). +-import(quorum_queue_SUITE, [publish/2, + consume/3]). + +-compile([nowarn_export_all, export_all]). + +all() -> + [ + {group, single_node} + ]. + +groups() -> + [{single_node, [], [ + expired, + rejected, + delivery_limit, + target_queue_not_bound, + dlx_missing + ]}]. + +%% TODO add tests for: +%% * overview and query functions return correct result / stats +%% * dlx_worker resends in various topology misconfigurations +%% * dlx_worker resends when target queue is down (e.g. node is down where non-mirrored classic queue resides) +%% * we comply with mandatory + publisher confirm semantics, e.g. with 3 target queues (1 classic queue, 1 quorum queue, 1 stream) +%% * there is always single leader in 3 node cluster (check via supervisor:count_children and by killing one node) +%% * fall back to at-most-once works +%% * switching between at-most-once and at-least-once works including rabbit_fifo_dlx:cleanup + +init_per_suite(Config0) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:merge_app_env( + Config0, {rabbit, [{quorum_tick_interval, 1000}, + {dead_letter_worker_consumer_prefetch, 2}, + {dead_letter_worker_publisher_confirm_timeout_ms, 1000} + ]}), + Config2 = rabbit_ct_helpers:merge_app_env( + Config1, {aten, [{poll_interval, 1000}]}), + rabbit_ct_helpers:run_setup_steps(Config2). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(Group, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, + [{rmq_nodes_count, 1}, + {rmq_nodename_suffix, Group}, + {tcp_ports_base}, + {net_ticktime, 10}]), + Config2 = rabbit_ct_helpers:run_steps(Config1, + [fun merge_app_env/1 ] ++ + rabbit_ct_broker_helpers:setup_steps()), + ok = rabbit_ct_broker_helpers:rpc( + Config2, 0, application, set_env, + [rabbit, channel_tick_interval, 100]), + timer:sleep(1000), + Config2. + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_broker_helpers:teardown_steps()). + +merge_app_env(Config) -> + rabbit_ct_helpers:merge_app_env( + rabbit_ct_helpers:merge_app_env(Config, + {rabbit, [{core_metrics_gc_interval, 100}]}), + {ra, [{min_wal_roll_over_interval, 30000}]}). + +init_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), + Q = rabbit_data_coercion:to_binary(Testcase), + Config2 = rabbit_ct_helpers:set_config(Config1, + [{source_queue, <>}, + {target_queue_1, <>}, + {target_queue_2, <>} + ]), + rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()). + + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% Test that at-least-once dead-lettering works for message dead-lettered due to message TTL. 
+expired(Config) -> + {_Server, Ch, SourceQ, TargetQ, _DLX} = Objects = declare_topology(Config, []), + Msg = <<"msg">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg}), + {_, #amqp_msg{props = #'P_basic'{headers = Headers, + expiration = undefined}}} = + ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + 1000), + ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), + ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), + {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), + ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Death, <<"queue">>)), + ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death, <<"reason">>)), + ?assertEqual({longstr, <<"0">>}, rabbit_misc:table_lookup(Death, <<"original-expiration">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)), + ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)), + ?assertEqual({array, [{longstr, SourceQ}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)), + delete_topology(Objects). + +%% Test that at-least-once dead-lettering works for message dead-lettered due to rejected by consumer. +rejected(Config) -> + {Server, Ch, SourceQ, TargetQ, _DLX} = Objects = declare_topology(Config, []), + publish(Ch, SourceQ), + wait_for_messages_ready([Server], ra_name(SourceQ), 1), + DelTag = consume(Ch, SourceQ, false), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DelTag, + multiple = false, + requeue = false}), + {_, #amqp_msg{props = #'P_basic'{headers = Headers}}} = + ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + 1000), + ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), + ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), + {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), + ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Death, <<"queue">>)), + ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Death, <<"reason">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)), + ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)), + ?assertEqual({array, [{longstr, SourceQ}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)), + delete_topology(Objects). + +%% Test that at-least-once dead-lettering works for message dead-lettered due to delivery-limit exceeded. 
+delivery_limit(Config) -> + {Server, Ch, SourceQ, TargetQ, _DLX} = Objects = + declare_topology(Config, [{<<"x-delivery-limit">>, long, 0}]), + publish(Ch, SourceQ), + wait_for_messages_ready([Server], ra_name(SourceQ), 1), + DelTag = consume(Ch, SourceQ, false), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DelTag, + multiple = false, + requeue = true}), + {_, #amqp_msg{props = #'P_basic'{headers = Headers}}} = + ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + 1000), + ?assertEqual({longstr, <<"delivery_limit">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), + ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), + {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), + ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Death, <<"queue">>)), + ?assertEqual({longstr, <<"delivery_limit">>}, rabbit_misc:table_lookup(Death, <<"reason">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)), + ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)), + ?assertEqual({array, [{longstr, SourceQ}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)), + delete_topology(Objects). + +%% Test that message is not lost despite no route from dead-letter exchange to target queue. +%% Once, the route becomes available, the message is delivered to the target queue +%% and acked to the source quorum queue. +target_queue_not_bound(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + DLX = <<"dead-ex">>, + QArgs = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + auto_delete = false, + arguments = QArgs}), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + Msg = <<"msg">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg}), + RaName = ra_name(SourceQ), + %% Binding from target queue to DLX is missing. + %% Therefore, 1 message should be kept in discards queue. + eventually(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + consistently(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + %% Fix dead-letter toplology misconfiguration. + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + %% Binding from target queue to DLX is now present. + %% Therefore, message should be delivered to target queue and acked to source queue. 
+ eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), + 500, 10), + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, + payload = Msg}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), + delete_topology({Server, Ch, SourceQ, TargetQ, DLX}). + +%% Test that message is not lost when configured dead-letter exchange does not exist. +%% Once, the exchange gets declared, the message is delivered to the target queue +%% and acked to the source quorum queue. +dlx_missing(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + DLX = <<"dead-ex">>, + QArgs = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + auto_delete = false, + arguments = QArgs}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + Msg = <<"msg">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg}), + RaName = ra_name(SourceQ), + %% DLX is missing. Therefore, 1 message should be kept in discards queue. + eventually(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + consistently(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + %% Fix dead-letter toplology misconfiguration. + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + %% DLX is now present. + %% Therefore, message should be delivered to target queue and acked to source queue. + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), + 500, 10), + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, + payload = Msg}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), + delete_topology({Server, Ch, SourceQ, TargetQ, DLX}). 
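%% Usage note (a sketch, not part of the suite): the [{1, _}] values matched
%% above are the per-server results of rabbit_fifo:query_stat_dlx/1, i.e. a
%% {Messages, Bytes} pair describing what the dead-letter part of the state
%% currently holds. With the helpers imported at the top of this suite, a
%% bare check looks like:
%%
%%   [{Msgs, Bytes}] = dirty_query([Server], ra_name(SourceQ),
%%                                 fun rabbit_fifo:query_stat_dlx/1)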
+ +declare_topology(Config, AdditionalQArgs) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + DLX = <<"dead-ex">>, + QArgs = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + auto_delete = false, + arguments = lists:keymerge(1, AdditionalQArgs, QArgs)}), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + {Server, Ch, SourceQ, TargetQ, DLX}. + +delete_topology({_Server, Ch, SourceQ, TargetQ, DLX}) -> + #'queue.unbind_ok'{} = amqp_channel:call(Ch, #'queue.unbind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}), + #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = SourceQ}), + #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = DLX}). + +%%TODO move to rabbitmq_ct_helpers/include/rabbit_assert.hrl +consistently(TestObj) -> + consistently(TestObj, 100, 10). + +consistently(_, _, 0) -> + ok; +consistently({_Line, Assertion} = TestObj, PollInterval, PollCount) -> + Assertion(), + timer:sleep(PollInterval), + consistently(TestObj, PollInterval, PollCount - 1). + +eventually(TestObj) -> + eventually(TestObj, 100, 10). + +eventually({Line, _}, _, 0) -> + erlang:error({assert_timeout, + [{file, ?FILE}, + {line, ?LINE}, + {assertion_line, Line} + ]}); +eventually({Line, Assertion} = TestObj, PollInterval, PollCount) -> + try + Assertion() + catch error:_ = Err -> + ct:pal(?LOW_IMPORTANCE, + "Retrying in ~b ms for ~b more times in file ~s, line ~b due to failed assertion in line ~b: ~p", + [PollInterval, PollCount - 1, ?FILE, ?LINE, Line, Err]), + timer:sleep(PollInterval), + eventually(TestObj, PollInterval, PollCount - 1) + end. From 795b5b576c187861f76ba54b7e605a8788057fd5 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Fri, 12 Nov 2021 15:59:51 +0000 Subject: [PATCH 09/97] QQ: better handle repeated requeues. Repeated re-queues are a problem for quorums queues. A consumer that repeatedly re-queues all messages may eventually cause the quorum queue to run out of messages as the log cannot be truncated if messages are not consumed in a fifo-ish order. This change addresses this as follows: All return commands are not send directly to the log but instead are sent to the aux machine which evaluates how the return should be processed. After this decision it will use the new "append" effect to add it to the log as before. If the queue has a delivery_limit configured the return command will be appended as before and the message will be returned to the front of the queue as before. This is safe as eventually the message will be dropped or dead-letter when it reaches it's delivery limit. 
If the queue has _not_ configured a delivery_limit the return will be turned into a new command that includes the original message in full and will be returne to the back of the queue. This ensure that messages in the queue will be cycled in fifo-ish order and thus snapshots will be taken. --- deps/rabbit/src/rabbit_fifo.erl | 124 ++++++++++++++++++++++-- deps/rabbit/src/rabbit_fifo.hrl | 2 +- deps/rabbit/src/rabbit_fifo_client.erl | 14 +++ deps/rabbit/test/quorum_queue_SUITE.erl | 18 ++-- erlang_ls.config | 18 ++-- 5 files changed, 155 insertions(+), 21 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 827d62dccdb0..75b00b0c74e9 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1,4 +1,4 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public +%% This Source Code Form is subject tconsumer_ido the terms of the Mozilla Public %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% @@ -77,6 +77,9 @@ -record(enqueue, {pid :: option(pid()), seq :: option(msg_seqno()), msg :: raw_msg()}). +-record(requeue, {consumer_id :: consumer_id(), + msg_id :: msg_id(), + msg :: indexed_msg()}). -record(register_enqueuer, {pid :: pid()}). -record(checkout, {consumer_id :: consumer_id(), spec :: checkout_spec(), @@ -98,6 +101,7 @@ -opaque protocol() :: #enqueue{} | + #requeue{} | #register_enqueuer{} | #checkout{} | #settle{} | @@ -262,6 +266,52 @@ apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, _ -> {State, ok} end; +apply(#{index := Idx} = Meta, + #requeue{consumer_id = ConsumerId, + msg_id = MsgId, + %% as we read the message from disk it is already + %% an inmemory message + msg = ?INDEX_MSG(OldIdx, ?MSG(_Header, _RawMsg) = Msg)}, + #?MODULE{consumers = Cons0, + messages = Messages, + ra_indexes = Indexes0} = State00) -> + case Cons0 of + #{ConsumerId := #consumer{checked_out = Checked0} = Con0} + when is_map_key(MsgId, Checked0) -> + %% construct an index message with the current raft index + %% and update delivery count before adding it to the message queue + ?INDEX_MSG(_, ?MSG(Header, _)) = IdxMsg0 = + update_msg_header(delivery_count, fun incr/1, 1, ?INDEX_MSG(Idx, Msg)), + + State0 = add_bytes_return(Header, State00), + {State1, IdxMsg} = + case evaluate_memory_limit(Header, State0) of + true -> + % indexed message with header map + {State0, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}; + false -> + {add_in_memory_counts(Header, State0), IdxMsg0} + end, + Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), + credit = increase_credit(Con0, 1)}, + State2 = update_or_remove_sub( + Meta, + ConsumerId, + Con, + State1#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), + messages = lqueue:in(IdxMsg, Messages)}), + + %% We have to increment the enqueue counter to ensure release cursors + %% are generated + State3 = incr_enqueue_count(State2), + + {State, Ret, Effs} = checkout(Meta, State0, State3, []), + update_smallest_raft_index(Idx, Ret, + maybe_store_dehydrated_state(Idx, State), + Effs); + _ -> + {State00, ok} + end; apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, drain = Drain, consumer_id = ConsumerId}, #?MODULE{consumers = Cons0, @@ -1016,11 +1066,48 @@ init_aux(Name) when is_atom(Name) -> capacity = {inactive, Now, 1, 1.0}}. 
handle_aux(leader, _, garbage_collection, State, Log, MacState) -> - % ra_log_wal:force_roll_over(ra_log_wal), {no_reply, force_eval_gc(Log, MacState, State), Log}; handle_aux(follower, _, garbage_collection, State, Log, MacState) -> - % ra_log_wal:force_roll_over(ra_log_wal), {no_reply, force_eval_gc(Log, MacState, State), Log}; +handle_aux(leader, cast, {#return{msg_ids = MsgIds, + consumer_id = ConsumerId}, Corr, Pid}, + Aux0, Log0, #?MODULE{cfg = #cfg{delivery_limit = undefined}, + consumers = Consumers, + ra_indexes = _Indexes}) -> + case Consumers of + #{ConsumerId := #consumer{checked_out = Checked}} -> + {Log, ToReturn} = + maps:fold( + fun (MsgId, ?INDEX_MSG(Idx, ?DISK_MSG(Header)), {L0, Acc}) -> + %% it is possible this is not found if the consumer + %% crashed and the message got removed + %% TODO: handle when log entry is not found + case ra_log:fetch(Idx, L0) of + {{_, _, {_, _, Cmd, _}}, L} -> + Msg = case Cmd of + #enqueue{msg = M} -> M; + #requeue{msg = ?INDEX_MSG(_, ?MSG(_H, M))} -> + M + end, + IdxMsg = ?INDEX_MSG(Idx, ?MSG(Header, Msg)), + {L, [{MsgId, IdxMsg} | Acc]}; + {undefined, L} -> + {L, Acc} + end; + (MsgId, IdxMsg, {L0, Acc}) -> + {L0, [{MsgId, IdxMsg} | Acc]} + end, {Log0, []}, maps:with(MsgIds, Checked)), + + Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, + lists:sort(ToReturn), []), + {no_reply, Aux0, Log, Appends}; + _ -> + {no_reply, Aux0, Log0} + end; +handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, + Aux0, Log, #?MODULE{}) -> + %% for returns with a delivery limit set we can just return as before + {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; handle_aux(_RaState, cast, eval, Aux0, Log, _MacState) -> {no_reply, Aux0, Log}; handle_aux(_RaState, cast, Cmd, #aux{capacity = Use0} = Aux0, @@ -1810,7 +1897,7 @@ return_one(Meta, MsgId, Msg0, dead_letter_handler = DLH}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), - Msg = update_msg_header(delivery_count, fun (C) -> C + 1 end, 1, Msg0), + Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), Header = get_msg_header(Msg), case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> @@ -2055,7 +2142,11 @@ delivery_effect({CTag, CPid}, IdxMsgs, InMemMsgs) -> {RaftIdxs, Data} = lists:unzip(IdxMsgs), {log, RaftIdxs, fun(Log) -> - Msgs0 = lists:zipwith(fun ({enqueue, _, _, Msg}, {MsgId, Header}) -> + Msgs0 = lists:zipwith(fun + (#enqueue{msg = Msg}, {MsgId, Header}) -> + {MsgId, {Header, Msg}}; + (#requeue{msg = ?INDEX_MSG(_, ?MSG(_, Msg))}, + {MsgId, Header}) -> {MsgId, {Header, Msg}} end, Log, Data), Msgs = case InMemMsgs of @@ -2088,7 +2179,7 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> {ConsumerMsg, State0} -> %% there are consumers waiting to be serviced %% process consumer checkout - case maps:get(ConsumerId, Cons0) of + case maps:get(ConsumerId, Cons0, error) of #consumer{credit = 0} -> %% no credit but was still on queue %% can happen when draining @@ -2638,3 +2729,24 @@ subtract_in_memory(Msgs, State) -> (?PREFIX_MEM_MSG(H), S) -> subtract_in_memory_counts(H, S) end, State, Msgs). 
+ +make_requeue(ConsumerId, Notify, [{MsgId, Msg}], Acc) -> + lists:reverse([{append, + #requeue{consumer_id = ConsumerId, + msg_id = MsgId, + msg = Msg}, + Notify} + | Acc]); +make_requeue(ConsumerId, Notify, [{MsgId, Msg} | Rem], Acc) -> + make_requeue(ConsumerId, Notify, Rem, + [{append, + #requeue{consumer_id = ConsumerId, + msg_id = MsgId, + msg = Msg}, + noreply} + | Acc]); +make_requeue(_ConsumerId, _Notify, [], []) -> + []. + +incr(I) -> + I + 1. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index ca37fbca7981..85a76e5ce2c8 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -130,7 +130,7 @@ -record(enqueuer, {next_seqno = 1 :: msg_seqno(), % out of order enqueues - sorted list - pending = [] :: [{msg_seqno(), ra:index(), milliseconds(), raw_msg()}], + unused, status = up :: up | suspected_down, %% it is useful to have a record of when this was blocked diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 0faf32fd300d..cb8bc58b8670 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -859,6 +859,19 @@ next_enqueue_seq(#state{next_enqueue_seq = Seq} = State) -> consumer_id(ConsumerTag) -> {ConsumerTag, self()}. +send_command(Server, Correlation, Command, _Priority, + #state{pending = Pending, + cfg = #cfg{soft_limit = SftLmt}} = State0) + when element(1, Command) == return -> + %% returns are sent to the aux machine for pre-evaluation + {Seq, State} = next_seq(State0), + ok = ra:cast_aux_command(Server, {Command, Seq, self()}), + Tag = case maps:size(Pending) >= SftLmt of + true -> slow; + false -> ok + end, + {Tag, State#state{pending = Pending#{Seq => {Correlation, Command}}, + slow = Tag == slow}}; send_command(Server, Correlation, Command, Priority, #state{pending = Pending, cfg = #cfg{soft_limit = SftLmt}} = State0) -> @@ -871,6 +884,7 @@ send_command(Server, Correlation, Command, Priority, {Tag, State#state{pending = Pending#{Seq => {Correlation, Command}}, slow = Tag == slow}}. + resend_command(Node, Correlation, Command, #state{pending = Pending} = State0) -> {Seq, State} = next_seq(State0), diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 668295caece6..d3b897319796 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -80,11 +80,12 @@ groups() -> quorum_cluster_size_7, node_removal_is_not_quorum_critical ]}, - {clustered_with_partitions, [], [ - reconnect_consumer_and_publish, - reconnect_consumer_and_wait, - reconnect_consumer_and_wait_channel_down - ]} + {clustered_with_partitions, [], + [ + reconnect_consumer_and_publish, + reconnect_consumer_and_wait, + reconnect_consumer_and_wait_channel_down + ]} ]} ]. 
@@ -1893,7 +1894,11 @@ subscribe_redelivery_count(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + declare(Ch, QQ, + [ + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-max-in-memory-length">>, long, 0} + ])), RaName = ra_name(QQ), publish(Ch, QQ), @@ -1923,6 +1928,7 @@ subscribe_redelivery_count(Config) -> multiple = false, requeue = true}) after 5000 -> + flush(1), exit(basic_deliver_timeout_2) end, diff --git a/erlang_ls.config b/erlang_ls.config index e1a08708028b..a4f5d4c4a9e4 100644 --- a/erlang_ls.config +++ b/erlang_ls.config @@ -3,23 +3,25 @@ # otp_path: "/path/to/otp/lib/erlang" deps_dirs: - "deps/*" - - "deps/rabbit/apps/*" diagnostics: - # disabled: - # - bound_var_in_pattern + disabled: + - bound_var_in_pattern enabled: - crossref - dialyzer + # - elvis - compiler # - elvis include_dirs: - "deps" - "deps/*/include" -# lenses: -# enabled: -# - ct-run-test -# - show-behaviour-usages -# disabled: [] +lenses: + enabled: + - ct-run-test + - show-behaviour-usages + - suggest-spec + - function-references + disabled: [] # macros: # - name: DEFINED_WITH_VALUE # value: 42 From 5d35cad5bf0589ae5310edaac0e38563f76c2bbf Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 29 Nov 2021 13:19:58 +0000 Subject: [PATCH 10/97] QQ: resend protocol refacor Instead of resending individual commands when a gap is detect we Resend all pending commands whenever a leader change is detected. The quorum queue will drop any duplicate requeues based on its enqueue sequence number and all other pipelined commands are idempotent. This makes the resend protocol simpler and removes the need for keeping pending enqueues in the state machine. remove unused pending field from enqueuer record Also dialyzer fixes. --- deps/rabbit/src/rabbit_fifo.erl | 62 +--- deps/rabbit/src/rabbit_fifo.hrl | 3 +- deps/rabbit/src/rabbit_fifo_client.erl | 61 ++-- deps/rabbit/src/rabbit_fifo_dlx.erl | 36 +- deps/rabbit/src/rabbit_fifo_dlx_client.erl | 6 +- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 19 +- deps/rabbit/src/rabbit_fifo_v1.erl | 352 ++++++++++---------- deps/rabbit/src/rabbit_fifo_v1.hrl | 5 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 16 +- 9 files changed, 272 insertions(+), 288 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 75b00b0c74e9..21e2151c8d6d 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -812,6 +812,7 @@ convert_v1_to_v2(V1State) -> max_in_memory_bytes = rabbit_fifo_v1:get_cfg_field(max_in_memory_bytes, V1State), expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) }, + #?MODULE{ cfg = Cfg, messages = MessagesV2, @@ -839,24 +840,10 @@ purge_node(Meta, Node, State, Effects) -> end, {State, Effects}, all_pids_for(Node, State)). %% any downs that re not noconnection -handle_down(#{system_time := DownTs} = Meta, Pid, #?MODULE{consumers = Cons0, - enqueuers = Enqs0} = State0) -> - % Remove any enqueuer for the same pid and enqueue any pending messages - % This should be ok as we won't see any more enqueues from this pid - State1 = case maps:take(Pid, Enqs0) of - {#enqueuer{pending = Pend}, Enqs} -> - lists:foldl(fun ({_, RIdx, Ts, RawMsg}, S) -> - enqueue(RIdx, Ts, RawMsg, S); - ({_, RIdx, RawMsg}, S) -> - %% This is an edge case: It is an out-of-order delivery - %% from machine version 1. 
- %% If message TTL is configured, expiration will be delayed - %% for the time the message has been pending. - enqueue(RIdx, DownTs, RawMsg, S) - end, State0#?MODULE{enqueuers = Enqs}, Pend); - error -> - State0 - end, +handle_down(Meta, Pid, #?MODULE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> + % Remove any enqueuer for the down pid + State1 = State0#?MODULE{enqueuers = maps:remove(Pid, Enqs0)}, {Effects1, State2} = handle_waiting_consumer_down(Pid, State1), % return checked out messages to main queue % Find the consumers for the down pid @@ -1019,7 +1006,6 @@ overview(#?MODULE{consumers = Cons, num_enqueuers => maps:size(Enqs), num_ready_messages => messages_ready(State), num_in_memory_ready_messages => InMemReady, - num_pending_messages => messages_pending(State), num_messages => messages_total(State), num_release_cursors => lqueue:len(Cursors), release_cursors => [{I, messages_total(S)} || {_, I, S} <- lqueue:to_list(Cursors)], @@ -1341,11 +1327,6 @@ usage(Name) when is_atom(Name) -> %%% Internal -messages_pending(#?MODULE{enqueuers = Enqs}) -> - maps:fold(fun(_, #enqueuer{pending = P}, Acc) -> - length(P) + Acc - end, 0, Enqs). - messages_ready(#?MODULE{messages = M, prefix_msgs = {RCnt, _R, PCnt, _P}, returns = R}) -> @@ -1521,6 +1502,8 @@ apply_enqueue(#{index := RaftIdx, State2 = incr_enqueue_count(incr_total(State1)), {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects}; + {out_of_sequence, State, Effects} -> + {State, not_enqueued, Effects}; {duplicate, State, Effects} -> {State, ok, Effects} end. @@ -1645,23 +1628,12 @@ maybe_store_dehydrated_state(RaftIdx, maybe_store_dehydrated_state(_RaftIdx, State) -> State. -enqueue_pending(From, - #enqueuer{next_seqno = Next, - pending = [{Next, RaftIdx, Ts, RawMsg} | Pending]} = Enq0, - State0) -> - State = enqueue(RaftIdx, Ts, RawMsg, State0), - Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending}, - enqueue_pending(From, Enq, State); -enqueue_pending(From, Enq, #?MODULE{enqueuers = Enqueuers0} = State) -> - State#?MODULE{enqueuers = Enqueuers0#{From => Enq}}. - maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, State0) -> % direct enqueue without tracking State = enqueue(RaftIdx, Ts, RawMsg, State0), {ok, State, Effects}; maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, - #?MODULE{enqueuers = Enqueuers0, - ra_indexes = Indexes0} = State0) -> + #?MODULE{enqueuers = Enqueuers0} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> @@ -1673,22 +1645,14 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, % it is the next expected seqno State1 = enqueue(RaftIdx, Ts, RawMsg, State0), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, - State = enqueue_pending(From, Enq, State1), + State = State1#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, {ok, State, Effects0}; - #enqueuer{next_seqno = Next, - pending = Pending0} = Enq0 + #enqueuer{next_seqno = Next} when MsgSeqNo > Next -> - % out of order delivery - Pending = [{MsgSeqNo, RaftIdx, Ts, RawMsg} | Pending0], - Enq = Enq0#enqueuer{pending = lists:sort(Pending)}, - %% if the enqueue it out of order we need to mark it in the - %% index - Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), - {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}, - ra_indexes = Indexes}, Effects0}; + %% TODO: when can this happen? 
+ {out_of_sequence, State0, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo =< Next -> - % duplicate delivery - remove the raft index from the ra_indexes - % map as it was added earlier + % duplicate delivery {duplicate, State0, Effects0} end. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 85a76e5ce2c8..27d286da73d0 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -131,8 +131,7 @@ {next_seqno = 1 :: msg_seqno(), % out of order enqueues - sorted list unused, - status = up :: up | - suspected_down, + status = up :: up | suspected_down, %% it is useful to have a record of when this was blocked %% so that we can retry sending the block effect if %% the publisher did not receive the initial one diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index cb8bc58b8670..3b084acbc544 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -42,7 +42,6 @@ -define(COMMAND_TIMEOUT, 30000). -type seq() :: non_neg_integer(). --type maybe_seq() :: integer(). -type action() :: {send_credit_reply, Available :: non_neg_integer()} | {send_drained, CTagCredit :: {rabbit_fifo:consumer_tag(), non_neg_integer()}}. @@ -66,10 +65,6 @@ leader :: undefined | ra:server_id(), queue_status :: undefined | go | reject_publish, next_seq = 0 :: seq(), - %% Last applied is initialise to -1 to note that no command has yet been - %% applied, but allowing to resend messages if the first ones on the sequence - %% are lost (messages are sent from last_applied + 1) - last_applied = -1 :: maybe_seq(), next_enqueue_seq = 1 :: seq(), %% indicates that we've exceeded the soft limit slow = false :: boolean(), @@ -605,18 +600,26 @@ handle_ra_event(Leader, {machine, leader_change}, #state{leader = Leader} = State) -> %% leader already known {ok, State, []}; -handle_ra_event(Leader, {machine, leader_change}, State0) -> +handle_ra_event(Leader, {machine, leader_change}, + #state{leader = OldLeader} = State0) -> %% we need to update leader %% and resend any pending commands + rabbit_log:debug("~s: Detected QQ leader change from ~w to ~w", + [?MODULE, OldLeader, Leader]), + State = resend_all_pending(State0#state{leader = Leader}), + {ok, State, []}; +handle_ra_event(_From, {rejected, {not_leader, Leader, _Seq}}, + #state{leader = Leader} = State) -> + {ok, State, []}; +handle_ra_event(_From, {rejected, {not_leader, Leader, _Seq}}, + #state{leader = OldLeader} = State0) -> + rabbit_log:debug("~s: Detected QQ leader change (rejection) from ~w to ~w", + [?MODULE, OldLeader, Leader]), State = resend_all_pending(State0#state{leader = Leader}), {ok, cancel_timer(State), []}; -handle_ra_event(_From, {rejected, {not_leader, undefined, _Seq}}, State0) -> +handle_ra_event(_From, {rejected, {not_leader, _UndefinedMaybe, _Seq}}, State0) -> % TODO: how should these be handled? re-sent on timer or try random {ok, State0, []}; -handle_ra_event(_From, {rejected, {not_leader, Leader, Seq}}, State0) -> - State1 = State0#state{leader = Leader}, - State = resend(Seq, State1), - {ok, State, []}; handle_ra_event(_, timeout, #state{cfg = #cfg{servers = Servers}} = State0) -> case find_leader(Servers) of undefined -> @@ -663,29 +666,27 @@ try_process_command([Server | Rem], Cmd, try_process_command(Rem, Cmd, State) end. 
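%% resend_all_pending/1 itself is not shown in this hunk; a plausible shape,
%% assuming it simply replays every pending command through resend/2 (kept
%% below in this diff) in ascending sequence order, would be:
%%
%%   resend_all_pending(#state{pending = Pend} = State) ->
%%       Seqs = lists:sort(maps:keys(Pend)),
%%       lists:foldl(fun resend/2, State, Seqs).
%%
%% Duplicate enqueues replayed this way are dropped by the enqueuer sequence
%% number check in maybe_enqueue/7 above, and the remaining pipelined commands
%% are idempotent, as the commit message for this patch points out.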
-seq_applied({Seq, MaybeAction}, - {Corrs, Actions0, #state{last_applied = Last} = State0}) - when Seq > Last -> - State1 = do_resends(Last+1, Seq-1, State0), - {Actions, State} = maybe_add_action(MaybeAction, Actions0, State1), +seq_applied({Seq, Response}, + {Corrs, Actions0, #state{} = State0}) -> + %% sequences aren't guaranteed to be applied in order as enqueues are + %% low priority commands and may be overtaken by others with a normal priority. + {Actions, State} = maybe_add_action(Response, Actions0, State0), case maps:take(Seq, State#state.pending) of {{undefined, _}, Pending} -> - {Corrs, Actions, State#state{pending = Pending, - last_applied = Seq}}; - {{Corr, _}, Pending} -> - {[Corr | Corrs], Actions, State#state{pending = Pending, - last_applied = Seq}}; - error -> - % must have already been resent or removed for some other reason - % still need to update last_applied or we may inadvertently resend - % stuff later - {Corrs, Actions, State#state{last_applied = Seq}} + {Corrs, Actions, State#state{pending = Pending}}; + {{Corr, _}, Pending} + when Response /= not_enqueued -> + {[Corr | Corrs], Actions, State#state{pending = Pending}}; + _ -> + {Corrs, Actions, State#state{}} end; seq_applied(_Seq, Acc) -> Acc. maybe_add_action(ok, Acc, State) -> {Acc, State}; +maybe_add_action(not_enqueued, Acc, State) -> + {Acc, State}; maybe_add_action({multi, Actions}, Acc0, State0) -> lists:foldl(fun (Act, {Acc, State}) -> maybe_add_action(Act, Acc, State) @@ -702,10 +703,10 @@ maybe_add_action(Action, Acc, State) -> %% anything else is assumed to be an action {[Action | Acc], State}. -do_resends(From, To, State) when From =< To -> - lists:foldl(fun resend/2, State, lists:seq(From, To)); -do_resends(_, _, State) -> - State. +% do_resends(From, To, State) when From =< To -> +% lists:foldl(fun resend/2, State, lists:seq(From, To)); +% do_resends(_, _, State) -> +% State. % resends a command with a new sequence number resend(OldSeq, #state{pending = Pending0, leader = Leader} = State) -> diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index cc41733151b7..04757b772140 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -2,16 +2,26 @@ -include("rabbit_fifo_dlx.hrl"). -include("rabbit_fifo.hrl"). +-compile({no_auto_import, [apply/3]}). % client API, e.g. for rabbit_fifo_dlx_client -export([make_checkout/2, make_settle/1]). % called by rabbit_fifo delegating DLX handling to this module --export([init/0, apply/2, discard/3, overview/1, - checkout/1, state_enter/4, - start_worker/2, terminate_worker/1, cleanup/1, purge/1, - consumer_pid/1, dehydrate/1, normalize/1, +-export([init/0, + apply/2, + discard/3, + overview/1, + checkout/1, + state_enter/4, + start_worker/2, + terminate_worker/1, + cleanup/1, + purge/1, + consumer_pid/1, + dehydrate/1, + normalize/1, stat/1]). %% This module handles the dead letter (DLX) part of the rabbit_fifo state machine. @@ -29,10 +39,14 @@ prefetch :: non_neg_integer() }). -record(settle, {msg_ids :: [msg_id()]}). --opaque protocol() :: {dlx, #checkout{} | #settle{}}. --opaque state() :: #?MODULE{}. --export_type([state/0, protocol/0, reason/0]). +-type command() :: #checkout{} | #settle{}. +-type protocol() :: {dlx, command()}. +-type state() :: #?MODULE{}. +-export_type([state/0, + protocol/0, + reason/0]). +-spec init() -> state(). init() -> #?MODULE{}. 
@@ -61,6 +75,8 @@ overview0(Discards, Checked, MsgBytes, MsgBytesCheckout) -> discard_message_bytes => MsgBytes, discard_checkout_message_bytes => MsgBytesCheckout}. +-spec stat(state()) -> + {non_neg_integer(), non_neg_integer()}. stat(#?MODULE{consumer = Con, discards = Discards, msg_bytes = MsgBytes, @@ -75,6 +91,8 @@ stat(#?MODULE{consumer = Con, Bytes = MsgBytes + MsgBytesCheckout, {Num, Bytes}. +-spec apply(command(), state()) -> + {state(), ok | list()}. % TODO: refine return type apply(#checkout{consumer = RegName, prefetch = Prefetch}, #?MODULE{consumer = undefined} = State0) -> @@ -119,6 +137,8 @@ apply(#settle{msg_ids = MsgIds}, %%TODO delete delivery_count header to save space? %% It's not needed anymore. +-spec discard(term(), term(), state()) -> + state(). discard(Msg, Reason, #?MODULE{discards = Discards0, msg_bytes = MsgBytes0} = State) -> Discards = lqueue:in({Reason, Msg}, Discards0), @@ -126,6 +146,8 @@ discard(Msg, Reason, #?MODULE{discards = Discards0, State#?MODULE{discards = Discards, msg_bytes = MsgBytes}. +-spec checkout(state()) -> + {state(), list()}. checkout(#?MODULE{consumer = undefined, discards = Discards} = State) -> case lqueue:is_empty(Discards) of diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index 4b9733b769bc..f4c5a0598337 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -4,11 +4,11 @@ overview/1]). -record(state,{ - queue_resource :: rabbit_tyes:r(queue), + queue_resource :: rabbit_types:r(queue), leader :: ra:server_id(), - last_msg_id :: non_neg_integer | -1 + last_msg_id :: non_neg_integer() | -1 }). --opaque state() :: #state{}. +-type state() :: #state{}. -export_type([state/0]). checkout(RegName, QResource, Leader, NumUnsettled) -> diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index ccf4fd1067ad..6114b8c18589 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -16,7 +16,7 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). --behaviour(gen_server2). +-behaviour(gen_server). -export([start_link/2]). %% gen_server2 callbacks @@ -70,8 +70,8 @@ exchange_ref, %% configured (x-)dead-letter-routing-key of source queue routing_key, - dlx_client_state :: rabbit_fifo_dlx_client:state(), - queue_type_state :: rabbit_queue_type:state(), + dlx_client_state :: undefined | rabbit_fifo_dlx_client:state(), + queue_type_state :: undefined | rabbit_queue_type:state(), %% Consumed messages for which we have not received all publisher confirms yet. %% Therefore, they have not been ACKed yet to the consumer queue. %% This buffer contains at most PREFETCH pending messages at any given point in time. @@ -98,7 +98,8 @@ start_link(QRef, RegName) -> ?MODULE, {QRef, RegName}, [{hibernate_after, ?HIBERNATE_AFTER}]). --spec init({rabbit_amqqueue:name(), atom()}) -> {ok, undefined, {continue, {rabbit_amqqueue:name(), atom()}}}. +% -spec init({rabbit_amqqueue:name(), atom()}) -> +% {ok, undefined, {continue, {rabbit_amqqueue:name(), atom()}}}. init(Arg) -> {ok, undefined, {continue, Arg}}. 
@@ -109,17 +110,17 @@ handle_continue({QRef, RegName}, undefined) -> SettleTimeout = application:get_env(rabbit, dead_letter_worker_publisher_confirm_timeout_ms, ?DEFAULT_SETTLE_TIMEOUT), - State = lookup_topology(#state{queue_ref = QRef}), + State = lookup_topology(#state{queue_ref = QRef, + registered_name = RegName, + queue_type_state = rabbit_queue_type:init(), + settle_timeout = SettleTimeout}), {ok, Q} = rabbit_amqqueue:lookup(QRef), {ClusterName, _MaybeOldLeaderNode} = amqqueue:get_pid(Q), {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(RegName, QRef, {ClusterName, node()}, Prefetch), - {noreply, State#state{registered_name = RegName, - dlx_client_state = ConsumerState, - queue_type_state = rabbit_queue_type:init(), - settle_timeout = SettleTimeout}}. + {noreply, State#state{dlx_client_state = ConsumerState}}. terminate(_Reason, _State) -> %%TODO cancel timer? diff --git a/deps/rabbit/src/rabbit_fifo_v1.erl b/deps/rabbit/src/rabbit_fifo_v1.erl index 51150b0f7089..065c3dd4e936 100644 --- a/deps/rabbit/src/rabbit_fifo_v1.erl +++ b/deps/rabbit/src/rabbit_fifo_v1.erl @@ -113,7 +113,7 @@ -type client_msg() :: delivery(). %% the messages `rabbit_fifo' can send to consumers. --opaque state() :: #?MODULE{}. +-opaque state() :: #?STATE{}. -export_type([protocol/0, delivery/0, @@ -135,7 +135,7 @@ -spec init(config()) -> state(). init(#{name := Name, queue_resource := Resource} = Conf) -> - update_config(Conf, #?MODULE{cfg = #cfg{name = Name, + update_config(Conf, #?STATE{cfg = #cfg{name = Name, resource = Resource}}). update_config(Conf, State) -> @@ -155,11 +155,11 @@ update_config(Conf, State) -> false -> competing end, - Cfg = State#?MODULE.cfg, + Cfg = State#?STATE.cfg, RCISpec = {RCI, RCI}, LastActive = maps:get(created, Conf, undefined), - State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, + State#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, dead_letter_handler = DLH, become_leader_handler = BLH, overflow_strategy = Overflow, @@ -184,7 +184,7 @@ apply(Meta, #enqueue{pid = From, seq = Seq, msg = RawMsg}, State00) -> apply_enqueue(Meta, From, Seq, RawMsg, State00); apply(_Meta, #register_enqueuer{pid = Pid}, - #?MODULE{enqueuers = Enqueuers0, + #?STATE{enqueuers = Enqueuers0, cfg = #cfg{overflow_strategy = Overflow}} = State0) -> State = case maps:is_key(Pid, Enqueuers0) of @@ -192,7 +192,7 @@ apply(_Meta, #register_enqueuer{pid = Pid}, %% if the enqueuer exits just echo the overflow state State0; false -> - State0#?MODULE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} + State0#?STATE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} end, Res = case is_over_limit(State) of true when Overflow == reject_publish -> @@ -203,7 +203,7 @@ apply(_Meta, #register_enqueuer{pid = Pid}, {State, Res, [{monitor, process, Pid}]}; apply(Meta, #settle{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State) -> + #?STATE{consumers = Cons0} = State) -> case Cons0 of #{ConsumerId := Con0} -> % need to increment metrics before completing as any snapshot @@ -215,7 +215,7 @@ apply(Meta, end; apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State0) -> + #?STATE{consumers = Cons0} = State0) -> case Cons0 of #{ConsumerId := Con0} -> Discarded = maps:with(MsgIds, Con0#consumer.checked_out), @@ -226,7 +226,7 @@ apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, {State0, ok} end; apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State) -> + #?STATE{consumers = 
Cons0} = State) -> case Cons0 of #{ConsumerId := #consumer{checked_out = Checked0}} -> Returned = maps:with(MsgIds, Checked0), @@ -236,7 +236,7 @@ apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, end; apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, drain = Drain, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, service_queue = ServiceQueue0, waiting_consumers = Waiting0} = State0) -> case Cons0 of @@ -250,7 +250,7 @@ apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, Cons = maps:put(ConsumerId, Con1, Cons0), {State1, ok, Effects} = checkout(Meta, State0, - State0#?MODULE{service_queue = ServiceQueue, + State0#?STATE{service_queue = ServiceQueue, consumers = Cons}, [], false), Response = {send_credit_reply, messages_ready(State1)}, %% by this point all checkouts for the updated credit value @@ -261,16 +261,16 @@ apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, {State1, Response, Effects}; true -> Con = #consumer{credit = PostCred} = - maps:get(ConsumerId, State1#?MODULE.consumers), + maps:get(ConsumerId, State1#?STATE.consumers), %% add the outstanding credit to the delivery count DeliveryCount = Con#consumer.delivery_count + PostCred, Consumers = maps:put(ConsumerId, Con#consumer{delivery_count = DeliveryCount, credit = 0}, - State1#?MODULE.consumers), + State1#?STATE.consumers), Drained = Con#consumer.credit, {CTag, _} = ConsumerId, - {State1#?MODULE{consumers = Consumers}, + {State1#?STATE{consumers = Consumers}, %% returning a multi response with two client actions %% for the channel to execute {multi, [Response, {send_drained, {CTag, Drained}}]}, @@ -284,7 +284,7 @@ apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, %% grant the credit C = max(0, RemoteDelCnt + NewCredit - DelCnt), Con = Con0#consumer{credit = C}, - State = State0#?MODULE{waiting_consumers = + State = State0#?STATE{waiting_consumers = [{ConsumerId, Con} | Waiting]}, {State, {send_credit_reply, messages_ready(State)}}; false -> @@ -295,16 +295,16 @@ apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, {State0, ok} end; apply(_, #checkout{spec = {dequeue, _}}, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> {State0, {error, {unsupported, single_active_consumer}}}; apply(#{index := Index, system_time := Ts, from := From} = Meta, #checkout{spec = {dequeue, Settlement}, meta = ConsumerMeta, consumer_id = ConsumerId}, - #?MODULE{consumers = Consumers} = State00) -> + #?STATE{consumers = Consumers} = State00) -> %% dequeue always updates last_active - State0 = State00#?MODULE{last_active = Ts}, + State0 = State00#?STATE{last_active = Ts}, %% all dequeue operations result in keeping the queue from expiring Exists = maps:is_key(ConsumerId, Consumers), case messages_ready(State0) of @@ -363,7 +363,7 @@ apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, Priority, State0), checkout(Meta, State0, State1, [{monitor, process, Pid}]); apply(#{index := Index}, #purge{}, - #?MODULE{ra_indexes = Indexes0, + #?STATE{ra_indexes = Indexes0, returns = Returns, messages = Messages} = State0) -> Total = messages_ready(State0), @@ -372,7 +372,7 @@ apply(#{index := Index}, #purge{}, Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1, [I || {_, {I, _}} <- lqueue:to_list(Returns)]), - State1 = State0#?MODULE{ra_indexes = Indexes, 
+ State1 = State0#?STATE{ra_indexes = Indexes, messages = lqueue:new(), returns = lqueue:new(), msg_bytes_enqueue = 0, @@ -387,7 +387,7 @@ apply(#{index := Index}, #purge{}, apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = Waiting0, enqueuers = Enqs0} = State0) -> @@ -407,13 +407,13 @@ apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, Cid, C0#consumer{credit = Credit}), %% if the consumer was cancelled there is a chance it got %% removed when returning hence we need to be defensive here - Waiting = case St#?MODULE.consumers of + Waiting = case St#?STATE.consumers of #{Cid := C} -> Waiting0 ++ [{Cid, C}]; _ -> Waiting0 end, - {St#?MODULE{consumers = maps:remove(Cid, St#?MODULE.consumers), + {St#?STATE{consumers = maps:remove(Cid, St#?STATE.consumers), waiting_consumers = Waiting, last_active = Ts}, Effs1}; @@ -424,7 +424,7 @@ apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, suspected_down), %% select a new consumer from the waiting queue and run a checkout - State2 = State1#?MODULE{waiting_consumers = WaitingConsumers}, + State2 = State1#?STATE{waiting_consumers = WaitingConsumers}, {State, Effects1} = activate_next_consumer(State2, Effects0), %% mark any enquers as suspected @@ -433,9 +433,9 @@ apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, (_, E) -> E end, Enqs0), Effects = [{monitor, node, Node} | Effects1], - checkout(Meta, State0, State#?MODULE{enqueuers = Enqs}, Effects); + checkout(Meta, State0, State#?STATE{enqueuers = Enqs}, Effects); apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, enqueuers = Enqs0} = State0) -> %% A node has been disconnected. This doesn't necessarily mean that %% any processes on this node are down, they _may_ come back so here @@ -471,18 +471,18 @@ apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, % comes back, then re-issue all monitors and discover the final fate of % these processes - Effects = case maps:size(State#?MODULE.consumers) of + Effects = case maps:size(State#?STATE.consumers) of 0 -> [{aux, inactive}, {monitor, node, Node}]; _ -> [{monitor, node, Node}] end ++ Effects1, - checkout(Meta, State0, State#?MODULE{enqueuers = Enqs, + checkout(Meta, State0, State#?STATE{enqueuers = Enqs, last_active = Ts}, Effects); apply(Meta, {down, Pid, _Info}, State0) -> {State, Effects} = handle_down(Meta, Pid, State0), checkout(Meta, State0, State, Effects); -apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, +apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0, enqueuers = Enqs0, service_queue = _SQ0} = State0) -> %% A node we are monitoring has come back. 
@@ -511,7 +511,7 @@ apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, Acc end, {State0, Monitors}, Cons0), Waiting = update_waiting_consumer_status(Node, State1, up), - State2 = State1#?MODULE{ + State2 = State1#?STATE{ enqueuers = Enqs1, waiting_consumers = Waiting}, {State, Effects} = activate_next_consumer(State2, Effects1), @@ -568,7 +568,7 @@ convert_v0_to_v1(V0State0) -> max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State) }, - #?MODULE{cfg = Cfg, + #?STATE{cfg = Cfg, messages = V1Msgs, next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State), returns = rabbit_fifo_v0:get_field(returns, V0State), @@ -593,7 +593,7 @@ purge_node(Meta, Node, State, Effects) -> end, {State, Effects}, all_pids_for(Node, State)). %% any downs that re not noconnection -handle_down(Meta, Pid, #?MODULE{consumers = Cons0, +handle_down(Meta, Pid, #?STATE{consumers = Cons0, enqueuers = Enqs0} = State0) -> % Remove any enqueuer for the same pid and enqueue any pending messages % This should be ok as we won't see any more enqueues from this pid @@ -601,7 +601,7 @@ handle_down(Meta, Pid, #?MODULE{consumers = Cons0, {#enqueuer{pending = Pend}, Enqs} -> lists:foldl(fun ({_, RIdx, RawMsg}, S) -> enqueue(RIdx, RawMsg, S) - end, State0#?MODULE{enqueuers = Enqs}, Pend); + end, State0#?STATE{enqueuers = Enqs}, Pend); error -> State0 end, @@ -614,25 +614,25 @@ handle_down(Meta, Pid, #?MODULE{consumers = Cons0, cancel_consumer(Meta, ConsumerId, S, E, down) end, {State2, Effects1}, DownConsumers). -consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = competing}}) -> +consumer_active_flag_update_function(#?STATE{cfg = #cfg{consumer_strategy = competing}}) -> fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) -> consumer_update_active_effects(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) end; -consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = single_active}}) -> +consumer_active_flag_update_function(#?STATE{cfg = #cfg{consumer_strategy = single_active}}) -> fun(_, _, _, _, _, Effects) -> Effects end. handle_waiting_consumer_down(_Pid, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State) -> + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State) -> {[], State}; handle_waiting_consumer_down(_Pid, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = []} = State) -> {[], State}; handle_waiting_consumer_down(Pid, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = WaitingConsumers0} = State0) -> % get cancel effects for down waiting consumers Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end, @@ -644,11 +644,11 @@ handle_waiting_consumer_down(Pid, % update state to have only up waiting consumers StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end, WaitingConsumers0), - State = State0#?MODULE{waiting_consumers = StillUp}, + State = State0#?STATE{waiting_consumers = StillUp}, {Effects, State}. update_waiting_consumer_status(Node, - #?MODULE{waiting_consumers = WaitingConsumers}, + #?STATE{waiting_consumers = WaitingConsumers}, Status) -> [begin case node(Pid) of @@ -661,7 +661,7 @@ update_waiting_consumer_status(Node, Consumer#consumer.status =/= cancelled]. -spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). 
-state_enter(leader, #?MODULE{consumers = Cons, +state_enter(leader, #?STATE{consumers = Cons, enqueuers = Enqs, waiting_consumers = WaitingConsumers, cfg = #cfg{name = Name, @@ -684,7 +684,7 @@ state_enter(leader, #?MODULE{consumers = Cons, {Mod, Fun, Args} -> [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] end; -state_enter(eol, #?MODULE{enqueuers = Enqs, +state_enter(eol, #?STATE{enqueuers = Enqs, consumers = Custs0, waiting_consumers = WaitingConsumers0}) -> Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), @@ -695,7 +695,7 @@ state_enter(eol, #?MODULE{enqueuers = Enqs, || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ [{aux, eol}, {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}]; -state_enter(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> +state_enter(State, #?STATE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, [FHReservation]; state_enter(_, _) -> @@ -704,7 +704,7 @@ state_enter(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= le -spec tick(non_neg_integer(), state()) -> ra_machine:effects(). -tick(Ts, #?MODULE{cfg = #cfg{name = Name, +tick(Ts, #?STATE{cfg = #cfg{name = Name, resource = QName}, msg_bytes_enqueue = EnqueueBytes, msg_bytes_checkout = CheckoutBytes} = State) -> @@ -724,7 +724,7 @@ tick(Ts, #?MODULE{cfg = #cfg{name = Name, end. -spec overview(state()) -> map(). -overview(#?MODULE{consumers = Cons, +overview(#?STATE{consumers = Cons, enqueuers = Enqs, release_cursors = Cursors, enqueue_count = EnqCount, @@ -745,7 +745,7 @@ overview(#?MODULE{consumers = Cons, delivery_limit => Cfg#cfg.delivery_limit }, Smallest = rabbit_fifo_index:smallest(Indexes), - #{type => ?MODULE, + #{type => ?STATE, config => Conf, num_consumers => maps:size(Cons), num_checked_out => num_checked_out(State), @@ -761,7 +761,7 @@ overview(#?MODULE{consumers = Cons, -spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> [delivery_msg()]. -get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> +get_checked_out(Cid, From, To, #?STATE{consumers = Consumers}) -> case Consumers of #{Cid := #consumer{checked_out = Checked}} -> [{K, snd(snd(maps:get(K, Checked)))} @@ -775,7 +775,7 @@ get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> version() -> 1. which_module(0) -> rabbit_fifo_v0; -which_module(1) -> ?MODULE. +which_module(1) -> ?STATE. -record(aux_gc, {last_raft_idx = 0 :: ra:index()}). -record(aux, {name :: atom(), @@ -814,7 +814,7 @@ handle_aux(_RaState, cast, eol, #aux{name = Name} = Aux, Log, _) -> ets:delete(rabbit_fifo_usage, Name), {no_reply, Aux, Log}; handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, - Log, #?MODULE{ra_indexes = Indexes}) -> + Log, #?STATE{ra_indexes = Indexes}) -> Ts = case rabbit_fifo_index:smallest(Indexes) of %% if there are no entries, we return current timestamp %% so that any previously obtained entries are considered older than this @@ -841,7 +841,7 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, end. -eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, +eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState, #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> {Idx, _} = ra_log:last_index_term(Log), {memory, Mem} = erlang:process_info(self(), memory), @@ -858,7 +858,7 @@ eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, AuxState end. 
-force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, +force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}}, #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> {Idx, _} = ra_log:last_index_term(Log), {memory, Mem} = erlang:process_info(self(), memory), @@ -879,7 +879,7 @@ force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, query_messages_ready(State) -> messages_ready(State). -query_messages_checked_out(#?MODULE{consumers = Consumers}) -> +query_messages_checked_out(#?STATE{consumers = Consumers}) -> maps:fold(fun (_, #consumer{checked_out = C}, S) -> maps:size(C) + S end, 0, Consumers). @@ -887,22 +887,22 @@ query_messages_checked_out(#?MODULE{consumers = Consumers}) -> query_messages_total(State) -> messages_total(State). -query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> +query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) -> Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0), maps:keys(maps:merge(Enqs, Cons)). -query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> +query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) -> RaIndexes. -query_consumer_count(#?MODULE{consumers = Consumers, +query_consumer_count(#?STATE{consumers = Consumers, waiting_consumers = WaitingConsumers}) -> Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> Status =/= suspected_down end, Consumers), maps:size(Up) + length(WaitingConsumers). -query_consumers(#?MODULE{consumers = Consumers, +query_consumers(#?STATE{consumers = Consumers, waiting_consumers = WaitingConsumers, cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) -> ActiveActivityStatusFun = @@ -963,7 +963,7 @@ query_consumers(#?MODULE{consumers = Consumers, maps:merge(FromConsumers, FromWaitingConsumers). -query_single_active_consumer(#?MODULE{cfg = #cfg{consumer_strategy = single_active}, +query_single_active_consumer(#?STATE{cfg = #cfg{consumer_strategy = single_active}, consumers = Consumers}) -> case maps:size(Consumers) of 0 -> @@ -977,10 +977,10 @@ query_single_active_consumer(#?MODULE{cfg = #cfg{consumer_strategy = single_acti query_single_active_consumer(_) -> disabled. -query_stat(#?MODULE{consumers = Consumers} = State) -> +query_stat(#?STATE{consumers = Consumers} = State) -> {messages_ready(State), maps:size(Consumers)}. -query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes, +query_in_memory_usage(#?STATE{msg_bytes_in_memory = Bytes, msgs_ready_in_memory = Length}) -> {Length, Bytes}. @@ -995,7 +995,7 @@ query_peek(Pos, State0) when Pos > 0 -> query_peek(Pos-1, State) end. -query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> +query_notify_decorators_info(#?STATE{consumers = Consumers} = State) -> MaxActivePriority = maps:fold(fun(_, #consumer{credit = C, status = up, priority = P0}, MaxP) when C > 0 -> @@ -1020,14 +1020,14 @@ usage(Name) when is_atom(Name) -> %%% Internal -messages_ready(#?MODULE{messages = M, +messages_ready(#?STATE{messages = M, prefix_msgs = {RCnt, _R, PCnt, _P}, returns = R}) -> %% prefix messages will rarely have anything in them during normal %% operations so length/1 is fine here lqueue:len(M) + lqueue:len(R) + RCnt + PCnt. -messages_total(#?MODULE{ra_indexes = I, +messages_total(#?STATE{ra_indexes = I, prefix_msgs = {RCnt, _R, PCnt, _P}}) -> rabbit_fifo_index:size(I) + RCnt + PCnt. @@ -1061,23 +1061,23 @@ moving_average(Time, HalfLife, Next, Current) -> Weight = math:exp(Time * math:log(0.5) / HalfLife), Next * (1 - Weight) + Current * Weight. 
-num_checked_out(#?MODULE{consumers = Cons}) -> +num_checked_out(#?STATE{consumers = Cons}) -> maps:fold(fun (_, #consumer{checked_out = C}, Acc) -> maps:size(C) + Acc end, 0, Cons). cancel_consumer(Meta, ConsumerId, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State, Effects, Reason) -> cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); cancel_consumer(Meta, ConsumerId, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = []} = State, Effects, Reason) -> %% single active consumer on, no consumers are waiting cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); cancel_consumer(Meta, ConsumerId, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = Waiting0} = State0, Effects0, Reason) -> @@ -1095,10 +1095,10 @@ cancel_consumer(Meta, ConsumerId, Effects = cancel_consumer_effects(ConsumerId, State0, Effects0), % A waiting consumer isn't supposed to have any checked out messages, % so nothing special to do here - {State0#?MODULE{waiting_consumers = Waiting}, Effects} + {State0#?STATE{waiting_consumers = Waiting}, Effects} end. -consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}}, +consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}}, ConsumerId, #consumer{meta = Meta}, Active, ActivityStatus, Effects) -> @@ -1110,7 +1110,7 @@ consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}}, | Effects]. cancel_consumer0(Meta, ConsumerId, - #?MODULE{consumers = C0} = S0, Effects0, Reason) -> + #?STATE{consumers = C0} = S0, Effects0, Reason) -> case C0 of #{ConsumerId := Consumer} -> {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer, @@ -1122,7 +1122,7 @@ cancel_consumer0(Meta, ConsumerId, %% view) Effects = cancel_consumer_effects(ConsumerId, S, Effects2), - case maps:size(S#?MODULE.consumers) of + case maps:size(S#?STATE.consumers) of 0 -> {S, [{aux, inactive} | Effects]}; _ -> @@ -1133,7 +1133,7 @@ cancel_consumer0(Meta, ConsumerId, {S0, Effects0} end. -activate_next_consumer(#?MODULE{consumers = Cons, +activate_next_consumer(#?STATE{consumers = Cons, waiting_consumers = Waiting0} = State0, Effects0) -> case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of @@ -1145,11 +1145,11 @@ activate_next_consumer(#?MODULE{consumers = Cons, [{NextConsumerId, NextConsumer} | _] -> %% there is a potential next active consumer Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), - #?MODULE{service_queue = ServiceQueue} = State0, + #?STATE{service_queue = ServiceQueue} = State0, ServiceQueue1 = maybe_queue_consumer(NextConsumerId, NextConsumer, ServiceQueue), - State = State0#?MODULE{consumers = Cons#{NextConsumerId => NextConsumer}, + State = State0#?STATE{consumers = Cons#{NextConsumerId => NextConsumer}, service_queue = ServiceQueue1, waiting_consumers = Remaining}, Effects = consumer_update_active_effects(State, NextConsumerId, @@ -1175,7 +1175,7 @@ maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0 S0), Effects0}; down -> {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), - {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers), + {S1#?STATE{consumers = maps:remove(ConsumerId, S1#?STATE.consumers), last_active = Ts}, Effects1} end. 
@@ -1190,12 +1190,12 @@ apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) -> {State, ok, Effects} end. -drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> +drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects0) -> case take_next_msg(State0) of {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}}, State1} -> Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0), - State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}), + State2 = add_bytes_drop(Header, State1#?STATE{ra_indexes = Indexes}), State = case Msg of 'empty' -> State2; _ -> subtract_in_memory_counts(Header, State2) @@ -1213,7 +1213,7 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> {State0, Effects0} end. -enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages, +enqueue(RaftIdx, RawMsg, #?STATE{messages = Messages, next_msg_num = NextMsgNum} = State0) -> %% the initial header is an integer only - it will get expanded to a map %% when the next required key is added @@ -1229,17 +1229,17 @@ enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages, {RaftIdx, {Header, RawMsg}}} % indexed message with header map end, State = add_bytes_enqueue(Header, State1), - State#?MODULE{messages = lqueue:in({NextMsgNum, Msg}, Messages), + State#?STATE{messages = lqueue:in({NextMsgNum, Msg}, Messages), next_msg_num = NextMsgNum + 1}. append_to_master_index(RaftIdx, - #?MODULE{ra_indexes = Indexes0} = State0) -> + #?STATE{ra_indexes = Indexes0} = State0) -> State = incr_enqueue_count(State0), Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), - State#?MODULE{ra_indexes = Indexes}. + State#?STATE{ra_indexes = Indexes}. -incr_enqueue_count(#?MODULE{enqueue_count = EC, +incr_enqueue_count(#?STATE{enqueue_count = EC, cfg = #cfg{release_cursor_interval = {_Base, C}} } = State0) when EC >= C-> %% this will trigger a dehydrated version of the state to be stored @@ -1247,12 +1247,12 @@ incr_enqueue_count(#?MODULE{enqueue_count = EC, %% Q: Why don't we just stash the release cursor here? %% A: Because it needs to be the very last thing we do and we %% first needs to run the checkout logic. - State0#?MODULE{enqueue_count = 0}; -incr_enqueue_count(#?MODULE{enqueue_count = C} = State) -> - State#?MODULE{enqueue_count = C + 1}. + State0#?STATE{enqueue_count = 0}; +incr_enqueue_count(#?STATE{enqueue_count = C} = State) -> + State#?STATE{enqueue_count = C + 1}. maybe_store_dehydrated_state(RaftIdx, - #?MODULE{cfg = + #?STATE{cfg = #cfg{release_cursor_interval = {Base, _}} = Cfg, ra_indexes = Indexes, @@ -1269,12 +1269,12 @@ maybe_store_dehydrated_state(RaftIdx, Total = messages_total(State0), min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) end, - State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = + State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = {Base, Interval}}}, Dehydrated = dehydrate_state(State), Cursor = {release_cursor, RaftIdx, Dehydrated}, Cursors = lqueue:in(Cursor, Cursors0), - State#?MODULE{release_cursors = Cursors} + State#?STATE{release_cursors = Cursors} end; maybe_store_dehydrated_state(_RaftIdx, State) -> State. @@ -1286,18 +1286,18 @@ enqueue_pending(From, State = enqueue(RaftIdx, RawMsg, State0), Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending}, enqueue_pending(From, Enq, State); -enqueue_pending(From, Enq, #?MODULE{enqueuers = Enqueuers0} = State) -> - State#?MODULE{enqueuers = Enqueuers0#{From => Enq}}. +enqueue_pending(From, Enq, #?STATE{enqueuers = Enqueuers0} = State) -> + State#?STATE{enqueuers = Enqueuers0#{From => Enq}}. 
maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) -> % direct enqueue without tracking State = enqueue(RaftIdx, RawMsg, State0), {ok, State, Effects}; maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, - #?MODULE{enqueuers = Enqueuers0} = State0) -> + #?STATE{enqueuers = Enqueuers0} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> - State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, + State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, State1), {ok, State, [{monitor, process, From} | Effects]}; @@ -1313,7 +1313,7 @@ maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0, % out of order delivery Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0], Enq = Enq0#enqueuer{pending = lists:sort(Pending)}, - {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, Effects0}; + {ok, State0#?STATE{enqueuers = Enqueuers0#{From => Enq}}, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo =< Next -> % duplicate delivery - remove the raft index from the ra_indexes % map as it was added earlier @@ -1335,7 +1335,7 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, ConsumerId) end, {State0, Effects0}, Returned), State2 = - case State1#?MODULE.consumers of + case State1#?STATE.consumers of #{ConsumerId := Con0} -> Con = Con0#consumer{credit = increase_credit(Con0, map_size(Returned))}, @@ -1349,7 +1349,7 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, % used to processes messages that are finished complete(Meta, ConsumerId, Discarded, #consumer{checked_out = Checked} = Con0, Effects, - #?MODULE{ra_indexes = Indexes0} = State0) -> + #?STATE{ra_indexes = Indexes0} = State0) -> %% TODO optimise use of Discarded map here MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)], %% credit_mode = simple_prefetch should automatically top-up credit @@ -1367,7 +1367,7 @@ complete(Meta, ConsumerId, Discarded, ({'$empty_msg', Header}, Acc) -> add_bytes_settle(Header, Acc) end, State1, maps:values(Discarded)), - {State2#?MODULE{ra_indexes = Indexes}, Effects}. + {State2#?STATE{ra_indexes = Indexes}, Effects}. increase_credit(#consumer{lifetime = once, credit = Credit}, _) -> @@ -1391,11 +1391,11 @@ complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, update_smallest_raft_index(IncomingRaftIdx, State, Effects). dead_letter_effects(_Reason, _Discarded, - #?MODULE{cfg = #cfg{dead_letter_handler = undefined}}, + #?STATE{cfg = #cfg{dead_letter_handler = undefined}}, Effects) -> Effects; dead_letter_effects(Reason, Discarded, - #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}}, + #?STATE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}}, Effects) -> RaftIdxs = maps:fold( fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) -> @@ -1419,7 +1419,7 @@ dead_letter_effects(Reason, Discarded, end} | Effects]. cancel_consumer_effects(ConsumerId, - #?MODULE{cfg = #cfg{resource = QName}} = State, Effects) -> + #?STATE{cfg = #cfg{resource = QName}} = State, Effects) -> [{mod_call, rabbit_quorum_queue, cancel_consumer_handler, [QName, ConsumerId]}, notify_decorators_effect(State) | Effects]. @@ -1428,7 +1428,7 @@ update_smallest_raft_index(Idx, State, Effects) -> update_smallest_raft_index(Idx, ok, State, Effects). 
update_smallest_raft_index(IncomingRaftIdx, Reply, - #?MODULE{cfg = Cfg, + #?STATE{cfg = Cfg, ra_indexes = Indexes, release_cursors = Cursors0} = State0, Effects) -> @@ -1440,7 +1440,7 @@ update_smallest_raft_index(IncomingRaftIdx, Reply, %% reset the release cursor interval #cfg{release_cursor_interval = {Base, _}} = Cfg, RCI = {Base, Base}, - State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCI}, + State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCI}, release_cursors = lqueue:new(), enqueue_count = 0}, {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; @@ -1448,11 +1448,11 @@ update_smallest_raft_index(IncomingRaftIdx, Reply, Smallest = rabbit_fifo_index:smallest(Indexes), case find_next_cursor(Smallest, Cursors0) of {empty, Cursors} -> - {State0#?MODULE{release_cursors = Cursors}, Reply, Effects}; + {State0#?STATE{release_cursors = Cursors}, Reply, Effects}; {Cursor, Cursors} -> %% we can emit a release cursor when we've passed the smallest %% release cursor available. - {State0#?MODULE{release_cursors = Cursors}, Reply, + {State0#?STATE{release_cursors = Cursors}, Reply, Effects ++ [Cursor]} end end. @@ -1477,7 +1477,7 @@ update_header(Key, UpdateFun, Default, Header) -> return_one(Meta, MsgId, 0, {Tag, Header0}, - #?MODULE{returns = Returns, + #?STATE{returns = Returns, consumers = Consumers, cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, Effects0, ConsumerId) @@ -1503,12 +1503,12 @@ return_one(Meta, MsgId, 0, {Tag, Header0}, end, {add_bytes_return( Header, - State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, + State1#?STATE{consumers = Consumers#{ConsumerId => Con}, returns = lqueue:in(Msg, Returns)}), Effects0} end; return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}}, - #?MODULE{returns = Returns, + #?STATE{returns = Returns, consumers = Consumers, cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, Effects0, ConsumerId) -> @@ -1537,17 +1537,17 @@ return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}}, end, {add_bytes_return( Header, - State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, + State1#?STATE{consumers = Consumers#{ConsumerId => Con}, returns = lqueue:in({MsgNum, Msg}, Returns)}), Effects0} end. -return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, +return_all(Meta, #?STATE{consumers = Cons} = State0, Effects0, ConsumerId, #consumer{checked_out = Checked0} = Con) -> %% need to sort the list so that we return messages in the order %% they were checked out Checked = lists:sort(maps:to_list(Checked0)), - State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, + State = State0#?STATE{consumers = Cons#{ConsumerId => Con}}, lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) -> return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId); ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) -> @@ -1560,7 +1560,7 @@ return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, checkout(Meta, OldState, State, Effects) -> checkout(Meta, OldState, State, Effects, true). -checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, +checkout(#{index := Index} = Meta, #?STATE{cfg = #cfg{resource = QName}} = OldState, State0, Effects0, HandleConsumerChanges) -> {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0), Effects0, #{}), @@ -1610,12 +1610,12 @@ checkout0(_Meta, {Activity, State0}, Effects0, SendAcc) -> {State0, ok, lists:reverse(Effects1)}. 
evaluate_limit(_Index, Result, _BeforeState, - #?MODULE{cfg = #cfg{max_length = undefined, + #?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}} = State, Effects) -> {State, Result, Effects}; evaluate_limit(Index, Result, BeforeState, - #?MODULE{cfg = #cfg{overflow_strategy = Strategy}, + #?STATE{cfg = #cfg{overflow_strategy = Strategy}, enqueuers = Enqs0} = State0, Effects0) -> case is_over_limit(State0) of @@ -1635,7 +1635,7 @@ evaluate_limit(Index, Result, BeforeState, (_P, _E, Acc) -> Acc end, {Enqs0, Effects0}, Enqs0), - {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; false when Strategy == reject_publish -> %% TODO: optimise as this case gets called for every command %% pretty much @@ -1653,7 +1653,7 @@ evaluate_limit(Index, Result, BeforeState, (_P, _E, Acc) -> Acc end, {Enqs0, Effects0}, Enqs0), - {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; _ -> {State0, Result, Effects0} end; @@ -1662,13 +1662,13 @@ evaluate_limit(Index, Result, BeforeState, end. evaluate_memory_limit(_Header, - #?MODULE{cfg = #cfg{max_in_memory_length = undefined, + #?STATE{cfg = #cfg{max_in_memory_length = undefined, max_in_memory_bytes = undefined}}) -> false; evaluate_memory_limit(#{size := Size}, State) -> evaluate_memory_limit(Size, State); evaluate_memory_limit(Size, - #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength, + #?STATE{cfg = #cfg{max_in_memory_length = MaxLength, max_in_memory_bytes = MaxBytes}, msg_bytes_in_memory = Bytes, msgs_ready_in_memory = Length}) @@ -1692,18 +1692,18 @@ append_delivery_effects(Effects0, AccMap) -> %% %% When we return it is always done to the current return queue %% for both prefix messages and current messages -take_next_msg(#?MODULE{prefix_msgs = {R, P}} = State) -> +take_next_msg(#?STATE{prefix_msgs = {R, P}} = State) -> %% conversion - take_next_msg(State#?MODULE{prefix_msgs = {length(R), R, length(P), P}}); -take_next_msg(#?MODULE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem], + take_next_msg(State#?STATE{prefix_msgs = {length(R), R, length(P), P}}); +take_next_msg(#?STATE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem], NumP, P}} = State) -> %% there are prefix returns, these should be served first - {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; -take_next_msg(#?MODULE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) -> + {Msg, State#?STATE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; +take_next_msg(#?STATE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) -> %% there are prefix returns, these should be served first {{'$prefix_msg', Header}, - State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; -take_next_msg(#?MODULE{returns = Returns, + State#?STATE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; +take_next_msg(#?STATE{returns = Returns, messages = Messages0, prefix_msgs = {NumR, R, NumP, P}} = State) -> %% use peek rather than out there as the most likely case is an empty @@ -1711,13 +1711,13 @@ take_next_msg(#?MODULE{returns = Returns, case lqueue:peek(Returns) of {value, NextMsg} -> {NextMsg, - State#?MODULE{returns = lqueue:drop(Returns)}}; + State#?STATE{returns = lqueue:drop(Returns)}}; empty when P == [] -> case lqueue:out(Messages0) of {empty, _} -> empty; {{value, {_, _} = SeqMsg}, Messages} -> - {SeqMsg, State#?MODULE{messages = Messages }} + {SeqMsg, State#?STATE{messages = Messages }} end; empty -> [Msg | Rem] = P, @@ -1725,10 +1725,10 @@ take_next_msg(#?MODULE{returns = Returns, 
{Header, 'empty'} -> %% There are prefix msgs {{'$empty_msg', Header}, - State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; + State#?STATE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; Header -> {{'$prefix_msg', Header}, - State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} + State#?STATE{prefix_msgs = {NumR, R, NumP-1, Rem}}} end end. @@ -1759,7 +1759,7 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> {dequeue, {MsgId, {Header, Msg}}, Ready}}}] end}. -checkout_one(Meta, #?MODULE{service_queue = SQ0, +checkout_one(Meta, #?STATE{service_queue = SQ0, messages = Messages0, consumers = Cons0} = InitState) -> case priority_queue:out(SQ0) of @@ -1774,11 +1774,11 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, %% no credit but was still on queue %% can happen when draining %% recurse without consumer on queue - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?STATE{service_queue = SQ1}); #consumer{status = cancelled} -> - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?STATE{service_queue = SQ1}); #consumer{status = suspected_down} -> - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?STATE{service_queue = SQ1}); #consumer{checked_out = Checked0, next_msg_id = Next, credit = Credit, @@ -1790,7 +1790,7 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, delivery_count = DelCnt + 1}, State1 = update_or_remove_sub(Meta, ConsumerId, Con, - State0#?MODULE{service_queue = SQ1}), + State0#?STATE{service_queue = SQ1}), {State, Msg} = case ConsumerMsg of {'$prefix_msg', Header} -> @@ -1816,7 +1816,7 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, end; {{value, _ConsumerId}, SQ1} -> %% consumer did not exist but was queued, recurse - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}); + checkout_one(Meta, InitState#?STATE{service_queue = SQ1}); {empty, _} -> case lqueue:len(Messages0) of 0 -> {nochange, InitState}; @@ -1826,31 +1826,31 @@ checkout_one(Meta, #?MODULE{service_queue = SQ0, update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto, credit = 0} = Con, - #?MODULE{consumers = Cons} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)}; + #?STATE{consumers = Cons} = State) -> + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons)}; update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto} = Con, - #?MODULE{consumers = Cons, + #?STATE{consumers = Cons, service_queue = ServiceQueue} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons), service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}; update_or_remove_sub(#{system_time := Ts}, ConsumerId, #consumer{lifetime = once, checked_out = Checked, credit = 0} = Con, - #?MODULE{consumers = Cons} = State) -> + #?STATE{consumers = Cons} = State) -> case maps:size(Checked) of 0 -> % we're done with this consumer - State#?MODULE{consumers = maps:remove(ConsumerId, Cons), + State#?STATE{consumers = maps:remove(ConsumerId, Cons), last_active = Ts}; _ -> % there are unsettled items so need to keep around - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)} + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons)} end; update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = once} = Con, - #?MODULE{consumers = Cons, + #?STATE{consumers = Cons, service_queue = ServiceQueue} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), + State#?STATE{consumers = 
maps:put(ConsumerId, Con, Cons), service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. uniq_queue_in(Key, #consumer{priority = P}, Queue) -> @@ -1864,17 +1864,17 @@ uniq_queue_in(Key, #consumer{priority = P}, Queue) -> end. update_consumer(ConsumerId, Meta, Spec, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0) -> + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State0) -> %% general case, single active consumer off update_consumer0(ConsumerId, Meta, Spec, Priority, State0); update_consumer(ConsumerId, Meta, Spec, Priority, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}} = State0) when map_size(Cons0) == 0 -> %% single active consumer on, no one is consuming yet update_consumer0(ConsumerId, Meta, Spec, Priority, State0); update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = WaitingConsumers0} = State0) -> %% single active consumer on and one active consumer already %% adding the new consumer to the waiting list @@ -1882,10 +1882,10 @@ update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority, priority = Priority, credit = Credit, credit_mode = Mode}, WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}], - State0#?MODULE{waiting_consumers = WaitingConsumers1}. + State0#?STATE{waiting_consumers = WaitingConsumers1}. update_consumer0(ConsumerId, Meta, {Life, Credit, Mode}, Priority, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, service_queue = ServiceQueue0} = State0) -> %% TODO: this logic may not be correct for updating a pre-existing consumer Init = #consumer{lifetime = Life, meta = Meta, @@ -1901,7 +1901,7 @@ update_consumer0(ConsumerId, Meta, {Life, Credit, Mode}, Priority, end, Init, Cons0), ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons), ServiceQueue0), - State0#?MODULE{consumers = Cons, service_queue = ServiceQueue}. + State0#?STATE{consumers = Cons, service_queue = ServiceQueue}. maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, ServiceQueue0) -> @@ -1915,7 +1915,7 @@ maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, %% creates a dehydrated version of the current state to be cached and %% potentially used to for a snaphot at a later point -dehydrate_state(#?MODULE{messages = Messages, +dehydrate_state(#?STATE{messages = Messages, consumers = Consumers, returns = Returns, prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, @@ -1939,7 +1939,7 @@ dehydrate_state(#?MODULE{messages = Messages, %% recovering from a snapshot PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0], - State#?MODULE{messages = lqueue:new(), + State#?STATE{messages = lqueue:new(), ra_indexes = rabbit_fifo_index:empty(), release_cursors = lqueue:new(), consumers = maps:map(fun (_, C) -> @@ -1975,23 +1975,23 @@ dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> Con#consumer{checked_out = Checked}. %% make the state suitable for equality comparison -normalize(#?MODULE{messages = Messages, +normalize(#?STATE{messages = Messages, release_cursors = Cursors} = State) -> - State#?MODULE{messages = lqueue:from_list(lqueue:to_list(Messages)), + State#?STATE{messages = lqueue:from_list(lqueue:to_list(Messages)), release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}. 
-is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, +is_over_limit(#?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; -is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, +is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, msg_bytes_enqueue = BytesEnq} = State) -> messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes). -is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined, +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; -is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, msg_bytes_enqueue = BytesEnq} = State) -> is_below(MaxLength, messages_ready(State)) andalso @@ -2051,58 +2051,58 @@ make_update_config(Config) -> #update_config{config = Config}. add_bytes_enqueue(Bytes, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) + #?STATE{msg_bytes_enqueue = Enqueue} = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_enqueue = Enqueue + Bytes}; + State#?STATE{msg_bytes_enqueue = Enqueue + Bytes}; add_bytes_enqueue(#{size := Bytes}, State) -> add_bytes_enqueue(Bytes, State). add_bytes_drop(Bytes, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) + #?STATE{msg_bytes_enqueue = Enqueue} = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_enqueue = Enqueue - Bytes}; + State#?STATE{msg_bytes_enqueue = Enqueue - Bytes}; add_bytes_drop(#{size := Bytes}, State) -> add_bytes_drop(Bytes, State). add_bytes_checkout(Bytes, - #?MODULE{msg_bytes_checkout = Checkout, + #?STATE{msg_bytes_checkout = Checkout, msg_bytes_enqueue = Enqueue } = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_checkout = Checkout + Bytes, + State#?STATE{msg_bytes_checkout = Checkout + Bytes, msg_bytes_enqueue = Enqueue - Bytes}; add_bytes_checkout(#{size := Bytes}, State) -> add_bytes_checkout(Bytes, State). add_bytes_settle(Bytes, - #?MODULE{msg_bytes_checkout = Checkout} = State) + #?STATE{msg_bytes_checkout = Checkout} = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_checkout = Checkout - Bytes}; + State#?STATE{msg_bytes_checkout = Checkout - Bytes}; add_bytes_settle(#{size := Bytes}, State) -> add_bytes_settle(Bytes, State). add_bytes_return(Bytes, - #?MODULE{msg_bytes_checkout = Checkout, + #?STATE{msg_bytes_checkout = Checkout, msg_bytes_enqueue = Enqueue} = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_checkout = Checkout - Bytes, + State#?STATE{msg_bytes_checkout = Checkout - Bytes, msg_bytes_enqueue = Enqueue + Bytes}; add_bytes_return(#{size := Bytes}, State) -> add_bytes_return(Bytes, State). add_in_memory_counts(Bytes, - #?MODULE{msg_bytes_in_memory = InMemoryBytes, + #?STATE{msg_bytes_in_memory = InMemoryBytes, msgs_ready_in_memory = InMemoryCount} = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Bytes, + State#?STATE{msg_bytes_in_memory = InMemoryBytes + Bytes, msgs_ready_in_memory = InMemoryCount + 1}; add_in_memory_counts(#{size := Bytes}, State) -> add_in_memory_counts(Bytes, State). 
subtract_in_memory_counts(Bytes, - #?MODULE{msg_bytes_in_memory = InMemoryBytes, + #?STATE{msg_bytes_in_memory = InMemoryBytes, msgs_ready_in_memory = InMemoryCount} = State) when is_integer(Bytes) -> - State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Bytes, + State#?STATE{msg_bytes_in_memory = InMemoryBytes - Bytes, msgs_ready_in_memory = InMemoryCount - 1}; subtract_in_memory_counts(#{size := Bytes}, State) -> subtract_in_memory_counts(Bytes, State). @@ -2126,7 +2126,7 @@ get_size_from_header(#{size := B}) -> B. -all_nodes(#?MODULE{consumers = Cons0, +all_nodes(#?STATE{consumers = Cons0, enqueuers = Enqs0, waiting_consumers = WaitingConsumers0}) -> Nodes0 = maps:fold(fun({_, P}, _, Acc) -> @@ -2140,7 +2140,7 @@ all_nodes(#?MODULE{consumers = Cons0, Acc#{node(P) => ok} end, Nodes1, WaitingConsumers0)). -all_pids_for(Node, #?MODULE{consumers = Cons0, +all_pids_for(Node, #?STATE{consumers = Cons0, enqueuers = Enqs0, waiting_consumers = WaitingConsumers0}) -> Cons = maps:fold(fun({_, P}, _, Acc) @@ -2159,7 +2159,7 @@ all_pids_for(Node, #?MODULE{consumers = Cons0, (_, Acc) -> Acc end, Enqs, WaitingConsumers0). -suspected_pids_for(Node, #?MODULE{consumers = Cons0, +suspected_pids_for(Node, #?STATE{consumers = Cons0, enqueuers = Enqs0, waiting_consumers = WaitingConsumers0}) -> Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc) @@ -2179,7 +2179,7 @@ suspected_pids_for(Node, #?MODULE{consumers = Cons0, (_, Acc) -> Acc end, Enqs, WaitingConsumers0). -is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires}, +is_expired(Ts, #?STATE{cfg = #cfg{expires = Expires}, last_active = LastActive, consumers = Consumers}) when is_number(LastActive) andalso is_number(Expires) -> @@ -2208,7 +2208,7 @@ maybe_notify_decorators(_, false) -> maybe_notify_decorators(State, _) -> {true, query_notify_decorators_info(State)}. -notify_decorators_effect(#?MODULE{cfg = #cfg{resource = QName}} = State) -> +notify_decorators_effect(#?STATE{cfg = #cfg{resource = QName}} = State) -> {MaxActivePriority, IsEmpty} = query_notify_decorators_info(State), notify_decorators_effect(QName, MaxActivePriority, IsEmpty). @@ -2217,11 +2217,11 @@ notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. get_field(Field, State) -> - Fields = record_info(fields, ?MODULE), + Fields = record_info(fields, ?STATE), Index = record_index_of(Field, Fields), element(Index, State). -get_cfg_field(Field, #?MODULE{cfg = Cfg} ) -> +get_cfg_field(Field, #?STATE{cfg = Cfg} ) -> Fields = record_info(fields, cfg), Index = record_index_of(Field, Fields), element(Index, Cfg). diff --git a/deps/rabbit/src/rabbit_fifo_v1.hrl b/deps/rabbit/src/rabbit_fifo_v1.hrl index 3df988344596..4a427f8fed03 100644 --- a/deps/rabbit/src/rabbit_fifo_v1.hrl +++ b/deps/rabbit/src/rabbit_fifo_v1.hrl @@ -76,6 +76,7 @@ -define(MB, 1048576). -define(LOW_LIMIT, 0.8). +-define(STATE, rabbit_fifo). -record(consumer, {meta = #{} :: consumer_meta(), @@ -142,7 +143,7 @@ {non_neg_integer(), list(), non_neg_integer(), list()}. 
--record(rabbit_fifo_v1, +-record(?STATE, {cfg :: #cfg{}, % unassigned messages messages = lqueue:new() :: lqueue:lqueue({msg_in_id(), indexed_msg()}), @@ -164,7 +165,7 @@ % for normal appending operations as it's backed by a map ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, - ra:index(), #rabbit_fifo_v1{}}), + ra:index(), #?STATE{}}), % consumers need to reflect consumer state at time of snapshot % needs to be part of snapshot consumers = #{} :: #{consumer_id() => #consumer{}}, diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index d4a061c1d591..708466514958 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1418,18 +1418,14 @@ messages_total_prop(Conf0, Commands) -> messages_total_invariant() -> fun(#rabbit_fifo{messages = M, consumers = C, - enqueuers = E, prefix_msgs = {PTot, _, RTot, _}, returns = R, dlx = #rabbit_fifo_dlx{discards = D, consumer = DlxCon}} = S) -> Base = lqueue:len(M) + lqueue:len(R) + PTot + RTot, - CTot = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> + Tot0 = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> Acc + map_size(Ch) - end, Base, C), - Tot0 = maps:fold(fun (_, #enqueuer{pending = P}, Acc) -> - Acc + length(P) - end, CTot, E), + end, Base, C), Tot1 = Tot0 + lqueue:len(D), Tot = case DlxCon of undefined -> @@ -1644,10 +1640,10 @@ nodeup_gen(Nodes) -> enqueue_gen(Pid) -> enqueue_gen(Pid, 10, 1). -enqueue_gen(Pid, Enq, Del) -> - ?LET(E, {enqueue, Pid, - frequency([{Enq, enqueue}, - {Del, delay}]), +enqueue_gen(Pid, _Enq, _Del) -> + ?LET(E, {enqueue, Pid, enqueue, + % frequency([{Enq, enqueue}, + % {Del, delay}]), msg_gen()}, E). %% It's fair to assume that every message enqueued is a #basic_message. From 25741db3a4b0239c1f18148b4151e0894a59b504 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Thu, 9 Dec 2021 12:36:52 +0000 Subject: [PATCH 11/97] QQ: handle all dlx commands in one clause This avoids having to check for specific commands outside of the dlx module. --- deps/rabbit/src/rabbit_fifo.erl | 87 ++++----------------- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 1 - 2 files changed, 17 insertions(+), 71 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 21e2151c8d6d..5427aa227e68 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -669,80 +669,28 @@ apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> apply(#{index := IncomingRaftIdx} = Meta, {dlx, Cmd}, #?MODULE{dlx = DlxState0, messages_total = Total0, - ra_indexes = Indexes0} = State0) when element(1, Cmd) =:= settle -> - {DlxState, AckedMsgs} = rabbit_fifo_dlx:apply(Cmd, DlxState0), - Indexes = delete_indexes(AckedMsgs, Indexes0), - Total = Total0 - length(AckedMsgs), - State1 = subtract_in_memory(AckedMsgs, State0), - State2 = State1#?MODULE{dlx = DlxState, - messages_total = Total, - ra_indexes = Indexes}, - {State, ok, Effects} = checkout(Meta, State0, State2, [], false), - update_smallest_raft_index(IncomingRaftIdx, State, Effects); -apply(Meta, {dlx, Cmd}, - #?MODULE{dlx = DlxState0} = State0) -> - {DlxState, ok} = rabbit_fifo_dlx:apply(Cmd, DlxState0), - State1 = State0#?MODULE{dlx = DlxState}, - %% Run a checkout so that a new DLX consumer will be delivered discarded messages - %% directly after it subscribes. 
- checkout(Meta, State0, State1, [], false); + ra_indexes = Indexes0} = State0) -> + case rabbit_fifo_dlx:apply(Cmd, DlxState0) of + {DlxState, ok} -> + State1 = State0#?MODULE{dlx = DlxState}, + %% Run a checkout so that a new DLX consumer will be delivered discarded messages + %% directly after it subscribes. + checkout(Meta, State0, State1, [], false); + {DlxState, AckedMsgs} -> + Indexes = delete_indexes(AckedMsgs, Indexes0), + Total = Total0 - length(AckedMsgs), + State1 = subtract_in_memory(AckedMsgs, State0), + State2 = State1#?MODULE{dlx = DlxState, + messages_total = Total, + ra_indexes = Indexes}, + {State, ok, Effects} = checkout(Meta, State0, State2, [], false), + update_smallest_raft_index(IncomingRaftIdx, State, Effects) + end; apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), {State, ok, []}. -% convert_v0_to_v1(V0State0) -> -% V0State = rabbit_fifo_v0:normalize_for_v1(V0State0), -% V0Msgs = rabbit_fifo_v0:get_field(messages, V0State), -% V1Msgs = lqueue:from_list(lists:sort(maps:to_list(V0Msgs))), -% V0Enqs = rabbit_fifo_v0:get_field(enqueuers, V0State), -% V1Enqs = maps:map( -% fun (_EPid, E) -> -% #enqueuer{next_seqno = element(2, E), -% pending = element(3, E), -% status = element(4, E)} -% end, V0Enqs), -% V0Cons = rabbit_fifo_v0:get_field(consumers, V0State), -% V1Cons = maps:map( -% fun (_CId, C0) -> -% %% add the priority field -% list_to_tuple(tuple_to_list(C0) ++ [0]) -% end, V0Cons), -% V0SQ = rabbit_fifo_v0:get_field(service_queue, V0State), -% V1SQ = priority_queue:from_list([{0, C} || C <- queue:to_list(V0SQ)]), -% Cfg = #cfg{name = rabbit_fifo_v0:get_cfg_field(name, V0State), -% resource = rabbit_fifo_v0:get_cfg_field(resource, V0State), -% release_cursor_interval = rabbit_fifo_v0:get_cfg_field(release_cursor_interval, V0State), -% dead_letter_handler = rabbit_fifo_v0:get_cfg_field(dead_letter_handler, V0State), -% become_leader_handler = rabbit_fifo_v0:get_cfg_field(become_leader_handler, V0State), -% %% TODO: what if policy enabling reject_publish was applied before conversion? -% overflow_strategy = drop_head, -% max_length = rabbit_fifo_v0:get_cfg_field(max_length, V0State), -% max_bytes = rabbit_fifo_v0:get_cfg_field(max_bytes, V0State), -% consumer_strategy = rabbit_fifo_v0:get_cfg_field(consumer_strategy, V0State), -% delivery_limit = rabbit_fifo_v0:get_cfg_field(delivery_limit, V0State), -% max_in_memory_length = rabbit_fifo_v0:get_cfg_field(max_in_memory_length, V0State), -% max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State) -% }, - -% #?MODULE{cfg = Cfg, -% messages = V1Msgs, -% next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State), -% returns = rabbit_fifo_v0:get_field(returns, V0State), -% enqueue_count = rabbit_fifo_v0:get_field(enqueue_count, V0State), -% enqueuers = V1Enqs, -% ra_indexes = rabbit_fifo_v0:get_field(ra_indexes, V0State), -% release_cursors = rabbit_fifo_v0:get_field(release_cursors, V0State), -% consumers = V1Cons, -% service_queue = V1SQ, -% prefix_msgs = rabbit_fifo_v0:get_field(prefix_msgs, V0State), -% msg_bytes_enqueue = rabbit_fifo_v0:get_field(msg_bytes_enqueue, V0State), -% msg_bytes_checkout = rabbit_fifo_v0:get_field(msg_bytes_checkout, V0State), -% waiting_consumers = rabbit_fifo_v0:get_field(waiting_consumers, V0State), -% msg_bytes_in_memory = rabbit_fifo_v0:get_field(msg_bytes_in_memory, V0State), -% msgs_ready_in_memory = rabbit_fifo_v0:get_field(msgs_ready_in_memory, V0State) -% }. 
- convert_msg({RaftIdx, {Header, empty}}) -> ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); convert_msg({RaftIdx, {Header, Msg}}) when is_integer(RaftIdx) -> @@ -752,7 +700,6 @@ convert_msg({'$empty_msg', Header}) -> convert_msg({'$prefix_msg', Header}) -> ?PREFIX_MEM_MSG(Header). - convert_v1_to_v2(V1State) -> IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 708466514958..ce8d68c53ae7 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -12,7 +12,6 @@ -include_lib("rabbit/src/rabbit_fifo.hrl"). -include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_framing.hrl"). -define(record_info(T,R),lists:zip(record_info(fields,T),tl(tuple_to_list(R)))). From d268f3f66d7c90e559bf588dd44a86dddd1a21f4 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 9 Dec 2021 18:58:42 +0100 Subject: [PATCH 12/97] Add dlx integration tests for stats and many_target_queues --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 18 +- .../rabbit_fifo_dlx_integration_SUITE.erl | 384 +++++++++++++----- 2 files changed, 307 insertions(+), 95 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 6114b8c18589..7efc96d4e61a 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -324,8 +324,19 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0} = State0) -> Qs = rabbit_amqqueue:lookup(RouteToQNames), - {ok, QTypeState1, Actions} = rabbit_queue_type:deliver(Qs, Delivery, QTypeState0), - State = State0#state{queue_type_state = QTypeState1}, + {QTypeState2, Actions} = case rabbit_queue_type:deliver(Qs, Delivery, QTypeState0) of + {ok, QTypeState1, Actions0} -> + {QTypeState1, Actions0}; + {error, {coordinator_unavailable, Resource}} -> + rabbit_log:warning("Cannot deliver message because stream coordinator unavailable for ~s", + [rabbit_misc:rs(Resource)]), + {QTypeState0, []}; + {error, {stream_not_found, Resource}} -> + rabbit_log:warning("Cannot deliver message because stream not found for ~s", + [rabbit_misc:rs(Resource)]), + {QTypeState0, []} + end, + State = State0#state{queue_type_state = QTypeState2}, handle_queue_actions(Actions, State). handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0, @@ -344,8 +355,7 @@ handle_settled0(QRef, MsgSeq, SettleTimeout, Pendings) -> maps:update(MsgSeq, Pend, Pendings); error -> rabbit_log:warning("Ignoring publisher confirm for sequence number ~b " - "from target dead letter ~s after settle timeout of ~bms. " - "Troubleshoot why that queue confirms so slowly.", + "from target dead letter ~s after settle timeout of ~bms.", [MsgSeq, rabbit_misc:rs(QRef), SettleTimeout]), Pendings end. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 1b1aa2d76b73..2b39c6e9fe99 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -19,23 +19,27 @@ all() -> [ - {group, single_node} + {group, single_node}, + {group, cluster_size_3} ]. 
groups() -> - [{single_node, [], [ + [ + {single_node, [], [ expired, rejected, delivery_limit, target_queue_not_bound, - dlx_missing - ]}]. + dlx_missing, + stats + ]}, + {cluster_size_3, [], [ + many_target_queues + ]} + ]. %% TODO add tests for: -%% * overview and query functions return correct result / stats %% * dlx_worker resends in various topology misconfigurations -%% * dlx_worker resends when target queue is down (e.g. node is down where non-mirrored classic queue resides) -%% * we comply with mandatory + publisher confirm semantics, e.g. with 3 target queues (1 classic queue, 1 quorum queue, 1 stream) %% * there is always single leader in 3 node cluster (check via supervisor:count_children and by killing one node) %% * fall back to at-most-once works %% * switching between at-most-once and at-least-once works including rabbit_fifo_dlx:cleanup @@ -54,9 +58,14 @@ init_per_suite(Config0) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(Group, Config) -> +init_per_group(single_node = Group, Config) -> + init_per_group(Group, Config, 1); +init_per_group(cluster_size_3 = Group, Config) -> + init_per_group(Group, Config, 3). + +init_per_group(Group, Config, NodesCount) -> Config1 = rabbit_ct_helpers:set_config(Config, - [{rmq_nodes_count, 1}, + [{rmq_nodes_count, NodesCount}, {rmq_nodename_suffix, Group}, {tcp_ports_base}, {net_ticktime, 10}]), @@ -84,21 +93,55 @@ init_per_testcase(Testcase, Config) -> Q = rabbit_data_coercion:to_binary(Testcase), Config2 = rabbit_ct_helpers:set_config(Config1, [{source_queue, <>}, + {dead_letter_exchange, <>}, {target_queue_1, <>}, - {target_queue_2, <>} + {target_queue_2, <>}, + {target_queue_3, <>} ]), rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()). - end_per_testcase(Testcase, Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(source_queue, Config)}), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(target_queue_1, Config)}), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(target_queue_2, Config)}), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(target_queue_3, Config)}), + #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = ?config(dead_letter_exchange, Config)}), Config1 = rabbit_ct_helpers:run_steps( Config, rabbit_ct_client_helpers:teardown_steps()), rabbit_ct_helpers:testcase_finished(Config1, Testcase). 
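%% A minimal sketch of the naming scheme behind the config keys set up in
%% init_per_testcase/2 above: every testcase gets its own source queue, dead
%% letter exchange and target queues derived from the testcase name. The
%% suffix strings below are illustrative assumptions, not copied from the
%% suite; only the "testcase binary plus distinguishing suffix" pattern is
%% implied by the code above.
example_names(Testcase) when is_atom(Testcase) ->
    Q = rabbit_data_coercion:to_binary(Testcase),
    [{source_queue,         <<Q/binary, "_source">>},
     {dead_letter_exchange, <<Q/binary, "_dlx">>},
     {target_queue_1,       <<Q/binary, "_target_1">>},
     {target_queue_2,       <<Q/binary, "_target_2">>},
     {target_queue_3,       <<Q/binary, "_target_3">>}].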
+declare_topology(Config, AdditionalQArgs) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + DLX = ?config(dead_letter_exchange, Config), + QArgs = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + arguments = lists:keymerge(1, AdditionalQArgs, QArgs)}), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + {Server, Ch, SourceQ, TargetQ}. + %% Test that at-least-once dead-lettering works for message dead-lettered due to message TTL. expired(Config) -> - {_Server, Ch, SourceQ, TargetQ, _DLX} = Objects = declare_topology(Config, []), + {_Server, Ch, SourceQ, TargetQ} = declare_topology(Config, []), Msg = <<"msg">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -109,21 +152,11 @@ expired(Config) -> ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), 1000), - ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), - ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), - ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), - {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), - ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Death, <<"queue">>)), - ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death, <<"reason">>)), - ?assertEqual({longstr, <<"0">>}, rabbit_misc:table_lookup(Death, <<"original-expiration">>)), - ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)), - ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)), - ?assertEqual({array, [{longstr, SourceQ}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)), - delete_topology(Objects). + assert_dlx_headers(Headers, <<"expired">>, SourceQ). %% Test that at-least-once dead-lettering works for message dead-lettered due to rejected by consumer. 
rejected(Config) -> - {Server, Ch, SourceQ, TargetQ, _DLX} = Objects = declare_topology(Config, []), + {Server, Ch, SourceQ, TargetQ} = declare_topology(Config, []), publish(Ch, SourceQ), wait_for_messages_ready([Server], ra_name(SourceQ), 1), DelTag = consume(Ch, SourceQ, false), @@ -134,21 +167,11 @@ rejected(Config) -> ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), 1000), - ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), - ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), - ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), - {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), - ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Death, <<"queue">>)), - ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Death, <<"reason">>)), - ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)), - ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)), - ?assertEqual({array, [{longstr, SourceQ}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)), - delete_topology(Objects). + assert_dlx_headers(Headers, <<"rejected">>, SourceQ). %% Test that at-least-once dead-lettering works for message dead-lettered due to delivery-limit exceeded. delivery_limit(Config) -> - {Server, Ch, SourceQ, TargetQ, _DLX} = Objects = - declare_topology(Config, [{<<"x-delivery-limit">>, long, 0}]), + {Server, Ch, SourceQ, TargetQ} = declare_topology(Config, [{<<"x-delivery-limit">>, long, 0}]), publish(Ch, SourceQ), wait_for_messages_ready([Server], ra_name(SourceQ), 1), DelTag = consume(Ch, SourceQ, false), @@ -159,16 +182,24 @@ delivery_limit(Config) -> ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), 1000), - ?assertEqual({longstr, <<"delivery_limit">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), + assert_dlx_headers(Headers, <<"delivery_limit">>, SourceQ). + +assert_dlx_headers(Headers, Reason, SourceQ) -> + ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), ?assertEqual({longstr, SourceQ}, rabbit_misc:table_lookup(Death, <<"queue">>)), - ?assertEqual({longstr, <<"delivery_limit">>}, rabbit_misc:table_lookup(Death, <<"reason">>)), + ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Death, <<"reason">>)), ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)), ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)), ?assertEqual({array, [{longstr, SourceQ}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)), - delete_topology(Objects). + case Reason of + <<"expired">> -> + ?assertEqual({longstr, <<"0">>}, rabbit_misc:table_lookup(Death, <<"original-expiration">>)); + _ -> + ok + end. %% Test that message is not lost despite no route from dead-letter exchange to target queue. 
%% Once, the route becomes available, the message is delivered to the target queue @@ -179,18 +210,17 @@ target_queue_not_bound(Config) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = <<"dead-ex">>, - QArgs = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ queue = SourceQ, durable = true, - auto_delete = false, - arguments = QArgs}), + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), Msg = <<"msg">>, @@ -218,8 +248,7 @@ target_queue_not_bound(Config) -> 500, 10), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, payload = Msg}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), - delete_topology({Server, Ch, SourceQ, TargetQ, DLX}). + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). %% Test that message is not lost when configured dead-letter exchange does not exist. %% Once, the exchange gets declared, the message is delivered to the target queue @@ -230,18 +259,17 @@ dlx_missing(Config) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = <<"dead-ex">>, - QArgs = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ queue = SourceQ, durable = true, - auto_delete = false, - arguments = QArgs}), + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), Msg = <<"msg">>, ok = amqp_channel:cast(Ch, @@ -268,49 +296,222 @@ dlx_missing(Config) -> 500, 10), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, payload = Msg}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), - delete_topology({Server, Ch, SourceQ, TargetQ, DLX}). + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). -declare_topology(Config, AdditionalQArgs) -> + +%% Test that rabbit_fifo_dlx tracks statistics correctly. 
+stats(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = <<"dead-ex">>, - QArgs = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ queue = SourceQ, durable = true, - auto_delete = false, - arguments = lists:keymerge(1, AdditionalQArgs, QArgs)}), + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-max-in-memory-length">>, long, 1}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + Msg = <<"12">>, %% 2 bytes per message + [ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg}) + || _ <- lists:seq(1, 10)], %% 10 messages in total + RaName = ra_name(SourceQ), + %% Binding from target queue to DLX is missing. Therefore + %% * 10 msgs should be discarded (i.e. in discards queue or checked out to dlx_worker) + %% * 20 bytes (=10msgs*2bytes) should be discarded (i.e. in discards queue or checked out to dlx_worker) + eventually(?_assertEqual([{10, 20}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + ?assertMatch([#{ + %% 2 msgs (=Prefetch) should be checked out to dlx_worker + num_discard_checked_out := 2, + %% 4 bytes (=2msgs*2bytes) should be checked out to dlx_worker + discard_checkout_message_bytes := 4, + %% 8 msgs (=10-2) should be in discards queue + num_discarded := 8, + %% 16 bytes (=8msgs*2bytes) should be in discards queue + discard_message_bytes := 16, + %% 10 msgs in total + num_messages := 10, + %% 1 msg (=x-max-in-memory-length) should be in-memory + num_in_memory_ready_messages := 1, + %% 2 bytes (1msg) should be in-memory + in_memory_message_bytes := 2 + }], + dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), + %% Fix dead-letter toplology misconfiguration. #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ queue = TargetQ, exchange = DLX, routing_key = <<"k1">> }), - {Server, Ch, SourceQ, TargetQ, DLX}. + %% Binding from target queue to DLX is now present. + %% Therefore, all messages should be delivered to target queue and acked to source queue. + %% Therefore, all stats should be decremented back to 0. + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), + 500, 10), + ?assertMatch([#{ + num_discard_checked_out := 0, + discard_checkout_message_bytes := 0, + num_discarded := 0, + discard_message_bytes := 0, + num_messages := 0, + num_in_memory_ready_messages := 0, + in_memory_message_bytes := 0 + }], + dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), + [?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, + payload = Msg}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})) || _ <- lists:seq(1, 10)]. 
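%% A small worked sketch (not part of the suite) of the accounting asserted in
%% stats/1 above: with N dead-lettered messages of Size bytes each and a
%% dlx_worker prefetch of Prefetch, the checked-out portion and the discards
%% queue always add up to the dead-lettered total.
expected_dlx_stats(N, Size, Prefetch) ->
    CheckedOut = min(N, Prefetch),
    Discarded  = N - CheckedOut,
    #{num_discard_checked_out        => CheckedOut,
      discard_checkout_message_bytes => CheckedOut * Size,
      num_discarded                  => Discarded,
      discard_message_bytes          => Discarded * Size,
      num_messages                   => N}.
%% expected_dlx_stats(10, 2, 2) gives 2 checked out (4 bytes) and 8 still
%% queued (16 bytes) out of 10 messages, matching the first overview assertion
%% above. (num_messages equals N here only because every message published in
%% this test is dead-lettered.)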
-delete_topology({_Server, Ch, SourceQ, TargetQ, DLX}) -> - #'queue.unbind_ok'{} = amqp_channel:call(Ch, #'queue.unbind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), - #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}), - #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = SourceQ}), - #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = DLX}). +%% Test that +%% 1. Message is only acked to source queue once publisher confirms got received from **all** target queues. +%% 2. Target queue can be classic queue, quorum queue, or stream queue. +%% +%% Lesson learnt by writing this test: +%% If there are multiple target queues, messages will not be sent to target non-mirrored classic queues +%% it their host node is temporarily down because these queues get (temporarily) deleted. See: +%% https://github.com/rabbitmq/rabbitmq-server/blob/cf76b479300b767b8ea450293d096cbf729ed734/deps/rabbit/src/rabbit_amqqueue.erl#L1955-L1964 +many_target_queues(Config) -> + [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), + SourceQ = ?config(source_queue, Config), + RaName = ra_name(SourceQ), + TargetQ1 = ?config(target_queue_1, Config), + TargetQ2 = ?config(target_queue_2, Config), + TargetQ3 = ?config(target_queue_3, Config), + DLX = ?config(dead_letter_exchange, Config), + DLRKey = <<"k1">>, + %% Create topology: + %% * source quorum queue with 1 replica on node 1 + %% * target non-mirrored classic queue on node 1 + %% * target quorum queue with 3 replicas + %% * target stream queue with 3 replicas + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, DLRKey}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1} + ] + }), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ1}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ1, + exchange = DLX, + routing_key = DLRKey + }), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = TargetQ2, + durable = true, + arguments = [ + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 3} + ] + }), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ2, + exchange = DLX, + routing_key = DLRKey + }), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = TargetQ3, + durable = true, + arguments = [ + {<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3} + ] + }), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ3, + exchange = DLX, + routing_key = DLRKey + }), + Msg1 = <<"m1">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"5">>}, + payload = Msg1}), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ1}))), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, + 
amqp_channel:call(Ch, #'basic.get'{queue = TargetQ2}))), + %% basic.get not supported by stream queues + #'basic.qos_ok'{} = amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 2}), + CTag = <<"ctag">>, + amqp_channel:subscribe( + Ch, + #'basic.consume'{queue = TargetQ3, + consumer_tag = CTag, + arguments = [{<<"x-stream-offset">>, long, 0}]}, + self()), + receive + #'basic.consume_ok'{consumer_tag = CTag} -> + ok + after 2000 -> + exit(consume_ok_timeout) + end, + receive + {#'basic.deliver'{consumer_tag = CTag}, + #amqp_msg{payload = Msg1}} -> + ok + after 2000 -> + exit(deliver_timeout) + end, + eventually(?_assertEqual([{0, 0}], + dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1))), + ok = rabbit_ct_broker_helpers:kill_node(Config, Server3), + ok = rabbit_ct_broker_helpers:kill_node(Config, Server2), + Msg2 = <<"m2">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"1">>}, + payload = Msg2}), + %% Nodes 2 and 3 are down. + %% rabbit_fifo_dlx_worker should wait until all queues confirm the message + %% before acking it to the source queue. + eventually(?_assertEqual([{1, 2}], + dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1))), + consistently(?_assertEqual([{1, 2}], + dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1))), + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ1})), + ok = rabbit_ct_broker_helpers:start_node(Config, Server2), + ok = rabbit_ct_broker_helpers:start_node(Config, Server3), + eventually(?_assertEqual([{0, 0}], + dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 6), + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ2})), + receive + {#'basic.deliver'{consumer_tag = CTag}, + #amqp_msg{payload = Msg2}} -> + ok + after 0 -> + exit(deliver_timeout) + end. + %%TODO By end of this test, there will be many duplicate dead-letter messages in the target quorum queue and + %% target stream queue since both their queue clients and rabbit_fifo_dlx_worker re-try. + %% Possible solution is to have rabbit_fifo_dlx_worker only resend for classic target queues? %%TODO move to rabbitmq_ct_helpers/include/rabbit_assert.hrl consistently(TestObj) -> - consistently(TestObj, 100, 10). + consistently(TestObj, 200, 5). consistently(_, _, 0) -> ok; @@ -320,7 +521,7 @@ consistently({_Line, Assertion} = TestObj, PollInterval, PollCount) -> consistently(TestObj, PollInterval, PollCount - 1). eventually(TestObj) -> - eventually(TestObj, 100, 10). + eventually(TestObj, 200, 5). eventually({Line, _}, _, 0) -> erlang:error({assert_timeout, @@ -329,12 +530,13 @@ eventually({Line, _}, _, 0) -> {assertion_line, Line} ]}); eventually({Line, Assertion} = TestObj, PollInterval, PollCount) -> - try - Assertion() - catch error:_ = Err -> - ct:pal(?LOW_IMPORTANCE, - "Retrying in ~b ms for ~b more times in file ~s, line ~b due to failed assertion in line ~b: ~p", - [PollInterval, PollCount - 1, ?FILE, ?LINE, Line, Err]), - timer:sleep(PollInterval), - eventually(TestObj, PollInterval, PollCount - 1) + case catch Assertion() of + ok -> + ok; + Err -> + ct:pal(?LOW_IMPORTANCE, + "Retrying in ~b ms for ~b more times in file ~s, line ~b due to failed assertion in line ~b: ~p", + [PollInterval, PollCount - 1, ?FILE, ?LINE, Line, Err]), + timer:sleep(PollInterval), + eventually(TestObj, PollInterval, PollCount - 1) end. 
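%% A conceptual sketch of the behaviour exercised by many_target_queues/1
%% above; these are not rabbit_fifo_dlx_worker's actual data structures.
%% Each pending dead-lettered message tracks the set of target queues that
%% have not yet confirmed it, and it is settled back to the source quorum
%% queue only once that set becomes empty.
new_pending(TargetQueues) ->
    sets:from_list(TargetQueues).

target_confirmed(TargetQueue, Unconfirmed0) ->
    Unconfirmed = sets:del_element(TargetQueue, Unconfirmed0),
    case sets:is_empty(Unconfirmed) of
        true  -> settle_to_source;            %% every target queue confirmed
        false -> {still_pending, Unconfirmed} %% keep waiting for the rest
    end.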
From 51d5bbebe103f1e08b495ad4e64317b1b4d5de0a Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Fri, 10 Dec 2021 15:57:11 +0000 Subject: [PATCH 13/97] Move timers to aux eval event This avoids excessive timer evaluation during high throughput as the timer will only be evaluated after each ra batch. Also test fixes galore. --- deps/rabbit/src/rabbit_fifo.erl | 136 +++++++++++------- deps/rabbit/src/rabbit_fifo.hrl | 6 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 18 +-- deps/rabbit/test/quorum_queue_SUITE.erl | 43 ++++++ deps/rabbit/test/rabbit_fifo_SUITE.erl | 128 ++++++++--------- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 66 +++++++++ .../test/rabbit_stream_coordinator_SUITE.erl | 1 + 7 files changed, 270 insertions(+), 128 deletions(-) create mode 100644 deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 5427aa227e68..e9e512f9e9b6 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -113,7 +113,9 @@ #update_config{} | #garbage_collection{}. --type command() :: protocol() | rabbit_fifo_dlx:protocol() | ra_machine:builtin_command(). +-type command() :: protocol() | + rabbit_fifo_dlx:protocol() | + ra_machine:builtin_command(). %% all the command types supported by ra fifo -type client_msg() :: delivery(). @@ -129,6 +131,7 @@ consumer_meta/0, consumer_id/0, client_msg/0, + indexed_msg/0, msg/0, msg_id/0, msg_seqno/0, @@ -229,14 +232,15 @@ apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, #{ConsumerId := #consumer{checked_out = Checked} = Con} -> case DLH of at_least_once -> - DlxState = lists:foldl(fun(MsgId, S) -> - case maps:find(MsgId, Checked) of - {ok, Msg} -> - rabbit_fifo_dlx:discard(Msg, rejected, S); - error -> - S - end - end, DlxState0, MsgIds), + DlxState = lists:foldl( + fun(MsgId, S) -> + case maps:find(MsgId, Checked) of + {ok, Msg} -> + rabbit_fifo_dlx:discard(Msg, rejected, S); + error -> + S + end + end, DlxState0, MsgIds), complete_and_checkout(Meta, MsgIds, ConsumerId, Con, [], State#?MODULE{dlx = DlxState}, false); _ -> @@ -863,7 +867,8 @@ state_enter0(leader, #?MODULE{consumers = Cons, resource = Resource, become_leader_handler = BLH}, prefix_msgs = {0, [], 0, []} - }) -> + } = State) -> + TimerEffs = timer_effect(erlang:system_time(millisecond), State, []), % return effects to monitor all current consumers and enqueuers Pids = lists:usort(maps:keys(Enqs) ++ [P || {_, P} <- maps:keys(Cons)] @@ -873,7 +878,7 @@ state_enter0(leader, #?MODULE{consumers = Cons, NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), %% TODO reissue timer effect if head of message queue has expiry header set FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}], - Effects = Mons ++ Nots ++ NodeMons ++ FHReservation, + Effects = TimerEffs ++ Mons ++ Nots ++ NodeMons ++ FHReservation, case BLH of undefined -> Effects; @@ -984,10 +989,17 @@ which_module(0) -> rabbit_fifo_v0; which_module(1) -> rabbit_fifo_v1; which_module(2) -> ?MODULE. +-define(AUX, aux_v2). + -record(aux_gc, {last_raft_idx = 0 :: ra:index()}). -record(aux, {name :: atom(), capacity :: term(), gc = #aux_gc{} :: #aux_gc{}}). +-record(?AUX, {name :: atom(), + capacity :: term(), + gc = #aux_gc{} :: #aux_gc{}, + unused, + unused2}). 
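%% A short note on the two 'unused' fields above (their intent is an
%% assumption, it is not stated in the commit message): reserving spare record
%% slots lets a later machine version start using them without changing the
%% record shape again, while an old #aux{} term can be upgraded simply by
%% copying the known fields across, as the first handle_aux/6 clause below
%% does. Expressed on plain tuples, that upgrade is:
upgrade_aux_sketch({aux, Name, Capacity, Gc}) ->
    %% old 4-element #aux{} tuple -> new #aux_v2{} tuple with two spare slots
    {aux_v2, Name, Capacity, Gc, undefined, undefined}.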
init_aux(Name) when is_atom(Name) -> %% TODO: catch specific exception throw if table already exists @@ -995,13 +1007,21 @@ init_aux(Name) when is_atom(Name) -> [named_table, set, public, {write_concurrency, true}]), Now = erlang:monotonic_time(micro_seconds), - #aux{name = Name, - capacity = {inactive, Now, 1, 1.0}}. - -handle_aux(leader, _, garbage_collection, State, Log, MacState) -> - {no_reply, force_eval_gc(Log, MacState, State), Log}; -handle_aux(follower, _, garbage_collection, State, Log, MacState) -> - {no_reply, force_eval_gc(Log, MacState, State), Log}; + #?AUX{name = Name, + capacity = {inactive, Now, 1, 1.0}}. + +handle_aux(RaftState, Tag, Cmd, #aux{name = Name, + capacity = Cap, + gc = Gc}, Log, MacState) -> + %% convert aux state to new version + Aux = #?AUX{name = Name, + capacity = Cap, + gc = Gc}, + handle_aux(RaftState, Tag, Cmd, Aux, Log, MacState); +handle_aux(leader, _, garbage_collection, Aux, Log, MacState) -> + {no_reply, force_eval_gc(Log, MacState, Aux), Log}; +handle_aux(follower, _, garbage_collection, Aux, Log, MacState) -> + {no_reply, force_eval_gc(Log, MacState, Aux), Log}; handle_aux(leader, cast, {#return{msg_ids = MsgIds, consumer_id = ConsumerId}, Corr, Pid}, Aux0, Log0, #?MODULE{cfg = #cfg{delivery_limit = undefined}, @@ -1041,34 +1061,42 @@ handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, Aux0, Log, #?MODULE{}) -> %% for returns with a delivery limit set we can just return as before {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; -handle_aux(_RaState, cast, eval, Aux0, Log, _MacState) -> +handle_aux(leader, cast, eval, Aux0, Log, MacState) -> + %% this is called after each batch of commands have been applied + %% set timer for message expire + %% should really be the last applied index ts but this will have to do + Ts = erlang:system_time(millisecond), + Effects = timer_effect(Ts, MacState, []), + {no_reply, Aux0, Log, Effects}; +handle_aux(_RaftState, cast, eval, Aux0, Log, _MacState) -> {no_reply, Aux0, Log}; -handle_aux(_RaState, cast, Cmd, #aux{capacity = Use0} = Aux0, +handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, Log, _MacState) when Cmd == active orelse Cmd == inactive -> - {no_reply, Aux0#aux{capacity = update_use(Use0, Cmd)}, Log}; -handle_aux(_RaState, cast, tick, #aux{name = Name, - capacity = Use0} = State0, + {no_reply, Aux0#?AUX{capacity = update_use(Use0, Cmd)}, Log}; +handle_aux(_RaState, cast, tick, #?AUX{name = Name, + capacity = Use0} = State0, Log, MacState) -> true = ets:insert(rabbit_fifo_usage, {Name, capacity(Use0)}), Aux = eval_gc(Log, MacState, State0), {no_reply, Aux, Log}; -handle_aux(_RaState, cast, eol, #aux{name = Name} = Aux, Log, _) -> +handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, Log, _) -> ets:delete(rabbit_fifo_usage, Name), {no_reply, Aux, Log}; handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, - Log, #?MODULE{ra_indexes = Indexes}) -> - Ts = case rabbit_fifo_index:smallest(Indexes) of - %% if there are no entries, we return current timestamp - %% so that any previously obtained entries are considered older than this - undefined -> - erlang:system_time(millisecond); - Idx when is_integer(Idx) -> - {{_, _, {_, Meta, _, _}}, _Log1} = ra_log:fetch(Idx, Log), - #{ts := Timestamp} = Meta, - Timestamp - end, + Log, #?MODULE{} = State) -> + Ts = case smallest_raft_index(State) of + %% if there are no entries, we return current timestamp + %% so that any previously obtained entries are considered older than this + {undefined, _} -> + 
erlang:system_time(millisecond); + {Idx, _} when is_integer(Idx) -> + %% TODO: make more defensive to avoid potential crash + {{_, _, {_, Meta, _, _}}, _Log1} = ra_log:fetch(Idx, Log), + #{ts := Timestamp} = Meta, + Timestamp + end, {reply, {ok, Ts}, Aux, Log}; handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, Log0, MacState) -> @@ -1093,7 +1121,7 @@ handle_aux(_, _, start_dlx_worker, Aux, Log, _) -> {no_reply, Aux, Log}. eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, - #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> {Idx, _} = ra_log:last_index_term(Log), {memory, Mem} = erlang:process_info(self(), memory), case messages_total(MacState) of @@ -1104,13 +1132,13 @@ eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, rabbit_log:debug("~s: full GC sweep complete. " "Process memory changed from ~.2fMB to ~.2fMB.", [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), - AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; + AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; _ -> AuxState end. force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, - #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> {Idx, _} = ra_log:last_index_term(Log), {memory, Mem} = erlang:process_info(self(), memory), case Idx > LastGcIdx of @@ -1120,7 +1148,7 @@ force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, rabbit_log:debug("~s: full GC sweep complete. " "Process memory changed from ~.2fMB to ~.2fMB.", [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), - AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}}; + AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; false -> AuxState end. @@ -1519,7 +1547,10 @@ maybe_set_msg_ttl(#basic_message{content = #content{properties = Props}}, %% We already check in the channel that expiration must be valid. {ok, PerMsgMsgTTL} = rabbit_basic:parse_expiration(Props), TTL = min(PerMsgMsgTTL, PerQueueMsgTTL), - update_expiry_header(RaCmdTs, TTL, Header). + update_expiry_header(RaCmdTs, TTL, Header); +maybe_set_msg_ttl(_, _, Header, + #?MODULE{cfg = #cfg{}}) -> + Header. update_expiry_header(_, undefined, Header) -> Header; @@ -1873,7 +1904,8 @@ checkout(Meta, OldState, State, Effects) -> checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, Effects0, HandleConsumerChanges) -> - {#?MODULE{dlx = DlxState0} = State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), + {#?MODULE{dlx = DlxState0} = State1, _Result, Effects1} = + checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), %%TODO For now we checkout the discards queue here. 
Move it to a better place {DlxState1, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DlxState0), State2 = State1#?MODULE{dlx = DlxState1}, @@ -1882,7 +1914,8 @@ checkout(#{index := Index} = Meta, {State, true, Effects} -> case maybe_notify_decorators(State, HandleConsumerChanges) of {true, {MaxActivePriority, IsEmpty}} -> - NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty), + NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, + IsEmpty), update_smallest_raft_index(Index, State, [NotifyEffect | Effects]); false -> update_smallest_raft_index(Index, State, Effects) @@ -1890,7 +1923,8 @@ checkout(#{index := Index} = Meta, {State, false, Effects} -> case maybe_notify_decorators(State, HandleConsumerChanges) of {true, {MaxActivePriority, IsEmpty}} -> - NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty), + NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, + IsEmpty), {State, ok, [NotifyEffect | Effects]}; false -> {State, ok, Effects} @@ -2060,7 +2094,7 @@ delivery_effect({CTag, CPid}, IdxMsgs, InMemMsgs) -> {MsgId, Header}) -> {MsgId, {Header, Msg}} end, Log, Data), - Msgs = case InMemMsgs of + Msgs = case InMemMsgs of [] -> Msgs0; _ -> @@ -2082,7 +2116,8 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> %% first remove all expired messages from the head of the queue. {#?MODULE{service_queue = SQ0, messages = Messages0, - consumers = Cons0} = InitState, Effects1} = expire_msgs(Ts, InitState0, Effects0), + consumers = Cons0} = InitState, Effects1} = + expire_msgs(Ts, InitState0, Effects0), case priority_queue:out(SQ0) of {{value, ConsumerId}, SQ1} when is_map_key(ConsumerId, Cons0) -> @@ -2134,12 +2169,12 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> %% consumer did not exist but was queued, recurse checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); {empty, _} -> - Effects = timer_effect(Ts, InitState, Effects1), + % Effects = timer_effect(Ts, InitState, Effects1), case lqueue:len(Messages0) of 0 -> - {nochange, InitState, Effects}; + {nochange, InitState, Effects1}; _ -> - {inactive, InitState, Effects} + {inactive, InitState, Effects1} end end. @@ -2201,10 +2236,11 @@ expire_prefix_msg(Msg, Header, State0) -> timer_effect(RaCmdTs, State, Effects) -> T = case take_next_msg(State) of - {?INDEX_MSG(_, ?MSG(#{expiry := Expiry}, _)), _} when is_number(Expiry) -> + {?INDEX_MSG(_, ?MSG(#{expiry := Expiry}, _)), _} + when is_number(Expiry) -> %% Next message contains 'expiry' header. %% (Re)set timer so that mesage will be dropped or dead-lettered on time. - Expiry - RaCmdTs; + max(0, Expiry - RaCmdTs); _ -> %% Next message does not contain 'expiry' header. %% Therefore, do not set timer or cancel timer if it was set. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 27d286da73d0..ee46b6080441 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -37,9 +37,9 @@ %% same process -type msg_header() :: msg_size() | -#{size := msg_size(), - delivery_count => non_neg_integer(), - expiry => milliseconds()}. + #{size := msg_size(), + delivery_count => non_neg_integer(), + expiry => milliseconds()}. %% The message header: %% delivery_count: the number of unsuccessful delivery attempts. %% A non-zero value indicates a previous attempt. 
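%% A minimal sketch of how the optional 'expiry' key in msg_header() above is
%% meant to be read: an absolute timestamp in milliseconds (enqueue time plus
%% the effective TTL), so the head-of-queue timer only ever needs
%% max(0, Expiry - Now). The function names here are illustrative, not the
%% state machine's own helpers.
put_expiry(Header, EnqueueTsMs, TtlMs) when is_map(Header), is_integer(TtlMs) ->
    Header#{expiry => EnqueueTsMs + TtlMs}.

ms_until_expiry(#{expiry := Expiry}, NowMs) ->
    max(0, Expiry - NowMs);
ms_until_expiry(_HeaderWithoutExpiry, _NowMs) ->
    %% bare size headers or maps without an 'expiry' key never expire
    infinity.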
diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 04757b772140..6816b4ce11f6 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -112,9 +112,10 @@ apply(#checkout{consumer = RegName, %% were discarded. Checked0 = maps:to_list(CheckedOutOldConsumer), Checked1 = lists:keysort(1, Checked0), - {Discards, BytesMoved} = lists:foldr(fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> - {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} - end, {Discards0, 0}, Checked1), + {Discards, BytesMoved} = lists:foldr( + fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> + {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} + end, {Discards0, 0}, Checked1), State = State0#?MODULE{consumer = #dlx_consumer{registered_name = RegName, prefetch = Prefetch}, discards = Discards, @@ -137,8 +138,7 @@ apply(#settle{msg_ids = MsgIds}, %%TODO delete delivery_count header to save space? %% It's not needed anymore. --spec discard(term(), term(), state()) -> - state(). +-spec discard(rabbit_fifo:indexed_msg(), term(), state()) -> state(). discard(Msg, Reason, #?MODULE{discards = Discards0, msg_bytes = MsgBytes0} = State) -> Discards = lqueue:in({Reason, Msg}, Discards0), @@ -147,7 +147,7 @@ discard(Msg, Reason, #?MODULE{discards = Discards0, msg_bytes = MsgBytes}. -spec checkout(state()) -> - {state(), list()}. + {state(), {list(), list()}}. checkout(#?MODULE{consumer = undefined, discards = Discards} = State) -> case lqueue:is_empty(Discards) of @@ -160,11 +160,13 @@ checkout(#?MODULE{consumer = undefined, checkout(State) -> checkout0(checkout_one(State), {[],[]}). -checkout0({success, MsgId, {Reason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) when is_integer(RaftIdx) -> +checkout0({success, MsgId, {Reason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) + when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {Reason, MsgId, Header}}, SendAcc = {InMemMsgs, [DelMsg|LogMsgs]}, checkout0(checkout_one(State ), SendAcc); -checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) when is_integer(Idx) -> +checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) + when is_integer(Idx) -> DelMsg = {MsgId, {Reason, Header, Msg}}, SendAcc = {[DelMsg|InMemMsgs], LogMsgs}, checkout0(checkout_one(State), SendAcc); diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index d3b897319796..4c9a6a92a2d7 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -137,6 +137,8 @@ all_tests() -> delete_if_unused, queue_ttl, peek, + message_ttl, + per_message_ttl, consumer_priorities, cancel_consumer_gh_3729 ]. @@ -2585,6 +2587,47 @@ peek(Config) -> wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), ok. +message_ttl(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-message-ttl">>, long, 2000}])), + + Msg1 = <<"msg1">>, + Msg2 = <<"msg11">>, + + publish(Ch, QQ, Msg1), + publish(Ch, QQ, Msg2), + wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), + timer:sleep(2000), + wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), + ok. 
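%% A small sketch of how the two TTL sources relate for quorum queues,
%% mirroring the min/2 used in rabbit_fifo:maybe_set_msg_ttl/4: when both a
%% queue-level 'x-message-ttl' (as in message_ttl/1 above) and a per-message
%% 'expiration' (as in per_message_ttl/1 below) are present, the smaller value
%% wins; 'undefined' means that source imposes no limit.
effective_ttl(undefined, undefined)   -> undefined;
effective_ttl(PerMsgTtl, undefined)   -> PerMsgTtl;
effective_ttl(undefined, PerQueueTtl) -> PerQueueTtl;
effective_ttl(PerMsgTtl, PerQueueTtl) -> min(PerMsgTtl, PerQueueTtl).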
+ +per_message_ttl(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-message-ttl">>, long, 2000}])), + + Msg1 = <<"msg1">>, + + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = QQ}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2, + expiration = <<"2000">>}, + payload = Msg1}), + + wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]), + timer:sleep(2000), + wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), + ok. + in_memory(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 9d9fd4e3d591..0c37be7e840d 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -255,36 +255,37 @@ checkout_enq_settle_test(_) -> % ?ASSERT_EFF({release_cursor, 2, _}, Effects), ok. -out_of_order_enqueue_test(_) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), - {State2, Effects2} = enq(2, 1, first, State1), - ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), - % assert monitor was set up - ?ASSERT_EFF({monitor, _, _}, Effects2), - % enqueue seq num 3 and 4 before 2 - {State3, Effects3} = enq(3, 3, third, State2), - ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3), - {State4, Effects4} = enq(4, 4, fourth, State3), - % assert no further deliveries where made - ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4), - {_State5, Effects5} = enq(5, 2, second, State4), - % assert two deliveries were now made - ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}}, - {_, {_, third}}, - {_, {_, fourth}}]}, _}, - Effects5), - ok. - -out_of_order_first_enqueue_test(_) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, _} = check_n(Cid, 5, 5, test_init(test)), - {_State2, Effects2} = enq(2, 10, first, State1), - ?ASSERT_EFF({monitor, process, _}, Effects2), - ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, - Effects2), - ok. +%% this should not be needed anymore as we don't hold pending messages +% out_of_order_enqueue_test(_) -> +% Cid = {?FUNCTION_NAME, self()}, +% {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, +% {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), +% {State2, Effects2} = enq(2, 1, first, State1), +% ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), +% % assert monitor was set up +% ?ASSERT_EFF({monitor, _, _}, Effects2), +% % enqueue seq num 3 and 4 before 2 +% {State3, Effects3} = enq(3, 3, third, State2), +% ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3), +% {State4, Effects4} = enq(4, 4, fourth, State3), +% % assert no further deliveries where made +% ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4), +% {_State5, Effects5} = enq(5, 2, second, State4), +% % assert two deliveries were now made +% ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}}, +% {_, {_, third}}, +% {_, {_, fourth}}]}, _}, +% Effects5), +% ok. 
+ +% out_of_order_first_enqueue_test(_) -> +% Cid = {?FUNCTION_NAME, self()}, +% {State1, _} = check_n(Cid, 5, 5, test_init(test)), +% {_State2, Effects2} = enq(2, 10, first, State1), +% ?ASSERT_EFF({monitor, process, _}, Effects2), +% ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, +% Effects2), +% ok. duplicate_enqueue_test(_) -> Cid = {<<"duplicate_enqueue_test">>, self()}, @@ -509,7 +510,7 @@ discarded_message_with_dead_letter_handler_emits_log_effect_test(_) -> State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), dead_letter_handler => - {somemod, somefun, [somearg]}}), + {at_most_once, {somemod, somefun, [somearg]}}}), {State0, [_, _]} = enq(1, 1, first, State00), {State1, Effects1} = check_n(Cid, 2, 10, State0), ?ASSERT_EFF({send_msg, _, @@ -526,7 +527,8 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(_) -> queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), max_in_memory_length =>1, dead_letter_handler => - {somemod, somefun, [somearg]}}), + {at_most_once, + {somemod, somefun, [somearg]}}}), %% enqueue two messages {State0, _} = enq(1, 1, first, State00), {State1, _} = enq(2, 2, snd, State0), @@ -552,7 +554,7 @@ tick_test(_) -> [{mod_call, rabbit_quorum_queue, handle_tick, [#resource{}, - {?FUNCTION_NAME, 1, 1, 2, 1, 3, 3}, + {?FUNCTION_NAME, 1, 1, 2, 1, 3, 3, 0}, [_Node] ]}] = rabbit_fifo:tick(1, S4), ok. @@ -576,15 +578,6 @@ delivery_query_returns_deliveries_test(_) -> [_, _, _] = rabbit_fifo:get_checked_out(Cid, 1, 3, State), ok. -pending_enqueue_is_enqueued_on_down_test(_) -> - Cid = {<<"cid">>, self()}, - Pid = self(), - {State0, _} = enq(1, 2, first, test_init(test)), - {State1, _, _} = apply(meta(2), {down, Pid, noproc}, State0), - {_State2, {dequeue, {0, {_, first}}, 0}, _} = - apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1), - ok. 
- duplicate_delivery_test(_) -> {State0, _} = enq(1, 1, first, test_init(test)), {#rabbit_fifo{messages = Messages} = State, _} = enq(2, 1, first, State0), @@ -840,7 +833,7 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(_) -> % adding some consumers AddConsumer = fun({CTag, ChannelId}, State) -> {NewState, _, _} = apply( - #{index => 1}, + meta(1), make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}), State), NewState @@ -891,7 +884,7 @@ single_active_returns_messages_on_noconnection_test(_) -> queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = #{index => 1}, + Meta = meta(1), Nodes = [n1], ConsumerIds = [{_, DownPid}] = [begin @@ -926,7 +919,7 @@ single_active_consumer_replaces_consumer_when_down_noconnection_test(_) -> queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = #{index => 1}, + Meta = meta(1), Nodes = [n1, n2, node()], ConsumerIds = [C1 = {_, DownPid}, C2, _C3] = [begin @@ -966,7 +959,7 @@ single_active_consumer_replaces_consumer_when_down_noconnection_test(_) -> ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)), % simulate node comes back up - {State3, _, _} = apply(#{index => 2}, {nodeup, node(DownPid)}, State2), + {State3, _, _} = apply(meta(2), {nodeup, node(DownPid)}, State2), %% the consumer is still active and the same as before ?assertMatch([{C2, #consumer{status = up}}], @@ -984,7 +977,7 @@ single_active_consumer_all_disconnected_test(_) -> queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = #{index => 1}, + Meta = meta(1), Nodes = [n1, n2], ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] = [begin @@ -1035,7 +1028,7 @@ single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) -> Pid2 = spawn(DummyFunction), Pid3 = spawn(DummyFunction), - Meta = #{index => 1}, + Meta = meta(1), % adding some consumers AddConsumer = fun({CTag, ChannelId}, State) -> {NewState, _, _} = apply( @@ -1065,7 +1058,7 @@ single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) -> Pid2 = spawn(DummyFunction), Pid3 = spawn(DummyFunction), - Meta = #{index => 1}, + Meta = meta(1), % adding some consumers AddConsumer = fun({CTag, ChannelId}, State) -> {NewState, _, _} = apply( @@ -1094,12 +1087,12 @@ query_consumers_test(_) -> % adding some consumers AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - #{index => 1}, - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, #{}), - State), - NewState + {NewState, _, _} = apply( + meta(1), + make_checkout({CTag, self()}, + {once, 1, simple_prefetch}, #{}), + State), + NewState end, State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), Consumers0 = State1#rabbit_fifo.consumers, @@ -1129,7 +1122,7 @@ query_consumers_when_single_active_consumer_is_on_test(_) -> atom_to_binary(?FUNCTION_NAME, utf8)), release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = #{index => 1}, + Meta = meta(1), % adding some consumers AddConsumer = fun(CTag, State) -> {NewState, _, _} = apply( @@ -1172,7 +1165,7 @@ active_flag_updated_when_consumer_suspected_unsuspected_test(_) -> AddConsumer = fun({CTag, ChannelId}, State) -> {NewState, _, _} = apply( - #{index => 1}, + meta(1), rabbit_fifo:make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}), @@ -1182,12 +1175,12 @@ active_flag_updated_when_consumer_suspected_unsuspected_test(_) -> State1 = lists:foldl(AddConsumer, State0, 
[{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - {State2, _, Effects2} = apply(#{index => 3, - system_time => 1500}, {down, Pid1, noconnection}, State1), + {State2, _, Effects2} = apply(meta(3), + {down, Pid1, noconnection}, State1), % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node, 1 more decorators effect ?assertEqual(4 + 1 + 1, length(Effects2)), - {_, _, Effects3} = apply(#{index => 4}, {nodeup, node(self())}, State2), + {_, _, Effects3} = apply(meta(4), {nodeup, node(self())}, State2), % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID, 1 more decorators effect ?assertEqual(4 + 4 + 1, length(Effects3)). @@ -1206,7 +1199,7 @@ active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_co % adding some consumers AddConsumer = fun({CTag, ChannelId}, State) -> {NewState, _, _} = apply( - #{index => 1}, + meta(1), make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}), State), @@ -1545,7 +1538,7 @@ machine_version_test(_) -> S0 = V0:init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}), Idx = 1, - {#rabbit_fifo{}, ok, []} = apply(meta(Idx), {machine_version, 0, 1}, S0), + {#rabbit_fifo{}, ok, _} = apply(meta(Idx), {machine_version, 0, 2}, S0), Cid = {atom_to_binary(?FUNCTION_NAME, utf8), self()}, Entries = [ @@ -1558,8 +1551,9 @@ machine_version_test(_) -> {#rabbit_fifo{enqueuers = #{Self := #enqueuer{}}, consumers = #{Cid := #consumer{priority = 0}}, service_queue = S, - messages = Msgs}, ok, []} = apply(meta(Idx), - {machine_version, 0, 1}, S1), + messages = Msgs}, ok, + [_|_]} = apply(meta(Idx), + {machine_version, 0, 2}, S1), %% validate message conversion to lqueue ?assertEqual(1, lqueue:len(Msgs)), ?assert(priority_queue:is_queue(S)), @@ -1570,7 +1564,7 @@ machine_version_waiting_consumer_test(_) -> S0 = V0:init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}), Idx = 1, - {#rabbit_fifo{}, ok, []} = apply(meta(Idx), {machine_version, 0, 1}, S0), + {#rabbit_fifo{}, ok, _} = apply(meta(Idx), {machine_version, 0, 2}, S0), Cid = {atom_to_binary(?FUNCTION_NAME, utf8), self()}, Entries = [ @@ -1583,8 +1577,8 @@ machine_version_waiting_consumer_test(_) -> {#rabbit_fifo{enqueuers = #{Self := #enqueuer{}}, consumers = #{Cid := #consumer{priority = 0}}, service_queue = S, - messages = Msgs}, ok, []} = apply(meta(Idx), - {machine_version, 0, 1}, S1), + messages = Msgs}, ok, _} = apply(meta(Idx), + {machine_version, 0, 2}, S1), %% validate message conversion to lqueue ?assertEqual(0, lqueue:len(Msgs)), ?assert(priority_queue:is_queue(S)), diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl new file mode 100644 index 000000000000..bb52649a60b1 --- /dev/null +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -0,0 +1,66 @@ + +-module(rabbit_fifo_dlx_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-export([ + ]). + +% -include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbit/src/rabbit_fifo.hrl"). +-include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). + +%%%=================================================================== +%%% Common Test callbacks +%%%=================================================================== + +all() -> + [ + {group, tests} + ]. + + +all_tests() -> + [ + discard_no_dlx_consumer + ]. 
+ +groups() -> + [ + {tests, [], all_tests()} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + ok. + +%%%=================================================================== +%%% Test cases +%%%=================================================================== + +discard_no_dlx_consumer(_Config) -> + S0 = rabbit_fifo_dlx:init(), + ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S0)), + S1 = rabbit_fifo_dlx:discard(make_msg(1), because, S0), + ?assertMatch(#{num_discarded := 1}, rabbit_fifo_dlx:overview(S1)), + ok. + + +make_msg(RaftIdx) -> + ?INDEX_MSG(RaftIdx, ?DISK_MSG(1)). diff --git a/deps/rabbit/test/rabbit_stream_coordinator_SUITE.erl b/deps/rabbit/test/rabbit_stream_coordinator_SUITE.erl index e2900b8cee31..a2c409f293e0 100644 --- a/deps/rabbit/test/rabbit_stream_coordinator_SUITE.erl +++ b/deps/rabbit/test/rabbit_stream_coordinator_SUITE.erl @@ -1,5 +1,6 @@ -module(rabbit_stream_coordinator_SUITE). +-compile(nowarn_export_all). -compile(export_all). -export([ From bbef82adc9b839763209f530276f021c3ae6a231 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Fri, 10 Dec 2021 16:17:51 +0000 Subject: [PATCH 14/97] QQ Fix invalid_policy test --- deps/rabbit/test/quorum_queue_SUITE.erl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 4c9a6a92a2d7..ced1b2556d41 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -965,14 +965,11 @@ invalid_policy(Config) -> ok = rabbit_ct_broker_helpers:set_policy( Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>, [{<<"ha-mode">>, <<"all">>}]), - ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"ttl">>, <<"invalid_policy.*">>, <<"queues">>, - [{<<"message-ttl">>, 5}]), Info = rpc:call(Server, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, QQ)]), ?assertEqual('', proplists:get_value(policy, Info)), ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl">>). + ok. dead_letter_to_quorum_queue(Config) -> [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), From 42f5e526b035b045554227c2afbb5a2d832be575 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 13 Dec 2021 12:45:58 +0100 Subject: [PATCH 15/97] Add more rabbit_fifo_dlx_integration_SUITE tests --- .../rabbit_fifo_dlx_integration_SUITE.erl | 138 ++++++++++++++++-- 1 file changed, 122 insertions(+), 16 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 2b39c6e9fe99..cdc751f1e03c 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -31,19 +31,16 @@ groups() -> delivery_limit, target_queue_not_bound, dlx_missing, - stats + stats, + drop_head_falls_back_to_at_most_once, + switch_strategy ]}, {cluster_size_3, [], [ - many_target_queues + many_target_queues, + single_dlx_worker ]} ]. 
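A usage note on the pure-state rabbit_fifo_dlx_SUITE added a little earlier: because rabbit_fifo_dlx threads all of its bookkeeping through the state it returns, further cases can be written in the same broker-less style. A possible follow-up case, sketched here purely as an illustration (the case name and the reasons used are not part of the patch):

    discard_twice_no_dlx_consumer(_Config) ->
        S0 = rabbit_fifo_dlx:init(),
        %% with no dlx consumer checked out, both discards stay queued
        S1 = rabbit_fifo_dlx:discard(?INDEX_MSG(1, ?DISK_MSG(1)), expired, S0),
        S2 = rabbit_fifo_dlx:discard(?INDEX_MSG(2, ?DISK_MSG(1)), rejected, S1),
        ?assertMatch(#{num_discarded := 2}, rabbit_fifo_dlx:overview(S2)),
        ok.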
-%% TODO add tests for: -%% * dlx_worker resends in various topology misconfigurations -%% * there is always single leader in 3 node cluster (check via supervisor:count_children and by killing one node) -%% * fall back to at-most-once works -%% * switching between at-most-once and at-least-once works including rabbit_fifo_dlx:cleanup - init_per_suite(Config0) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:merge_app_env( @@ -209,7 +206,7 @@ target_queue_not_bound(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), - DLX = <<"dead-ex">>, + DLX = ?config(dead_letter_exchange, Config), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ queue = SourceQ, durable = true, @@ -258,7 +255,7 @@ dlx_missing(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), - DLX = <<"dead-ex">>, + DLX = ?config(dead_letter_exchange, Config), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ queue = SourceQ, durable = true, @@ -305,7 +302,7 @@ stats(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), - DLX = <<"dead-ex">>, + DLX = ?config(dead_letter_exchange, Config), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ queue = SourceQ, durable = true, @@ -375,6 +372,79 @@ stats(Config) -> payload = Msg}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})) || _ <- lists:seq(1, 10)]. +%% Test that configuring overflow (default) drop-head will fall back to +%% dead-letter-strategy at-most-once despite configuring at-least-once. +drop_head_falls_back_to_at_most_once(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + DLX = ?config(dead_letter_exchange, Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"drop-head">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), + consistently( + ?_assertMatch( + [_, {active, 0}, _, _], + rabbit_ct_broker_helpers:rpc(Config, Server, supervisor, count_children, [rabbit_fifo_dlx_sup]))). + +%% Test that dynamically switching dead-letter-strategy works. 
+switch_strategy(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + RaName = ra_name(SourceQ), + DLX = ?config(dead_letter_exchange, Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), + %% default strategy is at-most-once + assert_active_dlx_workers(0, Config, Server), + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, + [{<<"dead-letter-strategy">>, <<"at-least-once">>}]), + assert_active_dlx_workers(1, Config, Server), + [ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = <<"m">>}) %% 1 byte per message + || _ <- lists:seq(1, 5)], + eventually( + ?_assertMatch( + [#{ + %% 2 msgs (=Prefetch) should be checked out to dlx_worker + num_discard_checked_out := 2, + discard_checkout_message_bytes := 2, + %% 3 msgs (=5-2) should be in discards queue + num_discarded := 3, + discard_message_bytes := 3, + num_messages := 5 + }], + dirty_query([Server], RaName, fun rabbit_fifo:overview/1))), + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, + [{<<"dead-letter-strategy">>, <<"at-most-once">>}]), + assert_active_dlx_workers(0, Config, Server), + ?assertMatch( + [#{ + num_discard_checked_out := 0, + discard_checkout_message_bytes := 0, + num_discarded := 0, + discard_message_bytes := 0, + num_messages := 0 + }], + dirty_query([Server], RaName, fun rabbit_fifo:overview/1)). + %% Test that %% 1. Message is only acked to source queue once publisher confirms got received from **all** target queues. %% 2. Target queue can be classic queue, quorum queue, or stream queue. @@ -476,8 +546,8 @@ many_target_queues(Config) -> end, eventually(?_assertEqual([{0, 0}], dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1))), - ok = rabbit_ct_broker_helpers:kill_node(Config, Server3), - ok = rabbit_ct_broker_helpers:kill_node(Config, Server2), + ok = rabbit_ct_broker_helpers:stop_node(Config, Server3), + ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), Msg2 = <<"m2">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -494,6 +564,9 @@ many_target_queues(Config) -> amqp_channel:call(Ch, #'basic.get'{queue = TargetQ1})), ok = rabbit_ct_broker_helpers:start_node(Config, Server2), ok = rabbit_ct_broker_helpers:start_node(Config, Server3), + %%TODO By end of this test, there will be many duplicate dead-letter messages in the target quorum queue and + %% target stream queue since both their queue clients and rabbit_fifo_dlx_worker re-try. + %% Possible solution is to have rabbit_fifo_dlx_worker only resend for classic target queues? eventually(?_assertEqual([{0, 0}], dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 6), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, @@ -505,9 +578,42 @@ many_target_queues(Config) -> after 0 -> exit(deliver_timeout) end. - %%TODO By end of this test, there will be many duplicate dead-letter messages in the target quorum queue and - %% target stream queue since both their queue clients and rabbit_fifo_dlx_worker re-try. 
- %% Possible solution is to have rabbit_fifo_dlx_worker only resend for classic target queues? + +%% Test that there is a single active rabbit_fifo_dlx_worker that is co-located with the quorum queue leader. +single_dlx_worker(Config) -> + [Server1, Server2, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), + SourceQ = ?config(source_queue, Config), + DLX = ?config(dead_letter_exchange, Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), + ?assertMatch( + [[_, {active, 1}, _, _], + [_, {active, 0}, _, _], + [_, {active, 0}, _, _]], + rabbit_ct_broker_helpers:rpc_all(Config, supervisor, count_children, [rabbit_fifo_dlx_sup])), + ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), + RaName = ra_name(SourceQ), + {ok, _, {_, Leader}} = ra:members({RaName, Server2}), + ?assertNotEqual(Server1, Leader), + [Follower] = Servers -- [Server1, Leader], + assert_active_dlx_workers(1, Config, Leader), + assert_active_dlx_workers(0, Config, Follower), + ok = rabbit_ct_broker_helpers:start_node(Config, Server1), + assert_active_dlx_workers(0, Config, Server1). + +assert_active_dlx_workers(N, Config, Server) -> + ?assertMatch( + [_, {active, N}, _, _], + rabbit_ct_broker_helpers:rpc(Config, Server, supervisor, count_children, [rabbit_fifo_dlx_sup])). %%TODO move to rabbitmq_ct_helpers/include/rabbit_assert.hrl consistently(TestObj) -> From e1425567c64b38922db335e05197592a38e14bbf Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 13 Dec 2021 12:00:56 +0000 Subject: [PATCH 16/97] Bazel suite config --- deps/rabbit/BUILD.bazel | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index f22a8056e8d9..f24602f76e98 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -716,6 +716,34 @@ suites = [ "@proper//:erlang_app", ], ), + rabbitmq_suite( + name = "rabbit_fifo_dlx_SUITE", + size = "small", + additional_hdrs = [ + "src/rabbit_fifo.hrl", + "src/rabbit_fifo_dlx.hrl", + ], + ), + rabbitmq_integration_suite( + PACKAGE, + name = "rabbit_fifo_dlx_integration_SUITE", + size = "medium", + additional_beam = [ + ":test_util", + ":quorum_queue_utils", + ":quorum_queue_SUITE_beam_files", + ], + additional_hdrs = [ + "src/rabbit_fifo.hrl", + "src/rabbit_fifo_dlx.hrl", + ], + runtime_deps = [ + "@ra//:bazel_erlang_lib", + ], + deps = [ + "@proper//:bazel_erlang_lib", + ], + ), rabbitmq_suite( name = "rabbit_fifo_SUITE", size = "medium", From 87b67c22e7f2b472bcd257015212e7f46c7575eb Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 13 Dec 2021 12:51:36 +0000 Subject: [PATCH 17/97] update rabbit_fifo_SUITE expectations --- deps/rabbit/test/rabbit_fifo_SUITE.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 0c37be7e840d..f736792b8d6f 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -592,8 +592,9 @@ state_enter_file_handle_leader_reservation_test(_) -> Resource = {resource, <<"/">>, queue, <<"test">>}, Effects = rabbit_fifo:state_enter(leader, S0), - ?assertEqual([ + 
?assertMatch([ {mod_call, m, f, [a, the_name]}, + _Timer, {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]} ], Effects), ok. @@ -1044,7 +1045,7 @@ single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) -> Effects = rabbit_fifo:state_enter(leader, State1), %% 2 effects for each consumer process (channel process), 1 effect for the node, %% 1 effect for file handle reservation - ?assertEqual(2 * 3 + 1 + 1, length(Effects)). + ?assertEqual(2 * 3 + 1 + 1 + 1, length(Effects)). single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) -> Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), From e5382050aa6da097e4c3030d05ac9037ecbddec6 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 13 Dec 2021 14:24:19 +0000 Subject: [PATCH 18/97] Set delivery_limit on quorum queue dead_lettering tests As this mode will enable the "requeue at front" behaviour which isn't enabled if delivery_limit is not set. --- deps/rabbit/test/dead_lettering_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index ebd04bf72002..5cffd1da1816 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -94,6 +94,7 @@ init_per_group(quorum_queue, Config) -> rabbit_ct_helpers:set_config( Config, [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-delivery-limit">>, long, 100}, %%TODO add at-least-once tests {<<"x-dead-letter-strategy">>, longstr, <<"at-most-once">>}]}, {queue_durable, true}]); From 9873b243408700fc838c888f8647b336c7b1052d Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 13 Dec 2021 17:01:40 +0000 Subject: [PATCH 19/97] rabbit_fifo_prop fixes --- deps/rabbit/src/rabbit_fifo.erl | 18 +++++++++--------- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 9 ++++++--- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index e9e512f9e9b6..221345c8d6dd 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1310,16 +1310,16 @@ messages_ready(#?MODULE{messages = M, messages_total(#?MODULE{messages = _M, messages_total = Total, ra_indexes = _Indexes, - prefix_msgs = {_RCnt, _R, _PCnt, _P}}) -> + prefix_msgs = _}) -> % lqueue:len(M) + rabbit_fifo_index:size(Indexes) + RCnt + PCnt. Total; %% release cursors might be old state (e.g. after recent upgrade) -messages_total(State) - when element(1, State) =:= rabbit_fifo_v1 -> - rabbit_fifo_v1:query_messages_total(State); -messages_total(State) - when element(1, State) =:= rabbit_fifo_v0 -> - rabbit_fifo_v0:query_messages_total(State). +messages_total(State) -> + try + rabbit_fifo_v1:query_messages_total(State) + catch _:_ -> + rabbit_fifo_v0:query_messages_total(State) + end. 
update_use({inactive, _, _, _} = CUInfo, inactive) -> CUInfo; @@ -1616,9 +1616,9 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, case maps:get(From, Enqueuers0, undefined) of undefined -> State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, - {ok, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, + {Res, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, State1), - {ok, State, [{monitor, process, From} | Effects]}; + {Res, State, [{monitor, process, From} | Effects]}; #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> % it is the next expected seqno State1 = enqueue(RaftIdx, Ts, RawMsg, State0), diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index ce8d68c53ae7..322cd9b28796 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -604,6 +604,8 @@ scenario31(_Config) -> % #{ack => true,args => [],prefetch => 1,username => <<"user">>}}}, % {4,{purge}}] make_enqueue(E1,1,msg(<<>>)), %% 1 + make_enqueue(E1,0,msg(<<>>)), %% 1 + make_enqueue(E1,1,msg(<<>>)), %% 1 make_enqueue(E2,2,msg(<<1>>)), %% 2 make_checkout(C1, {auto,1,simple_prefetch}), %% 3 {purge} %% 4 @@ -623,7 +625,8 @@ scenario32(_Config) -> make_enqueue(E1,1,msg(<<0>>)), %% 1 make_enqueue(E1,2,msg(<<0,0>>)), %% 2 make_enqueue(E1,4,msg(<<0,0,0,0>>)), %% 3 - make_enqueue(E1,3,msg(<<0,0,0>>)) %% 4 + make_enqueue(E1,3,msg(<<0,0,0>>)), %% 4 + make_enqueue(E1,4,msg(<<0,0,0,0>>)) %% 3 ], run_snapshot_test(#{name => ?FUNCTION_NAME, release_cursor_interval => 0, @@ -1456,8 +1459,8 @@ upgrade_prop(Conf0, Commands) -> end, InitState, PreEntries), %% perform conversion - V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), - {machine_version, 1, 2}, V1)), + #rabbit_fifo{} = V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), + {machine_version, 1, 2}, V1)), %% assert invariants Fields = [num_messages, num_ready_messages, From cd785fcea21c0a12c4a8cc849bfcf4804c7873af Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 13 Dec 2021 20:45:09 +0100 Subject: [PATCH 20/97] Make dlx_worker terminate itself if leader is down If source quorum queue leader is down (e.g. process crashes), co-located rabbit_fifo_dlx_worker will terminate itself. The new leader (on same node or different node) will re-create the rabbit_fifo_dlx_worker. That's cleaner compared to the previous approach where the new Ra server process on same node took care of the rabbit_fifo_dlx_worker termination. --- deps/rabbit/src/rabbit_fifo_dlx.erl | 1 - deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 16 ++++++++-- .../rabbit_fifo_dlx_integration_SUITE.erl | 29 ++++++++++++++----- 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 6816b4ce11f6..dc0d88ad6696 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -16,7 +16,6 @@ checkout/1, state_enter/4, start_worker/2, - terminate_worker/1, cleanup/1, purge/1, consumer_pid/1, diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 7efc96d4e61a..075130a51467 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -66,6 +66,8 @@ %% There is one rabbit_fifo_dlx_worker per source quorum queue %% (if dead-letter-strategy at-least-once is used). 
queue_ref :: rabbit_amqqueue:name(), + %% monitors source queue + monitor_ref :: reference(), %% configured (x-)dead-letter-exchange of source queue exchange_ref, %% configured (x-)dead-letter-routing-key of source queue @@ -120,7 +122,9 @@ handle_continue({QRef, RegName}, undefined) -> QRef, {ClusterName, node()}, Prefetch), - {noreply, State#state{dlx_client_state = ConsumerState}}. + MonitorRef = erlang:monitor(process, ClusterName), + {noreply, State#state{dlx_client_state = ConsumerState, + monitor_ref = MonitorRef}}. terminate(_Reason, _State) -> %%TODO cancel timer? @@ -173,8 +177,14 @@ redeliver_and_ack(State0) -> State = maybe_set_timer(State2), {noreply, State}. -%%TODO monitor source quorum queue upon init / handle_continue and terminate ourself if source quorum queue is DOWN -%% since new leader will re-create a worker +handle_info({'DOWN', Ref, process, _, _}, + #state{monitor_ref = Ref, + queue_ref = QRef}) -> + %% Source quorum queue is down. Therefore, terminate ourself. + %% The new leader will re-create another dlx_worker. + rabbit_log:debug("~s terminating itself because leader of ~s is down...", + [?MODULE, rabbit_misc:rs(QRef)]), + supervisor:terminate_child(rabbit_fifo_dlx_sup, self()); handle_info({'DOWN', _MRef, process, QPid, Reason}, #state{queue_type_state = QTypeState0} = State0) -> %% received from target classic queue diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index cdc751f1e03c..93bd98935a66 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -600,20 +600,35 @@ single_dlx_worker(Config) -> [_, {active, 0}, _, _], [_, {active, 0}, _, _]], rabbit_ct_broker_helpers:rpc_all(Config, supervisor, count_children, [rabbit_fifo_dlx_sup])), + ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), RaName = ra_name(SourceQ), - {ok, _, {_, Leader}} = ra:members({RaName, Server2}), - ?assertNotEqual(Server1, Leader), - [Follower] = Servers -- [Server1, Leader], - assert_active_dlx_workers(1, Config, Leader), - assert_active_dlx_workers(0, Config, Follower), + {ok, _, {_, Leader0}} = ra:members({RaName, Server2}), + ?assertNotEqual(Server1, Leader0), + [Follower0] = Servers -- [Server1, Leader0], + assert_active_dlx_workers(1, Config, Leader0), + assert_active_dlx_workers(0, Config, Follower0), ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - assert_active_dlx_workers(0, Config, Server1). + consistently( + ?_assertMatch( + [_, {active, 0}, _, _], + rabbit_ct_broker_helpers:rpc(Config, Server1, supervisor, count_children, [rabbit_fifo_dlx_sup], 1000))), + + Pid = rabbit_ct_broker_helpers:rpc(Config, Leader0, erlang, whereis, [RaName]), + true = rabbit_ct_broker_helpers:rpc(Config, Leader0, erlang, exit, [Pid, kill]), + {ok, _, {_, Leader1}} = ?awaitMatch({ok, _, _}, + ra:members({RaName, Follower0}), + 1000), + ?assertNotEqual(Leader0, Leader1), + [Follower1, Follower2] = Servers -- [Leader1], + assert_active_dlx_workers(0, Config, Follower1), + assert_active_dlx_workers(0, Config, Follower2), + assert_active_dlx_workers(1, Config, Leader1). assert_active_dlx_workers(N, Config, Server) -> ?assertMatch( [_, {active, N}, _, _], - rabbit_ct_broker_helpers:rpc(Config, Server, supervisor, count_children, [rabbit_fifo_dlx_sup])). + rabbit_ct_broker_helpers:rpc(Config, Server, supervisor, count_children, [rabbit_fifo_dlx_sup], 1000)). 
%%TODO move to rabbitmq_ct_helpers/include/rabbit_assert.hrl consistently(TestObj) -> From 29714e70c72f654ad14dbfc237278888fa8d5bfb Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 14 Dec 2021 12:03:54 +0100 Subject: [PATCH 21/97] Send acks asynchronously from rabbit_fifo_dlx_worker to source quorum queue. Use 'fire and forget' semantics. We know that source quorum queue leader process is local and up and running. This results in much higher throughput. --- deps/rabbit/src/rabbit_fifo_dlx_client.erl | 11 ++++++----- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 15 +++++---------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index f4c5a0598337..2eb3d7965fb5 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -11,6 +11,12 @@ -type state() :: #state{}. -export_type([state/0]). +settle(MsgIds, #state{leader = Leader} = State) + when is_list(MsgIds) -> + Cmd = rabbit_fifo_dlx:make_settle(MsgIds), + ra:pipeline_command(Leader, Cmd), + {ok, State}. + checkout(RegName, QResource, Leader, NumUnsettled) -> Cmd = rabbit_fifo_dlx:make_checkout(RegName, NumUnsettled), State = #state{queue_resource = QResource, @@ -18,11 +24,6 @@ checkout(RegName, QResource, Leader, NumUnsettled) -> last_msg_id = -1}, process_command(Cmd, State, 5). -settle(MsgIds, State) when is_list(MsgIds) -> - Cmd = rabbit_fifo_dlx:make_settle(MsgIds), - %%TODO use pipeline_command without correlation ID, i.e. without notification - process_command(Cmd, State, 2). - process_command(_Cmd, _State, 0) -> {error, ra_command_failed}; process_command(Cmd, #state{leader = Leader} = State, Tries) -> diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 075130a51467..91664745da23 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -386,16 +386,11 @@ maybe_ack(#state{pendings = Pendings0, State; _ -> Ids = lists:map(fun(#pending{consumed_msg_id = Id}) -> Id end, maps:values(Settled)), - case rabbit_fifo_dlx_client:settle(Ids, DlxState0) of - {ok, DlxState} -> - SettledOutSeqs = maps:keys(Settled), - Pendings = maps:without(SettledOutSeqs, Pendings0), - State#state{pendings = Pendings, - dlx_client_state = DlxState}; - {error, _Reason} -> - %% Failed to ack. Ack will be retried in the next maybe_ack/1 - State - end + {ok, DlxState} = rabbit_fifo_dlx_client:settle(Ids, DlxState0), + SettledOutSeqs = maps:keys(Settled), + Pendings = maps:without(SettledOutSeqs, Pendings0), + State#state{pendings = Pendings, + dlx_client_state = DlxState} end. %% Re-deliver messages that timed out waiting on publisher confirm and From f1ec322c71acacca0565b3a32f1b5a5d8de794cb Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 14 Dec 2021 13:16:22 +0000 Subject: [PATCH 22/97] add lg:stop/0 to xref ignore list --- deps/rabbit/src/rabbit_looking_glass.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/deps/rabbit/src/rabbit_looking_glass.erl b/deps/rabbit/src/rabbit_looking_glass.erl index 97cf8a7ff892..357384894257 100644 --- a/deps/rabbit/src/rabbit_looking_glass.erl +++ b/deps/rabbit/src/rabbit_looking_glass.erl @@ -9,6 +9,7 @@ -ignore_xref([ {lg, trace, 4}, + {lg, stop, 0}, {lg_callgrind, profile_many, 3} ]). -ignore_xref([{maps, from_list, 1}]). 
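A note on the fire-and-forget settle path added to rabbit_fifo_dlx_client above: the worker previously waited for every settle command to be processed, while it now pipelines the command and carries on. A minimal sketch of the two styles, assuming a reachable Leader server id and a list of MsgIds (the function names are illustrative only, not part of the patch):

    %% Illustrative sketch, not part of the patch series.
    settle_sync(Leader, MsgIds) ->
        Cmd = rabbit_fifo_dlx:make_settle(MsgIds),
        %% round trip: returns only once the command has been applied
        ra:process_command(Leader, Cmd).

    settle_async(Leader, MsgIds) ->
        Cmd = rabbit_fifo_dlx:make_settle(MsgIds),
        %% no correlation id, so no ra_event notification comes back;
        %% acceptable here because the leader is local, monitored and
        %% known to be running
        ra:pipeline_command(Leader, Cmd).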
From fc1538b7c970b4c223dafe31609695c43678045b Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 14 Dec 2021 13:39:41 +0000 Subject: [PATCH 23/97] rabbit_fifo_prop: turn down iterations of snapshots_dlx a bit It is by far the longest running test and can timeout on ci so turn it down a bit. --- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 322cd9b28796..46d656baeafe 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -847,7 +847,7 @@ snapshots_dlx(_Config) -> collect({log_size, length(O)}, snapshots_prop(Config, O))) end) - end, [], 1000). + end, [], 500). single_active(_Config) -> Size = 300, From 711be2d1623a7ad067ea5fe7d692e838f0159976 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 14 Dec 2021 14:12:17 +0000 Subject: [PATCH 24/97] Try an even smaller size for dlx property tests. --- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 46d656baeafe..3c88e01e96cb 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -821,6 +821,7 @@ snapshots(_Config) -> end, [], 1000). snapshots_dlx(_Config) -> + Size = 256, run_proper( fun () -> ?FORALL({Length, Bytes, SingleActiveConsumer, @@ -843,11 +844,11 @@ snapshots_dlx(_Config) -> InMemoryBytes, reject_publish, at_least_once), - ?FORALL(O, ?LET(Ops, log_gen_dlx(256), expand(Ops, Config)), + ?FORALL(O, ?LET(Ops, log_gen_dlx(Size), expand(Ops, Config)), collect({log_size, length(O)}, snapshots_prop(Config, O))) end) - end, [], 500). + end, [], Size). single_active(_Config) -> Size = 300, From 4eb8eb069fb3e0baf86bcf43cfc2696007213e20 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 16 Dec 2021 21:55:29 +0100 Subject: [PATCH 25/97] Increase timeout to 15 minutes --- deps/rabbit/BUILD.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index f24602f76e98..c9e45ae1c11f 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -698,7 +698,7 @@ suites = [ ), rabbitmq_suite( name = "rabbit_fifo_prop_SUITE", - size = "medium", + size = "large", additional_beam = [ ":test_util", ], From 65dbb3bd0d6294153c541daa2ead663ec7339fd6 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 17 Dec 2021 09:52:28 +0100 Subject: [PATCH 26/97] Do not register rabbit_fifo_dlx_worker --- deps/rabbit/src/rabbit_fifo.erl | 34 +++--- deps/rabbit/src/rabbit_fifo_dlx.erl | 113 +++++++++++--------- deps/rabbit/src/rabbit_fifo_dlx.hrl | 6 +- deps/rabbit/src/rabbit_fifo_dlx_client.erl | 6 +- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 29 ++--- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 20 ++-- 6 files changed, 105 insertions(+), 103 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 221345c8d6dd..a73bab60a0e7 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -632,14 +632,19 @@ apply(#{index := Idx} = Meta, #update_config{config = Conf}, %%TODO return aux effect here and move logic over to handle_aux/6 which can return effects as last arguments. 
{State4, Effects1} = case DLH of at_least_once -> - case rabbit_fifo_dlx:consumer_pid(DlxState) of + case rabbit_fifo_dlx:local_alive_consumer_pid(DlxState) of + undefined + when Old_DLH =/= at_least_once -> + %% dead-letter-strategy changed to at-least-once. + %% Therefore, start dlx worker. + {State1, [{aux, ensure_dlx_worker}]}; undefined -> - %% Policy changed from at-most-once to at-least-once. - %% Therefore, start rabbit_fifo_dlx_worker on leader. - {State1, [{aux, start_dlx_worker}]}; + %% Do not start dlx worker twice. + %% It is about to be started, but DlxState does not reflect that yet. + {State1, []}; DlxWorkerPid -> - %% Leader already exists. - %% Notify leader of new policy. + %% rabbit_fifo_dlx_worker already exists. + %% Notify worker of new policy. Effect = {send_msg, DlxWorkerPid, lookup_topology, ra_event}, {State1, [Effect]} end; @@ -666,7 +671,7 @@ apply(#{index := Idx} = Meta, #update_config{config = Conf}, update_smallest_raft_index(Idx, Reply, State, Effects); apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> State = convert(FromVersion, ToVersion, V0State), - {State, ok, [{aux, start_dlx_worker}]}; + {State, ok, [{aux, ensure_dlx_worker}]}; %%TODO are there better approach to %% 1. matching against opaque rabbit_fifo_dlx:protocol / record (without exposing all the protocol details), and %% 2. Separate the logic running in rabbit_fifo and rabbit_fifo_dlx when dead-letter messages is acked? @@ -852,10 +857,9 @@ update_waiting_consumer_status(Node, -spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = at_least_once, - resource = QRef, - name = QName}, + resource = QRef}, dlx = DlxState} = State) -> - rabbit_fifo_dlx:state_enter(RaState, QRef, QName, DlxState), + rabbit_fifo_dlx:state_enter(RaState, QRef, DlxState), state_enter0(RaState, State); state_enter(RaState, State) -> state_enter0(RaState, State). @@ -1111,13 +1115,13 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, Err -> {reply, Err, Aux0, Log0} end; -handle_aux(leader, _, start_dlx_worker, Aux, Log, - #?MODULE{cfg = #cfg{resource = QRef, - name = QName, +handle_aux(leader, _, ensure_dlx_worker, Aux, Log, + #?MODULE{dlx = DlxState, + cfg = #cfg{resource = QRef, dead_letter_handler = at_least_once}}) -> - rabbit_fifo_dlx:start_worker(QRef, QName), + rabbit_fifo_dlx:ensure_worker_started(QRef, DlxState), {no_reply, Aux, Log}; -handle_aux(_, _, start_dlx_worker, Aux, Log, _) -> +handle_aux(_, _, ensure_dlx_worker, Aux, Log, _) -> {no_reply, Aux, Log}. eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index dc0d88ad6696..8208f9cd7da0 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -14,11 +14,11 @@ discard/3, overview/1, checkout/1, - state_enter/4, - start_worker/2, + state_enter/3, + ensure_worker_started/2, cleanup/1, purge/1, - consumer_pid/1, + local_alive_consumer_pid/1, dehydrate/1, normalize/1, stat/1]). @@ -34,7 +34,7 @@ %% It also runs its own checkout logic sending DLX messages to the DLX consumer. -record(checkout,{ - consumer :: atom(), + consumer :: pid(), prefetch :: non_neg_integer() }). -record(settle, {msg_ids :: [msg_id()]}). @@ -49,8 +49,8 @@ init() -> #?MODULE{}. -make_checkout(RegName, NumUnsettled) -> - {dlx, #checkout{consumer = RegName, +make_checkout(Pid, NumUnsettled) -> + {dlx, #checkout{consumer = Pid, prefetch = NumUnsettled }}. 
@@ -92,13 +92,13 @@ stat(#?MODULE{consumer = Con, -spec apply(command(), state()) -> {state(), ok | list()}. % TODO: refine return type -apply(#checkout{consumer = RegName, +apply(#checkout{consumer = Pid, prefetch = Prefetch}, #?MODULE{consumer = undefined} = State0) -> - State = State0#?MODULE{consumer = #dlx_consumer{registered_name = RegName, + State = State0#?MODULE{consumer = #dlx_consumer{pid = Pid, prefetch = Prefetch}}, {State, ok}; -apply(#checkout{consumer = RegName, +apply(#checkout{consumer = ConsumerPid, prefetch = Prefetch}, #?MODULE{consumer = #dlx_consumer{checked_out = CheckedOutOldConsumer}, discards = Discards0, @@ -115,7 +115,7 @@ apply(#checkout{consumer = RegName, fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} end, {Discards0, 0}, Checked1), - State = State0#?MODULE{consumer = #dlx_consumer{registered_name = RegName, + State = State0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, prefetch = Prefetch}, discards = Discards, msg_bytes = Bytes + BytesMoved, @@ -177,8 +177,8 @@ checkout0({success, _MsgId, {_Reason, ?TUPLE(_, _)}, State}, SendAcc) -> %% Therefore, here, we just check this message out to the consumer but do not re-deliver this message %% so that we will end up with the correct and deterministic state once the whole recovery log replay is completed. checkout0(checkout_one(State), SendAcc); -checkout0(#?MODULE{consumer = #dlx_consumer{registered_name = RegName}} = State, SendAcc) -> - Effects = delivery_effects(whereis(RegName), SendAcc), +checkout0(#?MODULE{consumer = #dlx_consumer{pid = Pid}} = State, SendAcc) -> + Effects = delivery_effects(Pid, SendAcc), {State, Effects}. checkout_one(#?MODULE{consumer = #dlx_consumer{checked_out = Checked, @@ -237,54 +237,47 @@ delivery_effects(CPid, {InMemMsgs, IdxMsgs0}) -> [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. -state_enter(leader, QRef, QName, _State) -> - start_worker(QRef, QName); -state_enter(_, _, _, State) -> - terminate_worker(State). - -start_worker(QRef, QName) -> - RegName = registered_name(QName), - %% We must ensure that starting the rabbit_fifo_dlx_worker succeeds. - %% Therefore, we don't use an effect. - %% Also therefore, if starting the rabbit_fifo_dlx_worker fails, let the whole Ra server process crash - %% in which case another Ra node will become leader. - %% supervisor:start_child/2 blocks until rabbit_fifo_dlx_worker:init/1 returns (TODO check if this is correct). - %% That's okay since rabbit_fifo_dlx_worker:init/1 returns immediately by delegating - %% initial setup to handle_continue/2. - case whereis(RegName) of - undefined -> - {ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef, RegName]), - rabbit_log:debug("started rabbit_fifo_dlx_worker (~s ~p)", [RegName, Pid]); - Pid -> - rabbit_log:debug("rabbit_fifo_dlx_worker (~s ~p) already started", [RegName, Pid]) +state_enter(leader, QRef, State) -> + ensure_worker_started(QRef, State); +state_enter(_, _, State) -> + ensure_worker_terminated(State). + +ensure_worker_started(QRef, #?MODULE{consumer = undefined}) -> + start_worker(QRef); +ensure_worker_started(QRef, #?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> + case is_local_and_alive(Pid) of + true -> + rabbit_log:debug("rabbit_fifo_dlx_worker ~p already started for ~s", + [Pid, rabbit_misc:rs(QRef)]); + false -> + start_worker(QRef) end. 
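The clause above leans on is_local_and_alive/1, defined further down in this file: its node(Pid) =:= node() guard is load-bearing, because erlang:is_process_alive/1 is only defined for pids on the local node and exits with badarg for a remote pid. Spelled out as a sketch (the function name is illustrative):

    %% Only call is_process_alive/1 for local pids; a worker pid living on
    %% another node simply counts as "not started here".
    alive_locally(Pid) ->
        node(Pid) =:= node() andalso is_process_alive(Pid).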
-terminate_worker(#?MODULE{consumer = #dlx_consumer{registered_name = RegName}}) -> - case whereis(RegName) of - undefined -> - ok; - Pid -> +%% Ensure that starting the rabbit_fifo_dlx_worker succeeds. +%% Therefore, do not use an effect. +%% Also therefore, if starting the rabbit_fifo_dlx_worker fails, let the +%% Ra server process crash in which case another Ra node will become leader. +start_worker(QRef) -> + {ok, Pid} = supervisor:start_child(rabbit_fifo_dlx_sup, [QRef]), + rabbit_log:debug("started rabbit_fifo_dlx_worker ~p for ~s", + [Pid, rabbit_misc:rs(QRef)]). + +ensure_worker_terminated(#?MODULE{consumer = undefined}) -> + ok; +ensure_worker_terminated(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> + case is_local_and_alive(Pid) of + true -> %% Note that we can't return a mod_call effect here because mod_call is executed on the leader only. ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid), - rabbit_log:debug("terminated rabbit_fifo_dlx_worker (~s ~p)", [RegName, Pid]) - end; -terminate_worker(_) -> - ok. - -%% TODO consider not registering the worker name at all -%% because if there is a new worker process, it will always subscribe and tell us its new pid -registered_name(QName) when is_atom(QName) -> - list_to_atom(atom_to_list(QName) ++ "_dlx"). - -consumer_pid(#?MODULE{consumer = #dlx_consumer{registered_name = Name}}) -> - whereis(Name); -consumer_pid(_) -> - undefined. + rabbit_log:debug("terminated rabbit_fifo_dlx_worker ~p", [Pid]); + false -> + ok + end. %% called when switching from at-least-once to at-most-once cleanup(#?MODULE{consumer = Consumer, discards = Discards} = State) -> - terminate_worker(State), + ensure_worker_terminated(State), %% Return messages in the order they got discarded originally %% for the final at-most-once dead-lettering. CheckedReasonMsgs = case Consumer of @@ -345,3 +338,19 @@ dehydrate_consumer(undefined) -> normalize(#?MODULE{discards = Discards} = State) -> State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards))}. + +local_alive_consumer_pid(#?MODULE{consumer = undefined}) -> + undefined; +local_alive_consumer_pid(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> + case is_local_and_alive(Pid) of + true -> + Pid; + false -> + undefined + end. + +is_local_and_alive(Pid) + when node(Pid) =:= node() -> + is_process_alive(Pid); +is_local_and_alive(_) -> + false. diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index 5d8c023f9e8c..7604a481f021 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -11,14 +11,10 @@ %% We don't require a consumer tag because a consumer tag is a means to distinguish %% multiple consumers in the same channel. The rabbit_fifo_dlx_worker channel like process however %% creates only a single consumer to this quorum queue's discards queue. - registered_name :: atom(), + pid :: pid(), prefetch :: non_neg_integer(), checked_out = #{} :: #{msg_id() => {reason(), indexed_msg()}}, next_msg_id = 0 :: msg_id() % part of snapshot data - % total number of checked out messages - ever - % incremented for each delivery - % delivery_count = 0 :: non_neg_integer(), - % status = up :: up | suspected_down | cancelled }). -record(rabbit_fifo_dlx,{ diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index 2eb3d7965fb5..141926532083 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -1,6 +1,6 @@ -module(rabbit_fifo_dlx_client). 
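One assumption worth spelling out for start_worker/1 above: supervisor:start_child(rabbit_fifo_dlx_sup, [QRef]) passes a plain argument list, a calling convention that only exists for simple_one_for_one supervisors, where the extra arguments are appended to the child's start MFA. The real rabbit_fifo_dlx_sup is not shown in this excerpt; a compatible supervisor would look roughly like the following sketch (module name, restart policy and restart intensity are guesses):

    -module(rabbit_fifo_dlx_sup_sketch).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        supervisor:start_link({local, rabbit_fifo_dlx_sup}, ?MODULE, []).

    init([]) ->
        SupFlags = #{strategy => simple_one_for_one,
                     intensity => 1,
                     period => 5},
        %% start_child(rabbit_fifo_dlx_sup, [QRef]) appends [QRef] to the
        %% start args below, i.e. rabbit_fifo_dlx_worker:start_link(QRef)
        Worker = #{id => rabbit_fifo_dlx_worker,
                   start => {rabbit_fifo_dlx_worker, start_link, []},
                   restart => temporary,
                   type => worker},
        {ok, {SupFlags, [Worker]}}.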
--export([checkout/4, settle/2, handle_ra_event/3, +-export([checkout/3, settle/2, handle_ra_event/3, overview/1]). -record(state,{ @@ -17,8 +17,8 @@ settle(MsgIds, #state{leader = Leader} = State) ra:pipeline_command(Leader, Cmd), {ok, State}. -checkout(RegName, QResource, Leader, NumUnsettled) -> - Cmd = rabbit_fifo_dlx:make_checkout(RegName, NumUnsettled), +checkout(QResource, Leader, NumUnsettled) -> + Cmd = rabbit_fifo_dlx:make_checkout(self(), NumUnsettled), State = #state{queue_resource = QResource, leader = Leader, last_msg_id = -1}, diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 91664745da23..39ec57d7bb3b 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -18,8 +18,8 @@ -behaviour(gen_server). --export([start_link/2]). -%% gen_server2 callbacks +-export([start_link/1]). +%% gen_server callbacks -export([init/1, terminate/2, handle_continue/2, handle_cast/2, handle_call/3, handle_info/2, code_change/3, format_status/2]). @@ -62,7 +62,6 @@ }). -record(state, { - registered_name :: atom(), %% There is one rabbit_fifo_dlx_worker per source quorum queue %% (if dead-letter-strategy at-least-once is used). queue_ref :: rabbit_amqqueue:name(), @@ -95,17 +94,15 @@ %%TODO add metrics like global counters for messages routed, delivered, etc. -start_link(QRef, RegName) -> - gen_server:start_link({local, RegName}, - ?MODULE, {QRef, RegName}, - [{hibernate_after, ?HIBERNATE_AFTER}]). +start_link(QRef) -> + gen_server:start_link(?MODULE, QRef, [{hibernate_after, ?HIBERNATE_AFTER}]). -% -spec init({rabbit_amqqueue:name(), atom()}) -> -% {ok, undefined, {continue, {rabbit_amqqueue:name(), atom()}}}. -init(Arg) -> - {ok, undefined, {continue, Arg}}. +% -spec init(rabbit_amqqueue:name()) -> +% {ok, undefined, {continue, rabbit_amqqueue:name()}}}. +init(QRef) -> + {ok, undefined, {continue, QRef}}. -handle_continue({QRef, RegName}, undefined) -> +handle_continue(QRef, undefined) -> Prefetch = application:get_env(rabbit, dead_letter_worker_consumer_prefetch, ?DEFAULT_PREFETCH), @@ -113,13 +110,11 @@ handle_continue({QRef, RegName}, undefined) -> dead_letter_worker_publisher_confirm_timeout_ms, ?DEFAULT_SETTLE_TIMEOUT), State = lookup_topology(#state{queue_ref = QRef, - registered_name = RegName, queue_type_state = rabbit_queue_type:init(), settle_timeout = SettleTimeout}), {ok, Q} = rabbit_amqqueue:lookup(QRef), {ClusterName, _MaybeOldLeaderNode} = amqqueue:get_pid(Q), - {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(RegName, - QRef, + {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(QRef, {ClusterName, node()}, Prefetch), MonitorRef = erlang:monitor(process, ClusterName), @@ -561,7 +556,6 @@ maybe_cancel_timer(#state{timer = TRef, %% Avoids large message contents being logged. 
format_status(_Opt, [_PDict, #state{ - registered_name = RegisteredName, queue_ref = QueueRef, exchange_ref = ExchangeRef, routing_key = RoutingKey, @@ -571,8 +565,7 @@ format_status(_Opt, [_PDict, #state{ next_out_seq = NextOutSeq, timer = Timer }]) -> - S = #{registered_name => RegisteredName, - queue_ref => QueueRef, + S = #{queue_ref => QueueRef, exchange_ref => ExchangeRef, routing_key => RoutingKey, dlx_client_state => rabbit_fifo_dlx_client:overview(DlxClientState), diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 3c88e01e96cb..3d38b9cd00f6 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1083,7 +1083,7 @@ dlx_01(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), Commands = [ - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_checkout(ignore_pid, 1), make_checkout(C1, {auto,1,simple_prefetch}), make_enqueue(E,1,msg(<<"1">>)), make_enqueue(E,2,msg(<<"2">>)), @@ -1102,7 +1102,7 @@ dlx_02(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), Commands = [ - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_checkout(ignore_pid, 1), make_checkout(C1, {auto,1,simple_prefetch}), make_enqueue(E,1,msg(<<"1">>)), %% State contains release cursor A. @@ -1131,7 +1131,7 @@ dlx_03(_Config) -> make_enqueue(E,2,msg(<<"2">>)), %% State contains release cursor B. %% 1st message sitting in discards queue got dehydrated. - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_checkout(ignore_pid, 1), rabbit_fifo_dlx:make_settle([0]) %% Release cursor A got emitted. ], @@ -1144,7 +1144,7 @@ dlx_04(_Config) -> C1 = {<<>>, C1Pid}, E = c:pid(0,176,1), Commands = [ - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 3), + rabbit_fifo_dlx:make_checkout(ignore_pid, 3), make_enqueue(E,1,msg(<<>>)), make_enqueue(E,2,msg(<<>>)), make_enqueue(E,3,msg(<<>>)), @@ -1174,7 +1174,7 @@ dlx_05(_Config) -> make_enqueue(E,3,msg(<<"msg3">>)), %% 0 in discards (rabbit_fifo_dlx msg_bytes is still 0 because body of msg 0 is empty), %% 1 in checkout, 2 in messages - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_checkout(ignore_pid, 1), %% 0 in dlx_checkout, 1 in checkout, 2 in messages make_settle(C1, [1]), %% 0 in dlx_checkout, 2 in checkout @@ -1201,7 +1201,7 @@ dlx_06(_Config) -> make_enqueue(E,2,msg(<<"111">>)), make_enqueue(E,3,msg(<<>>)), %% 0,1,2 in messages - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 2), + rabbit_fifo_dlx:make_checkout(ignore_pid, 2), make_checkout(C1, {auto,3,simple_prefetch}), %% 0,1,2 in checkout rabbit_fifo:make_discard(C1, [0,1,2]), @@ -1227,11 +1227,11 @@ dlx_07(_Config) -> %% 0 in discard, 1 in checkout rabbit_fifo:make_discard(C1, [1]), %% 0, 1 in discard - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_checkout(ignore_pid, 1), %% 0 in dlx_checkout, 1 in discard make_enqueue(E,3,msg(<<"123">>)), %% 0 in dlx_checkout, 1 in discard, 2 in checkout - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 2), + rabbit_fifo_dlx:make_checkout(ignore_pid, 2), %% 0,1 in dlx_checkout, 2 in checkout rabbit_fifo_dlx:make_settle([0]), %% 1 in dlx_checkout, 2 in checkout @@ -1280,7 +1280,7 @@ dlx_08(_Config) -> make_enqueue(E,9,msg(<<>>)), rabbit_fifo:make_discard(C1, [6]), rabbit_fifo:make_discard(C1, [7]), - rabbit_fifo_dlx:make_checkout(my_dlx_worker, 1), + rabbit_fifo_dlx:make_checkout(ignore_pid, 1), make_enqueue(E,10,msg(<<>>)), rabbit_fifo:make_discard(C1, [8]), 
rabbit_fifo_dlx:make_settle([0]), @@ -1814,7 +1814,7 @@ handle_op({update_config, Changes}, #t{config = Conf} = T) -> Config = maps:merge(Conf, Changes), do_apply(rabbit_fifo:make_update_config(Config), T); handle_op({checkout_dlx, Prefetch}, #t{config = #{dead_letter_handler := at_least_once}} = T) -> - Cmd = rabbit_fifo_dlx:make_checkout(proper_dlx_worker, Prefetch), + Cmd = rabbit_fifo_dlx:make_checkout(ignore_pid, Prefetch), do_apply(Cmd, T). From 517fcd9ac226482c801b0d7ddcb44b319867daf2 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 20 Dec 2021 08:55:24 +0000 Subject: [PATCH 27/97] QQ: optimise expire messages By not doing it for each checkout as a timer has been set anyway. Also introduces a peek_next_message function that doesn't modify then throw away the internal queues as this can be expensive in some cases. --- deps/rabbit/src/rabbit_fifo.erl | 57 +++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 17 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index a73bab60a0e7..1219dd28d60e 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -484,8 +484,9 @@ apply(#{index := Index}, #purge{}, update_smallest_raft_index(Index, Reply, State, Effects); apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); -apply(Meta, {timeout, expire_msgs}, State) -> - checkout(Meta, State, State, [], false); +apply(#{system_time := Ts} = Meta, {timeout, expire_msgs}, State0) -> + {State, Effects} = expire_msgs(Ts, State0, []), + checkout(Meta, State, State, Effects, false); apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, #?MODULE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}, @@ -1284,7 +1285,8 @@ query_peek(Pos, State0) when Pos > 0 -> query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> MaxActivePriority = maps:fold(fun(_, #consumer{credit = C, status = up, - priority = P0}, MaxP) when C > 0 -> + priority = P0}, MaxP) + when C > 0 -> P = -P0, case MaxP of empty -> P; @@ -2084,6 +2086,27 @@ take_next_msg(#?MODULE{returns = Returns0, end end. +peek_next_msg(#?MODULE{prefix_msgs = {_NumR, [Msg | _], + _NumP, _P}}) -> + %% there are prefix returns, these should be served first + {value, Msg}; +peek_next_msg(#?MODULE{returns = Returns0, + messages = Messages0, + prefix_msgs = {_NumR, _R, _NumP, P}}) -> + case lqueue:peek(Returns0) of + {value, _} = Msg -> + Msg; + empty when P == [] -> + lqueue:peek(Messages0); + empty -> + case P of + [?PREFIX_MEM_MSG(_Header) = Msg | _] -> + {value, Msg}; + [?DISK_MSG(_Header) = Msg | _] -> + {value, Msg} + end + end. + delivery_effect({CTag, CPid}, [], InMemMsgs) -> {send_msg, CPid, {delivery, CTag, lists:reverse(InMemMsgs)}, [local, ra_event]}; @@ -2115,13 +2138,13 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> {dequeue, {MsgId, {Header, Msg}}, Ready}}}] end}. -checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> +checkout_one(#{system_time := _Ts} = Meta, InitState0, Effects1) -> %% Before checking out any messsage to any consumer, %% first remove all expired messages from the head of the queue. 
- {#?MODULE{service_queue = SQ0, - messages = Messages0, - consumers = Cons0} = InitState, Effects1} = - expire_msgs(Ts, InitState0, Effects0), + #?MODULE{service_queue = SQ0, + messages = Messages0, + consumers = Cons0} = InitState = InitState0, + % expire_msgs(Ts, InitState0, Effects0), case priority_queue:out(SQ0) of {{value, ConsumerId}, SQ1} when is_map_key(ConsumerId, Cons0) -> @@ -2129,11 +2152,14 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> {ConsumerMsg, State0} -> %% there are consumers waiting to be serviced %% process consumer checkout - case maps:get(ConsumerId, Cons0, error) of + case maps:get(ConsumerId, Cons0) of #consumer{credit = 0} -> %% no credit but was still on queue %% can happen when draining %% recurse without consumer on queue + %% NB: these retry cases introduce the "queue list reversal" + %% inefficiency but this is a rare thing to happen + %% so should not need optimising checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = cancelled} -> checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); @@ -2161,10 +2187,7 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> subtract_in_memory_counts( Header, add_bytes_checkout(Header, State1)) end, - {success, ConsumerId, Next, ConsumerMsg, State, Effects1}; - error -> - %% consumer did not exist but was queued, recurse - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1) + {success, ConsumerId, Next, ConsumerMsg, State, Effects1} end; empty -> {nochange, InitState, Effects1} @@ -2185,7 +2208,8 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> %% dequeue all expired messages expire_msgs(RaCmdTs, State0, Effects0) -> case take_next_msg(State0) of - {?INDEX_MSG(Idx, ?MSG(#{expiry := Expiry} = Header, _) = Msg) = FullMsg, State1} + {?INDEX_MSG(Idx, ?MSG(#{expiry := Expiry} = Header, _) = Msg) = FullMsg, + State1} when RaCmdTs >= Expiry -> #?MODULE{dlx = DlxState0, cfg = #cfg{dead_letter_handler = DLH}, @@ -2239,8 +2263,8 @@ expire_prefix_msg(Msg, Header, State0) -> end. timer_effect(RaCmdTs, State, Effects) -> - T = case take_next_msg(State) of - {?INDEX_MSG(_, ?MSG(#{expiry := Expiry}, _)), _} + T = case peek_next_msg(State) of + {value, ?INDEX_MSG(_, ?MSG(#{expiry := Expiry}, _))} when is_number(Expiry) -> %% Next message contains 'expiry' header. %% (Re)set timer so that mesage will be dropped or dead-lettered on time. @@ -2661,7 +2685,6 @@ smallest_raft_index(#?MODULE{cfg = _Cfg, {I, State} end; _ -> - %% TODO: could be inefficent if there is no front list case lqueue:peek(Messages) of {value, ?INDEX_MSG(I, _)} -> {I, State}; From d733942761ffa58093a728af86f215975dc418c8 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 20 Dec 2021 14:08:21 +0000 Subject: [PATCH 28/97] QQ: add back expire before checkout But optimised to use peek instead. --- deps/rabbit/src/rabbit_fifo.erl | 21 +++++----- deps/rabbit/test/quorum_queue_SUITE.erl | 52 ++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 11 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 1219dd28d60e..af39f67b4de6 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -2138,13 +2138,14 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> {dequeue, {MsgId, {Header, Msg}}, Ready}}}] end}. 
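The peek_next_msg/1 helper introduced above lets the expiry and timer code inspect the head of the queue without building a dequeued copy that is immediately thrown away. Assuming lqueue mirrors the stdlib queue API, which the calls above suggest, the difference is roughly:

    %% Illustrative only.
    head_only(Q) ->
        %% peek/1 returns {value, Head} | empty and leaves Q untouched
        lqueue:peek(Q).

    head_and_rest(Q0) ->
        %% out/1 also constructs the queue minus its head, wasted work when
        %% the caller only wants to look at the head element
        case lqueue:out(Q0) of
            {{value, Head}, Q} -> {Head, Q};
            {empty, Q}         -> {empty, Q}
        end.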
-checkout_one(#{system_time := _Ts} = Meta, InitState0, Effects1) -> +checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> %% Before checking out any messsage to any consumer, %% first remove all expired messages from the head of the queue. - #?MODULE{service_queue = SQ0, + {#?MODULE{service_queue = SQ0, messages = Messages0, - consumers = Cons0} = InitState = InitState0, - % expire_msgs(Ts, InitState0, Effects0), + consumers = Cons0} = InitState, Effects1} = + expire_msgs(Ts, InitState0, Effects0), + case priority_queue:out(SQ0) of {{value, ConsumerId}, SQ1} when is_map_key(ConsumerId, Cons0) -> @@ -2207,10 +2208,10 @@ checkout_one(#{system_time := _Ts} = Meta, InitState0, Effects1) -> %% dequeue all expired messages expire_msgs(RaCmdTs, State0, Effects0) -> - case take_next_msg(State0) of - {?INDEX_MSG(Idx, ?MSG(#{expiry := Expiry} = Header, _) = Msg) = FullMsg, - State1} + case peek_next_msg(State0) of + {value, ?INDEX_MSG(Idx, ?MSG(#{expiry := Expiry} = Header, _) = Msg) = FullMsg} when RaCmdTs >= Expiry -> + {_, State1} = take_next_msg(State0), #?MODULE{dlx = DlxState0, cfg = #cfg{dead_letter_handler = DLH}, ra_indexes = Indexes0} = State2 = add_bytes_drop(Header, State1), @@ -2233,12 +2234,14 @@ expire_msgs(RaCmdTs, State0, Effects0) -> State = State4#?MODULE{ra_indexes = Indexes}, expire_msgs(RaCmdTs, State, Effects) end; - {?PREFIX_MEM_MSG(#{expiry := Expiry} = Header) = Msg, State1} + {value, ?PREFIX_MEM_MSG(#{expiry := Expiry} = Header) = Msg} when RaCmdTs >= Expiry -> + {_, State1} = take_next_msg(State0), State2 = expire_prefix_msg(Msg, Header, State1), expire_msgs(RaCmdTs, State2, Effects0); - {?DISK_MSG(#{expiry := Expiry} = Header) = Msg, State1} + {value, ?DISK_MSG(#{expiry := Expiry} = Header) = Msg} when RaCmdTs >= Expiry -> + {_, State1} = take_next_msg(State0), State2 = expire_prefix_msg(Msg, Header, State1), expire_msgs(RaCmdTs, State2, Effects0); _ -> diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index ced1b2556d41..6f48e6c55348 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -139,6 +139,7 @@ all_tests() -> peek, message_ttl, per_message_ttl, + per_message_ttl_mixed_expiry, consumer_priorities, cancel_consumer_gh_3729 ]. @@ -2609,8 +2610,7 @@ per_message_ttl(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-message-ttl">>, long, 2000}])), + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), Msg1 = <<"msg1">>, @@ -2625,6 +2625,54 @@ per_message_ttl(Config) -> wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), ok. 
+per_message_ttl_mixed_expiry(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + Msg1 = <<"msg1">>, + Msg2 = <<"msg2">>, + + %% message with no expiration + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = QQ}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}, + payload = Msg1}), + %% followed by message with expiration + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = QQ}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2, + expiration = <<"500">>}, + payload = Msg2}), + + + wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), + timer:sleep(1000), + wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), + subscribe(Ch, QQ, false), + receive + {#'basic.deliver'{delivery_tag = DeliveryTag}, + #amqp_msg{payload = Msg1}} -> + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag, + multiple = false}) + after 2000 -> + flush(10), + ct:fail("basic deliver timeout") + end, + + %% the second message should NOT be received as it has expired + receive + {#'basic.deliver'{}, #amqp_msg{payload = Msg2}} -> + flush(10), + ct:fail("unexpected delivery") + after 500 -> + ok + end, + ok. + in_memory(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), From 2f2710f93930a7bfed24c50b0ce2a5eda6f353e1 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 27 Dec 2021 16:10:23 +0100 Subject: [PATCH 29/97] Simplify rabbit_fifo by moving dead-letter strategies to rabbit_fifo_dlx. This removes conditional statements from rabbit_fifo. rabbit_fifo_dlx handles now dead-letter strategies * at-least-once * at-most-once * undefined / none --- deps/rabbit/BUILD.bazel | 3 + deps/rabbit/src/rabbit_dead_letter.erl | 1 + deps/rabbit/src/rabbit_fifo.erl | 453 +++++++------------- deps/rabbit/src/rabbit_fifo.hrl | 1 + deps/rabbit/src/rabbit_fifo_dlx.erl | 379 ++++++++++------ deps/rabbit/src/rabbit_fifo_dlx.hrl | 2 +- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 6 +- deps/rabbit/src/rabbit_quorum_queue.erl | 7 +- deps/rabbit/test/rabbit_fifo_SUITE.erl | 3 +- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 12 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 4 +- 11 files changed, 427 insertions(+), 444 deletions(-) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index c9e45ae1c11f..930ced215895 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -723,6 +723,9 @@ suites = [ "src/rabbit_fifo.hrl", "src/rabbit_fifo_dlx.hrl", ], + deps = [ + "//deps/rabbit_common:bazel_erlang_lib", + ], ), rabbitmq_integration_suite( PACKAGE, diff --git a/deps/rabbit/src/rabbit_dead_letter.erl b/deps/rabbit/src/rabbit_dead_letter.erl index c3865d31b696..f85845b29c9f 100644 --- a/deps/rabbit/src/rabbit_dead_letter.erl +++ b/deps/rabbit/src/rabbit_dead_letter.erl @@ -17,6 +17,7 @@ %%---------------------------------------------------------------------------- -type reason() :: 'expired' | 'rejected' | 'maxlen' | delivery_limit. +-export_type([reason/0]). 
%%---------------------------------------------------------------------------- diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index af39f67b4de6..e8c952f6b11b 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -70,7 +70,13 @@ make_purge/0, make_purge_nodes/1, make_update_config/1, - make_garbage_collection/0 + make_garbage_collection/0, + + %% called by rabbit_fifo_dlx + checkout/5, + update_smallest_raft_index/3, + delete_indexes/2, + subtract_in_memory/2 ]). %% command records representing all the protocol actions that are supported @@ -121,7 +127,7 @@ -type client_msg() :: delivery(). %% the messages `rabbit_fifo' can send to consumers. --opaque state() :: #?MODULE{}. +-type state() :: #?MODULE{}. -export_type([protocol/0, delivery/0, @@ -225,41 +231,22 @@ apply(Meta, end; apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons, - dlx = DlxState0, - cfg = #cfg{dead_letter_handler = DLH}} = State) -> + #?MODULE{consumers = Cons} = State0) -> case Cons of #{ConsumerId := #consumer{checked_out = Checked} = Con} -> - case DLH of - at_least_once -> - DlxState = lists:foldl( - fun(MsgId, S) -> - case maps:find(MsgId, Checked) of - {ok, Msg} -> - rabbit_fifo_dlx:discard(Msg, rejected, S); - error -> - S - end - end, DlxState0, MsgIds), - complete_and_checkout(Meta, MsgIds, ConsumerId, Con, - [], State#?MODULE{dlx = DlxState}, false); - _ -> - % Discarded maintains same order as MsgIds (so that publishing to - % dead-letter exchange will be in same order as messages got rejected) - Discarded = lists:filtermap(fun(Id) -> - case maps:find(Id, Checked) of - {ok, Msg} -> - {true, Msg}; - error -> - false - end - end, MsgIds), - Effects = dead_letter_effects(rejected, Discarded, State, []), - complete_and_checkout(Meta, MsgIds, ConsumerId, Con, - Effects, State, true) - end; + % Publishing to dead-letter exchange must maintain same order as messages got rejected. 
+ DiscardMsgs = lists:filtermap(fun(Id) -> + case maps:find(Id, Checked) of + {ok, Msg} -> + {true, Msg}; + error -> + false + end + end, MsgIds), + {State, Effects, Delete} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, State0), + complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State, Delete); _ -> - {State, ok} + {State0, ok} end; apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, #?MODULE{consumers = Cons0} = State) -> @@ -447,8 +434,7 @@ apply(#{index := Index}, #purge{}, #?MODULE{messages_total = Tot, returns = Returns, messages = Messages, - ra_indexes = Indexes0, - dlx = DlxState0} = State0) -> + ra_indexes = Indexes0} = State0) -> NumReady = messages_ready(State0), Indexes1 = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); @@ -460,7 +446,7 @@ apply(#{index := Index}, #purge{}, (_, Acc) -> Acc end, Indexes1, lqueue:to_list(Messages)), - {DlxState, DiscardMsgs} = rabbit_fifo_dlx:purge(DlxState0), + {State1, DiscardMsgs} = rabbit_fifo_dlx:purge(State0), Indexes = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> @@ -468,11 +454,10 @@ apply(#{index := Index}, #purge{}, end, Indexes2, DiscardMsgs), NumPurged = NumReady + length(DiscardMsgs), - State1 = State0#?MODULE{ra_indexes = Indexes, + State2 = State1#?MODULE{ra_indexes = Indexes, messages = lqueue:new(), messages_total = Tot - NumPurged, returns = lqueue:new(), - dlx = DlxState, msg_bytes_enqueue = 0, prefix_msgs = {0, [], 0, []}, msg_bytes_in_memory = 0, @@ -480,7 +465,7 @@ apply(#{index := Index}, #purge{}, Effects0 = [garbage_collection], Reply = {purge, NumPurged}, {State, _, Effects} = evaluate_limit(Index, false, State0, - State1, Effects0), + State2, Effects0), update_smallest_raft_index(Index, Reply, State, Effects); apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); @@ -624,78 +609,16 @@ apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> purge_node(Meta, Node, S, E) end, {State0, []}, Nodes), update_smallest_raft_index(Idx, ok, State, Effects); -apply(#{index := Idx} = Meta, #update_config{config = Conf}, - #?MODULE{cfg = #cfg{dead_letter_handler = Old_DLH}} = State0) -> - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState, - ra_indexes = Indexes0, - messages_total = Tot} = State1 = update_config(Conf, State0), - %%TODO return aux effect here and move logic over to handle_aux/6 which can return effects as last arguments. - {State4, Effects1} = case DLH of - at_least_once -> - case rabbit_fifo_dlx:local_alive_consumer_pid(DlxState) of - undefined - when Old_DLH =/= at_least_once -> - %% dead-letter-strategy changed to at-least-once. - %% Therefore, start dlx worker. - {State1, [{aux, ensure_dlx_worker}]}; - undefined -> - %% Do not start dlx worker twice. - %% It is about to be started, but DlxState does not reflect that yet. - {State1, []}; - DlxWorkerPid -> - %% rabbit_fifo_dlx_worker already exists. - %% Notify worker of new policy. - Effect = {send_msg, DlxWorkerPid, lookup_topology, ra_event}, - {State1, [Effect]} - end; - _ when Old_DLH =:= at_least_once -> - %% Cleanup any remaining messages stored by rabbit_fifo_dlx - %% by either dropping or at-most-once dead-lettering. 
- ReasonMsgs = rabbit_fifo_dlx:cleanup(DlxState), - Len = length(ReasonMsgs), - rabbit_log:debug("Cleaning up ~b dead-lettered messages " - "since dead_letter_handler changed from ~s to ~p", - [Len, Old_DLH, DLH]), - Effects0 = dead_letter_effects(undefined, ReasonMsgs, State1, []), - {_, Msgs} = lists:unzip(ReasonMsgs), - Indexes = delete_indexes(Msgs, Indexes0), - State2 = subtract_in_memory(Msgs, State1), - State3 = State2#?MODULE{dlx = rabbit_fifo_dlx:init(), - ra_indexes = Indexes, - messages_total = Tot - Len}, - {State3, Effects0}; - _ -> - {State1, []} - end, - {State, Reply, Effects} = checkout(Meta, State0, State4, Effects1), +apply(#{index := Idx} = Meta, #update_config{config = Conf}, State0) -> + {State1, Effects0} = rabbit_fifo_dlx:update_config(Conf, State0), + State2 = update_config(Conf, State1), + {State, Reply, Effects} = checkout(Meta, State0, State2, Effects0), update_smallest_raft_index(Idx, Reply, State, Effects); apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> State = convert(FromVersion, ToVersion, V0State), - {State, ok, [{aux, ensure_dlx_worker}]}; -%%TODO are there better approach to -%% 1. matching against opaque rabbit_fifo_dlx:protocol / record (without exposing all the protocol details), and -%% 2. Separate the logic running in rabbit_fifo and rabbit_fifo_dlx when dead-letter messages is acked? -apply(#{index := IncomingRaftIdx} = Meta, {dlx, Cmd}, - #?MODULE{dlx = DlxState0, - messages_total = Total0, - ra_indexes = Indexes0} = State0) -> - case rabbit_fifo_dlx:apply(Cmd, DlxState0) of - {DlxState, ok} -> - State1 = State0#?MODULE{dlx = DlxState}, - %% Run a checkout so that a new DLX consumer will be delivered discarded messages - %% directly after it subscribes. - checkout(Meta, State0, State1, [], false); - {DlxState, AckedMsgs} -> - Indexes = delete_indexes(AckedMsgs, Indexes0), - Total = Total0 - length(AckedMsgs), - State1 = subtract_in_memory(AckedMsgs, State0), - State2 = State1#?MODULE{dlx = DlxState, - messages_total = Total, - ra_indexes = Indexes}, - {State, ok, Effects} = checkout(Meta, State0, State2, [], false), - update_smallest_raft_index(IncomingRaftIdx, State, Effects) - end; + {State, ok, [{aux, {dlx, setup}}]}; +apply(Meta, {dlx, _} = Cmd, State) -> + rabbit_fifo_dlx:apply(Meta, Cmd, State); apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), @@ -857,23 +780,20 @@ update_waiting_consumer_status(Node, Consumer#consumer.status =/= cancelled]. -spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). -state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = at_least_once, - resource = QRef}, - dlx = DlxState} = State) -> - rabbit_fifo_dlx:state_enter(RaState, QRef, DlxState), - state_enter0(RaState, State); state_enter(RaState, State) -> - state_enter0(RaState, State). + Effects = rabbit_fifo_dlx:state_enter(RaState, State), + state_enter0(RaState, State, Effects). 
state_enter0(leader, #?MODULE{consumers = Cons, - enqueuers = Enqs, - waiting_consumers = WaitingConsumers, - cfg = #cfg{name = Name, - resource = Resource, - become_leader_handler = BLH}, - prefix_msgs = {0, [], 0, []} - } = State) -> - TimerEffs = timer_effect(erlang:system_time(millisecond), State, []), + enqueuers = Enqs, + waiting_consumers = WaitingConsumers, + cfg = #cfg{name = Name, + resource = Resource, + become_leader_handler = BLH}, + prefix_msgs = {0, [], 0, []} + } = State, + Effects0) -> + TimerEffs = timer_effect(erlang:system_time(millisecond), State, Effects0), % return effects to monitor all current consumers and enqueuers Pids = lists:usort(maps:keys(Enqs) ++ [P || {_, P} <- maps:keys(Cons)] @@ -881,7 +801,6 @@ state_enter0(leader, #?MODULE{consumers = Cons, Mons = [{monitor, process, P} || P <- Pids], Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), - %% TODO reissue timer effect if head of message queue has expiry header set FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}], Effects = TimerEffs ++ Mons ++ Nots ++ NodeMons ++ FHReservation, case BLH of @@ -891,34 +810,35 @@ state_enter0(leader, #?MODULE{consumers = Cons, [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] end; state_enter0(eol, #?MODULE{enqueuers = Enqs, - consumers = Custs0, - waiting_consumers = WaitingConsumers0}) -> + consumers = Custs0, + waiting_consumers = WaitingConsumers0}, + Effects) -> Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end, #{}, WaitingConsumers0), AllConsumers = maps:merge(Custs, WaitingConsumers1), [{send_msg, P, eol, ra_event} || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ - [{aux, eol}, - {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}]; -state_enter0(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> + [{aux, eol}, + {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []} | Effects]; +state_enter0(State, #?MODULE{cfg = #cfg{resource = _Resource}}, Effects) + when State =/= leader -> FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, - [FHReservation]; -state_enter0(_, _) -> + [FHReservation | Effects]; +state_enter0(_, _, Effects) -> %% catch all as not handling all states - []. + Effects. -spec tick(non_neg_integer(), state()) -> ra_machine:effects(). 
tick(Ts, #?MODULE{cfg = #cfg{name = Name, resource = QName}, - msg_bytes_enqueue = EnqueueBytes, - msg_bytes_checkout = CheckoutBytes, - dlx = DlxState} = State) -> + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes} = State) -> case is_expired(Ts, State) of true -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; false -> - {_, MsgBytesDiscard} = rabbit_fifo_dlx:stat(DlxState), + {_, MsgBytesDiscard} = rabbit_fifo_dlx:stat(State), Metrics = {Name, messages_ready(State), num_checked_out(State), % checked out @@ -936,7 +856,6 @@ overview(#?MODULE{consumers = Cons, enqueuers = Enqs, release_cursors = Cursors, enqueue_count = EnqCount, - dlx = DlxState, msgs_ready_in_memory = InMemReady, msg_bytes_in_memory = InMemBytes, msg_bytes_enqueue = EnqueueBytes, @@ -971,7 +890,7 @@ overview(#?MODULE{consumers = Cons, checkout_message_bytes => CheckoutBytes, in_memory_message_bytes => InMemBytes, smallest_raft_index => Smallest}, - DlxOverview = rabbit_fifo_dlx:overview(DlxState), + DlxOverview = rabbit_fifo_dlx:overview(State), maps:merge(Overview, DlxOverview). -spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> @@ -1116,13 +1035,8 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, Err -> {reply, Err, Aux0, Log0} end; -handle_aux(leader, _, ensure_dlx_worker, Aux, Log, - #?MODULE{dlx = DlxState, - cfg = #cfg{resource = QRef, - dead_letter_handler = at_least_once}}) -> - rabbit_fifo_dlx:ensure_worker_started(QRef, DlxState), - {no_reply, Aux, Log}; -handle_aux(_, _, ensure_dlx_worker, Aux, Log, _) -> +handle_aux(RaState, _, {dlx, Cmd}, Aux0, Log, State) -> + Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, State), {no_reply, Aux, Log}. eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, @@ -1268,8 +1182,8 @@ query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes, msgs_ready_in_memory = Length}) -> {Length, Bytes}. -query_stat_dlx(#?MODULE{dlx = DlxState}) -> - rabbit_fifo_dlx:stat(DlxState). +query_stat_dlx(State) -> + rabbit_fifo_dlx:stat(State). query_peek(Pos, State0) when Pos > 0 -> case take_next_msg(State0) of @@ -1495,28 +1409,28 @@ decr_total(#?MODULE{messages_total = Tot} = State) -> incr_total(#?MODULE{messages_total = Tot} = State) -> State#?MODULE{messages_total = Tot + 1}. -drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) -> +drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> case take_next_msg(State0) of {?PREFIX_MEM_MSG(Header), State1} -> State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)), - {decr_total(State2), Effects0}; + {decr_total(State2), Effects}; {?DISK_MSG(Header), State1} -> State2 = add_bytes_drop(Header, State1), - {decr_total(State2), Effects0}; + {decr_total(State2), Effects}; {?INDEX_MSG(Idx, ?MSG(Header, _) = Msg) = FullMsg, State1} -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State2 = decr_total(add_bytes_drop(Header, State1)), - State = case Msg of - ?DISK_MSG(_) -> State2; - _ -> - subtract_in_memory_counts(Header, State2) - end, - Effects = dead_letter_effects(maxlen, [FullMsg], - State, Effects0), - {State#?MODULE{ra_indexes = Indexes}, Effects}; + State2 = State1#?MODULE{ra_indexes = Indexes}, + State3 = decr_total(add_bytes_drop(Header, State2)), + State4 = case Msg of + ?DISK_MSG(_) -> State3; + _ -> + subtract_in_memory_counts(Header, State3) + end, + {State, DlxEffects, true} = rabbit_fifo_dlx:discard([FullMsg], maxlen, State4), + {State, DlxEffects ++ Effects}; empty -> - {State0, Effects0} + {State0, Effects} end. 
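
%% drop_head/2 above now routes overflow drops through rabbit_fifo_dlx:discard/3, whose
%% contract (spec added later in this patch) is {State, Effects, Delete}: Delete =:= true
%% means the messages are finished and rabbit_fifo must clean up its own bookkeeping
%% (ra_indexes, totals, in-memory counters), while false means the dlx state has taken
%% ownership of them for at-least-once delivery. drop_head may assert Delete =:= true
%% because the at_least_once clause of discard/3 is guarded with Reason =/= maxlen, so a
%% maxlen drop only ever takes the undefined or at_most_once path. A sketch of the
%% general calling pattern, with delete_bookkeeping/2 standing in for the index and
%% counter updates performed above:
discard_and_cleanup(Msgs, Reason, State0) ->
    {State1, DlxEffects, Delete} = rabbit_fifo_dlx:discard(Msgs, Reason, State0),
    State = case Delete of
                true  -> delete_bookkeeping(Msgs, State1);
                false -> State1
            end,
    {State, DlxEffects}.
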
enqueue(RaftIdx, Ts, RawMsg, #?MODULE{messages = Messages} = State0) -> @@ -1661,8 +1575,7 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, % used to process messages that are finished complete(Meta, ConsumerId, DiscardedMsgIds, #consumer{checked_out = Checked} = Con0, - #?MODULE{messages_total = Tot, - ra_indexes = Indexes0} = State0, Delete) -> + #?MODULE{messages_total = Tot} = State0, Delete) -> %% credit_mode = simple_prefetch should automatically top-up credit %% as messages are simple_prefetch or otherwise returned Discarded = maps:with(DiscardedMsgIds, Checked), @@ -1671,26 +1584,26 @@ complete(Meta, ConsumerId, DiscardedMsgIds, Con = Con0#consumer{checked_out = maps:without(DiscardedMsgIds, Checked), credit = increase_credit(Con0, Len)}, State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - State = lists:foldl(fun(Msg, Acc) -> - add_bytes_settle( - get_msg_header(Msg), Acc) - end, State1, DiscardedMsgs), + State2 = lists:foldl(fun(Msg, Acc) -> + add_bytes_settle( + get_msg_header(Msg), Acc) + end, State1, DiscardedMsgs), case Delete of true -> - Indexes = delete_indexes(DiscardedMsgs, Indexes0), - State#?MODULE{messages_total = Tot - Len, - ra_indexes = Indexes}; + State = State2#?MODULE{messages_total = Tot - Len}, + delete_indexes(DiscardedMsgs, State); false -> - State + State2 end. -delete_indexes(Msgs, Indexes) -> +delete_indexes(Msgs, #?MODULE{ra_indexes = Indexes0} = State) -> %% TODO: optimise by passing a list to rabbit_fifo_index - lists:foldl(fun (?INDEX_MSG(I, ?MSG(_,_)), Acc) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc); - (_, Acc) -> - Acc - end, Indexes, Msgs). + Indexes = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_,_)), Acc) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc); + (_, Acc) -> + Acc + end, Indexes0, Msgs), + State#?MODULE{ra_indexes = Indexes}. increase_credit(#consumer{lifetime = once, credit = Credit}, _) -> @@ -1711,41 +1624,6 @@ complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, {State, ok, Effects} = checkout(Meta, State0, State1, Effects0, false), update_smallest_raft_index(IncomingRaftIdx, State, Effects). -dead_letter_effects(_Reason, _Discarded, - #?MODULE{cfg = #cfg{dead_letter_handler = undefined}}, - Effects) -> - Effects; -dead_letter_effects(Reason, Discarded, - #?MODULE{cfg = #cfg{dead_letter_handler = {at_most_once, {Mod, Fun, Args}}}}, - Effects) -> - RaftIdxs = lists:filtermap( - fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - {true, RaftIdx}; - ({_PerMsgReason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))}) when Reason =:= undefined -> - {true, RaftIdx}; - (_IgnorePrefixMessage) -> - false - end, Discarded), - [{log, RaftIdxs, - fun (Log) -> - Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), - DeadLetters = lists:filtermap( - fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), - {true, {Reason, Msg}}; - (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> - {true, {Reason, Msg}}; - ({PerMsgReason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))}) when Reason =:= undefined -> - {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), - {true, {PerMsgReason, Msg}}; - ({PerMsgReason, ?INDEX_MSG(_, ?MSG(_Header, Msg))}) when Reason =:= undefined -> - {true, {PerMsgReason, Msg}}; - (_IgnorePrefixMessage) -> - false - end, Discarded), - [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] - end} | Effects]. 
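
%% The dead_letter_effects/4 helper removed above now lives in rabbit_fifo_dlx:discard/3
%% (see the rabbit_fifo_dlx.erl hunks further down in this patch): for an
%% {at_most_once, {Mod, Fun, Args}} handler it still emits a {log, RaftIdxs, Fun} effect
%% that reads the raw bodies of disk-backed messages back from the Ra log and then issues
%% {mod_call, Mod, Fun, Args ++ [DeadLetters]}. A hypothetical handler module matching
%% that calling convention (the module name and the Tag argument are made up for the
%% example; DeadLetters arrives as a list of {Reason, RawMsg} pairs):
-module(my_dlx_handler).
-export([forward/2]).

forward(Tag, DeadLetters) ->
    lists:foreach(fun({Reason, Msg}) ->
                          rabbit_log:debug("~p dead-lettered (~p): ~p", [Tag, Reason, Msg])
                  end, DeadLetters),
    ok.
%% which would be configured as
%% dead_letter_handler => {at_most_once, {my_dlx_handler, forward, [my_tag]}}.
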
- cancel_consumer_effects(ConsumerId, #?MODULE{cfg = #cfg{resource = QName}} = State, Effects) -> [{mod_call, rabbit_quorum_queue, @@ -1840,9 +1718,7 @@ get_header(Key, Header) when is_map(Header) -> return_one(Meta, MsgId, Msg0, #?MODULE{returns = Returns, consumers = Consumers, - dlx = DlxState0, - cfg = #cfg{delivery_limit = DeliveryLimit, - dead_letter_handler = DLH}} = State0, + cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), @@ -1850,17 +1726,9 @@ return_one(Meta, MsgId, Msg0, case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> %% TODO: don't do for prefix msgs - case DLH of - at_least_once -> - DlxState = rabbit_fifo_dlx:discard(Msg, delivery_limit, DlxState0), - State = complete(Meta, ConsumerId, [MsgId], Con0, State0#?MODULE{dlx = DlxState}, false), - {State, Effects0}; - _ -> - Effects = dead_letter_effects(delivery_limit, [Msg], - State0, Effects0), - State = complete(Meta, ConsumerId, [MsgId], Con0, State0, true), - {State, Effects} - end; + {State1, DlxEffects, Delete} = rabbit_fifo_dlx:discard([Msg], delivery_limit, State0), + State = complete(Meta, ConsumerId, [MsgId], Con0, State1, Delete), + {State, DlxEffects ++ Effects0}; _ -> Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, @@ -1910,11 +1778,8 @@ checkout(Meta, OldState, State, Effects) -> checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, Effects0, HandleConsumerChanges) -> - {#?MODULE{dlx = DlxState0} = State1, _Result, Effects1} = - checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), - %%TODO For now we checkout the discards queue here. Move it to a better place - {DlxState1, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DlxState0), - State2 = State1#?MODULE{dlx = DlxState1}, + {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), + {State2, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(State1), Effects2 = DlxDeliveryEffects ++ Effects1, case evaluate_limit(Index, false, OldState, State2, Effects2) of {State, true, Effects} -> @@ -2207,63 +2072,51 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> end. %% dequeue all expired messages -expire_msgs(RaCmdTs, State0, Effects0) -> - case peek_next_msg(State0) of - {value, ?INDEX_MSG(Idx, ?MSG(#{expiry := Expiry} = Header, _) = Msg) = FullMsg} +expire_msgs(RaCmdTs, State, Effects) -> + %% In the normal case, there are no expired messages. + %% Therefore, first queue:peek/1 to check whether we need to queue:out/1 + %% because the latter can be much slower than the former. 
+ case peek_next_msg(State) of + {value, ?INDEX_MSG(_Idx, ?MSG(#{expiry := Expiry} = Header, _))} when RaCmdTs >= Expiry -> - {_, State1} = take_next_msg(State0), - #?MODULE{dlx = DlxState0, - cfg = #cfg{dead_letter_handler = DLH}, - ra_indexes = Indexes0} = State2 = add_bytes_drop(Header, State1), - case DLH of - at_least_once -> - DlxState = rabbit_fifo_dlx:discard(FullMsg, expired, DlxState0), - State = State2#?MODULE{dlx = DlxState}, - expire_msgs(RaCmdTs, State, Effects0); - _ -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State3 = decr_total(State2), - State4 = case Msg of - ?DISK_MSG(_) -> - State3; - _ -> - subtract_in_memory_counts(Header, State3) - end, - Effects = dead_letter_effects(expired, [FullMsg], - State4, Effects0), - State = State4#?MODULE{ra_indexes = Indexes}, - expire_msgs(RaCmdTs, State, Effects) - end; - {value, ?PREFIX_MEM_MSG(#{expiry := Expiry} = Header) = Msg} + expire(RaCmdTs, Header, State, Effects); + {value, ?PREFIX_MEM_MSG(#{expiry := Expiry} = Header)} when RaCmdTs >= Expiry -> - {_, State1} = take_next_msg(State0), - State2 = expire_prefix_msg(Msg, Header, State1), - expire_msgs(RaCmdTs, State2, Effects0); - {value, ?DISK_MSG(#{expiry := Expiry} = Header) = Msg} + expire(RaCmdTs, Header, State, Effects); + {value, ?DISK_MSG(#{expiry := Expiry} = Header)} when RaCmdTs >= Expiry -> - {_, State1} = take_next_msg(State0), - State2 = expire_prefix_msg(Msg, Header, State1), - expire_msgs(RaCmdTs, State2, Effects0); + expire(RaCmdTs, Header, State, Effects); _ -> - {State0, Effects0} + {State, Effects} end. -expire_prefix_msg(Msg, Header, State0) -> - #?MODULE{dlx = DlxState0, - cfg = #cfg{dead_letter_handler = DLH}} = State1 = add_bytes_drop(Header, State0), - case DLH of - at_least_once -> - DlxState = rabbit_fifo_dlx:discard(Msg, expired, DlxState0), - State1#?MODULE{dlx = DlxState}; - _ -> - State2 = case Msg of - ?DISK_MSG(_) -> - State1; - _ -> - subtract_in_memory_counts(Header, State1) - end, - decr_total(State2) - end. +expire(RaCmdTs, Header, State0, Effects) -> + {Msg, State1} = take_next_msg(State0), + State2 = add_bytes_drop(Header, State1), + {#?MODULE{ra_indexes = Indexes0} = State3, DlxEffects, Delete} = + rabbit_fifo_dlx:discard([Msg], expired, State2), + State = case Delete of + false -> + State3; + true -> + State5 = case Msg of + ?INDEX_MSG(Idx, ?DISK_MSG(_Header)) + when is_integer(Idx) -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State3#?MODULE{ra_indexes = Indexes}; + ?INDEX_MSG(Idx, ?MSG(_Header, _)) + when is_integer(Idx) -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State4 = State3#?MODULE{ra_indexes = Indexes}, + subtract_in_memory_counts(Header, State4); + ?PREFIX_MEM_MSG(_) -> + subtract_in_memory_counts(Header, State3); + ?DISK_MSG(_) -> + State3 + end, + decr_total(State5) + end, + expire_msgs(RaCmdTs, State, DlxEffects ++ Effects). 
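
%% Note that expiry above is decided against the system_time recorded with the Ra
%% command (RaCmdTs comes from #{system_time := Ts} in checkout_one/3), not against a
%% wall-clock read inside the state machine, so every replica applying the same command
%% reaches the same verdict. The check itself reduces to the following (a sketch, using
%% a meta map and a message header map shaped like the ones handled above):
is_expired(#{system_time := RaCmdTs}, #{expiry := Expiry}) ->
    RaCmdTs >= Expiry;
is_expired(_Meta, _Header) ->
    false.
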
timer_effect(RaCmdTs, State, Effects) -> T = case peek_next_msg(State) of @@ -2387,8 +2240,7 @@ dehydrate_state(#?MODULE{messages = Messages, consumers = Consumers, returns = Returns, prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, - waiting_consumers = Waiting0, - dlx = DlxState} = State) -> + waiting_consumers = Waiting0} = State0) -> RCnt = lqueue:len(Returns), %% TODO: optimise this function as far as possible PrefRet1 = lists:foldr(fun (M, Acc) -> @@ -2400,17 +2252,17 @@ dehydrate_state(#?MODULE{messages = Messages, %% recovering from a snapshot PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0], - State#?MODULE{messages = lqueue:new(), - ra_indexes = rabbit_fifo_index:empty(), - release_cursors = lqueue:new(), - consumers = maps:map(fun (_, C) -> - dehydrate_consumer(C) - end, Consumers), - returns = lqueue:new(), - prefix_msgs = {PRCnt + RCnt, PrefRet, - PPCnt + lqueue:len(Messages), PrefMsgs}, - waiting_consumers = Waiting, - dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. + State = State0#?MODULE{messages = lqueue:new(), + ra_indexes = rabbit_fifo_index:empty(), + release_cursors = lqueue:new(), + consumers = maps:map(fun (_, C) -> + dehydrate_consumer(C) + end, Consumers), + returns = lqueue:new(), + prefix_msgs = {PRCnt + RCnt, PrefRet, + PPCnt + lqueue:len(Messages), PrefMsgs}, + waiting_consumers = Waiting}, + rabbit_fifo_dlx:dehydrate(State). dehydrate_messages(Msgs0) -> {OutRes, Msgs} = lqueue:out(Msgs0), @@ -2442,13 +2294,12 @@ dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> normalize(#?MODULE{ra_indexes = _Indexes, returns = Returns, messages = Messages, - release_cursors = Cursors, - dlx = DlxState} = State) -> - State#?MODULE{ - returns = lqueue:from_list(lqueue:to_list(Returns)), - messages = lqueue:from_list(lqueue:to_list(Messages)), - release_cursors = lqueue:from_list(lqueue:to_list(Cursors)), - dlx = rabbit_fifo_dlx:normalize(DlxState)}. + release_cursors = Cursors} = State0) -> + State = State0#?MODULE{ + returns = lqueue:from_list(lqueue:to_list(Returns)), + messages = lqueue:from_list(lqueue:to_list(Messages)), + release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}, + rabbit_fifo_dlx:normalize(State). is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index ee46b6080441..70cc8d0dfe8c 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -214,6 +214,7 @@ %% TODO Remove this field and store prefix messages in-place. This will %% simplify the checkout logic. prefix_msgs = {0, [], 0, []} :: prefix_msgs(), + %% state for at-least-once dead-lettering dlx = rabbit_fifo_dlx:init() :: rabbit_fifo_dlx:state(), msg_bytes_enqueue = 0 :: non_neg_integer(), msg_bytes_checkout = 0 :: non_neg_integer(), diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 8208f9cd7da0..955370eec8ae 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -4,34 +4,24 @@ -include("rabbit_fifo.hrl"). -compile({no_auto_import, [apply/3]}). -% client API, e.g. for rabbit_fifo_dlx_client --export([make_checkout/2, - make_settle/1]). 
- -% called by rabbit_fifo delegating DLX handling to this module --export([init/0, - apply/2, +-export([ + %% rabbit_fifo_dlx_client + make_checkout/2, + make_settle/1, + %% rabbit_fifo delegating DLX handling to this module + init/0, + apply/3, discard/3, overview/1, checkout/1, - state_enter/3, - ensure_worker_started/2, - cleanup/1, + state_enter/2, + handle_aux/4, purge/1, - local_alive_consumer_pid/1, dehydrate/1, normalize/1, - stat/1]). - -%% This module handles the dead letter (DLX) part of the rabbit_fifo state machine. -%% This is a separate module to better unit test and provide separation of concerns. -%% This module maintains its own state: -%% a queue of DLX messages, a single node local DLX consumer, and some stats. -%% The state of this module is included into rabbit_fifo state because there can only by one Ra state machine. -%% The rabbit_fifo module forwards all DLX commands to this module where we then update the DLX specific state only: -%% e.g. DLX consumer subscribed, adding / removing discarded messages, stats -%% -%% It also runs its own checkout logic sending DLX messages to the DLX consumer. + stat/1, + update_config/2 + ]). -record(checkout,{ consumer :: pid(), @@ -57,15 +47,16 @@ make_checkout(Pid, NumUnsettled) -> make_settle(MessageIds) when is_list(MessageIds) -> {dlx, #settle{msg_ids = MessageIds}}. -overview(#?MODULE{consumer = undefined, - msg_bytes = MsgBytes, - msg_bytes_checkout = 0, - discards = Discards}) -> +-spec overview(rabbit_fifo:state()) -> map(). +overview(#rabbit_fifo{dlx = #?MODULE{consumer = undefined, + msg_bytes = MsgBytes, + msg_bytes_checkout = 0, + discards = Discards}}) -> overview0(Discards, #{}, MsgBytes, 0); -overview(#?MODULE{consumer = #dlx_consumer{checked_out = Checked}, - msg_bytes = MsgBytes, - msg_bytes_checkout = MsgBytesCheckout, - discards = Discards}) -> +overview(#rabbit_fifo{dlx = #?MODULE{consumer = #dlx_consumer{checked_out = Checked}, + msg_bytes = MsgBytes, + msg_bytes_checkout = MsgBytesCheckout, + discards = Discards}}) -> overview0(Discards, Checked, MsgBytes, MsgBytesCheckout). overview0(Discards, Checked, MsgBytes, MsgBytesCheckout) -> @@ -74,12 +65,12 @@ overview0(Discards, Checked, MsgBytes, MsgBytesCheckout) -> discard_message_bytes => MsgBytes, discard_checkout_message_bytes => MsgBytesCheckout}. --spec stat(state()) -> - {non_neg_integer(), non_neg_integer()}. -stat(#?MODULE{consumer = Con, - discards = Discards, - msg_bytes = MsgBytes, - msg_bytes_checkout = MsgBytesCheckout}) -> +-spec stat(rabbit_fifo:state()) -> + {Num :: non_neg_integer(), Bytes :: non_neg_integer()}. +stat(#rabbit_fifo{dlx = #?MODULE{consumer = Con, + discards = Discards, + msg_bytes = MsgBytes, + msg_bytes_checkout = MsgBytesCheckout}}) -> Num0 = lqueue:len(Discards), Num = case Con of undefined -> @@ -90,20 +81,23 @@ stat(#?MODULE{consumer = Con, Bytes = MsgBytes + MsgBytesCheckout, {Num, Bytes}. --spec apply(command(), state()) -> - {state(), ok | list()}. 
% TODO: refine return type -apply(#checkout{consumer = Pid, - prefetch = Prefetch}, - #?MODULE{consumer = undefined} = State0) -> - State = State0#?MODULE{consumer = #dlx_consumer{pid = Pid, - prefetch = Prefetch}}, - {State, ok}; -apply(#checkout{consumer = ConsumerPid, - prefetch = Prefetch}, - #?MODULE{consumer = #dlx_consumer{checked_out = CheckedOutOldConsumer}, - discards = Discards0, - msg_bytes = Bytes, - msg_bytes_checkout = BytesCheckout} = State0) -> +-spec apply(ra_machine:command_meta_data(), rabbit_fifo:command(), rabbit_fifo:state()) -> + {rabbit_fifo:state(), ra_machine:effects()}. +apply(Meta, {dlx, #checkout{consumer = Pid, + prefetch = Prefetch}}, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = #?MODULE{consumer = undefined} = DlxState0} = State0) -> + DlxState = DlxState0#?MODULE{consumer = #dlx_consumer{pid = Pid, + prefetch = Prefetch}}, + State = set(State0, DlxState), + rabbit_fifo:checkout(Meta, State0, State, [], false); +apply(Meta, {dlx, #checkout{consumer = ConsumerPid, + prefetch = Prefetch}}, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = #?MODULE{consumer = #dlx_consumer{checked_out = CheckedOutOldConsumer}, + discards = Discards0, + msg_bytes = Bytes, + msg_bytes_checkout = BytesCheckout} = DlxState0} = State0) -> %% Since we allow only a single consumer, the new consumer replaces the old consumer. %% All checked out messages to the old consumer need to be returned to the discards queue %% such that these messages can be (eventually) re-delivered to the new consumer. @@ -115,15 +109,17 @@ apply(#checkout{consumer = ConsumerPid, fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} end, {Discards0, 0}, Checked1), - State = State0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, - prefetch = Prefetch}, - discards = Discards, - msg_bytes = Bytes + BytesMoved, - msg_bytes_checkout = BytesCheckout - BytesMoved}, - {State, ok}; -apply(#settle{msg_ids = MsgIds}, - #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, - msg_bytes_checkout = BytesCheckout} = State0) -> + DlxState = DlxState0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, + prefetch = Prefetch}, + discards = Discards, + msg_bytes = Bytes + BytesMoved, + msg_bytes_checkout = BytesCheckout - BytesMoved}, + State = set(State0, DlxState), + rabbit_fifo:checkout(Meta, State0, State, [], false); +apply(#{index := IncomingRaftIdx} = Meta, {dlx, #settle{msg_ids = MsgIds}}, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, + msg_bytes_checkout = BytesCheckout} = DlxState0} = State0) -> Acked = maps:with(MsgIds, Checked), AckedRsnMsgs = maps:values(Acked), AckedMsgs = lists:map(fun({_Reason, Msg}) -> Msg end, AckedRsnMsgs), @@ -131,24 +127,68 @@ apply(#settle{msg_ids = MsgIds}, Bytes + size_in_bytes(Msg) end, 0, AckedMsgs), Unacked = maps:without(MsgIds, Checked), - State = State0#?MODULE{consumer = C#dlx_consumer{checked_out = Unacked}, - msg_bytes_checkout = BytesCheckout - AckedBytes}, - {State, AckedMsgs}. - -%%TODO delete delivery_count header to save space? -%% It's not needed anymore. --spec discard(rabbit_fifo:indexed_msg(), term(), state()) -> state(). -discard(Msg, Reason, #?MODULE{discards = Discards0, - msg_bytes = MsgBytes0} = State) -> - Discards = lqueue:in({Reason, Msg}, Discards0), - MsgBytes = MsgBytes0 + size_in_bytes(Msg), - State#?MODULE{discards = Discards, - msg_bytes = MsgBytes}. 
- --spec checkout(state()) -> - {state(), {list(), list()}}. -checkout(#?MODULE{consumer = undefined, - discards = Discards} = State) -> + DlxState = DlxState0#?MODULE{consumer = C#dlx_consumer{checked_out = Unacked}, + msg_bytes_checkout = BytesCheckout - AckedBytes}, + State1 = set(State0, DlxState), + Total = rabbit_fifo:query_messages_total(State0) - length(AckedMsgs), + State2 = rabbit_fifo:subtract_in_memory(AckedMsgs, State1), + State3 = State2#rabbit_fifo{messages_total = Total}, + State4 = rabbit_fifo:delete_indexes(AckedMsgs, State3), + {State, ok, Effects} = rabbit_fifo:checkout(Meta, State0, State4, [], false), + rabbit_fifo:update_smallest_raft_index(IncomingRaftIdx, State, Effects); +apply(_, Cmd, #rabbit_fifo{cfg = #cfg{dead_letter_handler = DLH}} = State) -> + rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", Cmd, DLH), + {State, []}. + +-spec discard([msg()], rabbit_dead_letter:reason(), rabbit_fifo:state()) -> + {rabbit_fifo:state(), ra_machine:effects(), Delete :: boolean()}. +discard(_, _, #rabbit_fifo{cfg = #cfg{dead_letter_handler = undefined}} = State) -> + {State, [], true}; +discard(Msgs, Reason, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = {at_most_once, {Mod, Fun, Args}}}} = State) -> + RaftIdxs = lists:filtermap( + fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> + {true, RaftIdx}; + (_IgnorePrefixMessage) -> + false + end, Msgs), + Effect = {log, RaftIdxs, + fun (Log) -> + Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), + DeadLetters = lists:filtermap( + fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> + {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), + {true, {Reason, Msg}}; + (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> + {true, {Reason, Msg}}; + (_IgnorePrefixMessage) -> + false + end, Msgs), + [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] + end}, + {State, [Effect], true}; +discard(Msgs, Reason, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = #?MODULE{discards = Discards0, + msg_bytes = MsgBytes0} = DlxState0} = State0) + when Reason =/= maxlen -> + %%TODO delete delivery_count header to save space? + %% It's not needed anymore. + {Discards, MsgBytes} = lists:foldl(fun (Msg, {D0, B0}) -> + D = lqueue:in({Reason, Msg}, D0), + B = B0 + size_in_bytes(Msg), + {D, B} + end, {Discards0, MsgBytes0}, Msgs), + DlxState = DlxState0#?MODULE{discards = Discards, + msg_bytes = MsgBytes}, + State = set(State0, DlxState), + {State, [], false}. + +-spec checkout(rabbit_fifo:state()) -> + {rabbit_fifo:state(), ra_machine:effects()}. +checkout(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = #?MODULE{consumer = undefined, + discards = Discards}} = State) -> case lqueue:is_empty(Discards) of true -> ok; @@ -156,8 +196,13 @@ checkout(#?MODULE{consumer = undefined, rabbit_log:warning("there are dead-letter messages but no dead-letter consumer") end, {State, []}; +checkout(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = DlxState0} = State0) -> + {DlxState, Effects} = checkout0(checkout_one(DlxState0), {[],[]}), + State = set(State0, DlxState), + {State, Effects}; checkout(State) -> - checkout0(checkout_one(State), {[],[]}). + {State, []}. checkout0({success, MsgId, {Reason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) when is_integer(RaftIdx) -> @@ -237,10 +282,19 @@ delivery_effects(CPid, {InMemMsgs, IdxMsgs0}) -> [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. 
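
%% delivery_effects/2 above is the at-least-once delivery path: in-memory messages are
%% sent to the dlx worker directly, while disk-backed ones are first read back from the
%% Ra log via a {log, RaftIdxs, Fun} effect, and both end up in a single
%% {send_msg, WorkerPid, {dlx_delivery, Msgs}, [ra_event]} effect. A rough sketch of the
%% receiving side, assuming ra's usual {ra_event, From, Event} wrapping for messages
%% sent with the ra_event option (handle_actions/1 is a placeholder for whatever the
%% caller does with the resulting deliver action):
wait_for_dlx_deliveries(ClientState0) ->
    receive
        {ra_event, From, Event} ->
            {ok, ClientState, Actions} =
                rabbit_fifo_dlx_client:handle_ra_event(From, Event, ClientState0),
            ok = handle_actions(Actions),
            wait_for_dlx_deliveries(ClientState)
    end.
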
-state_enter(leader, QRef, State) -> - ensure_worker_started(QRef, State); -state_enter(_, _, State) -> - ensure_worker_terminated(State). +-spec state_enter(ra_server:ra_state(), rabbit_fifo:state()) -> + ra_machine:effects(). +state_enter(leader, #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once, + resource = QRef}, + dlx = DlxState}) -> + ensure_worker_started(QRef, DlxState), + []; +state_enter(_, #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = DlxState}) -> + ensure_worker_terminated(DlxState), + []; +state_enter(_, _) -> + []. ensure_worker_started(QRef, #?MODULE{consumer = undefined}) -> start_worker(QRef); @@ -267,35 +321,112 @@ ensure_worker_terminated(#?MODULE{consumer = undefined}) -> ensure_worker_terminated(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> case is_local_and_alive(Pid) of true -> - %% Note that we can't return a mod_call effect here because mod_call is executed on the leader only. + %% Note that we can't return a mod_call effect here + %% because mod_call is executed on the leader only. ok = supervisor:terminate_child(rabbit_fifo_dlx_sup, Pid), rabbit_log:debug("terminated rabbit_fifo_dlx_worker ~p", [Pid]); false -> ok end. -%% called when switching from at-least-once to at-most-once -cleanup(#?MODULE{consumer = Consumer, - discards = Discards} = State) -> - ensure_worker_terminated(State), - %% Return messages in the order they got discarded originally - %% for the final at-most-once dead-lettering. +local_alive_consumer_pid(#?MODULE{consumer = undefined}) -> + undefined; +local_alive_consumer_pid(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> + case is_local_and_alive(Pid) of + true -> + Pid; + false -> + undefined + end. + +is_local_and_alive(Pid) + when node(Pid) =:= node() -> + is_process_alive(Pid); +is_local_and_alive(_) -> + false. + +-spec update_config(config(), rabbit_fifo:state()) -> + {rabbit_fifo:state(), ra_machine:effects()}. +update_config(#{dead_letter_handler := at_least_once}, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = DlxState} = State) -> + %% dead_letter_handler stayed at_least_once. + %% Notify rabbit_fifo_dlx_worker about potentially updated policies. + case local_alive_consumer_pid(DlxState) of + undefined -> + {State, []}; + Pid -> + {State, [{send_msg, Pid, lookup_topology, ra_event}]} + end; +update_config(#{dead_letter_handler := DLH}, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = DLH}} = State) -> + %% dead_letter_handler stayed same. + {State, []}; +update_config(#{dead_letter_handler := NewDLH}, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = OldDLH, + resource = Res}} = State0) -> + rabbit_log:debug("Switching dead_letter_handler from ~p to ~p for ~s", + [OldDLH, NewDLH, rabbit_misc:rs(Res)]), + {#rabbit_fifo{cfg = Cfg} = State1, Effects0} = switch_from(State0), + State2 = State1#rabbit_fifo{cfg = Cfg#cfg{dead_letter_handler = NewDLH}, + dlx = init()}, + switch_to(State2, Effects0). + +-spec switch_to(rabbit_fifo:state(), ra_machine:effects()) -> + {rabbit_fifo:state(), ra_machine:effects()}. +switch_to(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}} = State, + Effects0) -> + %% Switch from some other strategy to at-least-once. + %% Dlx worker needs to be started on the leader. + %% The cleanest way to determine the Ra state of this node is delegation to handle_aux. + Effects = [{aux, {dlx, setup}} | Effects0], + {State, Effects}; +switch_to(State, Effects) -> + {State, Effects}. 
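
%% Strategy switches arrive as #update_config{} commands: update_config/2 above first
%% tears down the old strategy (switch_from/1, next hunk) and then switches to the new
%% one, and switching to at_least_once only emits {aux, {dlx, setup}} because the dlx
%% worker has to run next to the Ra leader and only handle_aux knows the local Ra state.
%% A sketch of building such a command (only the key relevant here is spelled out; in
%% the broker the full map is derived from queue arguments and policies, see the
%% rabbit_quorum_queue.erl hunk further down):
switch_to_at_least_once(Name, QRes) ->
    rabbit_fifo:make_update_config(#{name => Name,
                                     queue_resource => QRes,
                                     dead_letter_handler => at_least_once}).
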
+ +-spec switch_from(rabbit_fifo:state()) -> + {rabbit_fifo:state(), ra_machine:effects()}. +switch_from(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, + dlx = #?MODULE{consumer = Consumer, + discards = Discards} = DlxState} = State0) -> + %% switch from at-least-once to some other strategy + ensure_worker_terminated(DlxState), CheckedReasonMsgs = case Consumer of - #dlx_consumer{checked_out = Checked} when is_map(Checked) -> - L0 = maps:to_list(Checked), - L1 = lists:keysort(1, L0), - {_, L2} = lists:unzip(L1), - L2; - _ -> - [] + #dlx_consumer{checked_out = Checked} + when is_map(Checked) -> + maps:values(Checked); + _ -> [] end, DiscardReasonMsgs = lqueue:to_list(Discards), - CheckedReasonMsgs ++ DiscardReasonMsgs. - -purge(#?MODULE{consumer = Con0, - discards = Discards} = State0) -> + {_, Msgs} = lists:unzip(CheckedReasonMsgs ++ DiscardReasonMsgs), + Len = length(Msgs), + Total = rabbit_fifo:query_messages_total(State0), + State1 = State0#rabbit_fifo{messages_total = Total - Len}, + State2 = rabbit_fifo:delete_indexes(Msgs, State1), + State = rabbit_fifo:subtract_in_memory(Msgs, State2), + rabbit_log:debug("Deleted ~b dead-lettered messages", [Len]), + {State, []}; +switch_from(State) -> + {State, []}. + +-spec handle_aux(ra_server:ra_state(), Cmd :: term(), term(), rabbit_fifo:state()) -> + term(). +handle_aux(leader, setup, Aux, + #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once, + resource = QRef}, + dlx = DlxState}) -> + ensure_worker_started(QRef, DlxState), + Aux; +handle_aux(_, _, Aux, _) -> + Aux. + +-spec purge(rabbit_fifo:state()) -> + {rabbit_fifo:state(), [msg()]}. +purge(#rabbit_fifo{dlx = #?MODULE{consumer = Con0, + discards = Discards} = DlxState0} = State0) -> {Con, CheckedMsgs} = case Con0 of - #dlx_consumer{checked_out = Checked} when is_map(Checked) -> + #dlx_consumer{checked_out = Checked} + when is_map(Checked) -> L = maps:to_list(Checked), {_, CheckedReasonMsgs} = lists:unzip(L), {_, Msgs} = lists:unzip(CheckedReasonMsgs), @@ -307,19 +438,20 @@ purge(#?MODULE{consumer = Con0, DiscardReasonMsgs = lqueue:to_list(Discards), {_, DiscardMsgs} = lists:unzip(DiscardReasonMsgs), PurgedMsgs = CheckedMsgs ++ DiscardMsgs, - State = State0#?MODULE{consumer = Con, - discards = lqueue:new(), - msg_bytes = 0, - msg_bytes_checkout = 0 - }, + DlxState = DlxState0#?MODULE{consumer = Con, + discards = lqueue:new(), + msg_bytes = 0, + msg_bytes_checkout = 0 + }, + State = set(State0, DlxState), {State, PurgedMsgs}. -%% TODO Consider alternative to not dehydrate at all -%% by putting messages to disk before enqueueing them in discards queue. -dehydrate(#?MODULE{discards = Discards, - consumer = Con} = State) -> - State#?MODULE{discards = dehydrate_messages(Discards), - consumer = dehydrate_consumer(Con)}. +-spec dehydrate(rabbit_fifo:state()) -> + rabbit_fifo:state(). +dehydrate(#rabbit_fifo{dlx = #?MODULE{discards = Discards, + consumer = Con} = DlxState} = State) -> + set(State, DlxState#?MODULE{discards = dehydrate_messages(Discards), + consumer = dehydrate_consumer(Con)}). dehydrate_messages(Discards) -> L0 = lqueue:to_list(Discards), @@ -336,21 +468,10 @@ dehydrate_consumer(#dlx_consumer{checked_out = Checked0} = Con) -> dehydrate_consumer(undefined) -> undefined. -normalize(#?MODULE{discards = Discards} = State) -> - State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards))}. +-spec normalize(rabbit_fifo:state()) -> + rabbit_fifo:state(). 
+normalize(#rabbit_fifo{dlx = #?MODULE{discards = Discards} = DlxState} = State) -> + set(State, DlxState#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards))}). -local_alive_consumer_pid(#?MODULE{consumer = undefined}) -> - undefined; -local_alive_consumer_pid(#?MODULE{consumer = #dlx_consumer{pid = Pid}}) -> - case is_local_and_alive(Pid) of - true -> - Pid; - false -> - undefined - end. - -is_local_and_alive(Pid) - when node(Pid) =:= node() -> - is_process_alive(Pid); -is_local_and_alive(_) -> - false. +set(State, #?MODULE{} = DlxState) -> + State#rabbit_fifo{dlx = DlxState}. diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index 7604a481f021..05b8e6b85e85 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -4,7 +4,7 @@ %% Reason of prefix messages is [] because the message will not be %% actually delivered and storing 2 bytes in the persisted snapshot %% is less than the reason atom. --type reason() :: 'expired' | 'rejected' | delivery_limit | ?NIL. +-type reason() :: expired | rejected | delivery_limit | ?NIL. % See snapshot scenarios in rabbit_fifo_prop_SUITE. Add dlx dehydrate tests. -record(dlx_consumer,{ diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 39ec57d7bb3b..9273d97d3020 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -201,8 +201,10 @@ code_change(_OldVsn, State, _Extra) -> lookup_topology(#state{queue_ref = {resource, Vhost, queue, _} = QRef} = State) -> {ok, Q} = rabbit_amqqueue:lookup(QRef), - DLRKey = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-routing-key">>, fun(_Pol, QArg) -> QArg end, Q), - DLX = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-exchange">>, fun(_Pol, QArg) -> QArg end, Q), + DLRKey = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-routing-key">>, + fun(_Pol, QArg) -> QArg end, Q), + DLX = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-exchange">>, + fun(_Pol, QArg) -> QArg end, Q), DLXRef = rabbit_misc:r(Vhost, exchange, DLX), State#state{exchange_ref = DLXRef, routing_key = DLRKey}. diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index d95f8e8b69fd..84dec119de2d 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -1266,14 +1266,9 @@ reclaim_memory(Vhost, QueueName) -> %%---------------------------------------------------------------------------- dead_letter_handler(Q, Overflow) -> - %% Queue arg continues to take precedence to not break existing configurations - %% for queues upgraded from =v3.10 Exchange = args_policy_lookup(<<"dead-letter-exchange">>, fun queueArgHasPrecedence/2, Q), RoutingKey = args_policy_lookup(<<"dead-letter-routing-key">>, fun queueArgHasPrecedence/2, Q), - %% Policy takes precedence because it's a new key introduced in v3.10 and we want - %% users to use policies instead of queue args allowing dynamic reconfiguration. - %% TODO change to queueArgHasPrecedence for dead-letter-strategy - Strategy = args_policy_lookup(<<"dead-letter-strategy">>, fun policyHasPrecedence/2, Q), + Strategy = args_policy_lookup(<<"dead-letter-strategy">>, fun queueArgHasPrecedence/2, Q), QName = amqqueue:get_name(Q), dlh(Exchange, RoutingKey, Strategy, Overflow, QName). 
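
%% The rabbit_quorum_queue hunk above makes the dead-letter-strategy lookup symmetric
%% with the exchange and routing-key lookups: the queue argument now takes precedence
%% over a policy for all three keys. The resulting dead_letter_handler handed to
%% rabbit_fifo is one of undefined, {at_most_once, {Mod, Fun, Args}} or at_least_once,
%% i.e. the shapes matched by rabbit_fifo_dlx:discard/3 and update_config/2 earlier in
%% this patch. A hedged sketch of declaring a quorum queue intended to end up with the
%% at_least_once handler, reusing the declare/3 helper from quorum_queue_SUITE (the
%% x-argument names and values are the broker's conventional ones and are assumed here,
%% not taken from this hunk):
declare_with_at_least_once_dlx(Ch, Q) ->
    declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"quorum">>},
                    {<<"x-dead-letter-exchange">>, longstr, <<"my-dlx">>},
                    {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}]).
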
diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index f736792b8d6f..bf372e3267a3 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -1390,7 +1390,8 @@ reject_publish_applied_after_limit_test(_) -> queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), max_length => 2, - overflow_strategy => reject_publish + overflow_strategy => reject_publish, + dead_letter_handler => undefined }, {State5, ok, Efx1} = apply(meta(5), rabbit_fifo:make_update_config(Conf), State4), ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx1), diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index bb52649a60b1..23a0523ca0cd 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -1,4 +1,3 @@ - -module(rabbit_fifo_dlx_SUITE). -compile(nowarn_export_all). @@ -11,6 +10,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). -include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). %%%=================================================================== %%% Common Test callbacks @@ -55,9 +55,15 @@ end_per_testcase(_TestCase, _Config) -> %%%=================================================================== discard_no_dlx_consumer(_Config) -> - S0 = rabbit_fifo_dlx:init(), + InitConfig = #{name => ?MODULE, + queue_resource => #resource{virtual_host = <<"/">>, + kind = queue, + name = <<"blah">>}, + release_cursor_interval => 1, + dead_letter_handler => at_least_once}, + S0 = rabbit_fifo:init(InitConfig), ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S0)), - S1 = rabbit_fifo_dlx:discard(make_msg(1), because, S0), + {S1, _, _} = rabbit_fifo_dlx:discard([make_msg(1)], because, S0), ?assertMatch(#{num_discarded := 1}, rabbit_fifo_dlx:overview(S1)), ok. diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 3d38b9cd00f6..6a59dc8d2d49 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1687,7 +1687,9 @@ expand(Ops, Config, EnqFun) -> %% execute each command against a rabbit_fifo state and capture all relevant %% effects InitConfig0 = #{name => proper, - queue_resource => blah, + queue_resource => #resource{virtual_host = <<"/">>, + kind = queue, + name = <<"blah">>}, release_cursor_interval => 1}, InitConfig = case Config of #{dead_letter_handler := at_least_once} -> From a47468bb82441f97f943552136938e2e6f0117a5 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 28 Dec 2021 18:32:46 +0100 Subject: [PATCH 30/97] Add rabbit_fifo_dlx unit tests --- deps/rabbit/src/rabbit_fifo_dlx.erl | 5 +- deps/rabbit/src/rabbit_fifo_dlx_sup.erl | 27 ++-- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 158 ++++++++++++++++++--- 3 files changed, 150 insertions(+), 40 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 955370eec8ae..e9ce4fd5c8e7 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -82,7 +82,8 @@ stat(#rabbit_fifo{dlx = #?MODULE{consumer = Con, {Num, Bytes}. -spec apply(ra_machine:command_meta_data(), rabbit_fifo:command(), rabbit_fifo:state()) -> - {rabbit_fifo:state(), ra_machine:effects()}. 
+ {rabbit_fifo:state(), Reply :: term(), ra_machine:effects()} | + {rabbit_fifo:state(), Reply :: term()}. apply(Meta, {dlx, #checkout{consumer = Pid, prefetch = Prefetch}}, #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, @@ -138,7 +139,7 @@ apply(#{index := IncomingRaftIdx} = Meta, {dlx, #settle{msg_ids = MsgIds}}, rabbit_fifo:update_smallest_raft_index(IncomingRaftIdx, State, Effects); apply(_, Cmd, #rabbit_fifo{cfg = #cfg{dead_letter_handler = DLH}} = State) -> rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", Cmd, DLH), - {State, []}. + {State, ok}. -spec discard([msg()], rabbit_dead_letter:reason(), rabbit_fifo:state()) -> {rabbit_fifo:state(), ra_machine:effects(), Delete :: boolean()}. diff --git a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl index 29043eec3f06..8af496b60483 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl @@ -17,21 +17,12 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - FeatureFlag = quorum_queue, - %%TODO rabbit_feature_flags:is_enabled(FeatureFlag) ? - case rabbit_ff_registry:is_enabled(FeatureFlag) of - true -> - SupFlags = #{strategy => simple_one_for_one, - intensity => 1, - period => 5}, - Worker = rabbit_fifo_dlx_worker, - ChildSpec = #{id => Worker, - start => {Worker, start_link, []}, - type => worker, - modules => [Worker]}, - {ok, {SupFlags, [ChildSpec]}}; - false -> - rabbit_log:info("not starting supervisor ~s because feature flag ~s is disabled", - [?MODULE, FeatureFlag]), - ignore - end. + SupFlags = #{strategy => simple_one_for_one, + intensity => 1, + period => 5}, + Worker = rabbit_fifo_dlx_worker, + ChildSpec = #{id => Worker, + start => {Worker, start_link, []}, + type => worker, + modules => [Worker]}, + {ok, {SupFlags, [ChildSpec]}}. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 23a0523ca0cd..9693c2669402 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -3,9 +3,6 @@ -compile(nowarn_export_all). -compile(export_all). --export([ - ]). - % -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). @@ -22,14 +19,14 @@ all() -> ]. -all_tests() -> - [ - discard_no_dlx_consumer - ]. - groups() -> [ - {tests, [], all_tests()} + {tests, [], [handler_undefined, + handler_at_most_once, + discard_dlx_consumer, + purge, + switch_strategies, + last_consumer_wins]} ]. init_per_suite(Config) -> @@ -54,19 +51,140 @@ end_per_testcase(_TestCase, _Config) -> %%% Test cases %%%=================================================================== -discard_no_dlx_consumer(_Config) -> - InitConfig = #{name => ?MODULE, - queue_resource => #resource{virtual_host = <<"/">>, - kind = queue, - name = <<"blah">>}, - release_cursor_interval => 1, - dead_letter_handler => at_least_once}, - S0 = rabbit_fifo:init(InitConfig), - ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S0)), - {S1, _, _} = rabbit_fifo_dlx:discard([make_msg(1)], because, S0), - ?assertMatch(#{num_discarded := 1}, rabbit_fifo_dlx:overview(S1)), +handler_undefined(_Config) -> + S0 = rabbit_fifo:init(init_config(undefined)), + ?assertEqual({S0, [], true}, rabbit_fifo_dlx:discard([make_msg(1)], because, S0)), + ok. 
+ +handler_at_most_once(_Config) -> + S0 = rabbit_fifo:init(init_config({at_most_once, {m, f, [a]}})), + {S0, Effects, true} = rabbit_fifo_dlx:discard([make_msg(1), + make_msg(2)], because, S0), + ?assertMatch([{log, [1, 2], _}], Effects), + ok. + +discard_dlx_consumer(_Config) -> + S0 = rabbit_fifo:init(init_config(at_least_once)), + ?assertEqual(#{num_discarded => 0, + num_discard_checked_out => 0, + discard_message_bytes => 0, + discard_checkout_message_bytes => 0}, rabbit_fifo_dlx:overview(S0)), + + %% message without dlx consumer + {S1, [], false} = rabbit_fifo_dlx:discard([make_msg(1)], because, S0), + {S2, []} = rabbit_fifo_dlx:checkout(S1), + ?assertEqual(#{num_discarded => 1, + num_discard_checked_out => 0, + discard_message_bytes => 1, + discard_checkout_message_bytes => 0}, rabbit_fifo_dlx:overview(S2)), + + %% with dlx consumer + Checkout = rabbit_fifo_dlx:make_checkout(self(), 2), + {S3, ok, DeliveryEffects0} = rabbit_fifo_dlx:apply(meta(2), Checkout, S2), + ?assertEqual(#{num_discarded => 0, + num_discard_checked_out => 1, + discard_message_bytes => 0, + discard_checkout_message_bytes => 1}, rabbit_fifo_dlx:overview(S3)), + ?assertMatch([{log, [1], _}], DeliveryEffects0), + + %% more messages than dlx consumer's prefetch + {S4, [], false} = rabbit_fifo_dlx:discard([make_msg(3), make_msg(4)], because, S3), + {S5, DeliveryEffects1} = rabbit_fifo_dlx:checkout(S4), + ?assertEqual(#{num_discarded => 1, + num_discard_checked_out => 2, + discard_message_bytes => 1, + discard_checkout_message_bytes => 2}, rabbit_fifo_dlx:overview(S5)), + ?assertMatch([{log, [3], _}], DeliveryEffects1), + ?assertEqual({3, 3}, rabbit_fifo_dlx:stat(S5)), + + %% dlx consumer acks messages + Settle = rabbit_fifo_dlx:make_settle([0,1]), + {S6, ok, DeliveryEffects2} = rabbit_fifo_dlx:apply(meta(5), Settle, S5), + ?assertEqual(#{num_discarded => 0, + num_discard_checked_out => 1, + discard_message_bytes => 0, + discard_checkout_message_bytes => 1}, rabbit_fifo_dlx:overview(S6)), + ?assertMatch([{log, [4], _}], DeliveryEffects2), + ?assertEqual({1, 1}, rabbit_fifo_dlx:stat(S6)), + ok. + +purge(_Config) -> + S0 = rabbit_fifo:init(init_config(at_least_once)), + Checkout = rabbit_fifo_dlx:make_checkout(self(), 1), + {S1, ok, _} = rabbit_fifo_dlx:apply(meta(1), Checkout, S0), + Msgs = [make_msg(2), make_msg(3)], + {S2, _, _} = rabbit_fifo_dlx:discard(Msgs, because, S1), + {S3, _} = rabbit_fifo_dlx:checkout(S2), + ?assertMatch(#{num_discarded := 1, + num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S3)), + + {S4, PurgedMsgs} = rabbit_fifo_dlx:purge(S3), + ?assertEqual(Msgs, PurgedMsgs), + ?assertEqual(#{num_discarded => 0, + num_discard_checked_out => 0, + discard_message_bytes => 0, + discard_checkout_message_bytes => 0}, rabbit_fifo_dlx:overview(S4)), ok. +switch_strategies(_Config) -> + {ok, _} = rabbit_fifo_dlx_sup:start_link(), + S0 = rabbit_fifo:init(init_config(undefined)), + + %% Switching from undefined to at_least_once should start dlx consumer. 
+ {S1, Effects} = rabbit_fifo_dlx:update_config( + #{dead_letter_handler => at_least_once}, S0), + ?assertEqual([{aux, {dlx, setup}}], Effects), + rabbit_fifo_dlx:handle_aux(leader, setup, fake_aux, S1), + [{_, WorkerPid, worker, _}] = supervisor:which_children(rabbit_fifo_dlx_sup), + {S2, _, _} = rabbit_fifo_dlx:discard([make_msg(1)], because, S1), + Checkout = rabbit_fifo_dlx:make_checkout(WorkerPid, 1), + {S3, ok, _} = rabbit_fifo_dlx:apply(meta(2), Checkout, S2), + ?assertMatch(#{num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S3)), + + %% Switching from at_least_once to undefined should terminate dlx consumer. + {S4, []} = rabbit_fifo_dlx:update_config( + #{dead_letter_handler => undefined}, S3), + ?assertMatch([_, {active, 0}, _, _], + supervisor:count_children(rabbit_fifo_dlx_sup)), + ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S4)), + ok. + +last_consumer_wins(_Config) -> + S0 = rabbit_fifo:init(init_config(at_least_once)), + Msgs = [make_msg(1), make_msg(2), make_msg(3), make_msg(4)], + {S1, [], false} = rabbit_fifo_dlx:discard(Msgs, because, S0), + Checkout = rabbit_fifo_dlx:make_checkout(self(), 5), + {S2, ok, DeliveryEffects0} = rabbit_fifo_dlx:apply(meta(5), Checkout, S1), + ?assertMatch([{log, [1, 2, 3, 4], _}], DeliveryEffects0), + ?assertEqual(#{num_discarded => 0, + num_discard_checked_out => 4, + discard_message_bytes => 0, + discard_checkout_message_bytes => 4}, rabbit_fifo_dlx:overview(S2)), + + %% When another (or the same) consumer (re)subscribes, + %% we expect this new consumer to be checked out and delivered all messages + %% from the previous consumer. + {S3, ok, DeliveryEffects1} = rabbit_fifo_dlx:apply(meta(6), Checkout, S2), + ?assertMatch([{log, [1, 2, 3, 4], _}], DeliveryEffects1), + ?assertEqual(#{num_discarded => 0, + num_discard_checked_out => 4, + discard_message_bytes => 0, + discard_checkout_message_bytes => 4}, rabbit_fifo_dlx:overview(S3)), + ok. make_msg(RaftIdx) -> ?INDEX_MSG(RaftIdx, ?DISK_MSG(1)). + +meta(Idx) -> + #{index => Idx, + term => 1, + system_time => 0, + from => {make_ref(), self()}}. + +init_config(Handler) -> + #{name => ?MODULE, + queue_resource => #resource{virtual_host = <<"/">>, + kind = queue, + name = <<"blah">>}, + release_cursor_interval => 1, + dead_letter_handler => Handler}. From 7134ca10158426333d22be2125851fc4399f8925 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 3 Jan 2022 15:47:43 +0100 Subject: [PATCH 31/97] Simplify receiving dlx deliveries Compared to rabbit_fifo_client, rabbit_fifo_dlx_client always gets delivered from a local queue process. Therefore, we expect no missing and no duplicate messages. If the queue leader process is down, the worker (and therefore dlx client) terminates itself. If the worker is down (crashes) a new one will be spawned by the supervisor so that the new worker checks out (i.e. registers) and the server will resend all in-flight messages to the new dlx client. 
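In short, the delivery handler reduces to a strict in-order assertion. A condensed
sketch (the actual change is in the diff below and reuses the module's existing
#state{} record and transform_msgs/2 helper):

    %% Deliveries come from the local queue process, so the first id in a
    %% delivery must directly follow the last id we have seen. If it does not,
    %% let the worker crash: the supervisor spawns a new worker, the new dlx
    %% client checks out (registers) again and the server resends all
    %% in-flight messages.
    handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs},
                    #state{queue_resource = QRes, last_msg_id = Prev} = State0) ->
        FstId = Prev + 1,
        Del = {deliver, transform_msgs(QRes, IdMsgs)},
        {LastId, _} = lists:last(IdMsgs),
        {ok, State0#state{last_msg_id = LastId}, [Del]}.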
--- deps/rabbit/src/rabbit_fifo_dlx_client.erl | 42 ++++++---------------- 1 file changed, 10 insertions(+), 32 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index 141926532083..8bd341392c15 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -43,44 +43,22 @@ process_command(Cmd, #state{leader = Leader} = State, Tries) -> handle_ra_event(Leader, {machine, {dlx_delivery, _} = Del}, #state{leader = Leader} = State) -> handle_delivery(Del, State); -handle_ra_event(_From, Evt, State) -> - rabbit_log:warning("~s received unknown ra event: ~p", [?MODULE, Evt]), +handle_ra_event(From, Evt, State) -> + rabbit_log:debug("Ignoring ra event ~p from ~p", [Evt, From]), {ok, State, []}. handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs}, #state{queue_resource = QRes, last_msg_id = Prev} = State0) -> - %% format as a deliver action - {LastId, _} = lists:last(IdMsgs), + %% Assert that messages get delivered in order since deliveries are node local. + %% (In contrast to rabbit_fifo_client, we expect neither duplicate nor missing messages.) + %% Let it crash if this assertion is wrong. + FstId = Prev + 1, + %% Format as a deliver action. Del = {deliver, transform_msgs(QRes, IdMsgs)}, - case Prev of - Prev when FstId =:= Prev+1 -> - %% expected message ID(s) got delivered - State = State0#state{last_msg_id = LastId}, - {ok, State, [Del]}; - Prev when FstId > Prev+1 -> - %% messages ID(s) are missing, therefore fetch all checked-out discarded messages - %% TODO implement as done in - %% https://github.com/rabbitmq/rabbitmq-server/blob/b4eb5e2cfd7f85a1681617dc489dd347fa9aac72/deps/rabbit/src/rabbit_fifo_client.erl#L732-L744 - %% A: not needed because of local guarantees, let it crash - exit(not_implemented); - Prev when FstId =< Prev -> - rabbit_log:debug("dropping messages with duplicate IDs (~b to ~b) consumed from ~s", - [FstId, Prev, rabbit_misc:rs(QRes)]), - case lists:dropwhile(fun({Id, _}) -> Id =< Prev end, IdMsgs) of - [] -> - {ok, State0, []}; - IdMsgs2 -> - handle_delivery({dlx_delivery, IdMsgs2}, State0) - end; - _ when FstId =:= 0 -> - % the very first delivery - % TODO We init last_msg_id with -1. So, why would we ever run into this branch? - % A: can be a leftover - rabbit_log:debug("very first delivery consumed from ~s", [rabbit_misc:rs(QRes)]), - State = State0#state{last_msg_id = 0}, - {ok, State, [Del]} - end. + {LastId, _} = lists:last(IdMsgs), + State = State0#state{last_msg_id = LastId}, + {ok, State, [Del]}. transform_msgs(QRes, Msgs) -> lists:map( From 897947b24021fdce050063a294d383fd2c2d2a21 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 3 Jan 2022 17:38:34 +0100 Subject: [PATCH 32/97] Clean up when terminating dlx worker Cancel the timer since it's not needed anymore. --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 36 ++++++++++++---------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 9273d97d3020..c155316c37b5 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -121,9 +121,8 @@ handle_continue(QRef, undefined) -> {noreply, State#state{dlx_client_state = ConsumerState, monitor_ref = MonitorRef}}. -terminate(_Reason, _State) -> - %%TODO cancel timer? - ok. +terminate(_Reason, State) -> + cancel_timer(State). 
handle_call(Request, From, State) -> rabbit_log:warning("~s received unhandled call from ~p: ~p", [?MODULE, From, Request]), @@ -532,10 +531,12 @@ strings(QRefs) when is_list(QRefs) -> L1 = lists:join(", ", L0), lists:flatten(L1). -maybe_set_timer(#state{timer = TRef} = State) when is_reference(TRef) -> +maybe_set_timer(#state{timer = TRef} = State) + when is_reference(TRef) -> State; maybe_set_timer(#state{timer = undefined, - pendings = Pendings} = State) when map_size(Pendings) =:= 0 -> + pendings = Pendings} = State) + when map_size(Pendings) =:= 0 -> State; maybe_set_timer(#state{timer = undefined, settle_timeout = SettleTimeout} = State) -> @@ -543,18 +544,21 @@ maybe_set_timer(#state{timer = undefined, % rabbit_log:debug("set timer"), State#state{timer = TRef}. -maybe_cancel_timer(#state{timer = undefined} = State) -> - State; maybe_cancel_timer(#state{timer = TRef, - pendings = Pendings} = State) -> - case maps:size(Pendings) of - 0 -> - erlang:cancel_timer(TRef, [{async, true}, {info, false}]), - % rabbit_log:debug("cancelled timer"), - State#state{timer = undefined}; - _ -> - State - end. + pendings = Pendings} = State) + when is_reference(TRef), + map_size(Pendings) =:= 0 -> + erlang:cancel_timer(TRef, [{async, true}, {info, false}]), + State#state{timer = undefined}; +maybe_cancel_timer(State) -> + State. + +cancel_timer(#state{timer = undefined} = State) -> + State; +cancel_timer(#state{timer = TRef} = State) + when is_reference(TRef) -> + erlang:cancel_timer(TRef, [{async, true}, {info, false}]), + State#state{timer = undefined}. %% Avoids large message contents being logged. format_status(_Opt, [_PDict, #state{ From a87d337909576dfc7bd7c36eeb9c789400afe7b1 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 4 Jan 2022 09:37:31 +0100 Subject: [PATCH 33/97] Handle queue eol event to clean up target quorum queues when they get deleted. --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 16 ++--- .../rabbit_fifo_dlx_integration_SUITE.erl | 66 +++++++++++++++++++ 2 files changed, 74 insertions(+), 8 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index c155316c37b5..0638c36cfd8b 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -135,25 +135,26 @@ handle_cast({queue_event, QRef, {_From, {machine, lookup_topology}}}, handle_cast({queue_event, QRef, {From, Evt}}, #state{queue_ref = QRef, dlx_client_state = DlxState0} = State0) -> - %% received dead-letter messsage from source queue - % rabbit_log:debug("~s received queue event: ~p", [rabbit_misc:rs(QRef), E]), + %% received dead-letter message from source queue {ok, DlxState, Actions} = rabbit_fifo_dlx_client:handle_ra_event(From, Evt, DlxState0), State1 = State0#state{dlx_client_state = DlxState}, State = handle_queue_actions(Actions, State1), {noreply, State}; handle_cast({queue_event, QRef, Evt}, #state{queue_type_state = QTypeState0} = State0) -> - %% received e.g. confirm from target queue case rabbit_queue_type:handle_event(QRef, Evt, QTypeState0) of {ok, QTypeState1, Actions} -> + %% received e.g. 
confirm from target queue State1 = State0#state{queue_type_state = QTypeState1}, State = handle_queue_actions(Actions, State1), {noreply, State}; - %% TODO handle as done in - %% https://github.com/rabbitmq/rabbitmq-server/blob/9cf18e83f279408e20430b55428a2b19156c90d7/deps/rabbit/src/rabbit_channel.erl#L771-L783 eol -> - {noreply, State0}; - {protocol_error, _Type, _Reason, _ReasonArgs} -> + %% Do not confirm pending messages whose target queue got deleted. + %% Irrespective of exchanges, queues, bindings created or deleted (actual state), + %% we respect the configured dead-letter routing topology (desired state). + QTypeState = rabbit_queue_type:remove(QRef, QTypeState0), + {noreply, State0#state{queue_type_state = QTypeState}}; + {protocol_error, _Type, _Reason, _Args} -> {noreply, State0} end; handle_cast(settle_timeout, State0) -> @@ -208,7 +209,6 @@ lookup_topology(#state{queue_ref = {resource, Vhost, queue, _} = QRef} = State) State#state{exchange_ref = DLXRef, routing_key = DLRKey}. -%% https://github.com/rabbitmq/rabbitmq-server/blob/9cf18e83f279408e20430b55428a2b19156c90d7/deps/rabbit/src/rabbit_channel.erl#L2855-L2888 handle_queue_actions(Actions, State0) -> lists:foldl( fun ({deliver, Msgs}, S0) -> diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 93bd98935a66..7599a6095967 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -30,6 +30,7 @@ groups() -> rejected, delivery_limit, target_queue_not_bound, + target_queue_deleted, dlx_missing, stats, drop_head_falls_back_to_at_most_once, @@ -247,6 +248,71 @@ target_queue_not_bound(Config) -> payload = Msg}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). +%% Test that message is not lost when target queue gets deleted +%% because dead-letter routing topology should always be respected. +target_queue_deleted(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + DLX = ?config(dead_letter_exchange, Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = SourceQ, + durable = true, + arguments = [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ] + }), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), + %% Make target queue a quorum queue to provoke sending an 'eol' message to dlx worker. 
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ + queue = TargetQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}] + }), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + Msg1 = <<"m1">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg1}), + RaName = ra_name(SourceQ), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}))), + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}), + Msg2 = <<"m2">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg2}), + %% Message should not be lost despite deleted target queue. + eventually(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + consistently(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + %% Message should be delivered once target queue is recreated. + %% (This time we simply create a classic target queue.) + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ + queue = TargetQ, + exchange = DLX, + routing_key = <<"k1">> + }), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), 500, 5), + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))). + %% Test that message is not lost when configured dead-letter exchange does not exist. %% Once, the exchange gets declared, the message is delivered to the target queue %% and acked to the source quorum queue. From 61fcf33aa0b3cdafb190574b78673eec0db4a444 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 6 Jan 2022 21:22:10 +0100 Subject: [PATCH 34/97] Make rabbit_fifo_dlx not depend on rabbit_fifo so that we won't have to copy rabbit_fifo_dlx.erl file for new ra machine versions in the future. Consequences: 1. rabbit_fifo_dlx maintains its own ra_indexes. 2. rabbit_fifo_dlx does not modify rabbit_fifo's messages_total. Instead, rabbit_fifo asks rabbit_fifo_dlx for its total messages. This commit still maintains correct dehydration + snapshotting for in-memory messages. However, after this commit, messages in rabbit_fifo_dlx do not count as in-memory messages anymore even when they are in-memory. 
That's okay given that we remove in-memory messages shortly via https://github.com/rabbitmq/rabbitmq-server/issues/3898 --- deps/rabbit/src/rabbit_fifo.erl | 289 ++++++------ deps/rabbit/src/rabbit_fifo.hrl | 6 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 418 ++++++++---------- deps/rabbit/src/rabbit_fifo_dlx.hrl | 6 +- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 11 +- deps/rabbit/src/rabbit_fifo_index.erl | 7 +- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 100 +++-- .../rabbit_fifo_dlx_integration_SUITE.erl | 11 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 27 +- 9 files changed, 426 insertions(+), 449 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index e8c952f6b11b..ce5b1ba82ec8 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -70,13 +70,7 @@ make_purge/0, make_purge_nodes/1, make_update_config/1, - make_garbage_collection/0, - - %% called by rabbit_fifo_dlx - checkout/5, - update_smallest_raft_index/3, - delete_indexes/2, - subtract_in_memory/2 + make_garbage_collection/0 ]). %% command records representing all the protocol actions that are supported @@ -127,7 +121,7 @@ -type client_msg() :: delivery(). %% the messages `rabbit_fifo' can send to consumers. --type state() :: #?MODULE{}. +-opaque state() :: #?MODULE{}. -export_type([protocol/0, delivery/0, @@ -225,13 +219,15 @@ apply(Meta, case Cons0 of #{ConsumerId := Con0} -> complete_and_checkout(Meta, MsgIds, ConsumerId, - Con0, [], State, true); + Con0, [], State); _ -> {State, ok} end; apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons} = State0) -> + #?MODULE{consumers = Cons, + dlx = DlxState0, + cfg = #cfg{dead_letter_handler = DLH}} = State0) -> case Cons of #{ConsumerId := #consumer{checked_out = Checked} = Con} -> % Publishing to dead-letter exchange must maintain same order as messages got rejected. 
@@ -243,8 +239,9 @@ apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, false end end, MsgIds), - {State, Effects, Delete} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, State0), - complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State, Delete); + {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, DLH, DlxState0), + State = State0#?MODULE{dlx = DlxState}, + complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State); _ -> {State0, ok} end; @@ -434,38 +431,33 @@ apply(#{index := Index}, #purge{}, #?MODULE{messages_total = Tot, returns = Returns, messages = Messages, - ra_indexes = Indexes0} = State0) -> + ra_indexes = Indexes0, + dlx = DlxState} = State0) -> NumReady = messages_ready(State0), Indexes1 = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> Acc end, Indexes0, lqueue:to_list(Returns)), - Indexes2 = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc0); - (_, Acc) -> - Acc - end, Indexes1, lqueue:to_list(Messages)), - {State1, DiscardMsgs} = rabbit_fifo_dlx:purge(State0), Indexes = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> Acc - end, Indexes2, DiscardMsgs), - NumPurged = NumReady + length(DiscardMsgs), - - State2 = State1#?MODULE{ra_indexes = Indexes, + end, Indexes1, lqueue:to_list(Messages)), + {NumDlx, _} = rabbit_fifo_dlx:stat(DlxState), + State1 = State0#?MODULE{ra_indexes = Indexes, + dlx = rabbit_fifo_dlx:purge(DlxState), messages = lqueue:new(), - messages_total = Tot - NumPurged, + messages_total = Tot - NumReady, returns = lqueue:new(), msg_bytes_enqueue = 0, prefix_msgs = {0, [], 0, []}, msg_bytes_in_memory = 0, msgs_ready_in_memory = 0}, Effects0 = [garbage_collection], - Reply = {purge, NumPurged}, + Reply = {purge, NumReady + NumDlx}, {State, _, Effects} = evaluate_limit(Index, false, State0, - State2, Effects0), + State1, Effects0), update_smallest_raft_index(Index, Reply, State, Effects); apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); @@ -609,16 +601,25 @@ apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> purge_node(Meta, Node, S, E) end, {State0, []}, Nodes), update_smallest_raft_index(Idx, ok, State, Effects); -apply(#{index := Idx} = Meta, #update_config{config = Conf}, State0) -> - {State1, Effects0} = rabbit_fifo_dlx:update_config(Conf, State0), - State2 = update_config(Conf, State1), - {State, Reply, Effects} = checkout(Meta, State0, State2, Effects0), +apply(#{index := Idx} = Meta, + #update_config{config = #{dead_letter_handler := NewDLH} = Conf}, + #?MODULE{cfg = #cfg{dead_letter_handler = OldDLH, + resource = QRes}, + dlx = DlxState0} = State0) -> + {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, DlxState0), + State1 = update_config(Conf, State0#?MODULE{dlx = DlxState}), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects0), update_smallest_raft_index(Idx, Reply, State, Effects); apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> State = convert(FromVersion, ToVersion, V0State), {State, ok, [{aux, {dlx, setup}}]}; -apply(Meta, {dlx, _} = Cmd, State) -> - rabbit_fifo_dlx:apply(Meta, Cmd, State); +apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd, + #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State0) -> + {DlxState, 
Effects0} = rabbit_fifo_dlx:apply(Meta, Cmd, DLH, DlxState0), + State1 = State0#?MODULE{dlx = DlxState}, + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0, false), + update_smallest_raft_index(IncomingRaftIdx, State, Effects); apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), @@ -780,8 +781,10 @@ update_waiting_consumer_status(Node, Consumer#consumer.status =/= cancelled]. -spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). -state_enter(RaState, State) -> - Effects = rabbit_fifo_dlx:state_enter(RaState, State), +state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}, + dlx = DlxState} = State) -> + Effects = rabbit_fifo_dlx:state_enter(RaState, QRes, DLH, DlxState), state_enter0(RaState, State, Effects). state_enter0(leader, #?MODULE{consumers = Cons, @@ -833,12 +836,13 @@ state_enter0(_, _, Effects) -> tick(Ts, #?MODULE{cfg = #cfg{name = Name, resource = QName}, msg_bytes_enqueue = EnqueueBytes, - msg_bytes_checkout = CheckoutBytes} = State) -> + msg_bytes_checkout = CheckoutBytes, + dlx = DlxState} = State) -> case is_expired(Ts, State) of true -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; false -> - {_, MsgBytesDiscard} = rabbit_fifo_dlx:stat(State), + {_, MsgBytesDiscard} = rabbit_fifo_dlx:stat(DlxState), Metrics = {Name, messages_ready(State), num_checked_out(State), % checked out @@ -860,7 +864,8 @@ overview(#?MODULE{consumers = Cons, msg_bytes_in_memory = InMemBytes, msg_bytes_enqueue = EnqueueBytes, msg_bytes_checkout = CheckoutBytes, - cfg = Cfg} = State) -> + cfg = Cfg, + dlx = DlxState} = State) -> Conf = #{name => Cfg#cfg.name, resource => Cfg#cfg.resource, release_cursor_interval => Cfg#cfg.release_cursor_interval, @@ -874,7 +879,6 @@ overview(#?MODULE{consumers = Cons, msg_ttl => Cfg#cfg.msg_ttl, delivery_limit => Cfg#cfg.delivery_limit }, - {Smallest, _} = smallest_raft_index(State), Overview = #{type => ?MODULE, config => Conf, num_consumers => maps:size(Cons), @@ -889,8 +893,8 @@ overview(#?MODULE{consumers = Cons, enqueue_message_bytes => EnqueueBytes, checkout_message_bytes => CheckoutBytes, in_memory_message_bytes => InMemBytes, - smallest_raft_index => Smallest}, - DlxOverview = rabbit_fifo_dlx:overview(State), + smallest_raft_index => smallest_raft_index(State)}, + DlxOverview = rabbit_fifo_dlx:overview(DlxState), maps:merge(Overview, DlxOverview). -spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> @@ -1013,9 +1017,9 @@ handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, Ts = case smallest_raft_index(State) of %% if there are no entries, we return current timestamp %% so that any previously obtained entries are considered older than this - {undefined, _} -> + undefined -> erlang:system_time(millisecond); - {Idx, _} when is_integer(Idx) -> + Idx when is_integer(Idx) -> %% TODO: make more defensive to avoid potential crash {{_, _, {_, Meta, _, _}}, _Log1} = ra_log:fetch(Idx, Log), #{ts := Timestamp} = Meta, @@ -1035,8 +1039,11 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, Err -> {reply, Err, Aux0, Log0} end; -handle_aux(RaState, _, {dlx, Cmd}, Aux0, Log, State) -> - Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, State), +handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, Log, + #?MODULE{dlx = DlxState, + cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}}) -> + Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, QRes, DLH, DlxState), {no_reply, Aux, Log}. 
eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, @@ -1182,8 +1189,8 @@ query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes, msgs_ready_in_memory = Length}) -> {Length, Bytes}. -query_stat_dlx(State) -> - rabbit_fifo_dlx:stat(State). +query_stat_dlx(#?MODULE{dlx = DlxState}) -> + rabbit_fifo_dlx:stat(DlxState). query_peek(Pos, State0) when Pos > 0 -> case take_next_msg(State0) of @@ -1230,9 +1237,11 @@ messages_ready(#?MODULE{messages = M, messages_total(#?MODULE{messages = _M, messages_total = Total, ra_indexes = _Indexes, - prefix_msgs = _}) -> + prefix_msgs = _, + dlx = DlxState}) -> % lqueue:len(M) + rabbit_fifo_index:size(Indexes) + RCnt + PCnt. - Total; + {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), + Total + DlxTotal; %% release cursors might be old state (e.g. after recent upgrade) messages_total(State) -> try @@ -1422,12 +1431,13 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), State2 = State1#?MODULE{ra_indexes = Indexes}, State3 = decr_total(add_bytes_drop(Header, State2)), - State4 = case Msg of - ?DISK_MSG(_) -> State3; - _ -> - subtract_in_memory_counts(Header, State3) - end, - {State, DlxEffects, true} = rabbit_fifo_dlx:discard([FullMsg], maxlen, State4), + #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState} = State = case Msg of + ?DISK_MSG(_) -> State3; + _ -> + subtract_in_memory_counts(Header, State3) + end, + {_, DlxEffects} = rabbit_fifo_dlx:discard([FullMsg], maxlen, DLH, DlxState), {State, DlxEffects ++ Effects}; empty -> {State0, Effects} @@ -1575,7 +1585,7 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, % used to process messages that are finished complete(Meta, ConsumerId, DiscardedMsgIds, #consumer{checked_out = Checked} = Con0, - #?MODULE{messages_total = Tot} = State0, Delete) -> + #?MODULE{messages_total = Tot} = State0) -> %% credit_mode = simple_prefetch should automatically top-up credit %% as messages are simple_prefetch or otherwise returned Discarded = maps:with(DiscardedMsgIds, Checked), @@ -1588,13 +1598,8 @@ complete(Meta, ConsumerId, DiscardedMsgIds, add_bytes_settle( get_msg_header(Msg), Acc) end, State1, DiscardedMsgs), - case Delete of - true -> - State = State2#?MODULE{messages_total = Tot - Len}, - delete_indexes(DiscardedMsgs, State); - false -> - State2 - end. + State = State2#?MODULE{messages_total = Tot - Len}, + delete_indexes(DiscardedMsgs, State). delete_indexes(Msgs, #?MODULE{ra_indexes = Indexes0} = State) -> %% TODO: optimise by passing a list to rabbit_fifo_index @@ -1619,8 +1624,8 @@ increase_credit(#consumer{credit = Current}, Credit) -> complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, #consumer{} = Con0, - Effects0, State0, Delete) -> - State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0, Delete), + Effects0, State0) -> + State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0), {State, ok, Effects} = checkout(Meta, State0, State1, Effects0, false), update_smallest_raft_index(IncomingRaftIdx, State, Effects). 
@@ -1635,12 +1640,11 @@ update_smallest_raft_index(Idx, State, Effects) -> update_smallest_raft_index(IncomingRaftIdx, Reply, #?MODULE{cfg = Cfg, - release_cursors = Cursors0} = State00, + release_cursors = Cursors0} = State0, Effects) -> - %% TODO: optimise - {Smallest, State0} = smallest_raft_index(State00), Total = messages_total(State0), - case Smallest of + %% TODO: optimise + case smallest_raft_index(State0) of undefined when Total == 0 -> % there are no messages on queue anymore and no pending enqueues % we can forward release_cursor all the way until @@ -1654,7 +1658,7 @@ update_smallest_raft_index(IncomingRaftIdx, Reply, {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; undefined -> {State0, Reply, Effects}; - _ -> + Smallest when is_integer(Smallest) -> case find_next_cursor(Smallest, Cursors0) of empty -> {State0, Reply, Effects}; @@ -1718,7 +1722,9 @@ get_header(Key, Header) when is_map(Header) -> return_one(Meta, MsgId, Msg0, #?MODULE{returns = Returns, consumers = Consumers, - cfg = #cfg{delivery_limit = DeliveryLimit}} = State0, + dlx = DlxState0, + cfg = #cfg{delivery_limit = DeliveryLimit, + dead_letter_handler = DLH}} = State0, Effects0, ConsumerId) -> #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers), Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), @@ -1726,8 +1732,9 @@ return_one(Meta, MsgId, Msg0, case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> %% TODO: don't do for prefix msgs - {State1, DlxEffects, Delete} = rabbit_fifo_dlx:discard([Msg], delivery_limit, State0), - State = complete(Meta, ConsumerId, [MsgId], Con0, State1, Delete), + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), + State1 = State0#?MODULE{dlx = DlxState}, + State = complete(Meta, ConsumerId, [MsgId], Con0, State1), {State, DlxEffects ++ Effects0}; _ -> Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, @@ -1778,8 +1785,11 @@ checkout(Meta, OldState, State, Effects) -> checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, Effects0, HandleConsumerChanges) -> - {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), - {State2, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(State1), + {#?MODULE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State1, _Result, Effects1} = + checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), + {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), + State2 = State1#?MODULE{dlx = DlxState}, Effects2 = DlxDeliveryEffects ++ Effects1, case evaluate_limit(Index, false, OldState, State2, Effects2) of {State, true, Effects} -> @@ -2092,30 +2102,27 @@ expire_msgs(RaCmdTs, State, Effects) -> expire(RaCmdTs, Header, State0, Effects) -> {Msg, State1} = take_next_msg(State0), - State2 = add_bytes_drop(Header, State1), - {#?MODULE{ra_indexes = Indexes0} = State3, DlxEffects, Delete} = - rabbit_fifo_dlx:discard([Msg], expired, State2), - State = case Delete of - false -> - State3; - true -> - State5 = case Msg of - ?INDEX_MSG(Idx, ?DISK_MSG(_Header)) - when is_integer(Idx) -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State3#?MODULE{ra_indexes = Indexes}; - ?INDEX_MSG(Idx, ?MSG(_Header, _)) - when is_integer(Idx) -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State4 = State3#?MODULE{ra_indexes = Indexes}, - subtract_in_memory_counts(Header, State4); - ?PREFIX_MEM_MSG(_) -> - 
subtract_in_memory_counts(Header, State3); - ?DISK_MSG(_) -> - State3 - end, - decr_total(State5) - end, + #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0, + ra_indexes = Indexes0} = State2 = add_bytes_drop(Header, State1), + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, DLH, DlxState0), + State3 = State2#?MODULE{dlx = DlxState}, + State5 = case Msg of + ?INDEX_MSG(Idx, ?DISK_MSG(_Header)) + when is_integer(Idx) -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State3#?MODULE{ra_indexes = Indexes}; + ?INDEX_MSG(Idx, ?MSG(_Header, _)) + when is_integer(Idx) -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State4 = State3#?MODULE{ra_indexes = Indexes}, + subtract_in_memory_counts(Header, State4); + ?PREFIX_MEM_MSG(_) -> + subtract_in_memory_counts(Header, State3); + ?DISK_MSG(_) -> + State3 + end, + State = decr_total(State5), expire_msgs(RaCmdTs, State, DlxEffects ++ Effects). timer_effect(RaCmdTs, State, Effects) -> @@ -2227,7 +2234,8 @@ maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, %% potentially used to for a snaphot at a later point dehydrate_state(#?MODULE{msg_bytes_in_memory = 0, cfg = #cfg{max_in_memory_length = 0}, - consumers = Consumers} = State) -> + consumers = Consumers, + dlx = DlxState} = State) -> % no messages are kept in memory, no need to % overly mutate the current state apart from removing indexes and cursors State#?MODULE{ @@ -2235,12 +2243,14 @@ dehydrate_state(#?MODULE{msg_bytes_in_memory = 0, consumers = maps:map(fun (_, C) -> dehydrate_consumer(C) end, Consumers), - release_cursors = lqueue:new()}; + release_cursors = lqueue:new(), + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}; dehydrate_state(#?MODULE{messages = Messages, consumers = Consumers, returns = Returns, prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, - waiting_consumers = Waiting0} = State0) -> + waiting_consumers = Waiting0, + dlx = DlxState} = State) -> RCnt = lqueue:len(Returns), %% TODO: optimise this function as far as possible PrefRet1 = lists:foldr(fun (M, Acc) -> @@ -2252,17 +2262,17 @@ dehydrate_state(#?MODULE{messages = Messages, %% recovering from a snapshot PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0], - State = State0#?MODULE{messages = lqueue:new(), - ra_indexes = rabbit_fifo_index:empty(), - release_cursors = lqueue:new(), - consumers = maps:map(fun (_, C) -> - dehydrate_consumer(C) - end, Consumers), - returns = lqueue:new(), - prefix_msgs = {PRCnt + RCnt, PrefRet, - PPCnt + lqueue:len(Messages), PrefMsgs}, - waiting_consumers = Waiting}, - rabbit_fifo_dlx:dehydrate(State). + State#?MODULE{messages = lqueue:new(), + ra_indexes = rabbit_fifo_index:empty(), + release_cursors = lqueue:new(), + consumers = maps:map(fun (_, C) -> + dehydrate_consumer(C) + end, Consumers), + returns = lqueue:new(), + prefix_msgs = {PRCnt + RCnt, PrefRet, + PPCnt + lqueue:len(Messages), PrefMsgs}, + waiting_consumers = Waiting, + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. 
dehydrate_messages(Msgs0) -> {OutRes, Msgs} = lqueue:out(Msgs0), @@ -2294,12 +2304,12 @@ dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> normalize(#?MODULE{ra_indexes = _Indexes, returns = Returns, messages = Messages, - release_cursors = Cursors} = State0) -> - State = State0#?MODULE{ - returns = lqueue:from_list(lqueue:to_list(Returns)), - messages = lqueue:from_list(lqueue:to_list(Messages)), - release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}, - rabbit_fifo_dlx:normalize(State). + release_cursors = Cursors, + dlx = DlxState} = State) -> + State#?MODULE{returns = lqueue:from_list(lqueue:to_list(Returns)), + messages = lqueue:from_list(lqueue:to_list(Messages)), + release_cursors = lqueue:from_list(lqueue:to_list(Cursors)), + dlx = rabbit_fifo_dlx:normalize(DlxState)}. is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> @@ -2526,37 +2536,18 @@ convert(0, To, State0) -> convert(1, To, State0) -> convert(2, To, convert_v1_to_v2(State0)). -smallest_raft_index(#?MODULE{cfg = _Cfg, - messages = Messages, - ra_indexes = Indexes0 - } = State) -> - case rabbit_fifo_index:smallest(Indexes0) of - I when is_integer(I) -> - case lqueue:peek(Messages) of - {value, ?INDEX_MSG(Idx, _)} -> - {min(I, Idx), State}; - _ -> - {I, State} - end; - _ -> - case lqueue:peek(Messages) of - {value, ?INDEX_MSG(I, _)} -> - {I, State}; - _ -> - {undefined, State} - end - end. - -subtract_in_memory(Msgs, State) -> - lists:foldl(fun(?INDEX_MSG(_, ?DISK_MSG(_)), S) -> - S; - (?INDEX_MSG(_, ?MSG(H, _)), S) -> - subtract_in_memory_counts(H, S); - (?DISK_MSG(_), S) -> - S; - (?PREFIX_MEM_MSG(H), S) -> - subtract_in_memory_counts(H, S) - end, State, Msgs). +smallest_raft_index(#?MODULE{messages = Messages, + ra_indexes = Indexes, + dlx = DlxState}) -> + SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), + SmallestMsgsRaIdx = case lqueue:peek(Messages) of + {value, ?INDEX_MSG(I, _)} -> + I; + _ -> + undefined + end, + SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), + lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). make_requeue(ConsumerId, Notify, [{MsgId, Msg}], Acc) -> lists:reverse([{append, diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 70cc8d0dfe8c..aba592a3e984 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -127,6 +127,8 @@ -type milliseconds() :: non_neg_integer(). +-type dead_letter_handler() :: option({at_most_once, applied_mfa()} | at_least_once). 
+ -record(enqueuer, {next_seqno = 1 :: msg_seqno(), % out of order enqueues - sorted list @@ -144,7 +146,7 @@ {name :: atom(), resource :: rabbit_types:r('queue'), release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}), - dead_letter_handler :: option({at_most_once, applied_mfa()} | at_least_once), + dead_letter_handler :: dead_letter_handler(), become_leader_handler :: option(applied_mfa()), overflow_strategy = drop_head :: drop_head | reject_publish, max_length :: option(non_neg_integer()), @@ -230,7 +232,7 @@ -type config() :: #{name := atom(), queue_resource := rabbit_types:r('queue'), - dead_letter_handler => option({at_most_once, applied_mfa()} | at_least_once), + dead_letter_handler => dead_letter_handler(), become_leader_handler => applied_mfa(), release_cursor_interval => non_neg_integer(), max_length => non_neg_integer(), diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index e9ce4fd5c8e7..24a9ab36e468 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -10,17 +10,18 @@ make_settle/1, %% rabbit_fifo delegating DLX handling to this module init/0, - apply/3, - discard/3, + apply/4, + discard/4, overview/1, - checkout/1, - state_enter/2, - handle_aux/4, + checkout/2, + state_enter/4, + handle_aux/6, purge/1, dehydrate/1, normalize/1, stat/1, - update_config/2 + update_config/4, + smallest_raft_index/1 ]). -record(checkout,{ @@ -28,8 +29,7 @@ prefetch :: non_neg_integer() }). -record(settle, {msg_ids :: [msg_id()]}). --type command() :: #checkout{} | #settle{}. --type protocol() :: {dlx, command()}. +-type protocol() :: {dlx, #checkout{} | #settle{}}. -type state() :: #?MODULE{}. -export_type([state/0, protocol/0, @@ -47,58 +47,76 @@ make_checkout(Pid, NumUnsettled) -> make_settle(MessageIds) when is_list(MessageIds) -> {dlx, #settle{msg_ids = MessageIds}}. --spec overview(rabbit_fifo:state()) -> map(). -overview(#rabbit_fifo{dlx = #?MODULE{consumer = undefined, - msg_bytes = MsgBytes, - msg_bytes_checkout = 0, - discards = Discards}}) -> +-spec overview(state()) -> map(). +overview(#?MODULE{consumer = undefined, + msg_bytes = MsgBytes, + msg_bytes_checkout = 0, + discards = Discards}) -> overview0(Discards, #{}, MsgBytes, 0); -overview(#rabbit_fifo{dlx = #?MODULE{consumer = #dlx_consumer{checked_out = Checked}, - msg_bytes = MsgBytes, - msg_bytes_checkout = MsgBytesCheckout, - discards = Discards}}) -> +overview(#?MODULE{consumer = #dlx_consumer{checked_out = Checked}, + msg_bytes = MsgBytes, + msg_bytes_checkout = MsgBytesCheckout, + discards = Discards}) -> overview0(Discards, Checked, MsgBytes, MsgBytesCheckout). overview0(Discards, Checked, MsgBytes, MsgBytesCheckout) -> #{num_discarded => lqueue:len(Discards), - num_discard_checked_out => map_size(Checked), + num_discard_checked_out => maps:size(Checked), discard_message_bytes => MsgBytes, discard_checkout_message_bytes => MsgBytesCheckout}. --spec stat(rabbit_fifo:state()) -> +-spec stat(state()) -> {Num :: non_neg_integer(), Bytes :: non_neg_integer()}. 
-stat(#rabbit_fifo{dlx = #?MODULE{consumer = Con, - discards = Discards, - msg_bytes = MsgBytes, - msg_bytes_checkout = MsgBytesCheckout}}) -> +stat(#?MODULE{consumer = Con, + discards = Discards, + msg_bytes = MsgBytes, + msg_bytes_checkout = MsgBytesCheckout}) -> Num0 = lqueue:len(Discards), Num = case Con of undefined -> Num0; #dlx_consumer{checked_out = Checked} -> - Num0 + map_size(Checked) + %% O(1) because Erlang maps maintain their own size + Num0 + maps:size(Checked) end, Bytes = MsgBytes + MsgBytesCheckout, {Num, Bytes}. --spec apply(ra_machine:command_meta_data(), rabbit_fifo:command(), rabbit_fifo:state()) -> - {rabbit_fifo:state(), Reply :: term(), ra_machine:effects()} | - {rabbit_fifo:state(), Reply :: term()}. -apply(Meta, {dlx, #checkout{consumer = Pid, - prefetch = Prefetch}}, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = #?MODULE{consumer = undefined} = DlxState0} = State0) -> - DlxState = DlxState0#?MODULE{consumer = #dlx_consumer{pid = Pid, - prefetch = Prefetch}}, - State = set(State0, DlxState), - rabbit_fifo:checkout(Meta, State0, State, [], false); -apply(Meta, {dlx, #checkout{consumer = ConsumerPid, - prefetch = Prefetch}}, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = #?MODULE{consumer = #dlx_consumer{checked_out = CheckedOutOldConsumer}, - discards = Discards0, - msg_bytes = Bytes, - msg_bytes_checkout = BytesCheckout} = DlxState0} = State0) -> +-spec apply(ra_machine:command_meta_data(), protocol(), dead_letter_handler(), state()) -> + {state(), ra_machine:effects()}. +apply(_, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, + #?MODULE{consumer = #dlx_consumer{checked_out = Checked0}} = State0) -> + Acked = maps:with(MsgIds, Checked0), + State = maps:fold(fun(MsgId, {_Rsn, Msg}, + #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, + msg_bytes_checkout = BytesCheckout, + ra_indexes = Indexes0} = S) -> + Indexes = case Msg of + ?INDEX_MSG(I, ?MSG(_,_)) + when is_integer(I) -> + rabbit_fifo_index:delete(I, Indexes0); + _ -> + Indexes0 + end, + S#?MODULE{consumer = C#dlx_consumer{checked_out = maps:remove(MsgId, Checked)}, + msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), + ra_indexes = Indexes} + end, State0, Acked), + {State, []}; +apply(_, {dlx, #checkout{consumer = Pid, + prefetch = Prefetch}}, + at_least_once, + #?MODULE{consumer = undefined} = State0) -> + State = State0#?MODULE{consumer = #dlx_consumer{pid = Pid, + prefetch = Prefetch}}, + {State, []}; +apply(_, {dlx, #checkout{consumer = ConsumerPid, + prefetch = Prefetch}}, + at_least_once, + #?MODULE{consumer = #dlx_consumer{checked_out = CheckedOutOldConsumer}, + discards = Discards0, + msg_bytes = Bytes, + msg_bytes_checkout = BytesCheckout} = State0) -> %% Since we allow only a single consumer, the new consumer replaces the old consumer. %% All checked out messages to the old consumer need to be returned to the discards queue %% such that these messages can be (eventually) re-delivered to the new consumer. 
@@ -110,43 +128,21 @@ apply(Meta, {dlx, #checkout{consumer = ConsumerPid, fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} end, {Discards0, 0}, Checked1), - DlxState = DlxState0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, - prefetch = Prefetch}, - discards = Discards, - msg_bytes = Bytes + BytesMoved, - msg_bytes_checkout = BytesCheckout - BytesMoved}, - State = set(State0, DlxState), - rabbit_fifo:checkout(Meta, State0, State, [], false); -apply(#{index := IncomingRaftIdx} = Meta, {dlx, #settle{msg_ids = MsgIds}}, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, - msg_bytes_checkout = BytesCheckout} = DlxState0} = State0) -> - Acked = maps:with(MsgIds, Checked), - AckedRsnMsgs = maps:values(Acked), - AckedMsgs = lists:map(fun({_Reason, Msg}) -> Msg end, AckedRsnMsgs), - AckedBytes = lists:foldl(fun(Msg, Bytes) -> - Bytes + size_in_bytes(Msg) - end, 0, AckedMsgs), - Unacked = maps:without(MsgIds, Checked), - DlxState = DlxState0#?MODULE{consumer = C#dlx_consumer{checked_out = Unacked}, - msg_bytes_checkout = BytesCheckout - AckedBytes}, - State1 = set(State0, DlxState), - Total = rabbit_fifo:query_messages_total(State0) - length(AckedMsgs), - State2 = rabbit_fifo:subtract_in_memory(AckedMsgs, State1), - State3 = State2#rabbit_fifo{messages_total = Total}, - State4 = rabbit_fifo:delete_indexes(AckedMsgs, State3), - {State, ok, Effects} = rabbit_fifo:checkout(Meta, State0, State4, [], false), - rabbit_fifo:update_smallest_raft_index(IncomingRaftIdx, State, Effects); -apply(_, Cmd, #rabbit_fifo{cfg = #cfg{dead_letter_handler = DLH}} = State) -> + State = State0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, + prefetch = Prefetch}, + discards = Discards, + msg_bytes = Bytes + BytesMoved, + msg_bytes_checkout = BytesCheckout - BytesMoved}, + {State, []}; +apply(_, Cmd, DLH, State) -> rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", Cmd, DLH), - {State, ok}. - --spec discard([msg()], rabbit_dead_letter:reason(), rabbit_fifo:state()) -> - {rabbit_fifo:state(), ra_machine:effects(), Delete :: boolean()}. -discard(_, _, #rabbit_fifo{cfg = #cfg{dead_letter_handler = undefined}} = State) -> - {State, [], true}; -discard(Msgs, Reason, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = {at_most_once, {Mod, Fun, Args}}}} = State) -> + {State, []}. + +-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> + {state(), ra_machine:effects()}. +discard(_, _, undefined, State) -> + {State, []}; +discard(Msgs, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> RaftIdxs = lists:filtermap( fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> {true, RaftIdx}; @@ -167,29 +163,32 @@ discard(Msgs, Reason, end, Msgs), [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] end}, - {State, [Effect], true}; -discard(Msgs, Reason, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = #?MODULE{discards = Discards0, - msg_bytes = MsgBytes0} = DlxState0} = State0) + {State, [Effect]}; +discard(Msgs, Reason, at_least_once, State0) when Reason =/= maxlen -> - %%TODO delete delivery_count header to save space? - %% It's not needed anymore. 
- {Discards, MsgBytes} = lists:foldl(fun (Msg, {D0, B0}) -> - D = lqueue:in({Reason, Msg}, D0), - B = B0 + size_in_bytes(Msg), - {D, B} - end, {Discards0, MsgBytes0}, Msgs), - DlxState = DlxState0#?MODULE{discards = Discards, - msg_bytes = MsgBytes}, - State = set(State0, DlxState), - {State, [], false}. - --spec checkout(rabbit_fifo:state()) -> - {rabbit_fifo:state(), ra_machine:effects()}. -checkout(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = #?MODULE{consumer = undefined, - discards = Discards}} = State) -> + %%TODO delete delivery_count header to save space? It's not needed anymore. + State = lists:foldl(fun (Msg, #?MODULE{discards = D0, + msg_bytes = B0, + ra_indexes = I0} = S0) -> + D = lqueue:in({Reason, Msg}, D0), + B = B0 + size_in_bytes(Msg), + I = case Msg of + ?INDEX_MSG(Idx, ?MSG(_,_)) + when is_integer(Idx) -> + rabbit_fifo_index:append(Idx, I0); + _ -> + I0 + end, + S0#?MODULE{discards = D, + msg_bytes = B, + ra_indexes = I} + end, State0, Msgs), + {State, []}. + +-spec checkout(dead_letter_handler(), state()) -> + {state(), ra_machine:effects()}. +checkout(at_least_once, #?MODULE{consumer = undefined, + discards = Discards} = State) -> case lqueue:is_empty(Discards) of true -> ok; @@ -197,19 +196,16 @@ checkout(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, rabbit_log:warning("there are dead-letter messages but no dead-letter consumer") end, {State, []}; -checkout(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = DlxState0} = State0) -> - {DlxState, Effects} = checkout0(checkout_one(DlxState0), {[],[]}), - State = set(State0, DlxState), - {State, Effects}; -checkout(State) -> +checkout(at_least_once, State0) -> + checkout0(checkout_one(State0), {[],[]}); +checkout(_, State) -> {State, []}. -checkout0({success, MsgId, {Reason, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) - when is_integer(RaftIdx) -> - DelMsg = {RaftIdx, {Reason, MsgId, Header}}, +checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) + when is_integer(Idx) -> + DelMsg = {Idx, {Reason, MsgId, Header}}, SendAcc = {InMemMsgs, [DelMsg|LogMsgs]}, - checkout0(checkout_one(State ), SendAcc); + checkout0(checkout_one(State), SendAcc); checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) when is_integer(Idx) -> DelMsg = {MsgId, {Reason, Header, Msg}}, @@ -228,28 +224,23 @@ checkout0(#?MODULE{consumer = #dlx_consumer{pid = Pid}} = State, SendAcc) -> {State, Effects}. 
checkout_one(#?MODULE{consumer = #dlx_consumer{checked_out = Checked, - prefetch = Prefetch}} = State) when map_size(Checked) >= Prefetch -> + prefetch = Prefetch}} = State) + when map_size(Checked) >= Prefetch -> State; -checkout_one(#?MODULE{consumer = #dlx_consumer{checked_out = Checked0, +checkout_one(#?MODULE{discards = Discards0, + consumer = #dlx_consumer{checked_out = Checked0, next_msg_id = Next} = Con0} = State0) -> - case take_next_msg(State0) of - {{_, Msg} = ReasonMsg, State1} -> + case lqueue:out(Discards0) of + {{value, {_, Msg} = ReasonMsg}, Discards} -> Checked = maps:put(Next, ReasonMsg, Checked0), - State2 = State1#?MODULE{consumer = Con0#dlx_consumer{checked_out = Checked, + State1 = State0#?MODULE{discards = Discards, + consumer = Con0#dlx_consumer{checked_out = Checked, next_msg_id = Next + 1}}, Bytes = size_in_bytes(Msg), - State = add_bytes_checkout(Bytes, State2), + State = add_bytes_checkout(Bytes, State1), {success, Next, ReasonMsg, State}; - empty -> - State0 - end. - -take_next_msg(#?MODULE{discards = Discards0} = State) -> - case lqueue:out(Discards0) of {empty, _} -> - empty; - {{value, ReasonMsg}, Discards} -> - {ReasonMsg, State#?MODULE{discards = Discards}} + State0 end. add_bytes_checkout(Size, #?MODULE{msg_bytes = Bytes, @@ -283,18 +274,15 @@ delivery_effects(CPid, {InMemMsgs, IdxMsgs0}) -> [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. --spec state_enter(ra_server:ra_state(), rabbit_fifo:state()) -> +-spec state_enter(ra_server:ra_state(), rabbit_types:r('queue'), dead_letter_handler(), state()) -> ra_machine:effects(). -state_enter(leader, #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once, - resource = QRef}, - dlx = DlxState}) -> - ensure_worker_started(QRef, DlxState), +state_enter(leader, QRes, at_least_once, State) -> + ensure_worker_started(QRes, State), []; -state_enter(_, #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = DlxState}) -> - ensure_worker_terminated(DlxState), +state_enter(_, _, at_least_once, State) -> + ensure_worker_terminated(State), []; -state_enter(_, _) -> +state_enter(_, _, _, _) -> []. ensure_worker_started(QRef, #?MODULE{consumer = undefined}) -> @@ -346,113 +334,79 @@ is_local_and_alive(Pid) is_local_and_alive(_) -> false. --spec update_config(config(), rabbit_fifo:state()) -> - {rabbit_fifo:state(), ra_machine:effects()}. -update_config(#{dead_letter_handler := at_least_once}, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = DlxState} = State) -> - %% dead_letter_handler stayed at_least_once. - %% Notify rabbit_fifo_dlx_worker about potentially updated policies. - case local_alive_consumer_pid(DlxState) of +-spec update_config(Old :: dead_letter_handler(), New :: dead_letter_handler(), + rabbit_types:r('queue'), state()) -> + {state(), ra_machine:effects()}. +update_config(at_least_once, at_least_once, _, State) -> + case local_alive_consumer_pid(State) of undefined -> {State, []}; Pid -> + %% Notify rabbit_fifo_dlx_worker about potentially updated policies. {State, [{send_msg, Pid, lookup_topology, ra_event}]} end; -update_config(#{dead_letter_handler := DLH}, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = DLH}} = State) -> - %% dead_letter_handler stayed same. 
+update_config(SameDLH, SameDLH, _, State) -> {State, []}; -update_config(#{dead_letter_handler := NewDLH}, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = OldDLH, - resource = Res}} = State0) -> +update_config(OldDLH, NewDLH, QRes, State0) -> rabbit_log:debug("Switching dead_letter_handler from ~p to ~p for ~s", - [OldDLH, NewDLH, rabbit_misc:rs(Res)]), - {#rabbit_fifo{cfg = Cfg} = State1, Effects0} = switch_from(State0), - State2 = State1#rabbit_fifo{cfg = Cfg#cfg{dead_letter_handler = NewDLH}, - dlx = init()}, - switch_to(State2, Effects0). - --spec switch_to(rabbit_fifo:state(), ra_machine:effects()) -> - {rabbit_fifo:state(), ra_machine:effects()}. -switch_to(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}} = State, - Effects0) -> + [OldDLH, NewDLH, rabbit_misc:rs(QRes)]), + {State, Effects} = switch_from(OldDLH, QRes, State0), + switch_to(NewDLH, State, Effects). + +-spec switch_from(Old :: dead_letter_handler(), rabbit_types:r('queue'), state()) -> + {state(), ra_machine:effects()}. +switch_from(at_least_once, QRes, State) -> + %% switch from at-least-once to some other strategy + ensure_worker_terminated(State), + {Num, Bytes} = stat(State), + rabbit_log:info("Deleted ~b dead-lettered messages (with total messages size of ~b bytes) in ~s", + [Num, Bytes, rabbit_misc:rs(QRes)]), + {init(), []}; +switch_from(_, _, State) -> + {State, []}. + +-spec switch_to(New :: dead_letter_handler(), state(), ra_machine:effects()) -> + {state(), ra_machine:effects()}. +switch_to(at_least_once, _, Effects) -> %% Switch from some other strategy to at-least-once. %% Dlx worker needs to be started on the leader. %% The cleanest way to determine the Ra state of this node is delegation to handle_aux. - Effects = [{aux, {dlx, setup}} | Effects0], - {State, Effects}; -switch_to(State, Effects) -> + {init(), [{aux, {dlx, setup}} | Effects]}; +switch_to(_, State, Effects) -> {State, Effects}. --spec switch_from(rabbit_fifo:state()) -> - {rabbit_fifo:state(), ra_machine:effects()}. -switch_from(#rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once}, - dlx = #?MODULE{consumer = Consumer, - discards = Discards} = DlxState} = State0) -> - %% switch from at-least-once to some other strategy - ensure_worker_terminated(DlxState), - CheckedReasonMsgs = case Consumer of - #dlx_consumer{checked_out = Checked} - when is_map(Checked) -> - maps:values(Checked); - _ -> [] - end, - DiscardReasonMsgs = lqueue:to_list(Discards), - {_, Msgs} = lists:unzip(CheckedReasonMsgs ++ DiscardReasonMsgs), - Len = length(Msgs), - Total = rabbit_fifo:query_messages_total(State0), - State1 = State0#rabbit_fifo{messages_total = Total - Len}, - State2 = rabbit_fifo:delete_indexes(Msgs, State1), - State = rabbit_fifo:subtract_in_memory(Msgs, State2), - rabbit_log:debug("Deleted ~b dead-lettered messages", [Len]), - {State, []}; -switch_from(State) -> - {State, []}. - --spec handle_aux(ra_server:ra_state(), Cmd :: term(), term(), rabbit_fifo:state()) -> +-spec handle_aux(ra_server:ra_state(), Cmd :: term(), Aux :: term(), + rabbit_types:r('queue'), dead_letter_handler(), state()) -> term(). -handle_aux(leader, setup, Aux, - #rabbit_fifo{cfg = #cfg{dead_letter_handler = at_least_once, - resource = QRef}, - dlx = DlxState}) -> - ensure_worker_started(QRef, DlxState), +handle_aux(leader, {dlx, setup}, Aux, QRes, at_least_once, State) -> + ensure_worker_started(QRes, State), Aux; -handle_aux(_, _, Aux, _) -> +handle_aux(_, _, Aux, _, _, _) -> Aux. --spec purge(rabbit_fifo:state()) -> - {rabbit_fifo:state(), [msg()]}. 
-purge(#rabbit_fifo{dlx = #?MODULE{consumer = Con0, - discards = Discards} = DlxState0} = State0) -> - {Con, CheckedMsgs} = case Con0 of - #dlx_consumer{checked_out = Checked} - when is_map(Checked) -> - L = maps:to_list(Checked), - {_, CheckedReasonMsgs} = lists:unzip(L), - {_, Msgs} = lists:unzip(CheckedReasonMsgs), - C = Con0#dlx_consumer{checked_out = #{}}, - {C, Msgs}; - _ -> - {Con0, []} - end, - DiscardReasonMsgs = lqueue:to_list(Discards), - {_, DiscardMsgs} = lists:unzip(DiscardReasonMsgs), - PurgedMsgs = CheckedMsgs ++ DiscardMsgs, - DlxState = DlxState0#?MODULE{consumer = Con, - discards = lqueue:new(), - msg_bytes = 0, - msg_bytes_checkout = 0 - }, - State = set(State0, DlxState), - {State, PurgedMsgs}. - --spec dehydrate(rabbit_fifo:state()) -> - rabbit_fifo:state(). -dehydrate(#rabbit_fifo{dlx = #?MODULE{discards = Discards, - consumer = Con} = DlxState} = State) -> - set(State, DlxState#?MODULE{discards = dehydrate_messages(Discards), - consumer = dehydrate_consumer(Con)}). +-spec purge(state()) -> + state(). +purge(#?MODULE{consumer = Consumer0} = State) -> + Consumer = case Consumer0 of + undefined -> + undefined; + #dlx_consumer{} -> + Consumer0#dlx_consumer{checked_out = #{}} + end, + State#?MODULE{discards = lqueue:new(), + msg_bytes = 0, + msg_bytes_checkout = 0, + consumer = Consumer, + ra_indexes = rabbit_fifo_index:empty() + }. + +-spec dehydrate(state()) -> + state(). +dehydrate(#?MODULE{discards = Discards, + consumer = Con} = State) -> + State#?MODULE{discards = dehydrate_messages(Discards), + consumer = dehydrate_consumer(Con), + ra_indexes = rabbit_fifo_index:empty()}. dehydrate_messages(Discards) -> L0 = lqueue:to_list(Discards), @@ -469,10 +423,12 @@ dehydrate_consumer(#dlx_consumer{checked_out = Checked0} = Con) -> dehydrate_consumer(undefined) -> undefined. --spec normalize(rabbit_fifo:state()) -> - rabbit_fifo:state(). -normalize(#rabbit_fifo{dlx = #?MODULE{discards = Discards} = DlxState} = State) -> - set(State, DlxState#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards))}). +-spec normalize(state()) -> + state(). +normalize(#?MODULE{discards = Discards, + ra_indexes = Indexes} = State) -> + State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards)), + ra_indexes = rabbit_fifo_index:normalize(Indexes)}. -set(State, #?MODULE{} = DlxState) -> - State#rabbit_fifo{dlx = DlxState}. +smallest_raft_index(#?MODULE{ra_indexes = Indexes}) -> + rabbit_fifo_index:smallest(Indexes). diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index 05b8e6b85e85..a3018cd90bf1 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -6,7 +6,6 @@ %% is less than the reason atom. -type reason() :: expired | rejected | delivery_limit | ?NIL. -% See snapshot scenarios in rabbit_fifo_prop_SUITE. Add dlx dehydrate tests. -record(dlx_consumer,{ %% We don't require a consumer tag because a consumer tag is a means to distinguish %% multiple consumers in the same channel. The rabbit_fifo_dlx_worker channel like process however @@ -14,13 +13,16 @@ pid :: pid(), prefetch :: non_neg_integer(), checked_out = #{} :: #{msg_id() => {reason(), indexed_msg()}}, - next_msg_id = 0 :: msg_id() % part of snapshot data + next_msg_id = 0 :: msg_id() }). -record(rabbit_fifo_dlx,{ consumer = undefined :: #dlx_consumer{} | undefined, %% Queue of dead-lettered messages. 
discards = lqueue:new() :: lqueue:lqueue({reason(), indexed_msg()}), + %% Raft indexes of messages in both discards queue and dlx_consumer's checked_out map + %% so that we get the smallest ra index in O(1). + ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), msg_bytes = 0 :: non_neg_integer(), msg_bytes_checkout = 0 :: non_neg_integer() }). diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 0638c36cfd8b..d05c23974755 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -40,10 +40,6 @@ %% TODO Reason is already stored in first x-death header of #content.properties.#'P_basic'.headers %% So, we could remove this convenience field and lookup the 1st header when redelivering. reason :: rabbit_fifo_dlx:reason(), - %% - %%TODO instead of using 'unsettled' and 'settled' fields, use rabbit_confirms because it handles many to one logic - %% in a generic way. Its API might need to be modified though if it is targeted only towards channel. - %% %% target queues for which publisher confirm has not been received yet unsettled = [] :: [rabbit_amqqueue:name()], %% target queues for which publisher confirm was received @@ -92,7 +88,7 @@ % -type state() :: #state{}. -%%TODO add metrics like global counters for messages routed, delivered, etc. +%%TODO Add metrics like global counters for messages routed, delivered, etc. by adding a new counter in seshat. start_link(QRef) -> gen_server:start_link(?MODULE, QRef, [{hibernate_after, ?HIBERNATE_AFTER}]). @@ -470,6 +466,11 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, }, %% Field 'mandatory' is set to false because our module checks on its own whether the message is routable. Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, BasicMsg, OutSeq), + %%TODO Filter such that we re-delivery only to classic queues and NEW quorum / stream queues. + %% This is required because quorum and stream queue clients have their own re-send mechanisms + %% and we don't want to re-send on 2 levels ending up with many duplicate messages. + %% (Take care to not delete old messages in this case such that we'll receive acks from target quorum and stream + %% queues.) RouteToQs0 = rabbit_exchange:route(DLX, Delivery), %% Do not re-deliver to queues for which we already received a publisher confirm. RouteToQs1 = RouteToQs0 -- Settled, diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl index 9dc92acb8252..685208795e8f 100644 --- a/deps/rabbit/src/rabbit_fifo_index.erl +++ b/deps/rabbit/src/rabbit_fifo_index.erl @@ -7,7 +7,8 @@ delete/2, size/1, smallest/1, - map/2 + map/2, + normalize/1 ]). -compile({no_auto_import, [size/1]}). @@ -100,6 +101,10 @@ find_next(Next, Last, Map) -> find_next(Next+1, Last, Map) end. +-spec normalize(state()) -> state(). +normalize(State) -> + State#?MODULE{largest = undefined}. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). 
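The new ra_indexes field in #rabbit_fifo_dlx{} gives the dead-letter state the same bookkeeping that rabbit_fifo keeps for regular messages: the Raft index of every dead-lettered message is recorded while the message sits in the discards queue or in the dlx consumer's checked_out map, and dropped once the message is settled, so smallest_raft_index/1 can answer in O(1) how far the release cursor may safely advance. A minimal sketch of that life cycle follows; the wrapper names are hypothetical, rabbit_fifo_index:append/2 is assumed to exist with this arity, and empty/0, delete/2 and smallest/1 are the calls visible in the diffs above.

%% Hypothetical wrappers around rabbit_fifo_index illustrating how the dlx state
%% could track outstanding Raft indexes of dead-lettered messages.
track_index(RaftIdx, Indexes) ->
    %% message entered the discards queue or the dlx consumer's checked_out map
    rabbit_fifo_index:append(RaftIdx, Indexes).

settle_index(RaftIdx, Indexes) ->
    %% dlx worker acknowledged the message back to the source quorum queue
    rabbit_fifo_index:delete(RaftIdx, Indexes).

release_cursor_bound(Indexes) ->
    %% smallest outstanding dead-letter Raft index; snapshots must not truncate past it
    rabbit_fifo_index:smallest(Indexes).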
diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 9693c2669402..1da314b225d0 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -52,27 +52,30 @@ end_per_testcase(_TestCase, _Config) -> %%%=================================================================== handler_undefined(_Config) -> - S0 = rabbit_fifo:init(init_config(undefined)), - ?assertEqual({S0, [], true}, rabbit_fifo_dlx:discard([make_msg(1)], because, S0)), + S = rabbit_fifo_dlx:init(), + Handler = undefined, + ?assertEqual({S, []}, rabbit_fifo_dlx:discard([make_msg(1)], because, Handler, S)), ok. handler_at_most_once(_Config) -> - S0 = rabbit_fifo:init(init_config({at_most_once, {m, f, [a]}})), - {S0, Effects, true} = rabbit_fifo_dlx:discard([make_msg(1), - make_msg(2)], because, S0), + S = rabbit_fifo_dlx:init(), + Handler = {at_most_once, {m, f, [a]}}, + {S, Effects} = rabbit_fifo_dlx:discard([make_msg(1), + make_msg(2)], because, Handler, S), ?assertMatch([{log, [1, 2], _}], Effects), ok. discard_dlx_consumer(_Config) -> - S0 = rabbit_fifo:init(init_config(at_least_once)), + Handler = at_least_once, + S0 = rabbit_fifo_dlx:init(), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 0, discard_message_bytes => 0, discard_checkout_message_bytes => 0}, rabbit_fifo_dlx:overview(S0)), %% message without dlx consumer - {S1, [], false} = rabbit_fifo_dlx:discard([make_msg(1)], because, S0), - {S2, []} = rabbit_fifo_dlx:checkout(S1), + {S1, []} = rabbit_fifo_dlx:discard([make_msg(1)], because, Handler, S0), + {S2, []} = rabbit_fifo_dlx:checkout(Handler, S1), ?assertEqual(#{num_discarded => 1, num_discard_checked_out => 0, discard_message_bytes => 1, @@ -80,46 +83,48 @@ discard_dlx_consumer(_Config) -> %% with dlx consumer Checkout = rabbit_fifo_dlx:make_checkout(self(), 2), - {S3, ok, DeliveryEffects0} = rabbit_fifo_dlx:apply(meta(2), Checkout, S2), + {S3, []} = rabbit_fifo_dlx:apply(meta(2), Checkout, Handler, S2), + {S4, DeliveryEffects0} = rabbit_fifo_dlx:checkout(Handler, S3), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 1, discard_message_bytes => 0, - discard_checkout_message_bytes => 1}, rabbit_fifo_dlx:overview(S3)), + discard_checkout_message_bytes => 1}, rabbit_fifo_dlx:overview(S4)), ?assertMatch([{log, [1], _}], DeliveryEffects0), %% more messages than dlx consumer's prefetch - {S4, [], false} = rabbit_fifo_dlx:discard([make_msg(3), make_msg(4)], because, S3), - {S5, DeliveryEffects1} = rabbit_fifo_dlx:checkout(S4), + {S5, []} = rabbit_fifo_dlx:discard([make_msg(3), make_msg(4)], because, Handler, S4), + {S6, DeliveryEffects1} = rabbit_fifo_dlx:checkout(Handler, S5), ?assertEqual(#{num_discarded => 1, num_discard_checked_out => 2, discard_message_bytes => 1, - discard_checkout_message_bytes => 2}, rabbit_fifo_dlx:overview(S5)), + discard_checkout_message_bytes => 2}, rabbit_fifo_dlx:overview(S6)), ?assertMatch([{log, [3], _}], DeliveryEffects1), - ?assertEqual({3, 3}, rabbit_fifo_dlx:stat(S5)), + ?assertEqual({3, 3}, rabbit_fifo_dlx:stat(S6)), %% dlx consumer acks messages Settle = rabbit_fifo_dlx:make_settle([0,1]), - {S6, ok, DeliveryEffects2} = rabbit_fifo_dlx:apply(meta(5), Settle, S5), + {S7, []} = rabbit_fifo_dlx:apply(meta(5), Settle, Handler, S6), + {S8, DeliveryEffects2} = rabbit_fifo_dlx:checkout(Handler, S7), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 1, discard_message_bytes => 0, - discard_checkout_message_bytes => 1}, rabbit_fifo_dlx:overview(S6)), + 
discard_checkout_message_bytes => 1}, rabbit_fifo_dlx:overview(S8)), ?assertMatch([{log, [4], _}], DeliveryEffects2), - ?assertEqual({1, 1}, rabbit_fifo_dlx:stat(S6)), + ?assertEqual({1, 1}, rabbit_fifo_dlx:stat(S8)), ok. purge(_Config) -> - S0 = rabbit_fifo:init(init_config(at_least_once)), + Handler = at_least_once, + S0 = rabbit_fifo_dlx:init(), Checkout = rabbit_fifo_dlx:make_checkout(self(), 1), - {S1, ok, _} = rabbit_fifo_dlx:apply(meta(1), Checkout, S0), + {S1, _} = rabbit_fifo_dlx:apply(meta(1), Checkout, Handler, S0), Msgs = [make_msg(2), make_msg(3)], - {S2, _, _} = rabbit_fifo_dlx:discard(Msgs, because, S1), - {S3, _} = rabbit_fifo_dlx:checkout(S2), + {S2, _} = rabbit_fifo_dlx:discard(Msgs, because, Handler, S1), + {S3, _} = rabbit_fifo_dlx:checkout(Handler, S2), ?assertMatch(#{num_discarded := 1, num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S3)), - {S4, PurgedMsgs} = rabbit_fifo_dlx:purge(S3), - ?assertEqual(Msgs, PurgedMsgs), + S4 = rabbit_fifo_dlx:purge(S3), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 0, discard_message_bytes => 0, @@ -127,49 +132,56 @@ purge(_Config) -> ok. switch_strategies(_Config) -> + QRes = #resource{virtual_host = <<"/">>, + kind = queue, + name = <<"blah">>}, + Handler0 = undefined, + Handler1 = at_least_once, {ok, _} = rabbit_fifo_dlx_sup:start_link(), - S0 = rabbit_fifo:init(init_config(undefined)), + S0 = rabbit_fifo_dlx:init(), %% Switching from undefined to at_least_once should start dlx consumer. - {S1, Effects} = rabbit_fifo_dlx:update_config( - #{dead_letter_handler => at_least_once}, S0), + {S1, Effects} = rabbit_fifo_dlx:update_config(Handler0, Handler1, QRes, S0), ?assertEqual([{aux, {dlx, setup}}], Effects), - rabbit_fifo_dlx:handle_aux(leader, setup, fake_aux, S1), + rabbit_fifo_dlx:handle_aux(leader, {dlx, setup}, fake_aux, QRes, Handler1, S1), [{_, WorkerPid, worker, _}] = supervisor:which_children(rabbit_fifo_dlx_sup), - {S2, _, _} = rabbit_fifo_dlx:discard([make_msg(1)], because, S1), + {S2, _} = rabbit_fifo_dlx:discard([make_msg(1)], because, Handler1, S1), Checkout = rabbit_fifo_dlx:make_checkout(WorkerPid, 1), - {S3, ok, _} = rabbit_fifo_dlx:apply(meta(2), Checkout, S2), - ?assertMatch(#{num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S3)), + {S3, _} = rabbit_fifo_dlx:apply(meta(2), Checkout, Handler1, S2), + {S4, _} = rabbit_fifo_dlx:checkout(Handler1, S3), + ?assertMatch(#{num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S4)), %% Switching from at_least_once to undefined should terminate dlx consumer. - {S4, []} = rabbit_fifo_dlx:update_config( - #{dead_letter_handler => undefined}, S3), + {S5, []} = rabbit_fifo_dlx:update_config(Handler1, Handler0, QRes, S4), ?assertMatch([_, {active, 0}, _, _], supervisor:count_children(rabbit_fifo_dlx_sup)), - ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S4)), + ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S5)), ok. 
last_consumer_wins(_Config) -> - S0 = rabbit_fifo:init(init_config(at_least_once)), + S0 = rabbit_fifo_dlx:init(), + Handler = at_least_once, Msgs = [make_msg(1), make_msg(2), make_msg(3), make_msg(4)], - {S1, [], false} = rabbit_fifo_dlx:discard(Msgs, because, S0), - Checkout = rabbit_fifo_dlx:make_checkout(self(), 5), - {S2, ok, DeliveryEffects0} = rabbit_fifo_dlx:apply(meta(5), Checkout, S1), + {S1, []} = rabbit_fifo_dlx:discard(Msgs, because, Handler, S0), + Checkout = rabbit_fifo_dlx:make_checkout(self(), 10), + {S2, []} = rabbit_fifo_dlx:apply(meta(5), Checkout, Handler, S1), + {S3, DeliveryEffects0} = rabbit_fifo_dlx:checkout(Handler, S2), ?assertMatch([{log, [1, 2, 3, 4], _}], DeliveryEffects0), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 4, discard_message_bytes => 0, - discard_checkout_message_bytes => 4}, rabbit_fifo_dlx:overview(S2)), + discard_checkout_message_bytes => 4}, rabbit_fifo_dlx:overview(S3)), %% When another (or the same) consumer (re)subscribes, %% we expect this new consumer to be checked out and delivered all messages %% from the previous consumer. - {S3, ok, DeliveryEffects1} = rabbit_fifo_dlx:apply(meta(6), Checkout, S2), + {S4, []} = rabbit_fifo_dlx:apply(meta(6), Checkout, Handler, S3), + {S5, DeliveryEffects1} = rabbit_fifo_dlx:checkout(Handler, S4), ?assertMatch([{log, [1, 2, 3, 4], _}], DeliveryEffects1), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 4, discard_message_bytes => 0, - discard_checkout_message_bytes => 4}, rabbit_fifo_dlx:overview(S3)), + discard_checkout_message_bytes => 4}, rabbit_fifo_dlx:overview(S5)), ok. make_msg(RaftIdx) -> @@ -180,11 +192,3 @@ meta(Idx) -> term => 1, system_time => 0, from => {make_ref(), self()}}. - -init_config(Handler) -> - #{name => ?MODULE, - queue_resource => #resource{virtual_host = <<"/">>, - kind = queue, - name = <<"blah">>}, - release_cursor_interval => 1, - dead_letter_handler => Handler}. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 7599a6095967..de63b9946fef 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -376,7 +376,6 @@ stats(Config) -> {<<"x-dead-letter-exchange">>, longstr, DLX}, {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-max-in-memory-length">>, long, 1}, {<<"x-overflow">>, longstr, <<"reject-publish">>}, {<<"x-queue-type">>, longstr, <<"quorum">>} ] @@ -405,11 +404,7 @@ stats(Config) -> %% 16 bytes (=8msgs*2bytes) should be in discards queue discard_message_bytes := 16, %% 10 msgs in total - num_messages := 10, - %% 1 msg (=x-max-in-memory-length) should be in-memory - num_in_memory_ready_messages := 1, - %% 2 bytes (1msg) should be in-memory - in_memory_message_bytes := 2 + num_messages := 10 }], dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), %% Fix dead-letter toplology misconfiguration. 
@@ -429,9 +424,7 @@ stats(Config) -> discard_checkout_message_bytes := 0, num_discarded := 0, discard_message_bytes := 0, - num_messages := 0, - num_in_memory_ready_messages := 0, - in_memory_message_bytes := 0 + num_messages := 0 }], dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), [?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 6a59dc8d2d49..f2cd1028fd4d 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -81,7 +81,8 @@ all_tests() -> dlx_05, dlx_06, dlx_07, - dlx_08 + dlx_08, + dlx_09 % single_active_ordering_02 ]. @@ -1185,7 +1186,7 @@ dlx_05(_Config) -> ?assert(snapshots_prop(Config, Commands)), ok. -% Test that after recovery we can differentiate between index messge and (prefix) disk message +% Test that after recovery we can differentiate between index message and (prefix) disk message dlx_06(_Config) -> C1Pid = c:pid(0,883,1), C1 = {<<>>, C1Pid}, @@ -1293,6 +1294,28 @@ dlx_08(_Config) -> ?assert(snapshots_prop(Config, Commands)), ok. +dlx_09(_Config) -> + C1Pid = c:pid(0,883,1), + C1 = {<<>>, C1Pid}, + E = c:pid(0,176,1), + Commands = [ + make_checkout(C1, {auto,2,simple_prefetch}), + make_enqueue(E,1,msg(<<>>)), + %% 0 in checkout + make_enqueue(E,2,msg(<<>>)), + %% 0,1 in checkout + rabbit_fifo:make_return(C1, [0]), + %% 1,2 in checkout + rabbit_fifo:make_discard(C1, [1]), + %% 2 in checkout, 1 in discards + rabbit_fifo:make_discard(C1, [2]) + %% 1,2 in discards + ], + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + reject_publish, at_least_once), + ?assert(snapshots_prop(Config, Commands)), + ok. + config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes) -> config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes, drop_head, {at_most_once, {?MODULE, banana, []}}). From 5bbf471be7f2586d64f2d57ff1715d8b4cc17862 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 10 Jan 2022 16:34:15 +0100 Subject: [PATCH 35/97] Do not redeliver if target is quorum or stream queue For quorum queues, rabbit_fifo_client takes already care of redelivering Raft commands such as enqueues. Before this commit, both dlx worker and rabbit_fifo_client were redelivering at two levels which ended up in duplicate messages in the target queue. From now on, dlx worker will only redeliver for target classic queues. Also from now on, redelivery will use the exact same delivery as previously updating only exchange_name and routing_keys (if they changed via dlx policies). The assumption is that re-using same delivery.msg_seq_no and same basic_message.id will not cause issues for target classic queues. This commit also adds an integration test with different target mirrored classic queues (e.g. different number of replicas and leaders on different nodes). 
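In code, the rule from this commit message amounts to classifying each unsettled target queue by its type before a resend and skipping the queues whose clients resend on their own. The sketch below parallels the clients_redeliver/1 helper added further down in this patch; requeue_targets/1 and resends_itself/1 are illustrative names, while amqqueue:get_type/1, amqqueue:get_name/1 and rabbit_amqqueue:lookup_many/1 are the calls the patch itself uses.

%% Keep only targets whose queue client does not resend on its own,
%% i.e. the classic queues that the dlx worker must retry itself.
requeue_targets(QNames) ->
    [amqqueue:get_name(Q)
     || Q <- rabbit_amqqueue:lookup_many(QNames),
        not resends_itself(amqqueue:get_type(Q))].

resends_itself(rabbit_quorum_queue) -> true;  %% rabbit_fifo_client resends unapplied enqueues
resends_itself(rabbit_stream_queue) -> true;  %% stream queue client retries on its own
resends_itself(_Other)              -> false.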
--- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 96 +++--- .../rabbit_fifo_dlx_integration_SUITE.erl | 326 ++++++++---------- 2 files changed, 188 insertions(+), 234 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index d05c23974755..82734924436e 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -36,7 +36,7 @@ %% This rabbit_fifo_dlx_worker does not have the concept of delivery tags because it settles (acks) %% message IDs directly back to the queue (and there is no AMQP consumer). consumed_msg_id :: non_neg_integer(), - content :: rabbit_types:decoded_content(), + delivery :: rabbit_types:delivery(), %% TODO Reason is already stored in first x-death header of #content.properties.#'P_basic'.headers %% So, we could remove this convenience field and lookup the 1st header when redelivering. reason :: rabbit_fifo_dlx:reason(), @@ -249,8 +249,7 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, pendings = Pendings, exchange_ref = DLXRef, routing_key = RKey} = State0) -> - #basic_message{content = Content, routing_keys = RKeys} = Msg = - rabbit_dead_letter:make_msg(ConsumedMsg, Reason, DLXRef, RKey, ConsumedQRef), + #basic_message{routing_keys = RKeys} = Msg = rabbit_dead_letter:make_msg(ConsumedMsg, Reason, DLXRef, RKey, ConsumedQRef), %% Field 'mandatory' is set to false because our module checks on its own whether the message is routable. Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, Msg, OutSeq), TargetQs = case DLX of @@ -306,7 +305,7 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, Pend0 = #pending{ consumed_msg_id = ConsumedMsgId, consumed_at = Now, - content = Content, + delivery = Delivery, reason = Reason }, case TargetQs of @@ -410,8 +409,8 @@ redeliver_messsages(#state{pendings = Pendings, end, State, Pendings) end. -redeliver(#pending{content = Content} = Pend, DLX, OldOutSeq, - #state{routing_key = undefined} = State) -> +redeliver(#pending{delivery = #delivery{message = #basic_message{content = Content}}} = Pend, + DLX, OutSeq, #state{routing_key = undefined} = State) -> %% No dead-letter-routing-key defined for source quorum queue. %% Therefore use all of messages's original routing keys (which can include CC and BCC recipients). %% This complies with the behaviour of the rabbit_dead_letter module. @@ -421,59 +420,30 @@ redeliver(#pending{content = Content} = Pend, DLX, OldOutSeq, {array, [{table, MostRecentDeath}|_]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), {<<"routing-keys">>, array, Routes0} = lists:keyfind(<<"routing-keys">>, 1, MostRecentDeath), Routes = [Route || {longstr, Route} <- Routes0], - redeliver0(Pend, DLX, Routes, OldOutSeq, State); -redeliver(Pend, DLX, OldOutSeq, #state{routing_key = DLRKey} = State) -> - redeliver0(Pend, DLX, [DLRKey], OldOutSeq, State). + redeliver0(Pend, DLX, Routes, OutSeq, State); +redeliver(Pend, DLX, OutSeq, #state{routing_key = DLRKey} = State) -> + redeliver0(Pend, DLX, [DLRKey], OutSeq, State). -%% Quorum queues maintain their own Raft sequene number mapping to the message sequence number (= Raft correlation ID). -%% So, they would just send us a 'settled' queue action containing the correct message sequence number. -%% -%% Classic queues however maintain their state by mapping the message sequence number to pending and confirmed queues. 
-%% While re-using the same message sequence number could work there as well, it just gets unnecssary complicated when -%% different target queues settle two separate deliveries referring to the same message sequence number (and same basic message). -%% -%% Therefore, to keep things simple, create a brand new delivery, store it in our state and forget about the old delivery and -%% sequence number. -%% -%% If a sequene number gets settled after settle_timeout, we can't map it anymore to the #pending{}. Hence, we ignore it. -%% -%% This can lead to issues when settle_timeout is too low and time to settle takes too long. -%% For example, if settle_timeout is set to only 10 seconds, but settling a message takes always longer than 10 seconds -%% (e.g. due to extremly slow hypervisor disks that ran out of credit), we will re-deliver the same message all over again -%% leading to many duplicates in the target queue without ever acking the message back to the source discards queue. -%% -%% Therefore, set settle_timeout reasonably high (e.g. 2 minutes). -%% %% TODO do not log per message? redeliver0(#pending{consumed_msg_id = ConsumedMsgId, - content = Content, - unsettled = Unsettled, + delivery = #delivery{message = BasicMsg} = Delivery0, + unsettled = Unsettled0, settled = Settled, publish_count = PublishCount, reason = Reason} = Pend0, - DLX, DLRKeys, OldOutSeq, - #state{next_out_seq = OutSeq, - queue_ref = QRef, + DLX, DLRKeys, OutSeq, + #state{queue_ref = QRef, pendings = Pendings0, exchange_ref = DLXRef, settle_timeout = SettleTimeout} = State0) when is_list(DLRKeys) -> - BasicMsg = #basic_message{exchange_name = DLXRef, - routing_keys = DLRKeys, - %% BCC Header was already stripped previously - content = Content, - id = rabbit_guid:gen(), - is_persistent = rabbit_basic:is_message_persistent(Content) - }, - %% Field 'mandatory' is set to false because our module checks on its own whether the message is routable. - Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, BasicMsg, OutSeq), - %%TODO Filter such that we re-delivery only to classic queues and NEW quorum / stream queues. - %% This is required because quorum and stream queue clients have their own re-send mechanisms - %% and we don't want to re-send on 2 levels ending up with many duplicate messages. - %% (Take care to not delete old messages in this case such that we'll receive acks from target quorum and stream - %% queues.) + Delivery = Delivery0#delivery{message = BasicMsg#basic_message{exchange_name = DLXRef, + routing_keys = DLRKeys}}, RouteToQs0 = rabbit_exchange:route(DLX, Delivery), - %% Do not re-deliver to queues for which we already received a publisher confirm. - RouteToQs1 = RouteToQs0 -- Settled, + %% Do not redeliver message to a target queue + %% 1. for which we already received a publisher confirm, or + %% 2. whose queue client redelivers on our behalf. + Unsettled = RouteToQs0 -- Settled, + RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0), {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1), Prefix = io_lib:format("Message has not received required publisher confirm(s). " "Received confirm from: [~s]. 
" @@ -482,8 +452,8 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, "message_sequence_number=~b " "consumed_message_sequence_number=~b " "publish_count=~b.", - [strings(Settled), strings(Unsettled), SettleTimeout, - OldOutSeq, ConsumedMsgId, PublishCount]), + [strings(Settled), strings(Unsettled0), SettleTimeout, + OutSeq, ConsumedMsgId, PublishCount]), case {RouteToQs, Cycles, Settled} of {[], [], []} -> rabbit_log:warning("~s Failed to re-deliver this message because no queue is bound " @@ -518,12 +488,10 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, end, Pend = Pend0#pending{publish_count = PublishCount + 1, last_published_at = os:system_time(millisecond), + delivery = Delivery, %% override 'unsettled' because topology could have changed - unsettled = RouteToQs}, - Pendings1 = maps:remove(OldOutSeq, Pendings0), - Pendings = maps:put(OutSeq, Pend, Pendings1), - State = State0#state{next_out_seq = OutSeq + 1, - pendings = Pendings}, + unsettled = Unsettled}, + State = State0#state{pendings = maps:update(OutSeq, Pend, Pendings0)}, deliver_to_queues(Delivery, RouteToQs, State) end. @@ -596,3 +564,19 @@ format_pending(#pending{consumed_msg_id = ConsumedMsgId, publish_count => PublishCount, last_published_at => LastPublishedAt, consumed_at => ConsumedAt}. + +%% Returns queues whose queue clients take care of redelivering messages. +clients_redeliver(QNames) -> + Qs = lists:filter(fun(Q) -> + case amqqueue:get_type(Q) of + rabbit_quorum_queue -> + %% If Raft command (#enqueue{}) does not get applied + %% rabbit_fifo_client will resend. + true; + rabbit_stream_queue -> + true; + _ -> + false + end + end, rabbit_amqqueue:lookup_many(QNames)), + lists:map(fun amqqueue:get_name/1, Qs). diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index de63b9946fef..4a85e157ed4c 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -94,17 +94,23 @@ init_per_testcase(Testcase, Config) -> {dead_letter_exchange, <>}, {target_queue_1, <>}, {target_queue_2, <>}, - {target_queue_3, <>} + {target_queue_3, <>}, + {target_queue_4, <>}, + {target_queue_5, <>}, + {target_queue_6, <>} ]), rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()). 
end_per_testcase(Testcase, Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(source_queue, Config)}), - #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(target_queue_1, Config)}), - #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(target_queue_2, Config)}), - #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = ?config(target_queue_3, Config)}), + delete_queue(Ch, ?config(source_queue, Config)), + delete_queue(Ch, ?config(target_queue_1, Config)), + delete_queue(Ch, ?config(target_queue_2, Config)), + delete_queue(Ch, ?config(target_queue_3, Config)), + delete_queue(Ch, ?config(target_queue_4, Config)), + delete_queue(Ch, ?config(target_queue_5, Config)), + delete_queue(Ch, ?config(target_queue_6, Config)), #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = ?config(dead_letter_exchange, Config)}), Config1 = rabbit_ct_helpers:run_steps( Config, @@ -117,24 +123,16 @@ declare_topology(Config, AdditionalQArgs) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = ?config(dead_letter_exchange, Config), - QArgs = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ], - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = lists:keymerge(1, AdditionalQArgs, QArgs)}), + declare_queue(Ch, SourceQ, lists:keymerge(1, AdditionalQArgs, + [{<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ])), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), + declare_queue(Ch, TargetQ, []), + bind_queue(Ch, TargetQ, DLX, <<"k1">>), {Server, Ch, SourceQ, TargetQ}. %% Test that at-least-once dead-lettering works for message dead-lettered due to message TTL. 
@@ -208,19 +206,15 @@ target_queue_not_bound(Config) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + declare_queue(Ch, TargetQ, []), Msg = <<"msg">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -234,11 +228,7 @@ target_queue_not_bound(Config) -> consistently(?_assertMatch([{1, _}], dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), %% Fix dead-letter toplology misconfiguration. - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), + bind_queue(Ch, TargetQ, DLX, <<"k1">>), %% Binding from target queue to DLX is now present. %% Therefore, message should be delivered to target queue and acked to source queue. eventually(?_assertEqual([{0, 0}], @@ -256,29 +246,17 @@ target_queue_deleted(Config) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), %% Make target queue a quorum queue to provoke sending an 'eol' message to dlx worker. - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = TargetQ, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}] - }), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), + declare_queue(Ch, TargetQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + bind_queue(Ch, TargetQ, DLX, <<"k1">>), Msg1 = <<"m1">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -302,12 +280,8 @@ target_queue_deleted(Config) -> dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), %% Message should be delivered once target queue is recreated. %% (This time we simply create a classic target queue.) 
- #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), + declare_queue(Ch, TargetQ, []), + bind_queue(Ch, TargetQ, DLX, <<"k1">>), eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), 500, 5), eventually(?_assertEqual([{0, 0}], @@ -322,18 +296,14 @@ dlx_missing(Config) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), + declare_queue(Ch, TargetQ, []), Msg = <<"msg">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -347,11 +317,7 @@ dlx_missing(Config) -> dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), %% Fix dead-letter toplology misconfiguration. #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), + bind_queue(Ch, TargetQ, DLX, <<"k1">>), %% DLX is now present. %% Therefore, message should be delivered to target queue and acked to source queue. eventually(?_assertEqual([{0, 0}], @@ -369,19 +335,15 @@ stats(Config) -> SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, <<"k1">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ}), + declare_queue(Ch, TargetQ, []), Msg = <<"12">>, %% 2 bytes per message [ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -408,11 +370,7 @@ stats(Config) -> }], dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), %% Fix dead-letter toplology misconfiguration. 
- #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ, - exchange = DLX, - routing_key = <<"k1">> - }), + bind_queue(Ch, TargetQ, DLX, <<"k1">>), %% Binding from target queue to DLX is now present. %% Therefore, all messages should be delivered to target queue and acked to source queue. %% Therefore, all stats should be decremented back to 0. @@ -438,16 +396,12 @@ drop_head_falls_back_to_at_most_once(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"drop-head">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"drop-head">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), consistently( ?_assertMatch( [_, {active, 0}, _, _], @@ -460,15 +414,11 @@ switch_strategy(Config) -> SourceQ = ?config(source_queue, Config), RaName = ra_name(SourceQ), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), %% default strategy is at-most-once assert_active_dlx_workers(0, Config, Server), ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, @@ -510,16 +460,20 @@ switch_strategy(Config) -> %% %% Lesson learnt by writing this test: %% If there are multiple target queues, messages will not be sent to target non-mirrored classic queues -%% it their host node is temporarily down because these queues get (temporarily) deleted. See: +%% (even if durable) when their host node is temporarily down because these queues get (temporarily) deleted. 
See: %% https://github.com/rabbitmq/rabbitmq-server/blob/cf76b479300b767b8ea450293d096cbf729ed734/deps/rabbit/src/rabbit_amqqueue.erl#L1955-L1964 many_target_queues(Config) -> [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2), SourceQ = ?config(source_queue, Config), RaName = ra_name(SourceQ), TargetQ1 = ?config(target_queue_1, Config), TargetQ2 = ?config(target_queue_2, Config), TargetQ3 = ?config(target_queue_3, Config), + TargetQ4 = ?config(target_queue_4, Config), + TargetQ5 = ?config(target_queue_5, Config), + TargetQ6 = ?config(target_queue_6, Config), DLX = ?config(dead_letter_exchange, Config), DLRKey = <<"k1">>, %% Create topology: @@ -527,51 +481,43 @@ many_target_queues(Config) -> %% * target non-mirrored classic queue on node 1 %% * target quorum queue with 3 replicas %% * target stream queue with 3 replicas - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-routing-key">>, longstr, DLRKey}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-quorum-initial-group-size">>, long, 1} - ] - }), + %% * target mirrored classic queue with 3 replicas (leader on node 1) + %% * target mirrored classic queue with 1 replica (leader on node 2) + %% * target mirrored classic queue with 3 replica (leader on node 2) + declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-routing-key">>, longstr, DLRKey}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1} + ]), #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLX}), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = TargetQ1}), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ1, - exchange = DLX, - routing_key = DLRKey - }), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = TargetQ2, - durable = true, - arguments = [ - {<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-quorum-initial-group-size">>, long, 3} - ] - }), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ2, - exchange = DLX, - routing_key = DLRKey - }), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = TargetQ3, - durable = true, - arguments = [ - {<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-initial-cluster-size">>, long, 3} - ] - }), - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{ - queue = TargetQ3, - exchange = DLX, - routing_key = DLRKey - }), + declare_queue(Ch, TargetQ1, []), + bind_queue(Ch, TargetQ1, DLX, DLRKey), + declare_queue(Ch, TargetQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 3} + ]), + bind_queue(Ch, TargetQ2, DLX, DLRKey), + declare_queue(Ch, TargetQ3, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3} + ]), + bind_queue(Ch, TargetQ3, DLX, DLRKey), + ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q4">>, TargetQ4, <<"queues">>, + [{<<"ha-mode">>, <<"all">>}, + 
{<<"queue-master-locator">>, <<"client-local">>}]), + declare_queue(Ch, TargetQ4, []), + bind_queue(Ch, TargetQ4, DLX, DLRKey), + ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q5">>, TargetQ5, <<"queues">>, + [{<<"ha-mode">>, <<"exactly">>}, + {<<"ha-params">>, 1}, + {<<"queue-master-locator">>, <<"client-local">>}]), + declare_queue(Ch2, TargetQ5, []), + bind_queue(Ch2, TargetQ5, DLX, DLRKey), + ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q6">>, TargetQ6, <<"queues">>, + [{<<"ha-mode">>, <<"all">>}, + {<<"queue-master-locator">>, <<"client-local">>}]), + declare_queue(Ch2, TargetQ6, []), + bind_queue(Ch2, TargetQ6, DLX, DLRKey), Msg1 = <<"m1">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -603,6 +549,12 @@ many_target_queues(Config) -> after 2000 -> exit(deliver_timeout) end, + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ4}))), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, + amqp_channel:call(Ch2, #'basic.get'{queue = TargetQ5}))), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, + amqp_channel:call(Ch2, #'basic.get'{queue = TargetQ6}))), eventually(?_assertEqual([{0, 0}], dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1))), ok = rabbit_ct_broker_helpers:stop_node(Config, Server3), @@ -623,9 +575,6 @@ many_target_queues(Config) -> amqp_channel:call(Ch, #'basic.get'{queue = TargetQ1})), ok = rabbit_ct_broker_helpers:start_node(Config, Server2), ok = rabbit_ct_broker_helpers:start_node(Config, Server3), - %%TODO By end of this test, there will be many duplicate dead-letter messages in the target quorum queue and - %% target stream queue since both their queue clients and rabbit_fifo_dlx_worker re-try. - %% Possible solution is to have rabbit_fifo_dlx_worker only resend for classic target queues? eventually(?_assertEqual([{0, 0}], dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 6), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, @@ -636,7 +585,14 @@ many_target_queues(Config) -> ok after 0 -> exit(deliver_timeout) - end. + end, + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ4})), + eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ5}))), + %%TODO why is the 1st message (m1) a duplicate? + ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ6}), 2, 200). %% Test that there is a single active rabbit_fifo_dlx_worker that is co-located with the quorum queue leader. 
single_dlx_worker(Config) -> @@ -644,16 +600,12 @@ single_dlx_worker(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), SourceQ = ?config(source_queue, Config), DLX = ?config(dead_letter_exchange, Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{ - queue = SourceQ, - durable = true, - arguments = [ - {<<"x-dead-letter-exchange">>, longstr, DLX}, - {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, - {<<"x-overflow">>, longstr, <<"reject-publish">>}, - {<<"x-queue-type">>, longstr, <<"quorum">>} - ] - }), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, DLX}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), ?assertMatch( [[_, {active, 1}, _, _], [_, {active, 0}, _, _], @@ -689,6 +641,24 @@ assert_active_dlx_workers(N, Config, Server) -> [_, {active, N}, _, _], rabbit_ct_broker_helpers:rpc(Config, Server, supervisor, count_children, [rabbit_fifo_dlx_sup], 1000)). +declare_queue(Channel, Queue, Args) -> + #'queue.declare_ok'{} = amqp_channel:call(Channel, #'queue.declare'{ + queue = Queue, + durable = true, + arguments = Args + }). + +bind_queue(Channel, Queue, Exchange, RoutingKey) -> + #'queue.bind_ok'{} = amqp_channel:call(Channel, #'queue.bind'{ + queue = Queue, + exchange = Exchange, + routing_key = RoutingKey + }). + +delete_queue(Channel, Queue) -> + %% We implicitly test here that we don't end up with duplicate messages. + #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Channel, #'queue.delete'{queue = Queue}). + %%TODO move to rabbitmq_ct_helpers/include/rabbit_assert.hrl consistently(TestObj) -> consistently(TestObj, 200, 5). From db4ad1045072dcd08d9b21d630a0c5f91ea36898 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 11 Jan 2022 16:53:09 +0100 Subject: [PATCH 36/97] Log warning once in dlx worker instead of per message. Before this commit, logs were excessive since there were detailed logs for each message that could not be delivered. From now on, a warning is logged once per dlx worker if dlx worker cannot forward messages. --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 278 ++++++++---------- .../rabbit_fifo_dlx_integration_SUITE.erl | 44 ++- 2 files changed, 164 insertions(+), 158 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 82734924436e..043b2090cb03 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -83,7 +83,8 @@ %% redelivering messages for which not all publisher confirms were received. %% If there are no pending messages, this timer will eventually be cancelled to allow %% this worker to hibernate. - timer :: undefined | reference() + timer :: undefined | reference(), + logged = #{} :: map() }). % -type state() :: #state{}. @@ -121,7 +122,7 @@ terminate(_Reason, State) -> cancel_timer(State). handle_call(Request, From, State) -> - rabbit_log:warning("~s received unhandled call from ~p: ~p", [?MODULE, From, Request]), + rabbit_log:info("~s received unhandled call from ~p: ~p", [?MODULE, From, Request]), {noreply, State}. 
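%% The commit message above ("Log warning once in dlx worker instead of per message")
%% is realised by remembering, in the new 'logged' map of #state{}, which conditions
%% have already been reported. A minimal sketch of that pattern, assuming the logged
%% field added above; log_once/4 and the Key shape are illustrative, while the concrete
%% log_missing_dlx_once/1, log_no_route_once/1 and log_cycle_once/3 helpers appear
%% later in this diff.
log_once(Key, Fmt, Args, #state{logged = Logged} = State) ->
    case maps:is_key(Key, Logged) of
        true ->
            %% already warned about this condition for this worker; stay silent
            State;
        false ->
            rabbit_log:warning(Fmt, Args),
            State#state{logged = maps:put(Key, true, Logged)}
    end.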
handle_cast({queue_event, QRef, {_From, {machine, lookup_topology}}}, @@ -157,7 +158,7 @@ handle_cast(settle_timeout, State0) -> State = State0#state{timer = undefined}, redeliver_and_ack(State); handle_cast(Request, State) -> - rabbit_log:warning("~s received unhandled cast ~p", [?MODULE, Request]), + rabbit_log:info("~s received unhandled cast ~p", [?MODULE, Request]), {noreply, State}. redeliver_and_ack(State0) -> @@ -189,7 +190,7 @@ handle_info({'DOWN', _MRef, process, QPid, Reason}, end, {noreply, State}; handle_info(Info, State) -> - rabbit_log:warning("~s received unhandled info ~p", [?MODULE, Info]), + rabbit_log:info("~s received unhandled info ~p", [?MODULE, Info]), {noreply, State}. code_change(_OldVsn, State, _Extra) -> @@ -217,31 +218,25 @@ handle_queue_actions(Actions, State0) -> ({rejected, QRef, MsgSeqNos}, S0) -> rabbit_log:debug("Ignoring rejected messages ~p from ~s", [MsgSeqNos, rabbit_misc:rs(QRef)]), S0; - ({queue_down, QRef}, S0) -> + ({queue_down, _QRef}, S0) -> %% target classic queue is down, but not deleted - rabbit_log:debug("Ignoring DOWN from ~s", [rabbit_misc:rs(QRef)]), S0 end, State0, Actions). -handle_deliver(Msgs, #state{queue_ref = QRef} = State) when is_list(Msgs) -> - DLX = lookup_dlx(State), +handle_deliver(Msgs, #state{queue_ref = QRef} = State0) + when is_list(Msgs) -> + {DLX, State} = lookup_dlx(State0), lists:foldl(fun({_QRef, MsgId, Msg, Reason}, S) -> forward(Msg, MsgId, QRef, DLX, Reason, S) end, State, Msgs). -lookup_dlx(#state{exchange_ref = DLXRef, - queue_ref = QRef}) -> +lookup_dlx(#state{exchange_ref = DLXRef} = State0) -> case rabbit_exchange:lookup(DLXRef) of {error, not_found} -> - rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~s because its configured " - "dead-letter-exchange ~s does not exist. " - "Either create the configured dead-letter-exchange or re-configure " - "the dead-letter-exchange policy for the source quorum queue to prevent " - "dead-lettered messages from piling up in the source quorum queue.", - [rabbit_misc:rs(QRef), rabbit_misc:rs(DLXRef)]), - not_found; + State = log_missing_dlx_once(State0), + {not_found, State}; {ok, X} -> - X + {X, State0} end. forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, @@ -250,58 +245,25 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, exchange_ref = DLXRef, routing_key = RKey} = State0) -> #basic_message{routing_keys = RKeys} = Msg = rabbit_dead_letter:make_msg(ConsumedMsg, Reason, DLXRef, RKey, ConsumedQRef), - %% Field 'mandatory' is set to false because our module checks on its own whether the message is routable. + %% Field 'mandatory' is set to false because we check ourselves whether the message is routable. Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, Msg, OutSeq), - TargetQs = case DLX of - not_found -> - []; - _ -> - RouteToQs = rabbit_exchange:route(DLX, Delivery), - case rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs) of - {[], []} -> - rabbit_log:warning("Cannot deliver message with sequence number ~b " - "(for consumed message sequence number ~b) " - "because no queue is bound to dead-letter ~s with routing keys ~p.", - [OutSeq, ConsumedMsgId, rabbit_misc:rs(DLXRef), RKeys]), - []; - {Qs, []} -> - %% the "normal" case, i.e. no dead-letter-topology misconfiguration - Qs; - {[], Cycles} -> - %%TODO introduce structured logging in rabbit_log by using type logger:report - rabbit_log:warning("Cannot route to any queues. Detected dead-letter queue cycles. 
" - "Fix the dead-letter routing topology to prevent dead-letter messages from " - "piling up in source quorum queue. " - "outgoing_sequene_number=~b " - "consumed_message_sequence_number=~b " - "consumed_queue=~s " - "dead_letter_exchange=~s " - "effective_dead_letter_routing_keys=~p " - "routed_to_queues=~s " - "dead_letter_queue_cycles=~p", - [OutSeq, ConsumedMsgId, rabbit_misc:rs(ConsumedQRef), - rabbit_misc:rs(DLXRef), RKeys, strings(RouteToQs), Cycles]), - []; - {Qs, Cycles} -> - rabbit_log:warning("Detected dead-letter queue cycles. " - "Fix the dead-letter routing topology. " - "outgoing_sequene_number=~b " - "consumed_message_sequence_number=~b " - "consumed_queue=~s " - "dead_letter_exchange=~s " - "effective_dead_letter_routing_keys=~p " - "routed_to_queues_desired=~s " - "routed_to_queues_effective=~s " - "dead_letter_queue_cycles=~p", - [OutSeq, ConsumedMsgId, rabbit_misc:rs(ConsumedQRef), - rabbit_misc:rs(DLXRef), RKeys, strings(RouteToQs), strings(Qs), Cycles]), - %% Ignore the target queues resulting in cycles. - %% We decide it's good enough to deliver to only routable target queues. - Qs - end - end, + {TargetQs, State3} = case DLX of + not_found -> + {[], State0}; + _ -> + RouteToQs0 = rabbit_exchange:route(DLX, Delivery), + {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs0), + State1 = log_cycles(Cycles, RKeys, State0), + State2 = case RouteToQs of + [] -> + log_no_route_once(State1); + _ -> + State1 + end, + {RouteToQs, State2} + end, Now = os:system_time(millisecond), - State1 = State0#state{next_out_seq = OutSeq + 1}, + State4 = State3#state{next_out_seq = OutSeq + 1}, Pend0 = #pending{ consumed_msg_id = ConsumedMsgId, consumed_at = Now, @@ -311,15 +273,13 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, case TargetQs of [] -> %% We can't deliver this message since there is no target queue we can route to. - %% Under no circumstances should we drop a message with dead-letter-strategy at-least-once. - %% We buffer this message and retry to send every settle_timeout milliseonds - %% (until the user has fixed the dead-letter routing topology). - State1#state{pendings = maps:put(OutSeq, Pend0, Pendings)}; + %% We buffer this message and retry to send every settle_timeout milliseonds. + State4#state{pendings = maps:put(OutSeq, Pend0, Pendings)}; _ -> Pend = Pend0#pending{publish_count = 1, last_published_at = Now, unsettled = TargetQs}, - State = State1#state{pendings = maps:put(OutSeq, Pend, Pendings)}, + State = State4#state{pendings = maps:put(OutSeq, Pend, Pendings)}, deliver_to_queues(Delivery, TargetQs, State) end. @@ -340,14 +300,13 @@ deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0 State = State0#state{queue_type_state = QTypeState2}, handle_queue_actions(Actions, State). -handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0, - settle_timeout = SettleTimeout} = State) -> +handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0} = State) -> Pendings = lists:foldl(fun (MsgSeq, P0) -> - handle_settled0(QRef, MsgSeq, SettleTimeout, P0) + handle_settled0(QRef, MsgSeq, P0) end, Pendings0, MsgSeqs), State#state{pendings = Pendings}. 
-handle_settled0(QRef, MsgSeq, SettleTimeout, Pendings) -> +handle_settled0(QRef, MsgSeq, Pendings) -> case maps:find(MsgSeq, Pendings) of {ok, #pending{unsettled = Unset0, settled = Set0} = Pend0} -> Unset = lists:delete(QRef, Unset0), @@ -355,9 +314,9 @@ handle_settled0(QRef, MsgSeq, SettleTimeout, Pendings) -> Pend = Pend0#pending{unsettled = Unset, settled = Set}, maps:update(MsgSeq, Pend, Pendings); error -> - rabbit_log:warning("Ignoring publisher confirm for sequence number ~b " - "from target dead letter ~s after settle timeout of ~bms.", - [MsgSeq, rabbit_misc:rs(QRef), SettleTimeout]), + rabbit_log:info("Ignoring publisher confirm for sequence number ~b " + "from target dead letter ~s", + [MsgSeq, rabbit_misc:rs(QRef)]), Pendings end. @@ -387,14 +346,13 @@ maybe_ack(#state{pendings = Pendings0, %% Re-deliver messages that timed out waiting on publisher confirm and %% messages that got never sent due to routing topology misconfiguration. redeliver_messsages(#state{pendings = Pendings, - settle_timeout = SettleTimeout} = State) -> - case lookup_dlx(State) of - not_found -> + settle_timeout = SettleTimeout} = State0) -> + case lookup_dlx(State0) of + {not_found, State} -> %% Configured dead-letter-exchange does (still) not exist. - %% Warning got already logged. %% Keep the same Pendings in our state until user creates or re-configures the dead-letter-exchange. State; - DLX -> + {DLX, State} -> Now = os:system_time(millisecond), maps:fold(fun(OutSeq, #pending{last_published_at = LastPub} = Pend, S0) when LastPub + SettleTimeout =< Now -> @@ -424,18 +382,15 @@ redeliver(#pending{delivery = #delivery{message = #basic_message{content = Conte redeliver(Pend, DLX, OutSeq, #state{routing_key = DLRKey} = State) -> redeliver0(Pend, DLX, [DLRKey], OutSeq, State). -%% TODO do not log per message? -redeliver0(#pending{consumed_msg_id = ConsumedMsgId, - delivery = #delivery{message = BasicMsg} = Delivery0, +redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, unsettled = Unsettled0, settled = Settled, publish_count = PublishCount, reason = Reason} = Pend0, DLX, DLRKeys, OutSeq, - #state{queue_ref = QRef, - pendings = Pendings0, - exchange_ref = DLXRef, - settle_timeout = SettleTimeout} = State0) when is_list(DLRKeys) -> + #state{pendings = Pendings0, + exchange_ref = DLXRef} = State0) + when is_list(DLRKeys) -> Delivery = Delivery0#delivery{message = BasicMsg#basic_message{exchange_name = DLXRef, routing_keys = DLRKeys}}, RouteToQs0 = rabbit_exchange:route(DLX, Delivery), @@ -445,47 +400,11 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, Unsettled = RouteToQs0 -- Settled, RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0), {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1), - Prefix = io_lib:format("Message has not received required publisher confirm(s). " - "Received confirm from: [~s]. " - "Did not receive confirm from: [~s]. " - "timeout=~bms " - "message_sequence_number=~b " - "consumed_message_sequence_number=~b " - "publish_count=~b.", - [strings(Settled), strings(Unsettled0), SettleTimeout, - OutSeq, ConsumedMsgId, PublishCount]), - case {RouteToQs, Cycles, Settled} of - {[], [], []} -> - rabbit_log:warning("~s Failed to re-deliver this message because no queue is bound " - "to dead-letter ~s with routing keys ~p.", - [Prefix, rabbit_misc:rs(DLXRef), DLRKeys]), - State0; - {[], [], [_|_]} -> - rabbit_log:debug("~s Routes changed dynamically so that this message does not need to be routed " - "to any queue anymore. 
This message will be acknowledged to the source ~s.", - [Prefix, rabbit_misc:rs(QRef)]), - State0; - {[], [_|_], []} -> - rabbit_log:warning("~s Failed to re-deliver this message because dead-letter queue cycles " - "got detected: ~p", - [Prefix, Cycles]), - State0; - {[], [_|_], [_|_]} -> - rabbit_log:warning("~s Dead-letter queue cycles detected: ~p. " - "This message will nevertheless be acknowledged to the source ~s " - "because it received at least one publisher confirm.", - [Prefix, Cycles, rabbit_misc:rs(QRef)]), - State0; + State1 = log_cycles(Cycles, DLRKeys, State0), + case RouteToQs of + [] -> + State1; _ -> - case Cycles of - [] -> - rabbit_log:debug("~s Re-delivering this message to ~s", - [Prefix, strings(RouteToQs)]); - [_|_] -> - rabbit_log:warning("~s Dead-letter queue cycles detected: ~p. " - "Re-delivering this message only to ~s", - [Prefix, Cycles, strings(RouteToQs)]) - end, Pend = Pend0#pending{publish_count = PublishCount + 1, last_published_at = os:system_time(millisecond), delivery = Delivery, @@ -495,10 +414,21 @@ redeliver0(#pending{consumed_msg_id = ConsumedMsgId, deliver_to_queues(Delivery, RouteToQs, State) end. -strings(QRefs) when is_list(QRefs) -> - L0 = lists:map(fun rabbit_misc:rs/1, QRefs), - L1 = lists:join(", ", L0), - lists:flatten(L1). +%% Returns queues whose queue clients take care of redelivering messages. +clients_redeliver(QNames) -> + Qs = lists:filter(fun(Q) -> + case amqqueue:get_type(Q) of + rabbit_quorum_queue -> + %% If Raft command (#enqueue{}) does not get applied + %% rabbit_fifo_client will resend. + true; + rabbit_stream_queue -> + true; + _ -> + false + end + end, rabbit_amqqueue:lookup_many(QNames)), + lists:map(fun amqqueue:get_name/1, Qs). maybe_set_timer(#state{timer = TRef} = State) when is_reference(TRef) -> @@ -510,7 +440,6 @@ maybe_set_timer(#state{timer = undefined, maybe_set_timer(#state{timer = undefined, settle_timeout = SettleTimeout} = State) -> TRef = erlang:send_after(SettleTimeout, self(), {'$gen_cast', settle_timeout}), - % rabbit_log:debug("set timer"), State#state{timer = TRef}. maybe_cancel_timer(#state{timer = TRef, @@ -529,7 +458,6 @@ cancel_timer(#state{timer = TRef} = State) erlang:cancel_timer(TRef, [{async, true}, {info, false}]), State#state{timer = undefined}. -%% Avoids large message contents being logged. format_status(_Opt, [_PDict, #state{ queue_ref = QueueRef, exchange_ref = ExchangeRef, @@ -538,7 +466,9 @@ format_status(_Opt, [_PDict, #state{ queue_type_state = QueueTypeState, pendings = Pendings, next_out_seq = NextOutSeq, - timer = Timer + settle_timeout = SettleTimeout, + timer = Timer, + logged = Logged }]) -> S = #{queue_ref => QueueRef, exchange_ref => ExchangeRef, @@ -547,10 +477,13 @@ format_status(_Opt, [_PDict, #state{ queue_type_state => QueueTypeState, pendings => maps:map(fun(_, P) -> format_pending(P) end, Pendings), next_out_seq => NextOutSeq, - timer_is_active => Timer =/= undefined}, + settle_timeout => SettleTimeout, + timer_is_active => Timer =/= undefined, + logged => Logged}, [{data, [{"State", S}]}]. format_pending(#pending{consumed_msg_id = ConsumedMsgId, + delivery = _DoNotLogLargeBinary, reason = Reason, unsettled = Unsettled, settled = Settled, @@ -565,18 +498,53 @@ format_pending(#pending{consumed_msg_id = ConsumedMsgId, last_published_at => LastPublishedAt, consumed_at => ConsumedAt}. -%% Returns queues whose queue clients take care of redelivering messages. 
-clients_redeliver(QNames) -> - Qs = lists:filter(fun(Q) -> - case amqqueue:get_type(Q) of - rabbit_quorum_queue -> - %% If Raft command (#enqueue{}) does not get applied - %% rabbit_fifo_client will resend. - true; - rabbit_stream_queue -> - true; - _ -> - false - end - end, rabbit_amqqueue:lookup_many(QNames)), - lists:map(fun amqqueue:get_name/1, Qs). +log_missing_dlx_once(#state{exchange_ref = SameDlx, + logged = #{missing_dlx := SameDlx}} = State) -> + State; +log_missing_dlx_once(#state{exchange_ref = DlxResource, + queue_ref = QueueResource, + logged = Logged} = State) -> + rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~s because " + "its configured dead-letter-exchange ~s does not exist. " + "Either create the configured dead-letter-exchange or re-configure " + "the dead-letter-exchange policy for the source quorum queue to prevent " + "dead-lettered messages from piling up in the source quorum queue. " + "This message will not be logged again.", + [rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource)]), + State#state{logged = maps:put(missing_dlx, DlxResource, Logged)}. + +log_no_route_once(#state{exchange_ref = SameDlx, + routing_key = SameRoutingKey, + logged = #{no_route := {SameDlx, SameRoutingKey}}} = State) -> + State; +log_no_route_once(#state{queue_ref = QueueResource, + exchange_ref = DlxResource, + routing_key = RoutingKey, + logged = Logged} = State) -> + rabbit_log:warning("Cannot forward any dead-letter messages from source quorum ~s " + "with configured dead-letter-exchange ~s and configured " + "dead-letter-routing-key '~s'. This can happen either if the dead-letter " + "routing topology is misconfigured (for example no queue bound to " + "dead-letter-exchange or wrong dead-letter-routing-key configured) or if " + "non-mirrored classic queues are bound whose host node is down. " + "Fix this issue to prevent dead-lettered messages from piling up " + "in the source quorum queue. " + "This message will not be logged again.", + [rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource), RoutingKey]), + State#state{logged = maps:put(no_route, {DlxResource, RoutingKey}, Logged)}. + +log_cycles(Cycles, RoutingKeys, State) -> + lists:foldl(fun(Cycle, S) -> log_cycle_once(Cycle, RoutingKeys, S) end, State, Cycles). + +log_cycle_once(Queues, _, #state{logged = Logged} = State) + when is_map_key({cycle, Queues}, Logged) -> + State; +log_cycle_once(Queues, RoutingKeys, #state{exchange_ref = DlxResource, + queue_ref = QueueResource, + logged = Logged} = State) -> + rabbit_log:warning("Dead-letter queues cycle detected for source quorum ~s " + "with dead-letter exchange ~s and routing keys ~p: ~p " + "This message will not be logged again.", + [rabbit_misc:rs(QueueResource), rabbit_misc:rs(DlxResource), + RoutingKeys, Queues]), + State#state{logged = maps:put({cycle, Queues}, true, Logged)}. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 4a85e157ed4c..bf5a1c928b1d 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -32,6 +32,7 @@ groups() -> target_queue_not_bound, target_queue_deleted, dlx_missing, + cycle, stats, drop_head_falls_back_to_at_most_once, switch_strategy @@ -198,7 +199,7 @@ assert_dlx_headers(Headers, Reason, SourceQ) -> end. %% Test that message is not lost despite no route from dead-letter exchange to target queue. 
-%% Once, the route becomes available, the message is delivered to the target queue +%% Once the route becomes available, the message is delivered to the target queue %% and acked to the source quorum queue. target_queue_not_bound(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -288,7 +289,7 @@ target_queue_deleted(Config) -> dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))). %% Test that message is not lost when configured dead-letter exchange does not exist. -%% Once, the exchange gets declared, the message is delivered to the target queue +%% Once the exchange gets declared, the message is delivered to the target queue %% and acked to the source quorum queue. dlx_missing(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -322,11 +323,48 @@ dlx_missing(Config) -> %% Therefore, message should be delivered to target queue and acked to source queue. eventually(?_assertEqual([{0, 0}], dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), - 500, 10), + 500, 8), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, payload = Msg}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). +%% Test that message is not lost when it cycles. +%% Once the cycle is resolved, the message is delivered to the target queue and acked to +%% the source quorum queue. +cycle(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, <<"">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>} + ]), + Msg = <<"msg">>, + ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = <<"0">>}, + payload = Msg}), + RaName = ra_name(SourceQ), + %% Message cycled when it was dead-lettered: + %% source queue -> default exchange -> source queue + %% Therefore, 1 message should be kept in discards queue. + eventually(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + consistently(?_assertMatch([{1, _}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + %% Fix the cycle such that dead-lettering flows like this: + %% source queue -> default exchange -> target queue + declare_queue(Ch, TargetQ, []), + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, + [{<<"dead-letter-routing-key">>, TargetQ}]), + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), + 500, 8), + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). %% Test that rabbit_fifo_dlx tracks statistics correctly. 
stats(Config) -> From 679bcc815185fd7ebeb58d578a589b5e79ba4425 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 13 Jan 2022 00:11:51 +0100 Subject: [PATCH 37/97] Allow for higher dlx throughput by default --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 043b2090cb03..203e3b4e9259 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -25,7 +25,7 @@ code_change/3, format_status/2]). %%TODO make configurable via cuttlefish? --define(DEFAULT_PREFETCH, 100). +-define(DEFAULT_PREFETCH, 1000). -define(DEFAULT_SETTLE_TIMEOUT, 120_000). -define(HIBERNATE_AFTER, 180_000). From 60422c583867ff4a92bb1dd0c3efd56e87f5dc57 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 13 Jan 2022 12:45:23 +0100 Subject: [PATCH 38/97] Improve performance of dlx worker Experiment with enqueuing, dead-lettering, and consuming from target dead-letter queue revealed in the flamegraph that a lot of CPU cycles (>6%) are spent in function maps:filter/2 of rabbit_fifo_dlx_worker:maybe_ack/1 If we only consider the dlx worker, it's even much more than 6%. Therefore, instead of filtering over all pending messages to know which ones can be acked, we move messages to be acked directly to its own settled_ids list when we receive a settlement or when we re-deliver. That's also in-line with what rabbit_channel is doing. This commit increases the throughput of the dlx worker by ~150%. To re-produce, check the received message rate before and after this commit. Start consumer: bin/runjava com.rabbitmq.perf.PerfTest -x 0 y 1 -u dlx-queue Start producer: bin/runjava com.rabbitmq.perf.PerfTest -x 1 -y 0 -ad false -f persistent -qa 'x-queue-type=quorum,x-max-in-memory-length=0,x-overflow=reject-publish,x-dead-letter-strategy=at-least-once,x-dead-letter-routing-key=dlx-queue,x-dead-letter-exchange=,x-message-ttl=0' -u q1 --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 119 ++++++++++----------- 1 file changed, 59 insertions(+), 60 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 203e3b4e9259..a27a5d9a0c31 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -69,10 +69,10 @@ routing_key, dlx_client_state :: undefined | rabbit_fifo_dlx_client:state(), queue_type_state :: undefined | rabbit_queue_type:state(), - %% Consumed messages for which we have not received all publisher confirms yet. - %% Therefore, they have not been ACKed yet to the consumer queue. - %% This buffer contains at most PREFETCH pending messages at any given point in time. + %% Consumed messages for which we are awaiting publisher confirms. pendings = #{} :: #{OutSeq :: non_neg_integer() => #pending{}}, + %% Consumed message IDs for which we received all publisher confirms. + settled_ids = [] :: [non_neg_integer()], %% next publisher confirm delivery tag sequence number next_out_seq = 1, %% If no publisher confirm was received for at least settle_timeout milliseconds, message will be redelivered. @@ -163,9 +163,7 @@ handle_cast(Request, State) -> redeliver_and_ack(State0) -> State1 = redeliver_messsages(State0), - %% Routes could have been changed dynamically. - %% If a publisher confirm timed out for a target queue to which we now don't route anymore, ack the message. 
- State2 = maybe_ack(State1), + State2 = ack(State1), State = maybe_set_timer(State2), {noreply, State}. @@ -213,7 +211,7 @@ handle_queue_actions(Actions, State0) -> maybe_set_timer(S1); ({settled, QRef, MsgSeqs}, S0) -> S1 = handle_settled(QRef, MsgSeqs, S0), - S2 = maybe_ack(S1), + S2 = ack(S1), maybe_cancel_timer(S2); ({rejected, QRef, MsgSeqNos}, S0) -> rabbit_log:debug("Ignoring rejected messages ~p from ~s", [MsgSeqNos, rabbit_misc:rs(QRef)]), @@ -300,48 +298,36 @@ deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0 State = State0#state{queue_type_state = QTypeState2}, handle_queue_actions(Actions, State). -handle_settled(QRef, MsgSeqs, #state{pendings = Pendings0} = State) -> - Pendings = lists:foldl(fun (MsgSeq, P0) -> - handle_settled0(QRef, MsgSeq, P0) - end, Pendings0, MsgSeqs), - State#state{pendings = Pendings}. +handle_settled(QRef, MsgSeqs, State) -> + lists:foldl(fun (MsgSeq, S) -> + handle_settled0(QRef, MsgSeq, S) + end, State, MsgSeqs). -handle_settled0(QRef, MsgSeq, Pendings) -> +handle_settled0(QRef, MsgSeq, #state{pendings = Pendings, + settled_ids = SettledIds} = State) -> case maps:find(MsgSeq, Pendings) of - {ok, #pending{unsettled = Unset0, settled = Set0} = Pend0} -> - Unset = lists:delete(QRef, Unset0), - Set = [QRef | Set0], - Pend = Pend0#pending{unsettled = Unset, settled = Set}, - maps:update(MsgSeq, Pend, Pendings); + {ok, #pending{unsettled = [QRef], + consumed_msg_id = ConsumedId}} -> + State#state{pendings = maps:remove(MsgSeq, Pendings), + settled_ids = [ConsumedId | SettledIds]}; + {ok, #pending{unsettled = Unsettled, settled = Settled} = Pend0} -> + Pend = Pend0#pending{unsettled = lists:delete(QRef, Unsettled), + settled = [QRef | Settled]}, + State#state{pendings = maps:update(MsgSeq, Pend, Pendings)}; error -> rabbit_log:info("Ignoring publisher confirm for sequence number ~b " "from target dead letter ~s", [MsgSeq, rabbit_misc:rs(QRef)]), - Pendings + State end. -maybe_ack(#state{pendings = Pendings0, - dlx_client_state = DlxState0} = State) -> - Settled = maps:filter(fun(_OutSeq, #pending{unsettled = [], settled = [_|_]}) -> - %% Ack because there is at least one target queue and all - %% target queues settled (i.e. combining publisher confirm - %% and mandatory flag semantics). - true; - (_, _) -> - false - end, Pendings0), - case maps:size(Settled) of - 0 -> - %% nothing to ack - State; - _ -> - Ids = lists:map(fun(#pending{consumed_msg_id = Id}) -> Id end, maps:values(Settled)), - {ok, DlxState} = rabbit_fifo_dlx_client:settle(Ids, DlxState0), - SettledOutSeqs = maps:keys(Settled), - Pendings = maps:without(SettledOutSeqs, Pendings0), - State#state{pendings = Pendings, - dlx_client_state = DlxState} - end. +ack(#state{settled_ids = []} = State) -> + State; +ack(#state{settled_ids = Ids, + dlx_client_state = DlxState0} = State) -> + {ok, DlxState} = rabbit_fifo_dlx_client:settle(Ids, DlxState0), + State#state{settled_ids = [], + dlx_client_state = DlxState}. %% Re-deliver messages that timed out waiting on publisher confirm and %% messages that got never sent due to routing topology misconfiguration. 
@@ -386,32 +372,43 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, unsettled = Unsettled0, settled = Settled, publish_count = PublishCount, - reason = Reason} = Pend0, + reason = Reason, + consumed_msg_id = ConsumedId} = Pend0, DLX, DLRKeys, OutSeq, - #state{pendings = Pendings0, + #state{pendings = Pendings, + settled_ids = SettledIds, exchange_ref = DLXRef} = State0) when is_list(DLRKeys) -> Delivery = Delivery0#delivery{message = BasicMsg#basic_message{exchange_name = DLXRef, routing_keys = DLRKeys}}, RouteToQs0 = rabbit_exchange:route(DLX, Delivery), - %% Do not redeliver message to a target queue - %% 1. for which we already received a publisher confirm, or - %% 2. whose queue client redelivers on our behalf. - Unsettled = RouteToQs0 -- Settled, - RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0), - {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1), - State1 = log_cycles(Cycles, DLRKeys, State0), - case RouteToQs of - [] -> - State1; + case {RouteToQs0, Settled} of + {[], [_|_]} -> + %% Routes changed dynamically so that we don't await any publisher confirms anymore. + %% Since we also received at least once publisher confirm (mandatory flag semantics), + %% we can ack the messasge to the source quorum queue. + State0#state{pendings = maps:remove(OutSeq, Pendings), + settled_ids = [ConsumedId | SettledIds]}; _ -> - Pend = Pend0#pending{publish_count = PublishCount + 1, - last_published_at = os:system_time(millisecond), - delivery = Delivery, - %% override 'unsettled' because topology could have changed - unsettled = Unsettled}, - State = State0#state{pendings = maps:update(OutSeq, Pend, Pendings0)}, - deliver_to_queues(Delivery, RouteToQs, State) + %% Do not redeliver message to a target queue + %% 1. for which we already received a publisher confirm, or + Unsettled = RouteToQs0 -- Settled, + %% 2. whose queue client redelivers on our behalf. + RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0), + {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1), + State1 = log_cycles(Cycles, DLRKeys, State0), + case RouteToQs of + [] -> + State1; + _ -> + Pend = Pend0#pending{publish_count = PublishCount + 1, + last_published_at = os:system_time(millisecond), + delivery = Delivery, + %% override 'unsettled' because topology could have changed + unsettled = Unsettled}, + State = State0#state{pendings = maps:update(OutSeq, Pend, Pendings)}, + deliver_to_queues(Delivery, RouteToQs, State) + end end. %% Returns queues whose queue clients take care of redelivering messages. 
@@ -465,6 +462,7 @@ format_status(_Opt, [_PDict, #state{ dlx_client_state = DlxClientState, queue_type_state = QueueTypeState, pendings = Pendings, + settled_ids = SettledIds, next_out_seq = NextOutSeq, settle_timeout = SettleTimeout, timer = Timer, @@ -476,6 +474,7 @@ format_status(_Opt, [_PDict, #state{ dlx_client_state => rabbit_fifo_dlx_client:overview(DlxClientState), queue_type_state => QueueTypeState, pendings => maps:map(fun(_, P) -> format_pending(P) end, Pendings), + settled_ids => SettledIds, next_out_seq => NextOutSeq, settle_timeout => SettleTimeout, timer_is_active => Timer =/= undefined, From 297ae96d22c4fbc44874eed16550f8620f81261f Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 14 Jan 2022 12:38:40 +0100 Subject: [PATCH 39/97] Fix dialyzer warnings --- deps/rabbit/src/rabbit_disk_monitor.erl | 7 ++----- deps/rabbit/src/rabbit_fifo.erl | 3 ++- deps/rabbit/src/rabbit_fifo_dlx.erl | 2 +- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 12 ++++++------ 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/deps/rabbit/src/rabbit_disk_monitor.erl b/deps/rabbit/src/rabbit_disk_monitor.erl index 93016b112e6b..c32ea2a3baa5 100644 --- a/deps/rabbit/src/rabbit_disk_monitor.erl +++ b/deps/rabbit/src/rabbit_disk_monitor.erl @@ -246,11 +246,8 @@ get_disk_free(Dir, {win32, _}) -> rabbit_log:warning("Expected the mnesia directory absolute " "path to start with a drive letter like " "'C:'. The path is: '~p'", [Dir]), - case win32_get_disk_free_dir(Dir) of - {ok, Free} -> - Free; - _ -> exit(could_not_determine_disk_free) - end; + {ok, Free} = win32_get_disk_free_dir(Dir), + Free; DriveLetter -> case catch win32_get_disk_free_pwsh(DriveLetter) of {ok, Free1} -> Free1; diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index ce5b1ba82ec8..6904be3ccf1c 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -780,7 +780,8 @@ update_waiting_consumer_status(Node, end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers, Consumer#consumer.status =/= cancelled]. --spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects(). +-spec state_enter(ra_server:ra_state() | eol, state()) -> + ra_machine:effects(). state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = DLH, resource = QRes}, dlx = DlxState} = State) -> diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 24a9ab36e468..b40478522ff0 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -135,7 +135,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid, msg_bytes_checkout = BytesCheckout - BytesMoved}, {State, []}; apply(_, Cmd, DLH, State) -> - rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", Cmd, DLH), + rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", [Cmd, DLH]), {State, []}. 
-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index a27a5d9a0c31..4fb5a74aa8ac 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -106,17 +106,17 @@ handle_continue(QRef, undefined) -> SettleTimeout = application:get_env(rabbit, dead_letter_worker_publisher_confirm_timeout_ms, ?DEFAULT_SETTLE_TIMEOUT), - State = lookup_topology(#state{queue_ref = QRef, - queue_type_state = rabbit_queue_type:init(), - settle_timeout = SettleTimeout}), {ok, Q} = rabbit_amqqueue:lookup(QRef), {ClusterName, _MaybeOldLeaderNode} = amqqueue:get_pid(Q), {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(QRef, {ClusterName, node()}, Prefetch), - MonitorRef = erlang:monitor(process, ClusterName), - {noreply, State#state{dlx_client_state = ConsumerState, - monitor_ref = MonitorRef}}. + {noreply, lookup_topology(#state{queue_ref = QRef, + queue_type_state = rabbit_queue_type:init(), + settle_timeout = SettleTimeout, + dlx_client_state = ConsumerState, + monitor_ref = erlang:monitor(process, ClusterName) + })}. terminate(_Reason, State) -> cancel_timer(State). From c23fccf287e8d08b9ec62d522ba2d77cb5712f95 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 14 Jan 2022 17:11:38 +0100 Subject: [PATCH 40/97] Add more specs --- deps/rabbit/BUILD.bazel | 4 +- deps/rabbit/Makefile | 4 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 5 +- deps/rabbit/src/rabbit_fifo_dlx_client.erl | 15 +++++- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 53 ++++++++++++------- .../rabbit_fifo_dlx_integration_SUITE.erl | 2 +- 6 files changed, 56 insertions(+), 27 deletions(-) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index 930ced215895..399253c01b9e 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -144,7 +144,9 @@ _APP_ENV = """[ %% interval at which connection/channel tracking executes post operations {tracking_execution_timeout, 15000}, {stream_messages_soft_limit, 256}, - {track_auth_attempt_source, false} + {track_auth_attempt_source, false}, + {dead_letter_worker_consumer_prefetch, 1000}, + {dead_letter_worker_publisher_confirm_timeout, 120000} ] """ diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index 7a448c2aba84..3b5bba96b6e7 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -122,7 +122,9 @@ define PROJECT_ENV %% interval at which connection/channel tracking executes post operations {tracking_execution_timeout, 15000}, {stream_messages_soft_limit, 256}, - {track_auth_attempt_source, false} + {track_auth_attempt_source, false}, + {dead_letter_worker_consumer_prefetch, 1000}, + {dead_letter_worker_publisher_confirm_timeout, 120000} ] endef diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index b40478522ff0..5163f4fe861c 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -32,8 +32,7 @@ -type protocol() :: {dlx, #checkout{} | #settle{}}. -type state() :: #?MODULE{}. -export_type([state/0, - protocol/0, - reason/0]). + protocol/0]). -spec init() -> state(). init() -> @@ -274,7 +273,7 @@ delivery_effects(CPid, {InMemMsgs, IdxMsgs0}) -> [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. 
--spec state_enter(ra_server:ra_state(), rabbit_types:r('queue'), dead_letter_handler(), state()) -> +-spec state_enter(ra_server:ra_state() | eol, rabbit_types:r('queue'), dead_letter_handler(), state()) -> ra_machine:effects(). state_enter(leader, QRes, at_least_once, State) -> ensure_worker_started(QRes, State), diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index 8bd341392c15..0b363f2e5e6c 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -9,14 +9,24 @@ last_msg_id :: non_neg_integer() | -1 }). -type state() :: #state{}. --export_type([state/0]). +-type action() :: {deliver, [{rabbit_amqqueue:name(), + MsgId :: non_neg_integer(), + rabbit_types:message(), + rabbit_dead_letter:reason()}]}. +-type actions() :: [action()]. +-export_type([state/0, + actions/0]). +-spec settle([non_neg_integer()], state()) -> + {ok, state()}. settle(MsgIds, #state{leader = Leader} = State) when is_list(MsgIds) -> Cmd = rabbit_fifo_dlx:make_settle(MsgIds), ra:pipeline_command(Leader, Cmd), {ok, State}. +-spec checkout(rabbit_amqqueue:name(), ra:server_id(), non_neg_integer()) -> + {ok, state()} | {error, ra_command_failed}. checkout(QResource, Leader, NumUnsettled) -> Cmd = rabbit_fifo_dlx:make_checkout(self(), NumUnsettled), State = #state{queue_resource = QResource, @@ -41,6 +51,8 @@ process_command(Cmd, #state{leader = Leader} = State, Tries) -> process_command(Cmd, State, Tries - 1) end. +-spec handle_ra_event(ra:server_id(), term(), state()) -> + {ok, state(), actions()}. handle_ra_event(Leader, {machine, {dlx_delivery, _} = Del}, #state{leader = Leader} = State) -> handle_delivery(Del, State); handle_ra_event(From, Evt, State) -> @@ -66,6 +78,7 @@ transform_msgs(QRes, Msgs) -> {QRes, MsgId, Msg, Reason} end, Msgs). +-spec overview(state()) -> map(). overview(#state{leader = Leader, last_msg_id = LastMsgId}) -> #{leader => Leader, diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 4fb5a74aa8ac..6c689e63c75a 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -24,9 +24,6 @@ handle_cast/2, handle_call/3, handle_info/2, code_change/3, format_status/2]). -%%TODO make configurable via cuttlefish? --define(DEFAULT_PREFETCH, 1000). --define(DEFAULT_SETTLE_TIMEOUT, 120_000). -define(HIBERNATE_AFTER, 180_000). -record(pending, { @@ -39,7 +36,7 @@ delivery :: rabbit_types:delivery(), %% TODO Reason is already stored in first x-death header of #content.properties.#'P_basic'.headers %% So, we could remove this convenience field and lookup the 1st header when redelivering. - reason :: rabbit_fifo_dlx:reason(), + reason :: rabbit_dead_letter:reason(), %% target queues for which publisher confirm has not been received yet unsettled = [] :: [rabbit_amqqueue:name()], %% target queues for which publisher confirm was received @@ -67,8 +64,8 @@ exchange_ref, %% configured (x-)dead-letter-routing-key of source queue routing_key, - dlx_client_state :: undefined | rabbit_fifo_dlx_client:state(), - queue_type_state :: undefined | rabbit_queue_type:state(), + dlx_client_state :: rabbit_fifo_dlx_client:state(), + queue_type_state :: rabbit_queue_type:state(), %% Consumed messages for which we are awaiting publisher confirms. pendings = #{} :: #{OutSeq :: non_neg_integer() => #pending{}}, %% Consumed message IDs for which we received all publisher confirms. @@ -87,25 +84,25 @@ logged = #{} :: map() }). 
-% -type state() :: #state{}. +-type state() :: #state{}. %%TODO Add metrics like global counters for messages routed, delivered, etc. by adding a new counter in seshat. start_link(QRef) -> gen_server:start_link(?MODULE, QRef, [{hibernate_after, ?HIBERNATE_AFTER}]). -% -spec init(rabbit_amqqueue:name()) -> -% {ok, undefined, {continue, rabbit_amqqueue:name()}}}. +-spec init(rabbit_amqqueue:name()) -> + {ok, undefined, {continue, rabbit_amqqueue:name()}}. init(QRef) -> {ok, undefined, {continue, QRef}}. +-spec handle_continue(rabbit_amqqueue:name(), undefined) -> + {noreply, state()}. handle_continue(QRef, undefined) -> - Prefetch = application:get_env(rabbit, - dead_letter_worker_consumer_prefetch, - ?DEFAULT_PREFETCH), - SettleTimeout = application:get_env(rabbit, - dead_letter_worker_publisher_confirm_timeout_ms, - ?DEFAULT_SETTLE_TIMEOUT), + {ok, Prefetch} = application:get_env(rabbit, + dead_letter_worker_consumer_prefetch), + {ok, SettleTimeout} = application:get_env(rabbit, + dead_letter_worker_publisher_confirm_timeout), {ok, Q} = rabbit_amqqueue:lookup(QRef), {ClusterName, _MaybeOldLeaderNode} = amqqueue:get_pid(Q), {ok, ConsumerState} = rabbit_fifo_dlx_client:checkout(QRef, @@ -162,7 +159,7 @@ handle_cast(Request, State) -> {noreply, State}. redeliver_and_ack(State0) -> - State1 = redeliver_messsages(State0), + State1 = redeliver_messages(State0), State2 = ack(State1), State = maybe_set_timer(State2), {noreply, State}. @@ -194,6 +191,7 @@ handle_info(Info, State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. +-spec lookup_topology(state()) -> state(). lookup_topology(#state{queue_ref = {resource, Vhost, queue, _} = QRef} = State) -> {ok, Q} = rabbit_amqqueue:lookup(QRef), DLRKey = rabbit_queue_type_util:args_policy_lookup(<<"dead-letter-routing-key">>, @@ -204,6 +202,8 @@ lookup_topology(#state{queue_ref = {resource, Vhost, queue, _} = QRef} = State) State#state{exchange_ref = DLXRef, routing_key = DLRKey}. +-spec handle_queue_actions(rabbit_queue_type:actions() | rabbit_fifo_dlx_client:actions(), state()) -> + state(). handle_queue_actions(Actions, State0) -> lists:foldl( fun ({deliver, Msgs}, S0) -> @@ -214,7 +214,8 @@ handle_queue_actions(Actions, State0) -> S2 = ack(S1), maybe_cancel_timer(S2); ({rejected, QRef, MsgSeqNos}, S0) -> - rabbit_log:debug("Ignoring rejected messages ~p from ~s", [MsgSeqNos, rabbit_misc:rs(QRef)]), + rabbit_log:debug("Ignoring rejected messages ~p from ~s", + [MsgSeqNos, rabbit_misc:rs(QRef)]), S0; ({queue_down, _QRef}, S0) -> %% target classic queue is down, but not deleted @@ -228,6 +229,8 @@ handle_deliver(Msgs, #state{queue_ref = QRef} = State0) forward(Msg, MsgId, QRef, DLX, Reason, S) end, State, Msgs). +-spec lookup_dlx(state()) -> + {rabbit_types:exchange() | not_found, state()}. lookup_dlx(#state{exchange_ref = DLXRef} = State0) -> case rabbit_exchange:lookup(DLXRef) of {error, not_found} -> @@ -237,12 +240,16 @@ lookup_dlx(#state{exchange_ref = DLXRef} = State0) -> {X, State0} end. +-spec forward(rabbit_types:message(), non_neg_integer(), rabbit_amqqueue:name(), + rabbit_types:exchange() | not_found, rabbit_dead_letter:reason(), state()) -> + state(). 
forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, #state{next_out_seq = OutSeq, pendings = Pendings, exchange_ref = DLXRef, routing_key = RKey} = State0) -> - #basic_message{routing_keys = RKeys} = Msg = rabbit_dead_letter:make_msg(ConsumedMsg, Reason, DLXRef, RKey, ConsumedQRef), + #basic_message{routing_keys = RKeys} = Msg = rabbit_dead_letter:make_msg(ConsumedMsg, Reason, + DLXRef, RKey, ConsumedQRef), %% Field 'mandatory' is set to false because we check ourselves whether the message is routable. Delivery = rabbit_basic:delivery(_Mandatory = false, _Confirm = true, Msg, OutSeq), {TargetQs, State3} = case DLX of @@ -281,6 +288,8 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, deliver_to_queues(Delivery, TargetQs, State) end. +-spec deliver_to_queues(rabbit_types:delivery(), [rabbit_amqqueue:name()], state()) -> + state(). deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0} = State0) -> Qs = rabbit_amqqueue:lookup(RouteToQNames), {QTypeState2, Actions} = case rabbit_queue_type:deliver(Qs, Delivery, QTypeState0) of @@ -331,8 +340,10 @@ ack(#state{settled_ids = Ids, %% Re-deliver messages that timed out waiting on publisher confirm and %% messages that got never sent due to routing topology misconfiguration. -redeliver_messsages(#state{pendings = Pendings, - settle_timeout = SettleTimeout} = State0) -> +-spec redeliver_messages(state()) -> + state(). +redeliver_messages(#state{pendings = Pendings, + settle_timeout = SettleTimeout} = State0) -> case lookup_dlx(State0) of {not_found, State} -> %% Configured dead-letter-exchange does (still) not exist. @@ -412,6 +423,8 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, end. %% Returns queues whose queue clients take care of redelivering messages. +-spec clients_redeliver([rabbit_amqqueue:name()]) -> + [rabbit_amqqueue:name()]. 
clients_redeliver(QNames) -> Qs = lists:filter(fun(Q) -> case amqqueue:get_type(Q) of diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index bf5a1c928b1d..b652cc2bbfd4 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -48,7 +48,7 @@ init_per_suite(Config0) -> Config1 = rabbit_ct_helpers:merge_app_env( Config0, {rabbit, [{quorum_tick_interval, 1000}, {dead_letter_worker_consumer_prefetch, 2}, - {dead_letter_worker_publisher_confirm_timeout_ms, 1000} + {dead_letter_worker_publisher_confirm_timeout, 1000} ]}), Config2 = rabbit_ct_helpers:merge_app_env( Config1, {aten, [{poll_interval, 1000}]}), From 1f295c446f823dd95fbd3e2d4ad037d28bb0d24e Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 18 Jan 2022 19:29:40 +0100 Subject: [PATCH 41/97] Run dead letter TTL tests for quorum queues --- deps/rabbit/test/dead_lettering_SUITE.erl | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 5cffd1da1816..cf4fb41312a7 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -48,21 +48,17 @@ groups() -> dead_letter_headers_CC_with_routing_key, dead_letter_headers_first_death, dead_letter_headers_first_death_route - ], + dead_letter_headers_first_death, + dead_letter_ttl, + dead_letter_routing_key_cycle_ttl, + dead_letter_headers_reason_expired, + dead_letter_headers_reason_expired_per_message], Opts = [], [ {dead_letter_tests, [], [ - {classic_queue, Opts, DeadLetterTests ++ [dead_letter_ttl, - dead_letter_max_length_reject_publish_dlx, - dead_letter_routing_key_cycle_ttl, - dead_letter_headers_reason_expired, - dead_letter_headers_reason_expired_per_message]}, - {mirrored_queue, Opts, DeadLetterTests ++ [dead_letter_ttl, - dead_letter_max_length_reject_publish_dlx, - dead_letter_routing_key_cycle_ttl, - dead_letter_headers_reason_expired, - dead_letter_headers_reason_expired_per_message]}, + {classic_queue, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, + {mirrored_queue, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, {quorum_queue, Opts, DeadLetterTests} ]} ]. 
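Editor's note: the preceding patches replace the dlx worker's hard-coded ?DEFAULT_PREFETCH and ?DEFAULT_SETTLE_TIMEOUT macros with the rabbit application environment keys dead_letter_worker_consumer_prefetch (default 1000) and dead_letter_worker_publisher_confirm_timeout (default 120000 ms). A minimal sketch of how an operator could override those defaults via an advanced.config entry follows; the key names come from the diffs above, while the values shown are purely illustrative:

    %% advanced.config (sketch only; illustrative values)
    [
     {rabbit, [
               {dead_letter_worker_consumer_prefetch, 32},
               {dead_letter_worker_publisher_confirm_timeout, 60000}
              ]}
    ].
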
From 8c286cc68045a7f4942e1e85c60ed446e8bc31b9 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Wed, 19 Jan 2022 16:28:51 +0100 Subject: [PATCH 42/97] Add Prometheus metrics for dead-lettered messages > curl -s localhost:15692/metrics | grep rabbitmq_global_messages_dead_lettered \# TYPE rabbitmq_global_messages_dead_lettered_delivery_limit_total counter \# HELP rabbitmq_global_messages_dead_lettered_delivery_limit_total Total number of messages dead-lettered due to delivery-limit exceeded rabbitmq_global_messages_dead_lettered_delivery_limit_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_least_once"} 0 rabbitmq_global_messages_dead_lettered_delivery_limit_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_most_once"} 0 rabbitmq_global_messages_dead_lettered_delivery_limit_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="disabled"} 0 \# TYPE rabbitmq_global_messages_dead_lettered_expired_total counter \# HELP rabbitmq_global_messages_dead_lettered_expired_total Total number of messages dead-lettered due to message TTL exceeded rabbitmq_global_messages_dead_lettered_expired_total{queue_type="rabbit_classic_queue",dead_letter_strategy="at_most_once"} 0 rabbitmq_global_messages_dead_lettered_expired_total{queue_type="rabbit_classic_queue",dead_letter_strategy="disabled"} 0 rabbitmq_global_messages_dead_lettered_expired_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_least_once"} 0 rabbitmq_global_messages_dead_lettered_expired_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_most_once"} 0 rabbitmq_global_messages_dead_lettered_expired_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="disabled"} 0 \# TYPE rabbitmq_global_messages_dead_lettered_rejected_total counter \# HELP rabbitmq_global_messages_dead_lettered_rejected_total Total number of messages dead-lettered due to basic.reject or basic.nack rabbitmq_global_messages_dead_lettered_rejected_total{queue_type="rabbit_classic_queue",dead_letter_strategy="at_most_once"} 0 rabbitmq_global_messages_dead_lettered_rejected_total{queue_type="rabbit_classic_queue",dead_letter_strategy="disabled"} 0 rabbitmq_global_messages_dead_lettered_rejected_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_least_once"} 0 rabbitmq_global_messages_dead_lettered_rejected_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_most_once"} 0 rabbitmq_global_messages_dead_lettered_rejected_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="disabled"} 0 \# TYPE rabbitmq_global_messages_dead_lettered_confirmed_total counter \# HELP rabbitmq_global_messages_dead_lettered_confirmed_total Total number of messages dead-lettered and confirmed by target queues rabbitmq_global_messages_dead_lettered_confirmed_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_least_once"} 0 \# TYPE rabbitmq_global_messages_dead_lettered_maxlen_total counter \# HELP rabbitmq_global_messages_dead_lettered_maxlen_total Total number of messages dead-lettered due to overflow drop-head or reject-publish-dlx rabbitmq_global_messages_dead_lettered_maxlen_total{queue_type="rabbit_classic_queue",dead_letter_strategy="at_most_once"} 0 rabbitmq_global_messages_dead_lettered_maxlen_total{queue_type="rabbit_classic_queue",dead_letter_strategy="disabled"} 0 rabbitmq_global_messages_dead_lettered_maxlen_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="at_most_once"} 0 
rabbitmq_global_messages_dead_lettered_maxlen_total{queue_type="rabbit_quorum_queue",dead_letter_strategy="disabled"} 0 A few notes: * dead_letter_strategy 'disabled' means either user did not configure dead-letter-exchange or configured dead-letter-exchange does not exist. * Only time series that make sense get output. Example 1: Combination of 'at_least_once' and 'maxlen' will always be 0. Hence, we omit that time series. Example 2: 'confirmed' makes only sense with quorum queues and 'at_least_once'. Example 3: 'delivery_limit' makes only sense with quorum queues. * Users get to know *why* messages were dead-lettered. * Before this commit, there was no possibilities for users to alert based on messages being dropped from the head of the queue when overflow=drop-head. * Users can now easily create alerts: Example 1: Message gets silently dropped (i.e. dead_letter_strategy='disabled') instead of actually dead-lettered. Example 2: Detect dead-letter topology misconfigurations. Example 3: Messages expire Example 4: Messages overflow Example 5: Messages requeued too often * Stream queues by definition do not dead-letter. --- deps/rabbit/src/rabbit_amqqueue_process.erl | 26 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 35 +-- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 8 +- deps/rabbit/src/rabbit_global_counters.erl | 80 +++++- deps/rabbit/src/rabbit_quorum_queue.erl | 19 +- deps/rabbit/test/dead_lettering_SUITE.erl | 266 ++++++++++++++---- deps/rabbit/test/rabbit_fifo_SUITE.erl | 2 +- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 24 +- .../rabbit_fifo_dlx_integration_SUITE.erl | 115 +++++--- deps/rabbit/test/rabbit_fifo_int_SUITE.erl | 11 +- deps/rabbitmq_prometheus/metrics.md | 23 ++ 11 files changed, 475 insertions(+), 134 deletions(-) diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl index 0deb7370f55f..e03f99b014cc 100644 --- a/deps/rabbit/src/rabbit_amqqueue_process.erl +++ b/deps/rabbit/src/rabbit_amqqueue_process.erl @@ -728,10 +728,14 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message}, with_dlx( DLX, fun (X) -> + rabbit_global_counters:messages_dead_lettered(maxlen, rabbit_classic_queue, + at_most_once, 1), QName = qname(State), rabbit_dead_letter:publish(Message, maxlen, X, RK, QName) end, - fun () -> ok end), + fun () -> rabbit_global_counters:messages_dead_lettered(maxlen, rabbit_classic_queue, + disabled, 1) + end), %% Drop publish and nack to publisher send_reject_publish(Delivery, Delivered, State); _ -> @@ -763,6 +767,8 @@ deliver_or_enqueue(Delivery = #delivery{message = Message, {undelivered, State2 = #q{ttl = 0, dlx = undefined, backing_queue_state = BQS, msg_id_to_channel = MTC}} -> + rabbit_global_counters:messages_dead_lettered(expired, rabbit_classic_queue, + disabled, 1), {BQS1, MTC1} = discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q)), State2#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1}; {undelivered, State2 = #q{backing_queue_state = BQS}} -> @@ -804,6 +810,9 @@ maybe_drop_head(AlreadyDropped, State = #q{backing_queue = BQ, State#q.dlx, fun (X) -> dead_letter_maxlen_msg(X, State) end, fun () -> + rabbit_global_counters:messages_dead_lettered(maxlen, + rabbit_classic_queue, + disabled, 1), {_, BQS1} = BQ:drop(false, BQS), State#q{backing_queue_state = BQS1} end)); @@ -1012,11 +1021,18 @@ drop_expired_msgs(State) -> drop_expired_msgs(Now, State = #q{backing_queue_state = BQS, backing_queue = BQ }) -> ExpirePred = fun (#message_properties{expiry = Exp}) -> Now >= Exp end, + 
ExpirePredIncrement = fun(Properties) -> + ExpirePred(Properties) andalso + rabbit_global_counters:messages_dead_lettered(expired, + rabbit_classic_queue, + disabled, + 1) =:= ok + end, {Props, State1} = with_dlx( State#q.dlx, fun (X) -> dead_letter_expired_msgs(ExpirePred, X, State) end, - fun () -> {Next, BQS1} = BQ:dropwhile(ExpirePred, BQS), + fun () -> {Next, BQS1} = BQ:dropwhile(ExpirePredIncrement, BQS), {Next, State#q{backing_queue_state = BQS1}} end), ensure_ttl_timer(case Props of undefined -> undefined; @@ -1058,6 +1074,8 @@ dead_letter_msgs(Fun, Reason, X, State = #q{dlx_routing_key = RK, QName = qname(State), {Res, Acks1, BQS1} = Fun(fun (Msg, AckTag, Acks) -> + rabbit_global_counters:messages_dead_lettered(Reason, rabbit_classic_queue, + at_most_once, 1), rabbit_dead_letter:publish(Msg, Reason, X, RK, QName), [AckTag | Acks] end, [], BQS), @@ -1575,7 +1593,9 @@ handle_cast({reject, false, AckTags, ChPid}, State) -> dead_letter_rejected_msgs( AckTags, X, State1) end) end, - fun () -> ack(AckTags, ChPid, State) end)); + fun () -> rabbit_global_counters:messages_dead_lettered(rejected, rabbit_classic_queue, + disabled, length(AckTags)), + ack(AckTags, ChPid, State) end)); handle_cast({delete_exclusive, ConnPid}, State) -> log_delete_exclusive(ConnPid, State), diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 5163f4fe861c..007e922a4d27 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -101,7 +101,8 @@ apply(_, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), ra_indexes = Indexes} end, State0, Acked), - {State, []}; + {State, [{mod_call, rabbit_global_counters, messages_dead_lettered_confirmed, + [rabbit_quorum_queue, at_least_once, maps:size(Acked)]}]}; apply(_, {dlx, #checkout{consumer = Pid, prefetch = Prefetch}}, at_least_once, @@ -139,28 +140,29 @@ apply(_, Cmd, DLH, State) -> -spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. -discard(_, _, undefined, State) -> - {State, []}; -discard(Msgs, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> +discard(Msgs, Reason, undefined, State) -> + {State, [{mod_call, rabbit_global_counters, messages_dead_lettered, + [Reason, rabbit_quorum_queue, disabled, length(Msgs)]}]}; +discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> RaftIdxs = lists:filtermap( fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> {true, RaftIdx}; (_IgnorePrefixMessage) -> false - end, Msgs), + end, Msgs0), Effect = {log, RaftIdxs, fun (Log) -> Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), - DeadLetters = lists:filtermap( - fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), - {true, {Reason, Msg}}; - (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> - {true, {Reason, Msg}}; - (_IgnorePrefixMessage) -> - false - end, Msgs), - [{mod_call, Mod, Fun, Args ++ [DeadLetters]}] + Msgs = lists:filtermap( + fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> + {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), + {true, Msg}; + (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> + {true, Msg}; + (_IgnorePrefixMessage) -> + false + end, Msgs0), + [{mod_call, Mod, Fun, Args ++ [Reason, Msgs]}] end}, {State, [Effect]}; discard(Msgs, Reason, at_least_once, State0) @@ -182,7 +184,8 @@ discard(Msgs, Reason, at_least_once, State0) msg_bytes = B, ra_indexes = I} end, State0, Msgs), - {State, []}. 
+ {State, [{mod_call, rabbit_global_counters, messages_dead_lettered, + [Reason, rabbit_quorum_queue, at_least_once, length(Msgs)]}]}. -spec checkout(dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 6c689e63c75a..aa61f3542e7a 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -86,8 +86,6 @@ -type state() :: #state{}. -%%TODO Add metrics like global counters for messages routed, delivered, etc. by adding a new counter in seshat. - start_link(QRef) -> gen_server:start_link(?MODULE, QRef, [{hibernate_after, ?HIBERNATE_AFTER}]). @@ -461,12 +459,12 @@ maybe_cancel_timer(#state{timer = TRef, maybe_cancel_timer(State) -> State. -cancel_timer(#state{timer = undefined} = State) -> - State; cancel_timer(#state{timer = TRef} = State) when is_reference(TRef) -> erlang:cancel_timer(TRef, [{async, true}, {info, false}]), - State#state{timer = undefined}. + State#state{timer = undefined}; +cancel_timer(State) -> + State. format_status(_Opt, [_PDict, #state{ queue_ref = QueueRef, diff --git a/deps/rabbit/src/rabbit_global_counters.erl b/deps/rabbit/src/rabbit_global_counters.erl index 2689cca0e79e..a78c51ae19a0 100644 --- a/deps/rabbit/src/rabbit_global_counters.erl +++ b/deps/rabbit/src/rabbit_global_counters.erl @@ -31,7 +31,9 @@ publisher_created/1, publisher_deleted/1, consumer_created/1, - consumer_deleted/1 + consumer_deleted/1, + messages_dead_lettered/4, + messages_dead_lettered_confirmed/3 ]). %% PROTOCOL COUNTERS: @@ -126,11 +128,59 @@ } ]). +-define(MESSAGES_DEAD_LETTERED_EXPIRED, 1). +-define(MESSAGES_DEAD_LETTERED_REJECTED, 2). +%% The following two counters are mutually exclusive because +%% quorum queue dead-letter-strategy at-least-once is incompatible with overflow drop-head. +-define(MESSAGES_DEAD_LETTERED_MAXLEN, 3). +-define(MESSAGES_DEAD_LETTERED_CONFIRMED, 3). +-define(MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT, 4). +-define(MESSAGES_DEAD_LETTERED_COUNTERS, + [ + { + messages_dead_lettered_expired_total, ?MESSAGES_DEAD_LETTERED_EXPIRED, counter, + "Total number of messages dead-lettered due to message TTL exceeded" + }, + { + messages_dead_lettered_rejected_total, ?MESSAGES_DEAD_LETTERED_REJECTED, counter, + "Total number of messages dead-lettered due to basic.reject or basic.nack" + } + ]). +-define(MESSAGES_DEAD_LETTERED_MAXLEN_COUNTER, + { + messages_dead_lettered_maxlen_total, ?MESSAGES_DEAD_LETTERED_MAXLEN, counter, + "Total number of messages dead-lettered due to overflow drop-head or reject-publish-dlx" + }). +-define(MESSAGES_DEAD_LETTERED_CONFIRMED_COUNTER, + { + messages_dead_lettered_confirmed_total, ?MESSAGES_DEAD_LETTERED_CONFIRMED, counter, + "Total number of messages dead-lettered and confirmed by target queues" + }). +-define(MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT_COUNTER, + { + messages_dead_lettered_delivery_limit_total, ?MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT, counter, + "Total number of messages dead-lettered due to delivery-limit exceeded" + }). + boot_step() -> init([{protocol, amqp091}]), init([{protocol, amqp091}, {queue_type, rabbit_classic_queue}]), init([{protocol, amqp091}, {queue_type, rabbit_quorum_queue}]), - init([{protocol, amqp091}, {queue_type, rabbit_stream_queue}]). 
+ init([{protocol, amqp091}, {queue_type, rabbit_stream_queue}]), + init([{queue_type, rabbit_classic_queue}, {dead_letter_strategy, disabled}], + [?MESSAGES_DEAD_LETTERED_MAXLEN_COUNTER]), + init([{queue_type, rabbit_classic_queue}, {dead_letter_strategy, at_most_once}], + [?MESSAGES_DEAD_LETTERED_MAXLEN_COUNTER]), + init([{queue_type, rabbit_quorum_queue}, {dead_letter_strategy, disabled}], + [?MESSAGES_DEAD_LETTERED_MAXLEN_COUNTER, + ?MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT_COUNTER]), + init([{queue_type, rabbit_quorum_queue}, {dead_letter_strategy, at_most_once}], + [?MESSAGES_DEAD_LETTERED_MAXLEN_COUNTER, + ?MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT_COUNTER]), + init([{queue_type, rabbit_quorum_queue}, {dead_letter_strategy, at_least_once}], + [?MESSAGES_DEAD_LETTERED_CONFIRMED_COUNTER, + ?MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT_COUNTER + ]). init(Labels) -> init(Labels, []). @@ -138,13 +188,15 @@ init(Labels) -> init(Labels = [{protocol, Protocol}, {queue_type, QueueType}], Extra) -> _ = seshat_counters:new_group(?MODULE), Counters = seshat_counters:new(?MODULE, Labels, ?PROTOCOL_QUEUE_TYPE_COUNTERS ++ Extra), - persistent_term:put({?MODULE, Protocol, QueueType}, Counters), - ok; + persistent_term:put({?MODULE, Protocol, QueueType}, Counters); init(Labels = [{protocol, Protocol}], Extra) -> _ = seshat_counters:new_group(?MODULE), Counters = seshat_counters:new(?MODULE, Labels, ?PROTOCOL_COUNTERS ++ Extra), - persistent_term:put({?MODULE, Protocol}, Counters), - ok. + persistent_term:put({?MODULE, Protocol}, Counters); +init(Labels = [{queue_type, QueueType}, {dead_letter_strategy, DLS}], Extra) -> + _ = seshat_counters:new_group(?MODULE), + Counters = seshat_counters:new(?MODULE, Labels, ?MESSAGES_DEAD_LETTERED_COUNTERS ++ Extra), + persistent_term:put({?MODULE, QueueType, DLS}, Counters). overview() -> seshat_counters:overview(?MODULE). @@ -209,8 +261,20 @@ consumer_created(Protocol) -> consumer_deleted(Protocol) -> counters:add(fetch(Protocol), ?CONSUMERS, -1). +messages_dead_lettered(Reason, QueueType, DeadLetterStrategy, Num) -> + Index = case Reason of + expired -> ?MESSAGES_DEAD_LETTERED_EXPIRED; + rejected -> ?MESSAGES_DEAD_LETTERED_REJECTED; + maxlen -> ?MESSAGES_DEAD_LETTERED_MAXLEN; + delivery_limit -> ?MESSAGES_DEAD_LETTERED_DELIVERY_LIMIT + end, + counters:add(fetch(QueueType, DeadLetterStrategy), Index, Num). + +messages_dead_lettered_confirmed(rabbit_quorum_queue, at_least_once, Num) -> + counters:add(fetch(rabbit_quorum_queue, at_least_once), ?MESSAGES_DEAD_LETTERED_CONFIRMED, Num). + fetch(Protocol) -> persistent_term:get({?MODULE, Protocol}). -fetch(Protocol, QueueType) -> - persistent_term:get({?MODULE, Protocol, QueueType}). +fetch(Elem2, Elem3) -> + persistent_term:get({?MODULE, Elem2, Elem3}). diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index 84dec119de2d..f5396af31f95 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -26,7 +26,7 @@ -export([credit/4]). -export([purge/1]). -export([stateless_deliver/2, deliver/3, deliver/2]). --export([dead_letter_publish/4]). +-export([dead_letter_publish/5]). -export([queue_name/1]). -export([cluster_state/1, status/2]). -export([update_consumer_handler/8, update_consumer/9]). @@ -1301,15 +1301,20 @@ dlh_at_most_once(Exchange, RoutingKey, QName) -> MFA = {?MODULE, dead_letter_publish, [DLX, RoutingKey, QName]}, {at_most_once, MFA}. 
-dead_letter_publish(undefined, _, _, _) -> - ok; -dead_letter_publish(X, RK, QName, ReasonMsgs) -> +dead_letter_publish(X, RK, QName, Reason, Msgs) -> case rabbit_exchange:lookup(X) of {ok, Exchange} -> - [rabbit_dead_letter:publish(Msg, Reason, Exchange, RK, QName) - || {Reason, Msg} <- ReasonMsgs]; + lists:foreach(fun(Msg) -> + rabbit_dead_letter:publish(Msg, Reason, Exchange, RK, QName) + end, Msgs), + rabbit_global_counters:messages_dead_lettered(Reason, ?MODULE, at_most_once, length(Msgs)); {error, not_found} -> - ok + %% Even though dead-letter-strategy is at_most_once, + %% when configured dead-letter-exchange does not exist, + %% we increment counter for dead-letter-strategy 'disabled' because + %% 1. we know for certain that the message won't be delivered, and + %% 2. that's in line with classic queue behaviour + rabbit_global_counters:messages_dead_lettered(Reason, ?MODULE, disabled, length(Msgs)) end. find_quorum_queues(VHost) -> diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index cf4fb41312a7..63422ab0a056 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -12,8 +12,9 @@ -include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). --compile([nowarn_export_all, export_all]). +-compile([export_all, nowarn_export_all]). -import(quorum_queue_utils, [wait_for_messages/2]). @@ -53,15 +54,31 @@ groups() -> dead_letter_routing_key_cycle_ttl, dead_letter_headers_reason_expired, dead_letter_headers_reason_expired_per_message], - Opts = [], + DisabledMetricTests = [metric_maxlen, + metric_rejected, + metric_expired_queue_msg_ttl, + metric_expired_per_msg_msg_ttl], + Opts = [shuffle], [ - {dead_letter_tests, [], + {dead_letter_tests, Opts, [ - {classic_queue, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, - {mirrored_queue, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, - {quorum_queue, Opts, DeadLetterTests} - ]} - ]. + {classic_queue, Opts, [{at_most_once, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, + {disabled, Opts, DisabledMetricTests}]}, + {mirrored_queue, Opts, [{at_most_once, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, + {disabled, Opts, DisabledMetricTests}]}, + {quorum_queue, Opts, [{at_most_once, Opts, DeadLetterTests}, + {disabled, Opts, DisabledMetricTests}, + {at_least_once, Opts, DeadLetterTests -- + [ + %% dead-letter-strategy at-least-once is incompatible with overflow drop-head + dead_letter_max_length_drop_head, + dead_letter_routing_key_cycle_max_length, + dead_letter_headers_reason_maxlen, + %% tested separately in rabbit_fifo_dlx_integration_SUITE + dead_letter_missing_exchange + ]} + ] + }]}]. suite() -> [ @@ -72,8 +89,10 @@ suite() -> %% Testsuite setup/teardown. %% ------------------------------------------------------------------- -init_per_suite(Config) -> +init_per_suite(Config0) -> rabbit_ct_helpers:log_environment(), + Config = rabbit_ct_helpers:merge_app_env( + Config0, {rabbit, [{dead_letter_worker_publisher_confirm_timeout, 2000}]}), rabbit_ct_helpers:run_setup_steps(Config). 
end_per_suite(Config) -> @@ -84,27 +103,53 @@ init_per_group(classic_queue, Config) -> Config, [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, {queue_durable, false}]); +init_per_group(mirrored_queue, Config) -> + rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, + <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), + Config1 = rabbit_ct_helpers:set_config( + Config, [{is_mirrored, true}, + {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, + {queue_durable, false}]), + rabbit_ct_helpers:run_steps(Config1, []); init_per_group(quorum_queue, Config) -> case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of ok -> rabbit_ct_helpers:set_config( Config, [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-delivery-limit">>, long, 100}, - %%TODO add at-least-once tests - {<<"x-dead-letter-strategy">>, longstr, <<"at-most-once">>}]}, + {<<"x-delivery-limit">>, long, 100}]}, {queue_durable, true}]); Skip -> Skip end; -init_per_group(mirrored_queue, Config) -> - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, - <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), - Config1 = rabbit_ct_helpers:set_config( - Config, [{is_mirrored, true}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, - {queue_durable, false}]), - rabbit_ct_helpers:run_steps(Config1, []); +init_per_group(at_most_once, Config) -> + case outer_group_name(Config) of + quorum_queue -> + QueueArgs0 = rabbit_ct_helpers:get_config(Config, queue_args), + QueueArgs = lists:keystore(<<"x-dead-letter-strategy">>, + 1, + QueueArgs0, + {<<"x-dead-letter-strategy">>, longstr, <<"at-most-once">>}), + rabbit_ct_helpers:set_config(Config, {queue_args, QueueArgs}); + _ -> + Config + end; +init_per_group(at_least_once, Config) -> + case outer_group_name(Config) of + quorum_queue -> + QueueArgs0 = rabbit_ct_helpers:get_config(Config, queue_args), + QueueArgs1 = lists:keystore(<<"x-dead-letter-strategy">>, + 1, + QueueArgs0, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}), + QueueArgs = lists:keystore(<<"x-overflow">>, + 1, + QueueArgs1, + {<<"x-overflow">>, longstr, <<"reject-publish">>}), + rabbit_ct_helpers:set_config(Config, {queue_args, QueueArgs}); + _ -> + Config + end; init_per_group(Group, Config) -> case lists:member({group, Group}, all()) of true -> @@ -137,12 +182,14 @@ init_per_testcase(Testcase, Config) -> Q3 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_3", [Group, Testcase])), Policy = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_policy", [Group, Testcase])), DLXExchange = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_dlx_exchange", - [Group, Testcase])), + [Group, Testcase])), + Counters = get_global_counters(Config), Config1 = rabbit_ct_helpers:set_config(Config, [{dlx_exchange, DLXExchange}, {queue_name, Q}, {queue_name_dlx, Q2}, {queue_name_dlx_2, Q3}, - {policy, Policy}]), + {policy, Policy}, + {counters, Counters}]), rabbit_ct_helpers:testcase_started(Config1, Testcase). 
end_per_testcase(Testcase, Config) -> @@ -159,7 +206,7 @@ end_per_testcase(Testcase, Config) -> %% %% Messages are dead-lettered when: %% 1) message is rejected with basic.reject or basic.nack with requeue=false -%% 2) message ttl expires (not implemented in quorum queues) +%% 2) message ttl expires %% 3) queue length limit is exceeded (only drop-head implemented in quorum queues) %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -201,7 +248,8 @@ dead_letter_nack(Config) -> consume_empty(Ch, QName), %% Consume the first two messages from the dead letter queue consume(Ch, DLXQName, [P1, P2]), - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(3, counted(messages_dead_lettered_rejected_total, Config)). %% 1) message is rejected with basic.nack, requeue=false and multiple=true dead_letter_multiple_nack(Config) -> @@ -228,7 +276,8 @@ dead_letter_multiple_nack(Config) -> consume(Ch, DLXQName, [P1, P2, P3]), consume_empty(Ch, DLXQName), %% Queue is empty - consume_empty(Ch, QName). + consume_empty(Ch, QName), + ?assertEqual(3, counted(messages_dead_lettered_rejected_total, Config)). %% 1) message is rejected with basic.nack, requeue=true and multiple=false. Dead-lettering does not take place dead_letter_nack_requeue(Config) -> @@ -257,7 +306,8 @@ dead_letter_nack_requeue(Config) -> consume(Ch, QName, [P3]), consume_empty(Ch, QName), %% Dead letter queue is empty - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(0, counted(messages_dead_lettered_rejected_total, Config)). %% 1) message is rejected with basic.nack, requeue=true and multiple=true. Dead-lettering does not take place dead_letter_nack_requeue_multiple(Config) -> @@ -286,7 +336,8 @@ dead_letter_nack_requeue_multiple(Config) -> consume(Ch, QName, [P1, P2, P3]), consume_empty(Ch, QName), %% Dead letter queue is empty - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(0, counted(messages_dead_lettered_rejected_total, Config)). %% 1) message is rejected with basic.reject, requeue=false dead_letter_reject(Config) -> @@ -313,7 +364,8 @@ dead_letter_reject(Config) -> consume_empty(Ch, DLXQName), %% Consume the last two from the queue _ = consume(Ch, QName, [P2, P3]), - consume_empty(Ch, QName). + consume_empty(Ch, QName), + ?assertEqual(1, counted(messages_dead_lettered_rejected_total, Config)). %% 1) Many messages are rejected. They get dead-lettered in correct order. dead_letter_reject_many(Config) -> @@ -343,7 +395,8 @@ dead_letter_reject_many(Config) -> %% Consume all messages from dead letter queue in correct order (i.e. from payload <<1>> to <<100>>) wait_for_messages(Config, [[DLXQName, <<"100">>, <<"100">>, <<"0">>]]), _ = consume(Ch, DLXQName, Payloads), - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(100, counted(messages_dead_lettered_rejected_total, Config)). %% 1) Message is rejected with basic.reject, requeue=true. Dead-lettering does not take place. dead_letter_reject_requeue(Config) -> @@ -369,7 +422,8 @@ dead_letter_reject_requeue(Config) -> _ = consume(Ch, QName, [P1, P2, P3]), consume_empty(Ch, QName), %% Dead letter is empty - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(0, counted(messages_dead_lettered_rejected_total, Config)). 
%% 2) Message ttl expires dead_letter_ttl(Config) -> @@ -383,7 +437,8 @@ dead_letter_ttl(Config) -> publish(Ch, QName, [P1]), wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]), consume_empty(Ch, QName), - [_] = consume(Ch, DLXQName, [P1]). + [_] = consume(Ch, DLXQName, [P1]), + ?assertEqual(1, counted(messages_dead_lettered_expired_total, Config)). %% 3) The queue length limit is exceeded, message dropped is dead lettered. %% Default strategy: drop-head @@ -407,7 +462,8 @@ dead_letter_max_length_drop_head(Config) -> %% Consume the dropped ones from the dead letter queue wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]), _ = consume(Ch, DLXQName, [P1, P2]), - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(2, counted(messages_dead_lettered_maxlen_total, Config)). %% Another strategy: reject-publish-dlx dead_letter_max_length_reject_publish_dlx(Config) -> @@ -432,7 +488,8 @@ dead_letter_max_length_reject_publish_dlx(Config) -> %% Consume the dropped ones from the dead letter queue wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]), _ = consume(Ch, DLXQName, [P2, P3]), - consume_empty(Ch, DLXQName). + consume_empty(Ch, DLXQName), + ?assertEqual(2, counted(messages_dead_lettered_maxlen_total, Config)). %% Dead letter exchange does not have to be declared when the queue is declared, but it should %% exist by the time messages need to be dead-lettered; if it is missing then, the messages will @@ -484,8 +541,9 @@ dead_letter_missing_exchange(Config) -> %% Consume the rejected message from the dead letter queue wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]), {#'basic.get_ok'{}, #amqp_msg{payload = P2}} = - amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), - consume_empty(Ch, DLXQName). + amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), + consume_empty(Ch, DLXQName), + ?assertEqual(1, counted(messages_dead_lettered_rejected_total, Config)). %% %% ROUTING @@ -520,8 +578,13 @@ dead_letter_routing_key(Config) -> amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1, multiple = false, requeue = false}), - %% Both queues are empty as the message could not been routed in the dlx exchange - wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), + case group_name(Config) of + at_most_once -> + %% Both queues are empty as the message could not been routed in the dlx exchange + wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]); + at_least_once -> + wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"0">>]]) + end, consume_empty(Ch, QName), consume_empty(Ch, DLXQName), %% Bind the dlx queue with the original queue routing key @@ -536,10 +599,17 @@ dead_letter_routing_key(Config) -> multiple = false, requeue = false}), %% Message can now be routed using the recently binded key - wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]), - consume(Ch, DLXQName, [P2]), - consume_empty(Ch, QName). - + case group_name(Config) of + at_most_once -> + wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]), + consume(Ch, DLXQName, [P2]); + at_least_once -> + wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]), + consume(Ch, DLXQName, [P1, P2]), + ?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)) + end, + consume_empty(Ch, QName), + ?assertEqual(2, counted(messages_dead_lettered_rejected_total, Config)). 
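The group_name/1 branch above captures the behavioural difference when the dead-letter exchange cannot route: a sketch of the expected source-queue totals, assuming the wait_for_messages/2 rows read [QueueName, Messages, MessagesReady, MessagesUnacked] (module name illustrative):

-module(unroutable_dlx_sketch).
-export([source_queue_total/1]).

%% With at-most-once the message is gone once it is nacked; with
%% at-least-once + reject-publish the quorum queue keeps the dead-lettered
%% message accounted against the source queue until a target queue confirms.
source_queue_total(at_most_once)  -> 0;
source_queue_total(at_least_once) -> 1.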
%% 4a) If a specific routing key was not set for the queue, use routing keys added by the %% CC and BCC headers @@ -648,9 +718,10 @@ dead_letter_routing_key_cycle_max_length(Config) -> amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}), %% Queue is empty, P1 has not been republished in a loop wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), - consume_empty(Ch, QName). + consume_empty(Ch, QName), + ?assertEqual(1, counted(messages_dead_lettered_maxlen_total, Config)). -%% 7) Message is dead lettered due to message ttl. Not yet implemented in quorum queues +%% 7) Message is dead lettered due to message ttl. dead_letter_routing_key_cycle_ttl(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Args = ?config(queue_args, Config), @@ -667,7 +738,8 @@ dead_letter_routing_key_cycle_ttl(Config) -> %% Publish messages publish(Ch, QName, [P1, P2]), wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), - consume_empty(Ch, QName). + consume_empty(Ch, QName), + ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)). %% 5) Messages continue to be republished as there are manual rejections dead_letter_routing_key_cycle_with_reject(Config) -> @@ -695,7 +767,8 @@ dead_letter_routing_key_cycle_with_reject(Config) -> requeue = false}), %% Message its being republished wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]), - [_] = consume(Ch, QName, [P]). + [_] = consume(Ch, QName, [P]), + ?assertEqual(2, counted(messages_dead_lettered_rejected_total, Config)). %% %% For any given queue, a DLX can be defined by clients using the queue's arguments, @@ -1205,6 +1278,74 @@ dead_letter_headers_first_death_route(Config) -> _ = consume(Ch, DLXRejectedQName, [P2]), consume_empty(Ch, DLXRejectedQName). +metric_maxlen(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = QName, + arguments = [{<<"x-max-length">>, long, 1}, + {<<"x-overflow">>, longstr, <<"drop-head">>} | + ?config(queue_args, Config)], + durable = ?config(queue_durable, Config)}), + %% Publish 1000 messages + Payloads = lists:map(fun erlang:integer_to_binary/1, lists:seq(1, 1000)), + publish(Ch, QName, Payloads), + ?awaitMatch(999, counted(messages_dead_lettered_maxlen_total, Config), 3000, 300). 
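metric_maxlen/1 expects 999 dead-lettered messages because drop-head retains only x-max-length messages. A minimal sketch of that arithmetic (module name illustrative):

-module(maxlen_sketch).
-export([expected_dead_lettered/2]).

%% With overflow drop-head and no consumers, publishing Published messages to
%% a queue limited to MaxLen dead-letters the difference: 1000 publishes with
%% x-max-length = 1 give the 999 awaited above.
expected_dead_lettered(Published, MaxLen) when Published >= MaxLen ->
    Published - MaxLen;
expected_dead_lettered(_Published, _MaxLen) ->
    0.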
+ +metric_rejected(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = QName, + arguments = ?config(queue_args, Config), + durable = ?config(queue_durable, Config)}), + %% Publish 1000 messages + Payloads = lists:map(fun erlang:integer_to_binary/1, lists:seq(1, 1000)), + publish(Ch, QName, Payloads), + wait_for_messages(Config, [[QName, <<"1000">>, <<"1000">>, <<"0">>]]), + + %% Reject all messages using same consumer + amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName}, self()), + CTag = receive #'basic.consume_ok'{consumer_tag = C} -> C end, + [begin + receive {#'basic.deliver'{consumer_tag = CTag, delivery_tag = DTag}, #amqp_msg{payload = P}} -> + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag, requeue = false}) + after 5000 -> + amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), + exit(timeout) + end + end || P <- Payloads], + amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), + ?awaitMatch(1000, counted(messages_dead_lettered_rejected_total, Config), 3000, 300). + +metric_expired_queue_msg_ttl(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = QName, + arguments = [{<<"x-message-ttl">>, long, 0} | + ?config(queue_args, Config)], + durable = ?config(queue_durable, Config)}), + %% Publish 1000 messages + Payloads = lists:map(fun erlang:integer_to_binary/1, lists:seq(1, 1000)), + publish(Ch, QName, Payloads), + ?awaitMatch(1000, counted(messages_dead_lettered_expired_total, Config), 3000, 300). + +metric_expired_per_msg_msg_ttl(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = QName, + arguments = ?config(queue_args, Config), + durable = ?config(queue_durable, Config)}), + %% Publish 1000 messages + Payloads = lists:map(fun erlang:integer_to_binary/1, lists:seq(1, 1000)), + [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, + #amqp_msg{payload = Payload, + props = #'P_basic'{expiration = <<"0">>}}) + || Payload <- Payloads], + ?awaitMatch(1000, counted(messages_dead_lettered_expired_total, Config), 3000, 300). 
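The two expiry tests above exercise the two TTL flavours, and both feed messages_dead_lettered_expired_total. A condensed sketch of the difference, using only the AMQP client calls already present in the tests (module name illustrative; channel setup assumed to be done by the caller):

-module(ttl_sketch).
-include_lib("amqp_client/include/amqp_client.hrl").
-export([declare_with_queue_ttl/2, publish_with_per_message_ttl/3]).

%% Queue-level TTL: an x-message-ttl queue argument applied at declare time.
declare_with_queue_ttl(Ch, Q) ->
    amqp_channel:call(Ch, #'queue.declare'{queue = Q,
                                           arguments = [{<<"x-message-ttl">>, long, 0}]}).

%% Per-message TTL: the expiration basic property set on each publish.
publish_with_per_message_ttl(Ch, Q, Payload) ->
    amqp_channel:call(Ch, #'basic.publish'{routing_key = Q},
                      #amqp_msg{payload = Payload,
                                props = #'P_basic'{expiration = <<"0">>}}).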
+ %%%%%%%%%%%%%%%%%%%%%%%% %% Test helpers %%%%%%%%%%%%%%%%%%%%%%%% @@ -1222,8 +1363,9 @@ declare_dead_letter_queues(Ch, Config, QName, DLXQName, ExtraArgs) -> %% Declare queue DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}, {<<"x-dead-letter-routing-key">>, longstr, DLXQName}], - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args ++ ExtraArgs, durable = Durable}), - + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, + arguments = DeadLetterArgs ++ Args ++ ExtraArgs, + durable = Durable}), %% Declare and bind DLX queue #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}), #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName, @@ -1243,7 +1385,7 @@ publish(Ch, QName, Payloads, Headers) -> consume(Ch, QName, Payloads) -> [begin {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} = - amqp_channel:call(Ch, #'basic.get'{queue = QName}), + amqp_channel:call(Ch, #'basic.get'{queue = QName}), DTag end || Payload <- Payloads]. @@ -1256,3 +1398,31 @@ sync_mirrors(QName, Config) -> rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]); _ -> ok end. + +get_global_counters(Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_global_counters, overview, []). + +%% Returns the delta of Metric between testcase start and now. +counted(Metric, Config) -> + QueueType = queue_type(outer_group_name(Config)), + Strategy = group_name(Config), + OldCounters = ?config(counters, Config), + Counters = get_global_counters(Config), + metric(QueueType, Strategy, Metric, Counters) - + metric(QueueType, Strategy, Metric, OldCounters). + +metric(QueueType, Strategy, Metric, Counters) -> + Metrics = maps:get([{queue_type, QueueType}, {dead_letter_strategy, Strategy}], Counters), + maps:get(Metric, Metrics). + +group_name(Config) -> + proplists:get_value(name, ?config(tc_group_properties, Config)). + +outer_group_name(Config) -> + [{name, Name} | _] = hd(?config(tc_group_path, Config)), + Name. + +queue_type(quorum_queue) -> + rabbit_quorum_queue; +queue_type(_) -> + rabbit_classic_queue. diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index bf372e3267a3..4d16bec338db 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -367,7 +367,7 @@ return_checked_out_limit_test(_) -> {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}, {aux, active}]} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), - {#rabbit_fifo{} = State, ok, [_ReleaseEff]} = + {#rabbit_fifo{} = State, ok, [_ModCallEffDeadLetterCounter | _ReleaseEff]} = apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId2]), State2), ?assertEqual(0, rabbit_fifo:query_messages_total(State)), ok. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 1da314b225d0..34623f21624b 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -54,7 +54,9 @@ end_per_testcase(_TestCase, _Config) -> handler_undefined(_Config) -> S = rabbit_fifo_dlx:init(), Handler = undefined, - ?assertEqual({S, []}, rabbit_fifo_dlx:discard([make_msg(1)], because, Handler, S)), + ?assertEqual({S, [{mod_call, rabbit_global_counters, messages_dead_lettered, + [because, rabbit_quorum_queue, disabled, 1]}]}, + rabbit_fifo_dlx:discard([make_msg(1)], because, Handler, S)), ok. 
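The counted/2 helper added above reports the per-testcase delta of a global counter. A sketch of the lookup, based on the assumed shape of rabbit_global_counters:overview/0 inferred from the maps:get calls in metric/4 (module name illustrative):

-module(counters_delta_sketch).
-export([delta/4]).

%% Assumed overview shape: a map keyed by the label set, holding a map of
%% counter name to value, e.g.
%%   #{[{queue_type, rabbit_quorum_queue},
%%      {dead_letter_strategy, at_least_once}] =>
%%         #{messages_dead_lettered_expired_total => 3, ...}}
delta(Metric, Labels, OldOverview, NewOverview) ->
    maps:get(Metric, maps:get(Labels, NewOverview)) -
        maps:get(Metric, maps:get(Labels, OldOverview)).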
handler_at_most_once(_Config) -> @@ -74,7 +76,9 @@ discard_dlx_consumer(_Config) -> discard_checkout_message_bytes => 0}, rabbit_fifo_dlx:overview(S0)), %% message without dlx consumer - {S1, []} = rabbit_fifo_dlx:discard([make_msg(1)], because, Handler, S0), + {S1, [{mod_call, rabbit_global_counters, messages_dead_lettered, + [because, rabbit_quorum_queue, at_least_once, 1]}]} = + rabbit_fifo_dlx:discard([make_msg(1)], because, Handler, S0), {S2, []} = rabbit_fifo_dlx:checkout(Handler, S1), ?assertEqual(#{num_discarded => 1, num_discard_checked_out => 0, @@ -92,7 +96,7 @@ discard_dlx_consumer(_Config) -> ?assertMatch([{log, [1], _}], DeliveryEffects0), %% more messages than dlx consumer's prefetch - {S5, []} = rabbit_fifo_dlx:discard([make_msg(3), make_msg(4)], because, Handler, S4), + {S5, [_ModCallGlobalCounter]} = rabbit_fifo_dlx:discard([make_msg(3), make_msg(4)], because, Handler, S4), {S6, DeliveryEffects1} = rabbit_fifo_dlx:checkout(Handler, S5), ?assertEqual(#{num_discarded => 1, num_discard_checked_out => 2, @@ -103,7 +107,9 @@ discard_dlx_consumer(_Config) -> %% dlx consumer acks messages Settle = rabbit_fifo_dlx:make_settle([0,1]), - {S7, []} = rabbit_fifo_dlx:apply(meta(5), Settle, Handler, S6), + {S7, [{mod_call, rabbit_global_counters, messages_dead_lettered_confirmed, + [rabbit_quorum_queue, at_least_once, 2]}]} = + rabbit_fifo_dlx:apply(meta(5), Settle, Handler, S6), {S8, DeliveryEffects2} = rabbit_fifo_dlx:checkout(Handler, S7), ?assertEqual(#{num_discarded => 0, num_discard_checked_out => 1, @@ -135,11 +141,13 @@ switch_strategies(_Config) -> QRes = #resource{virtual_host = <<"/">>, kind = queue, name = <<"blah">>}, - Handler0 = undefined, - Handler1 = at_least_once, + application:set_env(rabbit, dead_letter_worker_consumer_prefetch, 1), + application:set_env(rabbit, dead_letter_worker_publisher_confirm_timeout, 1000), {ok, _} = rabbit_fifo_dlx_sup:start_link(), S0 = rabbit_fifo_dlx:init(), + Handler0 = undefined, + Handler1 = at_least_once, %% Switching from undefined to at_least_once should start dlx consumer. 
{S1, Effects} = rabbit_fifo_dlx:update_config(Handler0, Handler1, QRes, S0), ?assertEqual([{aux, {dlx, setup}}], Effects), @@ -162,7 +170,9 @@ last_consumer_wins(_Config) -> S0 = rabbit_fifo_dlx:init(), Handler = at_least_once, Msgs = [make_msg(1), make_msg(2), make_msg(3), make_msg(4)], - {S1, []} = rabbit_fifo_dlx:discard(Msgs, because, Handler, S0), + {S1, [{mod_call, rabbit_global_counters, messages_dead_lettered, + [because, rabbit_quorum_queue, at_least_once, 4]}]} = + rabbit_fifo_dlx:discard(Msgs, because, Handler, S0), Checkout = rabbit_fifo_dlx:make_checkout(self(), 10), {S2, []} = rabbit_fifo_dlx:apply(meta(5), Checkout, Handler, S1), {S3, DeliveryEffects0} = rabbit_fifo_dlx:checkout(Handler, S2), diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index b652cc2bbfd4..444024d77610 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -25,18 +25,18 @@ all() -> groups() -> [ - {single_node, [], [ - expired, - rejected, - delivery_limit, - target_queue_not_bound, - target_queue_deleted, - dlx_missing, - cycle, - stats, - drop_head_falls_back_to_at_most_once, - switch_strategy - ]}, + {single_node, [shuffle], [ + expired, + rejected, + delivery_limit, + target_queue_not_bound, + target_queue_deleted, + dlx_missing, + cycle, + stats, + drop_head_falls_back_to_at_most_once, + switch_strategy + ]}, {cluster_size_3, [], [ many_target_queues, single_dlx_worker @@ -89,16 +89,19 @@ merge_app_env(Config) -> init_per_testcase(Testcase, Config) -> Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), - Q = rabbit_data_coercion:to_binary(Testcase), + T = rabbit_data_coercion:to_binary(Testcase), + Counters = get_global_counters(Config1), Config2 = rabbit_ct_helpers:set_config(Config1, - [{source_queue, <>}, - {dead_letter_exchange, <>}, - {target_queue_1, <>}, - {target_queue_2, <>}, - {target_queue_3, <>}, - {target_queue_4, <>}, - {target_queue_5, <>}, - {target_queue_6, <>} + [{source_queue, <>}, + {dead_letter_exchange, <>}, + {target_queue_1, <>}, + {target_queue_2, <>}, + {target_queue_3, <>}, + {target_queue_4, <>}, + {target_queue_5, <>}, + {target_queue_6, <>}, + {policy, <>}, + {counters, Counters} ]), rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()). @@ -149,7 +152,9 @@ expired(Config) -> ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), 1000), - assert_dlx_headers(Headers, <<"expired">>, SourceQ). + assert_dlx_headers(Headers, <<"expired">>, SourceQ), + ?assertEqual(1, counted(messages_dead_lettered_expired_total, Config)), + eventually(?_assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config))). %% Test that at-least-once dead-lettering works for message dead-lettered due to rejected by consumer. rejected(Config) -> @@ -164,7 +169,9 @@ rejected(Config) -> ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), 1000), - assert_dlx_headers(Headers, <<"rejected">>, SourceQ). + assert_dlx_headers(Headers, <<"rejected">>, SourceQ), + ?assertEqual(1, counted(messages_dead_lettered_rejected_total, Config)), + eventually(?_assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config))). %% Test that at-least-once dead-lettering works for message dead-lettered due to delivery-limit exceeded. 
delivery_limit(Config) -> @@ -179,7 +186,9 @@ delivery_limit(Config) -> ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), 1000), - assert_dlx_headers(Headers, <<"delivery_limit">>, SourceQ). + assert_dlx_headers(Headers, <<"delivery_limit">>, SourceQ), + ?assertEqual(1, counted(messages_dead_lettered_delivery_limit_total, Config)), + eventually(?_assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config))). assert_dlx_headers(Headers, Reason, SourceQ) -> ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), @@ -228,6 +237,8 @@ target_queue_not_bound(Config) -> dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), consistently(?_assertMatch([{1, _}], dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + ?assertEqual(1, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(0, counted(messages_dead_lettered_confirmed_total, Config)), %% Fix dead-letter toplology misconfiguration. bind_queue(Ch, TargetQ, DLX, <<"k1">>), %% Binding from target queue to DLX is now present. @@ -237,7 +248,9 @@ target_queue_not_bound(Config) -> 500, 10), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, payload = Msg}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), + ?assertEqual(1, counted(messages_dead_lettered_expired_total, Config)), + eventually(?_assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config))). %% Test that message is not lost when target queue gets deleted %% because dead-letter routing topology should always be respected. @@ -286,7 +299,9 @@ target_queue_deleted(Config) -> eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), 500, 5), eventually(?_assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))). + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)). %% Test that message is not lost when configured dead-letter exchange does not exist. %% Once the exchange gets declared, the message is delivered to the target queue @@ -326,7 +341,9 @@ dlx_missing(Config) -> 500, 8), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, payload = Msg}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), + ?assertEqual(1, counted(messages_dead_lettered_expired_total, Config)), + eventually(?_assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config))). %% Test that message is not lost when it cycles. 
%% Once the cycle is resolved, the message is delivered to the target queue and acked to @@ -336,6 +353,7 @@ cycle(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), + PolicyName = ?config(policy, Config), declare_queue(Ch, SourceQ, [ {<<"x-dead-letter-exchange">>, longstr, <<"">>}, {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, @@ -358,13 +376,17 @@ cycle(Config) -> %% Fix the cycle such that dead-lettering flows like this: %% source queue -> default exchange -> target queue declare_queue(Ch, TargetQ, []), - ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, PolicyName, + SourceQ, <<"queues">>, [{<<"dead-letter-routing-key">>, TargetQ}]), eventually(?_assertEqual([{0, 0}], dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 8), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})). + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})), + ok = rabbit_ct_broker_helpers:clear_policy(Config, Server, PolicyName), + ?assertEqual(1, counted(messages_dead_lettered_expired_total, Config)), + eventually(?_assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config))). %% Test that rabbit_fifo_dlx tracks statistics correctly. stats(Config) -> @@ -407,6 +429,8 @@ stats(Config) -> num_messages := 10 }], dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), + ?assertEqual(10, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(0, counted(messages_dead_lettered_confirmed_total, Config)), %% Fix dead-letter toplology misconfiguration. bind_queue(Ch, TargetQ, DLX, <<"k1">>), %% Binding from target queue to DLX is now present. @@ -425,7 +449,8 @@ stats(Config) -> dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), [?assertMatch({#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{expiration = undefined}, payload = Msg}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})) || _ <- lists:seq(1, 10)]. + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})) || _ <- lists:seq(1, 10)], + ?assertEqual(10, counted(messages_dead_lettered_confirmed_total, Config)). %% Test that configuring overflow (default) drop-head will fall back to %% dead-letter-strategy at-most-once despite configuring at-least-once. 
@@ -452,6 +477,7 @@ switch_strategy(Config) -> SourceQ = ?config(source_queue, Config), RaName = ra_name(SourceQ), DLX = ?config(dead_letter_exchange, Config), + PolicyName = ?config(policy, Config), declare_queue(Ch, SourceQ, [ {<<"x-dead-letter-exchange">>, longstr, DLX}, {<<"x-overflow">>, longstr, <<"reject-publish">>}, @@ -459,7 +485,8 @@ switch_strategy(Config) -> ]), %% default strategy is at-most-once assert_active_dlx_workers(0, Config, Server), - ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, PolicyName, + SourceQ, <<"queues">>, [{<<"dead-letter-strategy">>, <<"at-least-once">>}]), assert_active_dlx_workers(1, Config, Server), [ok = amqp_channel:cast(Ch, @@ -479,7 +506,8 @@ switch_strategy(Config) -> num_messages := 5 }], dirty_query([Server], RaName, fun rabbit_fifo:overview/1))), - ok = rabbit_ct_broker_helpers:set_policy(Config, Server, <<"my-policy">>, SourceQ, <<"queues">>, + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, PolicyName, + SourceQ, <<"queues">>, [{<<"dead-letter-strategy">>, <<"at-most-once">>}]), assert_active_dlx_workers(0, Config, Server), ?assertMatch( @@ -490,7 +518,10 @@ switch_strategy(Config) -> discard_message_bytes := 0, num_messages := 0 }], - dirty_query([Server], RaName, fun rabbit_fifo:overview/1)). + dirty_query([Server], RaName, fun rabbit_fifo:overview/1)), + ok = rabbit_ct_broker_helpers:clear_policy(Config, Server, PolicyName), + ?assertEqual(5, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(0, counted(messages_dead_lettered_confirmed_total, Config)). %% Test that %% 1. Message is only acked to source queue once publisher confirms got received from **all** target queues. @@ -630,7 +661,9 @@ many_target_queues(Config) -> amqp_channel:call(Ch, #'basic.get'{queue = TargetQ5}))), %%TODO why is the 1st message (m1) a duplicate? ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ6}), 2, 200). + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ6}), 2, 200), + ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)). %% Test that there is a single active rabbit_fifo_dlx_worker that is co-located with the quorum queue leader. single_dlx_worker(Config) -> @@ -728,3 +761,17 @@ eventually({Line, Assertion} = TestObj, PollInterval, PollCount) -> timer:sleep(PollInterval), eventually(TestObj, PollInterval, PollCount - 1) end. + +get_global_counters(Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_global_counters, overview, []). + +%% Returns the delta of Metric between testcase start and now. +counted(Metric, Config) -> + OldCounters = ?config(counters, Config), + Counters = get_global_counters(Config), + metric(Metric, Counters) - + metric(Metric, OldCounters). + +metric(Metric, Counters) -> + Metrics = maps:get([{queue_type, rabbit_quorum_queue}, {dead_letter_strategy, at_least_once}], Counters), + maps:get(Metric, Metrics). 
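The assertion at the end of many_target_queues/1 implies that messages_dead_lettered_confirmed_total counts source-queue messages once all target queues have confirmed, not one increment per target queue. A trivial sketch of that expectation (module name illustrative):

-module(confirm_count_sketch).
-export([expected_confirmed/2]).

%% Two messages fanned out to several target queues bump the confirmed
%% counter by two, as asserted above.
expected_confirmed(NumMessages, _NumTargetQueues) ->
    NumMessages.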
diff --git a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl index ec40d599f578..65464d8f05e4 100644 --- a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl @@ -375,7 +375,7 @@ discard(Config) -> machine => {module, rabbit_fifo, #{queue_resource => discard, dead_letter_handler => - {?MODULE, dead_letter_handler, [self()]}}}}, + {at_most_once, {?MODULE, dead_letter_handler, [self()]}}}}}, _ = rabbit_quorum_queue:start_server(Conf), ok = ra:trigger_election(ServerId), _ = ra:members(ServerId), @@ -387,8 +387,9 @@ discard(Config) -> F3 = discard_next_delivery(F2, 5000), {empty, _F4} = rabbit_fifo_client:dequeue(<<"tag1">>, settled, F3), receive - {dead_letter, Letters} -> - [{_, msg1}] = Letters, + {dead_letter, Reason, Letters} -> + [msg1] = Letters, + rejected = Reason, ok after 500 -> flush(), @@ -510,8 +511,8 @@ test_queries(Config) -> rabbit_quorum_queue:stop_server(ServerId), ok. -dead_letter_handler(Pid, Msgs) -> - Pid ! {dead_letter, Msgs}. +dead_letter_handler(Pid, Reason, Msgs) -> + Pid ! {dead_letter, Reason, Msgs}. dequeue(Config) -> ClusterName = ?config(cluster_name, Config), diff --git a/deps/rabbitmq_prometheus/metrics.md b/deps/rabbitmq_prometheus/metrics.md index 8a8a64390964..a5b05c2e0020 100644 --- a/deps/rabbitmq_prometheus/metrics.md +++ b/deps/rabbitmq_prometheus/metrics.md @@ -67,6 +67,29 @@ To generate these: | rabbitmq_global_publishers | Publishers currently connected | | rabbitmq_global_consumers | Consumers currently connected | +#### Dead letter global counters + +| Metric | Description | +| --- | --- | +| rabbitmq_global_messages_dead_lettered_confirmed_total | Total number of messages dead-lettered and confirmed by target queues | +| rabbitmq_global_messages_dead_lettered_delivery_limit_total | Total number of messages dead-lettered due to delivery-limit exceeded | +| rabbitmq_global_messages_dead_lettered_expired_total | Total number of messages dead-lettered due to message TTL exceeded | +| rabbitmq_global_messages_dead_lettered_maxlen_total | Total number of messages dead-lettered due to overflow drop-head or reject-publish-dlx | +| rabbitmq_global_messages_dead_lettered_rejected_total | Total number of messages dead-lettered due to basic.reject or basic.nack | + +Metrics `rabbitmq_global_messages_dead_lettered_*` have labels `queue_type` and `dead_letter_strategy`. + +Label `queue_type` denotes the type of queue messages were discarded from. It can have value +* `rabbit_classic_queue`, or +* `rabbit_quorum_queue` + +(Queue type `rabbit_stream_queue` does not dead letter messages.) + +Label `dead_letter_strategy` can have value +* `disabled` if queue has no dead-letter-exchange configured or if configured dead-letter-exchange does not exist implying messages get dropped, or +* `at_most_once` if queue's configured dead-lettered-exchange exists, or +* `at_least_once` if queue type is `rabbit_quorum_queue` with configured `dead-letter-exchange` and `dead-letter-strategy` set to `at-least-once` and `overflow` set to `reject-publish`. + #### Stream global counters These metrics are specific to the stream protocol. From bc0cc29a2704ff8c9555efee27e97f956dd35e4d Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 21 Jan 2022 19:32:13 +0100 Subject: [PATCH 43/97] Emit release cursor when expiring messages Before this commit, when message TTL was set and messages were only enqueued, a release cursor was never emitted continuously increasing disk usage. 
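A sketch of the condition this commit threads through checkout/5 and the dequeue path in the diff below (function name illustrative): once either a message was dropped by the length limit or expired by TTL, the state goes through update_smallest_raft_index/4 again, which is what gives Ra the opportunity to emit a release cursor and truncate its log for TTL-only workloads.

-module(release_cursor_sketch).
-export([needs_index_reevaluation/2]).

%% Mirrors the case {DroppedMsg, ExpiredMsg} of {false, false} -> ...; _ -> ...
%% pattern introduced below.
needs_index_reevaluation(DroppedMsg, ExpiredMsg) ->
    DroppedMsg orelse ExpiredMsg.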
--- deps/rabbit/src/rabbit_fifo.erl | 86 +++++++++++++------------- deps/rabbit/test/rabbit_fifo_SUITE.erl | 15 +++++ 2 files changed, 59 insertions(+), 42 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 6904be3ccf1c..7725576bf56d 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -384,7 +384,7 @@ apply(#{index := Index, State1 = update_consumer(ConsumerId, ConsumerMeta, {once, 1, simple_prefetch}, 0, State0), - {success, _, MsgId, Msg, State2, Effects0} = checkout_one(Meta, State1, []), + {success, _, MsgId, Msg, ExpiredMsg, State2, Effects0} = checkout_one(Meta, false, State1, []), {State4, Effects1} = case Settlement of unsettled -> {_, Pid} = ConsumerId, @@ -407,11 +407,13 @@ apply(#{index := Index, end, NotifyEffect = notify_decorators_effect(State4), - case evaluate_limit(Index, false, State0, State4, [NotifyEffect | Effects2]) of - {State, true, Effects} -> - update_smallest_raft_index(Index, Reply, State, Effects); - {State, false, Effects} -> - {State, Reply, Effects} + {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, + [NotifyEffect | Effects2]), + case {DroppedMsg, ExpiredMsg} of + {false, false} -> + {State, Reply, Effects}; + _ -> + update_smallest_raft_index(Index, Reply, State, Effects) end end; apply(#{index := Idx} = Meta, @@ -461,9 +463,8 @@ apply(#{index := Index}, #purge{}, update_smallest_raft_index(Index, Reply, State, Effects); apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); -apply(#{system_time := Ts} = Meta, {timeout, expire_msgs}, State0) -> - {State, Effects} = expire_msgs(Ts, State0, []), - checkout(Meta, State, State, Effects, false); +apply(Meta, {timeout, expire_msgs}, State) -> + checkout(Meta, State, State, [], false); apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, #?MODULE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}, @@ -1787,58 +1788,59 @@ checkout(#{index := Index} = Meta, #?MODULE{cfg = #cfg{resource = QName}} = OldState, State0, Effects0, HandleConsumerChanges) -> {#?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState0} = State1, _Result, Effects1} = - checkout0(Meta, checkout_one(Meta, State0, Effects0), #{}), + dlx = DlxState0} = State1, ExpiredMsg, Effects1} = + checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), State2 = State1#?MODULE{dlx = DlxState}, Effects2 = DlxDeliveryEffects ++ Effects1, - case evaluate_limit(Index, false, OldState, State2, Effects2) of - {State, true, Effects} -> + {State, DroppedMsg, Effects} = evaluate_limit(Index, false, OldState, State2, Effects2), + case {DroppedMsg, ExpiredMsg} of + {false, false} -> case maybe_notify_decorators(State, HandleConsumerChanges) of {true, {MaxActivePriority, IsEmpty}} -> NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty), - update_smallest_raft_index(Index, State, [NotifyEffect | Effects]); + {State, ok, [NotifyEffect | Effects]}; false -> - update_smallest_raft_index(Index, State, Effects) + {State, ok, Effects} end; - {State, false, Effects} -> + _ -> case maybe_notify_decorators(State, HandleConsumerChanges) of {true, {MaxActivePriority, IsEmpty}} -> NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty), - {State, ok, [NotifyEffect | Effects]}; + update_smallest_raft_index(Index, State, [NotifyEffect | Effects]); 
false -> - {State, ok, Effects} + update_smallest_raft_index(Index, State, Effects) end end. checkout0(Meta, {success, ConsumerId, MsgId, - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), State, Effects}, + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), ExpiredMsg, State, Effects}, SendAcc0) when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {MsgId, Header}}, SendAcc = maps:update_with(ConsumerId, fun ({InMem, LogMsgs}) -> {InMem, [DelMsg | LogMsgs]} end, {[], [DelMsg]}, SendAcc0), - checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); + checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); checkout0(Meta, {success, ConsumerId, MsgId, - ?INDEX_MSG(Idx, ?MSG(Header, Msg)), State, Effects}, + ?INDEX_MSG(Idx, ?MSG(Header, Msg)), ExpiredMsg, State, Effects}, SendAcc0) when is_integer(Idx) -> DelMsg = {MsgId, {Header, Msg}}, SendAcc = maps:update_with(ConsumerId, fun ({InMem, LogMsgs}) -> {[DelMsg | InMem], LogMsgs} end, {[DelMsg], []}, SendAcc0), - checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); -checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), State, Effects}, + checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); +checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), ExpiredMsg, State, Effects}, SendAcc) -> %% Do not append delivery effect for prefix messages. %% Prefix messages do not exist anymore, but they still go through the %% normal checkout flow to derive correct consumer states %% after recovery and will still be settled or discarded later on. - checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); -checkout0(_Meta, {Activity, State0, Effects0}, SendAcc) -> + checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); +checkout0(_Meta, {Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> Effects1 = case Activity of nochange -> append_delivery_effects(Effects0, SendAcc); @@ -1846,7 +1848,7 @@ checkout0(_Meta, {Activity, State0, Effects0}, SendAcc) -> [{aux, inactive} | append_delivery_effects(Effects0, SendAcc)] end, - {State0, ok, lists:reverse(Effects1)}. + {State0, ExpiredMsg, lists:reverse(Effects1)}. evaluate_limit(_Index, Result, _BeforeState, #?MODULE{cfg = #cfg{max_length = undefined, @@ -2014,13 +2016,13 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> {dequeue, {MsgId, {Header, Msg}}, Ready}}}] end}. -checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> +checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% Before checking out any messsage to any consumer, %% first remove all expired messages from the head of the queue. 
- {#?MODULE{service_queue = SQ0, - messages = Messages0, - consumers = Cons0} = InitState, Effects1} = - expire_msgs(Ts, InitState0, Effects0), + {ExpiredMsg, #?MODULE{service_queue = SQ0, + messages = Messages0, + consumers = Cons0} = InitState, Effects1} = + expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), case priority_queue:out(SQ0) of {{value, ConsumerId}, SQ1} @@ -2037,11 +2039,11 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> %% NB: these retry cases introduce the "queue list reversal" %% inefficiency but this is a rare thing to happen %% so should not need optimising - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = cancelled} -> - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = suspected_down} -> - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{checked_out = Checked0, next_msg_id = Next, credit = Credit, @@ -2064,26 +2066,26 @@ checkout_one(#{system_time := Ts} = Meta, InitState0, Effects0) -> subtract_in_memory_counts( Header, add_bytes_checkout(Header, State1)) end, - {success, ConsumerId, Next, ConsumerMsg, State, Effects1} + {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, State, Effects1} end; empty -> - {nochange, InitState, Effects1} + {nochange, ExpiredMsg, InitState, Effects1} end; {{value, _ConsumerId}, SQ1} -> %% consumer did not exist but was queued, recurse - checkout_one(Meta, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); {empty, _} -> % Effects = timer_effect(Ts, InitState, Effects1), case lqueue:len(Messages0) of 0 -> - {nochange, InitState, Effects1}; + {nochange, ExpiredMsg, InitState, Effects1}; _ -> - {inactive, InitState, Effects1} + {inactive, ExpiredMsg, InitState, Effects1} end end. %% dequeue all expired messages -expire_msgs(RaCmdTs, State, Effects) -> +expire_msgs(RaCmdTs, Result, State, Effects) -> %% In the normal case, there are no expired messages. %% Therefore, first queue:peek/1 to check whether we need to queue:out/1 %% because the latter can be much slower than the former. @@ -2098,7 +2100,7 @@ expire_msgs(RaCmdTs, State, Effects) -> when RaCmdTs >= Expiry -> expire(RaCmdTs, Header, State, Effects); _ -> - {State, Effects} + {Result, State, Effects} end. expire(RaCmdTs, Header, State0, Effects) -> @@ -2124,7 +2126,7 @@ expire(RaCmdTs, Header, State0, Effects) -> State3 end, State = decr_total(State5), - expire_msgs(RaCmdTs, State, DlxEffects ++ Effects). + expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). timer_effect(RaCmdTs, State, Effects) -> T = case peek_next_msg(State) of diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 4d16bec338db..481dcb21512c 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -1737,6 +1737,21 @@ empty_dequeue_should_emit_release_cursor_test(_) -> ?ASSERT_EFF({release_cursor, _, _}, Effects), ok. 
+expire_message_should_emit_release_cursor_test(_) -> + Conf = #{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), + release_cursor_interval => 0, + msg_ttl => 1}, + S0 = rabbit_fifo:init(Conf), + Msg = #basic_message{content = #content{properties = none, + payload_fragments_rev = []}}, + {S1, ok, _} = apply(meta(1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg), S0), + {_S, ok, Effs} = apply(meta(2, 101), + rabbit_fifo:make_enqueue(self(), 2, Msg), + S1), + ?ASSERT_EFF({release_cursor, 1, _}, Effs), + ok. + %% Utility init(Conf) -> rabbit_fifo:init(Conf). From b8ab77ec8830d0593152f73003c775ab66c46bff Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Fri, 7 Jan 2022 10:01:50 +0000 Subject: [PATCH 44/97] Do not keep rabbit_fifo messages in memory --- deps/rabbit/src/rabbit_fifo.erl | 564 ++++++++++---------- deps/rabbit/src/rabbit_fifo.hrl | 23 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 88 ++- deps/rabbit/test/quorum_queue_SUITE.erl | 18 +- deps/rabbit/test/rabbit_fifo_SUITE.erl | 240 +++++---- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 42 +- 6 files changed, 492 insertions(+), 483 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 7725576bf56d..4885970b2595 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -79,6 +79,8 @@ msg :: raw_msg()}). -record(requeue, {consumer_id :: consumer_id(), msg_id :: msg_id(), + index :: ra:index(), + header :: msg_header(), msg :: indexed_msg()}). -record(register_enqueuer, {pid :: pid()}). -record(checkout, {consumer_id :: consumer_id(), @@ -259,7 +261,9 @@ apply(#{index := Idx} = Meta, msg_id = MsgId, %% as we read the message from disk it is already %% an inmemory message - msg = ?INDEX_MSG(OldIdx, ?MSG(_Header, _RawMsg) = Msg)}, + index = OldIdx, + header = Header0, + msg = _Msg}, #?MODULE{consumers = Cons0, messages = Messages, ra_indexes = Indexes0} = State00) -> @@ -268,25 +272,18 @@ apply(#{index := Idx} = Meta, when is_map_key(MsgId, Checked0) -> %% construct an index message with the current raft index %% and update delivery count before adding it to the message queue - ?INDEX_MSG(_, ?MSG(Header, _)) = IdxMsg0 = - update_msg_header(delivery_count, fun incr/1, 1, ?INDEX_MSG(Idx, Msg)), + Header = update_header(delivery_count, fun incr/1, 1, Header0), State0 = add_bytes_return(Header, State00), - {State1, IdxMsg} = - case evaluate_memory_limit(Header, State0) of - true -> - % indexed message with header map - {State0, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}; - false -> - {add_in_memory_counts(Header, State0), IdxMsg0} - end, + + IdxMsg = ?INDEX_MSG(Idx, ?DISK_MSG(Header)), Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), credit = increase_credit(Con0, 1)}, State2 = update_or_remove_sub( Meta, ConsumerId, Con, - State1#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), + State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), messages = lqueue:in(IdxMsg, Messages)}), %% We have to increment the enqueue counter to ensure release cursors @@ -397,15 +394,12 @@ apply(#{index := Index, {State3, SettleEffects ++ Effects0} end, {Reply, Effects2} = - case Msg of - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)) -> - {'$ra_no_reply', - [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | - Effects1]}; - ?INDEX_MSG(_, ?MSG(Header, Body)) -> - {{dequeue, {MsgId, {Header, Body}}, Ready-1}, Effects1} - - end, + case Msg of + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)) -> + {'$ra_no_reply', + 
[reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | + Effects1]} + end, NotifyEffect = notify_decorators_effect(State4), {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, [NotifyEffect | Effects2]), @@ -436,12 +430,12 @@ apply(#{index := Index}, #purge{}, ra_indexes = Indexes0, dlx = DlxState} = State0) -> NumReady = messages_ready(State0), - Indexes1 = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> + Indexes1 = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> Acc end, Indexes0, lqueue:to_list(Returns)), - Indexes = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_, _)), Acc0) when is_integer(I) -> + Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0); (_, Acc) -> Acc @@ -628,12 +622,14 @@ apply(_Meta, Cmd, State) -> convert_msg({RaftIdx, {Header, empty}}) -> ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); -convert_msg({RaftIdx, {Header, Msg}}) when is_integer(RaftIdx) -> - ?INDEX_MSG(RaftIdx, ?TUPLE(Header, Msg)); +convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> + ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); convert_msg({'$empty_msg', Header}) -> - ?DISK_MSG(Header); + %% dummy index + ?INDEX_MSG(0, ?DISK_MSG(Header)); convert_msg({'$prefix_msg', Header}) -> - ?PREFIX_MEM_MSG(Header). + %% dummy index + ?INDEX_MSG(0, ?DISK_MSG(Header)). convert_v1_to_v2(V1State) -> IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), @@ -710,8 +706,8 @@ convert_v1_to_v2(V1State) -> msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), waiting_consumers = rabbit_fifo_v1:get_field(waiting_consumers, V1State), - msg_bytes_in_memory = rabbit_fifo_v1:get_field(msg_bytes_in_memory, V1State), - msgs_ready_in_memory = rabbit_fifo_v1:get_field(msgs_ready_in_memory, V1State), + msg_bytes_in_memory = 0, + msgs_ready_in_memory = 0, last_active = rabbit_fifo_v1:get_field(last_active, V1State) }. @@ -905,8 +901,8 @@ get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> case Consumers of #{Cid := #consumer{checked_out = Checked}} -> [begin - ?INDEX_MSG(_, ?MSG(H, M)) = maps:get(K, Checked), - {K, {H, M}} + ?INDEX_MSG(I, ?DISK_MSG(H)) = maps:get(K, Checked), + {K, {I, H}} end || K <- lists:seq(From, To), maps:is_key(K, Checked)]; _ -> [] @@ -969,16 +965,17 @@ handle_aux(leader, cast, {#return{msg_ids = MsgIds, {{_, _, {_, _, Cmd, _}}, L} -> Msg = case Cmd of #enqueue{msg = M} -> M; - #requeue{msg = ?INDEX_MSG(_, ?MSG(_H, M))} -> - M + #requeue{msg = M} -> M end, - IdxMsg = ?INDEX_MSG(Idx, ?MSG(Header, Msg)), + IdxMsg = ?INDEX_MSG(Idx, ?TUPLE(Header, Msg)), {L, [{MsgId, IdxMsg} | Acc]}; {undefined, L} -> {L, Acc} - end; - (MsgId, IdxMsg, {L0, Acc}) -> - {L0, [{MsgId, IdxMsg} | Acc]} + end + %% TODO: handle old formats? + + % (MsgId, IdxMsg, {L0, Acc}) -> + % {L0, [{MsgId, IdxMsg} | Acc]} end, {Log0, []}, maps:with(MsgIds, Checked)), Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, @@ -1034,10 +1031,11 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, {ok, ?INDEX_MSG(Idx, ?DISK_MSG(Header))} -> %% need to re-hydrate from the log {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), + %% TODO: handle requeue? 
#enqueue{msg = Msg} = Cmd, {reply, {ok, {Header, Msg}}, Aux0, Log}; - {ok, ?INDEX_MSG(_Idx, ?MSG(Header, Msg))} -> - {reply, {ok, {Header, Msg}}, Aux0, Log0}; + % {ok, ?INDEX_MSG(_Idx, ?MSG(Header, Msg))} -> + % {reply, {ok, {Header, Msg}}, Aux0, Log0}; Err -> {reply, Err, Aux0, Log0} end; @@ -1422,24 +1420,20 @@ incr_total(#?MODULE{messages_total = Tot} = State) -> drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> case take_next_msg(State0) of - {?PREFIX_MEM_MSG(Header), State1} -> - State2 = subtract_in_memory_counts(Header, - add_bytes_drop(Header, State1)), - {decr_total(State2), Effects}; - {?DISK_MSG(Header), State1} -> - State2 = add_bytes_drop(Header, State1), - {decr_total(State2), Effects}; - {?INDEX_MSG(Idx, ?MSG(Header, _) = Msg) = FullMsg, State1} -> + % {?PREFIX_MEM_MSG(Header), State1} -> + % State2 = subtract_in_memory_counts(Header, + % add_bytes_drop(Header, State1)), + % {decr_total(State2), Effects}; + % {?DISK_MSG(Header), State1} -> + % State2 = add_bytes_drop(Header, State1), + % {decr_total(State2), Effects}; + {?INDEX_MSG(Idx, ?DISK_MSG(Header)) = IdxMsg, State1} -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), State2 = State1#?MODULE{ra_indexes = Indexes}, State3 = decr_total(add_bytes_drop(Header, State2)), #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState} = State = case Msg of - ?DISK_MSG(_) -> State3; - _ -> - subtract_in_memory_counts(Header, State3) - end, - {_, DlxEffects} = rabbit_fifo_dlx:discard([FullMsg], maxlen, DLH, DlxState), + dlx = DlxState} = State = State3, + {_, DlxEffects} = rabbit_fifo_dlx:discard([IdxMsg], maxlen, DLH, DlxState), {State, DlxEffects ++ Effects}; empty -> {State0, Effects} @@ -1450,17 +1444,21 @@ enqueue(RaftIdx, Ts, RawMsg, #?MODULE{messages = Messages} = State0) -> %% when the next required key is added Header0 = message_size(RawMsg), Header = maybe_set_msg_ttl(RawMsg, Ts, Header0, State0), - {State1, Msg} = - case evaluate_memory_limit(Header, State0) of - true -> - % indexed message with header map - {State0, - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}; - false -> - {add_in_memory_counts(Header, State0), - ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} - end, - State = add_bytes_enqueue(Header, State1), + %% TODO: enqueue as in memory message if there are no ready messages + %% and there are consumers with credit available. + %% I.e. the message will be immedately delivered so no benefit + %% in reading it back from the log + Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), + % case evaluate_memory_limit(Header, State0) of + % true -> + % % indexed message with header map + % {State0, + % ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}; + % false -> + % {add_in_memory_counts(Header, State0), + % ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} + % end, + State = add_bytes_enqueue(Header, State0), State#?MODULE{messages = lqueue:in(Msg, Messages)}. 
maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, @@ -1543,7 +1541,9 @@ maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, State0) -> State = enqueue(RaftIdx, Ts, RawMsg, State0), {ok, State, Effects}; maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, - #?MODULE{enqueuers = Enqueuers0} = State0) -> + #?MODULE{msg_bytes_enqueue = Enqueue, + enqueuers = Enqueuers0, + messages = Messages} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> @@ -1553,9 +1553,13 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, {Res, State, [{monitor, process, From} | Effects]}; #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> % it is the next expected seqno - State1 = enqueue(RaftIdx, Ts, RawMsg, State0), + Size = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, - State = State1#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, + State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, + messages = lqueue:in(Msg, Messages), + enqueuers = Enqueuers0#{From => Enq}}, {ok, State, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo > Next -> @@ -1605,7 +1609,7 @@ complete(Meta, ConsumerId, DiscardedMsgIds, delete_indexes(Msgs, #?MODULE{ra_indexes = Indexes0} = State) -> %% TODO: optimise by passing a list to rabbit_fifo_index - Indexes = lists:foldl(fun (?INDEX_MSG(I, ?MSG(_,_)), Acc) when is_integer(I) -> + Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc); (_, Acc) -> Acc @@ -1686,12 +1690,12 @@ find_next_cursor(Smallest, Cursors0, Potential) -> {Potential, Cursors0} end. -update_msg_header(Key, Fun, Def, ?INDEX_MSG(Idx, ?MSG(Header, Body))) -> - ?INDEX_MSG(Idx, ?MSG(update_header(Key, Fun, Def, Header), Body)); -update_msg_header(Key, Fun, Def, ?DISK_MSG(Header)) -> - ?DISK_MSG(update_header(Key, Fun, Def, Header)); -update_msg_header(Key, Fun, Def, ?PREFIX_MEM_MSG(Header)) -> - ?PREFIX_MEM_MSG(update_header(Key, Fun, Def, Header)). +update_msg_header(Key, Fun, Def, ?INDEX_MSG(Idx, ?DISK_MSG(Header))) -> + ?INDEX_MSG(Idx, ?DISK_MSG(update_header(Key, Fun, Def, Header))). +% update_msg_header(Key, Fun, Def, ?DISK_MSG(Header)) -> +% ?DISK_MSG(update_header(Key, Fun, Def, Header)). +% update_msg_header(Key, Fun, Def, ?PREFIX_MEM_MSG(Header)) -> +% ?PREFIX_MEM_MSG(update_header(Key, Fun, Def, Header)). update_header(Key, UpdateFun, Default, Header) when is_integer(Header) -> @@ -1706,12 +1710,12 @@ update_header(Key, UpdateFun, Default, Header) -> % get_msg_header(Key, ?PREFIX_MEM_MSG(Header)) -> % get_header(Key, Header). -get_msg_header(?INDEX_MSG(_Idx, ?MSG(Header, _Body))) -> - Header; -get_msg_header(?DISK_MSG(Header)) -> - Header; -get_msg_header(?PREFIX_MEM_MSG(Header)) -> +get_msg_header(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> Header. +% get_msg_header(?DISK_MSG(Header)) -> +% Header; +% get_msg_header(?PREFIX_MEM_MSG(Header)) -> +% Header. 
get_header(size, Header) when is_integer(Header) -> @@ -1741,37 +1745,37 @@ return_one(Meta, MsgId, Msg0, _ -> Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, - {RtnMsg, State1} = case is_disk_msg(Msg) of - true -> - {Msg, State0}; - false -> - case evaluate_memory_limit(Header, State0) of - true -> - {to_disk_msg(Msg), State0}; - false -> - {Msg, add_in_memory_counts(Header, State0)} - end - end, + % {RtnMsg, State1} = case is_disk_msg(Msg) of + % true -> + % {Msg, State0}; + % false -> + % case evaluate_memory_limit(Header, State0) of + % true -> + % {to_disk_msg(Msg), State0}; + % false -> + % {Msg, add_in_memory_counts(Header, State0)} + % end + % end, {add_bytes_return( Header, - State1#?MODULE{consumers = Consumers#{ConsumerId => Con}, - returns = lqueue:in(RtnMsg, Returns)}), + State0#?MODULE{consumers = Consumers#{ConsumerId => Con}, + returns = lqueue:in(Msg, Returns)}), Effects0} end. -is_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_))) when is_integer(RaftIdx) -> - true; -is_disk_msg(?DISK_MSG(_)) -> - true; -is_disk_msg(_) -> - false. +% is_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_))) when is_integer(RaftIdx) -> +% true; +% is_disk_msg(?DISK_MSG(_)) -> +% true; +% is_disk_msg(_) -> +% false. -to_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_)) = Msg) when is_integer(RaftIdx) -> - Msg; -to_disk_msg(?INDEX_MSG(RaftIdx, ?MSG(Header, _))) when is_integer(RaftIdx) -> - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); -to_disk_msg(?PREFIX_MEM_MSG(Header)) -> - ?DISK_MSG(Header). +% to_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_)) = Msg) when is_integer(RaftIdx) -> +% Msg; +% to_disk_msg(?INDEX_MSG(RaftIdx, ?MSG(Header, _))) when is_integer(RaftIdx) -> +% ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); +% to_disk_msg(?PREFIX_MEM_MSG(Header)) -> +% ?DISK_MSG(Header). return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, #consumer{checked_out = Checked} = Con) -> @@ -1824,22 +1828,22 @@ checkout0(Meta, {success, ConsumerId, MsgId, {InMem, [DelMsg | LogMsgs]} end, {[], [DelMsg]}, SendAcc0), checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); -checkout0(Meta, {success, ConsumerId, MsgId, - ?INDEX_MSG(Idx, ?MSG(Header, Msg)), ExpiredMsg, State, Effects}, - SendAcc0) when is_integer(Idx) -> - DelMsg = {MsgId, {Header, Msg}}, - SendAcc = maps:update_with(ConsumerId, - fun ({InMem, LogMsgs}) -> - {[DelMsg | InMem], LogMsgs} - end, {[DelMsg], []}, SendAcc0), - checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); -checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), ExpiredMsg, State, Effects}, - SendAcc) -> - %% Do not append delivery effect for prefix messages. - %% Prefix messages do not exist anymore, but they still go through the - %% normal checkout flow to derive correct consumer states - %% after recovery and will still be settled or discarded later on. - checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); +% checkout0(Meta, {success, ConsumerId, MsgId, +% ?INDEX_MSG(Idx, ?MSG(Header, Msg)), State, Effects}, +% SendAcc0) when is_integer(Idx) -> +% DelMsg = {MsgId, {Header, Msg}}, +% SendAcc = maps:update_with(ConsumerId, +% fun ({InMem, LogMsgs}) -> +% {[DelMsg | InMem], LogMsgs} +% end, {[DelMsg], []}, SendAcc0), +% checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); +% checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), State, Effects}, +% SendAcc) -> +% %% Do not append delivery effect for prefix messages. 
+% %% Prefix messages do not exist anymore, but they still go through the +% %% normal checkout flow to derive correct consumer states +% %% after recovery and will still be settled or discarded later on. +% checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); checkout0(_Meta, {Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> Effects1 = case Activity of nochange -> @@ -1902,27 +1906,27 @@ evaluate_limit(Index, Result, BeforeState, {State0, Result, Effects0} end. -evaluate_memory_limit(_Header, - #?MODULE{cfg = #cfg{max_in_memory_length = undefined, - max_in_memory_bytes = undefined}}) -> - false; -evaluate_memory_limit(#{size := Size}, State) -> - evaluate_memory_limit(Size, State); -evaluate_memory_limit(Header, - #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength, - max_in_memory_bytes = MaxBytes}, - msg_bytes_in_memory = Bytes, - msgs_ready_in_memory = Length}) -> - Size = get_header(size, Header), - (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes). +% evaluate_memory_limit(_Header, +% #?MODULE{cfg = #cfg{max_in_memory_length = undefined, +% max_in_memory_bytes = undefined}}) -> +% false; +% evaluate_memory_limit(#{size := Size}, State) -> +% evaluate_memory_limit(Size, State); +% evaluate_memory_limit(Header, +% #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength, +% max_in_memory_bytes = MaxBytes}, +% msg_bytes_in_memory = Bytes, +% msgs_ready_in_memory = Length}) -> +% Size = get_header(size, Header), +% (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes). append_delivery_effects(Effects0, AccMap) when map_size(AccMap) == 0 -> %% does this ever happen? Effects0; append_delivery_effects(Effects0, AccMap) -> [{aux, active} | - maps:fold(fun (C, {InMemMsgs, LogMsgs}, Ef) -> - [delivery_effect(C, lists:reverse(LogMsgs), InMemMsgs) | Ef] + maps:fold(fun (C, {InMemMsgs, DiskMsgs}, Ef) -> + [delivery_effect(C, lists:reverse(DiskMsgs), InMemMsgs) | Ef] end, Effects0, AccMap) ]. 
@@ -1934,18 +1938,20 @@ append_delivery_effects(Effects0, AccMap) -> %% %% When we return it is always done to the current return queue %% for both prefix messages and current messages -take_next_msg(#?MODULE{prefix_msgs = {NumR, [Msg | Rem], - NumP, P}} = State) -> - %% there are prefix returns, these should be served first - {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; +% take_next_msg(#?MODULE{prefix_msgs = {NumR, [Msg | Rem], +% NumP, P}} = State) -> +% %% there are prefix returns, these should be served first +% {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; take_next_msg(#?MODULE{returns = Returns0, messages = Messages0, - ra_indexes = Indexes0, - prefix_msgs = {NumR, R, NumP, P}} = State) -> + ra_indexes = Indexes0 + % prefix_msgs = {NumR, R, NumP, P} + } = State) -> case lqueue:out(Returns0) of {{value, NextMsg}, Returns} -> {NextMsg, State#?MODULE{returns = Returns}}; - {empty, _} when P == [] -> + % {empty, _} when P == [] -> + {empty, _} -> case lqueue:out(Messages0) of {empty, _} -> empty; @@ -1954,64 +1960,64 @@ take_next_msg(#?MODULE{returns = Returns0, Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), {IndexMsg, State#?MODULE{messages = Messages, ra_indexes = Indexes}} - end; - {empty, _} -> - case P of - [?PREFIX_MEM_MSG(_Header) = Msg | Rem] -> - {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; - [?DISK_MSG(_Header) = Msg | Rem] -> - {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} end + % {empty, _} -> + % case P of + % [?PREFIX_MEM_MSG(_Header) = Msg | Rem] -> + % {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; + % [?DISK_MSG(_Header) = Msg | Rem] -> + % {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} + % end end. -peek_next_msg(#?MODULE{prefix_msgs = {_NumR, [Msg | _], - _NumP, _P}}) -> - %% there are prefix returns, these should be served first - {value, Msg}; +% peek_next_msg(#?MODULE{prefix_msgs = {_NumR, [Msg | _], +% _NumP, _P}}) -> +% %% there are prefix returns, these should be served first +% {value, Msg}; peek_next_msg(#?MODULE{returns = Returns0, - messages = Messages0, - prefix_msgs = {_NumR, _R, _NumP, P}}) -> + messages = Messages0 + % prefix_msgs = {_NumR, _R, _NumP, P} + }) -> case lqueue:peek(Returns0) of {value, _} = Msg -> Msg; - empty when P == [] -> - lqueue:peek(Messages0); empty -> - case P of - [?PREFIX_MEM_MSG(_Header) = Msg | _] -> - {value, Msg}; - [?DISK_MSG(_Header) = Msg | _] -> - {value, Msg} - end + lqueue:peek(Messages0) + % empty -> + % case P of + % [?PREFIX_MEM_MSG(_Header) = Msg | _] -> + % {value, Msg}; + % [?DISK_MSG(_Header) = Msg | _] -> + % {value, Msg} + % end end. 
-delivery_effect({CTag, CPid}, [], InMemMsgs) -> - {send_msg, CPid, {delivery, CTag, lists:reverse(InMemMsgs)}, - [local, ra_event]}; -delivery_effect({CTag, CPid}, IdxMsgs, InMemMsgs) -> +% delivery_effect({CTag, CPid}, [], InMemMsgs) -> +% {send_msg, CPid, {delivery, CTag, lists:reverse(InMemMsgs)}, +% [local, ra_event]}; +delivery_effect({CTag, CPid}, IdxMsgs, []) -> %% InMemMsgs {RaftIdxs, Data} = lists:unzip(IdxMsgs), {log, RaftIdxs, fun(Log) -> - Msgs0 = lists:zipwith(fun - (#enqueue{msg = Msg}, {MsgId, Header}) -> - {MsgId, {Header, Msg}}; - (#requeue{msg = ?INDEX_MSG(_, ?MSG(_, Msg))}, - {MsgId, Header}) -> - {MsgId, {Header, Msg}} - end, Log, Data), - Msgs = case InMemMsgs of - [] -> - Msgs0; - _ -> - lists:sort(InMemMsgs ++ Msgs0) - end, + Msgs = lists:zipwith( + fun (#enqueue{msg = Msg}, + {MsgId, Header}) -> + {MsgId, {Header, Msg}}; + (#requeue{msg = Msg}, + {MsgId, Header}) -> + {MsgId, {Header, Msg}} + end, Log, Data), [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}] end, {local, node(CPid)}}. reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> {log, [RaftIdx], - fun([{enqueue, _, _, Msg}]) -> + fun + ([#enqueue{msg = Msg}]) -> + [{reply, From, {wrap_reply, + {dequeue, {MsgId, {Header, Msg}}, Ready}}}]; + ([#requeue{msg = Msg}]) -> [{reply, From, {wrap_reply, {dequeue, {MsgId, {Header, Msg}}, Ready}}}] end}. @@ -2057,15 +2063,7 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> Meta, ConsumerId, Con, State0#?MODULE{service_queue = SQ1}), Header = get_msg_header(ConsumerMsg), - State = case is_disk_msg(ConsumerMsg) of - true -> - add_bytes_checkout(Header, State1); - false -> - %% TODO do not subtract from memory here since - %% messages are still in memory when checked out - subtract_in_memory_counts( - Header, add_bytes_checkout(Header, State1)) - end, + State = add_bytes_checkout(Header, State1), {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, State, Effects1} end; empty -> @@ -2090,15 +2088,15 @@ expire_msgs(RaCmdTs, Result, State, Effects) -> %% Therefore, first queue:peek/1 to check whether we need to queue:out/1 %% because the latter can be much slower than the former. case peek_next_msg(State) of - {value, ?INDEX_MSG(_Idx, ?MSG(#{expiry := Expiry} = Header, _))} - when RaCmdTs >= Expiry -> - expire(RaCmdTs, Header, State, Effects); - {value, ?PREFIX_MEM_MSG(#{expiry := Expiry} = Header)} - when RaCmdTs >= Expiry -> - expire(RaCmdTs, Header, State, Effects); - {value, ?DISK_MSG(#{expiry := Expiry} = Header)} + % {value, ?DISK_MSG(#{expiry := Expiry} = Header)} + % when RaCmdTs >= Expiry -> + % expire(RaCmdTs, Header, State, Effects); + {value, ?INDEX_MSG(_Idx, ?DISK_MSG(#{expiry := Expiry} = Header))} when RaCmdTs >= Expiry -> expire(RaCmdTs, Header, State, Effects); + % {value, ?PREFIX_MEM_MSG(#{expiry := Expiry} = Header)} + % when RaCmdTs >= Expiry -> + % expire(RaCmdTs, Header, State, Effects); _ -> {Result, State, Effects} end. 
@@ -2115,13 +2113,6 @@ expire(RaCmdTs, Header, State0, Effects) -> when is_integer(Idx) -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), State3#?MODULE{ra_indexes = Indexes}; - ?INDEX_MSG(Idx, ?MSG(_Header, _)) - when is_integer(Idx) -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State4 = State3#?MODULE{ra_indexes = Indexes}, - subtract_in_memory_counts(Header, State4); - ?PREFIX_MEM_MSG(_) -> - subtract_in_memory_counts(Header, State3); ?DISK_MSG(_) -> State3 end, @@ -2130,7 +2121,7 @@ expire(RaCmdTs, Header, State0, Effects) -> timer_effect(RaCmdTs, State, Effects) -> T = case peek_next_msg(State) of - {value, ?INDEX_MSG(_, ?MSG(#{expiry := Expiry}, _))} + {value, ?INDEX_MSG(_, ?DISK_MSG(#{expiry := Expiry}))} when is_number(Expiry) -> %% Next message contains 'expiry' header. %% (Re)set timer so that mesage will be dropped or dead-lettered on time. @@ -2235,73 +2226,68 @@ maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, %% creates a dehydrated version of the current state to be cached and %% potentially used to for a snaphot at a later point -dehydrate_state(#?MODULE{msg_bytes_in_memory = 0, - cfg = #cfg{max_in_memory_length = 0}, - consumers = Consumers, +dehydrate_state(#?MODULE{cfg = #cfg{}, dlx = DlxState} = State) -> % no messages are kept in memory, no need to % overly mutate the current state apart from removing indexes and cursors State#?MODULE{ ra_indexes = rabbit_fifo_index:empty(), - consumers = maps:map(fun (_, C) -> - dehydrate_consumer(C) - end, Consumers), release_cursors = lqueue:new(), - dlx = rabbit_fifo_dlx:dehydrate(DlxState)}; -dehydrate_state(#?MODULE{messages = Messages, - consumers = Consumers, - returns = Returns, - prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, - waiting_consumers = Waiting0, - dlx = DlxState} = State) -> - RCnt = lqueue:len(Returns), - %% TODO: optimise this function as far as possible - PrefRet1 = lists:foldr(fun (M, Acc) -> - [dehydrate_message(M) | Acc] - end, [], lqueue:to_list(Returns)), - PrefRet = PrefRet0 ++ PrefRet1, - PrefMsgsSuff = dehydrate_messages(Messages), - %% prefix messages are not populated in normal operation only after - %% recovering from a snapshot - PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, - Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0], - State#?MODULE{messages = lqueue:new(), - ra_indexes = rabbit_fifo_index:empty(), - release_cursors = lqueue:new(), - consumers = maps:map(fun (_, C) -> - dehydrate_consumer(C) - end, Consumers), - returns = lqueue:new(), - prefix_msgs = {PRCnt + RCnt, PrefRet, - PPCnt + lqueue:len(Messages), PrefMsgs}, - waiting_consumers = Waiting, - dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. - -dehydrate_messages(Msgs0) -> - {OutRes, Msgs} = lqueue:out(Msgs0), - case OutRes of - {value, Msg} -> - [dehydrate_message(Msg) | dehydrate_messages(Msgs)]; - empty -> - [] - end. - -dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> - Checked = maps:map(fun (_, M) -> - dehydrate_message(M) - end, Checked0), - Con#consumer{checked_out = Checked}. - -dehydrate_message(?PREFIX_MEM_MSG(_) = M) -> - M; -dehydrate_message(?DISK_MSG(_) = M) -> - M; + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. 
+% dehydrate_state(#?MODULE{messages = Messages, +% consumers = Consumers, +% returns = Returns, +% prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, +% waiting_consumers = Waiting0, +% dlx = DlxState} = State) -> +% RCnt = lqueue:len(Returns), +% %% TODO: optimise this function as far as possible +% PrefRet1 = lists:foldr(fun (M, Acc) -> +% [dehydrate_message(M) | Acc] +% end, [], lqueue:to_list(Returns)), +% PrefRet = PrefRet0 ++ PrefRet1, +% PrefMsgsSuff = dehydrate_messages(Messages), +% %% prefix messages are not populated in normal operation only after +% %% recovering from a snapshot +% PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, +% Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0], +% State#?MODULE{messages = lqueue:new(), +% ra_indexes = rabbit_fifo_index:empty(), +% release_cursors = lqueue:new(), +% consumers = maps:map(fun (_, C) -> +% dehydrate_consumer(C) +% end, Consumers), +% returns = lqueue:new(), +% prefix_msgs = {PRCnt + RCnt, PrefRet, +% PPCnt + lqueue:len(Messages), PrefMsgs}, +% waiting_consumers = Waiting, +% dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. + +% dehydrate_messages(Msgs0) -> +% {OutRes, Msgs} = lqueue:out(Msgs0), +% case OutRes of +% {value, Msg} -> +% [dehydrate_message(Msg) | dehydrate_messages(Msgs)]; +% empty -> +% [] +% end. + +% dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> +% Checked = maps:map(fun (_, M) -> +% dehydrate_message(M) +% end, Checked0), +% Con#consumer{checked_out = Checked}. + +% dehydrate_message(?PREFIX_MEM_MSG(_) = M) -> +% M; +% dehydrate_message(?DISK_MSG(_) = M) -> +% M; dehydrate_message(?INDEX_MSG(_Idx, ?DISK_MSG(_Header) = Msg)) -> %% Use disk msgs directly as prefix messages. %% This avoids memory allocation since we do not convert. - Msg; -dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> - ?PREFIX_MEM_MSG(Header). + Msg. +% dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> +% ?PREFIX_MEM_MSG(Header). %% make the state suitable for equality comparison normalize(#?MODULE{ra_indexes = _Indexes, @@ -2414,27 +2400,27 @@ add_bytes_return(Header, State#?MODULE{msg_bytes_checkout = Checkout - Size, msg_bytes_enqueue = Enqueue + Size}. -add_in_memory_counts(Header, - #?MODULE{msg_bytes_in_memory = InMemoryBytes, - msgs_ready_in_memory = InMemoryCount} = State) -> - Size = get_header(size, Header), - State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Size, - msgs_ready_in_memory = InMemoryCount + 1}. +% add_in_memory_counts(Header, +% #?MODULE{msg_bytes_in_memory = InMemoryBytes, +% msgs_ready_in_memory = InMemoryCount} = State) -> +% Size = get_header(size, Header), +% State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Size, +% msgs_ready_in_memory = InMemoryCount + 1}. -subtract_in_memory_counts(Header, - #?MODULE{msg_bytes_in_memory = InMemoryBytes, - msgs_ready_in_memory = InMemoryCount} = State) -> - Size = get_header(size, Header), - State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Size, - msgs_ready_in_memory = InMemoryCount - 1}. +% subtract_in_memory_counts(Header, +% #?MODULE{msg_bytes_in_memory = InMemoryBytes, +% msgs_ready_in_memory = InMemoryCount} = State) -> +% Size = get_header(size, Header), +% State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Size, +% msgs_ready_in_memory = InMemoryCount - 1}. 
message_size(#basic_message{content = Content}) -> #content{payload_fragments_rev = PFR} = Content, iolist_size(PFR); -message_size(?PREFIX_MEM_MSG(Header)) -> - get_header(size, Header); -message_size(?DISK_MSG(Header)) -> - get_header(size, Header); +% message_size(?PREFIX_MEM_MSG(Header)) -> +% get_header(size, Header); +% message_size(Header) ?IS_HEADER(Header) -> +% get_header(size, Header); message_size(B) when is_binary(B) -> byte_size(B); message_size(Msg) -> @@ -2552,21 +2538,25 @@ smallest_raft_index(#?MODULE{messages = Messages, SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). -make_requeue(ConsumerId, Notify, [{MsgId, Msg}], Acc) -> +make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))}], Acc) -> lists:reverse([{append, #requeue{consumer_id = ConsumerId, + index = Idx, + header = Header, msg_id = MsgId, msg = Msg}, Notify} | Acc]); -make_requeue(ConsumerId, Notify, [{MsgId, Msg} | Rem], Acc) -> +make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))} | Rem], Acc) -> make_requeue(ConsumerId, Notify, Rem, - [{append, - #requeue{consumer_id = ConsumerId, - msg_id = MsgId, - msg = Msg}, - noreply} - | Acc]); + [{append, + #requeue{consumer_id = ConsumerId, + index = Idx, + header = Header, + msg_id = MsgId, + msg = Msg}, + noreply} + | Acc]); make_requeue(_ConsumerId, _Notify, [], []) -> []. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index aba592a3e984..2d3d373f4617 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -9,14 +9,15 @@ %% '$m' stand for 'memory'. -define(PREFIX_MEM_MSG_TAG, '$m'). --define(DISK_MSG(Header), [Header | ?DISK_MSG_TAG]). --define(MSG(Header, RawMsg), [Header | RawMsg]). +% -define(DISK_MSG(Header), [Header | ?DISK_MSG_TAG]). +-define(DISK_MSG(Header), Header). +% -define(MSG(Header, RawMsg), [Header | RawMsg]). -define(INDEX_MSG(Index, Msg), [Index | Msg]). --define(PREFIX_MEM_MSG(Header), [Header | ?PREFIX_MEM_MSG_TAG]). +% -define(PREFIX_MEM_MSG(Header), [Header | ?PREFIX_MEM_MSG_TAG]). -% -define(PREFIX_DISK_MSG_TAG, '$prefix_disk'). -% -define(PREFIX_DISK_MSG(Header), [?PREFIX_DISK_MSG_TAG | Header]). -% -define(PREFIX_DISK_MSG(Header), ?DISK_MSG(Header)). +-define(IS_HEADER(H), + when is_integer(H) orelse + (is_map(H) andalso is_map_key(size, H))). -type option(T) :: undefined | T. @@ -47,17 +48,17 @@ %% Value is determined by per-queue or per-message message TTL. %% If it only contains the size it can be condensed to an integer only --type msg() :: ?MSG(msg_header(), raw_msg()) | - ?DISK_MSG(msg_header()) | - ?PREFIX_MEM_MSG(msg_header()). +-type msg() :: %%?MSG(msg_header(), raw_msg()) | + ?DISK_MSG(msg_header()). + % ?PREFIX_MEM_MSG(msg_header()). %% message with a header map. -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type indexed_msg() :: ?INDEX_MSG(ra:index(), msg()). +-type indexed_msg() :: ?INDEX_MSG(ra:index(), msg_header()). --type prefix_msg() :: {'$prefix_msg', msg_header()}. +% -type prefix_msg() :: {'$prefix_msg', msg_header()}. -type delivery_msg() :: {msg_id(), {msg_header(), term()}}. %% A tuple consisting of the message id and the headered message. 
diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 007e922a4d27..b7b59d7fda0e 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -83,23 +83,18 @@ stat(#?MODULE{consumer = Con, -spec apply(ra_machine:command_meta_data(), protocol(), dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. -apply(_, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, +apply(_Meta, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, #?MODULE{consumer = #dlx_consumer{checked_out = Checked0}} = State0) -> Acked = maps:with(MsgIds, Checked0), - State = maps:fold(fun(MsgId, {_Rsn, Msg}, + State = maps:fold(fun(MsgId, {_Rsn,?INDEX_MSG(Idx, ?DISK_MSG(_)) = Msg}, #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, msg_bytes_checkout = BytesCheckout, ra_indexes = Indexes0} = S) -> - Indexes = case Msg of - ?INDEX_MSG(I, ?MSG(_,_)) - when is_integer(I) -> - rabbit_fifo_index:delete(I, Indexes0); - _ -> - Indexes0 - end, - S#?MODULE{consumer = C#dlx_consumer{checked_out = maps:remove(MsgId, Checked)}, - msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), - ra_indexes = Indexes} + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + S#?MODULE{consumer = C#dlx_consumer{checked_out = + maps:remove(MsgId, Checked)}, + msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), + ra_indexes = Indexes} end, State0, Acked), {State, [{mod_call, rabbit_global_counters, messages_dead_lettered_confirmed, [rabbit_quorum_queue, at_least_once, maps:size(Acked)]}]}; @@ -138,7 +133,8 @@ apply(_, Cmd, DLH, State) -> rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", [Cmd, DLH]), {State, []}. --spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> +-spec discard([indexed_msg()], rabbit_dead_letter:reason(), + dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. discard(Msgs, Reason, undefined, State) -> {State, [{mod_call, rabbit_global_counters, messages_dead_lettered, @@ -157,8 +153,6 @@ discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), {true, Msg}; - (?INDEX_MSG(_, ?MSG(_Header, Msg))) -> - {true, Msg}; (_IgnorePrefixMessage) -> false end, Msgs0), @@ -168,18 +162,13 @@ discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> discard(Msgs, Reason, at_least_once, State0) when Reason =/= maxlen -> %%TODO delete delivery_count header to save space? It's not needed anymore. 
- State = lists:foldl(fun (Msg, #?MODULE{discards = D0, - msg_bytes = B0, - ra_indexes = I0} = S0) -> + State = lists:foldl(fun (?INDEX_MSG(Idx, _) = Msg, + #?MODULE{discards = D0, + msg_bytes = B0, + ra_indexes = I0} = S0) -> D = lqueue:in({Reason, Msg}, D0), B = B0 + size_in_bytes(Msg), - I = case Msg of - ?INDEX_MSG(Idx, ?MSG(_,_)) - when is_integer(Idx) -> - rabbit_fifo_index:append(Idx, I0); - _ -> - I0 - end, + I = rabbit_fifo_index:append(Idx, I0), S0#?MODULE{discards = D, msg_bytes = B, ra_indexes = I} @@ -208,11 +197,11 @@ checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}, State}, DelMsg = {Idx, {Reason, MsgId, Header}}, SendAcc = {InMemMsgs, [DelMsg|LogMsgs]}, checkout0(checkout_one(State), SendAcc); -checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) - when is_integer(Idx) -> - DelMsg = {MsgId, {Reason, Header, Msg}}, - SendAcc = {[DelMsg|InMemMsgs], LogMsgs}, - checkout0(checkout_one(State), SendAcc); +% checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) +% when is_integer(Idx) -> +% DelMsg = {MsgId, {Reason, Header, Msg}}, +% SendAcc = {[DelMsg|InMemMsgs], LogMsgs}, +% checkout0(checkout_one(State), SendAcc); checkout0({success, _MsgId, {_Reason, ?TUPLE(_, _)}, State}, SendAcc) -> %% This is a prefix message which means we are recovering from a snapshot. %% We know: @@ -250,8 +239,7 @@ add_bytes_checkout(Size, #?MODULE{msg_bytes = Bytes, State#?MODULE{msg_bytes = Bytes - Size, msg_bytes_checkout = BytesCheckout + Size}. -size_in_bytes(Msg) -> - Header = rabbit_fifo:get_msg_header(Msg), +size_in_bytes(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> rabbit_fifo:get_header(size, Header). %% returns at most one delivery effect because there is only one consumer @@ -404,26 +392,26 @@ purge(#?MODULE{consumer = Consumer0} = State) -> -spec dehydrate(state()) -> state(). -dehydrate(#?MODULE{discards = Discards, - consumer = Con} = State) -> - State#?MODULE{discards = dehydrate_messages(Discards), - consumer = dehydrate_consumer(Con), +dehydrate(#?MODULE{discards = _Discards, + consumer = _Con} = State) -> + State#?MODULE{%%discards = dehydrate_messages(Discards), + %%consumer = dehydrate_consumer(Con), ra_indexes = rabbit_fifo_index:empty()}. -dehydrate_messages(Discards) -> - L0 = lqueue:to_list(Discards), - L1 = lists:map(fun({_Reason, Msg}) -> - {?NIL, rabbit_fifo:dehydrate_message(Msg)} - end, L0), - lqueue:from_list(L1). - -dehydrate_consumer(#dlx_consumer{checked_out = Checked0} = Con) -> - Checked = maps:map(fun (_, {_, Msg}) -> - {?NIL, rabbit_fifo:dehydrate_message(Msg)} - end, Checked0), - Con#dlx_consumer{checked_out = Checked}; -dehydrate_consumer(undefined) -> - undefined. +% dehydrate_messages(Discards) -> +% L0 = lqueue:to_list(Discards), +% L1 = lists:map(fun({_Reason, Msg}) -> +% {?NIL, rabbit_fifo:dehydrate_message(Msg)} +% end, L0), +% lqueue:from_list(L1). + +% dehydrate_consumer(#dlx_consumer{checked_out = Checked0} = Con) -> +% Checked = maps:map(fun (_, {_, Msg}) -> +% {?NIL, rabbit_fifo:dehydrate_message(Msg)} +% end, Checked0), +% Con#dlx_consumer{checked_out = Checked}; +% dehydrate_consumer(undefined) -> +% undefined. -spec normalize(state()) -> state(). 
diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 6f48e6c55348..7ffefeba36b9 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -122,15 +122,15 @@ all_tests() -> subscribe_redelivery_limit, subscribe_redelivery_policy, subscribe_redelivery_limit_with_dead_letter, - queue_length_in_memory_limit_basic_get, - queue_length_in_memory_limit_subscribe, - queue_length_in_memory_limit, - queue_length_in_memory_limit_returns, - queue_length_in_memory_bytes_limit_basic_get, - queue_length_in_memory_bytes_limit_subscribe, - queue_length_in_memory_bytes_limit, - queue_length_in_memory_purge, - in_memory, + % queue_length_in_memory_limit_basic_get, + % queue_length_in_memory_limit_subscribe, + % queue_length_in_memory_limit, + % queue_length_in_memory_limit_returns, + % queue_length_in_memory_bytes_limit_basic_get, + % queue_length_in_memory_bytes_limit_subscribe, + % queue_length_in_memory_bytes_limit, + % queue_length_in_memory_purge, + % in_memory, consumer_metrics, invalid_policy, delete_if_empty, diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 481dcb21512c..4b2a243b38ff 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -81,6 +81,7 @@ end_per_testcase(_TestCase, _Config) -> test_init(Name) -> init(#{name => Name, + max_in_memory_length => 0, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(Name, utf8)), release_cursor_interval => 0}). @@ -94,8 +95,9 @@ enq_enq_checkout_test(_) -> apply(meta(3), rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, #{}), State2), + ct:pal("~p", [Effects]), ?ASSERT_EFF({monitor, _, _}, Effects), - ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, Effects), + ?ASSERT_EFF({log, [1,2], _Fun, _Local}, Effects), ok. 
credit_enq_enq_checkout_settled_credit_test(_) -> @@ -105,14 +107,11 @@ credit_enq_enq_checkout_settled_credit_test(_) -> {State3, _, Effects} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {auto, 1, credited}, #{}), State2), ?ASSERT_EFF({monitor, _, _}, Effects), - Deliveries = lists:filter(fun ({send_msg, _, {delivery, _, _}, _}) -> true; - (_) -> false - end, Effects), - ?assertEqual(1, length(Deliveries)), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects), %% settle the delivery this should _not_ result in further messages being %% delivered {State4, SettledEffects} = settle(Cid, 4, 1, State3), - ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) -> + ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> false end, SettledEffects)), @@ -120,9 +119,9 @@ credit_enq_enq_checkout_settled_credit_test(_) -> %% delivery count is (1) {State5, CreditEffects} = credit(Cid, 5, 1, 1, false, State4), % ?debugFmt("CreditEffects ~p ~n~p", [CreditEffects, State4]), - ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, CreditEffects), + ?ASSERT_EFF({log, [2], _, _}, CreditEffects), {_State6, FinalEffects} = enq(6, 3, third, State5), - ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) -> + ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> false end, FinalEffects)), @@ -157,18 +156,17 @@ credit_and_drain_test(_) -> apply(meta(3), rabbit_fifo:make_checkout(Cid, {auto, 0, credited}, #{}), State2), - ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, CheckEffs), + ?ASSERT_NO_EFF({log, _, _, _}, CheckEffs), {State4, {multi, [{send_credit_reply, 0}, {send_drained, {?FUNCTION_NAME, 2}}]}, Effects} = apply(meta(4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3), ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, - delivery_count = 4}}}, + delivery_count = 4}}}, State4), - ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}, - {_, {_, second}}]}, _}, Effects), + ?ASSERT_EFF({log, [1, 2], _, _}, Effects), {_State5, EnqEffs} = enq(5, 2, third, State4), - ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, EnqEffs), + ?ASSERT_NO_EFF({log, _, _, _}, EnqEffs), ok. @@ -178,11 +176,15 @@ enq_enq_deq_test(_) -> {State1, _} = enq(1, 1, first, test_init(test)), {State2, _} = enq(2, 2, second, State1), % get returns a reply value - NumReady = 1, - {_State3, {dequeue, {0, {_, first}}, NumReady}, - [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _}]} = + % NumReady = 1, + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), + {_State3, _, + [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, + {log, [1], Fun}, + {monitor, _, _}]} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State2), + ct:pal("Out ~p", [Fun([Msg1])]), ok. 
enq_enq_deq_deq_settle_test(_) -> @@ -190,8 +192,10 @@ enq_enq_deq_deq_settle_test(_) -> {State1, _} = enq(1, 1, first, test_init(test)), {State2, _} = enq(2, 2, second, State1), % get returns a reply value - {State3, {dequeue, {0, {_, first}}, 1}, - [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _}]} = + {State3, '$ra_no_reply', + [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, + {log, [1], _}, + {monitor, _, _}]} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State2), {_State4, {dequeue, empty}} = @@ -203,9 +207,10 @@ enq_enq_checkout_get_settled_test(_) -> Cid = {?FUNCTION_NAME, self()}, {State1, _} = enq(1, 1, first, test_init(test)), % get returns a reply value - {State2, {dequeue, {0, {_, first}}, _}, _Effs} = + {State2, _, Effs} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1), + ?ASSERT_EFF({log, [1], _}, Effs), ?assertEqual(0, rabbit_fifo:query_messages_total(State2)), ok. @@ -222,8 +227,9 @@ untracked_enq_deq_test(_) -> {State1, _, _} = apply(meta(1), rabbit_fifo:make_enqueue(undefined, undefined, first), State0), - {_State2, {dequeue, {0, {_, first}}, _}, _} = + {_State2, _, Effs} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1), + ?ASSERT_EFF({log, [1], _}, Effs), ok. release_cursor_test(_) -> @@ -244,10 +250,12 @@ checkout_enq_settle_test(_) -> {monitor, _, _} | _]} = check(Cid, 1, test_init(test)), {State2, Effects0} = enq(2, 1, first, State1), ct:pal("Effects0 ~p", [Effects0]), - ?ASSERT_EFF({send_msg, _, - {delivery, ?FUNCTION_NAME, - [{0, {_, first}}]}, _}, - Effects0), + %% TODO: this should go back to a send_msg effect after optimisation + ?ASSERT_EFF({log, [2], _, _}, Effects0), + % ?ASSERT_EFF({send_msg, _, + % {delivery, ?FUNCTION_NAME, + % [{0, {_, first}}]}, _}, + % Effects0), {State3, [_Inactive]} = enq(3, 2, second, State2), {_, _Effects} = settle(Cid, 4, 0, State3), % the release cursor is the smallest raft index that does not @@ -255,46 +263,16 @@ checkout_enq_settle_test(_) -> % ?ASSERT_EFF({release_cursor, 2, _}, Effects), ok. -%% this should not be needed anymore as we don't hold pending messages -% out_of_order_enqueue_test(_) -> -% Cid = {?FUNCTION_NAME, self()}, -% {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, -% {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), -% {State2, Effects2} = enq(2, 1, first, State1), -% ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), -% % assert monitor was set up -% ?ASSERT_EFF({monitor, _, _}, Effects2), -% % enqueue seq num 3 and 4 before 2 -% {State3, Effects3} = enq(3, 3, third, State2), -% ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3), -% {State4, Effects4} = enq(4, 4, fourth, State3), -% % assert no further deliveries where made -% ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4), -% {_State5, Effects5} = enq(5, 2, second, State4), -% % assert two deliveries were now made -% ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}}, -% {_, {_, third}}, -% {_, {_, fourth}}]}, _}, -% Effects5), -% ok. - -% out_of_order_first_enqueue_test(_) -> -% Cid = {?FUNCTION_NAME, self()}, -% {State1, _} = check_n(Cid, 5, 5, test_init(test)), -% {_State2, Effects2} = enq(2, 10, first, State1), -% ?ASSERT_EFF({monitor, process, _}, Effects2), -% ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, -% Effects2), -% ok. 
- duplicate_enqueue_test(_) -> Cid = {<<"duplicate_enqueue_test">>, self()}, {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), {State2, Effects2} = enq(2, 1, first, State1), - ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), + ?ASSERT_EFF({log, [2], _, _}, Effects2), + % ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), {_State3, Effects3} = enq(3, 1, first, State2), - ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3), + ?ASSERT_NO_EFF({log, [_], _, _}, Effects3), + % ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3), ok. return_test(_) -> @@ -314,6 +292,7 @@ return_dequeue_delivery_limit_test(_) -> Init = init(#{name => test, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(test, utf8)), + max_in_memory_length => 0, release_cursor_interval => 0, delivery_limit => 1}), {State0, _} = enq(1, 1, msg, Init), @@ -321,11 +300,12 @@ return_dequeue_delivery_limit_test(_) -> Cid = {<<"cid">>, self()}, Cid2 = {<<"cid2">>, self()}, - {State1, {MsgId1, _}} = deq(2, Cid, unsettled, State0), + Msg = rabbit_fifo:make_enqueue(self(), 1, msg), + {State1, {MsgId1, _}} = deq(2, Cid, unsettled, Msg, State0), {State2, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId1]), State1), - {State3, {MsgId2, _}} = deq(2, Cid2, unsettled, State2), + {State3, {MsgId2, _}} = deq(2, Cid2, unsettled, Msg, State2), {State4, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid2, [MsgId2]), State3), ?assertMatch(#{num_messages := 0}, rabbit_fifo:overview(State4)), @@ -343,10 +323,16 @@ return_checked_out_test(_) -> {State0, [_, _]} = enq(1, 1, first, test_init(test)), {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, _Monitor, - {send_msg, _, {delivery, _, [{MsgId, _}]}, _}, + {log, [1], Fun, _}, {aux, active} | _ ]} = check_auto(Cid, 2, State0), + + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), + + [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun([Msg1]), % returning immediately checks out the same message again - {_, ok, [{send_msg, _, {delivery, _, [{_, _}]}, _}, + {_, ok, [ + {log, [1], _, _}, + % {send_msg, _, {delivery, _, [{_, _}]}, _}, {aux, active}]} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), ok. @@ -357,38 +343,46 @@ return_checked_out_limit_test(_) -> queue_resource => rabbit_misc:r("/", queue, atom_to_binary(test, utf8)), release_cursor_interval => 0, + max_in_memory_length => 0, delivery_limit => 1}), + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), {State0, [_, _]} = enq(1, 1, first, Init), {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, _Monitor, - {send_msg, _, {delivery, _, [{MsgId, _}]}, _}, + {log, [1], Fun1, _}, {aux, active} | _ ]} = check_auto(Cid, 2, State0), + [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), % returning immediately checks out the same message again - {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}, + {State2, ok, [ + {log, [1], Fun2, _}, {aux, active}]} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), - {#rabbit_fifo{} = State, ok, [_ModCallEffDeadLetterCounter | _ReleaseEff]} = + [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}] = Fun2([Msg1]), + {#rabbit_fifo{} = State, ok, _} = apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId2]), State2), ?assertEqual(0, rabbit_fifo:query_messages_total(State)), ok. 
return_auto_checked_out_test(_) -> Cid = {<<"cid">>, self()}, + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), {State00, [_, _]} = enq(1, 1, first, test_init(test)), {State0, [_]} = enq(2, 2, second, State00), % it first active then inactive as the consumer took on but cannot take % any more {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, _Monitor, - {send_msg, _, {delivery, _, [{MsgId, _}]}, _}, + {log, [1], Fun1, _}, {aux, active}, {aux, inactive} ]} = check_auto(Cid, 2, State0), + [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), % return should include another delivery {_State2, _, Effects} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), - ?ASSERT_EFF({send_msg, _, - {delivery, _, [{_, {#{delivery_count := 1}, first}}]}, _}, - Effects), + [{log, [1], Fun2, _} | _] = Effects, + + [{send_msg, _, {delivery, _, [{_MsgId2, {#{delivery_count := 1}, first}}]}, _}] + = Fun2([Msg1]), ok. cancelled_checkout_empty_queue_test(_) -> @@ -399,7 +393,6 @@ cancelled_checkout_empty_queue_test(_) -> {State2, _, Effects} = apply(meta(3), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), ?assertEqual(0, map_size(State2#rabbit_fifo.consumers)), ?assertEqual(0, priority_queue:len(State2#rabbit_fifo.service_queue)), - ct:pal("Effs: ~p", [Effects]), ?ASSERT_EFF({release_cursor, _, _}, Effects), ok. @@ -407,21 +400,21 @@ cancelled_checkout_out_test(_) -> Cid = {<<"cid">>, self()}, {State00, [_, _]} = enq(1, 1, first, test_init(test)), {State0, [_]} = enq(2, 2, second, State00), - {State1, _} = check_auto(Cid, 2, State0), + {State1, _} = check_auto(Cid, 3, State0),%% prefetch of 1 % cancelled checkout should not return pending messages to queue - {State2, _, _} = apply(meta(3), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), + {State2, _, _} = apply(meta(4), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), ?assertEqual(1, lqueue:len(State2#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State2#rabbit_fifo.returns)), ?assertEqual(0, priority_queue:len(State2#rabbit_fifo.service_queue)), {State3, {dequeue, empty}} = - apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State2), + apply(meta(5), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State2), %% settle {State4, ok, _} = - apply(meta(4), rabbit_fifo:make_settle(Cid, [0]), State3), + apply(meta(6), rabbit_fifo:make_settle(Cid, [0]), State3), - {_State, {dequeue, {_, {_, second}}, _}, _} = - apply(meta(5), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4), + {_State, _, [_, {log, [2], _Fun} | _]} = + apply(meta(7), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4), ok. 
down_with_noproc_consumer_returns_unsettled_test(_) -> @@ -466,10 +459,11 @@ down_with_noconnection_marks_suspect_and_node_is_monitored_test(_) -> down_with_noconnection_returns_unack_test(_) -> Pid = spawn(fun() -> ok end), Cid = {<<"down_with_noconnect">>, Pid}, + Msg = rabbit_fifo:make_enqueue(self(), 1, second), {State0, _} = enq(1, 1, second, test_init(test)), ?assertEqual(1, lqueue:len(State0#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State0#rabbit_fifo.returns)), - {State1, {_, _}} = deq(2, Cid, unsettled, State0), + {State1, {_, _}} = deq(2, Cid, unsettled, Msg, State0), ?assertEqual(0, lqueue:len(State1#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State1#rabbit_fifo.returns)), {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1), @@ -495,32 +489,34 @@ discarded_message_without_dead_letter_handler_is_removed_test(_) -> Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()}, {State0, [_, _]} = enq(1, 1, first, test_init(test)), {State1, Effects1} = check_n(Cid, 2, 10, State0), - ?ASSERT_EFF({send_msg, _, - {delivery, _, [{0, {_, first}}]}, _}, - Effects1), + ?ASSERT_EFF({log, [1], _Fun, _}, Effects1), {_State2, _, Effects2} = apply(meta(1), rabbit_fifo:make_discard(Cid, [0]), State1), - ?assertNoEffect({send_msg, _, - {delivery, _, [{0, {_, first}}]}, _}, - Effects2), + ?ASSERT_NO_EFF({log, [1], _Fun, _}, Effects2), ok. discarded_message_with_dead_letter_handler_emits_log_effect_test(_) -> - Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()}, + Cid = {<<"cid1">>, self()}, State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), + max_in_memory_length => 0, dead_letter_handler => {at_most_once, {somemod, somefun, [somearg]}}}), + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), {State0, [_, _]} = enq(1, 1, first, State00), {State1, Effects1} = check_n(Cid, 2, 10, State0), - ?ASSERT_EFF({send_msg, _, - {delivery, _, [{0, {_, first}}]}, _}, - Effects1), + ?ASSERT_EFF({log, [1], _, _}, Effects1), {_State2, _, Effects2} = apply(meta(1), rabbit_fifo:make_discard(Cid, [0]), State1), % assert mod call effect with appended reason and message - ?ASSERT_EFF({log, _RaftIdxs, _}, Effects2), + {value, {log, [1], Fun}} = lists:search(fun (E) -> element(1, E) == log end, + Effects2), + ?assertMatch([{mod_call,somemod,somefun,[somearg,rejected,[first]]}], Fun([Msg1])), ok. +get_log_eff(Effs) -> + {value, Log} = lists:search(fun (E) -> element(1, E) == log end, Effs), + Log. + mixed_send_msg_and_log_effects_are_correctly_ordered_test(_) -> Cid = {cid(?FUNCTION_NAME), self()}, State00 = init(#{name => test, @@ -530,11 +526,16 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(_) -> {at_most_once, {somemod, somefun, [somearg]}}}), %% enqueue two messages + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), {State0, _} = enq(1, 1, first, State00), + Msg2 = rabbit_fifo:make_enqueue(self(), 2, snd), {State1, _} = enq(2, 2, snd, State0), {_State2, Effects1} = check_n(Cid, 3, 10, State1), ct:pal("Effects ~w", [Effects1]), + {log, [1, 2], Fun, _} = get_log_eff(Effects1), + [{send_msg, _, {delivery, _Cid, [{0,{0,first}},{1,{0,snd}}]}, + [local,ra_event]}] = Fun([Msg1, Msg2]), %% in this case we expect no send_msg effect as any in memory messages %% should be weaved into the send_msg effect emitted by the log effect %% later. 
hence this is all we can assert on @@ -546,10 +547,13 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(_) -> tick_test(_) -> Cid = {<<"c">>, self()}, Cid2 = {<<"c2">>, self()}, + + Msg1 = rabbit_fifo:make_enqueue(self(), 1, <<"fst">>), + Msg2 = rabbit_fifo:make_enqueue(self(), 2, <<"snd">>), {S0, _} = enq(1, 1, <<"fst">>, test_init(?FUNCTION_NAME)), {S1, _} = enq(2, 2, <<"snd">>, S0), - {S2, {MsgId, _}} = deq(3, Cid, unsettled, S1), - {S3, {_, _}} = deq(4, Cid2, unsettled, S2), + {S2, {MsgId, _}} = deq(3, Cid, unsettled, Msg1, S1), + {S3, {_, _}} = deq(4, Cid2, unsettled, Msg2, S2), {S4, _, _} = apply(meta(5), rabbit_fifo:make_return(Cid, [MsgId]), S3), [{mod_call, rabbit_quorum_queue, handle_tick, @@ -574,7 +578,7 @@ delivery_query_returns_deliveries_test(_) -> Entries = lists:zip(Indexes, Commands), {State, _Effects} = run_log(test_init(help), Entries), % 3 deliveries are returned - [{0, {_, one}}] = rabbit_fifo:get_checked_out(Cid, 0, 0, State), + [{0, {_, _}}] = rabbit_fifo:get_checked_out(Cid, 0, 0, State), [_, _, _] = rabbit_fifo:get_checked_out(Cid, 1, 3, State), ok. @@ -639,9 +643,9 @@ purge_test(_) -> {State2, {purge, 1}, _} = apply(meta(2), rabbit_fifo:make_purge(), State1), {State3, _} = enq(3, 2, second, State2), % get returns a reply value - {_State4, {dequeue, {0, {_, second}}, _}, - [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _}]} = + {_State4, _, Effs} = apply(meta(4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State3), + ?ASSERT_EFF({log, [3], _}, Effs), ok. purge_with_checkout_test(_) -> @@ -1311,6 +1315,7 @@ register_enqueuer_test(_) -> queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), max_length => 2, + max_in_memory_length => 0, overflow_strategy => reject_publish}), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), @@ -1335,20 +1340,23 @@ register_enqueuer_test(_) -> %% remove two messages this should make the queue fall below the 0.8 limit - {State7, {dequeue, _, _}, _Efx7} = + {State7, _, Efx7} = apply(meta(7), rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State6), - ct:pal("Efx7 ~p", [_Efx7]), - {State8, {dequeue, _, _}, Efx8} = + ?ASSERT_EFF({log, [_], _}, Efx7), + % ct:pal("Efx7 ~p", [_Efx7]), + {State8, _, Efx8} = apply(meta(8), rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State7), - ct:pal("Efx8 ~p", [Efx8]), + ?ASSERT_EFF({log, [_], _}, Efx8), + % ct:pal("Efx8 ~p", [Efx8]), %% validate all registered enqueuers are notified of overflow state ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx8), ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid2, Efx8), - {_State9, {dequeue, _, _}, Efx9} = + {_State9, _, Efx9} = apply(meta(9), rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State8), + ?ASSERT_EFF({log, [_], _}, Efx9), ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid1, Efx9), ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid2, Efx9), ok. @@ -1358,6 +1366,7 @@ reject_publish_purge_test(_) -> queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), max_length => 2, + max_in_memory_length => 0, overflow_strategy => reject_publish}), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), @@ -1372,10 +1381,11 @@ reject_publish_purge_test(_) -> ok. 
reject_publish_applied_after_limit_test(_) -> + QName = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), InitConf = #{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)) - }, + max_in_memory_length => 0, + queue_resource => QName + }, State0 = init(InitConf), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), @@ -1387,10 +1397,10 @@ reject_publish_applied_after_limit_test(_) -> ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), %% apply new config Conf = #{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => QName, max_length => 2, overflow_strategy => reject_publish, + max_in_memory_length => 0, dead_letter_handler => undefined }, {State5, ok, Efx1} = apply(meta(5), rabbit_fifo:make_update_config(Conf), State4), @@ -1458,11 +1468,15 @@ enq(Idx, MsgSeq, Msg, State) -> strip_reply( apply(meta(Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)). -deq(Idx, Cid, Settlement, State0) -> - {State, {dequeue, {MsgId, Msg}, _}, _} = +deq(Idx, Cid, Settlement, Msg, State0) -> + {State, _, Effs} = apply(meta(Idx), rabbit_fifo:make_checkout(Cid, {dequeue, Settlement}, #{}), State0), + {value, {log, [_Idx], Fun}} = lists:search(fun(E) -> element(1, E) == log end, Effs), + [{reply, _From, + {wrap_reply, {dequeue, {MsgId, _}, _}}}] = Fun([Msg]), + {State, {MsgId, Msg}}. check_n(Cid, Idx, N, State) -> @@ -1635,13 +1649,17 @@ queue_ttl_test(_) -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}] = rabbit_fifo:tick(Now + 2500, S1Deq), %% Enqueue message, - {E1, _, _} = apply(meta(2, Now), - rabbit_fifo:make_enqueue(self(), 1, msg1), S0), + Msg = rabbit_fifo:make_enqueue(self(), 1, msg1), + {E1, _, _} = apply(meta(2, Now), Msg, S0), Deq = {<<"deq1">>, self()}, - {E2, {dequeue, {MsgId, _}, _}, _} = + {E2, _, Effs2} = apply(meta(3, Now), rabbit_fifo:make_checkout(Deq, {dequeue, unsettled}, #{}), E1), + + {log, [2], Fun2} = get_log_eff(Effs2), + [{reply, _From, + {wrap_reply, {dequeue, {MsgId, _}, _}}}] = Fun2([Msg]), {E3, _, _} = apply(meta(3, Now + 1000), rabbit_fifo:make_settle(Deq, [MsgId]), E2), [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1500, E3), @@ -1696,10 +1714,10 @@ query_peek_test(_) -> ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(1, State0)), {State1, _} = enq(1, 1, first, State0), {State2, _} = enq(2, 2, second, State1), - ?assertMatch({ok, [_, _ | first]}, rabbit_fifo:query_peek(1, State1)), + ?assertMatch({ok, [1 | _]}, rabbit_fifo:query_peek(1, State1)), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(2, State1)), - ?assertMatch({ok, [_, _ | first]}, rabbit_fifo:query_peek(1, State2)), - ?assertMatch({ok, [_, _ | second]}, rabbit_fifo:query_peek(2, State2)), + ?assertMatch({ok, [1 | _]}, rabbit_fifo:query_peek(1, State2)), + ?assertMatch({ok, [2 | _]}, rabbit_fifo:query_peek(2, State2)), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(3, State2)), ok. diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index f2cd1028fd4d..5f3a9bca2626 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1667,10 +1667,7 @@ enqueue_gen(Pid) -> enqueue_gen(Pid, 10, 1). 
enqueue_gen(Pid, _Enq, _Del) -> - ?LET(E, {enqueue, Pid, enqueue, - % frequency([{Enq, enqueue}, - % {Del, delay}]), - msg_gen()}, E). + ?LET(E, {enqueue, Pid, enqueue, msg_gen()}, E). %% It's fair to assume that every message enqueued is a #basic_message. %% That's what the channel expects and what rabbit_quorum_queue invokes rabbit_fifo_client with. @@ -1699,7 +1696,8 @@ checkout_gen(Pid) -> enq_body_fun = {0, fun ra_lib:id/1}, config :: map(), log = [] :: list(), - down = #{} :: #{pid() => noproc | noconnection} + down = #{} :: #{pid() => noproc | noconnection}, + enq_cmds = #{} :: #{ra:index() => rabbit_fifo:enqueue()} }). expand(Ops, Config) -> @@ -1846,12 +1844,20 @@ handle_op({checkout_dlx, Prefetch}, #t{config = #{dead_letter_handler := at_leas do_apply(Cmd, #t{effects = Effs, index = Index, state = S0, down = Down, + enq_cmds = EnqCmds0, log = Log} = T) -> case Cmd of - {enqueue, Pid, _, _} when is_map_key(Pid, Down) -> + {enqueue, Pid, _, _Msg} when is_map_key(Pid, Down) -> %% down T; _ -> + EnqCmds = case Cmd of + {enqueue, _Pid, _, _Msg} -> + EnqCmds0#{Index => Cmd}; + _ -> + EnqCmds0 + end, + {St, Effects} = case rabbit_fifo:apply(meta(Index), Cmd, S0) of {S, _, E} when is_list(E) -> {S, E}; @@ -1863,23 +1869,28 @@ do_apply(Cmd, #t{effects = Effs, T#t{state = St, index = Index + 1, - effects = enq_effs(Effects, Effs), + enq_cmds = EnqCmds, + effects = enq_effs(Effects, Effs, EnqCmds), log = [Cmd | Log]} end. -enq_effs([], Q) -> Q; -enq_effs([{send_msg, P, {delivery, CTag, Msgs}, _Opts} | Rem], Q) -> +enq_effs([], Q, _) -> Q; +enq_effs([{send_msg, P, {delivery, CTag, Msgs}, _Opts} | Rem], Q, Cmds) -> MsgIds = [I || {I, _} <- Msgs], %% always make settle commands by default %% they can be changed depending on the input event later Cmd = rabbit_fifo:make_settle({CTag, P}, MsgIds), - enq_effs(Rem, queue:in(Cmd, Q)); -enq_effs([{send_msg, _, {dlx_delivery, Msgs}, _Opts} | Rem], Q) -> + enq_effs(Rem, queue:in(Cmd, Q), Cmds); +enq_effs([{log, RaIdxs, Fun, _} | Rem], Q, Cmds) -> + M = [maps:get(I, Cmds) || I <- RaIdxs], + Effs = Fun(M), + enq_effs(Effs ++ Rem, Q, Cmds); +enq_effs([{send_msg, _, {dlx_delivery, Msgs}, _Opts} | Rem], Q, Cmds) -> MsgIds = [I || {I, _} <- Msgs], Cmd = rabbit_fifo_dlx:make_settle(MsgIds), - enq_effs(Rem, queue:in(Cmd, Q)); -enq_effs([_ | Rem], Q) -> - enq_effs(Rem, Q). + enq_effs(Rem, queue:in(Cmd, Q), Cmds); +enq_effs([_ | Rem], Q, Cmds) -> + enq_effs(Rem, Q, Cmds). %% Utility @@ -1908,7 +1919,8 @@ run_snapshot_test(Conf, Commands, Invariant) -> run_snapshot_test0(Conf, Commands) -> run_snapshot_test0(Conf, Commands, fun (_) -> true end). 
-run_snapshot_test0(Conf, Commands, Invariant) -> +run_snapshot_test0(Conf0, Commands, Invariant) -> + Conf = Conf0#{max_in_memory_length => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), {State0, Effects} = run_log(test_init(Conf), Entries, Invariant), From 953fe90ac736303cd642c1c2bcab0f57edf66064 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 24 Jan 2022 12:51:17 +0100 Subject: [PATCH 45/97] Fix pipeline as done in https://github.com/rabbitmq/rabbitmq-server/commit/efcd88165800d6603bda7d9bbcc34ef63e0bcf6f --- deps/rabbit/BUILD.bazel | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index 399253c01b9e..101b8598f27d 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -726,7 +726,7 @@ suites = [ "src/rabbit_fifo_dlx.hrl", ], deps = [ - "//deps/rabbit_common:bazel_erlang_lib", + "//deps/rabbit_common:erlang_app", ], ), rabbitmq_integration_suite( @@ -743,10 +743,10 @@ suites = [ "src/rabbit_fifo_dlx.hrl", ], runtime_deps = [ - "@ra//:bazel_erlang_lib", + "@ra//:erlang_app", ], deps = [ - "@proper//:bazel_erlang_lib", + "@proper//:erlang_app", ], ), rabbitmq_suite( From 9d784fac8916ccb8c614d0e1b24f0c8a38588750 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 24 Jan 2022 13:00:51 +0000 Subject: [PATCH 46/97] rabbit_fifo: optimise enqueue path --- deps/rabbit/src/rabbit_fifo.erl | 128 ++++++++++++++++---------------- 1 file changed, 65 insertions(+), 63 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 4885970b2595..956fa65cefed 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -266,7 +266,8 @@ apply(#{index := Idx} = Meta, msg = _Msg}, #?MODULE{consumers = Cons0, messages = Messages, - ra_indexes = Indexes0} = State00) -> + ra_indexes = Indexes0, + enqueue_count = EnqCount} = State00) -> case Cons0 of #{ConsumerId := #consumer{checked_out = Checked0} = Con0} when is_map_key(MsgId, Checked0) -> @@ -284,13 +285,9 @@ apply(#{index := Idx} = Meta, ConsumerId, Con, State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), - messages = lqueue:in(IdxMsg, Messages)}), - - %% We have to increment the enqueue counter to ensure release cursors - %% are generated - State3 = incr_enqueue_count(State2), - - {State, Ret, Effs} = checkout(Meta, State0, State3, []), + messages = lqueue:in(IdxMsg, Messages), + enqueue_count = EnqCount + 1}), + {State, Ret, Effs} = checkout(Meta, State0, State2, []), update_smallest_raft_index(Idx, Ret, maybe_store_dehydrated_state(Idx, State), Effs); @@ -1403,8 +1400,7 @@ apply_enqueue(#{index := RaftIdx, system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of {ok, State1, Effects1} -> - State2 = incr_enqueue_count(incr_total(State1)), - {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects1, false), {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects}; {out_of_sequence, State, Effects} -> {State, not_enqueued, Effects}; @@ -1415,18 +1411,8 @@ apply_enqueue(#{index := RaftIdx, decr_total(#?MODULE{messages_total = Tot} = State) -> State#?MODULE{messages_total = Tot - 1}. -incr_total(#?MODULE{messages_total = Tot} = State) -> - State#?MODULE{messages_total = Tot + 1}. 
- drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> case take_next_msg(State0) of - % {?PREFIX_MEM_MSG(Header), State1} -> - % State2 = subtract_in_memory_counts(Header, - % add_bytes_drop(Header, State1)), - % {decr_total(State2), Effects}; - % {?DISK_MSG(Header), State1} -> - % State2 = add_bytes_drop(Header, State1), - % {decr_total(State2), Effects}; {?INDEX_MSG(Idx, ?DISK_MSG(Header)) = IdxMsg, State1} -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), State2 = State1#?MODULE{ra_indexes = Indexes}, @@ -1439,27 +1425,27 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> {State0, Effects} end. -enqueue(RaftIdx, Ts, RawMsg, #?MODULE{messages = Messages} = State0) -> - %% the initial header is an integer only - it will get expanded to a map - %% when the next required key is added - Header0 = message_size(RawMsg), - Header = maybe_set_msg_ttl(RawMsg, Ts, Header0, State0), - %% TODO: enqueue as in memory message if there are no ready messages - %% and there are consumers with credit available. - %% I.e. the message will be immedately delivered so no benefit - %% in reading it back from the log - Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), - % case evaluate_memory_limit(Header, State0) of - % true -> - % % indexed message with header map - % {State0, - % ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}; - % false -> - % {add_in_memory_counts(Header, State0), - % ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} - % end, - State = add_bytes_enqueue(Header, State0), - State#?MODULE{messages = lqueue:in(Msg, Messages)}. +% enqueue(RaftIdx, Ts, RawMsg, #?MODULE{messages = Messages} = State0) -> +% %% the initial header is an integer only - it will get expanded to a map +% %% when the next required key is added +% Header0 = message_size(RawMsg), +% Header = maybe_set_msg_ttl(RawMsg, Ts, Header0, State0), +% %% TODO: enqueue as in memory message if there are no ready messages +% %% and there are consumers with credit available. +% %% I.e. the message will be immedately delivered so no benefit +% %% in reading it back from the log +% Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), +% % case evaluate_memory_limit(Header, State0) of +% % true -> +% % % indexed message with header map +% % {State0, +% % ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}; +% % false -> +% % {add_in_memory_counts(Header, State0), +% % ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} +% % end, +% State = add_bytes_enqueue(Header, State0), +% State#?MODULE{messages = lqueue:in(Msg, Messages)}. maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, _, Header, @@ -1497,29 +1483,29 @@ update_expiry_header(RaCmdTs, TTL, Header) -> update_expiry_header(ExpiryTs, Header) -> update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). -incr_enqueue_count(#?MODULE{enqueue_count = EC, - cfg = #cfg{release_cursor_interval = {_Base, C}} - } = State0) when EC >= C -> - %% this will trigger a dehydrated version of the state to be stored - %% at this raft index for potential future snapshot generation - %% Q: Why don't we just stash the release cursor here? - %% A: Because it needs to be the very last thing we do and we - %% first needs to run the checkout logic. - State0#?MODULE{enqueue_count = 0}; -incr_enqueue_count(#?MODULE{enqueue_count = C} = State) -> - State#?MODULE{enqueue_count = C + 1}. 
+% eval_enqueue_count(#?MODULE{enqueue_count = EC, +% cfg = #cfg{release_cursor_interval = {_Base, C}} +% } = State0) when EC >= C -> +% %% this will trigger a dehydrated version of the state to be stored +% %% at this raft index for potential future snapshot generation +% %% Q: Why don't we just stash the release cursor here? +% %% A: Because it needs to be the very last thing we do and we +% %% first needs to run the checkout logic. +% State0#?MODULE{enqueue_count = 0}; +% eval_enqueue_count(#?MODULE{} = State) -> +% State. +% % State#?MODULE{enqueue_count = C + 1}. maybe_store_dehydrated_state(RaftIdx, #?MODULE{cfg = - #cfg{release_cursor_interval = {Base, _}} - = Cfg, - ra_indexes = _Indexes, - enqueue_count = 0, - release_cursors = Cursors0} = State0) -> + #cfg{release_cursor_interval = {Base, C}} = Cfg, + enqueue_count = EC, + release_cursors = Cursors0} = State0) + when EC >= C -> case messages_total(State0) of 0 -> %% message must have been immediately dropped - State0; + State0#?MODULE{enqueue_count = 0}; Total -> Interval = case Base of 0 -> 0; @@ -1531,25 +1517,39 @@ maybe_store_dehydrated_state(RaftIdx, Dehydrated = dehydrate_state(State), Cursor = {release_cursor, RaftIdx, Dehydrated}, Cursors = lqueue:in(Cursor, Cursors0), - State#?MODULE{release_cursors = Cursors} + State#?MODULE{enqueue_count = 0, + release_cursors = Cursors} end; maybe_store_dehydrated_state(_RaftIdx, State) -> State. -maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, State0) -> +maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, + #?MODULE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + messages = Messages, + messages_total = Total} = State0) -> % direct enqueue without tracking - State = enqueue(RaftIdx, Ts, RawMsg, State0), + Size = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), + State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = lqueue:in(Msg, Messages) + }, {ok, State, Effects}; maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, #?MODULE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, enqueuers = Enqueuers0, - messages = Messages} = State0) -> + messages = Messages, + messages_total = Total} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, {Res, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, - RawMsg, Effects0, State1), + RawMsg, Effects0, State1), {Res, State, [{monitor, process, From} | Effects]}; #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> % it is the next expected seqno @@ -1558,6 +1558,8 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, messages = lqueue:in(Msg, Messages), enqueuers = Enqueuers0#{From => Enq}}, {ok, State, Effects0}; From 522378614780934b2b53a536f1560228b1818715 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 24 Jan 2022 13:09:16 +0000 Subject: [PATCH 47/97] rabbit_fifo: remove some commented code --- deps/rabbit/src/rabbit_fifo.erl | 66 ++++++--------------------------- 1 file changed, 11 insertions(+), 55 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 
956fa65cefed..793f4e6e6108 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -280,16 +280,13 @@ apply(#{index := Idx} = Meta, IdxMsg = ?INDEX_MSG(Idx, ?DISK_MSG(Header)), Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), credit = increase_credit(Con0, 1)}, - State2 = update_or_remove_sub( - Meta, - ConsumerId, - Con, - State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), - messages = lqueue:in(IdxMsg, Messages), - enqueue_count = EnqCount + 1}), + State1 = State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), + messages = lqueue:in(IdxMsg, Messages), + enqueue_count = EnqCount + 1}, + State2 = update_or_remove_sub(Meta, ConsumerId, Con, State1), {State, Ret, Effs} = checkout(Meta, State0, State2, []), update_smallest_raft_index(Idx, Ret, - maybe_store_dehydrated_state(Idx, State), + maybe_store_release_cursor(Idx, State), Effs); _ -> {State00, ok} @@ -970,9 +967,6 @@ handle_aux(leader, cast, {#return{msg_ids = MsgIds, {L, Acc} end %% TODO: handle old formats? - - % (MsgId, IdxMsg, {L0, Acc}) -> - % {L0, [{MsgId, IdxMsg} | Acc]} end, {Log0, []}, maps:with(MsgIds, Checked)), Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, @@ -1031,8 +1025,6 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, %% TODO: handle requeue? #enqueue{msg = Msg} = Cmd, {reply, {ok, {Header, Msg}}, Aux0, Log}; - % {ok, ?INDEX_MSG(_Idx, ?MSG(Header, Msg))} -> - % {reply, {ok, {Header, Msg}}, Aux0, Log0}; Err -> {reply, Err, Aux0, Log0} end; @@ -1401,7 +1393,7 @@ apply_enqueue(#{index := RaftIdx, case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of {ok, State1, Effects1} -> {State, ok, Effects} = checkout(Meta, State0, State1, Effects1, false), - {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects}; + {maybe_store_release_cursor(RaftIdx, State), ok, Effects}; {out_of_sequence, State, Effects} -> {State, not_enqueued, Effects}; {duplicate, State, Effects} -> @@ -1425,28 +1417,6 @@ drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> {State0, Effects} end. -% enqueue(RaftIdx, Ts, RawMsg, #?MODULE{messages = Messages} = State0) -> -% %% the initial header is an integer only - it will get expanded to a map -% %% when the next required key is added -% Header0 = message_size(RawMsg), -% Header = maybe_set_msg_ttl(RawMsg, Ts, Header0, State0), -% %% TODO: enqueue as in memory message if there are no ready messages -% %% and there are consumers with credit available. -% %% I.e. the message will be immedately delivered so no benefit -% %% in reading it back from the log -% Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), -% % case evaluate_memory_limit(Header, State0) of -% % true -> -% % % indexed message with header map -% % {State0, -% % ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header))}; -% % false -> -% % {add_in_memory_counts(Header, State0), -% % ?INDEX_MSG(RaftIdx, ?MSG(Header, RawMsg))} -% % end, -% State = add_bytes_enqueue(Header, State0), -% State#?MODULE{messages = lqueue:in(Msg, Messages)}. - maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, _, Header, #?MODULE{cfg = #cfg{msg_ttl = undefined}}) -> @@ -1483,24 +1453,10 @@ update_expiry_header(RaCmdTs, TTL, Header) -> update_expiry_header(ExpiryTs, Header) -> update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). 
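apply_enqueue above reacts to three possible results from maybe_enqueue, all driven by the publisher's sequence number. Condensed into a hypothetical helper (classify_seqno is not a real function; the comparisons mirror the enqueuer clauses):

    %% sketch: how an incoming seqno relates to the enqueuer's expected next_seqno
    classify_seqno(MsgSeqNo, ExpectedNext) when MsgSeqNo =:= ExpectedNext ->
        ok;              %% enqueue the message and bump next_seqno
    classify_seqno(MsgSeqNo, ExpectedNext) when MsgSeqNo > ExpectedNext ->
        out_of_sequence; %% a gap: reject so the client can resend in order
    classify_seqno(_MsgSeqNo, _ExpectedNext) ->
        duplicate.       %% already applied, dropped so resends stay idempotent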
-% eval_enqueue_count(#?MODULE{enqueue_count = EC, -% cfg = #cfg{release_cursor_interval = {_Base, C}} -% } = State0) when EC >= C -> -% %% this will trigger a dehydrated version of the state to be stored -% %% at this raft index for potential future snapshot generation -% %% Q: Why don't we just stash the release cursor here? -% %% A: Because it needs to be the very last thing we do and we -% %% first needs to run the checkout logic. -% State0#?MODULE{enqueue_count = 0}; -% eval_enqueue_count(#?MODULE{} = State) -> -% State. -% % State#?MODULE{enqueue_count = C + 1}. - -maybe_store_dehydrated_state(RaftIdx, - #?MODULE{cfg = - #cfg{release_cursor_interval = {Base, C}} = Cfg, - enqueue_count = EC, - release_cursors = Cursors0} = State0) +maybe_store_release_cursor(RaftIdx, + #?MODULE{cfg = #cfg{release_cursor_interval = {Base, C}} = Cfg, + enqueue_count = EC, + release_cursors = Cursors0} = State0) when EC >= C -> case messages_total(State0) of 0 -> @@ -1520,7 +1476,7 @@ maybe_store_dehydrated_state(RaftIdx, State#?MODULE{enqueue_count = 0, release_cursors = Cursors} end; -maybe_store_dehydrated_state(_RaftIdx, State) -> +maybe_store_release_cursor(_RaftIdx, State) -> State. maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, From d44666d48c28870e9ab262dcc509c74cd819de84 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 24 Jan 2022 15:24:38 +0100 Subject: [PATCH 48/97] Reduce dlx consumer prefetch Messages in the dlx worker are kept in memory. If dlx workers do not receive publisher confirms from target queues, a high prefetch combined with many queues (i.e. many dlx workers) will consume a lot of memory. Reducing prefetch will substantially slow down dead-lettering throughput however. --- deps/rabbit/BUILD.bazel | 2 +- deps/rabbit/Makefile | 2 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 13 ++----------- 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index 101b8598f27d..d75ee211234b 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -145,7 +145,7 @@ _APP_ENV = """[ {tracking_execution_timeout, 15000}, {stream_messages_soft_limit, 256}, {track_auth_attempt_source, false}, - {dead_letter_worker_consumer_prefetch, 1000}, + {dead_letter_worker_consumer_prefetch, 32}, {dead_letter_worker_publisher_confirm_timeout, 120000} ] """ diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index 3b5bba96b6e7..32388e25717e 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -123,7 +123,7 @@ define PROJECT_ENV {tracking_execution_timeout, 15000}, {stream_messages_soft_limit, 256}, {track_auth_attempt_source, false}, - {dead_letter_worker_consumer_prefetch, 1000}, + {dead_letter_worker_consumer_prefetch, 32}, {dead_letter_worker_publisher_confirm_timeout, 120000} ] endef diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index b7b59d7fda0e..439710e725a8 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -178,17 +178,8 @@ discard(Msgs, Reason, at_least_once, State0) -spec checkout(dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. 
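As the commit message notes, the lower default of 32 bounds per-worker memory at the cost of dead-lettering throughput. Operators who prefer throughput can presumably raise the app env key changed above, for instance via advanced.config (sketch only; the value 256 is arbitrary):

    %% advanced.config (sketch)
    [
     {rabbit, [
       {dead_letter_worker_consumer_prefetch, 256}
     ]}
    ].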
-checkout(at_least_once, #?MODULE{consumer = undefined, - discards = Discards} = State) -> - case lqueue:is_empty(Discards) of - true -> - ok; - false -> - rabbit_log:warning("there are dead-letter messages but no dead-letter consumer") - end, - {State, []}; -checkout(at_least_once, State0) -> - checkout0(checkout_one(State0), {[],[]}); +checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> + checkout0(checkout_one(State), {[],[]}); checkout(_, State) -> {State, []}. From 62f3d40da092f4f9b80dcd353f72b9368e256a92 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 24 Jan 2022 16:39:22 +0000 Subject: [PATCH 49/97] rabbit_fifo: cache incoming message if it can be immedately delivered This saves us re-reading the message from the Ra log whenever a message can be delivered as part of the enqueue apply evaluation. --- deps/rabbit/src/rabbit_fifo.erl | 226 +++++++------------------ deps/rabbit/src/rabbit_fifo.hrl | 2 +- deps/rabbit/test/rabbit_fifo_SUITE.erl | 16 +- 3 files changed, 73 insertions(+), 171 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 793f4e6e6108..a1a1803a6366 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1373,7 +1373,8 @@ activate_next_consumer(#?MODULE{consumers = Cons, -maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0, Reason) -> +maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, + Effects0, Reason) -> case Reason of consumer_cancel -> {update_or_remove_sub(Meta, ConsumerId, @@ -1513,11 +1514,19 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, + MsgCache = case can_immediately_deliver(State0) of + true -> + {RaftIdx, RawMsg}; + false -> + undefined + end, State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, enqueue_count = EnqCount + 1, messages_total = Total + 1, messages = lqueue:in(Msg, Messages), - enqueuers = Enqueuers0#{From => Enq}}, + enqueuers = Enqueuers0#{From => Enq}, + msg_cache = MsgCache + }, {ok, State, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo > Next -> @@ -1721,20 +1730,6 @@ return_one(Meta, MsgId, Msg0, Effects0} end. -% is_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_))) when is_integer(RaftIdx) -> -% true; -% is_disk_msg(?DISK_MSG(_)) -> -% true; -% is_disk_msg(_) -> -% false. - -% to_disk_msg(?INDEX_MSG(RaftIdx, ?DISK_MSG(_)) = Msg) when is_integer(RaftIdx) -> -% Msg; -% to_disk_msg(?INDEX_MSG(RaftIdx, ?MSG(Header, _))) when is_integer(RaftIdx) -> -% ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); -% to_disk_msg(?PREFIX_MEM_MSG(Header)) -> -% ?DISK_MSG(Header). - return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, #consumer{checked_out = Checked} = Con) -> State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, @@ -1751,9 +1746,11 @@ checkout(#{index := Index} = Meta, State0, Effects0, HandleConsumerChanges) -> {#?MODULE{cfg = #cfg{dead_letter_handler = DLH}, dlx = DlxState0} = State1, ExpiredMsg, Effects1} = - checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), + checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), - State2 = State1#?MODULE{dlx = DlxState}, + %% TODO: only update dlx state if it has changed? 
+ State2 = State1#?MODULE{msg_cache = undefined, %% by this time the cache should be used + dlx = DlxState}, Effects2 = DlxDeliveryEffects ++ Effects1, {State, DroppedMsg, Effects} = evaluate_limit(Index, false, OldState, State2, Effects2), case {DroppedMsg, ExpiredMsg} of @@ -1782,33 +1779,17 @@ checkout0(Meta, {success, ConsumerId, MsgId, SendAcc0) when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {MsgId, Header}}, SendAcc = maps:update_with(ConsumerId, - fun ({InMem, LogMsgs}) -> - {InMem, [DelMsg | LogMsgs]} - end, {[], [DelMsg]}, SendAcc0), + fun (LogMsgs) -> + [DelMsg | LogMsgs] + end, [DelMsg], SendAcc0), checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); -% checkout0(Meta, {success, ConsumerId, MsgId, -% ?INDEX_MSG(Idx, ?MSG(Header, Msg)), State, Effects}, -% SendAcc0) when is_integer(Idx) -> -% DelMsg = {MsgId, {Header, Msg}}, -% SendAcc = maps:update_with(ConsumerId, -% fun ({InMem, LogMsgs}) -> -% {[DelMsg | InMem], LogMsgs} -% end, {[DelMsg], []}, SendAcc0), -% checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); -% checkout0(Meta, {success, _ConsumerId, _MsgId, ?TUPLE(_, _), State, Effects}, -% SendAcc) -> -% %% Do not append delivery effect for prefix messages. -% %% Prefix messages do not exist anymore, but they still go through the -% %% normal checkout flow to derive correct consumer states -% %% after recovery and will still be settled or discarded later on. -% checkout0(Meta, checkout_one(Meta, State, Effects), SendAcc); checkout0(_Meta, {Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> Effects1 = case Activity of nochange -> - append_delivery_effects(Effects0, SendAcc); + append_delivery_effects(Effects0, SendAcc, State0); inactive -> [{aux, inactive} - | append_delivery_effects(Effects0, SendAcc)] + | append_delivery_effects(Effects0, SendAcc, State0)] end, {State0, ExpiredMsg, lists:reverse(Effects1)}. @@ -1864,51 +1845,23 @@ evaluate_limit(Index, Result, BeforeState, {State0, Result, Effects0} end. -% evaluate_memory_limit(_Header, -% #?MODULE{cfg = #cfg{max_in_memory_length = undefined, -% max_in_memory_bytes = undefined}}) -> -% false; -% evaluate_memory_limit(#{size := Size}, State) -> -% evaluate_memory_limit(Size, State); -% evaluate_memory_limit(Header, -% #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength, -% max_in_memory_bytes = MaxBytes}, -% msg_bytes_in_memory = Bytes, -% msgs_ready_in_memory = Length}) -> -% Size = get_header(size, Header), -% (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes). - -append_delivery_effects(Effects0, AccMap) when map_size(AccMap) == 0 -> +append_delivery_effects(Effects0, AccMap, _State) when map_size(AccMap) == 0 -> %% does this ever happen? Effects0; -append_delivery_effects(Effects0, AccMap) -> +append_delivery_effects(Effects0, AccMap, State) -> [{aux, active} | - maps:fold(fun (C, {InMemMsgs, DiskMsgs}, Ef) -> - [delivery_effect(C, lists:reverse(DiskMsgs), InMemMsgs) | Ef] + maps:fold(fun (C, DiskMsgs, Ef) when is_list(DiskMsgs) -> + [delivery_effect(C, lists:reverse(DiskMsgs), State) | Ef] end, Effects0, AccMap) ]. 
-%% next message is determined as follows: -%% First we check if there are are prefex returns -%% Then we check if there are current returns -%% then we check prefix msgs -%% then we check current messages -%% -%% When we return it is always done to the current return queue -%% for both prefix messages and current messages -% take_next_msg(#?MODULE{prefix_msgs = {NumR, [Msg | Rem], -% NumP, P}} = State) -> -% %% there are prefix returns, these should be served first -% {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}}; take_next_msg(#?MODULE{returns = Returns0, messages = Messages0, ra_indexes = Indexes0 - % prefix_msgs = {NumR, R, NumP, P} } = State) -> case lqueue:out(Returns0) of {{value, NextMsg}, Returns} -> {NextMsg, State#?MODULE{returns = Returns}}; - % {empty, _} when P == [] -> {empty, _} -> case lqueue:out(Messages0) of {empty, _} -> @@ -1919,41 +1872,22 @@ take_next_msg(#?MODULE{returns = Returns0, {IndexMsg, State#?MODULE{messages = Messages, ra_indexes = Indexes}} end - % {empty, _} -> - % case P of - % [?PREFIX_MEM_MSG(_Header) = Msg | Rem] -> - % {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}; - % [?DISK_MSG(_Header) = Msg | Rem] -> - % {Msg, State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}} - % end end. -% peek_next_msg(#?MODULE{prefix_msgs = {_NumR, [Msg | _], -% _NumP, _P}}) -> -% %% there are prefix returns, these should be served first -% {value, Msg}; peek_next_msg(#?MODULE{returns = Returns0, - messages = Messages0 - % prefix_msgs = {_NumR, _R, _NumP, P} - }) -> + messages = Messages0}) -> case lqueue:peek(Returns0) of {value, _} = Msg -> Msg; empty -> lqueue:peek(Messages0) - % empty -> - % case P of - % [?PREFIX_MEM_MSG(_Header) = Msg | _] -> - % {value, Msg}; - % [?DISK_MSG(_Header) = Msg | _] -> - % {value, Msg} - % end end. -% delivery_effect({CTag, CPid}, [], InMemMsgs) -> -% {send_msg, CPid, {delivery, CTag, lists:reverse(InMemMsgs)}, -% [local, ra_event]}; -delivery_effect({CTag, CPid}, IdxMsgs, []) -> %% InMemMsgs +delivery_effect({CTag, CPid}, [{Idx, {MsgId, Header}}], + #?MODULE{msg_cache = {Idx, RawMsg}}) -> + {send_msg, CPid, {delivery, CTag, [{MsgId, {Header, RawMsg}}]}, + [local, ra_event]}; +delivery_effect({CTag, CPid}, IdxMsgs, _State) -> {RaftIdxs, Data} = lists:unzip(IdxMsgs), {log, RaftIdxs, fun(Log) -> @@ -1984,8 +1918,8 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% Before checking out any messsage to any consumer, %% first remove all expired messages from the head of the queue. 
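On a cache hit the raw payload goes straight into the send_msg effect instead of being re-read from the Ra log via a log effect; either way the consumer-facing delivery term has the same shape. A small sketch of unpacking it (unpack_delivery is a hypothetical helper; rabbit_fifo_client does the real handling):

    %% sketch: {delivery, ConsumerTag, [{MsgId, {Header, RawMsg}}]} is what both
    %% the cached and the log-read paths ultimately hand to the consumer
    unpack_delivery({delivery, ConsumerTag, Msgs}) ->
        [{ConsumerTag, MsgId, Header, RawMsg}
         || {MsgId, {Header, RawMsg}} <- Msgs].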
{ExpiredMsg, #?MODULE{service_queue = SQ0, - messages = Messages0, - consumers = Cons0} = InitState, Effects1} = + messages = Messages0, + consumers = Cons0} = InitState, Effects1} = expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), case priority_queue:out(SQ0) of @@ -2003,11 +1937,14 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% NB: these retry cases introduce the "queue list reversal" %% inefficiency but this is a rare thing to happen %% so should not need optimising - checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, + InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = cancelled} -> - checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, + InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = suspected_down} -> - checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, + InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{checked_out = Checked0, next_msg_id = Next, credit = Credit, @@ -2022,14 +1959,16 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> State0#?MODULE{service_queue = SQ1}), Header = get_msg_header(ConsumerMsg), State = add_bytes_checkout(Header, State1), - {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, State, Effects1} + {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, + State, Effects1} end; empty -> {nochange, ExpiredMsg, InitState, Effects1} end; {{value, _ConsumerId}, SQ1} -> %% consumer did not exist but was queued, recurse - checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); + checkout_one(Meta, ExpiredMsg, + InitState#?MODULE{service_queue = SQ1}, Effects1); {empty, _} -> % Effects = timer_effect(Ts, InitState, Effects1), case lqueue:len(Messages0) of @@ -2176,7 +2115,7 @@ maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, ServiceQueue0) -> case Credit > 0 of true -> - % consumerect needs service - check if already on service queue + % consumer needs service - check if already on service queue uniq_queue_in(ConsumerId, Con, ServiceQueue0); false -> ServiceQueue0 @@ -2188,64 +2127,16 @@ dehydrate_state(#?MODULE{cfg = #cfg{}, dlx = DlxState} = State) -> % no messages are kept in memory, no need to % overly mutate the current state apart from removing indexes and cursors - State#?MODULE{ - ra_indexes = rabbit_fifo_index:empty(), - release_cursors = lqueue:new(), - dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. 
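The sweep at the top of checkout_one drops expired messages before any delivery is attempted. Expiry is stored as an absolute timestamp in the header map (set via update_expiry_header) and compared against the Ra command's system_time, roughly like this (is_expired is a hypothetical helper):

    %% sketch: headers without an expiry key never expire; otherwise compare the
    %% absolute expiry timestamp with the Ra command timestamp
    is_expired(#{expiry := Expiry}, RaCmdTs) when is_integer(Expiry) ->
        RaCmdTs >= Expiry;
    is_expired(_Header, _RaCmdTs) ->
        false.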
-% dehydrate_state(#?MODULE{messages = Messages, -% consumers = Consumers, -% returns = Returns, -% prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0}, -% waiting_consumers = Waiting0, -% dlx = DlxState} = State) -> -% RCnt = lqueue:len(Returns), -% %% TODO: optimise this function as far as possible -% PrefRet1 = lists:foldr(fun (M, Acc) -> -% [dehydrate_message(M) | Acc] -% end, [], lqueue:to_list(Returns)), -% PrefRet = PrefRet0 ++ PrefRet1, -% PrefMsgsSuff = dehydrate_messages(Messages), -% %% prefix messages are not populated in normal operation only after -% %% recovering from a snapshot -% PrefMsgs = PrefMsg0 ++ PrefMsgsSuff, -% Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0], -% State#?MODULE{messages = lqueue:new(), -% ra_indexes = rabbit_fifo_index:empty(), -% release_cursors = lqueue:new(), -% consumers = maps:map(fun (_, C) -> -% dehydrate_consumer(C) -% end, Consumers), -% returns = lqueue:new(), -% prefix_msgs = {PRCnt + RCnt, PrefRet, -% PPCnt + lqueue:len(Messages), PrefMsgs}, -% waiting_consumers = Waiting, -% dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. - -% dehydrate_messages(Msgs0) -> -% {OutRes, Msgs} = lqueue:out(Msgs0), -% case OutRes of -% {value, Msg} -> -% [dehydrate_message(Msg) | dehydrate_messages(Msgs)]; -% empty -> -% [] -% end. - -% dehydrate_consumer(#consumer{checked_out = Checked0} = Con) -> -% Checked = maps:map(fun (_, M) -> -% dehydrate_message(M) -% end, Checked0), -% Con#consumer{checked_out = Checked}. - -% dehydrate_message(?PREFIX_MEM_MSG(_) = M) -> -% M; -% dehydrate_message(?DISK_MSG(_) = M) -> -% M; + State#?MODULE{ra_indexes = rabbit_fifo_index:empty(), + release_cursors = lqueue:new(), + enqueue_count = 0, + msg_cache = undefined, + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. + dehydrate_message(?INDEX_MSG(_Idx, ?DISK_MSG(_Header) = Msg)) -> %% Use disk msgs directly as prefix messages. %% This avoids memory allocation since we do not convert. Msg. -% dehydrate_message(?INDEX_MSG(Idx, ?MSG(Header, _))) when is_integer(Idx) -> -% ?PREFIX_MEM_MSG(Header). %% make the state suitable for equality comparison normalize(#?MODULE{ra_indexes = _Indexes, @@ -2328,10 +2219,10 @@ make_purge_nodes(Nodes) -> make_update_config(Config) -> #update_config{config = Config}. -add_bytes_enqueue(Header, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> - Size = get_header(size, Header), - State#?MODULE{msg_bytes_enqueue = Enqueue + Size}. +% add_bytes_enqueue(Header, +% #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> +% Size = get_header(size, Header), +% State#?MODULE{msg_bytes_enqueue = Enqueue + Size}. add_bytes_drop(Header, #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> @@ -2518,5 +2409,16 @@ make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))} make_requeue(_ConsumerId, _Notify, [], []) -> []. + +can_immediately_deliver(#?MODULE{service_queue = SQ, + consumers = Consumers} = State) -> + case messages_ready(State) of + 0 when map_size(Consumers) > 0 -> + %% TODO: check consumers actually have credit + priority_queue:is_empty(SQ) == false; + _ -> + false + end. + incr(I) -> I + 1. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 2d3d373f4617..1d132a761c45 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -227,7 +227,7 @@ msg_bytes_in_memory = 0 :: non_neg_integer(), msgs_ready_in_memory = 0 :: non_neg_integer(), last_active :: undefined | non_neg_integer(), - unused_1, + msg_cache :: undefined | {ra:index(), raw_msg()}, unused_2 }). 
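Moving msg_cache into the unused_1 slot rather than appending a new field keeps the state record's tuple arity and the positions of the existing fields unchanged, which is presumably what lets terms built by the previous machine version still match the new definition. In miniature (record and function names invented):

    -record(state, {a, b, msg_cache, unused_2}). %% v2 layout; v1 had unused_1 here

    %% a tuple built against the v1 layout still matches a v2 record pattern,
    %% because the tag, the arity and the field positions are unchanged
    old_term_still_matches() ->
        V1Term = {state, 1, 2, undefined, undefined},
        #state{a = 1, b = 2} = V1Term,
        ok.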
diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 4b2a243b38ff..3261722c8560 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -251,11 +251,11 @@ checkout_enq_settle_test(_) -> {State2, Effects0} = enq(2, 1, first, State1), ct:pal("Effects0 ~p", [Effects0]), %% TODO: this should go back to a send_msg effect after optimisation - ?ASSERT_EFF({log, [2], _, _}, Effects0), - % ?ASSERT_EFF({send_msg, _, - % {delivery, ?FUNCTION_NAME, - % [{0, {_, first}}]}, _}, - % Effects0), + % ?ASSERT_EFF({log, [2], _, _}, Effects0), + ?ASSERT_EFF({send_msg, _, + {delivery, ?FUNCTION_NAME, + [{0, {_, first}}]}, _}, + Effects0), {State3, [_Inactive]} = enq(3, 2, second, State2), {_, _Effects} = settle(Cid, 4, 0, State3), % the release cursor is the smallest raft index that does not @@ -268,11 +268,10 @@ duplicate_enqueue_test(_) -> {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), {State2, Effects2} = enq(2, 1, first, State1), - ?ASSERT_EFF({log, [2], _, _}, Effects2), - % ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), + % ?ASSERT_EFF({log, [2], _, _}, Effects2), + ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), {_State3, Effects3} = enq(3, 1, first, State2), ?ASSERT_NO_EFF({log, [_], _, _}, Effects3), - % ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3), ok. return_test(_) -> @@ -1737,6 +1736,7 @@ checkout_priority_test(_) -> #{args => []}), S1), {S3, E3} = enq(1, 1, first, S2), + ct:pal("E3 ~p ~p", [E3, self()]), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E3), {S4, E4} = enq(2, 2, second, S3), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E4), From 1425155cd882efcd2e32b5adc1c0ac43263cefce Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Mon, 24 Jan 2022 17:05:06 +0000 Subject: [PATCH 50/97] rabbit_fifo: slight optimisation around completion Avoid some allocs. --- deps/rabbit/src/rabbit_fifo.erl | 48 +++++++++++++-------------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index a1a1803a6366..eb50aecf28a6 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1558,30 +1558,28 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, % used to process messages that are finished complete(Meta, ConsumerId, DiscardedMsgIds, #consumer{checked_out = Checked} = Con0, - #?MODULE{messages_total = Tot} = State0) -> + #?MODULE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> %% credit_mode = simple_prefetch should automatically top-up credit %% as messages are simple_prefetch or otherwise returned Discarded = maps:with(DiscardedMsgIds, Checked), DiscardedMsgs = maps:values(Discarded), - Len = length(DiscardedMsgs), + Len = map_size(Discarded), Con = Con0#consumer{checked_out = maps:without(DiscardedMsgIds, Checked), credit = increase_credit(Con0, Len)}, State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - State2 = lists:foldl(fun(Msg, Acc) -> - add_bytes_settle( - get_msg_header(Msg), Acc) - end, State1, DiscardedMsgs), - State = State2#?MODULE{messages_total = Tot - Len}, - delete_indexes(DiscardedMsgs, State). 
- -delete_indexes(Msgs, #?MODULE{ra_indexes = Indexes0} = State) -> - %% TODO: optimise by passing a list to rabbit_fifo_index + SettledSize = lists:foldl(fun(Msg, Acc) -> + get_header(size, get_msg_header(Msg)) + Acc + end, 0, DiscardedMsgs), Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc); (_, Acc) -> Acc - end, Indexes0, Msgs), - State#?MODULE{ra_indexes = Indexes}. + end, Indexes0, DiscardedMsgs), + State1#?MODULE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - Len}. increase_credit(#consumer{lifetime = once, credit = Credit}, _) -> @@ -1670,19 +1668,8 @@ update_header(Key, UpdateFun, Default, Header) update_header(Key, UpdateFun, Default, Header) -> maps:update_with(Key, UpdateFun, Default, Header). -% get_msg_header(Key, ?INDEX_MSG(_Idx, ?MSG(Header, _Body))) -> -% get_header(Key, Header); -% get_msg_header(Key, ?DISK_MSG(Header)) -> -% get_header(Key, Header); -% get_msg_header(Key, ?PREFIX_MEM_MSG(Header)) -> -% get_header(Key, Header). - get_msg_header(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> Header. -% get_msg_header(?DISK_MSG(Header)) -> -% Header; -% get_msg_header(?PREFIX_MEM_MSG(Header)) -> -% Header. get_header(size, Header) when is_integer(Header) -> @@ -1768,7 +1755,8 @@ checkout(#{index := Index} = Meta, {true, {MaxActivePriority, IsEmpty}} -> NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, IsEmpty), - update_smallest_raft_index(Index, State, [NotifyEffect | Effects]); + update_smallest_raft_index(Index, State, + [NotifyEffect | Effects]); false -> update_smallest_raft_index(Index, State, Effects) end @@ -1778,10 +1766,12 @@ checkout0(Meta, {success, ConsumerId, MsgId, ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), ExpiredMsg, State, Effects}, SendAcc0) when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {MsgId, Header}}, - SendAcc = maps:update_with(ConsumerId, - fun (LogMsgs) -> - [DelMsg | LogMsgs] - end, [DelMsg], SendAcc0), + SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of + undefined -> + SendAcc0#{ConsumerId => [DelMsg]}; + LogMsgs -> + SendAcc0#{ConsumerId => [DelMsg | LogMsgs]} + end, checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); checkout0(_Meta, {Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> Effects1 = case Activity of From dcae0e50863450be97821345ab7926e7f8e9202b Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 25 Jan 2022 09:55:22 +0100 Subject: [PATCH 51/97] Comment out unused function --- deps/rabbit/BUILD.bazel | 2 +- deps/rabbit/src/rabbit_fifo.erl | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index d75ee211234b..5ffcd3825cbd 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -403,7 +403,7 @@ suites = [ ":quorum_queue_utils", ], flaky = True, - shard_count = 3, + shard_count = 7, ), rabbitmq_integration_suite( PACKAGE, diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index eb50aecf28a6..f0b45d2817cd 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -2227,10 +2227,10 @@ add_bytes_checkout(Header, State#?MODULE{msg_bytes_checkout = Checkout + Size, msg_bytes_enqueue = Enqueue - Size}. -add_bytes_settle(Header, - #?MODULE{msg_bytes_checkout = Checkout} = State) -> - Size = get_header(size, Header), - State#?MODULE{msg_bytes_checkout = Checkout - Size}. 
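The reworked complete/5 takes its counts and totals from the checked-out map directly: maps:with/2 selects the settled ids, map_size/1 is O(1), and a single fold accumulates the settled bytes, replacing the per-message add_bytes_settle updates that the surrounding hunk comments out. The pattern in isolation (settled_stats is a hypothetical helper; here the map simply holds sizes):

    %% sketch: one pass over the settled subset, no per-message state rebuilds
    settled_stats(MsgIds, Checked) when is_list(MsgIds), is_map(Checked) ->
        Discarded = maps:with(MsgIds, Checked),
        Bytes = maps:fold(fun(_Id, Size, Acc) -> Acc + Size end, 0, Discarded),
        {map_size(Discarded), Bytes}.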
+% add_bytes_settle(Header, + % #?MODULE{msg_bytes_checkout = Checkout} = State) -> + % Size = get_header(size, Header), + % State#?MODULE{msg_bytes_checkout = Checkout - Size}. add_bytes_return(Header, #?MODULE{msg_bytes_checkout = Checkout, From 63a93b9e03786ea53067a00e7013163bb4eb69ce Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 25 Jan 2022 10:02:11 +0000 Subject: [PATCH 52/97] rabbit_fifo: tidy up, fix tests --- deps/rabbit/src/rabbit_fifo.erl | 126 ++++--------------------- deps/rabbit/src/rabbit_fifo.hrl | 4 - deps/rabbit/test/rabbit_fifo_SUITE.erl | 46 +++++---- 3 files changed, 40 insertions(+), 136 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index f0b45d2817cd..6252e0f95165 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -156,8 +156,6 @@ update_config(Conf, State) -> Overflow = maps:get(overflow_strategy, Conf, drop_head), MaxLength = maps:get(max_length, Conf, undefined), MaxBytes = maps:get(max_bytes, Conf, undefined), - MaxMemoryLength = maps:get(max_in_memory_length, Conf, undefined), - MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined), DeliveryLimit = maps:get(delivery_limit, Conf, undefined), Expires = maps:get(expires, Conf, undefined), MsgTTL = maps:get(msg_ttl, Conf, undefined), @@ -171,15 +169,12 @@ update_config(Conf, State) -> RCISpec = {RCI, RCI}, LastActive = maps:get(created, Conf, undefined), - MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined), State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, dead_letter_handler = DLH, become_leader_handler = BLH, overflow_strategy = Overflow, max_length = MaxLength, max_bytes = MaxBytes, - max_in_memory_length = MaxMemoryLength, - max_in_memory_bytes = MaxMemoryBytes, consumer_strategy = ConsumerStrategy, delivery_limit = DeliveryLimit, expires = Expires, @@ -440,10 +435,8 @@ apply(#{index := Index}, #purge{}, messages = lqueue:new(), messages_total = Tot - NumReady, returns = lqueue:new(), - msg_bytes_enqueue = 0, - prefix_msgs = {0, [], 0, []}, - msg_bytes_in_memory = 0, - msgs_ready_in_memory = 0}, + msg_bytes_enqueue = 0 + }, Effects0 = [garbage_collection], Reply = {purge, NumReady + NumDlx}, {State, _, Effects} = evaluate_limit(Index, false, State0, @@ -538,12 +531,7 @@ apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, % comes back, then re-issue all monitors and discover the final fate of % these processes - Effects = case maps:size(State#?MODULE.consumers) of - 0 -> - [{aux, inactive}, {monitor, node, Node}]; - _ -> - [{monitor, node, Node}] - end ++ Effects1, + Effects = [{monitor, node, Node} | Effects1], checkout(Meta, State0, State#?MODULE{enqueuers = Enqs, last_active = Ts}, Effects); apply(Meta, {down, Pid, _Info}, State0) -> @@ -639,6 +627,9 @@ convert_v1_to_v2(V1State) -> lqueue:in(convert_msg(Msg), Acc) end, lqueue:new(), ReturnsV1), + %% TODO: prefix message need to be turned into disk msgs and added + %% to messages and returns respectively + % prefix_msgs = rabbit_fifo_v1:get_field(prefix_msgs, V1State), ConsumersV2 = maps:map( fun (_, #consumer{checked_out = Ch} = C) -> C#consumer{ @@ -680,8 +671,6 @@ convert_v1_to_v2(V1State) -> max_bytes = rabbit_fifo_v1:get_cfg_field(max_bytes, V1State), consumer_strategy = rabbit_fifo_v1:get_cfg_field(consumer_strategy, V1State), delivery_limit = rabbit_fifo_v1:get_cfg_field(delivery_limit, V1State), - max_in_memory_length = rabbit_fifo_v1:get_cfg_field(max_in_memory_length, V1State), - max_in_memory_bytes = 
rabbit_fifo_v1:get_cfg_field(max_in_memory_bytes, V1State), expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) }, @@ -700,8 +689,6 @@ convert_v1_to_v2(V1State) -> msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), waiting_consumers = rabbit_fifo_v1:get_field(waiting_consumers, V1State), - msg_bytes_in_memory = 0, - msgs_ready_in_memory = 0, last_active = rabbit_fifo_v1:get_field(last_active, V1State) }. @@ -852,8 +839,6 @@ overview(#?MODULE{consumers = Cons, enqueuers = Enqs, release_cursors = Cursors, enqueue_count = EnqCount, - msgs_ready_in_memory = InMemReady, - msg_bytes_in_memory = InMemBytes, msg_bytes_enqueue = EnqueueBytes, msg_bytes_checkout = CheckoutBytes, cfg = Cfg, @@ -865,8 +850,6 @@ overview(#?MODULE{consumers = Cons, max_length => Cfg#cfg.max_length, max_bytes => Cfg#cfg.max_bytes, consumer_strategy => Cfg#cfg.consumer_strategy, - max_in_memory_length => Cfg#cfg.max_in_memory_length, - max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes, expires => Cfg#cfg.expires, msg_ttl => Cfg#cfg.msg_ttl, delivery_limit => Cfg#cfg.delivery_limit @@ -877,14 +860,14 @@ overview(#?MODULE{consumers = Cons, num_checked_out => num_checked_out(State), num_enqueuers => maps:size(Enqs), num_ready_messages => messages_ready(State), - num_in_memory_ready_messages => InMemReady, + num_in_memory_ready_messages => 0, %% backwards compat num_messages => messages_total(State), num_release_cursors => lqueue:len(Cursors), release_cursors => [{I, messages_total(S)} || {_, I, S} <- lqueue:to_list(Cursors)], release_cursor_enqueue_counter => EnqCount, enqueue_message_bytes => EnqueueBytes, checkout_message_bytes => CheckoutBytes, - in_memory_message_bytes => InMemBytes, + in_memory_message_bytes => 0, %% backwards compat smallest_raft_index => smallest_raft_index(State)}, DlxOverview = rabbit_fifo_dlx:overview(DlxState), maps:merge(Overview, DlxOverview). @@ -1174,9 +1157,8 @@ query_single_active_consumer(_) -> query_stat(#?MODULE{consumers = Consumers} = State) -> {messages_ready(State), maps:size(Consumers)}. -query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes, - msgs_ready_in_memory = Length}) -> - {Length, Bytes}. +query_in_memory_usage(#?MODULE{ }) -> + {0, 0}. query_stat_dlx(#?MODULE{dlx = DlxState}) -> rabbit_fifo_dlx:stat(DlxState). @@ -1223,12 +1205,8 @@ messages_ready(#?MODULE{messages = M, returns = R}) -> lqueue:len(M) + lqueue:len(R) + RCnt + PCnt. -messages_total(#?MODULE{messages = _M, - messages_total = Total, - ra_indexes = _Indexes, - prefix_msgs = _, +messages_total(#?MODULE{messages_total = Total, dlx = DlxState}) -> - % lqueue:len(M) + rabbit_fifo_index:size(Indexes) + RCnt + PCnt. {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), Total + DlxTotal; %% release cursors might be old state (e.g. 
after recent upgrade) @@ -1329,13 +1307,7 @@ cancel_consumer0(Meta, ConsumerId, %% in line with what classic queues do (from an external point of %% view) Effects = cancel_consumer_effects(ConsumerId, S, Effects2), - - case maps:size(S#?MODULE.consumers) of - 0 -> - {S, [{aux, inactive} | Effects]}; - _ -> - {S, Effects} - end; + {S, Effects}; _ -> %% already removed: do nothing {S0, Effects0} @@ -1365,7 +1337,7 @@ activate_next_consumer(#?MODULE{consumers = Cons, single_active, Effects0), {State, Effects}; [] -> - {State0, [{aux, inactive} | Effects0]} + {State0, Effects0} end; _ -> {State0, Effects0} @@ -1588,7 +1560,7 @@ increase_credit(#consumer{lifetime = once, increase_credit(#consumer{lifetime = auto, credit_mode = credited, credit = Credit}, _) -> - %% credit_mode: credit also doesn't automatically increment credit + %% credit_mode: `credited' also doesn't automatically increment credit Credit; increase_credit(#consumer{credit = Current}, Credit) -> Current + Credit. @@ -1657,10 +1629,6 @@ find_next_cursor(Smallest, Cursors0, Potential) -> update_msg_header(Key, Fun, Def, ?INDEX_MSG(Idx, ?DISK_MSG(Header))) -> ?INDEX_MSG(Idx, ?DISK_MSG(update_header(Key, Fun, Def, Header))). -% update_msg_header(Key, Fun, Def, ?DISK_MSG(Header)) -> -% ?DISK_MSG(update_header(Key, Fun, Def, Header)). -% update_msg_header(Key, Fun, Def, ?PREFIX_MEM_MSG(Header)) -> -% ?PREFIX_MEM_MSG(update_header(Key, Fun, Def, Header)). update_header(Key, UpdateFun, Default, Header) when is_integer(Header) -> @@ -1698,18 +1666,6 @@ return_one(Meta, MsgId, Msg0, {State, DlxEffects ++ Effects0}; _ -> Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)}, - - % {RtnMsg, State1} = case is_disk_msg(Msg) of - % true -> - % {Msg, State0}; - % false -> - % case evaluate_memory_limit(Header, State0) of - % true -> - % {to_disk_msg(Msg), State0}; - % false -> - % {Msg, add_in_memory_counts(Header, State0)} - % end - % end, {add_bytes_return( Header, State0#?MODULE{consumers = Consumers#{ConsumerId => Con}, @@ -1773,15 +1729,9 @@ checkout0(Meta, {success, ConsumerId, MsgId, SendAcc0#{ConsumerId => [DelMsg | LogMsgs]} end, checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); -checkout0(_Meta, {Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> - Effects1 = case Activity of - nochange -> - append_delivery_effects(Effects0, SendAcc, State0); - inactive -> - [{aux, inactive} - | append_delivery_effects(Effects0, SendAcc, State0)] - end, - {State0, ExpiredMsg, lists:reverse(Effects1)}. +checkout0(_Meta, {_Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> + Effects = append_delivery_effects(Effects0, SendAcc, State0), + {State0, ExpiredMsg, lists:reverse(Effects)}. evaluate_limit(_Index, Result, _BeforeState, #?MODULE{cfg = #cfg{max_length = undefined, @@ -1839,11 +1789,9 @@ append_delivery_effects(Effects0, AccMap, _State) when map_size(AccMap) == 0 -> %% does this ever happen? Effects0; append_delivery_effects(Effects0, AccMap, State) -> - [{aux, active} | maps:fold(fun (C, DiskMsgs, Ef) when is_list(DiskMsgs) -> [delivery_effect(C, lists:reverse(DiskMsgs), State) | Ef] - end, Effects0, AccMap) - ]. + end, Effects0, AccMap). 
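Earlier in this patch, overview/1 keeps emitting num_in_memory_ready_messages and in_memory_message_bytes even though the in-memory limits are gone; they are hard-coded to 0 so existing readers of the map do not break. Callers can therefore keep fetching them defensively, e.g. (sketch, hypothetical helper name):

    %% sketch: the legacy keys are still present in the overview map, always 0 in v2
    in_memory_stats(Overview) when is_map(Overview) ->
        {maps:get(num_in_memory_ready_messages, Overview, 0),
         maps:get(in_memory_message_bytes, Overview, 0)}.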
take_next_msg(#?MODULE{returns = Returns0, messages = Messages0, @@ -1924,9 +1872,6 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% no credit but was still on queue %% can happen when draining %% recurse without consumer on queue - %% NB: these retry cases introduce the "queue list reversal" - %% inefficiency but this is a rare thing to happen - %% so should not need optimising checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); #consumer{status = cancelled} -> @@ -1960,7 +1905,6 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> checkout_one(Meta, ExpiredMsg, InitState#?MODULE{service_queue = SQ1}, Effects1); {empty, _} -> - % Effects = timer_effect(Ts, InitState, Effects1), case lqueue:len(Messages0) of 0 -> {nochange, ExpiredMsg, InitState, Effects1}; @@ -1975,15 +1919,9 @@ expire_msgs(RaCmdTs, Result, State, Effects) -> %% Therefore, first queue:peek/1 to check whether we need to queue:out/1 %% because the latter can be much slower than the former. case peek_next_msg(State) of - % {value, ?DISK_MSG(#{expiry := Expiry} = Header)} - % when RaCmdTs >= Expiry -> - % expire(RaCmdTs, Header, State, Effects); {value, ?INDEX_MSG(_Idx, ?DISK_MSG(#{expiry := Expiry} = Header))} when RaCmdTs >= Expiry -> expire(RaCmdTs, Header, State, Effects); - % {value, ?PREFIX_MEM_MSG(#{expiry := Expiry} = Header)} - % when RaCmdTs >= Expiry -> - % expire(RaCmdTs, Header, State, Effects); _ -> {Result, State, Effects} end. @@ -2209,11 +2147,6 @@ make_purge_nodes(Nodes) -> make_update_config(Config) -> #update_config{config = Config}. -% add_bytes_enqueue(Header, -% #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> -% Size = get_header(size, Header), -% State#?MODULE{msg_bytes_enqueue = Enqueue + Size}. - add_bytes_drop(Header, #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> Size = get_header(size, Header), @@ -2227,11 +2160,6 @@ add_bytes_checkout(Header, State#?MODULE{msg_bytes_checkout = Checkout + Size, msg_bytes_enqueue = Enqueue - Size}. -% add_bytes_settle(Header, - % #?MODULE{msg_bytes_checkout = Checkout} = State) -> - % Size = get_header(size, Header), - % State#?MODULE{msg_bytes_checkout = Checkout - Size}. - add_bytes_return(Header, #?MODULE{msg_bytes_checkout = Checkout, msg_bytes_enqueue = Enqueue} = State) -> @@ -2239,27 +2167,9 @@ add_bytes_return(Header, State#?MODULE{msg_bytes_checkout = Checkout - Size, msg_bytes_enqueue = Enqueue + Size}. -% add_in_memory_counts(Header, -% #?MODULE{msg_bytes_in_memory = InMemoryBytes, -% msgs_ready_in_memory = InMemoryCount} = State) -> -% Size = get_header(size, Header), -% State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Size, -% msgs_ready_in_memory = InMemoryCount + 1}. - -% subtract_in_memory_counts(Header, -% #?MODULE{msg_bytes_in_memory = InMemoryBytes, -% msgs_ready_in_memory = InMemoryCount} = State) -> -% Size = get_header(size, Header), -% State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Size, -% msgs_ready_in_memory = InMemoryCount - 1}. 
- message_size(#basic_message{content = Content}) -> #content{payload_fragments_rev = PFR} = Content, iolist_size(PFR); -% message_size(?PREFIX_MEM_MSG(Header)) -> -% get_header(size, Header); -% message_size(Header) ?IS_HEADER(Header) -> -% get_header(size, Header); message_size(B) when is_binary(B) -> byte_size(B); message_size(Msg) -> diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 1d132a761c45..70b5e378dc94 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -156,8 +156,6 @@ consumer_strategy = competing :: consumer_strategy(), %% the maximum number of unsuccessful delivery attempts permitted delivery_limit :: option(non_neg_integer()), - max_in_memory_length :: option(non_neg_integer()), - max_in_memory_bytes :: option(non_neg_integer()), expires :: undefined | milliseconds(), msg_ttl :: undefined | milliseconds(), unused_1, @@ -224,8 +222,6 @@ %% waiting consumers, one is picked active consumer is cancelled or dies %% used only when single active consumer is on waiting_consumers = [] :: [{consumer_id(), consumer()}], - msg_bytes_in_memory = 0 :: non_neg_integer(), - msgs_ready_in_memory = 0 :: non_neg_integer(), last_active :: undefined | non_neg_integer(), msg_cache :: undefined | {ra:index(), raw_msg()}, unused_2 diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 3261722c8560..4e8a418291a0 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -249,14 +249,13 @@ checkout_enq_settle_test(_) -> {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, {monitor, _, _} | _]} = check(Cid, 1, test_init(test)), {State2, Effects0} = enq(2, 1, first, State1), - ct:pal("Effects0 ~p", [Effects0]), %% TODO: this should go back to a send_msg effect after optimisation % ?ASSERT_EFF({log, [2], _, _}, Effects0), ?ASSERT_EFF({send_msg, _, {delivery, ?FUNCTION_NAME, [{0, {_, first}}]}, _}, Effects0), - {State3, [_Inactive]} = enq(3, 2, second, State2), + {State3, _} = enq(3, 2, second, State2), {_, _Effects} = settle(Cid, 4, 0, State3), % the release cursor is the smallest raft index that does not % contribute to the state of the application @@ -312,27 +311,28 @@ return_dequeue_delivery_limit_test(_) -> return_non_existent_test(_) -> Cid = {<<"cid">>, self()}, - {State0, [_, _Inactive]} = enq(1, 1, second, test_init(test)), + {State0, _} = enq(1, 1, second, test_init(test)), % return non-existent {_State2, _} = apply(meta(3), rabbit_fifo:make_return(Cid, [99]), State0), ok. return_checked_out_test(_) -> Cid = {<<"cid">>, self()}, - {State0, [_, _]} = enq(1, 1, first, test_init(test)), + {State0, _} = enq(1, 1, first, test_init(test)), {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, _Monitor, - {log, [1], Fun, _}, - {aux, active} | _ ]} = check_auto(Cid, 2, State0), + {log, [1], Fun, _} + | _ ] + } = check_auto(Cid, 2, State0), Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun([Msg1]), % returning immediately checks out the same message again {_, ok, [ - {log, [1], _, _}, + {log, [1], _, _} % {send_msg, _, {delivery, _, [{_, _}]}, _}, - {aux, active}]} = + ]} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), ok. 
@@ -345,16 +345,16 @@ return_checked_out_limit_test(_) -> max_in_memory_length => 0, delivery_limit => 1}), Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, [_, _]} = enq(1, 1, first, Init), + {State0, _} = enq(1, 1, first, Init), {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, _Monitor, - {log, [1], Fun1, _}, - {aux, active} | _ ]} = check_auto(Cid, 2, State0), + {log, [1], Fun1, _} + | _ ]} = check_auto(Cid, 2, State0), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), % returning immediately checks out the same message again {State2, ok, [ - {log, [1], Fun2, _}, - {aux, active}]} = + {log, [1], Fun2, _} + ]} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1), [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}] = Fun2([Msg1]), {#rabbit_fifo{} = State, ok, _} = @@ -365,15 +365,13 @@ return_checked_out_limit_test(_) -> return_auto_checked_out_test(_) -> Cid = {<<"cid">>, self()}, Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State00, [_, _]} = enq(1, 1, first, test_init(test)), - {State0, [_]} = enq(2, 2, second, State00), + {State00, _} = enq(1, 1, first, test_init(test)), + {State0, _} = enq(2, 2, second, State00), % it first active then inactive as the consumer took on but cannot take % any more {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, _Monitor, - {log, [1], Fun1, _}, - {aux, active}, - {aux, inactive} + {log, [1], Fun1, _} ]} = check_auto(Cid, 2, State0), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), % return should include another delivery @@ -397,8 +395,8 @@ cancelled_checkout_empty_queue_test(_) -> cancelled_checkout_out_test(_) -> Cid = {<<"cid">>, self()}, - {State00, [_, _]} = enq(1, 1, first, test_init(test)), - {State0, [_]} = enq(2, 2, second, State00), + {State00, _} = enq(1, 1, first, test_init(test)), + {State0, _} = enq(2, 2, second, State00), {State1, _} = check_auto(Cid, 3, State0),%% prefetch of 1 % cancelled checkout should not return pending messages to queue {State2, _, _} = apply(meta(4), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), @@ -418,7 +416,7 @@ cancelled_checkout_out_test(_) -> down_with_noproc_consumer_returns_unsettled_test(_) -> Cid = {<<"down_consumer_returns_unsettled_test">>, self()}, - {State0, [_, _]} = enq(1, 1, second, test_init(test)), + {State0, _} = enq(1, 1, second, test_init(test)), {State1, [_, {monitor, process, Pid} | _]} = check(Cid, 2, State0), {State2, _, _} = apply(meta(3), {down, Pid, noproc}, State1), {_State, Effects} = check(Cid, 4, State2), @@ -486,7 +484,7 @@ down_with_noproc_enqueuer_is_cleaned_up_test(_) -> discarded_message_without_dead_letter_handler_is_removed_test(_) -> Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()}, - {State0, [_, _]} = enq(1, 1, first, test_init(test)), + {State0, _} = enq(1, 1, first, test_init(test)), {State1, Effects1} = check_n(Cid, 2, 10, State0), ?ASSERT_EFF({log, [1], _Fun, _}, Effects1), {_State2, _, Effects2} = apply(meta(1), @@ -502,7 +500,7 @@ discarded_message_with_dead_letter_handler_emits_log_effect_test(_) -> dead_letter_handler => {at_most_once, {somemod, somefun, [somearg]}}}), Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, [_, _]} = enq(1, 1, first, State00), + {State0, _} = enq(1, 1, first, State00), {State1, Effects1} = check_n(Cid, 2, 10, State0), ?ASSERT_EFF({log, [1], _, _}, Effects1), {_State2, _, Effects2} = apply(meta(1), rabbit_fifo:make_discard(Cid, [0]), State1), @@ -1215,7 +1213,7 @@ 
active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_co {State2, _, Effects2} = apply(meta(2), {down, Pid1, noconnection}, State1), % one monitor and one consumer status update (deactivated) - ?assertEqual(4, length(Effects2)), + ?assertEqual(3, length(Effects2)), {_, _, Effects3} = apply(meta(3), {nodeup, node(self())}, State2), % for each consumer: 1 effect to monitor the consumer PID From cdca2e5e2e450b160d82fbcbb64a9f5021c873d3 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 25 Jan 2022 14:46:35 +0000 Subject: [PATCH 53/97] rabbit_fifo alloc opts --- deps/rabbit/src/rabbit_fifo.erl | 18 +++++++----------- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 16 ++++++++-------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 6252e0f95165..d431a17cdc5d 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1857,6 +1857,8 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% first remove all expired messages from the head of the queue. {ExpiredMsg, #?MODULE{service_queue = SQ0, messages = Messages0, + msg_bytes_checkout = BytesCheckout, + msg_bytes_enqueue = BytesEnqueue, consumers = Cons0} = InitState, Effects1} = expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), @@ -1889,11 +1891,12 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> next_msg_id = Next + 1, credit = Credit - 1, delivery_count = DelCnt + 1}, - State1 = update_or_remove_sub( + Size = get_header(size, get_msg_header(ConsumerMsg)), + State = update_or_remove_sub( Meta, ConsumerId, Con, - State0#?MODULE{service_queue = SQ1}), - Header = get_msg_header(ConsumerMsg), - State = add_bytes_checkout(Header, State1), + State0#?MODULE{service_queue = SQ1, + msg_bytes_checkout = BytesCheckout + Size, + msg_bytes_enqueue = BytesEnqueue - Size}), {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, State, Effects1} end; @@ -2153,13 +2156,6 @@ add_bytes_drop(Header, State#?MODULE{msg_bytes_enqueue = Enqueue - Size}. -add_bytes_checkout(Header, - #?MODULE{msg_bytes_checkout = Checkout, - msg_bytes_enqueue = Enqueue } = State) -> - Size = get_header(size, Header), - State#?MODULE{msg_bytes_checkout = Checkout + Size, - msg_bytes_enqueue = Enqueue - Size}. 
- add_bytes_return(Header, #?MODULE{msg_bytes_checkout = Checkout, msg_bytes_enqueue = Enqueue} = State) -> diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 5f3a9bca2626..8b6ae5ac11cd 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1368,7 +1368,7 @@ max_length_prop(Conf0, Commands) -> #{num_ready_messages := MsgReady} = rabbit_fifo:overview(S), MsgReady =< MaxLen end, - try run_log(test_init(Conf), Entries, Invariant) of + try run_log(test_init(Conf), Entries, Invariant, rabbit_fifo) of {_State, _Effects} -> true; _ -> @@ -1414,7 +1414,7 @@ single_active_prop(Conf0, Commands, ValidateOrder) -> map_size(Up) =< 1 end, - try run_log(test_init(Conf), Entries, Invariant) of + try run_log(test_init(Conf), Entries, Invariant, rabbit_fifo) of {_State, Effects} when ValidateOrder -> %% validate message ordering lists:foldl(fun ({send_msg, Pid, {delivery, Tag, Msgs}, ra_event}, @@ -1438,7 +1438,7 @@ messages_total_prop(Conf0, Commands) -> Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), InitState = test_init(Conf), - run_log(InitState, Entries, messages_total_invariant()), + run_log(InitState, Entries, messages_total_invariant(), rabbit_fifo), true. messages_total_invariant() -> @@ -1923,7 +1923,7 @@ run_snapshot_test0(Conf0, Commands, Invariant) -> Conf = Conf0#{max_in_memory_length => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), - {State0, Effects} = run_log(test_init(Conf), Entries, Invariant), + {State0, Effects} = run_log(test_init(Conf), Entries, Invariant, rabbit_fifo), State = rabbit_fifo:normalize(State0), Cursors = [ C || {release_cursor, _, _} = C <- Effects], @@ -1933,7 +1933,7 @@ run_snapshot_test0(Conf0, Commands, Invariant) -> (_) -> false end, Entries), % ct:pal("release_cursor: ~b from ~w~n", [SnapIdx, element(1, hd_or(Filtered))]), - {S0, _} = run_log(SnapState, Filtered, Invariant), + {S0, _} = run_log(SnapState, Filtered, Invariant, rabbit_fifo), S = rabbit_fifo:normalize(S0), % assert log can be restored from any release cursor index case S of @@ -1962,9 +1962,9 @@ prefixes(Source, N, Acc) -> prefixes(Source, N+1, [X | Acc]). run_log(InitState, Entries) -> - run_log(InitState, Entries, fun(_) -> true end). + run_log(InitState, Entries, fun(_) -> true end, rabbit_fifo). -run_log(InitState, Entries, InvariantFun) -> +run_log(InitState, Entries, InvariantFun, FifoMod) -> Invariant = fun(E, S) -> case InvariantFun(S) of true -> ok; @@ -1974,7 +1974,7 @@ run_log(InitState, Entries, InvariantFun) -> end, lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) -> - case rabbit_fifo:apply(meta(Idx), E, Acc0) of + case FifoMod:apply(meta(Idx), E, Acc0) of {Acc, _, Efx} when is_list(Efx) -> Invariant(E, Acc), {Acc, Efx0 ++ Efx}; From 8dd5c13141cb5fcfb9be20e9ed5cd673a93acca9 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 25 Jan 2022 16:52:01 +0000 Subject: [PATCH 54/97] rabbit_fifo_client: allocation optimisations of common code paths. 
--- deps/rabbit/src/rabbit_fifo_client.erl | 82 +++++++++++++------------- 1 file changed, 40 insertions(+), 42 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 3b084acbc544..cf9625c841dd 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -178,18 +178,31 @@ enqueue(_Correlation, _Msg, {reject_publish, State}; enqueue(Correlation, Msg, #state{slow = Slow, + pending = Pending, queue_status = go, - cfg = #cfg{block_handler = BlockFun}} = State0) -> - Node = pick_server(State0), - {Next, State1} = next_enqueue_seq(State0), + next_seq = Seq0, + next_enqueue_seq = Next, + cfg = #cfg{soft_limit = SftLmt, + block_handler = BlockFun}} = State0) -> + Server = pick_server(State0), % by default there is no correlation id Cmd = rabbit_fifo:make_enqueue(self(), Next, Msg), - case send_command(Node, Correlation, Cmd, low, State1) of - {slow, State} when not Slow -> + Seq = Seq0 + 1, + ok = ra:pipeline_command(Server, Cmd, Seq, low), + Tag = case map_size(Pending) >= SftLmt of + true -> slow; + false -> ok + end, + State = State0#state{pending = Pending#{Seq => {Correlation, Cmd}}, + next_seq = Seq, + next_enqueue_seq = Next + 1, + slow = Tag == slow}, + case Tag of + slow when not Slow -> BlockFun(), {slow, set_timer(State)}; - Any -> - Any + _ -> + {ok, State} end. %% @doc Enqueues a message. @@ -272,11 +285,7 @@ add_delivery_count_header(Msg, _Count) -> settle(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> Node = pick_server(State0), Cmd = rabbit_fifo:make_settle(consumer_id(ConsumerTag), MsgIds), - case send_command(Node, undefined, Cmd, normal, State0) of - {_, S} -> - % turn slow into ok for this function - {S, []} - end; + {send_command(Node, undefined, Cmd, normal, State0), []}; settle(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> ConsumerId = consumer_id(ConsumerTag), @@ -304,8 +313,7 @@ return(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> Node = pick_server(State0), % TODO: make rabbit_fifo return support lists of message ids Cmd = rabbit_fifo:make_return(consumer_id(ConsumerTag), MsgIds), - {_Tag, State1} = send_command(Node, undefined, Cmd, normal, State0), - {State1, []}; + {send_command(Node, undefined, Cmd, normal, State0), []}; return(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> ConsumerId = consumer_id(ConsumerTag), @@ -333,11 +341,7 @@ return(ConsumerTag, [_|_] = MsgIds, discard(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> Node = pick_server(State0), Cmd = rabbit_fifo:make_discard(consumer_id(ConsumerTag), MsgIds), - case send_command(Node, undefined, Cmd, normal, State0) of - {_, S} -> - % turn slow into ok for this function - {S, []} - end; + {send_command(Node, undefined, Cmd, normal, State0), []}; discard(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> ConsumerId = consumer_id(ConsumerTag), @@ -424,11 +428,7 @@ credit(ConsumerTag, Credit, Drain, Node = pick_server(State0), Cmd = rabbit_fifo:make_credit(ConsumerId, Credit, C#consumer.last_msg_id + 1, Drain), - case send_command(Node, undefined, Cmd, normal, State0) of - {_, S} -> - % turn slow into ok for this function - {S, []} - end. + {send_command(Node, undefined, Cmd, normal, State0), []}. 
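The inlined enqueue/3 and the reshaped send_command/5 make the same backpressure decision: once the map of unconfirmed commands reaches the configured soft limit, the client flags itself slow, which is what triggers the block handler and the resend timer. That decision in isolation (flow_state is a hypothetical name):

    %% sketch: the per-command flow decision, mirroring the Tag computation above
    flow_state(Pending, SoftLimit) when is_map(Pending), is_integer(SoftLimit) ->
        case map_size(Pending) >= SoftLimit of
            true  -> slow;  %% caller blocks publishers / arms the timer
            false -> ok
        end.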
%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag %% @@ -579,11 +579,8 @@ handle_ra_event(From, {applied, Seqs}, Node = pick_server(State2), %% send all the settlements and returns State = lists:foldl(fun (C, S0) -> - case send_command(Node, undefined, - C, normal, S0) of - {T, S} when T =/= error -> - S - end + send_command(Node, undefined, C, + normal, S0) end, State2, Commands), UnblockFun(), {ok, State, Actions}; @@ -854,36 +851,37 @@ sorted_servers(#state{leader = Leader, next_seq(#state{next_seq = Seq} = State) -> {Seq, State#state{next_seq = Seq + 1}}. -next_enqueue_seq(#state{next_enqueue_seq = Seq} = State) -> - {Seq, State#state{next_enqueue_seq = Seq + 1}}. - consumer_id(ConsumerTag) -> {ConsumerTag, self()}. send_command(Server, Correlation, Command, _Priority, #state{pending = Pending, - cfg = #cfg{soft_limit = SftLmt}} = State0) + next_seq = Seq0, + cfg = #cfg{soft_limit = SftLmt}} = State) when element(1, Command) == return -> %% returns are sent to the aux machine for pre-evaluation - {Seq, State} = next_seq(State0), + Seq = Seq0 + 1, ok = ra:cast_aux_command(Server, {Command, Seq, self()}), - Tag = case maps:size(Pending) >= SftLmt of + Tag = case map_size(Pending) >= SftLmt of true -> slow; false -> ok end, - {Tag, State#state{pending = Pending#{Seq => {Correlation, Command}}, - slow = Tag == slow}}; + State#state{pending = Pending#{Seq => {Correlation, Command}}, + next_seq = Seq, + slow = Tag == slow}; send_command(Server, Correlation, Command, Priority, #state{pending = Pending, - cfg = #cfg{soft_limit = SftLmt}} = State0) -> - {Seq, State} = next_seq(State0), + next_seq = Seq0, + cfg = #cfg{soft_limit = SftLmt}} = State) -> + Seq = Seq0 + 1, ok = ra:pipeline_command(Server, Command, Seq, Priority), - Tag = case maps:size(Pending) >= SftLmt of + Tag = case map_size(Pending) >= SftLmt of true -> slow; false -> ok end, - {Tag, State#state{pending = Pending#{Seq => {Correlation, Command}}, - slow = Tag == slow}}. + State#state{pending = Pending#{Seq => {Correlation, Command}}, + next_seq = Seq, + slow = Tag == slow}. resend_command(Node, Correlation, Command, From c7cd53063380274fb98b8943f5391486c15b4e5a Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 25 Jan 2022 18:41:27 +0100 Subject: [PATCH 55/97] Add memory details of dlx workers to see memory usage across all dlx workers at a glance. 
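The rabbit_vm memory and binary breakdowns work by summing per-process usage under a set of named supervisors, so this change registers rabbit_fifo_dlx_sup as one more bucket, reported as quorum_queue_dlx_procs. The sketch below is only an approximation of what such a bucket measures, using plain OTP calls (supervisor:which_children/1, erlang:process_info/2); the real code aggregates every bucket in one pass via sum_processes/3.

-module(sup_memory_sketch).
-export([sup_memory/1]).

%% Sum the current memory of all live children of one supervisor.
sup_memory(SupName) ->
    lists:sum([case erlang:process_info(Pid, memory) of
                   {memory, Bytes} -> Bytes;
                   undefined -> 0      % child exited in the meantime
               end || {_Id, Pid, _Type, _Mods} <- supervisor:which_children(SupName),
                      is_pid(Pid)]).

%% sup_memory(rabbit_fifo_dlx_sup) corresponds roughly to the new
%% quorum_queue_dlx_procs figure shown on the memory breakdown pages.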
--- deps/rabbit/src/rabbit_vm.erl | 12 ++++++++---- deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs | 5 +++-- deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs | 4 +++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/deps/rabbit/src/rabbit_vm.erl b/deps/rabbit/src/rabbit_vm.erl index f01c383b63df..ef49ef5f036a 100644 --- a/deps/rabbit/src/rabbit_vm.erl +++ b/deps/rabbit/src/rabbit_vm.erl @@ -20,7 +20,7 @@ memory() -> {Sums, _Other} = sum_processes( lists:append(All), distinguishers(), [memory]), - [Qs, QsSlave, Qqs, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel, + [Qs, QsSlave, Qqs, DlxWorkers, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] = [aggregate(Names, Sums, memory, fun (X) -> X end) || Names <- distinguished_interesting_sups()], @@ -55,7 +55,7 @@ memory() -> OtherProc = Processes - ConnsReader - ConnsWriter - ConnsChannel - ConnsOther - - Qs - QsSlave - Qqs - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins + - Qs - QsSlave - Qqs - DlxWorkers - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins - MgmtDbProc - MetricsProc, [ @@ -69,6 +69,7 @@ memory() -> {queue_procs, Qs}, {queue_slave_procs, QsSlave}, {quorum_queue_procs, Qqs}, + {quorum_queue_dlx_procs, DlxWorkers}, {stream_queue_procs, Ssqs}, {stream_queue_replica_reader_procs, Srqs}, {stream_queue_coordinator_procs, SCoor}, @@ -118,7 +119,7 @@ binary() -> sets:add_element({Ptr, Sz}, Acc0) end, Acc, Info) end, distinguishers(), [{binary, sets:new()}]), - [Other, Qs, QsSlave, Qqs, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter, + [Other, Qs, QsSlave, Qqs, DlxWorkers, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter, ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] = [aggregate(Names, [{other, Rest} | Sums], binary, fun sum_binary/1) || Names <- [[other] | distinguished_interesting_sups()]], @@ -129,6 +130,7 @@ binary() -> {queue_procs, Qs}, {queue_slave_procs, QsSlave}, {quorum_queue_procs, Qqs}, + {quorum_queue_dlx_procs, DlxWorkers}, {stream_queue_procs, Ssqs}, {stream_queue_replica_reader_procs, Srqs}, {stream_queue_coordinator_procs, Scoor}, @@ -175,7 +177,7 @@ bytes(Words) -> try end. interesting_sups() -> - [queue_sups(), quorum_sups(), stream_server_sups(), stream_reader_sups(), + [queue_sups(), quorum_sups(), dlx_sups(), stream_server_sups(), stream_reader_sups(), conn_sups() | interesting_sups0()]. queue_sups() -> @@ -192,6 +194,7 @@ quorum_sups() -> supervisor:which_children(ra_server_sup_sup)] end. +dlx_sups() -> [rabbit_fifo_dlx_sup]. stream_server_sups() -> [osiris_server_sup]. stream_reader_sups() -> [osiris_replica_reader_sup]. 
@@ -248,6 +251,7 @@ distinguished_interesting_sups() -> with(queue_sups(), master), with(queue_sups(), slave), with(quorum_sups(), quorum), + dlx_sups(), stream_server_sups(), stream_reader_sups(), with(quorum_sups(), stream), diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs index 4adcf149af88..666adc99b05b 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs @@ -9,6 +9,7 @@ var sections = {'queue_procs' : ['classic', 'Classic queues (masters)'], 'queue_slave_procs' : ['classic', 'Classic queues (mirrors)'], 'quorum_queue_procs' : ['quorum', 'Quorum queues'], + 'quorum_queue_dlx_procs' : ['quorum', 'Dead letter workers'], 'stream_queue_procs' : ['stream', 'Stream queues'], 'stream_queue_replica_reader_procs' : ['stream', 'Stream queues (replica reader)'], 'stream_queue_coordinator_procs' : ['stream', 'Stream queues (coordinator)'], @@ -26,12 +27,12 @@  
<% - var key = [[{name: 'Classic Queues', colour: 'classic', keys: [['queue_procs', 'queues'], ['queue_slave_procs', 'mirrors']]}, {name: 'Quorum Queues', colour: 'quorum', - keys: [['quorum_queue_procs', 'quorum']]}, + keys: [['quorum_queue_procs', 'quorum'], + ['quorum_queue_dlx_procs', 'dead letter workers']]}, {name: 'Streams', colour: 'stream', keys: [['stream_queue_procs', 'stream'], ['stream_queue_replica_reader_procs', 'stream replica reader'], diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs index 8db336835619..df7b33eb070f 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs @@ -9,6 +9,7 @@ var sections = {'queue_procs' : ['classic', 'Classic queues (masters)'], 'queue_slave_procs' : ['classic', 'Classic queues (mirrors)'], 'quorum_queue_procs' : ['quorum', 'Quorum queues'], + 'quorum_queue_dlx_procs' : ['quorum', 'Dead letter workers'], 'stream_queue_procs' : ['stream', 'Stream queues'], 'stream_queue_replica_reader_procs' : ['stream', 'Stream queues (replica reader)'], 'stream_queue_coordinator_procs' : ['stream', 'Stream queues (coordinator)'], @@ -38,7 +39,8 @@ var key = [[{name: 'Classic Queues', colour: 'classic', keys: [['queue_procs', 'queues'], ['queue_slave_procs', 'mirrors']]}, {name: 'Quorum Queues', colour: 'quorum', - keys: [['quorum_queue_procs','quorum']]}, + keys: [['quorum_queue_procs','quorum'], + ['quorum_queue_dlx_procs', 'dead letter workers']]}, {name: 'Streams', colour: 'stream', keys: [['stream_queue_procs', 'stream'], ['stream_queue_replica_reader_procs', 'stream replica reader'], From 672b9831b3369bb8206b4b95e2b306fb8ef4b114 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Wed, 26 Jan 2022 16:04:15 +0000 Subject: [PATCH 56/97] rabbit_fifo: conversion bug fixes and remove prefix_msgs field --- deps/rabbit/src/rabbit_fifo.erl | 63 ++++++++++++--------- deps/rabbit/src/rabbit_fifo.hrl | 30 ++-------- deps/rabbit/src/rabbit_fifo_index.erl | 17 ++++-- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 37 ++++++++++-- 4 files changed, 83 insertions(+), 64 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index d431a17cdc5d..e2f978eb984a 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -602,41 +602,53 @@ apply(_Meta, Cmd, State) -> rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), {State, ok, []}. -convert_msg({RaftIdx, {Header, empty}}) -> +convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); convert_msg({'$empty_msg', Header}) -> %% dummy index - ?INDEX_MSG(0, ?DISK_MSG(Header)); + ?INDEX_MSG(undefined, ?DISK_MSG(Header)); convert_msg({'$prefix_msg', Header}) -> %% dummy index - ?INDEX_MSG(0, ?DISK_MSG(Header)). + ?INDEX_MSG(undefined, ?DISK_MSG(Header)); +convert_msg({Header, empty}) -> + convert_msg(Header); +convert_msg(Header) + when is_integer(Header) orelse + is_map_key(size, Header) -> + ?INDEX_MSG(undefined, ?DISK_MSG(Header)). 
convert_v1_to_v2(V1State) -> IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), - % EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), %% remove all raft idx in messages from index + {_, PrefMsgs, _, PrefReturns} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), + V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> + lqueue:in(convert_msg(Hdr), Acc) + end, lqueue:new(), PrefMsgs), + V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> + lqueue:in(convert_msg(Hdr), Acc) + end, lqueue:new(), PrefReturns), MessagesV2 = lqueue:foldl(fun ({_, IdxMsg}, Acc) -> lqueue:in(convert_msg(IdxMsg), Acc) - end, lqueue:new(), MessagesV1), + end, V2PrefMsgs, MessagesV1), ReturnsV2 = lqueue:foldl(fun ({_SeqId, Msg}, Acc) -> lqueue:in(convert_msg(Msg), Acc) - end, lqueue:new(), ReturnsV1), + end, V2PrefReturns, ReturnsV1), - %% TODO: prefix message need to be turned into disk msgs and added - %% to messages and returns respectively - % prefix_msgs = rabbit_fifo_v1:get_field(prefix_msgs, V1State), ConsumersV2 = maps:map( fun (_, #consumer{checked_out = Ch} = C) -> C#consumer{ - checked_out = maps:map( - fun (_, {_SeqId, IdxMsg}) -> - convert_msg(IdxMsg) - end, Ch)} + checked_out = + maps:map( + fun (_, {Tag, _} = Msg) when is_atom(Tag) -> + convert_msg(Msg); + (_, {_Seq, Msg}) -> + convert_msg(Msg) + end, Ch)} end, ConsumersV1), %% The (old) format of dead_letter_handler in RMQ < v3.10 is: @@ -685,7 +697,6 @@ convert_v1_to_v2(V1State) -> release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), consumers = ConsumersV2, service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), - prefix_msgs = rabbit_fifo_v1:get_field(prefix_msgs, V1State), msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), waiting_consumers = rabbit_fifo_v1:get_field(waiting_consumers, V1State), @@ -771,8 +782,7 @@ state_enter0(leader, #?MODULE{consumers = Cons, waiting_consumers = WaitingConsumers, cfg = #cfg{name = Name, resource = Resource, - become_leader_handler = BLH}, - prefix_msgs = {0, [], 0, []} + become_leader_handler = BLH} } = State, Effects0) -> TimerEffs = timer_effect(erlang:system_time(millisecond), State, Effects0), @@ -863,7 +873,7 @@ overview(#?MODULE{consumers = Cons, num_in_memory_ready_messages => 0, %% backwards compat num_messages => messages_total(State), num_release_cursors => lqueue:len(Cursors), - release_cursors => [{I, messages_total(S)} || {_, I, S} <- lqueue:to_list(Cursors)], + release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], release_cursor_enqueue_counter => EnqCount, enqueue_message_bytes => EnqueueBytes, checkout_message_bytes => CheckoutBytes, @@ -1201,21 +1211,20 @@ usage(Name) when is_atom(Name) -> %%% Internal messages_ready(#?MODULE{messages = M, - prefix_msgs = {RCnt, _R, PCnt, _P}, returns = R}) -> - lqueue:len(M) + lqueue:len(R) + RCnt + PCnt. + lqueue:len(M) + lqueue:len(R). messages_total(#?MODULE{messages_total = Total, dlx = DlxState}) -> {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), - Total + DlxTotal; + Total + DlxTotal. %% release cursors might be old state (e.g. after recent upgrade) -messages_total(State) -> - try - rabbit_fifo_v1:query_messages_total(State) - catch _:_ -> - rabbit_fifo_v0:query_messages_total(State) - end. 
+% messages_total(State) -> +% try +% rabbit_fifo_v1:query_messages_total(State) +% catch _:_ -> +% rabbit_fifo_v0:query_messages_total(State) +% end. update_use({inactive, _, _, _} = CUInfo, inactive) -> CUInfo; @@ -2317,4 +2326,4 @@ can_immediately_deliver(#?MODULE{service_queue = SQ, end. incr(I) -> - I + 1. + I + 1. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 70b5e378dc94..cd959cee0fd0 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -170,7 +170,7 @@ {cfg :: #cfg{}, % unassigned messages messages = lqueue:new() :: lqueue:lqueue(indexed_msg()), - % defines the next message id + % messages_total = 0 :: non_neg_integer(), % queue of returned msg_in_ids - when checking out it picks from returns = lqueue:new() :: lqueue:lqueue(term()), @@ -178,43 +178,23 @@ % reset to 0 when release_cursor gets stored enqueue_count = 0 :: non_neg_integer(), % a map containing all the live processes that have ever enqueued - % a message to this queue as well as a cached value of the smallest - % ra_index of all pending enqueues + % a message to this queue enqueuers = #{} :: #{pid() => #enqueuer{}}, - % master index of all enqueue raft indexes including pending - % enqueues + % index of all messages that have been delivered at least once + % used to work out the smallest live raft index % rabbit_fifo_index can be slow when calculating the smallest % index when there are large gaps but should be faster than gb_trees % for normal appending operations as it's backed by a map ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), - %% A release cursor is essentially a snapshot without message bodies - %% (aka. "dehydrated state") taken at time T in order to truncate - %% the log at some point in the future when all messages that were enqueued - %% up to time T have been removed (e.g. consumed, dead-lettered, or dropped). - %% This concept enables snapshots to not contain any message bodies. - %% Advantage: Smaller snapshots are sent between Ra nodes. + %% A release cursor is essentially a snapshot for a past raft index %% Working assumption: Messages are consumed in a FIFO-ish order because %% the log is truncated only until the oldest message. release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, ra:index(), #rabbit_fifo{}}), % consumers need to reflect consumer state at time of snapshot - % needs to be part of snapshot consumers = #{} :: #{consumer_id() => consumer()}, % consumers that require further service are queued here - % needs to be part of snapshot service_queue = priority_queue:new() :: priority_queue:q(), - %% This is a special field that is only used for snapshots - %% It represents the queued messages at the time the - %% dehydrated snapshot state was cached. - %% As release_cursors are only emitted for raft indexes where all - %% prior messages no longer contribute to the current state we can - %% replace all message payloads with their sizes (to be used for - %% overflow calculations). - %% This is done so that consumers are still served in a deterministic - %% order on recovery. - %% TODO Remove this field and store prefix messages in-place. This will - %% simplify the checkout logic. 
- prefix_msgs = {0, [], 0, []} :: prefix_msgs(), %% state for at-least-once dead-lettering dlx = rabbit_fifo_dlx:init() :: rabbit_fifo_dlx:state(), msg_bytes_enqueue = 0 :: non_neg_integer(), diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl index 685208795e8f..67c2cdf6e665 100644 --- a/deps/rabbit/src/rabbit_fifo_index.erl +++ b/deps/rabbit/src/rabbit_fifo_index.erl @@ -41,24 +41,29 @@ append(Key, #?MODULE{data = Data, smallest = Smallest, largest = Largest} = State) - when Key > Largest orelse - Largest =:= undefined -> + when is_integer(Key) andalso + (Key > Largest orelse + Largest =:= undefined) -> State#?MODULE{data = maps:put(Key, ?NIL, Data), smallest = ra_lib:default(Smallest, Key), largest = Key}; append(Key, #?MODULE{data = Data, largest = Largest, - smallest = Smallest} = State) -> + smallest = Smallest} = State) + when is_integer(Key) -> State#?MODULE{data = maps:put(Key, ?NIL, Data), smallest = min(Key, ra_lib:default(Smallest, Key)), largest = max(Key, ra_lib:default(Largest, Key)) - }. + }; +append(undefined, State) -> + State. --spec delete(Index :: integer(), state()) -> state(). +-spec delete(Index :: integer() | undefined, state()) -> state(). delete(Smallest, #?MODULE{data = Data0, largest = Largest, - smallest = Smallest} = State) -> + smallest = Smallest} = State) + when is_integer(Smallest) -> Data = maps:remove(Smallest, Data0), case find_next(Smallest + 1, Largest, Data) of undefined -> diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 8b6ae5ac11cd..bc354b95b1c9 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1470,17 +1470,15 @@ messages_total_invariant() -> end. upgrade_prop(Conf0, Commands) -> - Conf = Conf0#{release_cursor_interval => 1}, + Conf = Conf0#{release_cursor_interval => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), InitState = test_init_v1(Conf), [begin {PreEntries, PostEntries} = lists:split(SplitPos, Entries), %% run log v1 - V1 = lists:foldl( - fun ({Idx, E}, Acc0) -> - element(1, rabbit_fifo_v1:apply(meta(Idx), E, Acc0)) - end, InitState, PreEntries), + {V1, _V1Effs} = run_log(InitState, PreEntries, fun (_) -> true end, + rabbit_fifo_v1), %% perform conversion #rabbit_fifo{} = V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), @@ -1506,6 +1504,33 @@ upgrade_prop(Conf0, Commands) -> %% check we can run the post entries from the converted state run_log(V2, PostEntries) end || SplitPos <- lists:seq(1, length(Entries))], + + {_, V1Effs} = run_log(InitState, Entries, fun (_) -> true end, + rabbit_fifo_v1), + [begin + % ct:pal("V1 ~p", [RCS]), + Res = rabbit_fifo:apply(meta(Idx + 1), {machine_version, 1, 2}, RCS) , + % ct:pal("V2 ~p", [Res]), + #rabbit_fifo{} = V2 = element(1, Res), + %% assert invariants + Fields = [num_messages, + num_ready_messages, + smallest_raft_index, + num_enqueuers, + num_consumers, + enqueue_message_bytes, + checkout_message_bytes + ], + V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(RCS)), + V2Overview = maps:with(Fields, rabbit_fifo:overview(V2)), + case V1Overview == V2Overview of + true -> ok; + false -> + ct:pal("upgrade_prop failed expected~n~p~nGot:~n~p", + [V1Overview, V2Overview]), + ?assertEqual(V1Overview, V2Overview) + end + end || {release_cursor, Idx, RCS} <- V1Effs], true. 
%% single active consumer ordering invariant: @@ -1685,7 +1710,7 @@ checkout_cancel_gen(Pid) -> checkout_gen(Pid) -> %% pid, tag, prefetch - ?LET(C, {checkout, {binary(), Pid}, choose(1, 100)}, C). + ?LET(C, {checkout, {binary(), Pid}, choose(1, 10)}, C). -record(t, {state :: rabbit_fifo:state(), index = 1 :: non_neg_integer(), %% raft index From 96e6fabc7e431d897f6eb6ed53de783edea785d6 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Thu, 27 Jan 2022 10:21:03 +0000 Subject: [PATCH 57/97] rabbit_fifo_prop: fix build error --- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index bc354b95b1c9..0cf3af77d348 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -1444,11 +1444,10 @@ messages_total_prop(Conf0, Commands) -> messages_total_invariant() -> fun(#rabbit_fifo{messages = M, consumers = C, - prefix_msgs = {PTot, _, RTot, _}, returns = R, dlx = #rabbit_fifo_dlx{discards = D, consumer = DlxCon}} = S) -> - Base = lqueue:len(M) + lqueue:len(R) + PTot + RTot, + Base = lqueue:len(M) + lqueue:len(R), Tot0 = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> Acc + map_size(Ch) end, Base, C), From 31b39168da1f0fda9c36502687d49810bba87aa2 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 27 Jan 2022 16:17:06 +0100 Subject: [PATCH 58/97] Output some logs only on Ra leader Use mod_call effect to log switching strategies only on leader node to prevent excessive logging on different nodes. Other logs in this file are already output only on the leader. (Example "worker started"). Again other logs are okay to output on followers. (Example "leader terminated" or "unknown command"). --- deps/rabbit/src/rabbit_fifo_dlx.erl | 19 +++++++++++-------- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 17 ++++++++++++++--- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 439710e725a8..d0239565a67f 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -329,20 +329,23 @@ update_config(at_least_once, at_least_once, _, State) -> update_config(SameDLH, SameDLH, _, State) -> {State, []}; update_config(OldDLH, NewDLH, QRes, State0) -> - rabbit_log:debug("Switching dead_letter_handler from ~p to ~p for ~s", - [OldDLH, NewDLH, rabbit_misc:rs(QRes)]), - {State, Effects} = switch_from(OldDLH, QRes, State0), - switch_to(NewDLH, State, Effects). + LogOnLeader = {mod_call, rabbit_log, debug, + ["Switching dead_letter_handler from ~p to ~p for ~s", + [OldDLH, NewDLH, rabbit_misc:rs(QRes)]]}, + {State1, Effects0} = switch_from(OldDLH, QRes, State0), + {State, Effects} = switch_to(NewDLH, State1, Effects0), + {State, [LogOnLeader|Effects]}. -spec switch_from(Old :: dead_letter_handler(), rabbit_types:r('queue'), state()) -> {state(), ra_machine:effects()}. switch_from(at_least_once, QRes, State) -> - %% switch from at-least-once to some other strategy + %% Switch from at-least-once to some other strategy. ensure_worker_terminated(State), {Num, Bytes} = stat(State), - rabbit_log:info("Deleted ~b dead-lettered messages (with total messages size of ~b bytes) in ~s", - [Num, Bytes, rabbit_misc:rs(QRes)]), - {init(), []}; + %% Log only on leader. 
+ {init(), [{mod_call, rabbit_log, info, + ["Deleted ~b dead-lettered messages (with total messages size of ~b bytes) in ~s", + [Num, Bytes, rabbit_misc:rs(QRes)]]}]}; switch_from(_, _, State) -> {State, []}. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 34623f21624b..73a079f3db2d 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -149,8 +149,12 @@ switch_strategies(_Config) -> Handler0 = undefined, Handler1 = at_least_once, %% Switching from undefined to at_least_once should start dlx consumer. - {S1, Effects} = rabbit_fifo_dlx:update_config(Handler0, Handler1, QRes, S0), - ?assertEqual([{aux, {dlx, setup}}], Effects), + {S1, Effects0} = rabbit_fifo_dlx:update_config(Handler0, Handler1, QRes, S0), + ?assertEqual([{mod_call, rabbit_log, debug, + ["Switching dead_letter_handler from ~p to ~p for ~s", + [undefined, at_least_once, "queue 'blah' in vhost '/'"]]}, + {aux, {dlx, setup}}], + Effects0), rabbit_fifo_dlx:handle_aux(leader, {dlx, setup}, fake_aux, QRes, Handler1, S1), [{_, WorkerPid, worker, _}] = supervisor:which_children(rabbit_fifo_dlx_sup), {S2, _} = rabbit_fifo_dlx:discard([make_msg(1)], because, Handler1, S1), @@ -160,7 +164,14 @@ switch_strategies(_Config) -> ?assertMatch(#{num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S4)), %% Switching from at_least_once to undefined should terminate dlx consumer. - {S5, []} = rabbit_fifo_dlx:update_config(Handler1, Handler0, QRes, S4), + {S5, Effects} = rabbit_fifo_dlx:update_config(Handler1, Handler0, QRes, S4), + ?assertEqual([{mod_call, rabbit_log, debug, + ["Switching dead_letter_handler from ~p to ~p for ~s", + [at_least_once, undefined, "queue 'blah' in vhost '/'"]]}, + {mod_call, rabbit_log, info, + ["Deleted ~b dead-lettered messages (with total messages size of ~b bytes) in ~s", + [1, 1, "queue 'blah' in vhost '/'"]]}], + Effects), ?assertMatch([_, {active, 0}, _, _], supervisor:count_children(rabbit_fifo_dlx_sup)), ?assertMatch(#{num_discarded := 0}, rabbit_fifo_dlx:overview(S5)), From 4618fac01caa44ec5fcb75d03200f32d35ca1dbe Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 27 Jan 2022 17:29:30 +0100 Subject: [PATCH 59/97] Allocate less memory by updating record at once (vs. splitting over muliple functions / lines). 
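Background for this change, sketched with throwaway record names rather than the real dlx state: every Rec#rec{...} expression rebuilds the whole underlying record tuple, so splitting one logical update across helper functions pays for a copy per helper, while grouping the field changes into a single update expression pays for one.

-module(record_update_sketch).
-export([move_twice/2, move_once/2]).

-record(acc, {bytes = 0, bytes_checkout = 0}).

%% Two updates: the record tuple is rebuilt twice.
move_twice(Size, Acc0) ->
    Acc1 = Acc0#acc{bytes = Acc0#acc.bytes - Size},
    Acc1#acc{bytes_checkout = Acc1#acc.bytes_checkout + Size}.

%% One update: same result, a single rebuild.
move_once(Size, #acc{bytes = B, bytes_checkout = BC} = Acc) ->
    Acc#acc{bytes = B - Size,
            bytes_checkout = BC + Size}.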
--- deps/rabbit/src/rabbit_fifo_dlx.erl | 18 ++++++++---------- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 7 ++++--- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index d0239565a67f..dada6ddb0860 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -210,26 +210,24 @@ checkout_one(#?MODULE{consumer = #dlx_consumer{checked_out = Checked, when map_size(Checked) >= Prefetch -> State; checkout_one(#?MODULE{discards = Discards0, + msg_bytes = Bytes, + msg_bytes_checkout = BytesCheckout, consumer = #dlx_consumer{checked_out = Checked0, next_msg_id = Next} = Con0} = State0) -> case lqueue:out(Discards0) of {{value, {_, Msg} = ReasonMsg}, Discards} -> Checked = maps:put(Next, ReasonMsg, Checked0), - State1 = State0#?MODULE{discards = Discards, - consumer = Con0#dlx_consumer{checked_out = Checked, - next_msg_id = Next + 1}}, - Bytes = size_in_bytes(Msg), - State = add_bytes_checkout(Bytes, State1), + Size = size_in_bytes(Msg), + State = State0#?MODULE{discards = Discards, + msg_bytes = Bytes - Size, + msg_bytes_checkout = BytesCheckout + Size, + consumer = Con0#dlx_consumer{checked_out = Checked, + next_msg_id = Next + 1}}, {success, Next, ReasonMsg, State}; {empty, _} -> State0 end. -add_bytes_checkout(Size, #?MODULE{msg_bytes = Bytes, - msg_bytes_checkout = BytesCheckout} = State) -> - State#?MODULE{msg_bytes = Bytes - Size, - msg_bytes_checkout = BytesCheckout + Size}. - size_in_bytes(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> rabbit_fifo:get_header(size, Header). diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index aa61f3542e7a..faa53f0e16d8 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -266,7 +266,6 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, {RouteToQs, State2} end, Now = os:system_time(millisecond), - State4 = State3#state{next_out_seq = OutSeq + 1}, Pend0 = #pending{ consumed_msg_id = ConsumedMsgId, consumed_at = Now, @@ -277,12 +276,14 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, [] -> %% We can't deliver this message since there is no target queue we can route to. %% We buffer this message and retry to send every settle_timeout milliseonds. - State4#state{pendings = maps:put(OutSeq, Pend0, Pendings)}; + State3#state{next_out_seq = OutSeq + 1, + pendings = maps:put(OutSeq, Pend0, Pendings)}; _ -> Pend = Pend0#pending{publish_count = 1, last_published_at = Now, unsettled = TargetQs}, - State = State4#state{pendings = maps:put(OutSeq, Pend, Pendings)}, + State = State3#state{next_out_seq = OutSeq + 1, + pendings = maps:put(OutSeq, Pend, Pendings)}, deliver_to_queues(Delivery, TargetQs, State) end. From 13a30898cf17be39a0b5f5644ebacaac3095552a Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 27 Jan 2022 18:18:54 +0100 Subject: [PATCH 60/97] Delete queue returns number of ready messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AMQP 0.9.1 spec: 1.7.2.10.1. Parameter queue.delete­ok.message­count (message­count) Ordinal: 1 Domain: message­coun Reports the number of messages deleted. For classic and quorum queues, deleting a queue returns the number of ready messages. This does not include unacked (or dead lettered messages). Make it clear in the CLI output. 
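For reference, the same count is what an AMQP 0.9.1 client receives in queue.delete-ok. A small illustrative helper using the Erlang amqp_client library is shown below; it is not part of this patch, and it assumes an already-open channel.

-module(delete_queue_sketch).
-export([delete_and_report/2]).

-include_lib("amqp_client/include/amqp_client.hrl").

%% The reported message_count covers ready messages only; unacked and
%% dead-lettered messages are not included.
delete_and_report(Ch, QName) ->
    #'queue.delete_ok'{message_count = Ready} =
        amqp_channel:call(Ch, #'queue.delete'{queue = QName}),
    io:format("Queue was successfully deleted with ~b ready messages~n", [Ready]),
    Ready.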
--- .../lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex index 5c5aebe3eb65..a1ee2e09b58e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex @@ -80,7 +80,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand do end def output({:ok, qlen}, _options) do - {:ok, "Queue was successfully deleted with #{qlen} messages"} + {:ok, "Queue was successfully deleted with #{qlen} ready messages"} end ## Use default output for all non-special case outputs From fffb9c4295b662f71bf06160c269b53874e4a745 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 31 Jan 2022 17:29:08 +0100 Subject: [PATCH 61/97] Add dead-letter-strategy policy validator test --- deps/rabbit/src/rabbit_basic.erl | 3 +-- deps/rabbit/test/unit_policy_validators_SUITE.erl | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/deps/rabbit/src/rabbit_basic.erl b/deps/rabbit/src/rabbit_basic.erl index b42e832f71eb..cc7c00047e63 100644 --- a/deps/rabbit/src/rabbit_basic.erl +++ b/deps/rabbit/src/rabbit_basic.erl @@ -12,8 +12,7 @@ -export([publish/4, publish/5, publish/1, message/3, message/4, properties/1, prepend_table_header/3, extract_headers/1, extract_timestamp/1, map_headers/2, delivery/4, - header_routes/1, parse_expiration/1, header/2, header/3, - is_message_persistent/1]). + header_routes/1, parse_expiration/1, header/2, header/3]). -export([build_content/2, from_content/1, msg_size/1, maybe_gc_large_msg/1, maybe_gc_large_msg/2]). -export([add_header/4, diff --git a/deps/rabbit/test/unit_policy_validators_SUITE.erl b/deps/rabbit/test/unit_policy_validators_SUITE.erl index 54dbdd56cf48..aaa2206a299e 100644 --- a/deps/rabbit/test/unit_policy_validators_SUITE.erl +++ b/deps/rabbit/test/unit_policy_validators_SUITE.erl @@ -24,6 +24,7 @@ groups() -> alternate_exchange, dead_letter_exchange, dead_letter_routing_key, + dead_letter_strategy, message_ttl, expires, max_length, @@ -86,6 +87,13 @@ dead_letter_exchange(_Config) -> dead_letter_routing_key(_Config) -> requires_binary_value(<<"dead-letter-routing-key">>). +dead_letter_strategy(_Config) -> + test_valid_and_invalid_values(<<"dead-letter-strategy">>, + %% valid values + [<<"at-most-once">>, <<"at-least-once">>], + %% invalid values + [<<"unknown">>, <<"dead-letter-strategy">>, <<"undefined">>]). + message_ttl(_Config) -> requires_non_negative_integer_value(<<"message-ttl">>). From 15b70bf973124dec26ef05d3242b8202dcd1abf0 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Wed, 2 Feb 2022 15:25:56 +0000 Subject: [PATCH 62/97] rabbit_fifo: refactor consumer record Refactor consumer record to keep rarely changing fields in a nested cfg record. This avoids some allocations as well as sets the consumer up for further changes around how consumer identities are managed. --- deps/rabbit/src/rabbit_fifo.erl | 199 ++++++++++++++++++------- deps/rabbit/src/rabbit_fifo.hrl | 26 ++-- deps/rabbit/test/rabbit_fifo_SUITE.erl | 18 ++- 3 files changed, 171 insertions(+), 72 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index e2f978eb984a..fe71dbdb0f08 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -73,6 +73,11 @@ make_garbage_collection/0 ]). +-define(SETTLE_V2, '$s'). 
+-define(RETURN_V2, '$r'). +-define(DISCARD_V2, '$d'). +-define(CREDIT_V2, '$c'). + %% command records representing all the protocol actions that are supported -record(enqueue, {pid :: option(pid()), seq :: option(msg_seqno()), @@ -100,6 +105,21 @@ -record(purge_nodes, {nodes :: [node()]}). -record(update_config, {config :: config()}). -record(garbage_collection, {}). +%% v2 alternative commands +%% each consumer is assigned an integer index which can be used +%% instead of the consumer id to identify the consumer +-type consumer_idx() :: non_neg_integer(). + +-record(?SETTLE_V2, {consumer_idx :: consumer_idx(), + msg_ids :: [msg_id()]}). +-record(?RETURN_V2, {consumer_idx :: consumer_idx(), + msg_ids :: [msg_id()]}). +-record(?DISCARD_V2, {consumer_idx :: consumer_idx(), + msg_ids :: [msg_id()]}). +-record(?CREDIT_V2, {consumer_idx :: consumer_idx(), + credit :: non_neg_integer(), + delivery_count :: non_neg_integer(), + drain :: boolean()}). -opaque protocol() :: #enqueue{} | @@ -113,7 +133,12 @@ #purge{} | #purge_nodes{} | #update_config{} | - #garbage_collection{}. + #garbage_collection{} | + % v2 + #?SETTLE_V2{} | + #?RETURN_V2{} | + #?DISCARD_V2{} | + #?CREDIT_V2{}. -type command() :: protocol() | rabbit_fifo_dlx:protocol() | @@ -619,11 +644,40 @@ convert_msg(Header) is_map_key(size, Header) -> ?INDEX_MSG(undefined, ?DISK_MSG(Header)). +convert_consumer({ConsumerTag, Pid}, CV1) -> + Meta = element(2, CV1), + CheckedOut = element(3, CV1), + NextMsgId = element(4, CV1), + Credit = element(5, CV1), + DeliveryCount = element(6, CV1), + CreditMode = element(7, CV1), + LifeTime = element(8, CV1), + Status = element(9, CV1), + Priority = element(10, CV1), + #consumer{cfg = #consumer_cfg{tag = ConsumerTag, + pid = Pid, + meta = Meta, + credit_mode = CreditMode, + lifetime = LifeTime, + priority = Priority}, + credit = Credit, + status = Status, + delivery_count = DeliveryCount, + next_msg_id = NextMsgId, + checked_out = maps:map( + fun (_, {Tag, _} = Msg) when is_atom(Tag) -> + convert_msg(Msg); + (_, {_Seq, Msg}) -> + convert_msg(Msg) + end, CheckedOut) + }. 
+ convert_v1_to_v2(V1State) -> IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), + WaitingConsumersV1 = rabbit_fifo_v1:get_field(waiting_consumers, V1State), %% remove all raft idx in messages from index {_, PrefMsgs, _, PrefReturns} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> @@ -640,16 +694,13 @@ convert_v1_to_v2(V1State) -> end, V2PrefReturns, ReturnsV1), ConsumersV2 = maps:map( - fun (_, #consumer{checked_out = Ch} = C) -> - C#consumer{ - checked_out = - maps:map( - fun (_, {Tag, _} = Msg) when is_atom(Tag) -> - convert_msg(Msg); - (_, {_Seq, Msg}) -> - convert_msg(Msg) - end, Ch)} + fun (ConsumerId, CV1) -> + convert_consumer(ConsumerId, CV1) end, ConsumersV1), + WaitingConsumersV2 = lists:map( + fun ({ConsumerId, CV1}) -> + {ConsumerId, convert_consumer(ConsumerId, CV1)} + end, WaitingConsumersV1), %% The (old) format of dead_letter_handler in RMQ < v3.10 is: %% {Module, Function, Args} @@ -699,7 +750,7 @@ convert_v1_to_v2(V1State) -> service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), - waiting_consumers = rabbit_fifo_v1:get_field(waiting_consumers, V1State), + waiting_consumers = WaitingConsumersV2, last_active = rabbit_fifo_v1:get_field(last_active, V1State) }. @@ -1118,7 +1169,9 @@ query_consumers(#?MODULE{consumers = Consumers, FromConsumers = maps:fold(fun (_, #consumer{status = cancelled}, Acc) -> Acc; - ({Tag, Pid}, #consumer{meta = Meta} = Consumer, Acc) -> + ({Tag, Pid}, + #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer, + Acc) -> {Active, ActivityStatus} = ActiveActivityStatusFun({Tag, Pid}, Consumer), maps:put({Tag, Pid}, @@ -1134,7 +1187,9 @@ query_consumers(#?MODULE{consumers = Consumers, FromWaitingConsumers = lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) -> Acc; - ({{Tag, Pid}, #consumer{meta = Meta} = Consumer}, Acc) -> + ({{Tag, Pid}, + #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer}, + Acc) -> {Active, ActivityStatus} = ActiveActivityStatusFun({Tag, Pid}, Consumer), maps:put({Tag, Pid}, @@ -1185,9 +1240,11 @@ query_peek(Pos, State0) when Pos > 0 -> end. query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> - MaxActivePriority = maps:fold(fun(_, #consumer{credit = C, - status = up, - priority = P0}, MaxP) + MaxActivePriority = maps:fold(fun(_, + #consumer{credit = C, + status = up, + cfg = #consumer_cfg{priority = P0} + }, MaxP) when C > 0 -> P = -P0, case MaxP of @@ -1294,7 +1351,8 @@ cancel_consumer(Meta, ConsumerId, end. 
consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}}, - ConsumerId, #consumer{meta = Meta}, + ConsumerId, + #consumer{cfg = #consumer_cfg{meta = Meta}}, Active, ActivityStatus, Effects) -> Ack = maps:get(ack, Meta, undefined), @@ -1354,15 +1412,17 @@ activate_next_consumer(#?MODULE{consumers = Cons, -maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, +maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, + #consumer{cfg = CCfg} = Consumer, S0, Effects0, Reason) -> case Reason of consumer_cancel -> - {update_or_remove_sub(Meta, ConsumerId, - Consumer#consumer{lifetime = once, - credit = 0, - status = cancelled}, - S0), Effects0}; + {update_or_remove_sub( + Meta, ConsumerId, + Consumer#consumer{cfg = CCfg#consumer_cfg{lifetime = once}, + credit = 0, + status = cancelled}, + S0), Effects0}; down -> {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers), @@ -1562,12 +1622,12 @@ complete(Meta, ConsumerId, DiscardedMsgIds, msg_bytes_checkout = BytesCheckout - SettledSize, messages_total = Tot - Len}. -increase_credit(#consumer{lifetime = once, +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = once}, credit = Credit}, _) -> %% once consumers cannot increment credit Credit; -increase_credit(#consumer{lifetime = auto, - credit_mode = credited, +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = credited}, credit = Credit}, _) -> %% credit_mode: `credited' also doesn't automatically increment credit Credit; @@ -1970,19 +2030,22 @@ timer_effect(RaCmdTs, State, Effects) -> end, [{timer, expire_msgs, T} | Effects]. -update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto, - credit = 0} = Con, +update_or_remove_sub(_Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{lifetime = auto}, + credit = 0} = Con, #?MODULE{consumers = Cons} = State) -> State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)}; -update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto} = Con, +update_or_remove_sub(_Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{lifetime = auto}} = Con, #?MODULE{consumers = Cons, service_queue = ServiceQueue} = State) -> State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}; update_or_remove_sub(#{system_time := Ts}, - ConsumerId, #consumer{lifetime = once, - checked_out = Checked, - credit = 0} = Con, + ConsumerId, + #consumer{cfg = #consumer_cfg{lifetime = once}, + checked_out = Checked, + credit = 0} = Con, #?MODULE{consumers = Cons} = State) -> case maps:size(Checked) of 0 -> @@ -1993,13 +2056,14 @@ update_or_remove_sub(#{system_time := Ts}, % there are unsettled items so need to keep around State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)} end; -update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = once} = Con, +update_or_remove_sub(_Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{lifetime = once}} = Con, #?MODULE{consumers = Cons, service_queue = ServiceQueue} = State) -> State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. 
-uniq_queue_in(Key, #consumer{priority = P}, Queue) -> +uniq_queue_in(Key, #consumer{cfg = #consumer_cfg{priority = P}}, Queue) -> % TODO: queue:member could surely be quite expensive, however the practical % number of unique consumers may not be large enough for it to matter case priority_queue:member(Key, Queue) of @@ -2021,35 +2085,58 @@ update_consumer(ConsumerId, Meta, Spec, Priority, %% single active consumer on, no one is consuming yet or %% the currently active consumer is the same update_consumer0(ConsumerId, Meta, Spec, Priority, State0); -update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority, +update_consumer({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, #?MODULE{cfg = #cfg{consumer_strategy = single_active}, waiting_consumers = WaitingConsumers0} = State0) -> %% single active consumer on and one active consumer already %% adding the new consumer to the waiting list - Consumer = #consumer{lifetime = Life, meta = Meta, - priority = Priority, - credit = Credit, credit_mode = Mode}, + Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = Meta, + priority = Priority, + credit_mode = Mode}, + credit = Credit}, WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}], State0#?MODULE{waiting_consumers = WaitingConsumers1}. -update_consumer0(ConsumerId, Meta, {Life, Credit, Mode}, Priority, +update_consumer0({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, #?MODULE{consumers = Cons0, service_queue = ServiceQueue0} = State0) -> %% TODO: this logic may not be correct for updating a pre-existing consumer - Init = #consumer{lifetime = Life, meta = Meta, - priority = Priority, - credit = Credit, credit_mode = Mode}, - Cons = maps:update_with(ConsumerId, - fun(S) -> - %% remove any in-flight messages from - %% the credit update - N = maps:size(S#consumer.checked_out), - C = max(0, Credit - N), - S#consumer{lifetime = Life, credit = C} - end, Init, Cons0), - ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons), - ServiceQueue0), - State0#?MODULE{consumers = Cons, service_queue = ServiceQueue}. + % Init = #consumer{lifetime = Life, meta = Meta, + % priority = Priority, + % credit = Credit, credit_mode = Mode}, + Init = #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = Meta, + priority = Priority, + credit_mode = Mode}, + credit = Credit}, + Consumer = case Cons0 of + #{ConsumerId := #consumer{cfg = CCfg, + checked_out = Checked} = C} -> + NumChecked = map_size(Checked), + NewCredit = max(0, Credit - NumChecked), + C#consumer{cfg = CCfg#consumer_cfg{lifetime = Life}, + credit = NewCredit}; + _ -> + Init + end, + % Cons = maps:update_with(ConsumerId, + % fun(S) -> + % %% remove any in-flight messages from + % %% the credit update + % N = maps:size(S#consumer.checked_out), + % C = max(0, Credit - N), + % CCfg = S#consumer.cfg, + % S#consumer{cfg = CCfg#consumer_cfg{lifetime = Life}, + % credit = C} + % end, Init, Cons0), + ServiceQueue = maybe_queue_consumer(ConsumerId, Consumer, ServiceQueue0), + State0#?MODULE{consumers = Cons0#{ConsumerId => Consumer}, + service_queue = ServiceQueue}. maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, ServiceQueue0) -> @@ -2121,7 +2208,7 @@ make_register_enqueuer(Pid) -> -spec make_checkout(consumer_id(), checkout_spec(), consumer_meta()) -> protocol(). 
-make_checkout(ConsumerId, Spec, Meta) -> +make_checkout({_, _} = ConsumerId, Spec, Meta) -> #checkout{consumer_id = ConsumerId, spec = Spec, meta = Meta}. @@ -2218,7 +2305,9 @@ all_pids_for(Node, #?MODULE{consumers = Cons0, suspected_pids_for(Node, #?MODULE{consumers = Cons0, enqueuers = Enqs0, waiting_consumers = WaitingConsumers0}) -> - Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc) + Cons = maps:fold(fun({_, P}, + #consumer{status = suspected_down}, + Acc) when node(P) =:= Node -> [P | Acc]; (_, _, Acc) -> Acc diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index cd959cee0fd0..9e43dd76f582 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -101,16 +101,10 @@ -define(MB, 1048576). -define(LOW_LIMIT, 0.8). --record(consumer, +-record(consumer_cfg, {meta = #{} :: consumer_meta(), - checked_out = #{} :: #{msg_id() => indexed_msg()}, - next_msg_id = 0 :: msg_id(), % part of snapshot data - %% max number of messages that can be sent - %% decremented for each delivery - credit = 0 : non_neg_integer(), - %% total number of checked out messages - ever - %% incremented for each delivery - delivery_count = 0 :: non_neg_integer(), + pid :: pid(), + tag :: consumer_tag(), %% the mode of how credit is incremented %% simple_prefetch: credit is re-filled as deliveries are settled %% or returned. @@ -118,8 +112,20 @@ %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}' credit_mode = simple_prefetch :: credit_mode(), % part of snapshot data lifetime = once :: once | auto, + priority = 0 :: non_neg_integer()}). + +-record(consumer, + {cfg = #consumer_cfg{}, status = up :: up | suspected_down | cancelled, - priority = 0 :: non_neg_integer() + next_msg_id = 0 :: msg_id(), % part of snapshot data + checked_out = #{} :: #{msg_id() => indexed_msg()}, + %% max number of messages that can be sent + %% decremented for each delivery + credit = 0 : non_neg_integer(), + %% total number of checked out messages - ever + %% incremented for each delivery + delivery_count = 0 :: non_neg_integer() + }). -type consumer() :: #consumer{}. 
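The payoff of the nested cfg record above, sketched with throwaway names rather than the real #consumer{}/#consumer_cfg{} definitions: updating a frequently-changing field rebuilds only the small outer tuple, while the nested cfg tuple holding the tag, pid, meta and priority is shared by reference between the old and new consumer values.

-module(consumer_cfg_sketch).
-export([decr_credit/1]).

-record(cfg, {tag, pid, meta = #{}, priority = 0}).          %% rarely changes
-record(con, {cfg = #cfg{}, credit = 0, checked_out = #{}}). %% changes per delivery

%% Only the outer #con{} tuple is rebuilt here; the #cfg{} inside it is not
%% copied, however large its meta map grows.
decr_credit(#con{credit = C} = Con) ->
    Con#con{credit = C - 1}.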
diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 4e8a418291a0..900219f0511e 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -1254,7 +1254,7 @@ single_active_cancelled_with_unacked_test(_) -> State4#rabbit_fifo.consumers), %% C1 should be a cancelled consumer ?assertMatch(#{C1 := #consumer{status = cancelled, - lifetime = once, + cfg = #consumer_cfg{lifetime = once}, checked_out = #{0 := _}}}, State4#rabbit_fifo.consumers), ?assertMatch([], State4#rabbit_fifo.waiting_consumers), @@ -1336,15 +1336,18 @@ register_enqueuer_test(_) -> ?assertMatch(#{num_enqueuers := 3}, rabbit_fifo:overview(State6)), + Pid3 = test_util:fake_pid(node()), %% remove two messages this should make the queue fall below the 0.8 limit {State7, _, Efx7} = apply(meta(7), - rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State6), + rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), + State6), ?ASSERT_EFF({log, [_], _}, Efx7), % ct:pal("Efx7 ~p", [_Efx7]), {State8, _, Efx8} = apply(meta(8), - rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State7), + rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), + State7), ?ASSERT_EFF({log, [_], _}, Efx8), % ct:pal("Efx8 ~p", [Efx8]), %% validate all registered enqueuers are notified of overflow state @@ -1352,7 +1355,8 @@ register_enqueuer_test(_) -> ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid2, Efx8), {_State9, _, Efx9} = apply(meta(9), - rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State8), + rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), + State8), ?ASSERT_EFF({log, [_], _}, Efx9), ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid1, Efx9), ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid2, Efx9), @@ -1562,7 +1566,7 @@ machine_version_test(_) -> {S1, _Effects} = rabbit_fifo_v0_SUITE:run_log(S0, Entries), Self = self(), {#rabbit_fifo{enqueuers = #{Self := #enqueuer{}}, - consumers = #{Cid := #consumer{priority = 0}}, + consumers = #{Cid := #consumer{cfg = #consumer_cfg{priority = 0}}}, service_queue = S, messages = Msgs}, ok, [_|_]} = apply(meta(Idx), @@ -1588,7 +1592,7 @@ machine_version_waiting_consumer_test(_) -> {S1, _Effects} = rabbit_fifo_v0_SUITE:run_log(S0, Entries), Self = self(), {#rabbit_fifo{enqueuers = #{Self := #enqueuer{}}, - consumers = #{Cid := #consumer{priority = 0}}, + consumers = #{Cid := #consumer{cfg = #consumer_cfg{priority = 0}}}, service_queue = S, messages = Msgs}, ok, _} = apply(meta(Idx), {machine_version, 0, 2}, S1), @@ -1744,7 +1748,7 @@ checkout_priority_test(_) -> empty_dequeue_should_emit_release_cursor_test(_) -> State0 = test_init(?FUNCTION_NAME), - Cid = <<"basic.get1">>, + Cid = {<<"basic.get1">>, self()}, {_State, {dequeue, empty}, Effects} = apply(meta(2, 1234), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), From 5179419b1bad8513c9c363d0c8f5a5e449725cef Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Thu, 3 Feb 2022 11:07:50 +0000 Subject: [PATCH 63/97] rabbit_fifo: enqueue any pending messages during v2 conversion As else these would never be enqueued and thus lost. 
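The general shape of the fix, before the concrete enqueue_all_pending/1 helper added to rabbit_fifo_v1 in the diff below: fold every enqueuer's buffered messages into the ordinary message queue before handing the old state to the converter, so the conversion only has to map the steady-state representation and no in-flight enqueues are dropped. A simplified, self-contained sketch with made-up record names:

-module(flush_pending_sketch).
-export([flush_pending/1]).

-record(enq, {pending = [] :: [{integer(), integer(), term()}]}).
-record(old_state, {enqueuers = #{} :: #{pid() => #enq{}},
                    messages = queue:new() :: queue:queue()}).

%% Fold each enqueuer's pending {Seq, RaftIdx, Msg} entries into the message
%% queue; the converter can then ignore the per-enqueuer pending lists.
flush_pending(#old_state{enqueuers = Enqs} = State) ->
    maps:fold(fun(_Pid, #enq{pending = Pend}, Acc) ->
                      lists:foldl(fun({_Seq, _Idx, Msg},
                                      #old_state{messages = Q} = S) ->
                                          S#old_state{messages = queue:in(Msg, Q)}
                                  end, Acc, Pend)
              end, State, Enqs).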
--- deps/rabbit/src/rabbit_fifo.erl | 41 +++++++++++++++++------------- deps/rabbit/src/rabbit_fifo_v1.erl | 13 +++++++++- 2 files changed, 36 insertions(+), 18 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index fe71dbdb0f08..660e25bc6059 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -672,7 +672,8 @@ convert_consumer({ConsumerTag, Pid}, CV1) -> end, CheckedOut) }. -convert_v1_to_v2(V1State) -> +convert_v1_to_v2(V1State0) -> + V1State = rabbit_fifo_v1:enqueue_all_pending(V1State0), IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), @@ -702,6 +703,13 @@ convert_v1_to_v2(V1State) -> {ConsumerId, convert_consumer(ConsumerId, CV1)} end, WaitingConsumersV1), + + EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), + EnqueuersV2 = maps:map(fun (_EnqPid, Enq) -> + Enq#enqueuer{unused = undefined} + end, EnqueuersV1), + + %% do after state conversion %% The (old) format of dead_letter_handler in RMQ < v3.10 is: %% {Module, Function, Args} %% The (new) format of dead_letter_handler in RMQ >= v3.10 is: @@ -737,22 +745,21 @@ convert_v1_to_v2(V1State) -> expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) }, - #?MODULE{ - cfg = Cfg, - messages = MessagesV2, - messages_total = rabbit_fifo_v1:query_messages_total(V1State), - returns = ReturnsV2, - enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), - enqueuers = rabbit_fifo_v1:get_field(enqueuers, V1State), - ra_indexes = IndexesV1, - release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), - consumers = ConsumersV2, - service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), - msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), - msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), - waiting_consumers = WaitingConsumersV2, - last_active = rabbit_fifo_v1:get_field(last_active, V1State) - }. + #?MODULE{cfg = Cfg, + messages = MessagesV2, + messages_total = rabbit_fifo_v1:query_messages_total(V1State), + returns = ReturnsV2, + enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), + enqueuers = EnqueuersV2, + ra_indexes = IndexesV1, + release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), + consumers = ConsumersV2, + service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), + msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), + msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), + waiting_consumers = WaitingConsumersV2, + last_active = rabbit_fifo_v1:get_field(last_active, V1State) + }. purge_node(Meta, Node, State, Effects) -> lists:foldl(fun(Pid, {S0, E0}) -> diff --git a/deps/rabbit/src/rabbit_fifo_v1.erl b/deps/rabbit/src/rabbit_fifo_v1.erl index 065c3dd4e936..c141cc0ecf7f 100644 --- a/deps/rabbit/src/rabbit_fifo_v1.erl +++ b/deps/rabbit/src/rabbit_fifo_v1.erl @@ -66,7 +66,9 @@ make_purge/0, make_purge_nodes/1, make_update_config/1, - make_garbage_collection/0 + make_garbage_collection/0, + + enqueue_all_pending/1 ]). -export([convert_v0_to_v1/1]). @@ -592,6 +594,15 @@ purge_node(Meta, Node, State, Effects) -> {S, E0 ++ E} end, {State, Effects}, all_pids_for(Node, State)). 
+%% used by v1 -> v2 conversion code +enqueue_all_pending(#?STATE{enqueuers = Enqs} = State) -> + maps:fold(fun(_, #enqueuer{pending = Pend}, Acc) -> + lists:foldl(fun ({_, RIdx, RawMsg}, S) -> + enqueue(RIdx, RawMsg, S) + end, Acc, Pend) + end, State, Enqs). + + %% any downs that re not noconnection handle_down(Meta, Pid, #?STATE{consumers = Cons0, enqueuers = Enqs0} = State0) -> From f85731ef8bc477b73d0c81142dfbb807ccac29ac Mon Sep 17 00:00:00 2001 From: David Ansari Date: Wed, 9 Feb 2022 11:52:45 +0100 Subject: [PATCH 64/97] Take into account dead-lettered messages for max-length If a source quorum queue has at-least-once dead-lettering, reject-publish and also max-length (or max-length-bytes) configured, a publisher should be rejected if max-length is exceeded where max-length should take into account not only ready messages, but also dead-lettered messages that haven't been confirmed yet. Before this commit, although max-length is set to a small number, many more messages ended up in the quorum queue's discards queue when for example message TTL was configured. This is bad and unexpected because the quorum queue grew unbounded although max-length is set. --- deps/rabbit/src/rabbit_fifo.erl | 17 +++-- .../rabbit_fifo_dlx_integration_SUITE.erl | 71 ++++++++++++++++++- 2 files changed, 81 insertions(+), 7 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 660e25bc6059..f969b7e6f57a 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1842,7 +1842,7 @@ evaluate_limit(Index, Result, BeforeState, Before = is_below_soft_limit(BeforeState), case {Before, is_below_soft_limit(State0)} of {false, true} -> - %% we have moved below the lower limit which + %% we have moved below the lower limit {Enqs, Effects} = maps:fold( fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> @@ -2188,17 +2188,22 @@ is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, false; is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, - msg_bytes_enqueue = BytesEnq} = State) -> - messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes). + msg_bytes_enqueue = BytesEnq, + dlx = DlxState} = State) -> + {NumDlx, BytesDlx} = rabbit_fifo_dlx:stat(DlxState), + (messages_ready(State) + NumDlx > MaxLength) orelse + (BytesEnq + BytesDlx > MaxBytes). is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, - msg_bytes_enqueue = BytesEnq} = State) -> - is_below(MaxLength, messages_ready(State)) andalso - is_below(MaxBytes, BytesEnq). + msg_bytes_enqueue = BytesEnq, + dlx = DlxState} = State) -> + {NumDlx, BytesDlx} = rabbit_fifo_dlx:stat(DlxState), + is_below(MaxLength, messages_ready(State) + NumDlx) andalso + is_below(MaxBytes, BytesEnq + BytesDlx). 
is_below(undefined, _Num) -> true; diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 444024d77610..07f2bba90497 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -35,7 +35,9 @@ groups() -> cycle, stats, drop_head_falls_back_to_at_most_once, - switch_strategy + switch_strategy, + reject_publish_max_length, + reject_publish_max_length_bytes ]}, {cluster_size_3, [], [ many_target_queues, @@ -523,6 +525,73 @@ switch_strategy(Config) -> ?assertEqual(5, counted(messages_dead_lettered_expired_total, Config)), ?assertEqual(0, counted(messages_dead_lettered_confirmed_total, Config)). +%% Test that source quorum queue rejects messages when source quorum queue's max-length is reached. +%% max-length should also take into account dead-lettered messages. +reject_publish_max_length(Config) -> + reject_publish(Config, {<<"x-max-length">>, long, 1}). + +%% Test that source quorum queue rejects messages when source quorum queue's max-length-bytes is reached. +%% max-length-bytes should also take into account dead-lettered messages. +reject_publish_max_length_bytes(Config) -> + reject_publish(Config, {<<"x-max-length-bytes">>, long, 1}). + +reject_publish(Config, QArg) when is_tuple(QArg) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + PolicyName = ?config(policy, Config), + %% This routing key prevents messages from being routed to target dead-letter queue. + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, PolicyName, SourceQ, <<"queues">>, + [{<<"dead-letter-routing-key">>, <<"fake">>}]), + declare_queue(Ch, SourceQ, [ + {<<"x-dead-letter-exchange">>, longstr, <<"">>}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-message-ttl">>, long, 0}, + QArg + ]), + declare_queue(Ch, TargetQ, []), + #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), + ok = publish_confirm(Ch, SourceQ), + ok = publish_confirm(Ch, SourceQ), + RaName = ra_name(SourceQ), + eventually(?_assertMatch([{2, 2}], %% 2 messages with 1 byte each + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + %% Now, we have 2 expired messages in the source quorum queue's discards queue. + %% Now that we are over the limit we expect publishes to be rejected. + ?assertEqual(fail, publish_confirm(Ch, SourceQ)), + %% Fix the dead-letter routing topology. + ok = rabbit_ct_broker_helpers:set_policy(Config, Server, PolicyName, SourceQ, <<"queues">>, + [{<<"dead-letter-routing-key">>, TargetQ}]), + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 6), + %% Publish should be allowed again. + ok = publish_confirm(Ch, SourceQ), + %% Consume the 3 expired messages from the target dead-letter queue. 
+    ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"m">>}},
+                 amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})),
+    ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"m">>}},
+                 amqp_channel:call(Ch, #'basic.get'{queue = TargetQ})),
+    eventually(?_assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"m">>}},
+                             amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}))),
+    ok = rabbit_ct_broker_helpers:clear_policy(Config, Server, PolicyName).
+
+publish_confirm(Ch, QName) ->
+    ok = amqp_channel:cast(Ch,
+                           #'basic.publish'{routing_key = QName},
+                           #amqp_msg{payload = <<"m">>}),
+    amqp_channel:register_confirm_handler(Ch, self()),
+    receive
+        #'basic.ack'{} ->
+            ok;
+        #'basic.nack'{} ->
+            fail
+    after 2500 ->
+              ct:fail(confirm_timeout)
+    end.
+
 %% Test that
 %% 1. Message is only acked to source queue once publisher confirms got received from **all** target queues.
 %% 2. Target queue can be classic queue, quorum queue, or stream queue.

From 1f0fd4941c99fb74af539808fe229c90ac9b4942 Mon Sep 17 00:00:00 2001
From: David Ansari
Date: Thu, 10 Feb 2022 11:25:43 +0100
Subject: [PATCH 65/97] Redeliver rejected messages in dlx worker

Before this commit, when a target quorum queue rejected messages
due to overflow, the rejected messages were stuck in the dlx worker
and never got resent. This commit fixes that.
We keep a separate data structure 'rejected' per pending message,
containing the target queues that rejected it.
After re-delivering when the timeout triggers, these target queues are
moved back from 'rejected' to 'unsettled'.
(An alternative would have been to store tuples {TargetQueue, Rejected}
in 'unsettled'.)
From now on, the dlx worker only re-delivers to a target quorum queue
if that queue previously rejected the message. If the dlx worker just
didn't hear back from the target quorum queue, it assumes that the
quorum queue client takes care of redelivery.
---
 deps/rabbit/BUILD.bazel                       |  2 +-
 deps/rabbit/Makefile                          |  2 +-
 deps/rabbit/src/rabbit_fifo_dlx_worker.erl    | 67 +++++++++++----
 .../rabbit_fifo_dlx_integration_SUITE.erl     | 82 ++++++++++++++++++-
 4 files changed, 132 insertions(+), 21 deletions(-)

diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel
index 5ffcd3825cbd..205e5728a878 100644
--- a/deps/rabbit/BUILD.bazel
+++ b/deps/rabbit/BUILD.bazel
@@ -146,7 +146,7 @@ _APP_ENV = """[
 	    {stream_messages_soft_limit, 256},
         {track_auth_attempt_source, false},
         {dead_letter_worker_consumer_prefetch, 32},
-        {dead_letter_worker_publisher_confirm_timeout, 120000}
+        {dead_letter_worker_publisher_confirm_timeout, 180000}
       ]
 """

diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile
index 32388e25717e..2dac35a0779e 100644
--- a/deps/rabbit/Makefile
+++ b/deps/rabbit/Makefile
@@ -124,7 +124,7 @@ define PROJECT_ENV
 	    {stream_messages_soft_limit, 256},
         {track_auth_attempt_source, false},
         {dead_letter_worker_consumer_prefetch, 32},
-        {dead_letter_worker_publisher_confirm_timeout, 120000}
+        {dead_letter_worker_publisher_confirm_timeout, 180000}
 ]
 endef

diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl
index faa53f0e16d8..fe0b9158b2ec 100644
--- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl
+++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl
@@ -24,7 +24,7 @@
          handle_cast/2, handle_call/3, handle_info/2,
          code_change/3, format_status/2]).

--define(HIBERNATE_AFTER, 180_000).
+-define(HIBERNATE_AFTER, 4*60*1000).

 -record(pending, {
           %% consumed_msg_id is not to be confused with consumer delivery tag.
@@ -34,11 +34,11 @@ %% message IDs directly back to the queue (and there is no AMQP consumer). consumed_msg_id :: non_neg_integer(), delivery :: rabbit_types:delivery(), - %% TODO Reason is already stored in first x-death header of #content.properties.#'P_basic'.headers - %% So, we could remove this convenience field and lookup the 1st header when redelivering. reason :: rabbit_dead_letter:reason(), %% target queues for which publisher confirm has not been received yet unsettled = [] :: [rabbit_amqqueue:name()], + %% target queues for which publisher rejection was received recently + rejected = [] :: [rabbit_amqqueue:name()], %% target queues for which publisher confirm was received settled = [] :: [rabbit_amqqueue:name()], %% Number of times the message was published (i.e. rabbit_queue_type:deliver/3 invoked). @@ -211,10 +211,8 @@ handle_queue_actions(Actions, State0) -> S1 = handle_settled(QRef, MsgSeqs, S0), S2 = ack(S1), maybe_cancel_timer(S2); - ({rejected, QRef, MsgSeqNos}, S0) -> - rabbit_log:debug("Ignoring rejected messages ~p from ~s", - [MsgSeqNos, rabbit_misc:rs(QRef)]), - S0; + ({rejected, QRef, MsgSeqs}, S0) -> + handle_rejected(QRef, MsgSeqs, S0); ({queue_down, _QRef}, S0) -> %% target classic queue is down, but not deleted S0 @@ -227,6 +225,27 @@ handle_deliver(Msgs, #state{queue_ref = QRef} = State0) forward(Msg, MsgId, QRef, DLX, Reason, S) end, State, Msgs). +handle_rejected(QRef, MsgSeqNos, #state{pendings = Pendings0} = State) + when is_list(MsgSeqNos) -> + Pendings = lists:foldl(fun(SeqNo, Pends) -> + case maps:is_key(SeqNo, Pends) of + true -> + maps:update_with(SeqNo, + fun(#pending{unsettled = Unsettled, + rejected = Rejected} = P) -> + P#pending{unsettled = lists:delete(QRef, Unsettled), + rejected = [QRef | Rejected]} + end, + Pends); + false -> + rabbit_log:debug("Ignoring rejection for unknown sequence number ~b " + "from target dead letter ~s", + [SeqNo, rabbit_misc:rs(QRef)]), + Pends + end + end, Pendings0, MsgSeqNos), + State#state{pendings = Pendings}. + -spec lookup_dlx(state()) -> {rabbit_types:exchange() | not_found, state()}. lookup_dlx(#state{exchange_ref = DLXRef} = State0) -> @@ -315,17 +334,26 @@ handle_settled0(QRef, MsgSeq, #state{pendings = Pendings, settled_ids = SettledIds} = State) -> case maps:find(MsgSeq, Pendings) of {ok, #pending{unsettled = [QRef], + rejected = [], + consumed_msg_id = ConsumedId}} -> + State#state{pendings = maps:remove(MsgSeq, Pendings), + settled_ids = [ConsumedId | SettledIds]}; + {ok, #pending{unsettled = [], + rejected = [QRef], consumed_msg_id = ConsumedId}} -> State#state{pendings = maps:remove(MsgSeq, Pendings), settled_ids = [ConsumedId | SettledIds]}; - {ok, #pending{unsettled = Unsettled, settled = Settled} = Pend0} -> + {ok, #pending{unsettled = Unsettled, + rejected = Rejected, + settled = Settled} = Pend0} -> Pend = Pend0#pending{unsettled = lists:delete(QRef, Unsettled), + rejected = lists:delete(QRef, Rejected), settled = [QRef | Settled]}, State#state{pendings = maps:update(MsgSeq, Pend, Pendings)}; error -> - rabbit_log:info("Ignoring publisher confirm for sequence number ~b " - "from target dead letter ~s", - [MsgSeq, rabbit_misc:rs(QRef)]), + rabbit_log:debug("Ignoring publisher confirm for unknown sequence number ~b " + "from target dead letter ~s", + [MsgSeq, rabbit_misc:rs(QRef)]), State end. @@ -337,8 +365,10 @@ ack(#state{settled_ids = Ids, State#state{settled_ids = [], dlx_client_state = DlxState}. 
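
The per-message bookkeeping introduced above can be summarised with a small, self-contained sketch. The map-based pending value and the function names are illustrative only; the worker itself keeps the same three lists in the #pending{} record and updates them in handle_rejected/3 and handle_settled0/3 as shown in the diff.

-module(dlx_pending_sketch).
-export([reject/2, settle/2, settled_with_all_targets/1]).

%% Pending :: #{unsettled := [QRef], rejected := [QRef], settled := [QRef]}

%% a publisher rejection moves the target queue from 'unsettled' to 'rejected'
reject(QRef, #{unsettled := Unsettled, rejected := Rejected} = Pending) ->
    Pending#{unsettled := lists:delete(QRef, Unsettled),
             rejected := [QRef | Rejected]}.

%% a publisher confirm moves the target queue to 'settled', regardless of
%% whether it previously sat in 'unsettled' or in 'rejected'
settle(QRef, #{unsettled := Unsettled, rejected := Rejected,
               settled := Settled} = Pending) ->
    Pending#{unsettled := lists:delete(QRef, Unsettled),
             rejected := lists:delete(QRef, Rejected),
             settled := [QRef | Settled]}.

%% the consumed message may only be acked back to the source queue once
%% no target queue is outstanding in either list
settled_with_all_targets(#{unsettled := [], rejected := []}) -> true;
settled_with_all_targets(#{}) -> false.
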
-%% Re-deliver messages that timed out waiting on publisher confirm and -%% messages that got never sent due to routing topology misconfiguration. +%% Re-deliver messages that +%% 1. timed out waiting on publisher confirm, or +%% 2. got rejected by target queue, or +%% 3. never got sent due to routing topology misconfiguration. -spec redeliver_messages(state()) -> state(). redeliver_messages(#state{pendings = Pendings, @@ -404,6 +434,8 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, %% 1. for which we already received a publisher confirm, or Unsettled = RouteToQs0 -- Settled, %% 2. whose queue client redelivers on our behalf. + %% Note that a quorum queue client does not redeliver on our behalf if it previously + %% rejected the message. This is why we always redeliver rejected messages here. RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0), {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1), State1 = log_cycles(Cycles, DLRKeys, State0), @@ -414,8 +446,11 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, Pend = Pend0#pending{publish_count = PublishCount + 1, last_published_at = os:system_time(millisecond), delivery = Delivery, - %% override 'unsettled' because topology could have changed - unsettled = Unsettled}, + %% Override 'unsettled' because topology could have changed. + unsettled = Unsettled, + %% Any target queue that rejected previously and still need + %% to be routed to is moved back to 'unsettled'. + rejected = []}, State = State0#state{pendings = maps:update(OutSeq, Pend, Pendings)}, deliver_to_queues(Delivery, RouteToQs, State) end @@ -497,6 +532,7 @@ format_pending(#pending{consumed_msg_id = ConsumedMsgId, delivery = _DoNotLogLargeBinary, reason = Reason, unsettled = Unsettled, + rejected = Rejected, settled = Settled, publish_count = PublishCount, last_published_at = LastPublishedAt, @@ -504,6 +540,7 @@ format_pending(#pending{consumed_msg_id = ConsumedMsgId, #{consumed_msg_id => ConsumedMsgId, reason => Reason, unsettled => Unsettled, + rejected => Rejected, settled => Settled, publish_count => PublishCount, last_published_at => LastPublishedAt, diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 07f2bba90497..f2d93c7e1a64 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -36,8 +36,10 @@ groups() -> stats, drop_head_falls_back_to_at_most_once, switch_strategy, - reject_publish_max_length, - reject_publish_max_length_bytes + reject_publish_source_queue_max_length, + reject_publish_source_queue_max_length_bytes, + reject_publish_target_classic_queue, + reject_publish_target_quorum_queue ]}, {cluster_size_3, [], [ many_target_queues, @@ -527,12 +529,12 @@ switch_strategy(Config) -> %% Test that source quorum queue rejects messages when source quorum queue's max-length is reached. %% max-length should also take into account dead-lettered messages. -reject_publish_max_length(Config) -> +reject_publish_source_queue_max_length(Config) -> reject_publish(Config, {<<"x-max-length">>, long, 1}). %% Test that source quorum queue rejects messages when source quorum queue's max-length-bytes is reached. %% max-length-bytes should also take into account dead-lettered messages. -reject_publish_max_length_bytes(Config) -> +reject_publish_source_queue_max_length_bytes(Config) -> reject_publish(Config, {<<"x-max-length-bytes">>, long, 1}). 
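
The redeliver0/3 change in rabbit_fifo_dlx_worker.erl above boils down to one selection rule when the confirm timeout fires. The sketch below is a simplified model of that rule; the module, function and variable names are invented, and the real code additionally re-resolves the routing topology and filters out dead-letter cycles.

-module(dlx_redeliver_sketch).
-export([targets_to_republish/4]).

%% Routed           - queues the dead-letter exchange currently routes to
%% Settled          - queues that already confirmed this message
%% Rejected         - queues that rejected this message
%% ClientRedelivers - queues whose queue-type client resends on our behalf
targets_to_republish(Routed, Settled, Rejected, ClientRedelivers) ->
    Unconfirmed = Routed -- Settled,
    [Q || Q <- Unconfirmed,
          %% a queue client does not resend a message it rejected, so
          %% rejected targets must always be re-published by the worker
          lists:member(Q, Rejected) orelse
              not lists:member(Q, ClientRedelivers)].
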
reject_publish(Config, QArg) when is_tuple(QArg) -> @@ -578,6 +580,78 @@ reject_publish(Config, QArg) when is_tuple(QArg) -> amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}))), ok = rabbit_ct_broker_helpers:clear_policy(Config, Server, PolicyName). +%% Test that message gets eventually delivered to target quorum queue when it gets rejected initially. +reject_publish_target_quorum_queue(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + RaName = ra_name(SourceQ), + TargetQ = ?config(target_queue_1, Config), + declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, <<"">>}, + {<<"x-dead-letter-routing-key">>, longstr, TargetQ}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-message-ttl">>, long, 1} + ]), + declare_queue(Ch, TargetQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-max-length">>, long, 1} + ]), + Msg = <<"m">>, + [ok,ok,ok,ok] = [amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, #amqp_msg{payload = Msg}) + || _N <- lists:seq(1,4)], + %% Quorum queues reject publishes once the limit is already exceeded. + %% Therefore, although max-length of target queue is configured to be 1, + %% it will contain 2 messages before rejecting publishes. + %% Therefore, we expect target queue confirmed 2 messages and rejected 2 messages. + wait_for_messages_ready([Server], ra_name(TargetQ), 2), + consistently(?_assertEqual([{2, 2}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + %% Let's make some space in the target queue for the 2 rejected messages. + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 5), + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + ?assertEqual(4, counted(messages_dead_lettered_expired_total, Config)), + eventually(?_assertEqual(4, counted(messages_dead_lettered_confirmed_total, Config))). + +%% Test that message gets eventually delivered to target classic queue when it gets rejected initially. 
+reject_publish_target_classic_queue(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + RaName = ra_name(SourceQ), + TargetQ = ?config(target_queue_1, Config), + declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, <<"">>}, + {<<"x-dead-letter-routing-key">>, longstr, TargetQ}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-message-ttl">>, long, 1} + ]), + declare_queue(Ch, TargetQ, [{<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-max-length">>, long, 1} + ]), + Msg = <<"m">>, + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, #amqp_msg{payload = Msg}), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, #amqp_msg{payload = Msg}), + %% By now we expect target classic queue confirmed 1 message and rejected 1 message. + eventually(?_assertEqual([{1, 1}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + consistently(?_assertEqual([{1, 1}], + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), + ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(1, counted(messages_dead_lettered_confirmed_total, Config)), + %% Let's make space in the target queue for the rejected message. + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + eventually(?_assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)), 500, 6), + ?assertEqual([{0, 0}], dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + ok. + publish_confirm(Ch, QName) -> ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = QName}, From 94e0be38eccf509e7803dd27dec702152d6d0d6c Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Wed, 9 Feb 2022 09:19:28 +0000 Subject: [PATCH 66/97] rabbit_fifo: refactor decorator notifications So that they are only evaluated after each Ra "batch" using the handle aux `eval` message. This results in fewer decorator evaluations during high throughput. Also fix bug where recreating a consumer that was previously cancelled but still had pending messages resulted in the consumer being stuck in cancelled status. This in turn introduced a bug in single active consumer which will be fixed in the next commit. 
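
The idea behind this refactoring is to compare the decorator-relevant state after each applied batch with the last value that was reported, and to emit a notification effect only when it changed. Below is a minimal sketch of that pattern with invented names; the real code keeps the cached value in the aux state and emits a mod_call effect to rabbit_quorum_queue, as the diff that follows shows.

-module(notify_on_change_sketch).
-export([maybe_notify/2]).

%% Last    - the {MaxActivePriority, IsEmpty} pair reported previously
%% Current - the pair computed after the latest batch of commands
maybe_notify(Last, Current) when Last =:= Current ->
    %% nothing changed since the last batch: emit no effect
    {Last, []};
maybe_notify(_Last, Current) ->
    %% remember the new value and emit exactly one notification effect
    {Current, [{notify_decorators, Current}]}.

Evaluating this once per applied batch, rather than once per command, is what reduces the number of decorator notifications under high throughput.
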
--- deps/rabbit/src/rabbit_fifo.erl | 193 ++++++++++-------------- deps/rabbit/src/rabbit_quorum_queue.erl | 8 +- deps/rabbit/test/rabbit_fifo_SUITE.erl | 33 ++-- 3 files changed, 94 insertions(+), 140 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index f969b7e6f57a..3762b6b84679 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -328,7 +328,7 @@ apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, {State1, ok, Effects} = checkout(Meta, State0, State0#?MODULE{service_queue = ServiceQueue, - consumers = Cons}, [], false), + consumers = Cons}, []), Response = {send_credit_reply, messages_ready(State1)}, %% by this point all checkouts for the updated credit value %% should be processed so we can evaluate the drain @@ -386,8 +386,7 @@ apply(#{index := Index, Exists = maps:is_key(ConsumerId, Consumers), case messages_ready(State0) of 0 -> - update_smallest_raft_index(Index, {dequeue, empty}, State0, - [notify_decorators_effect(State0)]); + update_smallest_raft_index(Index, {dequeue, empty}, State0, []); _ when Exists -> %% a dequeue using the same consumer_id isn't possible at this point {State0, {dequeue, empty}}; @@ -395,7 +394,8 @@ apply(#{index := Index, State1 = update_consumer(ConsumerId, ConsumerMeta, {once, 1, simple_prefetch}, 0, State0), - {success, _, MsgId, Msg, ExpiredMsg, State2, Effects0} = checkout_one(Meta, false, State1, []), + {success, _, MsgId, Msg, ExpiredMsg, State2, Effects0} = + checkout_one(Meta, false, State1, []), {State4, Effects1} = case Settlement of unsettled -> {_, Pid} = ConsumerId, @@ -414,9 +414,8 @@ apply(#{index := Index, [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | Effects1]} end, - NotifyEffect = notify_decorators_effect(State4), {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, - [NotifyEffect | Effects2]), + Effects2), case {DroppedMsg, ExpiredMsg} of {false, false} -> {State, Reply, Effects}; @@ -470,7 +469,7 @@ apply(#{index := Index}, #purge{}, apply(#{index := Idx}, #garbage_collection{}, State) -> update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); apply(Meta, {timeout, expire_msgs}, State) -> - checkout(Meta, State, State, [], false); + checkout(Meta, State, State, []); apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, #?MODULE{consumers = Cons0, cfg = #cfg{consumer_strategy = single_active}, @@ -591,8 +590,7 @@ apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, Acc end, {State0, Monitors}, Cons0), Waiting = update_waiting_consumer_status(Node, State1, up), - State2 = State1#?MODULE{ - enqueuers = Enqs1, + State2 = State1#?MODULE{enqueuers = Enqs1, waiting_consumers = Waiting}, {State, Effects} = activate_next_consumer(State2, Effects1), checkout(Meta, State0, State, Effects); @@ -620,7 +618,7 @@ apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd, dlx = DlxState0} = State0) -> {DlxState, Effects0} = rabbit_fifo_dlx:apply(Meta, Cmd, DLH, DlxState0), State1 = State0#?MODULE{dlx = DlxState}, - {State, ok, Effects} = checkout(Meta, State0, State1, Effects0, false), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), update_smallest_raft_index(IncomingRaftIdx, State, Effects); apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully @@ -781,14 +779,16 @@ handle_down(Meta, Pid, #?MODULE{consumers = Cons0, cancel_consumer(Meta, ConsumerId, S, E, down) end, {State2, Effects1}, DownConsumers). 
-consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = competing}}) -> +consumer_active_flag_update_function( + #?MODULE{cfg = #cfg{consumer_strategy = competing}}) -> fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) -> - consumer_update_active_effects(State, ConsumerId, Consumer, Active, - ActivityStatus, Effects) + consumer_update_active_effects(State, ConsumerId, Consumer, Active, + ActivityStatus, Effects) end; -consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = single_active}}) -> +consumer_active_flag_update_function( + #?MODULE{cfg = #cfg{consumer_strategy = single_active}}) -> fun(_, _, _, _, _, Effects) -> - Effects + Effects end. handle_waiting_consumer_down(_Pid, @@ -967,6 +967,7 @@ which_module(2) -> ?MODULE. capacity :: term(), gc = #aux_gc{} :: #aux_gc{}}). -record(?AUX, {name :: atom(), + last_decorators_state :: term(), capacity :: term(), gc = #aux_gc{} :: #aux_gc{}, unused, @@ -1030,13 +1031,21 @@ handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, Aux0, Log, #?MODULE{}) -> %% for returns with a delivery limit set we can just return as before {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; -handle_aux(leader, cast, eval, Aux0, Log, MacState) -> +handle_aux(leader, cast, eval, #?AUX{last_decorators_state = LastDec} = Aux0, + Log, #?MODULE{cfg = #cfg{resource = QName}} = MacState) -> %% this is called after each batch of commands have been applied %% set timer for message expire %% should really be the last applied index ts but this will have to do Ts = erlang:system_time(millisecond), - Effects = timer_effect(Ts, MacState, []), - {no_reply, Aux0, Log, Effects}; + Effects0 = timer_effect(Ts, MacState, []), + case query_notify_decorators_info(MacState) of + LastDec -> + {no_reply, Aux0, Log, Effects0}; + {MaxActivePriority, IsEmpty} = NewLast -> + Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) + | Effects0], + {no_reply, Aux0#?AUX{last_decorators_state = NewLast}, Log, Effects} + end; handle_aux(_RaftState, cast, eval, Aux0, Log, _MacState) -> {no_reply, Aux0, Log}; handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, @@ -1073,8 +1082,10 @@ handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, {ok, ?INDEX_MSG(Idx, ?DISK_MSG(Header))} -> %% need to re-hydrate from the log {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), - %% TODO: handle requeue? - #enqueue{msg = Msg} = Cmd, + Msg = case Cmd of + #enqueue{msg = M} -> M; + #requeue{msg = M} -> M + end, {reply, {ok, {Header, Msg}}, Aux0, Log}; Err -> {reply, Err, Aux0, Log0} @@ -1247,21 +1258,20 @@ query_peek(Pos, State0) when Pos > 0 -> end. query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> - MaxActivePriority = maps:fold(fun(_, - #consumer{credit = C, - status = up, - cfg = #consumer_cfg{priority = P0} - }, MaxP) - when C > 0 -> - P = -P0, - case MaxP of - empty -> P; - MaxP when MaxP > P -> MaxP; - _ -> P - end; - (_, _, MaxP) -> - MaxP - end, empty, Consumers), + MaxActivePriority = maps:fold( + fun(_, #consumer{credit = C, + status = up, + cfg = #consumer_cfg{priority = P0}}, + MaxP) when C > 0 -> + P = -P0, + case MaxP of + empty -> P; + MaxP when MaxP > P -> MaxP; + _ -> P + end; + (_, _, MaxP) -> + MaxP + end, empty, Consumers), IsEmpty = (messages_ready(State) == 0), {MaxActivePriority, IsEmpty}. @@ -1282,13 +1292,6 @@ messages_total(#?MODULE{messages_total = Total, dlx = DlxState}) -> {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), Total + DlxTotal. 
-%% release cursors might be old state (e.g. after recent upgrade) -% messages_total(State) -> -% try -% rabbit_fifo_v1:query_messages_total(State) -% catch _:_ -> -% rabbit_fifo_v0:query_messages_total(State) -% end. update_use({inactive, _, _, _} = CUInfo, inactive) -> CUInfo; @@ -1441,7 +1444,7 @@ apply_enqueue(#{index := RaftIdx, system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of {ok, State1, Effects1} -> - {State, ok, Effects} = checkout(Meta, State0, State1, Effects1, false), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects1), {maybe_store_release_cursor(RaftIdx, State), ok, Effects}; {out_of_sequence, State, Effects} -> {State, not_enqueued, Effects}; @@ -1600,7 +1603,7 @@ return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned, _ -> State1 end, - {State, ok, Effects} = checkout(Meta, State0, State2, Effects1, false), + {State, ok, Effects} = checkout(Meta, State0, State2, Effects1), update_smallest_raft_index(IncomingRaftIdx, State, Effects). % used to process messages that are finished @@ -1645,14 +1648,13 @@ complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, #consumer{} = Con0, Effects0, State0) -> State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0), - {State, ok, Effects} = checkout(Meta, State0, State1, Effects0, false), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), update_smallest_raft_index(IncomingRaftIdx, State, Effects). cancel_consumer_effects(ConsumerId, - #?MODULE{cfg = #cfg{resource = QName}} = State, Effects) -> + #?MODULE{cfg = #cfg{resource = QName}} = _State, Effects) -> [{mod_call, rabbit_quorum_queue, - cancel_consumer_handler, [QName, ConsumerId]}, - notify_decorators_effect(State) | Effects]. + cancel_consumer_handler, [QName, ConsumerId]} | Effects]. update_smallest_raft_index(Idx, State, Effects) -> update_smallest_raft_index(Idx, ok, State, Effects). @@ -1756,13 +1758,9 @@ return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, return_one(Meta, MsgId, Msg, S, E, ConsumerId) end, {State, Effects0}, lists:sort(maps:to_list(Checked))). -%% checkout new messages to consumers -checkout(Meta, OldState, State, Effects) -> - checkout(Meta, OldState, State, Effects, true). 
- checkout(#{index := Index} = Meta, - #?MODULE{cfg = #cfg{resource = QName}} = OldState, - State0, Effects0, HandleConsumerChanges) -> + #?MODULE{cfg = #cfg{resource = _QName}} = OldState, + State0, Effects0) -> {#?MODULE{cfg = #cfg{dead_letter_handler = DLH}, dlx = DlxState0} = State1, ExpiredMsg, Effects1} = checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), @@ -1771,27 +1769,11 @@ checkout(#{index := Index} = Meta, State2 = State1#?MODULE{msg_cache = undefined, %% by this time the cache should be used dlx = DlxState}, Effects2 = DlxDeliveryEffects ++ Effects1, - {State, DroppedMsg, Effects} = evaluate_limit(Index, false, OldState, State2, Effects2), - case {DroppedMsg, ExpiredMsg} of - {false, false} -> - case maybe_notify_decorators(State, HandleConsumerChanges) of - {true, {MaxActivePriority, IsEmpty}} -> - NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, - IsEmpty), - {State, ok, [NotifyEffect | Effects]}; - false -> - {State, ok, Effects} - end; - _ -> - case maybe_notify_decorators(State, HandleConsumerChanges) of - {true, {MaxActivePriority, IsEmpty}} -> - NotifyEffect = notify_decorators_effect(QName, MaxActivePriority, - IsEmpty), - update_smallest_raft_index(Index, State, - [NotifyEffect | Effects]); - false -> - update_smallest_raft_index(Index, State, Effects) - end + case evaluate_limit(Index, false, OldState, State2, Effects2) of + {State, false, Effects} when ExpiredMsg == false -> + {State, ok, Effects}; + {State, _, Effects} -> + update_smallest_raft_index(Index, State, Effects) end. checkout0(Meta, {success, ConsumerId, MsgId, @@ -1844,15 +1826,15 @@ evaluate_limit(Index, Result, BeforeState, {false, true} -> %% we have moved below the lower limit {Enqs, Effects} = - maps:fold( - fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> - E = E0#enqueuer{blocked = undefined}, - {Enqs#{P => E}, - [{send_msg, P, {queue_status, go}, [ra_event]} - | Acc]}; - (_P, _E, Acc) -> - Acc - end, {Enqs0, Effects0}, Enqs0), + maps:fold( + fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> + E = E0#enqueuer{blocked = undefined}, + {Enqs#{P => E}, + [{send_msg, P, {queue_status, go}, [ra_event]} + | Acc]}; + (_P, _E, Acc) -> + Acc + end, {Enqs0, Effects0}, Enqs0), {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; _ -> {State0, Result, Effects0} @@ -2110,37 +2092,23 @@ update_consumer({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, update_consumer0({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, #?MODULE{consumers = Cons0, service_queue = ServiceQueue0} = State0) -> - %% TODO: this logic may not be correct for updating a pre-existing consumer - % Init = #consumer{lifetime = Life, meta = Meta, - % priority = Priority, - % credit = Credit, credit_mode = Mode}, - Init = #consumer{cfg = #consumer_cfg{tag = Tag, - pid = Pid, - lifetime = Life, - meta = Meta, - priority = Priority, - credit_mode = Mode}, - credit = Credit}, Consumer = case Cons0 of #{ConsumerId := #consumer{cfg = CCfg, checked_out = Checked} = C} -> NumChecked = map_size(Checked), NewCredit = max(0, Credit - NumChecked), C#consumer{cfg = CCfg#consumer_cfg{lifetime = Life}, + status = up, credit = NewCredit}; _ -> - Init + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = Meta, + priority = Priority, + credit_mode = Mode}, + credit = Credit} end, - % Cons = maps:update_with(ConsumerId, - % fun(S) -> - % %% remove any in-flight messages from - % %% the credit update - % N = maps:size(S#consumer.checked_out), - % C = max(0, Credit - N), - % CCfg = 
S#consumer.cfg, - % S#consumer{cfg = CCfg#consumer_cfg{lifetime = Life}, - % credit = C} - % end, Init, Cons0), ServiceQueue = maybe_queue_consumer(ConsumerId, Consumer, ServiceQueue0), State0#?MODULE{consumers = Cons0#{ConsumerId => Consumer}, service_queue = ServiceQueue}. @@ -2360,15 +2328,6 @@ get_priority_from_args(#{args := Args}) -> get_priority_from_args(_) -> 0. -maybe_notify_decorators(_, false) -> - false; -maybe_notify_decorators(State, _) -> - {true, query_notify_decorators_info(State)}. - -notify_decorators_effect(#?MODULE{cfg = #cfg{resource = QName}} = State) -> - {MaxActivePriority, IsEmpty} = query_notify_decorators_info(State), - notify_decorators_effect(QName, MaxActivePriority, IsEmpty). - notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> {mod_call, rabbit_quorum_queue, spawn_notify_decorators, [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. @@ -2415,12 +2374,12 @@ make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))} make_requeue(_ConsumerId, _Notify, [], []) -> []. - can_immediately_deliver(#?MODULE{service_queue = SQ, consumers = Consumers} = State) -> case messages_ready(State) of 0 when map_size(Consumers) > 0 -> - %% TODO: check consumers actually have credit + %% TODO: is is probably good enough but to be 100% we'd need to + %% scan all consumers and ensure at least one has credit priority_queue:is_empty(SQ) == false; _ -> false diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index f5396af31f95..e38542ca1ea9 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -414,9 +414,11 @@ spawn_deleter(QName) -> end). spawn_notify_decorators(QName, Fun, Args) -> - spawn(fun () -> - notify_decorators(QName, Fun, Args) - end). + %% run in ra process for now + notify_decorators(QName, Fun, Args). + % spawn(fun () -> + % notify_decorators(QName, Fun, Args) + % end). 
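
The TODO left in can_immediately_deliver/1 above hints at a stricter check: a non-empty service queue is treated as good enough, whereas a fully accurate answer would also require at least one up consumer with credit left. The following is only a sketch of such a scan, over an illustrative map of consumers rather than the real consumer records.

-module(credit_scan_sketch).
-export([any_consumer_has_credit/1]).

%% Consumers :: #{ConsumerId => #{status := up | atom(),
%%                                credit := non_neg_integer()}}
any_consumer_has_credit(Consumers) ->
    Iter = maps:iterator(Consumers),
    scan(maps:next(Iter)).

%% stop at the first consumer that is up and still has credit
scan(none) ->
    false;
scan({_Cid, #{status := up, credit := Credit}, _Iter}) when Credit > 0 ->
    true;
scan({_Cid, _Consumer, Iter}) ->
    scan(maps:next(Iter)).
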
handle_tick(QName, {Name, MR, MU, M, C, MsgBytesReady, MsgBytesUnack, MsgBytesDiscard}, diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 900219f0511e..79f273ef23fa 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -179,8 +179,7 @@ enq_enq_deq_test(_) -> % NumReady = 1, Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), {_State3, _, - [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - {log, [1], Fun}, + [{log, [1], Fun}, {monitor, _, _}]} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State2), @@ -193,8 +192,7 @@ enq_enq_deq_deq_settle_test(_) -> {State2, _} = enq(2, 2, second, State1), % get returns a reply value {State3, '$ra_no_reply', - [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - {log, [1], _}, + [{log, [1], _}, {monitor, _, _}]} = apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State2), @@ -246,8 +244,7 @@ release_cursor_test(_) -> checkout_enq_settle_test(_) -> Cid = {?FUNCTION_NAME, self()}, - {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - {monitor, _, _} | _]} = check(Cid, 1, test_init(test)), + {State1, [{monitor, _, _} | _]} = check(Cid, 1, test_init(test)), {State2, Effects0} = enq(2, 1, first, State1), %% TODO: this should go back to a send_msg effect after optimisation % ?ASSERT_EFF({log, [2], _, _}, Effects0), @@ -264,8 +261,7 @@ checkout_enq_settle_test(_) -> duplicate_enqueue_test(_) -> Cid = {<<"duplicate_enqueue_test">>, self()}, - {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), + {State1, [ {monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)), {State2, Effects2} = enq(2, 1, first, State1), % ?ASSERT_EFF({log, [2], _, _}, Effects2), ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), @@ -319,8 +315,7 @@ return_non_existent_test(_) -> return_checked_out_test(_) -> Cid = {<<"cid">>, self()}, {State0, _} = enq(1, 1, first, test_init(test)), - {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - _Monitor, + {State1, [_Monitor, {log, [1], Fun, _} | _ ] } = check_auto(Cid, 2, State0), @@ -346,8 +341,7 @@ return_checked_out_limit_test(_) -> delivery_limit => 1}), Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), {State0, _} = enq(1, 1, first, Init), - {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - _Monitor, + {State1, [_Monitor, {log, [1], Fun1, _} | _ ]} = check_auto(Cid, 2, State0), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), @@ -369,8 +363,7 @@ return_auto_checked_out_test(_) -> {State0, _} = enq(2, 2, second, State00), % it first active then inactive as the consumer took on but cannot take % any more - {State1, [{mod_call, rabbit_quorum_queue, spawn_notify_decorators, _}, - _Monitor, + {State1, [_Monitor, {log, [1], Fun1, _} ]} = check_auto(Cid, 2, State0), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), @@ -410,14 +403,14 @@ cancelled_checkout_out_test(_) -> {State4, ok, _} = apply(meta(6), rabbit_fifo:make_settle(Cid, [0]), State3), - {_State, _, [_, {log, [2], _Fun} | _]} = + {_State, _, [{log, [2], _Fun} | _]} = apply(meta(7), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4), ok. 
 down_with_noproc_consumer_returns_unsettled_test(_) ->
     Cid = {<<"down_consumer_returns_unsettled_test">>, self()},
     {State0, _} = enq(1, 1, second, test_init(test)),
-    {State1, [_, {monitor, process, Pid} | _]} = check(Cid, 2, State0),
+    {State1, [{monitor, process, Pid} | _]} = check(Cid, 2, State0),
     {State2, _, _} = apply(meta(3), {down, Pid, noproc}, State1),
     {_State, Effects} = check(Cid, 4, State2),
     ?ASSERT_EFF({monitor, process, _}, Effects),
@@ -1180,11 +1173,11 @@ active_flag_updated_when_consumer_suspected_unsuspected_test(_) ->

     {State2, _, Effects2} = apply(meta(3), {down, Pid1, noconnection}, State1),
     % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node, 1 more decorators effect
-    ?assertEqual(4 + 1 + 1, length(Effects2)),
+    ?assertEqual(4 + 1, length(Effects2)),

     {_, _, Effects3} = apply(meta(4), {nodeup, node(self())}, State2),
     % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID, 1 more decorators effect
-    ?assertEqual(4 + 4 + 1, length(Effects3)).
+    ?assertEqual(4 + 4, length(Effects3)).

 active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(_) ->
     State0 = init(#{name => ?FUNCTION_NAME,
@@ -1213,11 +1206,11 @@ active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_co

     {State2, _, Effects2} = apply(meta(2), {down, Pid1, noconnection}, State1),
     % one monitor and one consumer status update (deactivated)
-    ?assertEqual(3, length(Effects2)),
+    ?assertEqual(2, length(Effects2)),

     {_, _, Effects3} = apply(meta(3), {nodeup, node(self())}, State2),
     % for each consumer: 1 effect to monitor the consumer PID
-    ?assertEqual(6, length(Effects3)).
+    ?assertEqual(5, length(Effects3)).

 single_active_cancelled_with_unacked_test(_) ->
     State0 = init(#{name => ?FUNCTION_NAME,

From c55fd217adeb195517f1657867382309764ccf52 Mon Sep 17 00:00:00 2001
From: Karl Nilsson
Date: Thu, 10 Feb 2022 11:22:46 +0000
Subject: [PATCH 67/97] rabbit_fifo: fix SAC bug when a cancelled consumer is revived

If a cancelled single active consumer with a message checked out was
revived through another checkout command, we would previously have
overwritten the active consumer and lost track of the in-flight
message. This change handles this case and merges the cancelled and
the new consumer with the same identity.
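
The fix, implemented as merge_consumer/4 in the diff below, keeps the cancelled consumer's checked-out messages and only refreshes its configuration, status and credit. Here is a simplified sketch of the credit arithmetic, using an illustrative map in place of the #consumer{} record.

-module(merge_consumer_sketch).
-export([merge/2]).

%% Existing        - the cancelled consumer that is being revived
%% RequestedCredit - the credit asked for by the new checkout command
merge(#{checked_out := Checked} = Existing, RequestedCredit) ->
    NumChecked = map_size(Checked),
    Existing#{status := up,
              %% messages still checked out count against the new credit,
              %% so the revived consumer cannot exceed its prefetch
              credit := max(0, RequestedCredit - NumChecked)}.
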
--- deps/rabbit/src/rabbit_fifo.erl | 219 ++++++++++++++----------- deps/rabbit/src/rabbit_fifo.hrl | 3 +- deps/rabbit/test/rabbit_fifo_SUITE.erl | 106 +++++++++--- 3 files changed, 205 insertions(+), 123 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 3762b6b84679..1b0d0b935f93 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -40,6 +40,7 @@ query_messages_total/1, query_processes/1, query_ra_indexes/1, + query_waiting_consumers/1, query_consumer_count/1, query_consumers/1, query_stat/1, @@ -279,8 +280,6 @@ apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, apply(#{index := Idx} = Meta, #requeue{consumer_id = ConsumerId, msg_id = MsgId, - %% as we read the message from disk it is already - %% an inmemory message index = OldIdx, header = Header0, msg = _Msg}, @@ -309,7 +308,7 @@ apply(#{index := Idx} = Meta, maybe_store_release_cursor(Idx, State), Effs); _ -> - {State00, ok} + {State00, ok, []} end; apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, drain = Drain, consumer_id = ConsumerId}, @@ -391,7 +390,7 @@ apply(#{index := Index, %% a dequeue using the same consumer_id isn't possible at this point {State0, {dequeue, empty}}; Ready -> - State1 = update_consumer(ConsumerId, ConsumerMeta, + State1 = update_consumer(Meta, ConsumerId, ConsumerMeta, {once, 1, simple_prefetch}, 0, State0), {success, _, MsgId, Msg, ExpiredMsg, State2, Effects0} = @@ -403,8 +402,8 @@ apply(#{index := Index, settled -> %% immediately settle the checkout {State3, _, SettleEffects} = - apply(Meta, make_settle(ConsumerId, [MsgId]), - State2), + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), {State3, SettleEffects ++ Effects0} end, {Reply, Effects2} = @@ -431,11 +430,11 @@ apply(#{index := Idx} = Meta, {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), update_smallest_raft_index(Idx, Reply, State, Effects); apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, - consumer_id = {_, Pid} = ConsumerId}, - State0) -> + consumer_id = {_, Pid} = ConsumerId}, State0) -> Priority = get_priority_from_args(ConsumerMeta), - State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, Priority, State0), - checkout(Meta, State0, State1, [{monitor, process, Pid}]); + State1 = update_consumer(Meta, ConsumerId, ConsumerMeta, Spec, Priority, State0), + {State2, Effs} = activate_next_consumer(State1, []), + checkout(Meta, State0, State2, [{monitor, process, Pid} | Effs]); apply(#{index := Index}, #purge{}, #?MODULE{messages_total = Tot, returns = Returns, @@ -997,8 +996,7 @@ handle_aux(follower, _, garbage_collection, Aux, Log, MacState) -> handle_aux(leader, cast, {#return{msg_ids = MsgIds, consumer_id = ConsumerId}, Corr, Pid}, Aux0, Log0, #?MODULE{cfg = #cfg{delivery_limit = undefined}, - consumers = Consumers, - ra_indexes = _Indexes}) -> + consumers = Consumers}) -> case Consumers of #{ConsumerId := #consumer{checked_out = Checked}} -> {Log, ToReturn} = @@ -1006,7 +1004,6 @@ handle_aux(leader, cast, {#return{msg_ids = MsgIds, fun (MsgId, ?INDEX_MSG(Idx, ?DISK_MSG(Header)), {L0, Acc}) -> %% it is possible this is not found if the consumer %% crashed and the message got removed - %% TODO: handle when log entry is not found case ra_log:fetch(Idx, L0) of {{_, _, {_, _, Cmd, _}}, L} -> Msg = case Cmd of @@ -1018,7 +1015,6 @@ handle_aux(leader, cast, {#return{msg_ids = MsgIds, {undefined, L} -> {L, Acc} end - %% TODO: handle old formats? 
end, {Log0, []}, maps:with(MsgIds, Checked)), Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, @@ -1151,6 +1147,9 @@ query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> RaIndexes. +query_waiting_consumers(#?MODULE{waiting_consumers = WaitingConsumers}) -> + WaitingConsumers. + query_consumer_count(#?MODULE{consumers = Consumers, waiting_consumers = WaitingConsumers}) -> Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> @@ -1223,17 +1222,15 @@ query_consumers(#?MODULE{consumers = Consumers, maps:merge(FromConsumers, FromWaitingConsumers). -query_single_active_consumer(#?MODULE{cfg = #cfg{consumer_strategy = single_active}, - consumers = Consumers}) -> - case maps:size(Consumers) of - 0 -> +query_single_active_consumer( + #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> + case active_consumer(Consumers) of + undefined -> {error, no_value}; - 1 -> - {value, lists:nth(1, maps:keys(Consumers))}; - _ - -> - {error, illegal_size} - end ; + {ActiveCid, _} -> + {value, ActiveCid} + end; query_single_active_consumer(_) -> disabled. @@ -1344,13 +1341,13 @@ cancel_consumer(Meta, ConsumerId, waiting_consumers = Waiting0} = State0, Effects0, Reason) -> %% single active consumer on, consumers are waiting - case maps:is_key(ConsumerId, Cons0) of - true -> + case Cons0 of + #{ConsumerId := #consumer{status = _}} -> % The active consumer is to be removed {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0, Effects0, Reason), activate_next_consumer(State1, Effects1); - false -> + _ -> % The cancelled consumer is not active or cancelled % Just remove it from idle_consumers Waiting = lists:keydelete(ConsumerId, 1, Waiting0), @@ -1390,37 +1387,58 @@ cancel_consumer0(Meta, ConsumerId, {S0, Effects0} end. +activate_next_consumer(#?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0, + Effects0) -> + {State0, Effects0}; activate_next_consumer(#?MODULE{consumers = Cons, waiting_consumers = Waiting0} = State0, Effects0) -> - case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of - Up when map_size(Up) == 0 -> - %% there are no active consumer in the consumer map + case has_active_consumer(Cons) of + false -> case lists:filter(fun ({_, #consumer{status = Status}}) -> Status == up end, Waiting0) of - [{NextConsumerId, NextConsumer} | _] -> - %% there is a potential next active consumer + [{NextConsumerId, #consumer{cfg = NextCCfg} = NextConsumer} | _] -> Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), + Consumer = case maps:get(NextConsumerId, Cons, undefined) of + undefined -> + NextConsumer; + Existing -> + %% there was an exisiting non-active consumer + %% just update the existing cancelled consumer + %% with the new config + Existing#consumer{cfg = NextCCfg} + end, #?MODULE{service_queue = ServiceQueue} = State0, ServiceQueue1 = maybe_queue_consumer(NextConsumerId, - NextConsumer, + Consumer, ServiceQueue), - State = State0#?MODULE{consumers = Cons#{NextConsumerId => NextConsumer}, + State = State0#?MODULE{consumers = Cons#{NextConsumerId => Consumer}, service_queue = ServiceQueue1, waiting_consumers = Remaining}, Effects = consumer_update_active_effects(State, NextConsumerId, - NextConsumer, true, + Consumer, true, single_active, Effects0), {State, Effects}; [] -> {State0, Effects0} end; - _ -> - {State0, Effects0} + true -> + {State0, Effects0} end. +has_active_consumer(Consumers) -> + active_consumer(Consumers) /= undefined. 
+active_consumer({Cid, #consumer{status = up} = Consumer, _I}) -> + {Cid, Consumer}; +active_consumer({_Cid, #consumer{status = _}, I}) -> + active_consumer(maps:next(I)); +active_consumer(none) -> + undefined; +active_consumer(M) when is_map(M) -> + I = maps:iterator(M), + active_consumer(maps:next(I)). maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, #consumer{cfg = CCfg} = Consumer, S0, @@ -1652,7 +1670,8 @@ complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, update_smallest_raft_index(IncomingRaftIdx, State, Effects). cancel_consumer_effects(ConsumerId, - #?MODULE{cfg = #cfg{resource = QName}} = _State, Effects) -> + #?MODULE{cfg = #cfg{resource = QName}} = _State, + Effects) -> [{mod_call, rabbit_quorum_queue, cancel_consumer_handler, [QName, ConsumerId]} | Effects]. @@ -2019,25 +2038,14 @@ timer_effect(RaCmdTs, State, Effects) -> end, [{timer, expire_msgs, T} | Effects]. -update_or_remove_sub(_Meta, ConsumerId, - #consumer{cfg = #consumer_cfg{lifetime = auto}, - credit = 0} = Con, - #?MODULE{consumers = Cons} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)}; -update_or_remove_sub(_Meta, ConsumerId, - #consumer{cfg = #consumer_cfg{lifetime = auto}} = Con, - #?MODULE{consumers = Cons, - service_queue = ServiceQueue} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), - service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}; -update_or_remove_sub(#{system_time := Ts}, - ConsumerId, +update_or_remove_sub(Meta, ConsumerId, #consumer{cfg = #consumer_cfg{lifetime = once}, checked_out = Checked, credit = 0} = Con, #?MODULE{consumers = Cons} = State) -> - case maps:size(Checked) of + case map_size(Checked) of 0 -> + #{system_time := Ts} = Meta, % we're done with this consumer State#?MODULE{consumers = maps:remove(ConsumerId, Cons), last_active = Ts}; @@ -2046,60 +2054,34 @@ update_or_remove_sub(#{system_time := Ts}, State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)} end; update_or_remove_sub(_Meta, ConsumerId, - #consumer{cfg = #consumer_cfg{lifetime = once}} = Con, + #consumer{cfg = #consumer_cfg{}} = Con, #?MODULE{consumers = Cons, service_queue = ServiceQueue} = State) -> State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. -uniq_queue_in(Key, #consumer{cfg = #consumer_cfg{priority = P}}, Queue) -> +uniq_queue_in(Key, #consumer{credit = Credit, + status = up, + cfg = #consumer_cfg{priority = P}}, ServiceQueue) + when Credit > 0 -> % TODO: queue:member could surely be quite expensive, however the practical % number of unique consumers may not be large enough for it to matter - case priority_queue:member(Key, Queue) of + case priority_queue:member(Key, ServiceQueue) of true -> - Queue; + ServiceQueue; false -> - priority_queue:in(Key, P, Queue) - end. + priority_queue:in(Key, P, ServiceQueue) + end; +uniq_queue_in(_Key, _Consumer, ServiceQueue) -> + ServiceQueue. 
-update_consumer(ConsumerId, Meta, Spec, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0) -> - %% general case, single active consumer off - update_consumer0(ConsumerId, Meta, Spec, Priority, State0); -update_consumer(ConsumerId, Meta, Spec, Priority, - #?MODULE{consumers = Cons0, - cfg = #cfg{consumer_strategy = single_active}} = State0) - when map_size(Cons0) == 0 orelse - is_map_key(ConsumerId, Cons0) -> - %% single active consumer on, no one is consuming yet or - %% the currently active consumer is the same - update_consumer0(ConsumerId, Meta, Spec, Priority, State0); -update_consumer({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = WaitingConsumers0} = State0) -> - %% single active consumer on and one active consumer already - %% adding the new consumer to the waiting list - Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, - pid = Pid, - lifetime = Life, - meta = Meta, - priority = Priority, - credit_mode = Mode}, - credit = Credit}, - WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}], - State0#?MODULE{waiting_consumers = WaitingConsumers1}. - -update_consumer0({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, - #?MODULE{consumers = Cons0, - service_queue = ServiceQueue0} = State0) -> +update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, + {Life, Credit, Mode} = Spec, Priority, + #?MODULE{cfg = #cfg{consumer_strategy = competing}, + consumers = Cons0} = State0) -> Consumer = case Cons0 of - #{ConsumerId := #consumer{cfg = CCfg, - checked_out = Checked} = C} -> - NumChecked = map_size(Checked), - NewCredit = max(0, Credit - NumChecked), - C#consumer{cfg = CCfg#consumer_cfg{lifetime = Life}, - status = up, - credit = NewCredit}; + #{ConsumerId := #consumer{} = Consumer0} -> + merge_consumer(Consumer0, ConsumerMeta, Spec, Priority); _ -> #consumer{cfg = #consumer_cfg{tag = Tag, pid = Pid, @@ -2109,9 +2091,50 @@ update_consumer0({Tag, Pid} = ConsumerId, Meta, {Life, Credit, Mode}, Priority, credit_mode = Mode}, credit = Credit} end, - ServiceQueue = maybe_queue_consumer(ConsumerId, Consumer, ServiceQueue0), - State0#?MODULE{consumers = Cons0#{ConsumerId => Consumer}, - service_queue = ServiceQueue}. 
+ update_or_remove_sub(Meta, ConsumerId, Consumer, State0); +update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, + {Life, Credit, Mode} = Spec, Priority, + #?MODULE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Cons0, + waiting_consumers = Waiting, + service_queue = _ServiceQueue0} = State0) -> + %% if it is the current active consumer, just update + %% if it is a cancelled active consumer, add to waiting unless it is the only + %% one, then merge + case active_consumer(Cons0) of + {ConsumerId, #consumer{status = up} = Consumer0} -> + Consumer = merge_consumer(Consumer0, ConsumerMeta, Spec, Priority), + update_or_remove_sub(Meta, ConsumerId, Consumer, State0); + undefined when is_map_key(ConsumerId, Cons0) -> + %% there is no active consumer and the current consumer is in the + %% consumers map and thus must be cancelled, in this case we can just + %% merge and effectively make this the current active one + Consumer0 = maps:get(ConsumerId, Cons0), + Consumer = merge_consumer(Consumer0, ConsumerMeta, Spec, Priority), + update_or_remove_sub(Meta, ConsumerId, Consumer, State0); + _ -> + %% add as a new waiting consumer + Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = ConsumerMeta, + priority = Priority, + credit_mode = Mode}, + credit = Credit}, + + State0#?MODULE{waiting_consumers = Waiting ++ [{ConsumerId, Consumer}]} + end. + +merge_consumer(#consumer{cfg = CCfg, checked_out = Checked} = Consumer, + ConsumerMeta, {Life, Credit, Mode}, Priority) -> + NumChecked = map_size(Checked), + NewCredit = max(0, Credit - NumChecked), + Consumer#consumer{cfg = CCfg#consumer_cfg{priority = Priority, + meta = ConsumerMeta, + credit_mode = Mode, + lifetime = Life}, + status = up, + credit = NewCredit}. maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, ServiceQueue0) -> diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 9e43dd76f582..8cc5c74841dd 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -116,7 +116,7 @@ -record(consumer, {cfg = #consumer_cfg{}, - status = up :: up | suspected_down | cancelled, + status = up :: up | suspected_down | cancelled | waiting, next_msg_id = 0 :: msg_id(), % part of snapshot data checked_out = #{} :: #{msg_id() => indexed_msg()}, %% max number of messages that can be sent @@ -125,7 +125,6 @@ %% total number of checked out messages - ever %% incremented for each delivery delivery_count = 0 :: non_neg_integer() - }). -type consumer() :: #consumer{}. diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 79f273ef23fa..6ab1aa4159a8 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -713,6 +713,63 @@ single_active_consumer_basic_get_test(_) -> State1), ok. 
+single_active_consumer_revive_test(_) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), + single_active_consumer_on => true}), + Cid1 = {<<"one">>, self()}, + Cid2 = {<<"two">>, self()}, + {S1, _} = check_auto(Cid1, 1, S0), + {S2, _} = check_auto(Cid2, 2, S1), + {S3, _} = enq(3, 1, first, S2), + %% cancel the active consumer whilst it has a message pending + {S4, _, _} = rabbit_fifo:apply(meta(4), make_checkout(Cid1, cancel, #{}), S3), + {S5, _} = check_auto(Cid1, 5, S4), + + ct:pal("S5 ~p", [S5]), + ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S5)), + ?assertEqual(1, rabbit_fifo:query_messages_total(S5)), + Consumers = S5#rabbit_fifo.consumers, + ?assertEqual(2, map_size(Consumers)), + Up = maps:filter(fun (_, #consumer{status = Status}) -> + Status == up + end, Consumers), + ?assertEqual(1, map_size(Up)), + + %% settle message and ensure it is handled correctly + {S6, _} = settle(Cid1, 6, 0, S5), + ?assertEqual(0, rabbit_fifo:query_messages_checked_out(S6)), + ?assertEqual(0, rabbit_fifo:query_messages_total(S6)), + + %% requeue message and check that is handled + {S6b, _} = return(Cid1, 6, 0, S5), + ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S6b)), + ?assertEqual(1, rabbit_fifo:query_messages_total(S6b)), + %% + %% TOOD: test this but without the fallback consumer + %% + %% + %% + %% MULTI checkout should not result in multiple waiting + ok. + +single_active_consumer_revive_2_test(_) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), + single_active_consumer_on => true}), + Cid1 = {<<"one">>, self()}, + {S1, _} = check_auto(Cid1, 1, S0), + {S2, _} = enq(3, 1, first, S1), + %% cancel the active consumer whilst it has a message pending + {S3, _, _} = rabbit_fifo:apply(meta(4), make_checkout(Cid1, cancel, #{}), S2), + {S4, _} = check_auto(Cid1, 5, S3), + ?assertEqual(1, rabbit_fifo:query_consumer_count(S4)), + ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(S4))), + ?assertEqual(1, rabbit_fifo:query_messages_total(S4)), + ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S4)), + + ok. 
+ single_active_consumer_test(_) -> State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, @@ -742,10 +799,10 @@ single_active_consumer_test(_) -> % the first registered consumer is the active one, the others are waiting ?assertEqual(1, map_size(State1#rabbit_fifo.consumers)), ?assertMatch(#{C1 := _}, State1#rabbit_fifo.consumers), - ?assertEqual(3, length(State1#rabbit_fifo.waiting_consumers)), - ?assertNotEqual(false, lists:keyfind(C2, 1, State1#rabbit_fifo.waiting_consumers)), - ?assertNotEqual(false, lists:keyfind(C3, 1, State1#rabbit_fifo.waiting_consumers)), - ?assertNotEqual(false, lists:keyfind(C4, 1, State1#rabbit_fifo.waiting_consumers)), + ?assertEqual(3, length(rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(C2, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(C3, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(C4, 1, rabbit_fifo:query_waiting_consumers(State1))), % cancelling a waiting consumer {State2, _, Effects1} = apply(meta(2), @@ -755,9 +812,9 @@ single_active_consumer_test(_) -> ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)), ?assertMatch(#{C1 := _}, State2#rabbit_fifo.consumers), % the cancelled consumer has been removed from waiting consumers - ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)), - ?assertNotEqual(false, lists:keyfind(C2, 1, State2#rabbit_fifo.waiting_consumers)), - ?assertNotEqual(false, lists:keyfind(C4, 1, State2#rabbit_fifo.waiting_consumers)), + ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), + ?assertNotEqual(false, lists:keyfind(C2, 1, rabbit_fifo:query_waiting_consumers(State2))), + ?assertNotEqual(false, lists:keyfind(C4, 1, rabbit_fifo:query_waiting_consumers(State2))), % there are some effects to unregister the consumer ?ASSERT_EFF({mod_call, rabbit_quorum_queue, cancel_consumer_handler, [_, C]}, C == C3, Effects1), @@ -770,9 +827,9 @@ single_active_consumer_test(_) -> ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)), ?assertMatch(#{C2 := _}, State3#rabbit_fifo.consumers), % the new active consumer is no longer in the waiting list - ?assertEqual(1, length(State3#rabbit_fifo.waiting_consumers)), + ?assertEqual(1, length(rabbit_fifo:query_waiting_consumers(State3))), ?assertNotEqual(false, lists:keyfind(C4, 1, - State3#rabbit_fifo.waiting_consumers)), + rabbit_fifo:query_waiting_consumers(State3))), %% should have a cancel consumer handler mod_call effect and %% an active new consumer effect ?ASSERT_EFF({mod_call, rabbit_quorum_queue, @@ -788,7 +845,7 @@ single_active_consumer_test(_) -> ?assertEqual(1, map_size(State4#rabbit_fifo.consumers)), ?assertMatch(#{C4 := _}, State4#rabbit_fifo.consumers), % the waiting consumer list is now empty - ?assertEqual(0, length(State4#rabbit_fifo.waiting_consumers)), + ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State4))), % there are some effects to unregister the consumer and % to update the new active one (metrics) ?ASSERT_EFF({mod_call, rabbit_quorum_queue, @@ -803,7 +860,7 @@ single_active_consumer_test(_) -> % no active consumer anymore ?assertEqual(0, map_size(State5#rabbit_fifo.consumers)), % still nothing in the waiting list - ?assertEqual(0, length(State5#rabbit_fifo.waiting_consumers)), + ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State5))), % there is an effect to unregister the consumer + queue inactive effect ?ASSERT_EFF({mod_call, rabbit_quorum_queue, 
cancel_consumer_handler, _}, Effects4), @@ -840,7 +897,7 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(_) -> % fell back to another consumer ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)), % there are still waiting consumers - ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)), + ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), % effects to unregister the consumer and % to update the new active one (metrics) are there ?ASSERT_EFF({mod_call, rabbit_quorum_queue, @@ -853,7 +910,7 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(_) -> % fell back to another consumer ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)), % no more waiting consumer - ?assertEqual(0, length(State3#rabbit_fifo.waiting_consumers)), + ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State3))), % effects to cancel both consumers of this channel + effect to update the new active one (metrics) ?ASSERT_EFF({mod_call, rabbit_quorum_queue, cancel_consumer_handler, [_, C]}, C == C2, Effects2), @@ -866,7 +923,7 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(_) -> {State4, _, Effects3} = apply(meta(4), {down, Pid3, doesnotmatter}, State3), % no more consumers ?assertEqual(0, map_size(State4#rabbit_fifo.consumers)), - ?assertEqual(0, length(State4#rabbit_fifo.waiting_consumers)), + ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State4))), % there is an effect to unregister the consumer + queue inactive effect ?ASSERT_EFF({mod_call, rabbit_quorum_queue, cancel_consumer_handler, [_, C]}, C == C4, Effects3), @@ -904,7 +961,7 @@ single_active_returns_messages_on_noconnection_test(_) -> ?assertMatch([_], lqueue:to_list(State3#rabbit_fifo.returns)), ?assertMatch([{_, #consumer{checked_out = Checked}}] when map_size(Checked) == 0, - State3#rabbit_fifo.waiting_consumers), + rabbit_fifo:query_waiting_consumers(State3)), ok. @@ -950,8 +1007,8 @@ single_active_consumer_replaces_consumer_when_down_noconnection_test(_) -> %% the disconnected consumer has been returned to waiting ?assert(lists:any(fun ({C,_}) -> C =:= C1 end, - State2#rabbit_fifo.waiting_consumers)), - ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)), + rabbit_fifo:query_waiting_consumers(State2))), + ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), % simulate node comes back up {State3, _, _} = apply(meta(2), {nodeup, node(DownPid)}, State2), @@ -960,10 +1017,10 @@ single_active_consumer_replaces_consumer_when_down_noconnection_test(_) -> ?assertMatch([{C2, #consumer{status = up}}], maps:to_list(State3#rabbit_fifo.consumers)), % the waiting consumers should be un-suspected - ?assertEqual(2, length(State3#rabbit_fifo.waiting_consumers)), + ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State3))), lists:foreach(fun({_, #consumer{status = Status}}) -> ?assert(Status /= suspected_down) - end, State3#rabbit_fifo.waiting_consumers), + end, rabbit_fifo:query_waiting_consumers(State3)), ok. 
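The assertions above now call rabbit_fifo:query_waiting_consumers/1 rather than reading State#rabbit_fifo.waiting_consumers directly, so the tests stop depending on a record layout that can change between machine versions. As a hedged sketch, assuming the v2 state record still keeps a waiting_consumers list of {ConsumerId, #consumer{}} pairs, such a query function amounts to:

    %% Sketch only: expose the waiting (inactive) single-active consumers
    %% without callers having to know the record layout.
    query_waiting_consumers(#rabbit_fifo{waiting_consumers = WaitingConsumers}) ->
        WaitingConsumers.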
single_active_consumer_all_disconnected_test(_) -> @@ -1250,7 +1307,7 @@ single_active_cancelled_with_unacked_test(_) -> cfg = #consumer_cfg{lifetime = once}, checked_out = #{0 := _}}}, State4#rabbit_fifo.consumers), - ?assertMatch([], State4#rabbit_fifo.waiting_consumers), + ?assertMatch([], rabbit_fifo:query_waiting_consumers(State4)), %% Ack both messages {State5, _Effects5} = settle(C1, 1, 0, State4), @@ -1263,7 +1320,7 @@ single_active_cancelled_with_unacked_test(_) -> %% C1 should be gone ?assertNotMatch(#{C1 := _}, State6#rabbit_fifo.consumers), - ?assertMatch([], State6#rabbit_fifo.waiting_consumers), + ?assertMatch([], rabbit_fifo:query_waiting_consumers(State6)), ok. single_active_with_credited_test(_) -> @@ -1296,7 +1353,7 @@ single_active_with_credited_test(_) -> ?assertMatch(#{C1 := #consumer{credit = 5}}, State3#rabbit_fifo.consumers), ?assertMatch([{C2, #consumer{credit = 4}}], - State3#rabbit_fifo.waiting_consumers), + rabbit_fifo:query_waiting_consumers(State3)), ok. @@ -1460,7 +1517,7 @@ meta(Idx, Timestamp) -> enq(Idx, MsgSeq, Msg, State) -> strip_reply( - apply(meta(Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)). + rabbit_fifo:apply(meta(Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)). deq(Idx, Cid, Settlement, Msg, State0) -> {State, _, Effs} = @@ -1500,6 +1557,9 @@ check(Cid, Idx, Num, State) -> settle(Cid, Idx, MsgId, State) -> strip_reply(apply(meta(Idx), rabbit_fifo:make_settle(Cid, [MsgId]), State)). +return(Cid, Idx, MsgId, State) -> + strip_reply(apply(meta(Idx), rabbit_fifo:make_return(Cid, [MsgId]), State)). + credit(Cid, Idx, Credit, DelCnt, Drain, State) -> strip_reply(apply(meta(Idx), rabbit_fifo:make_credit(Cid, Credit, DelCnt, Drain), State)). From e7280e49369422630682031de8f7af896f120911 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 10 Feb 2022 12:50:37 +0100 Subject: [PATCH 68/97] Fix rebase onto origin/master lqueue:foldl was renamed to lqueue:fold --- deps/rabbit/src/rabbit_fifo.erl | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 1b0d0b935f93..e5192e3e1d31 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -684,13 +684,12 @@ convert_v1_to_v2(V1State0) -> V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> lqueue:in(convert_msg(Hdr), Acc) end, lqueue:new(), PrefReturns), - MessagesV2 = lqueue:foldl(fun ({_, IdxMsg}, Acc) -> - lqueue:in(convert_msg(IdxMsg), Acc) - end, V2PrefMsgs, MessagesV1), - ReturnsV2 = lqueue:foldl(fun ({_SeqId, Msg}, Acc) -> - lqueue:in(convert_msg(Msg), Acc) - end, V2PrefReturns, ReturnsV1), - + MessagesV2 = lqueue:fold(fun ({_, IdxMsg}, Acc) -> + lqueue:in(convert_msg(IdxMsg), Acc) + end, V2PrefMsgs, MessagesV1), + ReturnsV2 = lqueue:fold(fun ({_SeqId, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) + end, V2PrefReturns, ReturnsV1), ConsumersV2 = maps:map( fun (ConsumerId, CV1) -> convert_consumer(ConsumerId, CV1) @@ -699,8 +698,6 @@ convert_v1_to_v2(V1State0) -> fun ({ConsumerId, CV1}) -> {ConsumerId, convert_consumer(ConsumerId, CV1)} end, WaitingConsumersV1), - - EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), EnqueuersV2 = maps:map(fun (_EnqPid, Enq) -> Enq#enqueuer{unused = undefined} From 2361e299b36edf02c8ae27657b8a7b108d0e9c3c Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 10 Feb 2022 12:59:13 +0100 Subject: [PATCH 69/97] Replace lqueue:peek/1 with new lqueue:get/2 to allocate less garbage --- deps/rabbit/src/rabbit_fifo.erl | 24 
++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index e5192e3e1d31..b60989fba2f6 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1886,13 +1886,13 @@ take_next_msg(#?MODULE{returns = Returns0, end end. -peek_next_msg(#?MODULE{returns = Returns0, +get_next_msg(#?MODULE{returns = Returns0, messages = Messages0}) -> - case lqueue:peek(Returns0) of - {value, _} = Msg -> - Msg; + case lqueue:get(Returns0, empty) of empty -> - lqueue:peek(Messages0) + lqueue:get(Messages0, empty); + Msg -> + Msg end. delivery_effect({CTag, CPid}, [{Idx, {MsgId, Header}}], @@ -1993,10 +1993,10 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% dequeue all expired messages expire_msgs(RaCmdTs, Result, State, Effects) -> %% In the normal case, there are no expired messages. - %% Therefore, first queue:peek/1 to check whether we need to queue:out/1 + %% Therefore, first lqueue:get/2 to check whether we need to lqueue:out/1 %% because the latter can be much slower than the former. - case peek_next_msg(State) of - {value, ?INDEX_MSG(_Idx, ?DISK_MSG(#{expiry := Expiry} = Header))} + case get_next_msg(State) of + ?INDEX_MSG(_Idx, ?DISK_MSG(#{expiry := Expiry} = Header)) when RaCmdTs >= Expiry -> expire(RaCmdTs, Header, State, Effects); _ -> @@ -2022,8 +2022,8 @@ expire(RaCmdTs, Header, State0, Effects) -> expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). timer_effect(RaCmdTs, State, Effects) -> - T = case peek_next_msg(State) of - {value, ?INDEX_MSG(_, ?DISK_MSG(#{expiry := Expiry}))} + T = case get_next_msg(State) of + ?INDEX_MSG(_, ?DISK_MSG(#{expiry := Expiry})) when is_number(Expiry) -> %% Next message contains 'expiry' header. %% (Re)set timer so that mesage will be dropped or dead-lettered on time. @@ -2363,8 +2363,8 @@ smallest_raft_index(#?MODULE{messages = Messages, ra_indexes = Indexes, dlx = DlxState}) -> SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), - SmallestMsgsRaIdx = case lqueue:peek(Messages) of - {value, ?INDEX_MSG(I, _)} -> + SmallestMsgsRaIdx = case lqueue:get(Messages, empty) of + ?INDEX_MSG(I, _) -> I; _ -> undefined From a3eaaa11c0d1dd6edfa46023994e4e226066b8fe Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Thu, 10 Feb 2022 12:17:02 +0000 Subject: [PATCH 70/97] rabbit_fifo: test fixes --- deps/rabbit/src/rabbit_amqqueue.erl | 3 ++- deps/rabbit/src/rabbit_fifo.erl | 2 +- deps/rabbit/src/rabbit_quorum_queue.erl | 5 +---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl index ec73770faaa7..39013365ec4d 100644 --- a/deps/rabbit/src/rabbit_amqqueue.erl +++ b/deps/rabbit/src/rabbit_amqqueue.erl @@ -1520,7 +1520,8 @@ get_queue_consumer_info(Q, ConsumerInfoKeys) -> [lists:zip(ConsumerInfoKeys, [amqqueue:get_name(Q), ChPid, CTag, AckRequired, Prefetch, Active, ActivityStatus, Args]) || - {ChPid, CTag, AckRequired, Prefetch, Active, ActivityStatus, Args, _} <- consumers(Q)]. + {ChPid, CTag, AckRequired, Prefetch, Active, ActivityStatus, Args, _} + <- consumers(Q)]. -spec stat(amqqueue:amqqueue()) -> {'ok', non_neg_integer(), non_neg_integer()}. 
diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index b60989fba2f6..8b154def1c6d 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -2083,7 +2083,7 @@ update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, #consumer{cfg = #consumer_cfg{tag = Tag, pid = Pid, lifetime = Life, - meta = Meta, + meta = ConsumerMeta, priority = Priority, credit_mode = Mode}, credit = Credit} diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index e38542ca1ea9..45dc26a565f0 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -415,10 +415,7 @@ spawn_deleter(QName) -> spawn_notify_decorators(QName, Fun, Args) -> %% run in ra process for now - notify_decorators(QName, Fun, Args). - % spawn(fun () -> - % notify_decorators(QName, Fun, Args) - % end). + catch notify_decorators(QName, Fun, Args). handle_tick(QName, {Name, MR, MU, M, C, MsgBytesReady, MsgBytesUnack, MsgBytesDiscard}, From d50dd90022e42cdac48780f9f46a413bb2f3dd66 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 10 Feb 2022 13:29:05 +0100 Subject: [PATCH 71/97] Fix dialyzer warnings by removing support for in-memory messages. Delete some commented code. --- deps/rabbit/src/rabbit_fifo.erl | 6 --- deps/rabbit/src/rabbit_fifo_dlx.erl | 62 +++++++---------------------- 2 files changed, 15 insertions(+), 53 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 8b154def1c6d..4b91c05a3761 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -55,7 +55,6 @@ %% misc dehydrate_state/1, - dehydrate_message/1, normalize/1, get_msg_header/1, get_header/2, @@ -2155,11 +2154,6 @@ dehydrate_state(#?MODULE{cfg = #cfg{}, msg_cache = undefined, dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. -dehydrate_message(?INDEX_MSG(_Idx, ?DISK_MSG(_Header) = Msg)) -> - %% Use disk msgs directly as prefix messages. - %% This avoids memory allocation since we do not convert. - Msg. - %% make the state suitable for equality comparison normalize(#?MODULE{ra_indexes = _Indexes, returns = Returns, diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index dada6ddb0860..5a9c5224b172 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -90,11 +90,11 @@ apply(_Meta, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, msg_bytes_checkout = BytesCheckout, ra_indexes = Indexes0} = S) -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - S#?MODULE{consumer = C#dlx_consumer{checked_out = - maps:remove(MsgId, Checked)}, - msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), - ra_indexes = Indexes} + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + S#?MODULE{consumer = C#dlx_consumer{checked_out = + maps:remove(MsgId, Checked)}, + msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), + ra_indexes = Indexes} end, State0, Acked), {State, [{mod_call, rabbit_global_counters, messages_dead_lettered_confirmed, [rabbit_quorum_queue, at_least_once, maps:size(Acked)]}]}; @@ -179,20 +179,14 @@ discard(Msgs, Reason, at_least_once, State0) -spec checkout(dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> - checkout0(checkout_one(State), {[],[]}); + checkout0(checkout_one(State), []); checkout(_, State) -> {State, []}. 
-checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}, State}, {InMemMsgs, LogMsgs}) +checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}, State}, SendAcc) when is_integer(Idx) -> DelMsg = {Idx, {Reason, MsgId, Header}}, - SendAcc = {InMemMsgs, [DelMsg|LogMsgs]}, - checkout0(checkout_one(State), SendAcc); -% checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?MSG(Header, Msg))}, State}, {InMemMsgs, LogMsgs}) -% when is_integer(Idx) -> -% DelMsg = {MsgId, {Reason, Header, Msg}}, -% SendAcc = {[DelMsg|InMemMsgs], LogMsgs}, -% checkout0(checkout_one(State), SendAcc); + checkout0(checkout_one(State), [DelMsg | SendAcc]); checkout0({success, _MsgId, {_Reason, ?TUPLE(_, _)}, State}, SendAcc) -> %% This is a prefix message which means we are recovering from a snapshot. %% We know: @@ -232,24 +226,16 @@ size_in_bytes(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> rabbit_fifo:get_header(size, Header). %% returns at most one delivery effect because there is only one consumer -delivery_effects(_CPid, {[], []}) -> +delivery_effects(_CPid, []) -> []; -delivery_effects(CPid, {InMemMsgs, []}) -> - [{send_msg, CPid, {dlx_delivery, lists:reverse(InMemMsgs)}, [ra_event]}]; -delivery_effects(CPid, {InMemMsgs, IdxMsgs0}) -> +delivery_effects(CPid, IdxMsgs0) -> IdxMsgs = lists:reverse(IdxMsgs0), {RaftIdxs, Data} = lists:unzip(IdxMsgs), [{log, RaftIdxs, fun(Log) -> - Msgs0 = lists:zipwith(fun ({enqueue, _, _, Msg}, {Reason, MsgId, Header}) -> - {MsgId, {Reason, Header, Msg}} - end, Log, Data), - Msgs = case InMemMsgs of - [] -> - Msgs0; - _ -> - lists:sort(InMemMsgs ++ Msgs0) - end, + Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {Reason, MsgId, Header}) -> + {MsgId, {Reason, Header, Msg}} + end, Log, Data), [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. @@ -384,26 +370,8 @@ purge(#?MODULE{consumer = Consumer0} = State) -> -spec dehydrate(state()) -> state(). -dehydrate(#?MODULE{discards = _Discards, - consumer = _Con} = State) -> - State#?MODULE{%%discards = dehydrate_messages(Discards), - %%consumer = dehydrate_consumer(Con), - ra_indexes = rabbit_fifo_index:empty()}. - -% dehydrate_messages(Discards) -> -% L0 = lqueue:to_list(Discards), -% L1 = lists:map(fun({_Reason, Msg}) -> -% {?NIL, rabbit_fifo:dehydrate_message(Msg)} -% end, L0), -% lqueue:from_list(L1). - -% dehydrate_consumer(#dlx_consumer{checked_out = Checked0} = Con) -> -% Checked = maps:map(fun (_, {_, Msg}) -> -% {?NIL, rabbit_fifo:dehydrate_message(Msg)} -% end, Checked0), -% Con#dlx_consumer{checked_out = Checked}; -% dehydrate_consumer(undefined) -> -% undefined. +dehydrate(State) -> + State#?MODULE{ra_indexes = rabbit_fifo_index:empty()}. -spec normalize(state()) -> state(). 
From 1e812f99825ed22e835c3610039a2708450794ce Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 10 Feb 2022 16:25:21 +0100 Subject: [PATCH 72/97] Enable required feature flags for mixed version cluster tests --- deps/rabbit/test/dead_lettering_SUITE.erl | 8 +++++++- deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl | 9 +++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 63422ab0a056..59d4c09c8588 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -146,7 +146,13 @@ init_per_group(at_least_once, Config) -> 1, QueueArgs1, {<<"x-overflow">>, longstr, <<"reject-publish">>}), - rabbit_ct_helpers:set_config(Config, {queue_args, QueueArgs}); + Config1 = rabbit_ct_helpers:set_config(Config, {queue_args, QueueArgs}), + case rabbit_ct_broker_helpers:enable_feature_flag(Config1, stream_queue) of + ok -> + Config1; + Skip -> + Skip + end; _ -> Config end; diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index f2d93c7e1a64..9b69aaca5d13 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -78,8 +78,13 @@ init_per_group(Group, Config, NodesCount) -> ok = rabbit_ct_broker_helpers:rpc( Config2, 0, application, set_env, [rabbit, channel_tick_interval, 100]), - timer:sleep(1000), - Config2. + case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of + ok -> case rabbit_ct_broker_helpers:enable_feature_flag(Config2, stream_queue) of + ok -> Config2; + Skip -> Skip + end; + Skip -> Skip + end. end_per_group(_, Config) -> rabbit_ct_helpers:run_steps(Config, From b934a42df3a132fa6861470692dca81392b4e86f Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 14 Feb 2022 09:30:04 +0100 Subject: [PATCH 73/97] Fall back to at-most-once if feature flag stream_queue is disabled because dead-letter-strategy at-least-once requires the rabbit_queue_type refactor that comes with the stream_queue feature flag. It is reasonable to assume that feature flag stream_queue introduced in 3.9 will be enabled in 3.10. --- deps/rabbit/src/rabbit_quorum_queue.erl | 14 ++++++++++++-- .../test/rabbit_fifo_dlx_integration_SUITE.erl | 9 +++++---- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index 45dc26a565f0..89816372337b 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -1283,8 +1283,18 @@ dlh(undefined, _, Strategy, _, QName) -> "because dead-letter-exchange is not configured.", [rabbit_misc:rs(QName), Strategy]), undefined; -dlh(_, _, <<"at-least-once">>, reject_publish, _) -> - at_least_once; +dlh(Exchange, RoutingKey, <<"at-least-once">>, reject_publish, QName) -> + %% Feature flag stream_queue includes the rabbit_queue_type refactor + %% which is required by rabbit_fifo_dlx_worker. 
+ case rabbit_feature_flags:is_enabled(stream_queue) of
+ true ->
+ at_least_once;
+ false ->
+ rabbit_log:warning("Falling back to dead-letter-strategy at-most-once for ~s "
+ "because feature flag stream_queue is disabled.",
+ [rabbit_misc:rs(QName)]),
+ dlh_at_most_once(Exchange, RoutingKey, QName)
+ end;
 dlh(Exchange, RoutingKey, <<"at-least-once">>, drop_head, QName) ->
 rabbit_log:warning("Falling back to dead-letter-strategy at-most-once for ~s "
 "because configured dead-letter-strategy at-least-once is incompatible with "
diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl
index 9b69aaca5d13..1aed1a8b17ca 100644
--- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl
+++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl
@@ -596,16 +596,17 @@ reject_publish_target_quorum_queue(Config) ->
 {<<"x-dead-letter-routing-key">>, longstr, TargetQ},
 {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>},
 {<<"x-overflow">>, longstr, <<"reject-publish">>},
- {<<"x-queue-type">>, longstr, <<"quorum">>},
- {<<"x-message-ttl">>, long, 1}
+ {<<"x-queue-type">>, longstr, <<"quorum">>}
 ]),
 declare_queue(Ch, TargetQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
 {<<"x-overflow">>, longstr, <<"reject-publish">>},
 {<<"x-max-length">>, long, 1}
 ]),
 Msg = <<"m">>,
- [ok,ok,ok,ok] = [amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, #amqp_msg{payload = Msg})
- || _N <- lists:seq(1,4)],
+ [ok,ok,ok,ok] = [amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ},
+ #amqp_msg{props = #'P_basic'{expiration = integer_to_binary(N)},
+ payload = Msg})
+ || N <- lists:seq(1,4)],
 %% Quorum queues reject publishes once the limit is already exceeded.
 %% Therefore, although max-length of target queue is configured to be 1,
 %% it will contain 2 messages before rejecting publishes.

From 9230f62e2992fc9e6cf71b03b5ab8a278234d477 Mon Sep 17 00:00:00 2001
From: David Ansari
Date: Wed, 16 Feb 2022 11:51:48 +0100
Subject: [PATCH 74/97] Delete from unsettled target queues when target queue is deleted

Before this commit, messages were stuck in 'pendings' when a target quorum
queue got deleted and thereafter recreated with the same name.
The dlx worker relies on the rabbit_fifo_client to redeliver messages once
the dlx worker has routed to a target quorum queue.
Therefore, we need to delete a target quorum queue from the list of unsettled
target queues when it is deleted. This is an O(N) operation. However, this
should be fine, given that we do not expect target queues to be deleted often
in combination with massive fanouts for dead-lettered messages.
(The channel does something similar by removing sequence numbers from
rabbit_confirms when a target queue is deleted.)
Furthermore, if the dead-letter exchange is the default exchange,
rabbit_router:route/2 will always route to a target queue even if it does not
exist. Therefore, we also check whether the target queues the dlx worker
routes to exist before storing them in the 'unsettled' list of queues.
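The existence check described in the last paragraph comes down to mapping the routed-to names through a lookup and keeping only the queues that are actually present; a condensed sketch of that step is shown here (the helper name is an assumption, the real change lives in rabbit_fifo_dlx_worker in the diff below):

    %% rabbit_amqqueue:lookup/1 on a list of names drops names that cannot be
    %% found, so mapping back to names yields only the existing target queues.
    existing_routes(RouteToQNames) ->
        [amqqueue:get_name(Q) || Q <- rabbit_amqqueue:lookup(RouteToQNames)].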
--- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 79 ++++++++++++------- .../rabbit_fifo_dlx_integration_SUITE.erl | 44 +++++++++++ deps/rabbit/test/rabbit_fifo_int_SUITE.erl | 2 +- 3 files changed, 94 insertions(+), 31 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index fe0b9158b2ec..17a892bcedec 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -141,11 +141,7 @@ handle_cast({queue_event, QRef, Evt}, State = handle_queue_actions(Actions, State1), {noreply, State}; eol -> - %% Do not confirm pending messages whose target queue got deleted. - %% Irrespective of exchanges, queues, bindings created or deleted (actual state), - %% we respect the configured dead-letter routing topology (desired state). - QTypeState = rabbit_queue_type:remove(QRef, QTypeState0), - {noreply, State0#state{queue_type_state = QTypeState}}; + remove_queue(QRef, State0); {protocol_error, _Type, _Reason, _Args} -> {noreply, State0} end; @@ -173,15 +169,13 @@ handle_info({'DOWN', Ref, process, _, _}, handle_info({'DOWN', _MRef, process, QPid, Reason}, #state{queue_type_state = QTypeState0} = State0) -> %% received from target classic queue - State = case rabbit_queue_type:handle_down(QPid, Reason, QTypeState0) of - {ok, QTypeState, Actions} -> - State1 = State0#state{queue_type_state = QTypeState}, - handle_queue_actions(Actions, State1); - {eol, QTypeState1, QRef} -> - QTypeState = rabbit_queue_type:remove(QRef, QTypeState1), - State0#state{queue_type_state = QTypeState} - end, - {noreply, State}; + case rabbit_queue_type:handle_down(QPid, Reason, QTypeState0) of + {ok, QTypeState, Actions} -> + State = State0#state{queue_type_state = QTypeState}, + {noreply, handle_queue_actions(Actions, State)}; + {eol, QTypeState, QRef} -> + remove_queue(QRef, State0#state{queue_type_state = QTypeState}) + end; handle_info(Info, State) -> rabbit_log:info("~s received unhandled info ~p", [?MODULE, Info]), {noreply, State}. @@ -189,6 +183,33 @@ handle_info(Info, State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. +remove_queue(QRef, #state{pendings = Pendings0, + queue_type_state = QTypeState0} = State) -> + Pendings = maps:map(fun(_Seq, #pending{unsettled = Unsettled} = Pending) -> + Pending#pending{unsettled = lists:delete(QRef, Unsettled)} + end, Pendings0), + QTypeState = rabbit_queue_type:remove(QRef, QTypeState0), + %% Wait for max 1s (we don't want to block the gen_server process for a longer time) + %% until target queue is deleted from ETS table to prevent subsequently consumed message(s) + %% from being routed to a deleted target queue. (If that happens for a target quorum or + %% stream queue and that queue gets re-created with the same name, these messages will + %% be stuck in our 'pendings' state.) + wait_for_queue_deleted(QRef, 20), + {noreply, State#state{pendings = Pendings, + queue_type_state = QTypeState}}. + +wait_for_queue_deleted(QRef, 0) -> + rabbit_log:debug("Received deletion event for ~s but queue still exists in ETS table.", + [rabbit_misc:rs(QRef)]); +wait_for_queue_deleted(QRef, N) -> + case rabbit_amqqueue:lookup(QRef) of + {error, not_found} -> + ok; + _ -> + timer:sleep(50), + wait_for_queue_deleted(QRef, N-1) + end. + -spec lookup_topology(state()) -> state(). 
lookup_topology(#state{queue_ref = {resource, Vhost, queue, _} = QRef} = State) -> {ok, Q} = rabbit_amqqueue:lookup(QRef), @@ -274,8 +295,9 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, {[], State0}; _ -> RouteToQs0 = rabbit_exchange:route(DLX, Delivery), - {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs0), + {RouteToQs1, Cycles} = rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs0), State1 = log_cycles(Cycles, RKeys, State0), + RouteToQs = rabbit_amqqueue:lookup(RouteToQs1), State2 = case RouteToQs of [] -> log_no_route_once(State1); @@ -300,7 +322,7 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, _ -> Pend = Pend0#pending{publish_count = 1, last_published_at = Now, - unsettled = TargetQs}, + unsettled = lists:map(fun amqqueue:get_name/1, TargetQs)}, State = State3#state{next_out_seq = OutSeq + 1, pendings = maps:put(OutSeq, Pend, Pendings)}, deliver_to_queues(Delivery, TargetQs, State) @@ -308,18 +330,12 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, -spec deliver_to_queues(rabbit_types:delivery(), [rabbit_amqqueue:name()], state()) -> state(). -deliver_to_queues(Delivery, RouteToQNames, #state{queue_type_state = QTypeState0} = State0) -> - Qs = rabbit_amqqueue:lookup(RouteToQNames), +deliver_to_queues(Delivery, Qs, #state{queue_type_state = QTypeState0} = State0) -> {QTypeState2, Actions} = case rabbit_queue_type:deliver(Qs, Delivery, QTypeState0) of {ok, QTypeState1, Actions0} -> {QTypeState1, Actions0}; - {error, {coordinator_unavailable, Resource}} -> - rabbit_log:warning("Cannot deliver message because stream coordinator unavailable for ~s", - [rabbit_misc:rs(Resource)]), - {QTypeState0, []}; - {error, {stream_not_found, Resource}} -> - rabbit_log:warning("Cannot deliver message because stream not found for ~s", - [rabbit_misc:rs(Resource)]), + {error, Reason} -> + rabbit_log:info("Failed to deliver message: ~p", [Reason]), {QTypeState0, []} end, State = State0#state{queue_type_state = QTypeState2}, @@ -422,7 +438,10 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, Delivery = Delivery0#delivery{message = BasicMsg#basic_message{exchange_name = DLXRef, routing_keys = DLRKeys}}, RouteToQs0 = rabbit_exchange:route(DLX, Delivery), - case {RouteToQs0, Settled} of + %% rabbit_exchange:route/2 can route to target queues that do not exist (e.g. in case of default exchange). + %% Therefore, filter out non-existent target queues. + RouteToQs1 = lists:map(fun amqqueue:get_name/1, rabbit_amqqueue:lookup(RouteToQs0)), + case {RouteToQs1, Settled} of {[], [_|_]} -> %% Routes changed dynamically so that we don't await any publisher confirms anymore. %% Since we also received at least once publisher confirm (mandatory flag semantics), @@ -432,12 +451,12 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, _ -> %% Do not redeliver message to a target queue %% 1. for which we already received a publisher confirm, or - Unsettled = RouteToQs0 -- Settled, + Unsettled = RouteToQs1 -- Settled, %% 2. whose queue client redelivers on our behalf. %% Note that a quorum queue client does not redeliver on our behalf if it previously %% rejected the message. This is why we always redeliver rejected messages here. 
- RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0), - {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs1), + RouteToQs2 = Unsettled -- clients_redeliver(Unsettled0), + {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs2), State1 = log_cycles(Cycles, DLRKeys, State0), case RouteToQs of [] -> @@ -452,7 +471,7 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, %% to be routed to is moved back to 'unsettled'. rejected = []}, State = State0#state{pendings = maps:update(OutSeq, Pend, Pendings)}, - deliver_to_queues(Delivery, RouteToQs, State) + deliver_to_queues(Delivery, rabbit_amqqueue:lookup(RouteToQs), State) end end. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 1aed1a8b17ca..dcfec6ad53e0 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -43,6 +43,7 @@ groups() -> ]}, {cluster_size_3, [], [ many_target_queues, + target_quorum_queue_delete_create, single_dlx_worker ]} ]. @@ -814,6 +815,49 @@ many_target_queues(Config) -> ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)), ?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)). +%% Test that all dead-lettered messages reach target quorum queue eventually +%% when target queue is deleted and recreated with same name +%% and when dead-letter-exchange is default exchange. +target_quorum_queue_delete_create(Config) -> + [Server1, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + %% Create topology: + %% * source quorum queue with 1 replica on node 1 + %% * target quorum queue with 3 replicas + declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, <<"">>}, + {<<"x-dead-letter-routing-key">>, longstr, TargetQ}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}, + {<<"x-message-ttl">>, long, 1} + ]), + DeclareTargetQueue = fun() -> + declare_queue(Ch, TargetQ, + [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 3} + ]) + end, + DeclareTargetQueue(), + [ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{payload = <<"msg">>}) + || _ <- lists:seq(1, 100)], %% 100 messages in total + eventually(?_assertNotEqual([{0, 0}], + dirty_query([Server1], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 500, 20), + %% Delete and recreate target queue (immediately or after some while). + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}), + timer:sleep(rand:uniform(500)), + DeclareTargetQueue(), + %% Expect no message to get stuck in dlx worker. + eventually(?_assertEqual([{0, 0}], + dirty_query([Server1], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 500, 40), + ?assertEqual(100, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(100, counted(messages_dead_lettered_confirmed_total, Config)), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}). + %% Test that there is a single active rabbit_fifo_dlx_worker that is co-located with the quorum queue leader. 
 single_dlx_worker(Config) ->
 [Server1, Server2, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
diff --git a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
index 65464d8f05e4..a8c4f8871f61 100644
--- a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
+++ b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
@@ -295,7 +295,7 @@ returns_after_down(Config) ->
 F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
 {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
 {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F1),
- % start a customer in a separate processes
+ % start a consumer in a separate processes
 % that exits after checkout
 Self = self(),
 _Pid = spawn(fun () ->

From 64c078a360476bba649b069f1a577483d5c05be8 Mon Sep 17 00:00:00 2001
From: David Ansari
Date: Wed, 16 Feb 2022 17:44:20 +0100
Subject: [PATCH 75/97] Reject message if it cannot be delivered

Before this commit, a message that failed to be delivered to a target quorum
queue was still left in 'unsettled' and therefore the dlx worker relied upon
the rabbit_fifo_client to re-deliver, which resulted in that message being
stuck in the dlx worker.
---
 deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 72 +++++++++++++---------
 1 file changed, 43 insertions(+), 29 deletions(-)

diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl
index 17a892bcedec..c53aefce1855 100644
--- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl
+++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl
@@ -248,25 +248,29 @@ handle_deliver(Msgs, #state{queue_ref = QRef} = State0)
 handle_rejected(QRef, MsgSeqNos, #state{pendings = Pendings0} = State)
 when is_list(MsgSeqNos) ->
-deliver_to_queues(Delivery, Qs, #state{queue_type_state = QTypeState0} = State0) -> - {QTypeState2, Actions} = case rabbit_queue_type:deliver(Qs, Delivery, QTypeState0) of - {ok, QTypeState1, Actions0} -> - {QTypeState1, Actions0}; - {error, Reason} -> - rabbit_log:info("Failed to deliver message: ~p", [Reason]), - {QTypeState0, []} - end, - State = State0#state{queue_type_state = QTypeState2}, +deliver_to_queues(#delivery{msg_seq_no = SeqNo} = Delivery, Qs, #state{queue_type_state = QTypeState0, + pendings = Pendings} = State0) -> + {State, Actions} = case rabbit_queue_type:deliver(Qs, Delivery, QTypeState0) of + {ok, QTypeState, Actions0} -> + {State0#state{queue_type_state = QTypeState}, Actions0}; + {error, Reason} -> + %% rabbit_queue_type:deliver/3 does not tell us which target queue failed. + %% Therefore, reject all target queues. We need to reject them such that + %% we won't rely on rabbit_fifo_client to re-deliver on behalf of us + %% (and therefore preventing messages to get stuck in our 'unsettled' state). + QNames = queue_names(Qs), + rabbit_log:debug("Failed to deliver message with seq_no ~b to queues ~p: ~p", + [SeqNo, QNames, Reason]), + {State0#state{pendings = rejected(SeqNo, QNames, Pendings)}, []} + end, handle_queue_actions(Actions, State). handle_settled(QRef, MsgSeqs, State) -> @@ -440,7 +450,7 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, RouteToQs0 = rabbit_exchange:route(DLX, Delivery), %% rabbit_exchange:route/2 can route to target queues that do not exist (e.g. in case of default exchange). %% Therefore, filter out non-existent target queues. - RouteToQs1 = lists:map(fun amqqueue:get_name/1, rabbit_amqqueue:lookup(RouteToQs0)), + RouteToQs1 = queue_names(rabbit_amqqueue:lookup(RouteToQs0)), case {RouteToQs1, Settled} of {[], [_|_]} -> %% Routes changed dynamically so that we don't await any publisher confirms anymore. @@ -491,7 +501,7 @@ clients_redeliver(QNames) -> false end end, rabbit_amqqueue:lookup_many(QNames)), - lists:map(fun amqqueue:get_name/1, Qs). + queue_names(Qs). maybe_set_timer(#state{timer = TRef} = State) when is_reference(TRef) -> @@ -521,6 +531,10 @@ cancel_timer(#state{timer = TRef} = State) cancel_timer(State) -> State. +queue_names(Qs) + when is_list(Qs) -> + lists:map(fun amqqueue:get_name/1, Qs). + format_status(_Opt, [_PDict, #state{ queue_ref = QueueRef, exchange_ref = ExchangeRef, From 0bf07685628c18e35bb74068c9a77038a7415a07 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Thu, 17 Feb 2022 10:57:20 +0100 Subject: [PATCH 76/97] Test at-least-once routes to different target queues based on reason Rebase onto origin/master and therefore add a test for at-least-once dead-lettering where dead-lettered messages are routed to different target queues based on x-first-death-reason header. 
--- deps/rabbit/test/dead_lettering_SUITE.erl | 34 ++--- .../rabbit_fifo_dlx_integration_SUITE.erl | 124 +++++++++--------- 2 files changed, 79 insertions(+), 79 deletions(-) diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 59d4c09c8588..3f8c1c7d1b41 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -48,8 +48,7 @@ groups() -> dead_letter_headers_CC, dead_letter_headers_CC_with_routing_key, dead_letter_headers_first_death, - dead_letter_headers_first_death_route - dead_letter_headers_first_death, + dead_letter_headers_first_death_route, dead_letter_ttl, dead_letter_routing_key_cycle_ttl, dead_letter_headers_reason_expired, @@ -1239,7 +1238,7 @@ dead_letter_headers_first_death(Config) -> dead_letter_headers_first_death_route(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), QName = ?config(queue_name, Config), - DLXMaxLengthQName = ?config(queue_name_dlx, Config), + DLXExpiredQName = ?config(queue_name_dlx, Config), DLXRejectedQName = ?config(queue_name_dlx_2, Config), Args = ?config(queue_args, Config), Durable = ?config(queue_durable, Config), @@ -1248,38 +1247,41 @@ dead_letter_headers_first_death_route(Config) -> #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange, type = <<"headers">>}), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, - arguments = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}, - {<<"x-max-length">>, long, 1} | Args], + arguments = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange} | Args], durable = Durable}), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXMaxLengthQName, + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXExpiredQName, durable = Durable}), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXRejectedQName, durable = Durable}), MatchAnyWithX = {<<"x-match">>, longstr, <<"any-with-x">>}, - #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXMaxLengthQName, + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXExpiredQName, exchange = DLXExchange, arguments = [MatchAnyWithX, - {<<"x-first-death-reason">>, longstr, <<"maxlen">>}] + {<<"x-first-death-reason">>, longstr, <<"expired">>}] }), #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXRejectedQName, exchange = DLXExchange, arguments = [MatchAnyWithX, {<<"x-first-death-reason">>, longstr, <<"rejected">>}] }), + %% Send 1st message and let it expire. P1 = <<"msg1">>, + amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, + #amqp_msg{payload = P1, + props = #'P_basic'{expiration = <<"0">>}}), + %% The 1st message gets dead-lettered to DLXExpiredQName. + wait_for_messages(Config, [[DLXExpiredQName, <<"1">>, <<"1">>, <<"0">>]]), + _ = consume(Ch, DLXExpiredQName, [P1]), + consume_empty(Ch, DLXExpiredQName), + wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), + %% Send and reject the 2nd message. P2 = <<"msg2">>, - %% Publish 2 messages - publish(Ch, QName, [P1, P2]), - %% The 1st message gets dropped from head of queue, dead-lettered and routed to DLXMaxLengthQName. - wait_for_messages(Config, [[DLXMaxLengthQName, <<"1">>, <<"1">>, <<"0">>]]), - _ = consume(Ch, DLXMaxLengthQName, [P1]), - consume_empty(Ch, DLXMaxLengthQName), - %% Reject the 2nd message. 
+ publish(Ch, QName, [P2]), wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]), [DTag] = consume(Ch, QName, [P2]), amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag, requeue = false}), - %% The 2nd message gets dead-lettered and routed to DLXRejectedQName. + %% The 2nd message gets dead-lettered to DLXRejectedQName. wait_for_messages(Config, [[DLXRejectedQName, <<"1">>, <<"1">>, <<"0">>]]), _ = consume(Ch, DLXRejectedQName, [P2]), consume_empty(Ch, DLXRejectedQName). diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index dcfec6ad53e0..f7597a60651c 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -39,11 +39,11 @@ groups() -> reject_publish_source_queue_max_length, reject_publish_source_queue_max_length_bytes, reject_publish_target_classic_queue, - reject_publish_target_quorum_queue + reject_publish_target_quorum_queue, + target_quorum_queue_delete_create ]}, {cluster_size_3, [], [ many_target_queues, - target_quorum_queue_delete_create, single_dlx_worker ]} ]. @@ -586,7 +586,7 @@ reject_publish(Config, QArg) when is_tuple(QArg) -> amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}))), ok = rabbit_ct_broker_helpers:clear_policy(Config, Server, PolicyName). -%% Test that message gets eventually delivered to target quorum queue when it gets rejected initially. +%% Test that message gets delivered to target quorum queue eventually when it gets rejected initially. reject_publish_target_quorum_queue(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), @@ -604,24 +604,20 @@ reject_publish_target_quorum_queue(Config) -> {<<"x-max-length">>, long, 1} ]), Msg = <<"m">>, - [ok,ok,ok,ok] = [amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, - #amqp_msg{props = #'P_basic'{expiration = integer_to_binary(N)}, - payload = Msg}) - || N <- lists:seq(1,4)], - %% Quorum queues reject publishes once the limit is already exceeded. - %% Therefore, although max-length of target queue is configured to be 1, - %% it will contain 2 messages before rejecting publishes. - %% Therefore, we expect target queue confirmed 2 messages and rejected 2 messages. - wait_for_messages_ready([Server], ra_name(TargetQ), 2), - consistently(?_assertEqual([{2, 2}], - dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1))), - %% Let's make some space in the target queue for the 2 rejected messages. - {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), - {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + %% Send 4 messages although target queue has max-length of 1. + [ok,ok,ok,ok] = [begin + amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{props = #'P_basic'{expiration = integer_to_binary(N)}, + payload = Msg}) + end || N <- lists:seq(1,4)], + %% Make space in target queue by consuming messages one by one + %% allowing for more dead-lettered messages to reach the target queue. 
+ [begin + timer:sleep(2000), + {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}) + end || _ <- lists:seq(1,4)], eventually(?_assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 5), - {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), - {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 10), ?assertEqual(4, counted(messages_dead_lettered_expired_total, Config)), eventually(?_assertEqual(4, counted(messages_dead_lettered_confirmed_total, Config))). @@ -673,6 +669,51 @@ publish_confirm(Ch, QName) -> ct:fail(confirm_timeout) end. +%% Test that all dead-lettered messages reach target quorum queue eventually +%% when target queue is deleted and recreated with same name +%% and when dead-letter-exchange is default exchange. +target_quorum_queue_delete_create(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + SourceQ = ?config(source_queue, Config), + TargetQ = ?config(target_queue_1, Config), + %% Create topology: + %% * source quorum queue with 1 replica + %% * target quorum queue with 1 replica + declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, <<"">>}, + {<<"x-dead-letter-routing-key">>, longstr, TargetQ}, + {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, + {<<"x-overflow">>, longstr, <<"reject-publish">>}, + {<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}, + {<<"x-message-ttl">>, long, 1} + ]), + DeclareTargetQueue = fun() -> + declare_queue(Ch, TargetQ, + [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1} + ]) + end, + DeclareTargetQueue(), + spawn(fun() -> + [ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{payload = <<"msg">>}) + || _ <- lists:seq(1, 100)] %% 100 messages in total + end), + eventually(?_assertNotEqual([{0, 0}], + dirty_query([Server], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 200, 100), + %% Delete and recreate target queue (immediately or after some while). + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}), + timer:sleep(rand:uniform(500)), + DeclareTargetQueue(), + %% Expect no message to get stuck in dlx worker. + eventually(?_assertEqual([{0, 0}], + dirty_query([Server], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 1000, 60), + ?assertEqual(100, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(100, counted(messages_dead_lettered_confirmed_total, Config)), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}). + %% Test that %% 1. Message is only acked to source queue once publisher confirms got received from **all** target queues. %% 2. Target queue can be classic queue, quorum queue, or stream queue. @@ -815,49 +856,6 @@ many_target_queues(Config) -> ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)), ?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)). -%% Test that all dead-lettered messages reach target quorum queue eventually -%% when target queue is deleted and recreated with same name -%% and when dead-letter-exchange is default exchange. 
-target_quorum_queue_delete_create(Config) ->
- [Server1, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
- Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
- SourceQ = ?config(source_queue, Config),
- TargetQ = ?config(target_queue_1, Config),
- %% Create topology:
- %% * source quorum queue with 1 replica on node 1
- %% * target quorum queue with 3 replicas
- declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, <<"">>},
- {<<"x-dead-letter-routing-key">>, longstr, TargetQ},
- {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>},
- {<<"x-overflow">>, longstr, <<"reject-publish">>},
- {<<"x-queue-type">>, longstr, <<"quorum">>},
- {<<"x-quorum-initial-group-size">>, long, 1},
- {<<"x-message-ttl">>, long, 1}
- ]),
- DeclareTargetQueue = fun() ->
- declare_queue(Ch, TargetQ,
- [{<<"x-queue-type">>, longstr, <<"quorum">>},
- {<<"x-quorum-initial-group-size">>, long, 3}
- ])
- end,
- DeclareTargetQueue(),
- [ok = amqp_channel:cast(Ch,
- #'basic.publish'{routing_key = SourceQ},
- #amqp_msg{payload = <<"msg">>})
- || _ <- lists:seq(1, 100)], %% 100 messages in total
- eventually(?_assertNotEqual([{0, 0}],
- dirty_query([Server1], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 500, 20),
- %% Delete and recreate target queue (immediately or after some while).
- #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}),
- timer:sleep(rand:uniform(500)),
- DeclareTargetQueue(),
- %% Expect no message to get stuck in dlx worker.
- eventually(?_assertEqual([{0, 0}],
- dirty_query([Server1], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 500, 40),
- ?assertEqual(100, counted(messages_dead_lettered_expired_total, Config)),
- ?assertEqual(100, counted(messages_dead_lettered_confirmed_total, Config)),
- #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}).
-
 %% Test that there is a single active rabbit_fifo_dlx_worker that is co-located with the quorum queue leader.
 single_dlx_worker(Config) ->
 [Server1, Server2, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),

From 863d02ab138447c936f13e299eddd5a07ae52882 Mon Sep 17 00:00:00 2001
From: David Ansari
Date: Fri, 18 Feb 2022 16:38:57 +0100
Subject: [PATCH 77/97] Fix next_seq in rabbit_fifo_client

Before this commit, next_seq was off by one in different functions.
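Put differently, a command has to be pipelined under the current next_seq, and only afterwards may the counter advance, so that the key stored in 'pending' matches the sequence number the Ra event will report back. A reduced sketch of the corrected pattern (the state record is trimmed to the two relevant fields):

    send_cmd(Server, Correlation, Cmd,
             #state{pending = Pending, next_seq = Seq} = State) ->
        ok = ra:pipeline_command(Server, Cmd, Seq, low),
        State#state{pending = Pending#{Seq => {Correlation, Cmd}},
                    next_seq = Seq + 1}.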
--- deps/rabbit/src/rabbit_fifo_client.erl | 32 +++++++++++--------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index cf9625c841dd..7bf9f0f57b91 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -180,22 +180,21 @@ enqueue(Correlation, Msg, #state{slow = Slow, pending = Pending, queue_status = go, - next_seq = Seq0, - next_enqueue_seq = Next, + next_seq = Seq, + next_enqueue_seq = EnqueueSeq, cfg = #cfg{soft_limit = SftLmt, block_handler = BlockFun}} = State0) -> Server = pick_server(State0), % by default there is no correlation id - Cmd = rabbit_fifo:make_enqueue(self(), Next, Msg), - Seq = Seq0 + 1, + Cmd = rabbit_fifo:make_enqueue(self(), EnqueueSeq, Msg), ok = ra:pipeline_command(Server, Cmd, Seq, low), Tag = case map_size(Pending) >= SftLmt of true -> slow; false -> ok end, State = State0#state{pending = Pending#{Seq => {Correlation, Cmd}}, - next_seq = Seq, - next_enqueue_seq = Next + 1, + next_seq = Seq + 1, + next_enqueue_seq = EnqueueSeq + 1, slow = Tag == slow}, case Tag of slow when not Slow -> @@ -848,47 +847,42 @@ sorted_servers(#state{leader = Leader, cfg = #cfg{servers = Servers}}) -> [Leader | lists:delete(Leader, Servers)]. -next_seq(#state{next_seq = Seq} = State) -> - {Seq, State#state{next_seq = Seq + 1}}. - consumer_id(ConsumerTag) -> {ConsumerTag, self()}. send_command(Server, Correlation, Command, _Priority, #state{pending = Pending, - next_seq = Seq0, + next_seq = Seq, cfg = #cfg{soft_limit = SftLmt}} = State) when element(1, Command) == return -> %% returns are sent to the aux machine for pre-evaluation - Seq = Seq0 + 1, ok = ra:cast_aux_command(Server, {Command, Seq, self()}), Tag = case map_size(Pending) >= SftLmt of true -> slow; false -> ok end, State#state{pending = Pending#{Seq => {Correlation, Command}}, - next_seq = Seq, + next_seq = Seq + 1, slow = Tag == slow}; send_command(Server, Correlation, Command, Priority, #state{pending = Pending, - next_seq = Seq0, + next_seq = Seq, cfg = #cfg{soft_limit = SftLmt}} = State) -> - Seq = Seq0 + 1, ok = ra:pipeline_command(Server, Command, Seq, Priority), Tag = case map_size(Pending) >= SftLmt of true -> slow; false -> ok end, State#state{pending = Pending#{Seq => {Correlation, Command}}, - next_seq = Seq, + next_seq = Seq + 1, slow = Tag == slow}. - resend_command(Node, Correlation, Command, - #state{pending = Pending} = State0) -> - {Seq, State} = next_seq(State0), + #state{pending = Pending, + next_seq = Seq} = State) -> ok = ra:pipeline_command(Node, Command, Seq), - State#state{pending = Pending#{Seq => {Correlation, Command}}}. + State#state{pending = Pending#{Seq => {Correlation, Command}}, + next_seq = Seq + 1}. add_command(_, _, [], Acc) -> Acc; From 9433100ef65444d8c20092cb59501021eceb3ef8 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 18 Feb 2022 16:50:52 +0100 Subject: [PATCH 78/97] Resend pending commands if same leader re-elected Before this commit, if the same leader was re-elected, rabbit_fifo_client did not resend. That's undesired because a queue restart can happen on a single node cluster. 
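A reduced sketch of the resulting behaviour (helper name and state shape are simplifications): every leader_change notification now triggers a resend of all pending commands, even when the reported leader equals the one already recorded, because the queue may simply have been restarted in place.

    handle_leader_change(Leader, #state{pending = Pending} = State) ->
        [ok = ra:pipeline_command(Leader, Cmd, Seq)
         || {Seq, {_Correlation, Cmd}} <- maps:to_list(Pending)],
        State#state{leader = Leader}.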
--- deps/rabbit/src/rabbit_fifo_client.erl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 7bf9f0f57b91..e377037d101d 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -592,10 +592,6 @@ handle_ra_event(_, {machine, {queue_status, Status}}, #state{} = State) -> %% just set the queue status {ok, State#state{queue_status = Status}, []}; -handle_ra_event(Leader, {machine, leader_change}, - #state{leader = Leader} = State) -> - %% leader already known - {ok, State, []}; handle_ra_event(Leader, {machine, leader_change}, #state{leader = OldLeader} = State0) -> %% we need to update leader From 0729a6009abeffe3415c8e1f526558f6c5707b05 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 18 Feb 2022 17:53:57 +0100 Subject: [PATCH 79/97] Avoid ETS lookup since ETS lookup is more expensive than getting the queue module via rabbit_queue_type:module/2. --- deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 34 +++++++++++----------- deps/rabbit/src/rabbit_queue_type.erl | 13 +++------ 2 files changed, 21 insertions(+), 26 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index c53aefce1855..5837fe6053b8 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -443,7 +443,8 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, DLX, DLRKeys, OutSeq, #state{pendings = Pendings, settled_ids = SettledIds, - exchange_ref = DLXRef} = State0) + exchange_ref = DLXRef, + queue_type_state = QTypeState} = State0) when is_list(DLRKeys) -> Delivery = Delivery0#delivery{message = BasicMsg#basic_message{exchange_name = DLXRef, routing_keys = DLRKeys}}, @@ -465,7 +466,7 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, %% 2. whose queue client redelivers on our behalf. %% Note that a quorum queue client does not redeliver on our behalf if it previously %% rejected the message. This is why we always redeliver rejected messages here. - RouteToQs2 = Unsettled -- clients_redeliver(Unsettled0), + RouteToQs2 = Unsettled -- clients_redeliver(Unsettled0, QTypeState), {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, BasicMsg, RouteToQs2), State1 = log_cycles(Cycles, DLRKeys, State0), case RouteToQs of @@ -486,22 +487,21 @@ redeliver0(#pending{delivery = #delivery{message = BasicMsg} = Delivery0, end. %% Returns queues whose queue clients take care of redelivering messages. --spec clients_redeliver([rabbit_amqqueue:name()]) -> +-spec clients_redeliver([rabbit_amqqueue:name()], rabbit_queue_type:state()) -> [rabbit_amqqueue:name()]. -clients_redeliver(QNames) -> - Qs = lists:filter(fun(Q) -> - case amqqueue:get_type(Q) of - rabbit_quorum_queue -> - %% If Raft command (#enqueue{}) does not get applied - %% rabbit_fifo_client will resend. - true; - rabbit_stream_queue -> - true; - _ -> - false - end - end, rabbit_amqqueue:lookup_many(QNames)), - queue_names(Qs). +clients_redeliver(Qs, QTypeState) -> + lists:filter(fun(Q) -> + case rabbit_queue_type:module(Q, QTypeState) of + {ok, rabbit_quorum_queue} -> + % If #enqueue{} Raft command does not get applied + % rabbit_fifo_client will resend. + true; + {ok, rabbit_stream_queue} -> + true; + _ -> + false + end + end, Qs). 
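The gain here is that the worker already holds a rabbit_queue_type session state, so asking it for the queue's type module avoids fetching the amqqueue record again; a small usage sketch with an illustrative function name:

    %% true if the queue client resends on our behalf (quorum or stream),
    %% judged from the session state instead of an amqqueue lookup.
    client_redelivers(QName, QTypeState) ->
        case rabbit_queue_type:module(QName, QTypeState) of
            {ok, rabbit_quorum_queue} -> true;
            {ok, rabbit_stream_queue} -> true;
            _ -> false
        end.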
maybe_set_timer(#state{timer = TRef} = State) when is_reference(TRef) -> diff --git a/deps/rabbit/src/rabbit_queue_type.erl b/deps/rabbit/src/rabbit_queue_type.erl index 906ae341ac6e..34375cedf4a5 100644 --- a/deps/rabbit/src/rabbit_queue_type.erl +++ b/deps/rabbit/src/rabbit_queue_type.erl @@ -80,7 +80,7 @@ state :: queue_state()}). --record(?STATE, {ctxs = #{} :: #{queue_ref() => #ctx{} | queue_ref()}, +-record(?STATE, {ctxs = #{} :: #{queue_ref() => #ctx{}}, monitor_registry = #{} :: #{pid() => queue_ref()} }). @@ -436,10 +436,10 @@ handle_event(QRef, Evt, Ctxs) -> -spec module(queue_ref(), state()) -> {ok, module()} | {error, not_found}. -module(QRef, Ctxs) -> +module(QRef, State) -> %% events can arrive after a queue state has been cleared up %% so need to be defensive here - case get_ctx(QRef, Ctxs, undefined) of + case get_ctx(QRef, State, undefined) of #ctx{module = Mod} -> {ok, Mod}; undefined -> @@ -577,12 +577,7 @@ get_ctx_with(QRef, Contexts, undefined) when ?QREF(QRef) -> get_ctx(QRef, #?STATE{ctxs = Contexts}, Default) -> Ref = qref(QRef), %% if we use a QRef it should always be initialised - case maps:get(Ref, Contexts, undefined) of - #ctx{} = Ctx -> - Ctx; - undefined -> - Default - end. + maps:get(Ref, Contexts, Default). set_ctx(Q, Ctx, #?STATE{ctxs = Contexts} = State) when ?is_amqqueue(Q) -> From fc486fc7eb961ea08031662658dfbaa342a54423 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Sun, 20 Feb 2022 12:15:41 +0100 Subject: [PATCH 80/97] Fix failing GitHub action for test target_quorum_queue_delete_create by not depending on any timing when an Erlang process is scheduled. --- .../rabbit_fifo_dlx_integration_SUITE.erl | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index f7597a60651c..ed501f082dda 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -10,6 +10,7 @@ -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -import(quorum_queue_utils, [wait_for_messages_ready/3, + wait_for_min_messages/3, dirty_query/3, ra_name/1]). -import(quorum_queue_SUITE, [publish/2, @@ -694,24 +695,32 @@ target_quorum_queue_delete_create(Config) -> {<<"x-quorum-initial-group-size">>, long, 1} ]) end, + + Send100Msgs = fun() -> + [ok = amqp_channel:cast(Ch, + #'basic.publish'{routing_key = SourceQ}, + #amqp_msg{payload = <<"msg">>}) + || _ <- lists:seq(1, 100)] + end, DeclareTargetQueue(), - spawn(fun() -> - [ok = amqp_channel:cast(Ch, - #'basic.publish'{routing_key = SourceQ}, - #amqp_msg{payload = <<"msg">>}) - || _ <- lists:seq(1, 100)] %% 100 messages in total - end), - eventually(?_assertNotEqual([{0, 0}], - dirty_query([Server], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 200, 100), + Send100Msgs(), %% Delete and recreate target queue (immediately or after some while). + timer:sleep(rand:uniform(50)), + %% Log the current number of messages. + rabbit_ct_broker_helpers:rabbitmqctl_list( + Config, 0, ["list_queues", "name", "messages", "messages_ready", + "messages_unacknowledged"]), #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}), - timer:sleep(rand:uniform(500)), + Send100Msgs(), + timer:sleep(rand:uniform(200)), DeclareTargetQueue(), + Send100Msgs(), %% Expect no message to get stuck in dlx worker. 
+ wait_for_min_messages(Config, TargetQ, 200), eventually(?_assertEqual([{0, 0}], - dirty_query([Server], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 1000, 60), - ?assertEqual(100, counted(messages_dead_lettered_expired_total, Config)), - ?assertEqual(100, counted(messages_dead_lettered_confirmed_total, Config)), + dirty_query([Server], ra_name(SourceQ), fun rabbit_fifo:query_stat_dlx/1)), 500, 10), + ?assertEqual(300, counted(messages_dead_lettered_expired_total, Config)), + ?assertEqual(300, counted(messages_dead_lettered_confirmed_total, Config)), #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = TargetQ}). %% Test that From fc2d37ed1cd10e9c26d40ce898df9340cf57d063 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 21 Feb 2022 14:30:00 +0100 Subject: [PATCH 81/97] Route also to extra BCC queues when dead-lettering Before this commit, messages dead-lettered to target queues were not routed to the target queues' extra BCC queues. This commit also fixes a bug in the channel where confirms from extra BCC queues were not waited for. Instead of adding logic for extra BCC queues for every component using rabbit_exchange:route/2, function rabbit_exchange:route/2 will from now on also return extra BCC queues. Therefore, both at-most-once and at-least-once dead-lettering will respect target queues' extra BCCs. --- deps/rabbit/src/rabbit_channel.erl | 52 ++++--------------- deps/rabbit/src/rabbit_exchange.erl | 47 +++++++++++++---- deps/rabbit/test/dead_lettering_SUITE.erl | 34 +++++++++++- .../rabbit_fifo_dlx_integration_SUITE.erl | 11 ++-- 4 files changed, 83 insertions(+), 61 deletions(-) diff --git a/deps/rabbit/src/rabbit_channel.erl b/deps/rabbit/src/rabbit_channel.erl index 531a36af4517..af99c78f3e59 100644 --- a/deps/rabbit/src/rabbit_channel.erl +++ b/deps/rabbit/src/rabbit_channel.erl @@ -2165,22 +2165,18 @@ deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{ex confirm = Confirm, msg_seq_no = MsgSeqNo}, _RoutedToQueueNames = [QName]}, State0 = #ch{queue_states = QueueStates0}) -> %% optimisation when there is one queue - AllNames = case rabbit_amqqueue:lookup(QName) of - {ok, Q0} -> - case amqqueue:get_options(Q0) of - #{extra_bcc := BCC} -> [QName, rabbit_misc:r(QName#resource.virtual_host, queue, BCC)]; - _ -> [QName] - end; - _ -> [] - end, - Qs = rabbit_amqqueue:lookup(AllNames), + {QueueNames, Qs} = case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + {[QName], [Q]}; + _ -> {[], []} + end, case rabbit_queue_type:deliver(Qs, Delivery, QueueStates0) of {ok, QueueStates, Actions} -> rabbit_global_counters:messages_routed(amqp091, erlang:min(1, length(Qs))), %% NB: the order here is important since basic.returns must be %% sent before confirms. 
ok = process_routing_mandatory(Mandatory, Qs, Message, State0), - State1 = process_routing_confirm(Confirm, AllNames, MsgSeqNo, XName, State0), + State1 = process_routing_confirm(Confirm, QueueNames, MsgSeqNo, XName, State0), %% Actions must be processed after registering confirms as actions may %% contain rejections of publishes State = handle_queue_actions(Actions, State1#ch{queue_states = QueueStates}), @@ -2208,21 +2204,15 @@ deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{ex confirm = Confirm, msg_seq_no = MsgSeqNo}, RoutedToQueueNames}, State0 = #ch{queue_states = QueueStates0}) -> - Qs0 = rabbit_amqqueue:lookup(RoutedToQueueNames), - AllQueueNames = lists:map(fun amqqueue:get_name/1, Qs0), - AllExtraBCCs = infer_extra_bcc(Qs0), - %% Collect implicit BCC targets these queues may have - Qs = case AllExtraBCCs of - [] -> Qs0; - ExtraNames -> Qs0 ++ rabbit_amqqueue:lookup(ExtraNames) - end, + Qs = rabbit_amqqueue:lookup(RoutedToQueueNames), + QueueNames = lists:map(fun amqqueue:get_name/1, Qs), case rabbit_queue_type:deliver(Qs, Delivery, QueueStates0) of {ok, QueueStates, Actions} -> rabbit_global_counters:messages_routed(amqp091, length(Qs)), %% NB: the order here is important since basic.returns must be %% sent before confirms. ok = process_routing_mandatory(Mandatory, Qs, Message, State0), - State1 = process_routing_confirm(Confirm, AllQueueNames, + State1 = process_routing_confirm(Confirm, QueueNames, MsgSeqNo, XName, State0), %% Actions must be processed after registering confirms as actions may %% contain rejections of publishes @@ -2231,7 +2221,7 @@ deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{ex fine -> ?INCR_STATS(exchange_stats, XName, 1, publish), [?INCR_STATS(queue_exchange_stats, {QName, XName}, 1, publish) - || QName <- AllQueueNames]; + || QName <- QueueNames]; _ -> ok end, @@ -2243,28 +2233,6 @@ deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{ex [rabbit_misc:rs(Resource)]) end. --spec infer_extra_bcc([amqqueue:amqqueue()]) -> [rabbit_amqqueue:name()]. -infer_extra_bcc([]) -> - []; -infer_extra_bcc([Q]) -> - case amqqueue:get_options(Q) of - #{extra_bcc := BCC} -> - #resource{virtual_host = VHost} = amqqueue:get_name(Q), - [rabbit_misc:r(VHost, queue, BCC)]; - _ -> - [] - end; -infer_extra_bcc(Qs) -> - lists:foldl(fun(Q, Acc) -> - case amqqueue:get_options(Q) of - #{extra_bcc := BCC} -> - #resource{virtual_host = VHost} = amqqueue:get_name(Q), - [rabbit_misc:r(VHost, queue, BCC) | Acc]; - _ -> - Acc - end - end, [], Qs). - process_routing_mandatory(_Mandatory = true, _RoutedToQs = [], Msg, State) -> diff --git a/deps/rabbit/src/rabbit_exchange.erl b/deps/rabbit/src/rabbit_exchange.erl index 5406b541dc27..bb97eaa546ab 100644 --- a/deps/rabbit/src/rabbit_exchange.erl +++ b/deps/rabbit/src/rabbit_exchange.erl @@ -408,21 +408,46 @@ info_all(VHostPath, Items, Ref, AggregatorPid) -> route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName, decorators = Decorators} = X, #delivery{message = #basic_message{routing_keys = RKs}} = Delivery) -> - case RName of - <<>> -> - RKsSorted = lists:usort(RKs), - [rabbit_channel:deliver_reply(RK, Delivery) || - RK <- RKsSorted, virtual_reply_queue(RK)], - [rabbit_misc:r(VHost, queue, RK) || RK <- RKsSorted, - not virtual_reply_queue(RK)]; - _ -> - Decs = rabbit_exchange_decorator:select(route, Decorators), - lists:usort(route1(Delivery, Decs, {[X], XName, []})) - end. 
+ QNames = case RName of + <<>> -> + RKsSorted = lists:usort(RKs), + [rabbit_channel:deliver_reply(RK, Delivery) || + RK <- RKsSorted, virtual_reply_queue(RK)], + [rabbit_misc:r(VHost, queue, RK) || RK <- RKsSorted, + not virtual_reply_queue(RK)]; + _ -> + Decs = rabbit_exchange_decorator:select(route, Decorators), + lists:usort(route1(Delivery, Decs, {[X], XName, []})) + end, + Qs = rabbit_amqqueue:lookup(QNames), + ExtraBccQNames = infer_extra_bcc(Qs), + ExtraBccQNames ++ QNames. virtual_reply_queue(<<"amq.rabbitmq.reply-to.", _/binary>>) -> true; virtual_reply_queue(_) -> false. +-spec infer_extra_bcc([amqqueue:amqqueue()]) -> [rabbit_amqqueue:name()]. +infer_extra_bcc([]) -> + []; +infer_extra_bcc([Q]) -> + case amqqueue:get_options(Q) of + #{extra_bcc := BCC} -> + #resource{virtual_host = VHost} = amqqueue:get_name(Q), + [rabbit_misc:r(VHost, queue, BCC)]; + _ -> + [] + end; +infer_extra_bcc(Qs) -> + lists:foldl(fun(Q, Acc) -> + case amqqueue:get_options(Q) of + #{extra_bcc := BCC} -> + #resource{virtual_host = VHost} = amqqueue:get_name(Q), + [rabbit_misc:r(VHost, queue, BCC) | Acc]; + _ -> + Acc + end + end, [], Qs). + route1(_, _, {[], _, QNames}) -> QNames; route1(Delivery, Decorators, diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 3f8c1c7d1b41..856ce13441f3 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -52,7 +52,8 @@ groups() -> dead_letter_ttl, dead_letter_routing_key_cycle_ttl, dead_letter_headers_reason_expired, - dead_letter_headers_reason_expired_per_message], + dead_letter_headers_reason_expired_per_message, + dead_letter_extra_bcc], DisabledMetricTests = [metric_maxlen, metric_rejected, metric_expired_queue_msg_ttl, @@ -1286,6 +1287,37 @@ dead_letter_headers_first_death_route(Config) -> _ = consume(Ch, DLXRejectedQName, [P2]), consume_empty(Ch, DLXRejectedQName). +%% Route dead-letter messages also to extra BCC queues of target queues. +dead_letter_extra_bcc(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + SourceQ = ?config(queue_name, Config), + TargetQ = ?config(queue_name_dlx, Config), + ExtraBCCQ = ?config(queue_name_dlx_2, Config), + Durable = ?config(queue_durable, Config), + declare_dead_letter_queues(Ch, Config, SourceQ, TargetQ, [{<<"x-message-ttl">>, long, 0}]), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = ExtraBCCQ, + durable = Durable}), + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, set_queue_options, + [TargetQ, #{extra_bcc => ExtraBCCQ}]), + %% Publish message + P = <<"msg">>, + publish(Ch, SourceQ, [P]), + wait_for_messages(Config, [[TargetQ, <<"1">>, <<"1">>, <<"0">>], + [ExtraBCCQ, <<"1">>, <<"1">>, <<"0">>]]), + consume_empty(Ch, SourceQ), + [_] = consume(Ch, TargetQ, [P]), + [_] = consume(Ch, ExtraBCCQ, [P]), + ok. + +set_queue_options(QName, Options) -> + rabbit_misc:execute_mnesia_transaction( + fun() -> + rabbit_amqqueue:update(rabbit_misc:r(<<"/">>, queue, QName), + fun(Q) -> + amqqueue:set_options(Q, Options) + end) + end). 
+ metric_maxlen(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), QName = ?config(queue_name, Config), diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index ed501f082dda..e454f0a39d49 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -3,6 +3,9 @@ %% Integration tests for at-least-once dead-lettering comprising mainly %% rabbit_fifo_dlx, rabbit_fifo_dlx_worker, rabbit_fifo_dlx_client %% rabbit_quorum_queue, rabbit_fifo. +%% +%% Some at-least-once dead-lettering tests can also be found in +%% module dead_lettering_SUITE. -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -678,22 +681,16 @@ target_quorum_queue_delete_create(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), SourceQ = ?config(source_queue, Config), TargetQ = ?config(target_queue_1, Config), - %% Create topology: - %% * source quorum queue with 1 replica - %% * target quorum queue with 1 replica declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, <<"">>}, {<<"x-dead-letter-routing-key">>, longstr, TargetQ}, {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, {<<"x-overflow">>, longstr, <<"reject-publish">>}, {<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-quorum-initial-group-size">>, long, 1}, {<<"x-message-ttl">>, long, 1} ]), DeclareTargetQueue = fun() -> declare_queue(Ch, TargetQ, - [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-quorum-initial-group-size">>, long, 1} - ]) + [{<<"x-queue-type">>, longstr, <<"quorum">>}]) end, Send100Msgs = fun() -> From a3905da47cb5411b014dc3ff3b2cbbede2c5eb08 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 21 Feb 2022 19:12:45 +0100 Subject: [PATCH 82/97] Add note about missed Prometheus counter updates Currently, the quorum queue state machine updates counters via mod_call effects which are not guaranteed to be executed. They are updated via mod_call effects such that only the leader increments the counter (and not the followers). In certain failure scenarios when dead-lettering lots of messages at the same time, these mod_call effects might not be executed. Hence, one shouldn't rely that counters for dead lettered messages and dead lettered confirmed messages match up 100% even though all dead-lettered messages were confirmed eventually. --- deps/rabbitmq_prometheus/metrics.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/deps/rabbitmq_prometheus/metrics.md b/deps/rabbitmq_prometheus/metrics.md index a5b05c2e0020..d7cc644e3f0c 100644 --- a/deps/rabbitmq_prometheus/metrics.md +++ b/deps/rabbitmq_prometheus/metrics.md @@ -85,6 +85,11 @@ Label `queue_type` denotes the type of queue messages were discarded from. It ca (Queue type `rabbit_stream_queue` does not dead letter messages.) +Note that metrics `rabbitmq_global_messages_dead_lettered_*` with label `queue_type` set to `rabbit_quorum_queue` +might miss some counter updates in certain failure scenarios, i.e. the reported Prometheus value could be +slightly lower than the actual number of messages dead lettered (and confirmed). +(This is because in the current implementation quorum queue leaders update the counters asynchronously.) 
+ Label `dead_letter_strategy` can have value * `disabled` if queue has no dead-letter-exchange configured or if configured dead-letter-exchange does not exist implying messages get dropped, or * `at_most_once` if queue's configured dead-lettered-exchange exists, or From 781328be36bc5958c7097054130a55143d60ff59 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 22 Feb 2022 10:17:42 +0100 Subject: [PATCH 83/97] Fix dialyzer warning Fixes: rabbit_fifo.erl:1756:62: The call rabbit_fifo_dlx:discard ([nonempty_improper_list(any(), map()), ...], 'delivery_limit', DLH :: 'at_least_once' | 'undefined' | {'at_most_once', {atom(), atom(), [any()]}}, DlxState0 :: {'rabbit_fifo_dlx', 'undefined' | {'dlx_consumer', pid(), non_neg_integer(), #{non_neg_integer() => {'delivery_limit' | 'expired' | 'rejected' | [], [any()]}}, non_neg_integer()}, lqueue:lqueue({'delivery_limit' | 'expired' | 'rejected' | [], [any)]}), rabbit_fifo_index:state(), non_neg_integer(), non_neg_integer()}) breaks the contract ([indexed_msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> {state(), ra_machine:effects()}( It seems that [a|b] is not a type that dialyzer understands. Instead, we need to use nonempty_improper_list(a,b). --- deps/rabbit/src/rabbit_fifo.hrl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 8cc5c74841dd..33d4892ec3fb 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -56,7 +56,7 @@ -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type indexed_msg() :: ?INDEX_MSG(ra:index(), msg_header()). +-type indexed_msg() :: nonempty_improper_list(ra:index(), msg_header()). % -type prefix_msg() :: {'$prefix_msg', msg_header()}. From 6eed54ffae178c7a75185ec584eb656189cee93e Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 22 Feb 2022 11:53:38 +0100 Subject: [PATCH 84/97] Move eventually and consistently to rabbit_ct_helpers so that these functions can be reused in other tests. Inspired by Gomega's Eventually and Consistently functions. See https://onsi.github.io/gomega/#making-asynchronous-assertions "Eventually checks that an assertion eventually passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments." "Consistently checks that an assertion passes for a period of time. It does this by polling its argument repeatedly during the period. It fails if the matcher ever fails during that period." --- .../rabbit_fifo_dlx_integration_SUITE.erl | 35 ++------------- .../src/rabbit_ct_helpers.erl | 44 ++++++++++++++++++- 2 files changed, 46 insertions(+), 33 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index e454f0a39d49..a60fafe9d6ea 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -16,6 +16,9 @@ wait_for_min_messages/3, dirty_query/3, ra_name/1]). +-import(rabbit_ct_helpers, [eventually/1, + eventually/3, + consistently/1]). -import(quorum_queue_SUITE, [publish/2, consume/3]). @@ -927,38 +930,6 @@ delete_queue(Channel, Queue) -> %% We implicitly test here that we don't end up with duplicate messages. #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Channel, #'queue.delete'{queue = Queue}). 
-%%TODO move to rabbitmq_ct_helpers/include/rabbit_assert.hrl -consistently(TestObj) -> - consistently(TestObj, 200, 5). - -consistently(_, _, 0) -> - ok; -consistently({_Line, Assertion} = TestObj, PollInterval, PollCount) -> - Assertion(), - timer:sleep(PollInterval), - consistently(TestObj, PollInterval, PollCount - 1). - -eventually(TestObj) -> - eventually(TestObj, 200, 5). - -eventually({Line, _}, _, 0) -> - erlang:error({assert_timeout, - [{file, ?FILE}, - {line, ?LINE}, - {assertion_line, Line} - ]}); -eventually({Line, Assertion} = TestObj, PollInterval, PollCount) -> - case catch Assertion() of - ok -> - ok; - Err -> - ct:pal(?LOW_IMPORTANCE, - "Retrying in ~b ms for ~b more times in file ~s, line ~b due to failed assertion in line ~b: ~p", - [PollInterval, PollCount - 1, ?FILE, ?LINE, Line, Err]), - timer:sleep(PollInterval), - eventually(TestObj, PollInterval, PollCount - 1) - end. - get_global_counters(Config) -> rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_global_counters, overview, []). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl index 12e7a158c82f..7e02c3bd93fa 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl @@ -50,7 +50,10 @@ await_condition/1, await_condition/2, - await_condition_with_retries/2 + await_condition_with_retries/2, + + eventually/1, eventually/3, + consistently/1, consistently/3 ]). -define(SSL_CERT_PASSWORD, "test"). @@ -1047,6 +1050,45 @@ await_condition_with_retries(ConditionFun, RetriesLeft) -> ok end. +%% Pass in any EUnit test object. Example: +%% eventually(?_assertEqual(1, Actual)) +eventually({Line, Assertion} = TestObj) + when is_integer(Line), Line >= 0, is_function(Assertion, 0) -> + eventually(TestObj, 200, 5). + +eventually({Line, _}, _, 0) -> + erlang:error({assert_timeout_line, Line}); +eventually({Line, Assertion} = TestObj, PollInterval, PollCount) + when is_integer(Line), Line >= 0, is_function(Assertion, 0), + is_integer(PollInterval), PollInterval >= 0, + is_integer(PollCount), PollCount >= 0 -> + case catch Assertion() of + ok -> + ok; + Err -> + ct:pal(?LOW_IMPORTANCE, + "Retrying in ~bms for ~b more times due to failed assertion in line ~b: ~p", + [PollInterval, PollCount - 1, Line, Err]), + timer:sleep(PollInterval), + eventually(TestObj, PollInterval, PollCount - 1) + end. + +%% Pass in any EUnit test object. Example: +%% consistently(?_assertEqual(1, Actual)) +consistently({Line, Assertion} = TestObj) + when is_integer(Line), Line >= 0, is_function(Assertion, 0) -> + consistently(TestObj, 200, 5). + +consistently(_, _, 0) -> + ok; +consistently({Line, Assertion} = TestObj, PollInterval, PollCount) + when is_integer(Line), Line >= 0, is_function(Assertion, 0), + is_integer(PollInterval), PollInterval >= 0, + is_integer(PollCount), PollCount >= 0 -> + Assertion(), + timer:sleep(PollInterval), + consistently(TestObj, PollInterval, PollCount - 1). + %% ------------------------------------------------------------------- %% Cover-related functions. %% ------------------------------------------------------------------- From 6554bca3a58b35f17ce41c7210dbd957dcd64353 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 22 Feb 2022 16:11:56 +0100 Subject: [PATCH 85/97] Save memory for dead-lettered messages Save per-message memory for dead-lettered messages by 1. removing all headers that are not needed anymore once the message got dead-lettered (delivery_count and expiry), and 2. 
saving 1 byte for every lqueue element of the discards queue by using an improper list instead of tuple to store {Reason, Message}. In total, this will save 9 bytes per dead-lettered message: ``` 1> erts_debug:size({expired, #{delivery_count => 2, size => 3}}). 11 2> erts_debug:size([expired | 3]). 2 ``` Many messages can pile up in the discards queue if target queues do not confirm messages fast enough or not at all. --- deps/rabbit/src/rabbit_fifo.erl | 4 +- deps/rabbit/src/rabbit_fifo.hrl | 33 +++++---------- deps/rabbit/src/rabbit_fifo_dlx.erl | 48 +++++++++++----------- deps/rabbit/src/rabbit_fifo_dlx.hrl | 18 ++++---- deps/rabbit/src/rabbit_fifo_dlx_client.erl | 2 +- 5 files changed, 43 insertions(+), 62 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 4b91c05a3761..d7dce1c28c66 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -635,9 +635,7 @@ convert_msg({'$prefix_msg', Header}) -> ?INDEX_MSG(undefined, ?DISK_MSG(Header)); convert_msg({Header, empty}) -> convert_msg(Header); -convert_msg(Header) - when is_integer(Header) orelse - is_map_key(size, Header) -> +convert_msg(Header) when ?IS_HEADER(Header) -> ?INDEX_MSG(undefined, ?DISK_MSG(Header)). convert_consumer({ConsumerTag, Pid}, CV1) -> diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 33d4892ec3fb..77b56e3c208c 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -1,23 +1,14 @@ - %% macros for memory optimised tuple structures -define(TUPLE(A, B), [A | B]). -%% We want short atoms since their binary representations will get -%% persisted in a snapshot for every message. -%% '$d' stand for 'disk'. --define(DISK_MSG_TAG, '$d'). -%% '$m' stand for 'memory'. --define(PREFIX_MEM_MSG_TAG, '$m'). - -% -define(DISK_MSG(Header), [Header | ?DISK_MSG_TAG]). -define(DISK_MSG(Header), Header). -% -define(MSG(Header, RawMsg), [Header | RawMsg]). --define(INDEX_MSG(Index, Msg), [Index | Msg]). -% -define(PREFIX_MEM_MSG(Header), [Header | ?PREFIX_MEM_MSG_TAG]). +-define(INDEX_MSG(Index, Msg), ?TUPLE(Index, Msg)). -define(IS_HEADER(H), - when is_integer(H) orelse - (is_map(H) andalso is_map_key(size, H))). + is_integer(H) orelse + (is_map(H) andalso is_map_key(size, H))). + +-type tuple(A, B) :: nonempty_improper_list(A, B). -type option(T) :: undefined | T. @@ -48,17 +39,13 @@ %% Value is determined by per-queue or per-message message TTL. %% If it only contains the size it can be condensed to an integer only --type msg() :: %%?MSG(msg_header(), raw_msg()) | - ?DISK_MSG(msg_header()). - % ?PREFIX_MEM_MSG(msg_header()). +-type msg() :: ?DISK_MSG(msg_header()). %% message with a header map. -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type indexed_msg() :: nonempty_improper_list(ra:index(), msg_header()). - -% -type prefix_msg() :: {'$prefix_msg', msg_header()}. +-type indexed_msg() :: tuple(ra:index(), msg_header()). -type delivery_msg() :: {msg_id(), {msg_header(), term()}}. %% A tuple consisting of the message id and the headered message. @@ -92,13 +79,13 @@ % represents a partially applied module call -define(RELEASE_CURSOR_EVERY, 2048). --define(RELEASE_CURSOR_EVERY_MAX, 3200000). +-define(RELEASE_CURSOR_EVERY_MAX, 3_200_000). -define(USE_AVG_HALF_LIFE, 10000.0). %% an average QQ without any message uses about 100KB so setting this limit %% to ~10 times that should be relatively safe. --define(GC_MEM_LIMIT_B, 2000000). +-define(GC_MEM_LIMIT_B, 2_000_000). 
--define(MB, 1048576). +-define(MB, 1_048_576). -define(LOW_LIMIT, 0.8). -record(consumer_cfg, diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 5a9c5224b172..e942ef1e63bd 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -86,7 +86,7 @@ stat(#?MODULE{consumer = Con, apply(_Meta, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, #?MODULE{consumer = #dlx_consumer{checked_out = Checked0}} = State0) -> Acked = maps:with(MsgIds, Checked0), - State = maps:fold(fun(MsgId, {_Rsn,?INDEX_MSG(Idx, ?DISK_MSG(_)) = Msg}, + State = maps:fold(fun(MsgId, ?TUPLE(_Rsn, ?INDEX_MSG(Idx, ?DISK_MSG(_)) = Msg), #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, msg_bytes_checkout = BytesCheckout, ra_indexes = Indexes0} = S) -> @@ -120,7 +120,7 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid, Checked0 = maps:to_list(CheckedOutOldConsumer), Checked1 = lists:keysort(1, Checked0), {Discards, BytesMoved} = lists:foldr( - fun({_Id, {_Reason, IdxMsg} = Msg}, {D, B}) -> + fun({_Id, ?TUPLE(_Reason, IdxMsg) = Msg}, {D, B}) -> {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} end, {Discards0, 0}, Checked1), State = State0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, @@ -140,34 +140,32 @@ discard(Msgs, Reason, undefined, State) -> {State, [{mod_call, rabbit_global_counters, messages_dead_lettered, [Reason, rabbit_quorum_queue, disabled, length(Msgs)]}]}; discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> - RaftIdxs = lists:filtermap( - fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - {true, RaftIdx}; - (_IgnorePrefixMessage) -> - false - end, Msgs0), + RaftIdxs = lists:map(fun(?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> + RaftIdx + end, Msgs0), Effect = {log, RaftIdxs, fun (Log) -> Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), - Msgs = lists:filtermap( - fun (?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), - {true, Msg}; - (_IgnorePrefixMessage) -> - false - end, Msgs0), + Msgs = lists:map(fun(?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> + {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), + Msg + end, Msgs0), [{mod_call, Mod, Fun, Args ++ [Reason, Msgs]}] end}, {State, [Effect]}; discard(Msgs, Reason, at_least_once, State0) when Reason =/= maxlen -> - %%TODO delete delivery_count header to save space? It's not needed anymore. - State = lists:foldl(fun (?INDEX_MSG(Idx, _) = Msg, + State = lists:foldl(fun (?INDEX_MSG(Idx, ?DISK_MSG(_Header)) = Msg0, #?MODULE{discards = D0, msg_bytes = B0, ra_indexes = I0} = S0) -> - D = lqueue:in({Reason, Msg}, D0), - B = B0 + size_in_bytes(Msg), + MsgSize = size_in_bytes(Msg0), + %% Condense header to an integer representing the message size. + %% We do not need delivery_count or expiry header fields anymore. + %% This saves per-message memory usage. + Msg = ?INDEX_MSG(Idx, ?DISK_MSG(MsgSize)), + D = lqueue:in(?TUPLE(Reason, Msg), D0), + B = B0 + MsgSize, I = rabbit_fifo_index:append(Idx, I0), S0#?MODULE{discards = D, msg_bytes = B, @@ -183,11 +181,11 @@ checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> checkout(_, State) -> {State, []}. 
-checkout0({success, MsgId, {Reason, ?INDEX_MSG(Idx, ?DISK_MSG(Header))}, State}, SendAcc) +checkout0({success, MsgId, ?TUPLE(Reason, ?INDEX_MSG(Idx, ?DISK_MSG(_Header))), State}, SendAcc) when is_integer(Idx) -> - DelMsg = {Idx, {Reason, MsgId, Header}}, + DelMsg = {Idx, {Reason, MsgId}}, checkout0(checkout_one(State), [DelMsg | SendAcc]); -checkout0({success, _MsgId, {_Reason, ?TUPLE(_, _)}, State}, SendAcc) -> +checkout0({success, _MsgId, ?TUPLE(_Reason, ?TUPLE(_, _)), State}, SendAcc) -> %% This is a prefix message which means we are recovering from a snapshot. %% We know: %% 1. This message was already delivered in the past, and @@ -209,7 +207,7 @@ checkout_one(#?MODULE{discards = Discards0, consumer = #dlx_consumer{checked_out = Checked0, next_msg_id = Next} = Con0} = State0) -> case lqueue:out(Discards0) of - {{value, {_, Msg} = ReasonMsg}, Discards} -> + {{value, ?TUPLE(_, Msg) = ReasonMsg}, Discards} -> Checked = maps:put(Next, ReasonMsg, Checked0), Size = size_in_bytes(Msg), State = State0#?MODULE{discards = Discards, @@ -233,8 +231,8 @@ delivery_effects(CPid, IdxMsgs0) -> {RaftIdxs, Data} = lists:unzip(IdxMsgs), [{log, RaftIdxs, fun(Log) -> - Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {Reason, MsgId, Header}) -> - {MsgId, {Reason, Header, Msg}} + Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {Reason, MsgId}) -> + {MsgId, {Reason, Msg}} end, Log, Data), [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index a3018cd90bf1..e191b20f0355 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -1,25 +1,23 @@ --define(NIL, []). - -%% At-least-once dead-lettering does not support reason 'maxlen'. -%% Reason of prefix messages is [] because the message will not be -%% actually delivered and storing 2 bytes in the persisted snapshot -%% is less than the reason atom. --type reason() :: expired | rejected | delivery_limit | ?NIL. - -record(dlx_consumer,{ %% We don't require a consumer tag because a consumer tag is a means to distinguish %% multiple consumers in the same channel. The rabbit_fifo_dlx_worker channel like process however %% creates only a single consumer to this quorum queue's discards queue. pid :: pid(), prefetch :: non_neg_integer(), - checked_out = #{} :: #{msg_id() => {reason(), indexed_msg()}}, + checked_out = #{} :: #{msg_id() => tuple( + rabbit_dead_letter:reason(), + indexed_msg() + )}, next_msg_id = 0 :: msg_id() }). -record(rabbit_fifo_dlx,{ consumer = undefined :: #dlx_consumer{} | undefined, %% Queue of dead-lettered messages. - discards = lqueue:new() :: lqueue:lqueue({reason(), indexed_msg()}), + discards = lqueue:new() :: lqueue:lqueue(tuple( + rabbit_dead_letter:reason(), + indexed_msg() + )), %% Raft indexes of messages in both discards queue and dlx_consumer's checked_out map %% so that we get the smallest ra index in O(1). ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index 0b363f2e5e6c..459d27cc3eec 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -74,7 +74,7 @@ handle_delivery({dlx_delivery, [{FstId, _} | _] = IdMsgs}, transform_msgs(QRes, Msgs) -> lists:map( - fun({MsgId, {Reason, _MsgHeader, Msg}}) -> + fun({MsgId, {Reason, Msg}}) -> {QRes, MsgId, Msg, Reason} end, Msgs). 
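As an aside to the memory-saving patch above: the `[A | B]` pair behind the `?TUPLE/2` macro
is ordinary Erlang and can be tried in isolation. Everything below is an invented sketch;
only the pair shape and the word counts quoted in the commit message come from the patch.

```
%% Standalone illustration of the [A | B] pair used above; module and
%% function names are made up, only the pair shape mirrors the patch.
-module(improper_pair_sketch).
-export([demo/0]).

%% Same shape as the ?TUPLE(A, B) macro: a single cons cell (2 words)
%% instead of a 2-tuple (3 words, one header word plus two elements).
-define(PAIR(A, B), [A | B]).

demo() ->
    Pair = ?PAIR(expired, 3),            %% Reason paired with message size
    ?PAIR(Reason, Size) = Pair,          %% pattern matching works as usual
    {erts_debug:size(Pair),              %% 2 heap words
     erts_debug:size({Reason, Size})}.   %% 3 heap words
```

The trade-off is readability: `[A | B]` looks like a list but is not a proper one, which is
why the code keeps the shape behind a macro instead of spelling it out at every use site.
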
From 980ef3f93c5a855aff5a3ab2bcdea4d32c91492a Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 22 Feb 2022 19:15:49 +0100 Subject: [PATCH 86/97] Use one ?MSG macro instead of ?INDEX_MSG and ?DISK_MSG macros because from now on there will be only disk messages. This makes the code more readable. --- deps/rabbit/src/rabbit_fifo.erl | 147 +++++++++------------ deps/rabbit/src/rabbit_fifo.hrl | 19 ++- deps/rabbit/src/rabbit_fifo_dlx.erl | 34 +++-- deps/rabbit/src/rabbit_fifo_dlx.hrl | 10 +- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 2 +- 5 files changed, 92 insertions(+), 120 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index d7dce1c28c66..2c882c96954e 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -86,7 +86,7 @@ msg_id :: msg_id(), index :: ra:index(), header :: msg_header(), - msg :: indexed_msg()}). + msg :: msg()}). -record(register_enqueuer, {pid :: pid()}). -record(checkout, {consumer_id :: consumer_id(), spec :: checkout_spec(), @@ -158,7 +158,6 @@ consumer_meta/0, consumer_id/0, client_msg/0, - indexed_msg/0, msg/0, msg_id/0, msg_seqno/0, @@ -289,17 +288,14 @@ apply(#{index := Idx} = Meta, case Cons0 of #{ConsumerId := #consumer{checked_out = Checked0} = Con0} when is_map_key(MsgId, Checked0) -> - %% construct an index message with the current raft index + %% construct a message with the current raft index %% and update delivery count before adding it to the message queue Header = update_header(delivery_count, fun incr/1, 1, Header0), - State0 = add_bytes_return(Header, State00), - - IdxMsg = ?INDEX_MSG(Idx, ?DISK_MSG(Header)), Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), credit = increase_credit(Con0, 1)}, State1 = State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), - messages = lqueue:in(IdxMsg, Messages), + messages = lqueue:in(?MSG(Idx, Header), Messages), enqueue_count = EnqCount + 1}, State2 = update_or_remove_sub(Meta, ConsumerId, Con, State1), {State, Ret, Effs} = checkout(Meta, State0, State2, []), @@ -392,7 +388,7 @@ apply(#{index := Index, State1 = update_consumer(Meta, ConsumerId, ConsumerMeta, {once, 1, simple_prefetch}, 0, State0), - {success, _, MsgId, Msg, ExpiredMsg, State2, Effects0} = + {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} = checkout_one(Meta, false, State1, []), {State4, Effects1} = case Settlement of unsettled -> @@ -405,15 +401,10 @@ apply(#{index := Index, State2), {State3, SettleEffects ++ Effects0} end, - {Reply, Effects2} = - case Msg of - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)) -> - {'$ra_no_reply', - [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | - Effects1]} - end, + Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | Effects1], {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, Effects2), + Reply = '$ra_no_reply', case {DroppedMsg, ExpiredMsg} of {false, false} -> {State, Reply, Effects}; @@ -441,15 +432,11 @@ apply(#{index := Index}, #purge{}, ra_indexes = Indexes0, dlx = DlxState} = State0) -> NumReady = messages_ready(State0), - Indexes1 = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc0); - (_, Acc) -> - Acc + Indexes1 = lists:foldl(fun(?MSG(I, _), Acc0) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc0) end, Indexes0, lqueue:to_list(Returns)), - Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc0) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc0); - (_, Acc) -> - Acc 
+ Indexes = lists:foldl(fun(?MSG(I, _), Acc0) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc0) end, Indexes1, lqueue:to_list(Messages)), {NumDlx, _} = rabbit_fifo_dlx:stat(DlxState), State1 = State0#?MODULE{ra_indexes = Indexes, @@ -624,19 +611,19 @@ apply(_Meta, Cmd, State) -> {State, ok, []}. convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); + ?MSG(RaftIdx, Header); convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)); + ?MSG(RaftIdx, Header); convert_msg({'$empty_msg', Header}) -> %% dummy index - ?INDEX_MSG(undefined, ?DISK_MSG(Header)); + ?MSG(undefined, Header); convert_msg({'$prefix_msg', Header}) -> %% dummy index - ?INDEX_MSG(undefined, ?DISK_MSG(Header)); + ?MSG(undefined, Header); convert_msg({Header, empty}) -> convert_msg(Header); convert_msg(Header) when ?IS_HEADER(Header) -> - ?INDEX_MSG(undefined, ?DISK_MSG(Header)). + ?MSG(undefined, Header). convert_consumer({ConsumerTag, Pid}, CV1) -> Meta = element(2, CV1), @@ -681,8 +668,8 @@ convert_v1_to_v2(V1State0) -> V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> lqueue:in(convert_msg(Hdr), Acc) end, lqueue:new(), PrefReturns), - MessagesV2 = lqueue:fold(fun ({_, IdxMsg}, Acc) -> - lqueue:in(convert_msg(IdxMsg), Acc) + MessagesV2 = lqueue:fold(fun ({_, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) end, V2PrefMsgs, MessagesV1), ReturnsV2 = lqueue:fold(fun ({_SeqId, Msg}, Acc) -> lqueue:in(convert_msg(Msg), Acc) @@ -939,7 +926,7 @@ get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> case Consumers of #{Cid := #consumer{checked_out = Checked}} -> [begin - ?INDEX_MSG(I, ?DISK_MSG(H)) = maps:get(K, Checked), + ?MSG(I, H) = maps:get(K, Checked), {K, {I, H}} end || K <- lists:seq(From, To), maps:is_key(K, Checked)]; _ -> @@ -995,7 +982,7 @@ handle_aux(leader, cast, {#return{msg_ids = MsgIds, #{ConsumerId := #consumer{checked_out = Checked}} -> {Log, ToReturn} = maps:fold( - fun (MsgId, ?INDEX_MSG(Idx, ?DISK_MSG(Header)), {L0, Acc}) -> + fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> %% it is possible this is not found if the consumer %% crashed and the message got removed case ra_log:fetch(Idx, L0) of @@ -1004,8 +991,7 @@ handle_aux(leader, cast, {#return{msg_ids = MsgIds, #enqueue{msg = M} -> M; #requeue{msg = M} -> M end, - IdxMsg = ?INDEX_MSG(Idx, ?TUPLE(Header, Msg)), - {L, [{MsgId, IdxMsg} | Acc]}; + {L, [{MsgId, Idx, Header, Msg} | Acc]}; {undefined, L} -> {L, Acc} end @@ -1069,14 +1055,14 @@ handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, Log0, MacState) -> case rabbit_fifo:query_peek(Pos, MacState) of - {ok, ?INDEX_MSG(Idx, ?DISK_MSG(Header))} -> + {ok, ?MSG(Idx, Header)} -> %% need to re-hydrate from the log - {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), - Msg = case Cmd of - #enqueue{msg = M} -> M; - #requeue{msg = M} -> M - end, - {reply, {ok, {Header, Msg}}, Aux0, Log}; + {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), + Msg = case Cmd of + #enqueue{msg = M} -> M; + #requeue{msg = M} -> M + end, + {reply, {ok, {Header, Msg}}, Aux0, Log}; Err -> {reply, Err, Aux0, Log0} end; @@ -1241,9 +1227,9 @@ query_peek(Pos, State0) when Pos > 0 -> case take_next_msg(State0) of empty -> {error, no_message_at_pos}; - {IdxMsg, _State} + {Msg, _State} when Pos == 1 -> - {ok, IdxMsg}; + {ok, Msg}; {_Msg, State} -> query_peek(Pos-1, State) end. 
@@ -1469,13 +1455,13 @@ decr_total(#?MODULE{messages_total = Tot} = State) -> drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> case take_next_msg(State0) of - {?INDEX_MSG(Idx, ?DISK_MSG(Header)) = IdxMsg, State1} -> + {?MSG(Idx, Header) = Msg, State1} -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), State2 = State1#?MODULE{ra_indexes = Indexes}, State3 = decr_total(add_bytes_drop(Header, State2)), #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, dlx = DlxState} = State = State3, - {_, DlxEffects} = rabbit_fifo_dlx:discard([IdxMsg], maxlen, DLH, DlxState), + {_, DlxEffects} = rabbit_fifo_dlx:discard([Msg], maxlen, DLH, DlxState), {State, DlxEffects ++ Effects}; empty -> {State0, Effects} @@ -1551,7 +1537,7 @@ maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, % direct enqueue without tracking Size = message_size(RawMsg), Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), - Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), + Msg = ?MSG(RaftIdx, Header), State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, enqueue_count = EnqCount + 1, messages_total = Total + 1, @@ -1575,7 +1561,7 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, % it is the next expected seqno Size = message_size(RawMsg), Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), - Msg = ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), + Msg = ?MSG(RaftIdx, Header), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, MsgCache = case can_immediately_deliver(State0) of true -> @@ -1635,10 +1621,8 @@ complete(Meta, ConsumerId, DiscardedMsgIds, SettledSize = lists:foldl(fun(Msg, Acc) -> get_header(size, get_msg_header(Msg)) + Acc end, 0, DiscardedMsgs), - Indexes = lists:foldl(fun (?INDEX_MSG(I, _), Acc) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc); - (_, Acc) -> - Acc + Indexes = lists:foldl(fun(?MSG(I, _), Acc) when is_integer(I) -> + rabbit_fifo_index:delete(I, Acc) end, Indexes0, DiscardedMsgs), State1#?MODULE{ra_indexes = Indexes, msg_bytes_checkout = BytesCheckout - SettledSize, @@ -1718,8 +1702,8 @@ find_next_cursor(Smallest, Cursors0, Potential) -> {Potential, Cursors0} end. -update_msg_header(Key, Fun, Def, ?INDEX_MSG(Idx, ?DISK_MSG(Header))) -> - ?INDEX_MSG(Idx, ?DISK_MSG(update_header(Key, Fun, Def, Header))). +update_msg_header(Key, Fun, Def, ?MSG(Idx, Header)) -> + ?MSG(Idx, update_header(Key, Fun, Def, Header)). update_header(Key, UpdateFun, Default, Header) when is_integer(Header) -> @@ -1727,7 +1711,7 @@ update_header(Key, UpdateFun, Default, Header) update_header(Key, UpdateFun, Default, Header) -> maps:update_with(Key, UpdateFun, Default, Header). -get_msg_header(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> +get_msg_header(?MSG(_Idx, Header)) -> Header. get_header(size, Header) @@ -1790,7 +1774,7 @@ checkout(#{index := Index} = Meta, end. 
checkout0(Meta, {success, ConsumerId, MsgId, - ?INDEX_MSG(RaftIdx, ?DISK_MSG(Header)), ExpiredMsg, State, Effects}, + ?MSG(RaftIdx, Header), ExpiredMsg, State, Effects}, SendAcc0) when is_integer(RaftIdx) -> DelMsg = {RaftIdx, {MsgId, Header}}, SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of @@ -1875,11 +1859,11 @@ take_next_msg(#?MODULE{returns = Returns0, case lqueue:out(Messages0) of {empty, _} -> empty; - {{value, ?INDEX_MSG(RaftIdx, _) = IndexMsg}, Messages} -> + {{value, ?MSG(RaftIdx, _) = Msg}, Messages} -> %% add index here Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), - {IndexMsg, State#?MODULE{messages = Messages, - ra_indexes = Indexes}} + {Msg, State#?MODULE{messages = Messages, + ra_indexes = Indexes}} end end. @@ -1896,11 +1880,11 @@ delivery_effect({CTag, CPid}, [{Idx, {MsgId, Header}}], #?MODULE{msg_cache = {Idx, RawMsg}}) -> {send_msg, CPid, {delivery, CTag, [{MsgId, {Header, RawMsg}}]}, [local, ra_event]}; -delivery_effect({CTag, CPid}, IdxMsgs, _State) -> - {RaftIdxs, Data} = lists:unzip(IdxMsgs), +delivery_effect({CTag, CPid}, Msgs, _State) -> + {RaftIdxs, Data} = lists:unzip(Msgs), {log, RaftIdxs, fun(Log) -> - Msgs = lists:zipwith( + DelMsgs = lists:zipwith( fun (#enqueue{msg = Msg}, {MsgId, Header}) -> {MsgId, {Header, Msg}}; @@ -1908,7 +1892,7 @@ delivery_effect({CTag, CPid}, IdxMsgs, _State) -> {MsgId, Header}) -> {MsgId, {Header, Msg}} end, Log, Data), - [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}] + [{send_msg, CPid, {delivery, CTag, DelMsgs}, [local, ra_event]}] end, {local, node(CPid)}}. @@ -1993,34 +1977,31 @@ expire_msgs(RaCmdTs, Result, State, Effects) -> %% Therefore, first lqueue:get/2 to check whether we need to lqueue:out/1 %% because the latter can be much slower than the former. case get_next_msg(State) of - ?INDEX_MSG(_Idx, ?DISK_MSG(#{expiry := Expiry} = Header)) + ?MSG(_, #{expiry := Expiry}) when RaCmdTs >= Expiry -> - expire(RaCmdTs, Header, State, Effects); + expire(RaCmdTs, State, Effects); _ -> {Result, State, Effects} end. -expire(RaCmdTs, Header, State0, Effects) -> - {Msg, State1} = take_next_msg(State0), - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState0, - ra_indexes = Indexes0} = State2 = add_bytes_drop(Header, State1), +expire(RaCmdTs, State0, Effects) -> + {?MSG(Idx, Header) = Msg, + #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0, + ra_indexes = Indexes0, + messages_total = Tot, + msg_bytes_enqueue = MsgBytesEnqueue} = State1} = take_next_msg(State0), {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, DLH, DlxState0), - State3 = State2#?MODULE{dlx = DlxState}, - State5 = case Msg of - ?INDEX_MSG(Idx, ?DISK_MSG(_Header)) - when is_integer(Idx) -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State3#?MODULE{ra_indexes = Indexes}; - ?DISK_MSG(_) -> - State3 - end, - State = decr_total(State5), + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State = State1#?MODULE{dlx = DlxState, + ra_indexes = Indexes, + messages_total = Tot - 1, + msg_bytes_enqueue = MsgBytesEnqueue - get_header(size, Header)}, expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). timer_effect(RaCmdTs, State, Effects) -> T = case get_next_msg(State) of - ?INDEX_MSG(_, ?DISK_MSG(#{expiry := Expiry})) + ?MSG(_, #{expiry := Expiry}) when is_number(Expiry) -> %% Next message contains 'expiry' header. %% (Re)set timer so that mesage will be dropped or dead-lettered on time. 
@@ -2356,7 +2337,7 @@ smallest_raft_index(#?MODULE{messages = Messages, dlx = DlxState}) -> SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), SmallestMsgsRaIdx = case lqueue:get(Messages, empty) of - ?INDEX_MSG(I, _) -> + ?MSG(I, _) -> I; _ -> undefined @@ -2364,7 +2345,7 @@ smallest_raft_index(#?MODULE{messages = Messages, SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). -make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))}], Acc) -> +make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> lists:reverse([{append, #requeue{consumer_id = ConsumerId, index = Idx, @@ -2373,7 +2354,7 @@ make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))}] msg = Msg}, Notify} | Acc]); -make_requeue(ConsumerId, Notify, [{MsgId, ?INDEX_MSG(Idx, ?TUPLE(Header, Msg))} | Rem], Acc) -> +make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> make_requeue(ConsumerId, Notify, Rem, [{append, #requeue{consumer_id = ConsumerId, diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 77b56e3c208c..0fab40be796b 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -1,8 +1,10 @@ %% macros for memory optimised tuple structures +%% [A|B] saves 1 byte compared to {A,B} -define(TUPLE(A, B), [A | B]). --define(DISK_MSG(Header), Header). --define(INDEX_MSG(Index, Msg), ?TUPLE(Index, Msg)). +%% We only hold Raft index and message header in memory. +%% Raw message data is always stored on disk. +-define(MSG(Index, Header), ?TUPLE(Index, Header)). -define(IS_HEADER(H), is_integer(H) orelse @@ -39,16 +41,13 @@ %% Value is determined by per-queue or per-message message TTL. %% If it only contains the size it can be condensed to an integer only --type msg() :: ?DISK_MSG(msg_header()). -%% message with a header map. - -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type indexed_msg() :: tuple(ra:index(), msg_header()). +-type msg() :: tuple(ra:index(), msg_header()). --type delivery_msg() :: {msg_id(), {msg_header(), term()}}. -%% A tuple consisting of the message id and the headered message. +-type delivery_msg() :: {msg_id(), {msg_header(), raw_msg()}}. +%% A tuple consisting of the message id, and the headered message. -type consumer_tag() :: binary(). 
%% An arbitrary binary tag used to distinguish between different consumers @@ -105,7 +104,7 @@ {cfg = #consumer_cfg{}, status = up :: up | suspected_down | cancelled | waiting, next_msg_id = 0 :: msg_id(), % part of snapshot data - checked_out = #{} :: #{msg_id() => indexed_msg()}, + checked_out = #{} :: #{msg_id() => msg()}, %% max number of messages that can be sent %% decremented for each delivery credit = 0 : non_neg_integer(), @@ -161,7 +160,7 @@ -record(rabbit_fifo, {cfg :: #cfg{}, % unassigned messages - messages = lqueue:new() :: lqueue:lqueue(indexed_msg()), + messages = lqueue:new() :: lqueue:lqueue(msg()), % messages_total = 0 :: non_neg_integer(), % queue of returned msg_in_ids - when checking out it picks from diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index e942ef1e63bd..3fedcaef7427 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -86,7 +86,7 @@ stat(#?MODULE{consumer = Con, apply(_Meta, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, #?MODULE{consumer = #dlx_consumer{checked_out = Checked0}} = State0) -> Acked = maps:with(MsgIds, Checked0), - State = maps:fold(fun(MsgId, ?TUPLE(_Rsn, ?INDEX_MSG(Idx, ?DISK_MSG(_)) = Msg), + State = maps:fold(fun(MsgId, ?TUPLE(_Rsn, ?MSG(Idx, _) = Msg), #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, msg_bytes_checkout = BytesCheckout, ra_indexes = Indexes0} = S) -> @@ -133,37 +133,35 @@ apply(_, Cmd, DLH, State) -> rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", [Cmd, DLH]), {State, []}. --spec discard([indexed_msg()], rabbit_dead_letter:reason(), +-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. discard(Msgs, Reason, undefined, State) -> {State, [{mod_call, rabbit_global_counters, messages_dead_lettered, [Reason, rabbit_quorum_queue, disabled, length(Msgs)]}]}; discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> - RaftIdxs = lists:map(fun(?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - RaftIdx - end, Msgs0), - Effect = {log, RaftIdxs, + Idxs = [I || ?MSG(I, _) <- Msgs0], + Effect = {log, Idxs, fun (Log) -> - Lookup = maps:from_list(lists:zip(RaftIdxs, Log)), - Msgs = lists:map(fun(?INDEX_MSG(RaftIdx, ?DISK_MSG(_Header))) -> - {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup), - Msg - end, Msgs0), + Lookup = maps:from_list(lists:zip(Idxs, Log)), + Msgs = [begin + {enqueue, _, _, Msg} = maps:get(Idx, Lookup), + Msg + end || ?MSG(Idx, _) <- Msgs0], [{mod_call, Mod, Fun, Args ++ [Reason, Msgs]}] end}, {State, [Effect]}; discard(Msgs, Reason, at_least_once, State0) when Reason =/= maxlen -> - State = lists:foldl(fun (?INDEX_MSG(Idx, ?DISK_MSG(_Header)) = Msg0, - #?MODULE{discards = D0, - msg_bytes = B0, - ra_indexes = I0} = S0) -> + State = lists:foldl(fun(?MSG(Idx, _) = Msg0, + #?MODULE{discards = D0, + msg_bytes = B0, + ra_indexes = I0} = S0) -> MsgSize = size_in_bytes(Msg0), %% Condense header to an integer representing the message size. %% We do not need delivery_count or expiry header fields anymore. %% This saves per-message memory usage. - Msg = ?INDEX_MSG(Idx, ?DISK_MSG(MsgSize)), + Msg = ?MSG(Idx, MsgSize), D = lqueue:in(?TUPLE(Reason, Msg), D0), B = B0 + MsgSize, I = rabbit_fifo_index:append(Idx, I0), @@ -181,7 +179,7 @@ checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> checkout(_, State) -> {State, []}. 
-checkout0({success, MsgId, ?TUPLE(Reason, ?INDEX_MSG(Idx, ?DISK_MSG(_Header))), State}, SendAcc) +checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, _)), State}, SendAcc) when is_integer(Idx) -> DelMsg = {Idx, {Reason, MsgId}}, checkout0(checkout_one(State), [DelMsg | SendAcc]); @@ -220,7 +218,7 @@ checkout_one(#?MODULE{discards = Discards0, State0 end. -size_in_bytes(?INDEX_MSG(_Idx, ?DISK_MSG(Header))) -> +size_in_bytes(?MSG(_, Header)) -> rabbit_fifo:get_header(size, Header). %% returns at most one delivery effect because there is only one consumer diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index e191b20f0355..6be15d7de333 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -4,20 +4,14 @@ %% creates only a single consumer to this quorum queue's discards queue. pid :: pid(), prefetch :: non_neg_integer(), - checked_out = #{} :: #{msg_id() => tuple( - rabbit_dead_letter:reason(), - indexed_msg() - )}, + checked_out = #{} :: #{msg_id() => tuple(rabbit_dead_letter:reason(), msg())}, next_msg_id = 0 :: msg_id() }). -record(rabbit_fifo_dlx,{ consumer = undefined :: #dlx_consumer{} | undefined, %% Queue of dead-lettered messages. - discards = lqueue:new() :: lqueue:lqueue(tuple( - rabbit_dead_letter:reason(), - indexed_msg() - )), + discards = lqueue:new() :: lqueue:lqueue(tuple(rabbit_dead_letter:reason(), msg())), %% Raft indexes of messages in both discards queue and dlx_consumer's checked_out map %% so that we get the smallest ra index in O(1). ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 73a079f3db2d..1246476acf26 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -206,7 +206,7 @@ last_consumer_wins(_Config) -> ok. make_msg(RaftIdx) -> - ?INDEX_MSG(RaftIdx, ?DISK_MSG(1)). + ?MSG(RaftIdx, _Bytes = 1). meta(Idx) -> #{index => Idx, From fd470b801543c2adfc0b8dc6a442bce01eda11cc Mon Sep 17 00:00:00 2001 From: David Ansari Date: Tue, 22 Feb 2022 21:25:00 +0100 Subject: [PATCH 87/97] Reduce per-message memory usage if message TTL is set Decrease memory usage by 6 bytes per message if per-message or per-queue message TTL is set: 1> erts_debug:size(#{size => 1, expiry => 1000}). 8 2> erts_debug:size([1|1000]). 2 While we could always use a record for the header, we do want the flexibility of a map. --- deps/rabbit/src/rabbit_fifo.erl | 51 ++++++++++++++++++++------ deps/rabbit/src/rabbit_fifo.hrl | 8 +++- deps/rabbit/src/rabbit_fifo_dlx.erl | 6 +-- deps/rabbit/test/rabbit_fifo_SUITE.erl | 44 ++++++++++++++++++++++ 4 files changed, 93 insertions(+), 16 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 2c882c96954e..3f6edfa34980 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -73,6 +73,10 @@ make_garbage_collection/0 ]). +-ifdef(TEST). +-export([update_header/4]). +-endif. + -define(SETTLE_V2, '$s'). -define(RETURN_V2, '$r'). -define(DISCARD_V2, '$d'). @@ -1705,21 +1709,40 @@ find_next_cursor(Smallest, Cursors0, Potential) -> update_msg_header(Key, Fun, Def, ?MSG(Idx, Header)) -> ?MSG(Idx, update_header(Key, Fun, Def, Header)). 
+update_header(expiry, _, Expiry, Size) + when is_integer(Size) -> + ?TUPLE(Size, Expiry); +update_header(Key, UpdateFun, Default, Size) + when is_integer(Size) -> + update_header(Key, UpdateFun, Default, #{size => Size}); +update_header(Key, UpdateFun, Default, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + update_header(Key, UpdateFun, Default, #{size => Size, + expiry => Expiry}); update_header(Key, UpdateFun, Default, Header) - when is_integer(Header) -> - update_header(Key, UpdateFun, Default, #{size => Header}); -update_header(Key, UpdateFun, Default, Header) -> + when is_map(Header), is_map_key(size, Header) -> maps:update_with(Key, UpdateFun, Default, Header). get_msg_header(?MSG(_Idx, Header)) -> Header. -get_header(size, Header) - when is_integer(Header) -> - Header; -get_header(_Key, Header) when is_integer(Header) -> +get_header(size, Size) + when is_integer(Size) -> + Size; +get_header(_Key, Size) + when is_integer(Size) -> undefined; -get_header(Key, Header) when is_map(Header) -> +get_header(size, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + Size; +get_header(expiry, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + Expiry; +get_header(_Key, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + undefined; +get_header(Key, Header) + when is_map(Header) andalso is_map_key(size, Header) -> maps:get(Key, Header, undefined). return_one(Meta, MsgId, Msg0, @@ -1977,8 +2000,11 @@ expire_msgs(RaCmdTs, Result, State, Effects) -> %% Therefore, first lqueue:get/2 to check whether we need to lqueue:out/1 %% because the latter can be much slower than the former. case get_next_msg(State) of + ?MSG(_, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry), RaCmdTs >= Expiry -> + expire(RaCmdTs, State, Effects); ?MSG(_, #{expiry := Expiry}) - when RaCmdTs >= Expiry -> + when is_integer(Expiry), RaCmdTs >= Expiry -> expire(RaCmdTs, State, Effects); _ -> {Result, State, Effects} @@ -2001,11 +2027,14 @@ expire(RaCmdTs, State0, Effects) -> timer_effect(RaCmdTs, State, Effects) -> T = case get_next_msg(State) of - ?MSG(_, #{expiry := Expiry}) - when is_number(Expiry) -> + ?MSG(_, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> %% Next message contains 'expiry' header. %% (Re)set timer so that mesage will be dropped or dead-lettered on time. max(0, Expiry - RaCmdTs); + ?MSG(_, #{expiry := Expiry}) + when is_integer(Expiry) -> + max(0, Expiry - RaCmdTs); _ -> %% Next message does not contain 'expiry' header. %% Therefore, do not set timer or cancel timer if it was set. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 0fab40be796b..d10ad0d84c9f 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -7,7 +7,8 @@ -define(MSG(Index, Header), ?TUPLE(Index, Header)). -define(IS_HEADER(H), - is_integer(H) orelse + (is_integer(H) andalso H >= 0) orelse + is_list(H) orelse (is_map(H) andalso is_map_key(size, H))). -type tuple(A, B) :: nonempty_improper_list(A, B). @@ -31,15 +32,18 @@ %% same process -type msg_header() :: msg_size() | + tuple(msg_size(), milliseconds()) | #{size := msg_size(), delivery_count => non_neg_integer(), expiry => milliseconds()}. %% The message header: +%% size: The size of the message payload in bytes. %% delivery_count: the number of unsuccessful delivery attempts. %% A non-zero value indicates a previous attempt. %% expiry: Epoch time in ms when a message expires. Set during enqueue. 
%% Value is determined by per-queue or per-message message TTL. -%% If it only contains the size it can be condensed to an integer only +%% If it contains only the size it can be condensed to an integer. +%% If it contains only the size and expiry it can be condensed to an improper list. -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 3fedcaef7427..82013cd743b9 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -159,8 +159,7 @@ discard(Msgs, Reason, at_least_once, State0) ra_indexes = I0} = S0) -> MsgSize = size_in_bytes(Msg0), %% Condense header to an integer representing the message size. - %% We do not need delivery_count or expiry header fields anymore. - %% This saves per-message memory usage. + %% We need neither delivery_count nor expiry anymore. Msg = ?MSG(Idx, MsgSize), D = lqueue:in(?TUPLE(Reason, Msg), D0), B = B0 + MsgSize, @@ -218,7 +217,8 @@ checkout_one(#?MODULE{discards = Discards0, State0 end. -size_in_bytes(?MSG(_, Header)) -> +size_in_bytes(MSG) -> + Header = rabbit_fifo:get_msg_header(MSG), rabbit_fifo:get_header(size, Header). %% returns at most one delivery effect because there is only one consumer diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 6ab1aa4159a8..be5f0a2b5f68 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -1825,6 +1825,50 @@ expire_message_should_emit_release_cursor_test(_) -> ?ASSERT_EFF({release_cursor, 1, _}, Effs), ok. +header_test(_) -> + H0 = Size = 5, + ?assertEqual(Size, rabbit_fifo:get_header(size, H0)), + ?assertEqual(undefined, rabbit_fifo:get_header(expiry, H0)), + ?assertEqual(undefined, rabbit_fifo:get_header(delivery_count, H0)), + + H1 = rabbit_fifo:update_header(delivery_count, fun(C) -> C+1 end, 1, H0), + ?assertEqual(#{size => Size, + delivery_count => 1}, H1), + ?assertEqual(Size, rabbit_fifo:get_header(size, H1)), + ?assertEqual(undefined, rabbit_fifo:get_header(expiry, H1)), + ?assertEqual(1, rabbit_fifo:get_header(delivery_count, H1)), + + Expiry = 1000, + H2 = rabbit_fifo:update_header(expiry, fun(Ts) -> Ts end, Expiry, H0), + ?assertEqual([Size | Expiry], H2), + ?assertEqual(Size, rabbit_fifo:get_header(size, H2)), + ?assertEqual(Expiry, rabbit_fifo:get_header(expiry, H2)), + ?assertEqual(undefined, rabbit_fifo:get_header(delivery_count, H2)), + + H3 = rabbit_fifo:update_header(delivery_count, fun(C) -> C+1 end, 1, H2), + ?assertEqual(#{size => Size, + expiry => Expiry, + delivery_count => 1}, H3), + ?assertEqual(Size, rabbit_fifo:get_header(size, H3)), + ?assertEqual(Expiry, rabbit_fifo:get_header(expiry, H3)), + ?assertEqual(1, rabbit_fifo:get_header(delivery_count, H3)), + + H4 = rabbit_fifo:update_header(delivery_count, fun(C) -> C+1 end, 1, H3), + ?assertEqual(#{size => Size, + expiry => Expiry, + delivery_count => 2}, H4), + ?assertEqual(2, rabbit_fifo:get_header(delivery_count, H4)), + + H5 = rabbit_fifo:update_header(expiry, fun(Ts) -> Ts end, Expiry, H1), + ?assertEqual(#{size => Size, + expiry => Expiry, + delivery_count => 1}, H5), + ?assertEqual(Size, rabbit_fifo:get_header(size, H5)), + ?assertEqual(Expiry, rabbit_fifo:get_header(expiry, H5)), + ?assertEqual(1, rabbit_fifo:get_header(delivery_count, H5)), + ?assertEqual(undefined, rabbit_fifo:get_header(blah, H5)), + ok. + %% Utility init(Conf) -> rabbit_fifo:init(Conf). 
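A note on the new header shapes, for orientation outside the patches: the header introduced above can be a bare size integer, the improper list [Size | Expiry], or a map once any other key such as delivery_count is present. Below is a minimal, self-contained sketch of reading such a header without the rabbit_fifo.hrl macros; the module and function names are illustrative only and not part of the patch set.

-module(header_shapes_sketch).
-export([size_of/1, expiry_of/1]).

%% bare integer: only the payload size is known
size_of(Size) when is_integer(Size) -> Size;
%% improper list [Size | Expiry]: size plus expiry in two words
size_of([Size | Expiry]) when is_integer(Size), is_integer(Expiry) -> Size;
%% map form: used as soon as any other key (e.g. delivery_count) appears
size_of(#{size := Size}) -> Size.

expiry_of(Size) when is_integer(Size) -> undefined;
expiry_of([_Size | Expiry]) when is_integer(Expiry) -> Expiry;
expiry_of(Header) when is_map(Header) -> maps:get(expiry, Header, undefined).

For example, expiry_of([5 | 1000]) returns 1000 while expiry_of(5) returns undefined, mirroring what get_header/2 above does for the expiry key.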
From 57f921178c06dc7cf212818d24b68cb4ca7eb143 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Wed, 23 Feb 2022 10:26:00 +0100 Subject: [PATCH 88/97] Remove prefix message code since they do not exist anymore --- deps/rabbit/src/rabbit_fifo.erl | 18 +++++------------- deps/rabbit/src/rabbit_fifo.hrl | 19 +++++++------------ deps/rabbit/src/rabbit_fifo_dlx.erl | 25 +++++++++---------------- deps/rabbit/src/rabbit_fifo_dlx.hrl | 5 +---- deps/rabbit/src/rabbit_fifo_index.erl | 3 +-- 5 files changed, 23 insertions(+), 47 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 3f6edfa34980..f27481979206 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -51,8 +51,6 @@ query_notify_decorators_info/1, usage/1, - zero/1, - %% misc dehydrate_state/1, normalize/1, @@ -209,9 +207,6 @@ update_config(Conf, State) -> msg_ttl = MsgTTL}, last_active = LastActive}. -zero(_) -> - 0. - % msg_ids are scoped per consumer % ra_indexes holds all raft indexes for enqueues currently on queue -spec apply(ra_machine:command_meta_data(), command(), state()) -> @@ -223,7 +218,6 @@ apply(Meta, #enqueue{pid = From, seq = Seq, apply(_Meta, #register_enqueuer{pid = Pid}, #?MODULE{enqueuers = Enqueuers0, cfg = #cfg{overflow_strategy = Overflow}} = State0) -> - State = case maps:is_key(Pid, Enqueuers0) of true -> %% if the enqueuer exits just echo the overflow state @@ -247,7 +241,6 @@ apply(Meta, Con0, [], State); _ -> {State, ok} - end; apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, #?MODULE{consumers = Cons, @@ -257,11 +250,11 @@ apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, #{ConsumerId := #consumer{checked_out = Checked} = Con} -> % Publishing to dead-letter exchange must maintain same order as messages got rejected. DiscardMsgs = lists:filtermap(fun(Id) -> - case maps:find(Id, Checked) of - {ok, Msg} -> - {true, Msg}; - error -> - false + case maps:get(Id, Checked, undefined) of + undefined -> + false; + Msg -> + {true, Msg} end end, MsgIds), {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, DLH, DlxState0), @@ -1757,7 +1750,6 @@ return_one(Meta, MsgId, Msg0, Header = get_msg_header(Msg), case get_header(delivery_count, Header) of DeliveryCount when DeliveryCount > DeliveryLimit -> - %% TODO: don't do for prefix msgs {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), State1 = State0#?MODULE{dlx = DlxState}, State = complete(Meta, ConsumerId, [MsgId], Con0, State1), diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index d10ad0d84c9f..7e65e94cee0a 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -18,9 +18,6 @@ -type raw_msg() :: term(). %% The raw message. It is opaque to rabbit_fifo. -% a queue scoped monotonically incrementing integer used to enforce order -% in the unassigned messages map - -type msg_id() :: non_neg_integer(). %% A consumer-scoped monotonically incrementing integer included with a %% {@link delivery/0.}. Used to settle deliveries using @@ -32,7 +29,7 @@ %% same process -type msg_header() :: msg_size() | - tuple(msg_size(), milliseconds()) | + tuple(msg_size(), Expiry :: milliseconds()) | #{size := msg_size(), delivery_count => non_neg_integer(), expiry => milliseconds()}. @@ -77,7 +74,6 @@ args => list()}. %% static meta data associated with a consumer - -type applied_mfa() :: {module(), atom(), list()}. 
% represents a partially applied module call @@ -133,7 +129,7 @@ %% it is useful to have a record of when this was blocked %% so that we can retry sending the block effect if %% the publisher did not receive the initial one - blocked :: undefined | ra:index(), + blocked :: option(ra:index()), unused_1, unused_2 }). @@ -151,8 +147,8 @@ consumer_strategy = competing :: consumer_strategy(), %% the maximum number of unsuccessful delivery attempts permitted delivery_limit :: option(non_neg_integer()), - expires :: undefined | milliseconds(), - msg_ttl :: undefined | milliseconds(), + expires :: option(milliseconds()), + msg_ttl :: option(milliseconds()), unused_1, unused_2 }). @@ -165,7 +161,6 @@ {cfg :: #cfg{}, % unassigned messages messages = lqueue:new() :: lqueue:lqueue(msg()), - % messages_total = 0 :: non_neg_integer(), % queue of returned msg_in_ids - when checking out it picks from returns = lqueue:new() :: lqueue:lqueue(term()), @@ -181,7 +176,7 @@ % index when there are large gaps but should be faster than gb_trees % for normal appending operations as it's backed by a map ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), - %% A release cursor is essentially a snapshot for a past raft index + %% A release cursor is essentially a snapshot for a past raft index. %% Working assumption: Messages are consumed in a FIFO-ish order because %% the log is truncated only until the oldest message. release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, @@ -197,8 +192,8 @@ %% waiting consumers, one is picked active consumer is cancelled or dies %% used only when single active consumer is on waiting_consumers = [] :: [{consumer_id(), consumer()}], - last_active :: undefined | non_neg_integer(), - msg_cache :: undefined | {ra:index(), raw_msg()}, + last_active :: option(non_neg_integer()), + msg_cache :: option({ra:index(), raw_msg()}), unused_2 }). diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 82013cd743b9..164563a7b88d 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -114,14 +114,14 @@ apply(_, {dlx, #checkout{consumer = ConsumerPid, msg_bytes_checkout = BytesCheckout} = State0) -> %% Since we allow only a single consumer, the new consumer replaces the old consumer. %% All checked out messages to the old consumer need to be returned to the discards queue - %% such that these messages can be (eventually) re-delivered to the new consumer. + %% such that these messages will be re-delivered to the new consumer. %% When inserting back into the discards queue, we respect the original order in which messages %% were discarded. Checked0 = maps:to_list(CheckedOutOldConsumer), Checked1 = lists:keysort(1, Checked0), {Discards, BytesMoved} = lists:foldr( - fun({_Id, ?TUPLE(_Reason, IdxMsg) = Msg}, {D, B}) -> - {lqueue:in_r(Msg, D), B + size_in_bytes(IdxMsg)} + fun({_Id, ?TUPLE(_, Msg) = RsnMsg}, {D, B}) -> + {lqueue:in_r(RsnMsg, D), B + size_in_bytes(Msg)} end, {Discards0, 0}, Checked1), State = State0#?MODULE{consumer = #dlx_consumer{pid = ConsumerPid, prefetch = Prefetch}, @@ -182,14 +182,6 @@ checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, _)), State}, SendAcc) when is_integer(Idx) -> DelMsg = {Idx, {Reason, MsgId}}, checkout0(checkout_one(State), [DelMsg | SendAcc]); -checkout0({success, _MsgId, ?TUPLE(_Reason, ?TUPLE(_, _)), State}, SendAcc) -> - %% This is a prefix message which means we are recovering from a snapshot. - %% We know: - %% 1. 
This message was already delivered in the past, and - %% 2. The recovery Raft log ahead of this Raft command will defintely settle this message. - %% Therefore, here, we just check this message out to the consumer but do not re-deliver this message - %% so that we will end up with the correct and deterministic state once the whole recovery log replay is completed. - checkout0(checkout_one(State), SendAcc); checkout0(#?MODULE{consumer = #dlx_consumer{pid = Pid}} = State, SendAcc) -> Effects = delivery_effects(Pid, SendAcc), {State, Effects}. @@ -221,17 +213,16 @@ size_in_bytes(MSG) -> Header = rabbit_fifo:get_msg_header(MSG), rabbit_fifo:get_header(size, Header). -%% returns at most one delivery effect because there is only one consumer delivery_effects(_CPid, []) -> []; -delivery_effects(CPid, IdxMsgs0) -> - IdxMsgs = lists:reverse(IdxMsgs0), - {RaftIdxs, Data} = lists:unzip(IdxMsgs), +delivery_effects(CPid, Msgs0) -> + Msgs1 = lists:reverse(Msgs0), + {RaftIdxs, RsnIds} = lists:unzip(Msgs1), [{log, RaftIdxs, fun(Log) -> Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {Reason, MsgId}) -> {MsgId, {Reason, Msg}} - end, Log, Data), + end, Log, RsnIds), [{send_msg, CPid, {dlx_delivery, Msgs}, [ra_event]}] end}]. @@ -376,5 +367,7 @@ normalize(#?MODULE{discards = Discards, State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards)), ra_indexes = rabbit_fifo_index:normalize(Indexes)}. +-spec smallest_raft_index(state()) -> + option(non_neg_integer()). smallest_raft_index(#?MODULE{ra_indexes = Indexes}) -> rabbit_fifo_index:smallest(Indexes). diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index 6be15d7de333..dda2c2652847 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -1,7 +1,4 @@ -record(dlx_consumer,{ - %% We don't require a consumer tag because a consumer tag is a means to distinguish - %% multiple consumers in the same channel. The rabbit_fifo_dlx_worker channel like process however - %% creates only a single consumer to this quorum queue's discards queue. pid :: pid(), prefetch :: non_neg_integer(), checked_out = #{} :: #{msg_id() => tuple(rabbit_dead_letter:reason(), msg())}, @@ -9,7 +6,7 @@ }). -record(rabbit_fifo_dlx,{ - consumer = undefined :: #dlx_consumer{} | undefined, + consumer :: option(#dlx_consumer{}), %% Queue of dead-lettered messages. discards = lqueue:new() :: lqueue:lqueue(tuple(rabbit_dead_letter:reason(), msg())), %% Raft indexes of messages in both discards queue and dlx_consumer's checked_out map diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl index 67c2cdf6e665..f838620d49fb 100644 --- a/deps/rabbit/src/rabbit_fifo_index.erl +++ b/deps/rabbit/src/rabbit_fifo_index.erl @@ -80,11 +80,10 @@ delete(Key, #?MODULE{data = Data} = State) -> size(#?MODULE{data = Data}) -> maps:size(Data). --spec smallest(state()) -> undefined | integer(). +-spec smallest(state()) -> undefined | non_neg_integer(). smallest(#?MODULE{smallest = Smallest}) -> Smallest. - -spec map(fun(), state()) -> state(). map(F, #?MODULE{data = Data} = State) -> State#?MODULE{data = maps:map(F, Data)}. From 58603010192b50d4624bffa0fc68f27942236180 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Wed, 23 Feb 2022 16:33:03 +0100 Subject: [PATCH 89/97] Remove purge from rabbit_fifo_dlx because at-least-once dead-lettered messages are arguably consumer messages which should not be purged. 
According to the AMQP 0.9.1 purge spec: "This method removes all messages from a queue which are not awaiting acknowledgment." At-least-once dead-lettered messages are awaiting acks from target queues. --- deps/rabbit/src/rabbit_fifo.erl | 8 +++----- deps/rabbit/src/rabbit_fifo_dlx.erl | 17 ----------------- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 19 ------------------- 3 files changed, 3 insertions(+), 41 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index f27481979206..0abfe3cd0dcf 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -426,8 +426,8 @@ apply(#{index := Index}, #purge{}, #?MODULE{messages_total = Tot, returns = Returns, messages = Messages, - ra_indexes = Indexes0, - dlx = DlxState} = State0) -> + ra_indexes = Indexes0 + } = State0) -> NumReady = messages_ready(State0), Indexes1 = lists:foldl(fun(?MSG(I, _), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0) @@ -435,16 +435,14 @@ apply(#{index := Index}, #purge{}, Indexes = lists:foldl(fun(?MSG(I, _), Acc0) when is_integer(I) -> rabbit_fifo_index:delete(I, Acc0) end, Indexes1, lqueue:to_list(Messages)), - {NumDlx, _} = rabbit_fifo_dlx:stat(DlxState), State1 = State0#?MODULE{ra_indexes = Indexes, - dlx = rabbit_fifo_dlx:purge(DlxState), messages = lqueue:new(), messages_total = Tot - NumReady, returns = lqueue:new(), msg_bytes_enqueue = 0 }, Effects0 = [garbage_collection], - Reply = {purge, NumReady + NumDlx}, + Reply = {purge, NumReady}, {State, _, Effects} = evaluate_limit(Index, false, State0, State1, Effects0), update_smallest_raft_index(Index, Reply, State, Effects); diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 164563a7b88d..df2ba17cf2fc 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -16,7 +16,6 @@ checkout/2, state_enter/4, handle_aux/6, - purge/1, dehydrate/1, normalize/1, stat/1, @@ -339,22 +338,6 @@ handle_aux(leader, {dlx, setup}, Aux, QRes, at_least_once, State) -> handle_aux(_, _, Aux, _, _, _) -> Aux. --spec purge(state()) -> - state(). -purge(#?MODULE{consumer = Consumer0} = State) -> - Consumer = case Consumer0 of - undefined -> - undefined; - #dlx_consumer{} -> - Consumer0#dlx_consumer{checked_out = #{}} - end, - State#?MODULE{discards = lqueue:new(), - msg_bytes = 0, - msg_bytes_checkout = 0, - consumer = Consumer, - ra_indexes = rabbit_fifo_index:empty() - }. - -spec dehydrate(state()) -> state(). dehydrate(State) -> diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 1246476acf26..f3f78b21f100 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -24,7 +24,6 @@ groups() -> {tests, [], [handler_undefined, handler_at_most_once, discard_dlx_consumer, - purge, switch_strategies, last_consumer_wins]} ]. @@ -119,24 +118,6 @@ discard_dlx_consumer(_Config) -> ?assertEqual({1, 1}, rabbit_fifo_dlx:stat(S8)), ok. 
-purge(_Config) -> - Handler = at_least_once, - S0 = rabbit_fifo_dlx:init(), - Checkout = rabbit_fifo_dlx:make_checkout(self(), 1), - {S1, _} = rabbit_fifo_dlx:apply(meta(1), Checkout, Handler, S0), - Msgs = [make_msg(2), make_msg(3)], - {S2, _} = rabbit_fifo_dlx:discard(Msgs, because, Handler, S1), - {S3, _} = rabbit_fifo_dlx:checkout(Handler, S2), - ?assertMatch(#{num_discarded := 1, - num_discard_checked_out := 1}, rabbit_fifo_dlx:overview(S3)), - - S4 = rabbit_fifo_dlx:purge(S3), - ?assertEqual(#{num_discarded => 0, - num_discard_checked_out => 0, - discard_message_bytes => 0, - discard_checkout_message_bytes => 0}, rabbit_fifo_dlx:overview(S4)), - ok. - switch_strategies(_Config) -> QRes = #resource{virtual_host = <<"/">>, kind = queue, From 4e415f594b399ec95588a24567e51d9cb9cb8c5c Mon Sep 17 00:00:00 2001 From: David Ansari Date: Wed, 23 Feb 2022 17:16:36 +0100 Subject: [PATCH 90/97] Speed up purging quorum queues 1. If all messages in the queue are either in 'messages' or 'returns', (i.e. no message currently checked out to consumers) we can directly empty the index. 2. Even if there are consumers, there is no need to delete indexes from the 'messages' queue because we only append to the index once we move a message out of the 'messages' queue. 3. Use lqueue:fold/3 instead of lists:foldl/3 and lqueue:to_list/1 --- deps/rabbit/src/rabbit_fifo.erl | 32 ++++++++++++++--------- deps/rabbit/src/rabbit_fifo.hrl | 2 +- deps/rabbit/test/queue_parallel_SUITE.erl | 15 ++++++++++- 3 files changed, 35 insertions(+), 14 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 0abfe3cd0dcf..29330fb7d6d8 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -423,21 +423,29 @@ apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, {State2, Effs} = activate_next_consumer(State1, []), checkout(Meta, State0, State2, [{monitor, process, Pid} | Effs]); apply(#{index := Index}, #purge{}, - #?MODULE{messages_total = Tot, + #?MODULE{messages_total = Total, returns = Returns, - messages = Messages, ra_indexes = Indexes0 } = State0) -> NumReady = messages_ready(State0), - Indexes1 = lists:foldl(fun(?MSG(I, _), Acc0) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc0) - end, Indexes0, lqueue:to_list(Returns)), - Indexes = lists:foldl(fun(?MSG(I, _), Acc0) when is_integer(I) -> - rabbit_fifo_index:delete(I, Acc0) - end, Indexes1, lqueue:to_list(Messages)), + Indexes = case Total of + NumReady -> + %% All messages are either in 'messages' queue or 'returns' queue. + %% No message is awaiting acknowledgement. + %% Optimization: empty all 'ra_indexes'. + rabbit_fifo_index:empty(); + _ -> + %% Some messages are checked out to consumers awaiting acknowledgement. + %% Therefore we cannot empty all 'ra_indexes'. + %% We only need to delete the indexes from the 'returns' queue because + %% messages of the 'messages' queue are not part of the 'ra_indexes'. 
+ lqueue:fold(fun(?MSG(I, _), Acc) -> + rabbit_fifo_index:delete(I, Acc) + end, Indexes0, Returns) + end, State1 = State0#?MODULE{ra_indexes = Indexes, messages = lqueue:new(), - messages_total = Tot - NumReady, + messages_total = Total - NumReady, returns = lqueue:new(), msg_bytes_enqueue = 0 }, @@ -1616,7 +1624,7 @@ complete(Meta, ConsumerId, DiscardedMsgIds, SettledSize = lists:foldl(fun(Msg, Acc) -> get_header(size, get_msg_header(Msg)) + Acc end, 0, DiscardedMsgs), - Indexes = lists:foldl(fun(?MSG(I, _), Acc) when is_integer(I) -> + Indexes = lists:foldl(fun(?MSG(I, _), Acc) -> rabbit_fifo_index:delete(I, Acc) end, Indexes0, DiscardedMsgs), State1#?MODULE{ra_indexes = Indexes, @@ -2355,8 +2363,8 @@ smallest_raft_index(#?MODULE{messages = Messages, ra_indexes = Indexes, dlx = DlxState}) -> SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), - SmallestMsgsRaIdx = case lqueue:get(Messages, empty) of - ?MSG(I, _) -> + SmallestMsgsRaIdx = case lqueue:get(Messages, undefined) of + ?MSG(I, _) when is_integer(I) -> I; _ -> undefined diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 7e65e94cee0a..06ff722c26a6 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -45,7 +45,7 @@ -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type msg() :: tuple(ra:index(), msg_header()). +-type msg() :: tuple(option(ra:index()), msg_header()). -type delivery_msg() :: {msg_id(), {msg_header(), raw_msg()}}. %% A tuple consisting of the message id, and the headered message. diff --git a/deps/rabbit/test/queue_parallel_SUITE.erl b/deps/rabbit/test/queue_parallel_SUITE.erl index f8eb9191aa8a..95543db0f29c 100644 --- a/deps/rabbit/test/queue_parallel_SUITE.erl +++ b/deps/rabbit/test/queue_parallel_SUITE.erl @@ -14,7 +14,7 @@ -compile(export_all). --define(TIMEOUT, 30000). +-define(TIMEOUT, 30_000). -import(quorum_queue_utils, [wait_for_messages/2]). @@ -46,6 +46,7 @@ groups() -> consume_and_multiple_nack, basic_cancel, purge, + purge_no_consumer, basic_recover, delete_immediately_by_resource ], @@ -596,6 +597,18 @@ purge(Config) -> rabbit_ct_client_helpers:close_channel(Ch), ok. +purge_no_consumer(Config) -> + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + declare_queue(Ch, Config, QName), + + publish(Ch, QName, [<<"msg1">>, <<"msg2">>]), + wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]), + {'queue.purge_ok', 2} = amqp_channel:call(Ch, #'queue.purge'{queue = QName}), + wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), + rabbit_ct_client_helpers:close_channel(Ch), + ok. + basic_recover(Config) -> {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), QName = ?config(queue_name, Config), From 9f10f79ccff74ecaa8b5954429a27b2f5ef958e7 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 25 Feb 2022 15:59:13 +0100 Subject: [PATCH 91/97] Fix upgrades from rabbit_fifo_v1 to v2 1. A Raft index in ?MSG(RaftIndex, Header) can be 'undefined' if it was a prefix message in v1. Therefore do not use is_integer(RaftIndex) guards. Instead, expect and handle RaftIndex=undefined correctly. 2. The total number of messages did not include any checked out messages after the conversion. 
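For illustration (hand-expanding the ?MSG/2 and ?TUPLE/2 macros from rabbit_fifo.hrl; this snippet is not part of the change itself): a v1 prefix message whose header is just the size 5 would end up as ?MSG(undefined, 5) after conversion, i.e. an improper list whose head is not an integer:

1> MsgFromV1Prefix = [undefined | 5].
[undefined|5]
2> is_integer(hd(MsgFromV1Prefix)).
false

Hence guards such as is_integer(RaftIndex) would silently skip these messages. The corrected total is computed below as ready messages + returned messages + messages checked out to active consumers + messages checked out to waiting consumers.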
--- deps/rabbit/src/rabbit_fifo.erl | 16 +- deps/rabbit/src/rabbit_fifo_dlx.erl | 3 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 165 +++++++++++++++++++- 3 files changed, 173 insertions(+), 11 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 29330fb7d6d8..4bb38589d6c7 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -711,7 +711,6 @@ convert_v1_to_v2(V1State0) -> Other end, - %% Then add all pending messages back into the index Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), @@ -726,9 +725,20 @@ convert_v1_to_v2(V1State0) -> expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) }, + MessagesConsumersV2 = maps:fold(fun(_ConsumerId, #consumer{checked_out = Checked}, Acc) -> + Acc + maps:size(Checked) + end, 0, ConsumersV2), + MessagesWaitingConsumersV2 = lists:foldl(fun({_ConsumerId, #consumer{checked_out = Checked}}, Acc) -> + Acc + maps:size(Checked) + end, 0, WaitingConsumersV2), + MessagesTotal = lqueue:len(MessagesV2) + + lqueue:len(ReturnsV2) + + MessagesConsumersV2 + + MessagesWaitingConsumersV2, + #?MODULE{cfg = Cfg, messages = MessagesV2, - messages_total = rabbit_fifo_v1:query_messages_total(V1State), + messages_total = MessagesTotal, returns = ReturnsV2, enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), enqueuers = EnqueuersV2, @@ -1796,7 +1806,7 @@ checkout(#{index := Index} = Meta, checkout0(Meta, {success, ConsumerId, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State, Effects}, - SendAcc0) when is_integer(RaftIdx) -> + SendAcc0) -> DelMsg = {RaftIdx, {MsgId, Header}}, SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of undefined -> diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index df2ba17cf2fc..c6a9e699b9a2 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -177,8 +177,7 @@ checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> checkout(_, State) -> {State, []}. -checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, _)), State}, SendAcc) - when is_integer(Idx) -> +checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, _)), State}, SendAcc) -> DelMsg = {Idx, {Reason, MsgId}}, checkout0(checkout_one(State), [DelMsg | SendAcc]); checkout0(#?MODULE{consumer = #dlx_consumer{pid = Pid}} = State, SendAcc) -> diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 0cf3af77d348..8d1618d26645 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -62,6 +62,9 @@ all_tests() -> scenario31, scenario32, upgrade, + upgrade_snapshots, + upgrade_snapshots_scenario1, + upgrade_snapshots_scenario2, messages_total, single_active, single_active_01, @@ -687,6 +690,45 @@ scenario23(_Config) -> Commands), ok. +upgrade_snapshots_scenario1(_Config) -> + E = c:pid(0,327,1), + Commands = [make_enqueue(E,1,msg(<<"msg1">>)), + make_enqueue(E,2,msg(<<"msg2">>)), + make_enqueue(E,3,msg(<<"msg3">>))], + run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, + deliver_limit => 100, + max_length => 1, + max_bytes => 100, + max_in_memory_length => undefined, + max_in_memory_bytes => undefined, + overflow_strategy => drop_head, + single_active_consumer_on => false, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. 
+ +upgrade_snapshots_scenario2(_Config) -> + E = c:pid(0,240,0), + CPid = c:pid(0,242,0), + C = {<<>>, CPid}, + Commands = [make_checkout(C, {auto,1,simple_prefetch}), + make_enqueue(E,1,msg(<<"msg1">>)), + make_enqueue(E,2,msg(<<"msg2">>)), + rabbit_fifo:make_settle(C, [0])], + run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, + deliver_limit => undefined, + max_length => undefined, + max_bytes => undefined, + max_in_memory_length => undefined, + max_in_memory_bytes => undefined, + overflow_strategy => drop_head, + single_active_consumer_on => false, + dead_letter_handler => {?MODULE, banana, []} + }, + Commands), + ok. + single_active_01(_Config) -> C1Pid = test_util:fake_pid(rabbit@fake_node1), C1 = {<<0>>, C1Pid}, @@ -907,6 +949,35 @@ upgrade(_Config) -> end) end, [], Size). +upgrade_snapshots(_Config) -> + Size = 500, + run_proper( + fun () -> + ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, + frequency([{5, {undefined, undefined, undefined, undefined, false}}, + {5, {oneof([range(1, 10), undefined]), + oneof([range(1, 1000), undefined]), + oneof([range(1, 3), undefined]), + oneof([range(1, 10), 0, undefined]), + oneof([true, false]) + }}]), + begin + Config = config(?FUNCTION_NAME, + Length, + Bytes, + SingleActive, + DeliveryLimit, + InMemoryLength, + undefined, + drop_head, + {?MODULE, banana, []} + ), + ?FORALL(O, ?LET(Ops, log_gen_upgrade_snapshots(Size), expand(Ops, Config)), + collect({log_size, length(O)}, + upgrade_snapshots_prop(Config, O))) + end) + end, [], Size). + messages_total(_Config) -> Size = 1000, run_proper( @@ -1483,8 +1554,11 @@ upgrade_prop(Conf0, Commands) -> #rabbit_fifo{} = V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), {machine_version, 1, 2}, V1)), %% assert invariants - Fields = [num_messages, - num_ready_messages, + %% + %% Note that we cannot test for num_messages because rabbit_fifo_v1:messages_total/1 + %% relies on ra_indexes not to be empty. However ra_indexes are empty in snapshots + %% in which case the number of messages checked out to consumers will not be included. + Fields = [num_ready_messages, smallest_raft_index, num_enqueuers, num_consumers, @@ -1507,13 +1581,10 @@ upgrade_prop(Conf0, Commands) -> {_, V1Effs} = run_log(InitState, Entries, fun (_) -> true end, rabbit_fifo_v1), [begin - % ct:pal("V1 ~p", [RCS]), Res = rabbit_fifo:apply(meta(Idx + 1), {machine_version, 1, 2}, RCS) , - % ct:pal("V2 ~p", [Res]), #rabbit_fifo{} = V2 = element(1, Res), %% assert invariants - Fields = [num_messages, - num_ready_messages, + Fields = [num_ready_messages, smallest_raft_index, num_enqueuers, num_consumers, @@ -1570,6 +1641,16 @@ snapshots_prop(Conf, Commands) -> false end. +upgrade_snapshots_prop(Conf, Commands) -> + try run_upgrade_snapshot_test(Conf, Commands) of + _ -> true + catch + Err -> + ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]), + ct:pal("Err: ~p~n", [Err]), + false + end. + log_gen(Size) -> Nodes = [node(), fakenode@fake, @@ -1593,6 +1674,33 @@ log_gen(Size) -> {1, purge} ]))))). +log_gen_upgrade_snapshots(Size) -> + Nodes = [node(), + fakenode@fake, + fakenode@fake2 + ], + ?LET(EPids, vector(2, pid_gen(Nodes)), + ?LET(CPids, vector(2, pid_gen(Nodes)), + resize(Size, + list( + frequency( + %% Below commented commands make the test fail. + %% Hypothesis: There are behavioural differences between v1 and v2 + %% which will end up in different numbers of messages. 
+ [{20, enqueue_gen(oneof(EPids))}, + {40, {input_event, + frequency([{10, settle}, + % {2, return}, + {2, discard}, + {2, requeue} + ])}}, + {2, checkout_gen(oneof(CPids))}, + % {1, checkout_cancel_gen(oneof(CPids))}, + % {1, down_gen(oneof(EPids ++ CPids))}, + {1, nodeup_gen(Nodes)}, + {1, purge} + ]))))). + log_gen_dlx(Size) -> Nodes = [node(), fakenode@fake, @@ -1975,6 +2083,51 @@ run_snapshot_test0(Conf0, Commands, Invariant) -> end || {release_cursor, SnapIdx, SnapState} <- Cursors], ok. +run_upgrade_snapshot_test(Conf, Commands) -> + ct:pal("running test with ~b commands using config ~p", + [length(Commands), Conf]), + Indexes = lists:seq(1, length(Commands)), + Entries = lists:zip(Indexes, Commands), + Invariant = fun(_) -> true end, + %% Run the whole command log in v1 to emit release cursors. + {_, Effects} = run_log(test_init_v1(Conf), Entries, Invariant, rabbit_fifo_v1), + Cursors = [ C || {release_cursor, _, _} = C <- Effects], + [begin + %% Drop all entries below and including the snapshot. + FilteredV1 = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; + (_) -> false + end, Entries), + %% For V2 we will apply the same commands to the snapshot state as for V1. + %% However, we need to increment all Raft indexes by 1 because V2 + %% requires one additional Raft index for the conversion command from V1 to V2. + FilteredV2 = lists:keymap(fun(Idx) -> Idx + 1 end, 1, FilteredV1), + %% Recover in V1. + {StateV1, _} = run_log(SnapState, FilteredV1, Invariant, rabbit_fifo_v1), + %% Perform conversion and recover in V2. + Res = rabbit_fifo:apply(meta(SnapIdx + 1), {machine_version, 1, 2}, SnapState), + #rabbit_fifo{} = V2 = element(1, Res), + {StateV2, _} = run_log(V2, FilteredV2, Invariant, rabbit_fifo), + %% Invariant: Recovering a V1 snapshot in V1 or V2 should end up in the same + %% number of messages. + Fields = [num_messages, + num_ready_messages, + num_enqueuers, + num_consumers, + enqueue_message_bytes, + checkout_message_bytes + ], + V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(StateV1)), + V2Overview = maps:with(Fields, rabbit_fifo:overview(StateV2)), + case V1Overview == V2Overview of + true -> ok; + false -> + ct:pal("property failed, expected:~n~p~ngot:~n~p", + [V1Overview, V2Overview]), + ?assertEqual(V1Overview, V2Overview) + end + end || {release_cursor, SnapIdx, SnapState} <- Cursors], + ok. + hd_or([H | _]) -> H; hd_or(_) -> {undefined}. 
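To make the index shifting in run_upgrade_snapshot_test above explicit, here is the recovery sequence it simulates, as a hedged sketch only (meta/1 is assumed from the surrounding suite, and in production ra drives this rather than a hand-written loop): the version bump consumes one extra Raft index before the remaining log is replayed.

%% sketch only: recover a v1 snapshot under the v2 machine
recover_v1_snapshot(SnapIdx, SnapStateV1, RemainingEntries) ->
    %% the conversion command occupies one additional Raft index ...
    StateV2 = element(1, rabbit_fifo:apply(meta(SnapIdx + 1),
                                           {machine_version, 1, 2},
                                           SnapStateV1)),
    %% ... so every subsequent command is applied at its old index + 1
    lists:foldl(fun({Idx, Cmd}, Acc) ->
                        element(1, rabbit_fifo:apply(meta(Idx + 1), Cmd, Acc))
                end, StateV2, RemainingEntries).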
From 17954d790cbe1f870c793007aa0ad467cc65386a Mon Sep 17 00:00:00 2001 From: David Ansari Date: Fri, 25 Feb 2022 18:49:02 +0100 Subject: [PATCH 92/97] Fix bug in v1 to v2 conversion --- deps/rabbit/src/rabbit_fifo.erl | 2 +- deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 20 ++++++++++++-------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 4bb38589d6c7..30d76d13a0bc 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -664,7 +664,7 @@ convert_v1_to_v2(V1State0) -> ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), WaitingConsumersV1 = rabbit_fifo_v1:get_field(waiting_consumers, V1State), %% remove all raft idx in messages from index - {_, PrefMsgs, _, PrefReturns} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), + {_, PrefReturns, _, PrefMsgs} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> lqueue:in(convert_msg(Hdr), Acc) end, lqueue:new(), PrefMsgs), diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 8d1618d26645..06481c22e106 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -696,7 +696,7 @@ upgrade_snapshots_scenario1(_Config) -> make_enqueue(E,2,msg(<<"msg2">>)), make_enqueue(E,3,msg(<<"msg3">>))], run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, - deliver_limit => 100, + delivery_limit => 100, max_length => 1, max_bytes => 100, max_in_memory_length => undefined, @@ -717,7 +717,7 @@ upgrade_snapshots_scenario2(_Config) -> make_enqueue(E,2,msg(<<"msg2">>)), rabbit_fifo:make_settle(C, [0])], run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, - deliver_limit => undefined, + delivery_limit => undefined, max_length => undefined, max_bytes => undefined, max_in_memory_length => undefined, @@ -1684,18 +1684,21 @@ log_gen_upgrade_snapshots(Size) -> resize(Size, list( frequency( - %% Below commented commands make the test fail. - %% Hypothesis: There are behavioural differences between v1 and v2 - %% which will end up in different numbers of messages. [{20, enqueue_gen(oneof(EPids))}, {40, {input_event, frequency([{10, settle}, - % {2, return}, + {2, return}, {2, discard}, {2, requeue} ])}}, {2, checkout_gen(oneof(CPids))}, + %% v2 fixes a bug that exists in v1 where a cancelled consumer is revived. + %% Therefore, there is an expected behavioural difference between v1 and v2 + %% and below line must be commented out. % {1, checkout_cancel_gen(oneof(CPids))}, + %% Likewise there is a behavioural difference between v1 and v2 + %% when 'up' is followed by 'down' where v2 behaves correctly. + %% Therefore, below line must be commented out. 
% {1, down_gen(oneof(EPids ++ CPids))}, {1, nodeup_gen(Nodes)}, {1, purge} @@ -2121,8 +2124,9 @@ run_upgrade_snapshot_test(Conf, Commands) -> case V1Overview == V2Overview of true -> ok; false -> - ct:pal("property failed, expected:~n~p~ngot:~n~p", - [V1Overview, V2Overview]), + ct:pal("property failed, expected:~n~p~ngot:~n~p~nstate v1:~n~p~nstate v2:~n~p~n" + "snapshot index: ~p", + [V1Overview, V2Overview, StateV1, ?record_info(rabbit_fifo, StateV2), SnapIdx]), ?assertEqual(V1Overview, V2Overview) end end || {release_cursor, SnapIdx, SnapState} <- Cursors], From c4b6d32e4249e994eea0540a1b822ecb3f1d3b7a Mon Sep 17 00:00:00 2001 From: David Ansari Date: Sun, 27 Feb 2022 14:47:55 +0100 Subject: [PATCH 93/97] Add test for all-with-x binding argument --- deps/rabbit/test/dead_lettering_SUITE.erl | 51 +++++++++++++++-------- 1 file changed, 34 insertions(+), 17 deletions(-) diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 856ce13441f3..bb9b250dd387 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -1235,10 +1235,14 @@ dead_letter_headers_first_death(Config) -> ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers2, <<"x-first-death-exchange">>)). -%% Route dead-letter messages to different target queues according to first death reason. +%% Test that headers exchange's x-match binding argument set to all-with-x and any-with-x +%% works as expected. The use case being tested here: +%% Route dead-letter messages to different target queues +%% according to first death reason and first death queue. dead_letter_headers_first_death_route(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), - QName = ?config(queue_name, Config), + QName1 = ?config(queue_name, Config), + QName2 = <<"dead_letter_headers_first_death_route_source_queue_2">>, DLXExpiredQName = ?config(queue_name_dlx, Config), DLXRejectedQName = ?config(queue_name_dlx_2, Config), Args = ?config(queue_args, Config), @@ -1247,45 +1251,58 @@ dead_letter_headers_first_death_route(Config) -> #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange, type = <<"headers">>}), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName1, + arguments = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange} | Args], + durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName2, arguments = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange} | Args], durable = Durable}), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXExpiredQName, durable = Durable}), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXRejectedQName, durable = Durable}), - MatchAnyWithX = {<<"x-match">>, longstr, <<"any-with-x">>}, #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXExpiredQName, exchange = DLXExchange, - arguments = [MatchAnyWithX, - {<<"x-first-death-reason">>, longstr, <<"expired">>}] + arguments = [{<<"x-match">>, longstr, <<"all-with-x">>}, + {<<"x-first-death-reason">>, longstr, <<"expired">>}, + {<<"x-first-death-queue">>, longstr, QName1}] }), #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXRejectedQName, exchange = DLXExchange, - arguments = [MatchAnyWithX, + arguments = [{<<"x-match">>, longstr, <<"any-with-x">>}, {<<"x-first-death-reason">>, 
longstr, <<"rejected">>}] }), - %% Send 1st message and let it expire. + %% Send 1st message to 1st source queue and let it expire. P1 = <<"msg1">>, - amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, + amqp_channel:call(Ch, #'basic.publish'{routing_key = QName1}, #amqp_msg{payload = P1, props = #'P_basic'{expiration = <<"0">>}}), %% The 1st message gets dead-lettered to DLXExpiredQName. wait_for_messages(Config, [[DLXExpiredQName, <<"1">>, <<"1">>, <<"0">>]]), _ = consume(Ch, DLXExpiredQName, [P1]), consume_empty(Ch, DLXExpiredQName), - wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), - %% Send and reject the 2nd message. + wait_for_messages(Config, [[QName1, <<"0">>, <<"0">>, <<"0">>]]), + %% Send 2nd message to 2nd source queue and let it expire. P2 = <<"msg2">>, - publish(Ch, QName, [P2]), - wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]), - [DTag] = consume(Ch, QName, [P2]), + amqp_channel:call(Ch, #'basic.publish'{routing_key = QName2}, + #amqp_msg{payload = P2, + props = #'P_basic'{expiration = <<"0">>}}), + %% Send 2nd message should not be routed by the dead letter headers exchange. + rabbit_ct_helpers:consistently(?_assertEqual(#'basic.get_empty'{}, + amqp_channel:call(Ch, #'basic.get'{queue = DLXExpiredQName}))), + %% Send and reject the 3rd message. + P3 = <<"msg3">>, + publish(Ch, QName2, [P3]), + timer:sleep(1000), + [DTag] = consume(Ch, QName2, [P3]), amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag, requeue = false}), - %% The 2nd message gets dead-lettered to DLXRejectedQName. + %% The 3rd message gets dead-lettered to DLXRejectedQName. wait_for_messages(Config, [[DLXRejectedQName, <<"1">>, <<"1">>, <<"0">>]]), - _ = consume(Ch, DLXRejectedQName, [P2]), - consume_empty(Ch, DLXRejectedQName). + _ = consume(Ch, DLXRejectedQName, [P3]), + consume_empty(Ch, DLXRejectedQName), + _ = amqp_channel:call(Ch, #'queue.delete'{queue = QName2}), + ok. %% Route dead-letter messages also to extra BCC queues of target queues. dead_letter_extra_bcc(Config) -> From fd2023a11801b56af2907c884ff04ac7f26fc591 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 28 Feb 2022 17:26:50 +0100 Subject: [PATCH 94/97] Exclude single_dlx_worker from mixed version tests because at-least-once dead-lettering and therefore process rabbit_fifo_dlx_sup does not exist in 3.9. Once the new base version in mixed version tests becomes 3.10 (base version is 3.9 as of March 2022), we can revert this commit. --- .../rabbit_fifo_dlx_integration_SUITE.erl | 38 +++++++++++-------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index a60fafe9d6ea..34b6870e424f 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -105,22 +105,28 @@ merge_app_env(Config) -> {ra, [{min_wal_roll_over_interval, 30000}]}). init_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), - T = rabbit_data_coercion:to_binary(Testcase), - Counters = get_global_counters(Config1), - Config2 = rabbit_ct_helpers:set_config(Config1, - [{source_queue, <>}, - {dead_letter_exchange, <>}, - {target_queue_1, <>}, - {target_queue_2, <>}, - {target_queue_3, <>}, - {target_queue_4, <>}, - {target_queue_5, <>}, - {target_queue_6, <>}, - {policy, <>}, - {counters, Counters} - ]), - rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()). 
+ case {Testcase, rabbit_ct_helpers:is_mixed_versions()} of + {single_dlx_worker, true} -> + {skip, "single_dlx_worker is not mixed version compatible because process " + "rabbit_fifo_dlx_sup does not exist in 3.9"}; + _ -> + Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), + T = rabbit_data_coercion:to_binary(Testcase), + Counters = get_global_counters(Config1), + Config2 = rabbit_ct_helpers:set_config(Config1, + [{source_queue, <>}, + {dead_letter_exchange, <>}, + {target_queue_1, <>}, + {target_queue_2, <>}, + {target_queue_3, <>}, + {target_queue_4, <>}, + {target_queue_5, <>}, + {target_queue_6, <>}, + {policy, <>}, + {counters, Counters} + ]), + rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()) + end. end_per_testcase(Testcase, Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), From 2db128f68356763fcedb9769bc00a7879779635c Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 28 Feb 2022 17:39:36 +0100 Subject: [PATCH 95/97] Delete commented quorum queue in-memory tests --- deps/rabbit/src/rabbit_fifo.erl | 5 +- deps/rabbit/src/rabbit_fifo.hrl | 6 + deps/rabbit/src/rabbit_fifo_client.erl | 5 - deps/rabbit/src/rabbit_fifo_dlx.erl | 13 +- deps/rabbit/src/rabbit_fifo_dlx.hrl | 6 + deps/rabbit/src/rabbit_fifo_dlx_client.erl | 6 + deps/rabbit/src/rabbit_fifo_dlx_sup.erl | 6 + deps/rabbit/src/rabbit_fifo_dlx_worker.erl | 29 +- deps/rabbit/test/quorum_queue_SUITE.erl | 321 +----------------- deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl | 7 +- .../rabbit_fifo_dlx_integration_SUITE.erl | 6 + 11 files changed, 73 insertions(+), 337 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 30d76d13a0bc..57a7d410e12a 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -1,10 +1,9 @@ -%% This Source Code Form is subject tconsumer_ido the terms of the Mozilla Public +%% This Source Code Form is subject to the terms of the Mozilla Public %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% %% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. -%% -%% + %% before post gc 1M msg: 203MB, after recovery + gc: 203MB -module(rabbit_fifo). diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 06ff722c26a6..ad41b07c0a81 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -1,3 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. + %% macros for memory optimised tuple structures %% [A|B] saves 1 byte compared to {A,B} -define(TUPLE(A, B), [A | B]). diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index e377037d101d..134f2814731c 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -695,11 +695,6 @@ maybe_add_action(Action, Acc, State) -> %% anything else is assumed to be an action {[Action | Acc], State}. -% do_resends(From, To, State) when From =< To -> -% lists:foldl(fun resend/2, State, lists:seq(From, To)); -% do_resends(_, _, State) -> -% State. 
- % resends a command with a new sequence number resend(OldSeq, #state{pending = Pending0, leader = Leader} = State) -> case maps:take(OldSeq, Pending0) of diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index c6a9e699b9a2..965363ac5202 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -1,3 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. + -module(rabbit_fifo_dlx). -include("rabbit_fifo_dlx.hrl"). @@ -29,7 +35,7 @@ }). -record(settle, {msg_ids :: [msg_id()]}). -type protocol() :: {dlx, #checkout{} | #settle{}}. --type state() :: #?MODULE{}. +-opaque state() :: #?MODULE{}. -export_type([state/0, protocol/0]). @@ -37,11 +43,13 @@ init() -> #?MODULE{}. +-spec make_checkout(pid(), non_neg_integer()) -> protocol(). make_checkout(Pid, NumUnsettled) -> {dlx, #checkout{consumer = Pid, prefetch = NumUnsettled }}. +-spec make_settle([msg_id()]) -> protocol(). make_settle(MessageIds) when is_list(MessageIds) -> {dlx, #settle{msg_ids = MessageIds}}. @@ -132,8 +140,7 @@ apply(_, Cmd, DLH, State) -> rabbit_log:debug("Ignoring command ~p for dead_letter_handler ~p", [Cmd, DLH]), {State, []}. --spec discard([msg()], rabbit_dead_letter:reason(), - dead_letter_handler(), state()) -> +-spec discard([msg()], rabbit_dead_letter:reason(), dead_letter_handler(), state()) -> {state(), ra_machine:effects()}. discard(Msgs, Reason, undefined, State) -> {State, [{mod_call, rabbit_global_counters, messages_dead_lettered, diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index dda2c2652847..bd2035f1b5c0 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -1,3 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. + -record(dlx_consumer,{ pid :: pid(), prefetch :: non_neg_integer(), diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index 459d27cc3eec..767d19a19a23 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -1,3 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. + -module(rabbit_fifo_dlx_client). -export([checkout/3, settle/2, handle_ra_event/3, diff --git a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl index 8af496b60483..b928f0974d55 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl @@ -1,3 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2018-2021 VMware, Inc. or its affiliates. All rights reserved. + -module(rabbit_fifo_dlx_sup). 
-behaviour(supervisor). diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 5837fe6053b8..0499b08271e1 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -1,15 +1,25 @@ -%% This module consumes from a single quroum queue's discards queue (containing dead-letttered messages) -%% and forwards the DLX messages at least once to every target queue. +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. + +%% One rabbit_fifo_dlx_worker process exists per (source) quorum queue that has at-least-once dead lettering +%% enabled. The rabbit_fifo_dlx_worker process is co-located on the quorum queue leader node. +%% Its job is to consume from the quorum queue's 'discards' queue (containing dead lettered messages) +%% and to forward each dead lettered message at least once to every target queue. +%% This is in contrast to at-most-once semantics of rabbit_dead_letter:publish/5 which is +%% the only option for classic queues and was the only option for quorum queues in RMQ <= v3.9 %% %% Some parts of this module resemble the channel process in the sense that it needs to keep track what messages %% are consumed but not acked yet and what messages are published but not confirmed yet. -%% Compared to the channel process, this module is protocol independent since it doesn't deal with AMQP clients. +%% Compared to the channel process, this module is protocol independent since it does not deal with AMQP clients. %% %% This module consumes directly from the rabbit_fifo_dlx_client bypassing the rabbit_queue_type interface, %% but publishes via the rabbit_queue_type interface. %% While consuming via rabbit_queue_type interface would have worked in practice (by using a special consumer argument, -%% e.g. {<<"x-internal-queue">>, longstr, <<"discards">>} ) using the rabbit_fifo_dlx_client directly provides -%% separation of concerns making things much easier to test, to debug, and to understand. +%% e.g. {<<"x-internal-queue">>, longstr, <<"discards">>}) using the rabbit_fifo_dlx_client directly provides +%% separation of concerns making things easier to test, to debug, and to understand. -module(rabbit_fifo_dlx_worker). @@ -55,22 +65,23 @@ }). -record(state, { - %% There is one rabbit_fifo_dlx_worker per source quorum queue - %% (if dead-letter-strategy at-least-once is used). + %% source queue queue_ref :: rabbit_amqqueue:name(), %% monitors source queue monitor_ref :: reference(), %% configured (x-)dead-letter-exchange of source queue - exchange_ref, + exchange_ref :: rabbit_exchange:name() | undefined, %% configured (x-)dead-letter-routing-key of source queue routing_key, + %% client of source queue dlx_client_state :: rabbit_fifo_dlx_client:state(), + %% clients of target queues queue_type_state :: rabbit_queue_type:state(), %% Consumed messages for which we are awaiting publisher confirms. pendings = #{} :: #{OutSeq :: non_neg_integer() => #pending{}}, %% Consumed message IDs for which we received all publisher confirms. 
settled_ids = [] :: [non_neg_integer()], - %% next publisher confirm delivery tag sequence number + %% next outgoing message sequence number next_out_seq = 1, %% If no publisher confirm was received for at least settle_timeout milliseconds, message will be redelivered. %% To prevent duplicates in the target queue and to ensure message will eventually be acked to the source queue, diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 7ffefeba36b9..8686ac65fff6 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -3,7 +3,6 @@ %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% %% Copyright (c) 2018-2021 VMware, Inc. or its affiliates. All rights reserved. -%% -module(quorum_queue_SUITE). @@ -122,15 +121,7 @@ all_tests() -> subscribe_redelivery_limit, subscribe_redelivery_policy, subscribe_redelivery_limit_with_dead_letter, - % queue_length_in_memory_limit_basic_get, - % queue_length_in_memory_limit_subscribe, - % queue_length_in_memory_limit, - % queue_length_in_memory_limit_returns, - % queue_length_in_memory_bytes_limit_basic_get, - % queue_length_in_memory_bytes_limit_subscribe, - % queue_length_in_memory_bytes_limit, - % queue_length_in_memory_purge, - % in_memory, + purge, consumer_metrics, invalid_policy, delete_if_empty, @@ -2270,287 +2261,25 @@ queue_length_limit_reject_publish(Config) -> ok = publish_confirm(Ch, QQ), ok. -queue_length_in_memory_limit_basic_get(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-length">>, long, 1}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - ok = amqp_channel:cast(Ch, - #'basic.publish'{routing_key = QQ}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = Msg1}), - ok = amqp_channel:cast(Ch, - #'basic.publish'{routing_key = QQ}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = <<"msg2">>}), - - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - - ?assertEqual([{1, byte_size(Msg1)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = true})), - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = true})). 
- -queue_length_in_memory_limit_subscribe(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-length">>, long, 1}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - Msg2 = <<"msg11">>, - publish(Ch, QQ, Msg1), - publish(Ch, QQ, Msg2), - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - - ?assertEqual([{1, byte_size(Msg1)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - subscribe(Ch, QQ, false), - receive - {#'basic.deliver'{delivery_tag = DeliveryTag1, - redelivered = false}, - #amqp_msg{payload = Msg1}} -> - amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1, - multiple = false}) - end, - ?assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - receive - {#'basic.deliver'{delivery_tag = DeliveryTag2, - redelivered = false}, - #amqp_msg{payload = Msg2}} -> - amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2, - multiple = false}) - end. - -queue_length_in_memory_limit(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-length">>, long, 2}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - Msg2 = <<"msg11">>, - Msg3 = <<"msg111">>, - Msg4 = <<"msg1111">>, - Msg5 = <<"msg1111">>, - - - publish(Ch, QQ, Msg1), - publish(Ch, QQ, Msg2), - publish(Ch, QQ, Msg3), - wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]), - - ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = true})), - - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - publish(Ch, QQ, Msg4), - wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]), - - ?assertEqual([{2, byte_size(Msg2) + byte_size(Msg4)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - publish(Ch, QQ, Msg5), - wait_for_messages(Config, [[QQ, <<"4">>, <<"4">>, <<"0">>]]), - ExpectedMsgs = [Msg2, Msg3, Msg4, Msg5], - validate_queue(Ch, QQ, ExpectedMsgs), - ok. 
- -queue_length_in_memory_limit_returns(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-length">>, long, 2}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - Msg2 = <<"msg11">>, - Msg3 = <<"msg111">>, - Msg4 = <<"msg111">>, - publish(Ch, QQ, Msg1), - publish(Ch, QQ, Msg2), - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - - ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = false})), - - {#'basic.get_ok'{delivery_tag = DTag2}, #amqp_msg{payload = Msg2}} = - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = false}), - - publish(Ch, QQ, Msg3), - publish(Ch, QQ, Msg4), - - %% Ensure that returns are subject to in memory limits too - wait_for_messages(Config, [[QQ, <<"4">>, <<"2">>, <<"2">>]]), - amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2, - multiple = true, - requeue = true}), - wait_for_messages(Config, [[QQ, <<"4">>, <<"4">>, <<"0">>]]), - - ?assertEqual([{2, byte_size(Msg3) + byte_size(Msg4)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)). - -queue_length_in_memory_bytes_limit_basic_get(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-bytes">>, long, 6}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - ok = amqp_channel:cast(Ch, - #'basic.publish'{routing_key = QQ}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = Msg1}), - ok = amqp_channel:cast(Ch, - #'basic.publish'{routing_key = QQ}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = <<"msg2">>}), - - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - - ?assertEqual([{1, byte_size(Msg1)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = true})), - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = true})). 
- -queue_length_in_memory_bytes_limit_subscribe(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-bytes">>, long, 6}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - Msg2 = <<"msg11">>, - publish(Ch, QQ, Msg1), - publish(Ch, QQ, Msg2), - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - - ?assertEqual([{1, byte_size(Msg1)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - subscribe(Ch, QQ, false), - receive - {#'basic.deliver'{delivery_tag = DeliveryTag1, - redelivered = false}, - #amqp_msg{payload = Msg1}} -> - amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1, - multiple = false}) - end, - ?assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - receive - {#'basic.deliver'{delivery_tag = DeliveryTag2, - redelivered = false}, - #amqp_msg{payload = Msg2}} -> - amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2, - multiple = false}) - end. - -queue_length_in_memory_bytes_limit(Config) -> +purge(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-bytes">>, long, 12}])), + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), RaName = ra_name(QQ), Msg1 = <<"msg1">>, Msg2 = <<"msg11">>, - Msg3 = <<"msg111">>, - Msg4 = <<"msg1111">>, publish(Ch, QQ, Msg1), publish(Ch, QQ, Msg2), - publish(Ch, QQ, Msg3), - wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]), - - ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch, #'basic.get'{queue = QQ, - no_ack = true})), - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - publish(Ch, QQ, Msg4), - wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]), - ?assertEqual([{2, byte_size(Msg2) + byte_size(Msg4)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)). + {'queue.purge_ok', 2} = amqp_channel:call(Ch, #'queue.purge'{queue = QQ}), -queue_length_in_memory_purge(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-length">>, long, 2}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - Msg2 = <<"msg11">>, - Msg3 = <<"msg111">>, - - publish(Ch, QQ, Msg1), - publish(Ch, QQ, Msg2), - publish(Ch, QQ, Msg3), - wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]), - - ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - {'queue.purge_ok', 3} = amqp_channel:call(Ch, #'queue.purge'{queue = QQ}), - - ?assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)). 
+ ?assertEqual([0], dirty_query([Server], RaName, fun rabbit_fifo:query_messages_total/1)). peek(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -2673,46 +2402,6 @@ per_message_ttl_mixed_expiry(Config) -> end, ok. -in_memory(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - - RaName = ra_name(QQ), - Msg1 = <<"msg1">>, - Msg2 = <<"msg11">>, - - publish(Ch, QQ, Msg1), - - wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]), - ?assertEqual([{1, byte_size(Msg1)}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - subscribe(Ch, QQ, false), - - wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]), - ?assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - publish(Ch, QQ, Msg2), - - wait_for_messages(Config, [[QQ, <<"2">>, <<"0">>, <<"2">>]]), - ?assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)), - - receive - {#'basic.deliver'{delivery_tag = DeliveryTag}, #amqp_msg{}} -> - amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag, - multiple = false}) - end, - - wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]), - ?assertEqual([{0, 0}], - dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)). - consumer_metrics(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index f3f78b21f100..37c512489dda 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -1,9 +1,14 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2018-2021 VMware, Inc. or its affiliates. All rights reserved. + -module(rabbit_fifo_dlx_SUITE). -compile(nowarn_export_all). -compile(export_all). -% -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). -include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 34b6870e424f..69184c489082 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -1,3 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. + -module(rabbit_fifo_dlx_integration_SUITE). 
%% Integration tests for at-least-once dead-lettering comprising mainly From 08f20612b69d5d87314d6ab0dbe17ab2a5b7f8e8 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Wed, 2 Mar 2022 16:45:54 +0100 Subject: [PATCH 96/97] Fix badmatch when basic.get on quorum queue with expired messages Before this commit, when a quorum queue has only expired messages, but the timer hasn't expired them yet, and a basic.get tries to dequeue a message the following error occurred: {{badmatch, {nochange,true, {rabbit_fifo, ...}, [{mod_call,rabbit_global_counters, messages_dead_lettered, [expired,rabbit_quorum_queue,disabled,1]}]}}, [{rabbit_fifo,apply,3,[{file,"rabbit_fifo.erl"},{line,387}]}, --- deps/rabbit/src/rabbit_fifo.erl | 52 ++++++++++++++----------- deps/rabbit/test/rabbit_fifo_SUITE.erl | 54 ++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 23 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 57a7d410e12a..131bfc6cc5dd 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -380,32 +380,38 @@ apply(#{index := Index, _ when Exists -> %% a dequeue using the same consumer_id isn't possible at this point {State0, {dequeue, empty}}; - Ready -> + _ -> State1 = update_consumer(Meta, ConsumerId, ConsumerMeta, {once, 1, simple_prefetch}, 0, State0), - {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} = - checkout_one(Meta, false, State1, []), - {State4, Effects1} = case Settlement of - unsettled -> - {_, Pid} = ConsumerId, - {State2, [{monitor, process, Pid} | Effects0]}; - settled -> - %% immediately settle the checkout - {State3, _, SettleEffects} = - apply(Meta, make_settle(ConsumerId, [MsgId]), - State2), - {State3, SettleEffects ++ Effects0} - end, - Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) | Effects1], - {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, - Effects2), - Reply = '$ra_no_reply', - case {DroppedMsg, ExpiredMsg} of - {false, false} -> - {State, Reply, Effects}; - _ -> - update_smallest_raft_index(Index, Reply, State, Effects) + case checkout_one(Meta, false, State1, []) of + {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} -> + {State4, Effects1} = case Settlement of + unsettled -> + {_, Pid} = ConsumerId, + {State2, [{monitor, process, Pid} | Effects0]}; + settled -> + %% immediately settle the checkout + {State3, _, SettleEffects} = + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), + {State3, SettleEffects ++ Effects0} + end, + Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, messages_ready(State4), From) | Effects1], + {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, + Effects2), + Reply = '$ra_no_reply', + case {DroppedMsg, ExpiredMsg} of + {false, false} -> + {State, Reply, Effects}; + _ -> + update_smallest_raft_index(Index, Reply, State, Effects) + end; + {nochange, _ExpiredMsg = true, State2, Effects0} -> + %% All ready messages expired. 
+ State3 = State2#?MODULE{consumers = maps:remove(ConsumerId, State2#?MODULE.consumers)}, + {State, _, Effects} = evaluate_limit(Index, false, State0, State3, Effects0), + update_smallest_raft_index(Index, {dequeue, empty}, State, Effects) end end; apply(#{index := Idx} = Meta, diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index be5f0a2b5f68..99a657d5a703 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -12,6 +12,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). %%%=================================================================== @@ -230,6 +231,59 @@ untracked_enq_deq_test(_) -> ?ASSERT_EFF({log, [1], _}, Effs), ok. +enq_expire_deq_test(_) -> + Conf = #{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), + msg_ttl => 0}, + S0 = rabbit_fifo:init(Conf), + Msg = #basic_message{content = #content{properties = none, + payload_fragments_rev = []}}, + {S1, ok, _} = apply(meta(1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg), S0), + Cid = {?FUNCTION_NAME, self()}, + {_S2, {dequeue, empty}, Effs} = + apply(meta(2, 101), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), S1), + ?ASSERT_EFF({mod_call, rabbit_global_counters, messages_dead_lettered, + [expired, rabbit_quorum_queue, disabled, 1]}, Effs), + ok. + +enq_expire_enq_deq_test(_) -> + S0 = test_init(test), + %% Msg1 and Msg2 get enqueued in the same millisecond, + %% but only Msg1 expires immediately. + Msg1 = #basic_message{content = #content{properties = #'P_basic'{expiration = <<"0">>}, + payload_fragments_rev = [<<"msg1">>]}}, + Enq1 = rabbit_fifo:make_enqueue(self(), 1, Msg1), + {S1, ok, _} = apply(meta(1, 100), Enq1, S0), + Msg2 = #basic_message{content = #content{properties = none, + payload_fragments_rev = [<<"msg2">>]}}, + Enq2 = rabbit_fifo:make_enqueue(self(), 2, Msg2), + {S2, ok, _} = apply(meta(2, 100), Enq2, S1), + Cid = {?FUNCTION_NAME, self()}, + {_S3, _, Effs} = + apply(meta(3, 101), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), S2), + {log, [2], Fun} = get_log_eff(Effs), + [{reply, _From, + {wrap_reply, {dequeue, {_MsgId, _HeaderMsg}, ReadyMsgCount}}}] = Fun([Enq2]), + ?assertEqual(0, ReadyMsgCount). + +enq_expire_deq_enq_enq_deq_deq_test(_) -> + S0 = test_init(test), + Msg1 = #basic_message{content = #content{properties = #'P_basic'{expiration = <<"0">>}, + payload_fragments_rev = [<<"msg1">>]}}, + {S1, ok, _} = apply(meta(1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg1), S0), + {S2, {dequeue, empty}, _} = apply(meta(2, 101), + rabbit_fifo:make_checkout({c1, self()}, {dequeue, unsettled}, #{}), S1), + {S3, _} = enq(3, 2, msg2, S2), + {S4, _} = enq(4, 3, msg3, S3), + {S5, '$ra_no_reply', + [{log, [3], _}, + {monitor, _, _}]} = + apply(meta(5), rabbit_fifo:make_checkout({c2, self()}, {dequeue, unsettled}, #{}), S4), + {_S6, '$ra_no_reply', + [{log, [4], _}, + {monitor, _, _}]} = + apply(meta(6), rabbit_fifo:make_checkout({c3, self()}, {dequeue, unsettled}, #{}), S5). 
+ release_cursor_test(_) -> Cid = {?FUNCTION_NAME, self()}, {State1, _} = enq(1, 1, first, test_init(test)), From 4a2b00a76e323e3edc9e6934a186ebd057b8664c Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Tue, 8 Mar 2022 15:00:02 +0000 Subject: [PATCH 97/97] rabbit_fifo: tidy up and formatting --- deps/rabbit/src/rabbit_fifo.erl | 22 +-------------------- deps/rabbit/src/rabbit_fifo_dlx.erl | 27 +++++++++++++------------- deps/rabbit/src/rabbit_fifo_dlx.hrl | 30 ++++++++++++++--------------- 3 files changed, 28 insertions(+), 51 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 131bfc6cc5dd..1fe6bbbef8e6 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -106,21 +106,6 @@ -record(purge_nodes, {nodes :: [node()]}). -record(update_config, {config :: config()}). -record(garbage_collection, {}). -%% v2 alternative commands -%% each consumer is assigned an integer index which can be used -%% instead of the consumer id to identify the consumer --type consumer_idx() :: non_neg_integer(). - --record(?SETTLE_V2, {consumer_idx :: consumer_idx(), - msg_ids :: [msg_id()]}). --record(?RETURN_V2, {consumer_idx :: consumer_idx(), - msg_ids :: [msg_id()]}). --record(?DISCARD_V2, {consumer_idx :: consumer_idx(), - msg_ids :: [msg_id()]}). --record(?CREDIT_V2, {consumer_idx :: consumer_idx(), - credit :: non_neg_integer(), - delivery_count :: non_neg_integer(), - drain :: boolean()}). -opaque protocol() :: #enqueue{} | @@ -134,12 +119,7 @@ #purge{} | #purge_nodes{} | #update_config{} | - #garbage_collection{} | - % v2 - #?SETTLE_V2{} | - #?RETURN_V2{} | - #?DISCARD_V2{} | - #?CREDIT_V2{}. + #garbage_collection{}. -type command() :: protocol() | rabbit_fifo_dlx:protocol() | diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 965363ac5202..fef9a2f123a1 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -29,10 +29,8 @@ smallest_raft_index/1 ]). --record(checkout,{ - consumer :: pid(), - prefetch :: non_neg_integer() - }). +-record(checkout, {consumer :: pid(), + prefetch :: non_neg_integer()}). -record(settle, {msg_ids :: [msg_id()]}). -type protocol() :: {dlx, #checkout{} | #settle{}}. -opaque state() :: #?MODULE{}. 
@@ -93,16 +91,17 @@ stat(#?MODULE{consumer = Con, apply(_Meta, {dlx, #settle{msg_ids = MsgIds}}, at_least_once, #?MODULE{consumer = #dlx_consumer{checked_out = Checked0}} = State0) -> Acked = maps:with(MsgIds, Checked0), - State = maps:fold(fun(MsgId, ?TUPLE(_Rsn, ?MSG(Idx, _) = Msg), - #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, - msg_bytes_checkout = BytesCheckout, - ra_indexes = Indexes0} = S) -> - Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - S#?MODULE{consumer = C#dlx_consumer{checked_out = - maps:remove(MsgId, Checked)}, - msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), - ra_indexes = Indexes} - end, State0, Acked), + State = maps:fold( + fun(MsgId, ?TUPLE(_Rsn, ?MSG(Idx, _) = Msg), + #?MODULE{consumer = #dlx_consumer{checked_out = Checked} = C, + msg_bytes_checkout = BytesCheckout, + ra_indexes = Indexes0} = S) -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + S#?MODULE{consumer = C#dlx_consumer{checked_out = + maps:remove(MsgId, Checked)}, + msg_bytes_checkout = BytesCheckout - size_in_bytes(Msg), + ra_indexes = Indexes} + end, State0, Acked), {State, [{mod_call, rabbit_global_counters, messages_dead_lettered_confirmed, [rabbit_quorum_queue, at_least_once, maps:size(Acked)]}]}; apply(_, {dlx, #checkout{consumer = Pid, diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index bd2035f1b5c0..512b13ae7744 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -4,20 +4,18 @@ %% %% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. --record(dlx_consumer,{ - pid :: pid(), - prefetch :: non_neg_integer(), - checked_out = #{} :: #{msg_id() => tuple(rabbit_dead_letter:reason(), msg())}, - next_msg_id = 0 :: msg_id() - }). +-record(dlx_consumer, + {pid :: pid(), + prefetch :: non_neg_integer(), + checked_out = #{} :: #{msg_id() => tuple(rabbit_dead_letter:reason(), msg())}, + next_msg_id = 0 :: msg_id()}). --record(rabbit_fifo_dlx,{ - consumer :: option(#dlx_consumer{}), - %% Queue of dead-lettered messages. - discards = lqueue:new() :: lqueue:lqueue(tuple(rabbit_dead_letter:reason(), msg())), - %% Raft indexes of messages in both discards queue and dlx_consumer's checked_out map - %% so that we get the smallest ra index in O(1). - ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), - msg_bytes = 0 :: non_neg_integer(), - msg_bytes_checkout = 0 :: non_neg_integer() - }). +-record(rabbit_fifo_dlx, + {consumer :: option(#dlx_consumer{}), + %% Queue of dead-lettered messages. + discards = lqueue:new() :: lqueue:lqueue(tuple(rabbit_dead_letter:reason(), msg())), + %% Raft indexes of messages in both discards queue and dlx_consumer's checked_out map + %% so that we get the smallest ra index in O(1). + ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), + msg_bytes = 0 :: non_neg_integer(), + msg_bytes_checkout = 0 :: non_neg_integer()}).
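
The core of the basic.get fix in the second patch above is that the dequeue path now branches on every result shape the checkout step can produce, rather than pattern-matching the success tuple only. Below is a deliberately simplified, self-contained Erlang sketch of that defensive pattern; the module name, helper function, and tuple shapes are illustrative stand-ins, not the real rabbit_fifo internals.

    %% A minimal sketch (not the real rabbit_fifo code): the dequeue path
    %% handles both outcomes a checkout can produce, so an "all ready
    %% messages already expired" result yields {dequeue, empty} instead of
    %% crashing with a badmatch on the success tuple.
    -module(dequeue_branch_sketch).
    -export([dequeue/1]).

    %% Stand-in for rabbit_fifo's checkout_one/4: either check out the next
    %% ready message or report that nothing changed.
    checkout([Msg | Rest]) -> {success, Msg, Rest};
    checkout([])           -> {nochange, all_expired, []}.

    dequeue(Ready) ->
        case checkout(Ready) of
            {success, Msg, Rest} ->
                %% a ready message was checked out; hand it to the caller
                {Rest, {dequeue, Msg}};
            {nochange, all_expired, Rest} ->
                %% previously only the success tuple was matched, which is
                %% what blew up when the queue held only expired messages
                {Rest, {dequeue, empty}}
        end.

As a usage note, dequeue_branch_sketch:dequeue([]) returns {[], {dequeue, empty}}, mirroring the {dequeue, empty} reply that the new enq_expire_deq_test and enq_expire_deq_enq_enq_deq_deq_test cases in rabbit_fifo_SUITE assert on.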