Fix sync log setting #383

Merged: 3 commits, Aug 16, 2019
5 changes: 1 addition & 4 deletions include/antidote.hrl
@@ -61,9 +61,6 @@
-define(META_TABLE_NAME, a_meta_data_table).
-define(REMOTE_META_TABLE_NAME, a_remote_meta_data_table).
-define(META_TABLE_STABLE_NAME, a_meta_data_table_stable).
-%% At commit, if this is set to true, the logging vnode
-%% will ensure that the transaction record is written to disk
--define(SYNC_LOG, false).
%% Uncomment the following line to use erlang:now()
%% Otherwise os:timestamp() is used which can go backwards
%% which is unsafe for clock-si
@@ -269,4 +266,4 @@
%% true if this is the most recent snapshot in the cache
is_newest_snapshot :: boolean()
}).
--type snapshot_get_response() :: #snapshot_get_response{}.
+-type snapshot_get_response() :: #snapshot_get_response{}.
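With the compile-time ?SYNC_LOG macro removed above, whether the logging vnode flushes each transaction record to disk becomes a runtime application setting. A minimal sketch of flipping that setting, assuming the OTP application is named antidote and using the sync_log key mentioned in the logging_vnode.erl doc comment below (the key's default value and where it is declared are assumptions, not shown in this diff):

%% Sketch: enable synchronous logging at runtime. The changed append paths below
%% pick this up because they now call is_sync_log/0 on every append.
ok = application:set_env(antidote, sync_log, true).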
16 changes: 8 additions & 8 deletions src/logging_vnode.erl
@@ -140,19 +140,19 @@ read(Node, Log) ->
{read, Log},
?LOGGING_MASTER).

-%% @doc Sends an `append' asyncrhonous command to the Logs in `Preflist'
+%% @doc Sends an `append' asynchronous command to the Logs in `Preflist'
-spec asyn_append(index_node(), key(), log_operation(), sender()) -> ok.
asyn_append(IndexNode, Log, LogOperation, ReplyTo) ->
riak_core_vnode_master:command(IndexNode,
-{append, Log, LogOperation, ?SYNC_LOG},
+{append, Log, LogOperation, is_sync_log()},
ReplyTo,
?LOGGING_MASTER).

%% @doc synchronous append operation payload
-spec append(index_node(), key(), log_operation()) -> {ok, op_id()} | {error, term()}.
append(IndexNode, LogId, LogOperation) ->
riak_core_vnode_master:sync_command(IndexNode,
-{append, LogId, LogOperation, false},
+{append, LogId, LogOperation, is_sync_log()},
?LOGGING_MASTER,
infinity).

@@ -192,7 +192,7 @@ get_up_to_time(IndexNode, LogId, MaxSnapshotTime, Type, Key) ->
?LOGGING_MASTER,
infinity).

-%% @doc given the MinSnapshotTime and the type, this method fetchss from the log the
+%% @doc given the MinSnapshotTime and the type, this method fetches from the log the
%% desired operations so a new snapshot can be created.
%% It returns a snapshot_get_response() record which is defined in antidote.hrl
-spec get_from_time(index_node(), key(), vectorclock(), type(), key()) ->
@@ -216,13 +216,13 @@ get_range(IndexNode, LogId, MinSnapshotTime, MaxSnapshotTime, Type, Key) ->


%% @doc Given the logid and position in the log (given by continuation) and a dict
-%% of non_commited operations up to this position returns
+%% of non_committed operations up to this position returns
%% a tuple with three elements
%% the first is a dict with all operations that had been committed until the next chunk in the log
%% the second contains those without commit operations
%% the third is the location of the next chunk
%% Otherwise if the end of the file is reached it returns a tuple
-%% where the first elelment is 'eof' and the second is a dict of commited operations
+%% where the first element is 'eof' and the second is a dict of committed operations
-spec get_all(index_node(), log_id(), start | disk_log:continuation(), dict:dict(key(), [{non_neg_integer(), clocksi_payload()}])) ->
{disk_log:continuation(), dict:dict(txid(), [any_log_payload()]), dict:dict(key(), [{non_neg_integer(), clocksi_payload()}])}
| {error, reason()} | {eof, dict:dict(key(), [{non_neg_integer(), clocksi_payload()}])}.
@@ -245,7 +245,7 @@ request_bucket_op_id(IndexNode, DCID, Bucket, Partition) ->
?LOGGING_MASTER,
infinity).

-%% @doc Returns true if syncrounous logging is enabled
+%% @doc Returns true if synchronous logging is enabled
%% False otherwise.
%% Uses environment variable "sync_log" set in antidote.app.src
-spec is_sync_log() -> boolean().
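The body of is_sync_log/0 sits outside the hunks shown here. A plausible sketch, assuming it simply reads the sync_log key described in the doc comment above from the application environment (the application name antidote and the false default are assumptions):

%% Sketch of a possible implementation; the real body is not part of this diff.
is_sync_log() ->
    application:get_env(antidote, sync_log, false).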
@@ -550,7 +550,7 @@ handle_command({get, LogId, MinSnapshotTime, MaxSnapshotTime, Type, Key}, _Sende

%% This will reply with all downstream operations that have
%% been stored in the log given by LogId
-%% The resut is a dict, with a list of ops per key
+%% The result is a dict, with a list of ops per key
%% The following spec is only for reference
%% -spec handle_command({get_all, log_id(), disk_log:continuation() | start, dict:dict()}, term(), #state{}) ->
%% {reply, {error, reason()} | dict:dict(), #state{}}.