Commit

some follow up fixes (#4280)
* check context status before getting doc

* do not use raw json

* check pip version for circle

* fix circle TO_FMT

* no_raw_json needs to be compiled first

* fix docs-build

* some fixes and code cleanup for crossbar_view

* don't echo literal -e
icehess authored and k-anderson committed Oct 19, 2017
1 parent 40430c7 commit fd18856
Showing 6 changed files with 88 additions and 41 deletions.
18 changes: 13 additions & 5 deletions Makefile
@@ -244,12 +244,19 @@ validate-schemas:


CHANGED := $(shell git --no-pager diff --name-only HEAD origin/master -- applications core scripts)
TO_FMT := $(git --no-pager diff --name-only HEAD origin/master -- "*.erl" "*.hrl" "*.escript")
TO_FMT := $(shell git --no-pager diff --name-only HEAD origin/master -- "*.erl" "*.hrl" "*.escript")
CHANGED_SWAGGER := $(shell git --no-pager diff --name-only HEAD origin/master -- applications/crossbar/priv/api/swagger.json)
PIP2 := $(shell { command -v pip || command -v pip2; } 2>/dev/null)

circle-pre:
@pip install --upgrade pip
@pip install PyYAML mkdocs pyembed-markdown jsonschema
ifneq ($(PIP2),)
## needs root access
@echo $(CHANGED)
@$(PIP2) install --upgrade pip
@$(PIP2) install PyYAML mkdocs pyembed-markdown jsonschema
else
$(error "pip/pip2 is not available, please install python2-pip package")
endif
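The `{ command -v pip || command -v pip2; }` probe resolves to the first pip executable found (or to nothing), which is what lets the ifneq guard fail fast with a readable error instead of running a broken pip call. The same fallback idea, sketched in Erlang purely for illustration (not part of this commit):

    find_pip() ->
        case os:find_executable("pip") of
            'false' -> os:find_executable("pip2"); %% still 'false' when pip2 is missing too
            Path -> Path
        end.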

circle-docs:
@./scripts/state-of-docs.sh || true
@@ -262,7 +269,8 @@ circle-codechecks:
@./scripts/validate-js.sh $(CHANGED)

circle-fmt:
@$(if $(TO_FMT), $(MAKE) fmt)
@echo $(TO_FMT)
@$(if $(TO_FMT), TO_FMT="$(TO_FMT)" $(MAKE) fmt)
@$(MAKE) elvis

circle-build:
@@ -289,5 +297,5 @@ circle-dialyze:
circle-release:
@$(MAKE) build-ci-release

circle: circle-pre circle-fmt circle-codechecks circle-build circle-docs circle-schemas circle-dialyze circle-release
circle: circle-pre circle-fmt circle-build circle-codechecks circle-docs circle-schemas circle-dialyze circle-release
@$(if $(git status --porcelain | wc -l), $(MAKE) circle-unstaged)
77 changes: 52 additions & 25 deletions applications/crossbar/src/crossbar_view.erl
@@ -7,6 +7,7 @@
%%% Hesaam Farhang
%%%-------------------------------------------------------------------
-module(crossbar_view).

-export([load/2, load/3
,load_range/2, load_range/3
,load_modb/2, load_modb/3
@@ -24,18 +25,17 @@

,suffix_key_fun/1

,init_chunk_stream/1, chunk_send_jsons/2, chunk_send_jsons/3
,init_chunk_stream/1
,chunk_send_jsons/2, chunk_send_jsons/3

,map_doc_fun/0
,map_value_fun/0
]).

-include("crossbar.hrl").
-include_lib("kazoo_stdlib/include/kazoo_json.hrl").

-define(CB_SPECIFIC_VIEW_OPTIONS,
['ascending', 'databases'
,'descending', 'mapper'
['ascending', 'databases', 'mapper'

%% non-range query
,'end_keymap', 'keymap', 'start_keymap'
@@ -53,7 +53,7 @@

-type time_range() :: {gregorian_seconds(), gregorian_seconds()}.

-type api_range_key() :: 'undefined' | kazoo_data:range_key().
-type api_range_key() :: 'undefined' | ['undefined'] | kazoo_data:range_key().
-type range_keys() :: {api_range_key(), api_range_key()}.

-type keymap_fun() :: fun((cb_context:context()) -> api_range_key()) |
@@ -73,6 +73,7 @@
-type chunked_mapper_fun() :: 'undefined' |
fun((cb_cowboy_payload(), kz_json:objects()) -> chunked_mapper_ret()) |
fun((cb_cowboy_payload(), kz_json:objects(), ne_binary()) -> chunked_mapper_ret()).
-type chunk_resp_type() :: 'json' | 'csv'.

-type mapper_fun() :: 'undefined' |
fun((kz_json:objects()) -> kz_json:objects()) |
@@ -90,7 +91,7 @@

%% for chunked query
{'chunked_mapper', chunked_mapper_fun()} |
{'chunk_response_type', 'json' | 'csv'} |
{'chunk_response_type', chunk_resp_type()} |
{'chunk_size', pos_integer()} |
{'cowboy_req', cowboy_req:req()} |
{'is_chunked', boolean()} |
@@ -105,7 +106,7 @@
].

-type load_params() :: #{chunked_mapper => chunked_mapper_fun()
,chunk_response_type => 'json' | 'csv'
,chunk_response_type => chunk_resp_type()
,chunk_size => pos_integer()
,context => cb_context:context()
,cowboy_req => cowboy_req:req()
@@ -305,7 +306,7 @@ build_load_modb_params(Context, View, Options) ->
kazoo_data:view_options().
build_view_query(Options, Direction, StartKey, EndKey, HasQSFilter) ->
DeleteKeys = ['startkey', 'endkey'
,'ascending', 'limit'
,'descending', 'limit'
| ?CB_SPECIFIC_VIEW_OPTIONS
],
DefaultOptions =
@@ -421,6 +422,7 @@ ranged_start_end_keys(Context, Options, Direction, StartTime, EndTime) ->
-spec suffix_key_fun(range_keymap()) -> range_keymap_fun().
suffix_key_fun('nil') -> fun(_) -> 'undefined' end;
suffix_key_fun('undefined') -> fun kz_term:identity/1;
suffix_key_fun(['undefined']) -> fun kz_term:identity/1;
suffix_key_fun(K) when is_binary(K) -> fun(Ts) -> [Ts, K] end;
suffix_key_fun(K) when is_integer(K) -> fun(Ts) -> [Ts, K] end;
suffix_key_fun(K) when is_list(K) -> fun(Ts) -> [Ts | K] end;
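A quick illustration of the closures above, with an invented timestamp: a binary or integer suffix is placed after the timestamp, while a list suffix is appended element by element.

    1> (crossbar_view:suffix_key_fun(<<"owner">>))(63689876543).
    [63689876543,<<"owner">>]
    2> (crossbar_view:suffix_key_fun([<<"a">>,<<"b">>]))(63689876543).
    [63689876543,<<"a">>,<<"b">>]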
@@ -492,7 +494,7 @@ time_range(Context, Options, Key) ->
%%--------------------------------------------------------------------
%% @public
%% @doc
%% Checks whether or not start time is prior to end time. Returns a ranged
%% Checks whether or not end time is prior to start time. Returns a ranged
%% tuple `{start_time, end_time}` or `context` with validation error.
%% @end
%%--------------------------------------------------------------------
@@ -502,12 +504,12 @@ time_range(Context, MaxRange, Key, RangeFrom, RangeTo) ->
Path = <<Key/binary, "_from">>,
case RangeTo - RangeFrom of
N when N < 0 ->
Msg = kz_term:to_binary(io_lib:format("~s ~b is prior to ~s ~b", [Path, RangeFrom, <<Key/binary, "_to">>, RangeTo])),
Msg = kz_term:to_binary(io_lib:format("~s_to ~b is prior to ~s ~b", [Key, RangeTo, Path, RangeFrom])),
JObj = kz_json:from_list([{<<"message">>, Msg}, {<<"cause">>, RangeFrom}]),
lager:debug("~s", [Msg]),
cb_context:add_validation_error(Path, <<"date_range">>, JObj, Context);
N when N > MaxRange ->
Msg = kz_term:to_binary(io_lib:format("~s ~b is more than ~b seconds from ~s ~b", [<<Key/binary, "_to">>, RangeTo, MaxRange, Path, RangeFrom])),
Msg = kz_term:to_binary(io_lib:format("~s_to ~b is more than ~b seconds from ~s ~b", [Key, RangeTo, MaxRange, Path, RangeFrom])),
JObj = kz_json:from_list([{<<"message">>, Msg}, {<<"cause">>, RangeTo}]),
lager:debug("~s", [Msg]),
cb_context:add_validation_error(Path, <<"date_range">>, JObj, Context);
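With the argument order fixed, the message now reads in the right direction. For example (key and timestamps invented):

    1> Key = <<"created">>, From = 63689000000, To = 63688000000.
    2> io_lib:format("~s_to ~b is prior to ~s ~b", [Key, To, <<Key/binary, "_from">>, From]).
    %% -> "created_to 63688000000 is prior to created_from 63689000000"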
@@ -524,8 +526,8 @@ map_value_fun() -> fun(JObj, Acc) -> [kz_json:get_value(<<"value">>, JObj)|Acc]
%%--------------------------------------------------------------------
%% @public
%% @doc
%% Encode the JObj and send it in chunked. Start chunk response if
%% chunk response is not started yet.
%% Encode the JObj and send it as a chunk. Start the chunk response if it
%% is not started yet.
%% @end
%%--------------------------------------------------------------------
-spec chunk_send_jsons(cb_cowboy_payload(), kz_json:objects()) ->
@@ -555,17 +557,19 @@
Payload
end.

%% private
%% @private
-spec do_encode_to_json(kz_json:objects()) -> binary().
do_encode_to_json(JObjs) ->
Encoded = kz_json:encode(JObjs),
%% remove first "[" and last "]" from json
binary:part(Encoded, 1, size(Encoded) - 2).
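Stripping the array brackets lets successive chunks be concatenated with commas inside one JSON envelope. A sketch of what this produces (payload invented, and the private helper called directly just for illustration):

    1> JObjs = [kz_json:from_list([{<<"id">>, <<"a">>}])
               ,kz_json:from_list([{<<"id">>, <<"b">>}])].
    2> kz_json:encode(JObjs).
    <<"[{\"id\":\"a\"},{\"id\":\"b\"}]">>
    3> do_encode_to_json(JObjs).
    <<"{\"id\":\"a\"},{\"id\":\"b\"}">>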

%% @public
-spec init_chunk_stream(cb_cowboy_payload()) -> cb_cowboy_payload().
init_chunk_stream({_, Context}=Payload) ->
init_chunk_stream(Payload, cb_context:fetch(Context, 'started_chunk')).
init_chunk_stream(Payload, cb_context:fetch(Context, 'chunk_response_type')).

-spec init_chunk_stream(cb_cowboy_payload(), 'json' | 'csv') -> cb_cowboy_payload().
-spec init_chunk_stream(cb_cowboy_payload(), chunk_resp_type()) -> cb_cowboy_payload().
init_chunk_stream({Req, Context}, 'json') ->
Headers = cowboy_req:get('resp_headers', Req),
{'ok', Req1} = cowboy_req:chunked_reply(200, Headers, Req),
@@ -587,7 +591,9 @@ init_chunk_stream({Req, Context}, 'csv') ->
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Load view results based on options.
%% Load view results based on options. If the request is chunked,
%% finish the chunk if it has started, set is_chunked and return
%% the cb_cowboy_payload() back to api_resource and api_util.
%% @end
%%--------------------------------------------------------------------
-spec load_view(load_params() | cb_context:context(), options()) -> cb_context:context() | cb_cowboy_payload().
Expand All @@ -600,7 +606,8 @@ load_view(#{is_chunked := 'true', chunk_response_type := 'csv', cowboy_req := Re
,started_chunk := StartedChunk
} = get_results(LoadMap#{cowboy_req => Req}),
case cb_context:resp_status(Context) of
'success' when StartedChunk ->
_ when StartedChunk ->
%% covers both success and failed result (in the middle of a chunked resp)
{Req1, cb_context:store(Context, 'is_chunked', 'true')};
_ -> {Req1, Context}
end;
@@ -632,13 +639,29 @@ get_results(#{databases := Dbs}=LoadMap) ->
%% Fold over the databases, fetching results from each and counting the total.
%% If pagination is requested, keep track of the last key.
%% If `page_size` is not in the options, make an unlimited get_results query.
%%
%% Depending on whether the query is chunked, limited or unlimited, get
%% the correct limit for this loop (if it's a limited query) and do the query.
%%
%% We query with limit + 1 to get an extra object (if available) and
%% use that last object's key as the `next_start_key`. If the page size
%% has been satisfied and the last key has been found, return the result;
%% if the last key is not defined, query the next DBs until all DBs are exhausted.
%%
%% If `chunk_size` is lower than the sum of `total_queried` and
%% `current_db_length`, we set the chunk_size as the limit. In this
%% case the db may return up to limit results; if the last_key
%% is defined, the db has more results to give, so we query
%% the same db again until the page size is satisfied or no last_key is
%% defined. In that case, if the page size is not exhausted yet, we query
%% the next db.
%%
%% @end
%%--------------------------------------------------------------------
-spec fold_query(ne_binaries(), load_params()) -> load_params().
fold_query([], #{context := Context}=LoadMap) ->
lager:debug("databases exhausted"),
LoadMap#{context := cb_context:set_resp_status(Context, 'success')};
%% query is limited by page_size
fold_query([Db|RestDbs]=Dbs, #{view := View
,view_options := ViewOpts
,direction := Direction
Expand Down Expand Up @@ -671,9 +694,10 @@ fold_query([Db|RestDbs]=Dbs, #{view := View
lager:debug("either the db ~s or view ~s was not found", [Db, View]),
LoadMap#{context => crossbar_util:response_missing_view(Context)};
{'error', 'not_found'} ->
lager:debug("either the db ~s or view ~s was not found", [Db, View]),
lager:debug("either the db ~s or view ~s was not found, querying next db...", [Db, View]),
fold_query(RestDbs, LoadMap);
{'error', Error} ->
lager:debug("failed to query view ~s from db ~s: ~p", [View, Db, Error]),
LoadMap#{context => crossbar_doc:handle_datamgr_errors(Error, View, Context)};
{'ok', JObjs} ->
%% catching crashes when applying the user's map functions (filter map and chunk map)
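A minimal sketch of the limit-plus-one trick described above, assuming the usual kz_datamgr:get_results/3 entry point (names illustrative, error handling elided):

    %% ask the view for one row more than the page still needs
    Limit = PageSize - TotalQueried + 1,
    {'ok', JObjs} = kz_datamgr:get_results(Db, View, [{'limit', Limit} | ViewOpts]),
    case length(JObjs) of
        Limit ->
            %% the extra row's key becomes next_start_key for the paging envelope
            LastKey = kz_json:get_value(<<"key">>, lists:last(JObjs)),
            {lists:droplast(JObjs), LastKey};
        _Short ->
            %% fewer rows than asked for: this db is exhausted, no last key
            {JObjs, 'undefined'}
    end.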
@@ -691,10 +715,10 @@
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Apply filter to result, find last key and if chunk is requested
%% Apply the filter to the result, find the last key and, if it's a chunked query,
%% apply chunked mapper function.
%% Then check page_size, limit, result length and chunk size to see
%% we're done or shall continue.
%% Then, based on page_size, limit, result length and last key, see
%% whether we're done or shall continue.
%% @end
%%--------------------------------------------------------------------
-spec handle_query_result(load_params(), ne_binaries(), kz_json:objects(), api_pos_integer()) ->
@@ -731,6 +755,8 @@
-spec check_page_size_and_length(load_params(), non_neg_integer(), non_neg_integer() | 'undefined', last_key()) ->
{'exhausted' | 'next_db' | 'same_db', load_params()}.
%% page_size is exhausted when query is limited by page_size
%% Condition: page_size = total_queried + current_db_results
%% and the last key has been found.
check_page_size_and_length(#{context := Context
,page_size := PageSize
,total_queried := TotalQueried
@@ -747,6 +773,7 @@
}
};
%% query next chunk from same db when query is chunked
%% Condition: the current last_key has been found and it's not equal to the previous last_key
check_page_size_and_length(#{total_queried := TotalQueried, last_key := OldLastKey}=LoadMap, Length, _Limit, LastKey)
when OldLastKey =/= LastKey,
LastKey =/= 'undefined' ->
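Worked through with invented numbers, the three outcomes look like this:

    %% page_size = 50, total_queried = 30, db returned 20 rows, last key found
    %%   -> 'exhausted'  (50 == 30 + 20; the page is full)
    %% last_key found and different from the previous last_key
    %%   -> 'same_db'    (chunked query; ask the same db for the remainder)
    %% otherwise
    %%   -> 'next_db'    (this db has nothing more to give)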
Expand All @@ -771,7 +798,7 @@ check_page_size_and_length(#{total_queried := TotalQueried}=LoadMap, Length, _Li
%%--------------------------------------------------------------------
-spec limit_with_last_key(boolean(), api_pos_integer(), pos_integer(), non_neg_integer()) ->
api_pos_integer().
%% non-chunked unlimited request
%% non-chunked unlimited request => no limit
limit_with_last_key('false', 'undefined', _, _) ->
'undefined';
%% non-chunked limited request
@@ -996,7 +1023,7 @@ finish_chunked_json_response(#{total_queried := TotalQueried
,context := Context
}=LoadMap) ->
%% Because the chunk is already started, close the envelope
%% no matter what Context resp_status is success or not.
%% whether or not the Context resp_status is success.
NextStartKey = maps:get(last_key, LoadMap, 'undefined'),
StartKey = maps:get(start_key, LoadMap, 'undefined'),
EnvJObj = add_paging(StartKey, TotalQueried, NextStartKey, cb_context:resp_envelope(Context)),
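So the closing envelope presumably ends up with paging fields along these lines (field names inferred from add_paging's arguments, values invented):

    %% "page_size": 50,
    %% "start_key": 63688000000,
    %% "next_start_key": 63688012345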
14 changes: 9 additions & 5 deletions applications/crossbar/src/modules/cb_whitelabel.erl
@@ -710,11 +710,15 @@ find_whitelabel_binary_meta(Context, Domain, AttachType) ->
-spec whitelabel_binary_meta(cb_context:context(), ne_binary()) ->
'undefined' | {ne_binary(), kz_json:object()}.
whitelabel_binary_meta(Context, AttachType) ->
JObj = kz_doc:attachments(cb_context:doc(Context), kz_json:new()),
case whitelabel_attachment_id(JObj, AttachType) of
'undefined' -> 'undefined';
AttachmentId ->
{AttachmentId, kz_json:get_value(AttachmentId, JObj)}
case cb_context:resp_status(Context) of
'success' ->
JObj = kz_doc:attachments(cb_context:doc(Context), kz_json:new()),
case whitelabel_attachment_id(JObj, AttachType) of
'undefined' -> 'undefined';
AttachmentId ->
{AttachmentId, kz_json:get_value(AttachmentId, JObj)}
end;
_ -> 'undefined'
end.

-spec whitelabel_attachment_id(kz_json:object(), ne_binary()) ->
14 changes: 9 additions & 5 deletions core/kazoo_schemas/test/kz_json_schema_test.erl
@@ -152,16 +152,20 @@ get_schema_sms()
default_object_test() ->
Schema = get_schema(),
Default = kz_json_schema:default_object(Schema),
[?_assertEqual({[{<<"caller_id">>,{[{<<"emergency">>,{[{<<"name">>,<<"emer_default">>}]}}]}}]}, Default)].
[?_assertEqual(kz_json:from_list_recursive([{<<"caller_id">>, [{<<"emergency">>,[{<<"name">>,<<"emer_default">>}]}]}])
,Default
)
].

flatten_sms_schema_test() ->
SMSSchema = get_schema_sms(),
Flat = kz_json_schema:flatten(SMSSchema),

[?_assertEqual(Flat, {[{[<<"outbound">>,<<"options">>,<<"default">>], {[{<<"delivery_mode">>,2},{<<"mandatory">>,true}]}}
,{[<<"outbound">>,<<"options">>,<<"description">>], <<"sms options">>}
,{[<<"outbound">>,<<"options">>,<<"type">>],<<"object">>}]
})].
JObj = kz_json:from_list_recursive([{<<"outbound">>, [{<<"options">>, [{<<"default">>, [{<<"delivery_mode">>,2},{<<"mandatory">>,true}]}]}]}
,{[<<"outbound">>,<<"options">>,<<"description">>], <<"sms options">>}
,{[<<"outbound">>,<<"options">>,<<"type">>],<<"object">>}
]),
[?_assertEqual(Flat, JObj)].
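kz_json:from_list_recursive/1 builds the same nested object the raw EJSON tuples used to spell out, so the assertions no longer depend on the internal {[...]} representation:

    1> kz_json:from_list_recursive([{<<"caller_id">>, [{<<"emergency">>, [{<<"name">>, <<"emer_default">>}]}]}]).
    %% same value as the raw form:
    %% {[{<<"caller_id">>,{[{<<"emergency">>,{[{<<"name">>,<<"emer_default">>}]}}]}}]}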

did_duplication_test() ->
SrvA = kz_json:from_list([{<<"DIDs">>,kz_json:new()}
2 changes: 1 addition & 1 deletion doc/mkdocs/Makefile
@@ -14,7 +14,7 @@ docs-build: $(LOCAL)
@$(shell cp $< $@)
@$(shell cp "$*.md" $(DOCS_ROOT)/docs/index.md)
@if [ -f $(DOCS_ROOT)/theme/global.yml ]; then cat $(DOCS_ROOT)/theme/global.yml >> $@ ; fi
@echo "\ntheme: null\ntheme_dir: '$(DOCS_ROOT)/theme'\ndocs_dir: '$(DOCS_ROOT)/docs'\n" >> $@
@$(echo -e "\ntheme: null\ntheme_dir: '$(DOCS_ROOT)/theme'\ndocs_dir: '$(DOCS_ROOT)/docs'\n" >> $@)
@echo "building $*"
@mkdocs build -f $@ --clean -q --site-dir "$(DOCS_ROOT)/site/$<"

4 changes: 4 additions & 0 deletions scripts/setup_docs.bash
@@ -6,6 +6,10 @@ pushd $(dirname $0) > /dev/null
cd $(pwd -P)/..
DOCS_ROOT=`readlink -f ./doc/mkdocs`

if [ -z `command -v cpio` ] ; then
echo "cpio command is not available, please install it" && exit 1
fi

find {scripts,doc,core,applications} -type f -path "doc/mkdocs*" -prune -o -regex ".+\.\(md\|png\|jpg\|svg|json\)$" -print | cpio -p -dum --quiet $DOCS_ROOT/docs

popd > /dev/null
