From a50b5189221ef009d711fed5933e2319989f4f62 Mon Sep 17 00:00:00 2001 From: Wilson Li Date: Mon, 21 Nov 2016 17:48:08 +0900 Subject: [PATCH] Added Latency and Cache Status in Access Log --- include/leo_gateway.hrl | 112 ++++++++++++++++---------- src/leo_gateway_http_commons.erl | 132 +++++++++++++++---------------- src/leo_gateway_s3_api.erl | 49 ++++++++---- 3 files changed, 169 insertions(+), 124 deletions(-) diff --git a/include/leo_gateway.hrl b/include/leo_gateway.hrl index 0f6a23f..7e615e3 100644 --- a/include/leo_gateway.hrl +++ b/include/leo_gateway.hrl @@ -351,168 +351,198 @@ end end). --define(access_log_get(_Bucket,_Path,_Size,_Response), +-define(access_log_get(_Bucket,_Path,_Size,_Response,_Begin), + begin + ?access_log_get(_Bucket,_Path,_Size,_Response,_Begin,"miss") + end). +-define(access_log_get(_Bucket,_Path,_Size,_Response,_Begin,_Cache), begin {_OrgPath, _ChildNum} = ?get_child_num(binary_to_list(_Path)), + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[GET]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[GET]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), _OrgPath, _ChildNum, _Size, leo_date:date_format(), leo_date:clock(), - _Response]}}) + _Response, + _Latency, + _Cache + ]} + }) %% ?notify_metrics(<<"GET">>,_Bucket,_Size) end). 
--define(access_log_put(_Bucket,_Path,_Size,_Response), +-define(access_log_put(_Bucket,_Path,_Size,_Response,_Begin), begin + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), {_OrgPath, _ChildNum} = ?get_child_num(binary_to_list(_Path)), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[PUT]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[PUT]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), _OrgPath, _ChildNum, _Size, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) %% ?notify_metrics(<<"PUT">>,_Bucket,_Size) end). --define(access_log_delete(_Bucket,_Path,_Size,_Response), +-define(access_log_delete(_Bucket,_Path,_Size,_Response,_Begin), begin {_OrgPath, _ChildNum} = ?get_child_num(binary_to_list(_Path)), + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[DELETE]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[DELETE]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), _OrgPath, _ChildNum, _Size, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) %% ?notify_metrics(<<"DELETE">>,_Bucket,_Size) end). --define(access_log_head(_Bucket,_Path,_Response), +-define(access_log_head(_Bucket,_Path,_Response,_Begin), begin {_OrgPath, _ChildNum} = ?get_child_num(binary_to_list(_Path)), + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[HEAD]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[HEAD]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), _OrgPath, _ChildNum, 0, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) end). 
--define(access_log(_Method,_Bucket,_Path,_Size,_Response), +-define(access_log(_Method,_Bucket,_Path,_Size,_Response,_Begin), begin case _Method of get -> - ?access_log_get(_Bucket,_Path,_Size,_Response); + ?access_log_get(_Bucket,_Path,_Size,_Response,_Begin); put -> - ?access_log_put(_Bucket,_Path,_Size,_Response); + ?access_log_put(_Bucket,_Path,_Size,_Response,_Begin); delete -> - ?access_log_delete(_Bucket,_Path,_Size,_Response); + ?access_log_delete(_Bucket,_Path,_Size,_Response,_Begin); head -> - ?access_log_head(_Bucket,_Path,_Response) + ?access_log_head(_Bucket,_Path,_Response,_Begin) end end). %% access-log for buckets --define(access_log_bucket_put(_Bucket,_Response), +-define(access_log_bucket_put(_Bucket,_Response,_Begin), begin + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[BUCKET-PUT]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[BUCKET-PUT]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), "", 0, 0, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) %% ?notify_metrics(<<"PUT">>,_Bucket,_Size) end). --define(access_log_bucket_delete(_Bucket,_Response), +-define(access_log_bucket_delete(_Bucket,_Response,_Begin), begin + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[BUCKET-DELETE]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[BUCKET-DELETE]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), "", 0, 0, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) %% ?notify_metrics(<<"DELETE">>,_Bucket,_Size) end). 
--define(access_log_bucket_head(_Bucket,_Response), +-define(access_log_bucket_head(_Bucket,_Response,_Begin), begin + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[BUCKET-HEAD]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[BUCKET-HEAD]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), "", 0, 0, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) end). --define(access_log_bucket_get(_Bucket, _Prefix, _Response), +-define(access_log_bucket_get(_Bucket, _Prefix, _Response,_Begin), begin + _Latency = erlang:round((leo_date:clock() - _Begin) / 1000), leo_logger_client_base:append( {?LOG_ID_ACCESS, - #message_log{format = "[BUCKET-GET]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\n", + #message_log{format = "[BUCKET-GET]\t~s\t~s\t~w\t~w\t~s\t~w\t~w\t~w\t~s\n", message = [binary_to_list(_Bucket), binary_to_list(_Prefix), 0, 0, leo_date:date_format(), leo_date:clock(), - _Response + _Response, + _Latency, + "" ]} }) end). --define(reply_fun(_Cause,_Method,_Bucket,_Key,_Len), +-define(reply_fun(_Cause,_Method,_Bucket,_Key,_Len,_Begin), case _Cause of not_found -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_NOT_FOUND); + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_NOT_FOUND,_Begin); unavailable -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_SERVICE_UNAVAILABLE); + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_SERVICE_UNAVAILABLE,_Begin); timeout -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_GATEWAY_TIMEOUT); + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_GATEWAY_TIMEOUT,_Begin); bad_range -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_BAD_RANGE); + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_BAD_RANGE,_Begin); _ -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_INTERNAL_ERROR) + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_INTERNAL_ERROR,_Begin) end). 
--define(reply_fun(_Cause,_Method,_Bucket,_Key,_Len,_Req), +-define(reply_fun(_Cause,_Method,_Bucket,_Key,_Len,_Req,_Begin), case _Cause of not_found -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_NOT_FOUND), + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_NOT_FOUND,_Begin), case _Method of delete -> ?reply_no_content([?SERVER_HEADER], Req); @@ -522,15 +552,15 @@ ?reply_not_found([?SERVER_HEADER],_Key, <<>>,_Req) end; unavailable -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_SERVICE_UNAVAILABLE), + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_SERVICE_UNAVAILABLE,_Begin), ?reply_service_unavailable_error([?SERVER_HEADER],_Key, <<>>,_Req); timeout -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_GATEWAY_TIMEOUT), + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_GATEWAY_TIMEOUT,_Begin), ?reply_timeout([?SERVER_HEADER],_Key, <<>>,_Req); bad_range -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_BAD_RANGE), + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_BAD_RANGE,_Begin), ?reply_bad_range([?SERVER_HEADER], Key, <<>>, Req); _ -> - ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_INTERNAL_ERROR), + ?access_log(_Method,_Bucket,_Key,_Len, ?HTTP_ST_INTERNAL_ERROR,_Begin), ?reply_internal_error([?SERVER_HEADER],_Key, <<>>,_Req) end). 
diff --git a/src/leo_gateway_http_commons.erl b/src/leo_gateway_http_commons.erl index 8e184fd..9218d7e 100644 --- a/src/leo_gateway_http_commons.erl +++ b/src/leo_gateway_http_commons.erl @@ -282,6 +282,7 @@ get_object(Req, Key, #req_params{bucket_name = BucketName, custom_header_settings = CustomHeaderSettings, has_inner_cache = HasInnerCache, sending_chunked_obj_len = SendChunkLen}) -> + BeginTime = leo_date:clock(), case leo_gateway_rpc_handler:get(Key) of %% For regular case (NOT a chunked object) {ok, #?METADATA{cnumber = 0, @@ -302,7 +303,6 @@ get_object(Req, Key, #req_params{bucket_name = BucketName, void end, - ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, ?HTTP_ST_OK), Headers = [?SERVER_HEADER, {?HTTP_HEAD_RESP_CONTENT_TYPE, Mime}, {?HTTP_HEAD_RESP_ETAG, ?http_etag(Meta#?METADATA.checksum)}, @@ -320,6 +320,8 @@ get_object(Req, Key, #req_params{bucket_name = BucketName, leo_net:chunked_send( Transport, Socket, RespObject, SendChunkLen) end, + + ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, ?HTTP_ST_OK, BeginTime), ?reply_ok(Headers2, {Meta#?METADATA.dsize, BodyFunc}, Req); %% For a chunked object. @@ -347,16 +349,16 @@ get_object(Req, Key, #req_params{bucket_name = BucketName, try Ret = leo_large_object_get_handler:get( Pid, TotalChunkedObjs, Req, Meta), - reply_fun(Ret, get, BucketName, Key, ObjLen), + reply_fun(Ret, get, BucketName, Key, ObjLen, BeginTime), ok after - ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, 0), + ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, 0, BeginTime), catch leo_large_object_get_handler:stop(Pid) end end, cowboy_req:reply(?HTTP_ST_OK, Headers2, {Meta#?METADATA.dsize, BodyFunc}, Req); {error, Cause} -> - reply_fun({error, Cause}, get, BucketName, Key, 0, Req) + reply_fun({error, Cause}, get, BucketName, Key, 0, Req, BeginTime) end. 
@@ -366,6 +368,7 @@ get_object(Req, Key, #req_params{bucket_name = BucketName, get_object_with_cache(Req, Key, CacheObj, #req_params{bucket_name = BucketName, custom_header_settings = CustomHeaderSettings, sending_chunked_obj_len = SendChunkLen}) -> + BeginTime = leo_date:clock(), Path = CacheObj#cache.file_path, HasDiskCache = case Path of [] -> @@ -397,7 +400,6 @@ get_object_with_cache(Req, Key, CacheObj, #req_params{bucket_name = BucketName, case file:open(Path, [raw, read]) of {ok, Fd} -> - ?access_log_get(BucketName, Key, CacheObj#cache.size, ?HTTP_ST_OK), BodyFunc = fun(Socket,_Transport) -> case file:sendfile(Fd, Socket, 0, 0, [{chunk_size, SendChunkLen}]) of @@ -412,31 +414,32 @@ get_object_with_cache(Req, Key, CacheObj, #req_params{bucket_name = BucketName, _ = file:close(Fd), ok end, + + ?access_log_get(BucketName, Key, CacheObj#cache.size, ?HTTP_ST_OK, BeginTime, "hit:disk-cache"), cowboy_req:reply(?HTTP_ST_OK, Headers2, {CacheObj#cache.size, BodyFunc}, Req); {error, Reason} -> - ?access_log_get(BucketName, Key, 0, ?HTTP_ST_INTERNAL_ERROR), - catch leo_cache_api:delete(Key), ?warn("get_object_with_cache/4", [{key, Path}, {summary, ?ERROR_COULD_NOT_OPEN_DISK_CACHE}, {cause, Reason}]), + + ?access_log_get(BucketName, Key, 0, ?HTTP_ST_INTERNAL_ERROR, BeginTime, "hit:disk-cache"), ?reply_internal_error([?SERVER_HEADER], Key, <<>>, Req) end; {error, Reason} -> - ?access_log_get(BucketName, Key, 0, ?HTTP_ST_INTERNAL_ERROR), - catch leo_cache_api:delete(Key), ?warn("get_object_with_cache/4", [{key, Path}, {summary, ?ERROR_COULD_NOT_OPEN_DISK_CACHE}, {cause, Reason}]), + + ?access_log_get(BucketName, Key, 0, ?HTTP_ST_INTERNAL_ERROR, BeginTime), ?reply_internal_error([?SERVER_HEADER], Key, <<>>, Req) end; %% HIT: get an object from memory-cache {ok, match} when Path == [] -> - ?access_log_get(BucketName, Key, CacheObj#cache.size, ?HTTP_ST_OK), Headers = [?SERVER_HEADER, {?HTTP_HEAD_RESP_CONTENT_TYPE, CacheObj#cache.content_type}, {?HTTP_HEAD_RESP_ETAG, 
?http_etag(CacheObj#cache.etag)}, @@ -455,12 +458,13 @@ get_object_with_cache(Req, Key, CacheObj, #req_params{bucket_name = BucketName, leo_net:chunked_send( Transport, Socket, CacheObj#cache.body, SendChunkLen) end, + + ?access_log_get(BucketName, Key, CacheObj#cache.size, ?HTTP_ST_OK, BeginTime, "hit:mem-cache"), ?reply_ok(Headers2, {CacheObj#cache.size, BodyFunc}, Req); %% MISS: get an object from storage (small-size) {ok, #?METADATA{cnumber = 0, meta = CMetaBin} = Meta, RespObject} -> - ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, ?HTTP_ST_OK), Mime = leo_mime:guess_mime(Key), Val = term_to_binary(#cache{etag = Meta#?METADATA.checksum, mtime = Meta#?METADATA.timestamp, @@ -486,6 +490,8 @@ get_object_with_cache(Req, Key, CacheObj, #req_params{bucket_name = BucketName, leo_net:chunked_send( Transport, Socket, RespObject, SendChunkLen) end, + + ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, ?HTTP_ST_OK, BeginTime), ?reply_ok(Headers2, {Meta#?METADATA.dsize, BodyFunc}, Req); %% MISS: get an object from storage (large-size) @@ -513,25 +519,26 @@ get_object_with_cache(Req, Key, CacheObj, #req_params{bucket_name = BucketName, try Ret = leo_large_object_get_handler:get( Pid, TotalChunkedObjs, Req, Meta), - reply_fun(Ret, get, BucketName, Key, ObjLen), + reply_fun(Ret, get, BucketName, Key, ObjLen, BeginTime), ok after - ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, 0), + ?access_log_get(BucketName, Key, Meta#?METADATA.dsize, 0, BeginTime), catch leo_large_object_get_handler:stop(Pid) end end, cowboy_req:reply(?HTTP_ST_OK, Headers2, {Meta#?METADATA.dsize, BodyFunc}, Req); {error, Cause} -> - reply_fun({error, Cause}, get, BucketName, Key, 0, Req) + reply_fun({error, Cause}, get, BucketName, Key, 0, Req, BeginTime) end. %% @doc MOVE/COPY an object -spec(move_large_object(#?METADATA{}, binary(), #req_params{}) -> ok | {error, any()}). 
move_large_object(#?METADATA{key = Key, cnumber = TotalChunkedObjs} = SrcMeta, DestKey, Params) -> + BeginTime = leo_date:clock(), {ok, ReadHandler} = leo_large_object_move_handler:start_link(Key, 0, TotalChunkedObjs), try - move_large_object(SrcMeta, DestKey, Params, ReadHandler) + move_large_object(SrcMeta, DestKey, Params, ReadHandler, BeginTime) after catch leo_large_object_move_handler:stop(ReadHandler) end. @@ -540,7 +547,7 @@ move_large_object(#?METADATA{dsize = Size}, DestKey, #req_params{chunked_obj_len = ChunkedSize, custom_metadata = CMeta, bucket_name = BucketName, - bucket_info = BucketInfo}, ReadHandler) -> + bucket_info = BucketInfo}, ReadHandler, BeginTime) -> {ok, WriteHandler} = leo_large_object_put_handler:start_link( BucketInfo, DestKey, ChunkedSize), @@ -553,7 +560,7 @@ move_large_object(#?METADATA{dsize = Size}, DestKey, key = DestKey, meta = CMeta, length = Size, - chunked_size = ChunkedSize}, ReadHandler) of + chunked_size = ChunkedSize}, ReadHandler, BeginTime) of ok -> ok; {error, Cause} -> @@ -567,12 +574,12 @@ move_large_object(#?METADATA{dsize = Size}, DestKey, %% @private move_large_object_1({ok, Data}, #req_large_obj{key = Key, - handler = WriteHandler} = ReqLargeObj, ReadHandler) -> + handler = WriteHandler} = ReqLargeObj, ReadHandler, BeginTime) -> case catch leo_large_object_put_handler:put(WriteHandler, Data) of ok -> move_large_object_1( leo_large_object_move_handler:get_chunk_obj(ReadHandler), - ReqLargeObj, ReadHandler); + ReqLargeObj, ReadHandler, BeginTime); {'EXIT', Cause} -> ?error("move_large_object_1/3", [{key, binary_to_list(Key)}, {cause, Cause}]), @@ -583,7 +590,7 @@ move_large_object_1({ok, Data}, {error, ?ERROR_FAIL_PUT_OBJ} end; move_large_object_1({error, Cause}, - #req_large_obj{key = Key},_ReadHandler) -> + #req_large_obj{key = Key},_ReadHandler,_) -> ?error("move_large_object_1/3", [{key, binary_to_list(Key)}, {cause, Cause}]), {error, ?ERROR_FAIL_RETRIEVE_OBJ}; @@ -593,7 +600,7 @@ move_large_object_1(done, 
#req_large_obj{handler = WriteHandler, key = Key, meta = CMeta, length = Size, - chunked_size = ChunkedSize},_ReadHandler) -> + chunked_size = ChunkedSize},_ReadHandler,BeginTime) -> case catch leo_large_object_put_handler:result(WriteHandler) of {ok, #large_obj_info{length = TotalSize, num_of_chunks = TotalChunks, @@ -610,7 +617,7 @@ move_large_object_1(done, #req_large_obj{handler = WriteHandler, digest = Digest_1, bucket_info = BucketInfo}) of {ok, _ETag} -> - ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK), + ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK, BeginTime), ok; {error, timeout = Cause} -> {error, Cause}; @@ -635,12 +642,13 @@ put_object(Req, Key, #req_params{bucket_name = BucketName, threshold_of_chunk_len = ThresholdObjLen, transfer_decode_fun = TransferDecodeFun, transfer_decode_state = TransferDecodeState} = Params) -> + BeginTime = leo_date:clock(), {Size, _} = cowboy_req:body_length(Req), ?debug("put_object/3", "Object Size: ~p", [Size]), case (Size >= ThresholdObjLen) of true when Size >= MaxLenForObj -> - ?access_log_put(BucketName, Key, 0, ?HTTP_ST_BAD_REQ), + ?access_log_put(BucketName, Key, 0, ?HTTP_ST_BAD_REQ, BeginTime), ?reply_bad_request([?SERVER_HEADER], ?XML_ERROR_CODE_EntityTooLarge, ?XML_ERROR_MSG_EntityTooLarge, @@ -693,6 +701,7 @@ put_small_object({ok, {Size, Bin, Req}}, Key, #req_params{bucket_name = BucketNa upload_part_num = UploadPartNum, has_inner_cache = HasInnerCache, bucket_info = BucketInfo}) -> + BeginTime = leo_date:clock(), case leo_gateway_rpc_handler:put(#put_req_params{path = Key, body = Bin, meta = CMeta, @@ -718,12 +727,12 @@ put_small_object({ok, {Size, Bin, Req}}, Key, #req_params{bucket_name = BucketNa void end, - ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK), Header = [?SERVER_HEADER, {?HTTP_HEAD_RESP_ETAG, ?http_etag(ETag)}], + ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK, BeginTime), ?reply_ok(Header, Req); {error, Cause} -> - reply_fun({error, Cause}, put, BucketName, Key, 0, Req) + 
reply_fun({error, Cause}, put, BucketName, Key, 0, Req, BeginTime) end. @@ -739,6 +748,7 @@ put_large_object(Req, Key, Size, #req_params{bucket_name = BucketName, reading_chunked_obj_len = ReadingChunkedSize, transfer_decode_fun = TransferDecodeFun, transfer_decode_state = TransferDecodeState})-> + BeginTime = leo_date:clock(), %% launch 'large_object_handler' {ok, Handler} = leo_large_object_put_handler:start_link(BucketInfo, Key, ChunkedSize), @@ -775,9 +785,9 @@ put_large_object(Req, Key, Size, #req_params{bucket_name = BucketName, true -> ErrorRet; false -> {Req, ErrorRet} end, - reply_fun({error, Cause}, put, BucketName, Key, Size, Req_1); + reply_fun({error, Cause}, put, BucketName, Key, Size, Req_1, BeginTime); Ret -> - ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK), + ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK, BeginTime), Ret end, catch leo_large_object_put_handler:stop(Handler), @@ -878,6 +888,7 @@ put_large_object_1({ok, Data, Req}, #req_large_obj{handler = Handler, -spec(delete_object(cowboy_req:req(), binary(), #req_params{}) -> {ok, cowboy_req:req()}). delete_object(Req, Key, #req_params{bucket_name = BucketName}) -> + BeginTime = leo_date:clock(), Size1 = case leo_gateway_rpc_handler:head(Key) of {ok, #?METADATA{del = 0, dsize = Size}} -> Size; @@ -887,10 +898,10 @@ delete_object(Req, Key, #req_params{bucket_name = BucketName}) -> case leo_gateway_rpc_handler:delete(Key) of ok -> - ?access_log_delete(BucketName, Key, Size1, ?HTTP_ST_NO_CONTENT), + ?access_log_delete(BucketName, Key, Size1, ?HTTP_ST_NO_CONTENT, BeginTime), ?reply_no_content([?SERVER_HEADER], Req); {error, Cause} -> - reply_fun({error, Cause}, delete, BucketName, Key, 0, Req) + reply_fun({error, Cause}, delete, BucketName, Key, 0, Req, BeginTime) end. @@ -898,11 +909,11 @@ delete_object(Req, Key, #req_params{bucket_name = BucketName}) -> -spec(head_object(cowboy_req:req(), binary(), #req_params{}) -> {ok, cowboy_req:req()}). 
head_object(Req, Key, #req_params{bucket_name = BucketName}) -> + BeginTime = leo_date:clock(), case leo_gateway_rpc_handler:head(Key) of {ok, #?METADATA{del = 0, meta = CMetaBin} = Meta} -> Timestamp = leo_http:rfc1123_date(Meta#?METADATA.timestamp), - ?access_log_head(BucketName, Key, ?HTTP_ST_OK), Headers = [?SERVER_HEADER, {?HTTP_HEAD_RESP_CONTENT_TYPE, leo_mime:guess_mime(Key)}, {?HTTP_HEAD_RESP_ETAG, ?http_etag(Meta#?METADATA.checksum)}, @@ -915,12 +926,13 @@ head_object(Req, Key, #req_params{bucket_name = BucketName}) -> CMeta = binary_to_term(CMetaBin), CMeta ++ Headers end, + ?access_log_head(BucketName, Key, ?HTTP_ST_OK, BeginTime), cowboy_req:reply(?HTTP_ST_OK, Headers2, fun() -> void end, Req); {ok, #?METADATA{del = 1}} -> - ?access_log_head(BucketName, Key, ?HTTP_ST_NOT_FOUND), + ?access_log_head(BucketName, Key, ?HTTP_ST_NOT_FOUND, BeginTime), ?reply_not_found_without_body([?SERVER_HEADER], Req); {error, Cause} -> - reply_fun({error, Cause}, head, BucketName, Key, 0, Req) + reply_fun({error, Cause}, head, BucketName, Key, 0, Req, BeginTime) end. @@ -930,15 +942,16 @@ head_object(Req, Key, #req_params{bucket_name = BucketName}) -> range_object(Req, Key, #req_params{bucket_name = BucketName, range_header = RangeHeader, sending_chunked_obj_len = SendChunkLen}) -> + BeginTime = leo_date:clock(), Range = cowboy_http:range(RangeHeader), - get_range_object(Req, BucketName, Key, Range, SendChunkLen). + get_range_object(Req, BucketName, Key, Range, SendChunkLen, BeginTime). 
%% @private -get_range_object(Req, BucketName, Key, {error, badarg}, _) -> - ?access_log_get(BucketName, Key, 0, ?HTTP_ST_BAD_RANGE), +get_range_object(Req, BucketName, Key, {error, badarg}, _, BeginTime) -> + ?access_log_get(BucketName, Key, 0, ?HTTP_ST_BAD_RANGE, BeginTime), ?reply_bad_range([?SERVER_HEADER], Key, <<>>, Req); -get_range_object(Req, BucketName, Key, {_Unit, Range}, SendChunkLen) when is_list(Range) -> +get_range_object(Req, BucketName, Key, {_Unit, Range}, SendChunkLen, BeginTime) when is_list(Range) -> case get_body_length(Key, Range) of {ok, Length} -> case leo_gateway_rpc_handler:head(Key) of @@ -966,30 +979,16 @@ get_range_object(Req, BucketName, Key, {_Unit, Range}, SendChunkLen) when is_lis Req), ?reply_partial_content(Headers2, Req2); {ok, #?METADATA{del = 1}} -> - ?access_log_head(BucketName, Key, ?HTTP_ST_NOT_FOUND), - ?reply_not_found_without_body([?SERVER_HEADER], Req); - {error, not_found} -> - ?access_log_head(BucketName, Key, ?HTTP_ST_NOT_FOUND), + ?access_log_get(BucketName, Key, 0, ?HTTP_ST_NOT_FOUND, BeginTime), ?reply_not_found_without_body([?SERVER_HEADER], Req); - {error, unavailable} -> - ?reply_service_unavailable_error([?SERVER_HEADER], Key, <<>>, Req); - {error, ?ERR_TYPE_INTERNAL_ERROR} -> - ?access_log_head(BucketName, Key, ?HTTP_ST_INTERNAL_ERROR), - ?reply_internal_error_without_body([?SERVER_HEADER], Req); - {error, timeout} -> - ?access_log_head(BucketName, Key, ?HTTP_ST_GATEWAY_TIMEOUT), - ?reply_timeout_without_body([?SERVER_HEADER], Req) + {error, Cause} -> + reply_fun({error, Cause}, get, BucketName, Key, 0, Req, BeginTime) end; {error, bad_range} -> + ?access_log_get(BucketName, Key, 0, ?HTTP_ST_BAD_RANGE, BeginTime), ?reply_bad_range([?SERVER_HEADER], Key, <<>>, Req); - {error, unavailable} -> - ?reply_service_unavailable_error([?SERVER_HEADER], Key, <<>>, Req); - {error, timeout} -> - ?reply_timeout_without_body([?SERVER_HEADER], Req); - {error, ?ERR_TYPE_INTERNAL_ERROR} -> - 
?reply_internal_error_without_body([?SERVER_HEADER], Req); - _ -> - ?reply_not_found_without_body([?SERVER_HEADER], Req) + {error, Cause} -> + reply_fun({error, Cause}, get, BucketName, Key, 0, Req, BeginTime) end. %% @private @@ -1059,12 +1058,13 @@ get_range_object_small(_Req, BucketName, Key, Start, End, #transport_record{transport = Transport, socket = Socket, sending_chunked_obj_len = SendChunkLen}) -> + BeginTime = leo_date:clock(), case leo_gateway_rpc_handler:get(Key, Start, End) of {ok, _Meta, <<>>} -> - ?access_log_get(BucketName, Key, 0, ?HTTP_ST_OK), + ?access_log_get(BucketName, Key, 0, ?HTTP_ST_OK, BeginTime), ok; {ok, _Meta, Bin} -> - ?access_log_get(BucketName, Key, byte_size(Bin), ?HTTP_ST_OK), + ?access_log_get(BucketName, Key, byte_size(Bin), ?HTTP_ST_OK, BeginTime), case leo_net:chunked_send(Transport, Socket, Bin, SendChunkLen) of ok -> ok; @@ -1248,11 +1248,11 @@ is_cachable_req3(_Key, #cache_condition{content_types = CTypes}, Headers, _Body) %% @doc Reply to a request %% @private -reply_fun(ok, get, Bucket, Key, ObjLen) -> - ?access_log_get(Bucket, Key, ObjLen, ?HTTP_ST_OK); -reply_fun({ok,_}, get, Bucket, Key, ObjLen) -> - ?access_log_get(Bucket, Key, ObjLen, ?HTTP_ST_OK); -reply_fun({error, Cause}, Method, Bucket, Key, ObjLen) -> - ?reply_fun(Cause, Method, Bucket, Key, ObjLen). -reply_fun({error, Cause}, Method, Bucket, Key, ObjLen, Req) -> - ?reply_fun(Cause, Method, Bucket, Key, ObjLen, Req). +reply_fun(ok, get, Bucket, Key, ObjLen, BeginTime) -> + ?access_log_get(Bucket, Key, ObjLen, ?HTTP_ST_OK, BeginTime); +reply_fun({ok,_}, get, Bucket, Key, ObjLen, BeginTime) -> + ?access_log_get(Bucket, Key, ObjLen, ?HTTP_ST_OK, BeginTime); +reply_fun({error, Cause}, Method, Bucket, Key, ObjLen, BeginTime) -> + ?reply_fun(Cause, Method, Bucket, Key, ObjLen, BeginTime). +reply_fun({error, Cause}, Method, Bucket, Key, ObjLen, Req, BeginTime) -> + ?reply_fun(Cause, Method, Bucket, Key, ObjLen, Req, BeginTime). 
diff --git a/src/leo_gateway_s3_api.erl b/src/leo_gateway_s3_api.erl index 19d450f..268551b 100644 --- a/src/leo_gateway_s3_api.erl +++ b/src/leo_gateway_s3_api.erl @@ -221,6 +221,7 @@ onresponse(CacheCondition) -> get_bucket(Req, Key, #req_params{access_key_id = AccessKeyId, is_acl = false, qs_prefix = Prefix}) -> + BeginTime = leo_date:clock(), NormalizedMarker = case cowboy_req:qs_val(?HTTP_QS_BIN_MARKER, Req) of {undefined,_} -> <<>>; @@ -261,25 +262,25 @@ get_bucket(Req, Key, #req_params{access_key_id = AccessKeyId, case get_bucket_1(AccessKeyId, Key, Delimiter, NormalizedMarker, MaxKeys, Prefix) of {ok, XMLRet} -> - ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_OK), + ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_OK, BeginTime), Header = [?SERVER_HEADER, {?HTTP_HEAD_RESP_CONTENT_TYPE, ?HTTP_CTYPE_XML}], ?reply_ok(Header, XMLRet, Req); {error, badarg} -> - ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_BAD_REQ), + ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_BAD_REQ, BeginTime), ?reply_bad_request([?SERVER_HEADER], ?XML_ERROR_CODE_InvalidArgument, ?XML_ERROR_MSG_InvalidArgument, Key, <<>>, Req); {error, not_found} -> - ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_NOT_FOUND), + ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_NOT_FOUND, BeginTime), ?reply_not_found([?SERVER_HEADER], Key, <<>>, Req); {error, unavailable} -> - ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_SERVICE_UNAVAILABLE), + ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_SERVICE_UNAVAILABLE, BeginTime), ?reply_service_unavailable_error([?SERVER_HEADER], Key, <<>>, Req); {error, ?ERR_TYPE_INTERNAL_ERROR} -> - ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_INTERNAL_ERROR), + ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_INTERNAL_ERROR, BeginTime), ?reply_internal_error([?SERVER_HEADER], Key, <<>>, Req); {error, timeout} -> - ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_SERVICE_UNAVAILABLE), + ?access_log_bucket_get(Key, PrefixBin, ?HTTP_ST_SERVICE_UNAVAILABLE, BeginTime), 
?reply_timeout([?SERVER_HEADER], Key, <<>>, Req) end; get_bucket(Req, Bucket, #req_params{access_key_id = _AccessKeyId, @@ -305,6 +306,7 @@ get_bucket(Req, Bucket, #req_params{access_key_id = _AccessKeyId, ReqParams::#req_params{}). put_bucket(Req, Key, #req_params{access_key_id = AccessKeyId, is_acl = false}) -> + BeginTime = leo_date:clock(), Bucket = formalize_bucket(Key), CannedACL = string:to_lower(binary_to_list(?http_header(Req, ?HTTP_HEAD_X_AMZ_ACL))), %% Consume CreateBucketConfiguration @@ -317,7 +319,7 @@ put_bucket(Req, Key, #req_params{access_key_id = AccessKeyId, end, case put_bucket_1(CannedACL, AccessKeyId, Bucket) of ok -> - ?access_log_bucket_put(Bucket, ?HTTP_ST_OK), + ?access_log_bucket_put(Bucket, ?HTTP_ST_OK, BeginTime), ?reply_ok([?SERVER_HEADER], Req_1); {error, ?ERR_TYPE_INTERNAL_ERROR} -> ?reply_internal_error([?SERVER_HEADER], Key, <<>>, Req_1); @@ -360,14 +362,20 @@ put_bucket(Req, Key, #req_params{access_key_id = AccessKeyId, Key::binary(), ReqParams::#req_params{}). delete_bucket(Req, Key, #req_params{access_key_id = AccessKeyId}) -> + BeginTime = leo_date:clock(), + Bucket = formalize_bucket(Key), case delete_bucket_1(AccessKeyId, Key) of ok -> - Bucket = formalize_bucket(Key), - ?access_log_bucket_delete(Bucket, ?HTTP_ST_NO_CONTENT), + ?access_log_bucket_delete(Bucket, ?HTTP_ST_NO_CONTENT, BeginTime), ?reply_no_content([?SERVER_HEADER], Req); not_found -> + ?access_log_bucket_delete(Bucket, ?HTTP_ST_NOT_FOUND, BeginTime), ?reply_not_found([?SERVER_HEADER], Key, <<>>, Req); - {error, ?ERR_TYPE_INTERNAL_ERROR} -> + {error, timeout} -> + ?access_log_bucket_delete(Bucket, ?HTTP_ST_SERVICE_UNAVAILABLE, BeginTime), + ?reply_timeout_without_body([?SERVER_HEADER], Req); + {error, _} -> + ?access_log_bucket_delete(Bucket, ?HTTP_ST_INTERNAL_ERROR, BeginTime), ?reply_internal_error([?SERVER_HEADER], Key, <<>>, Req) end. 
@@ -378,17 +386,21 @@ delete_bucket(Req, Key, #req_params{access_key_id = AccessKeyId}) -> Key::binary(), ReqParams::#req_params{}). head_bucket(Req, Key, #req_params{access_key_id = AccessKeyId}) -> + BeginTime = leo_date:clock(), Bucket = formalize_bucket(Key), case head_bucket_1(AccessKeyId, Bucket) of ok -> - ?access_log_bucket_head(Bucket, ?HTTP_ST_OK), + ?access_log_bucket_head(Bucket, ?HTTP_ST_OK, BeginTime), ?reply_ok([?SERVER_HEADER], Req); not_found -> + ?access_log_bucket_head(Bucket, ?HTTP_ST_NOT_FOUND, BeginTime), ?reply_not_found_without_body([?SERVER_HEADER], Req); - {error, ?ERR_TYPE_INTERNAL_ERROR} -> - ?reply_internal_error_without_body([?SERVER_HEADER], Req); {error, timeout} -> - ?reply_timeout_without_body([?SERVER_HEADER], Req) + ?access_log_bucket_head(Bucket, ?HTTP_ST_SERVICE_UNAVAILABLE, BeginTime), + ?reply_timeout_without_body([?SERVER_HEADER], Req); + {error, _} -> + ?access_log_bucket_head(Bucket, ?HTTP_ST_INTERNAL_ERROR, BeginTime), + ?reply_internal_error_without_body([?SERVER_HEADER], Req) end. 
@@ -565,6 +577,7 @@ put_object(Directive, Req, Key, #req_params{handler = ?PROTO_HANDLER_S3, put_object_1(Directive, Req, Key, Meta, Bin, #req_params{bucket_name = BucketName, bucket_info = BucketInfo, custom_metadata = CMetaBin} = Params) -> + BeginTime = leo_date:clock(), Size = size(Bin), case leo_gateway_rpc_handler:put(#put_req_params{path = Key, body = Bin, @@ -573,7 +586,7 @@ put_object_1(Directive, Req, Key, Meta, Bin, #req_params{bucket_name = BucketNam msize = byte_size(CMetaBin), bucket_info = BucketInfo}) of {ok, _ETag} when Directive == ?HTTP_HEAD_X_AMZ_META_DIRECTIVE_COPY -> - ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK), + ?access_log_put(BucketName, Key, Size, ?HTTP_ST_OK, BeginTime), resp_copy_obj_xml(Req, Meta); {ok, _ETag} when Directive == ?HTTP_HEAD_X_AMZ_META_DIRECTIVE_REPLACE -> put_object_2(Req, Key, Meta, Params); @@ -597,9 +610,10 @@ put_object_2(Req, Key, Meta, Params) -> %% @private put_object_3(Req, #?METADATA{key = Key, dsize = Size} = Meta, #req_params{bucket_name = BucketName}) -> + BeginTime = leo_date:clock(), case leo_gateway_rpc_handler:delete(Meta#?METADATA.key) of ok -> - ?access_log_delete(BucketName, Key, Size, ?HTTP_ST_NO_CONTENT), + ?access_log_delete(BucketName, Key, Size, ?HTTP_ST_NO_CONTENT, BeginTime), resp_copy_obj_xml(Req, Meta); {error, not_found} -> resp_copy_obj_xml(Req, Meta); @@ -1969,9 +1983,10 @@ delete_multi_objects_4(Req, IsQuiet, [Key|Rest], DeletedKeys, ErrorKeys, Path = << BucketName/binary, <<"/">>/binary, BinKey/binary >>, case leo_gateway_rpc_handler:head(Path) of {ok, Meta} -> + BeginTime = leo_date:clock(), case leo_gateway_rpc_handler:delete(Path) of ok -> - ?access_log_delete(BucketName, Path, Meta#?METADATA.dsize, ?HTTP_ST_NO_CONTENT), + ?access_log_delete(BucketName, Path, Meta#?METADATA.dsize, ?HTTP_ST_NO_CONTENT, BeginTime), delete_multi_objects_4(Req, IsQuiet, Rest, [Key|DeletedKeys], ErrorKeys, Params); {error, not_found} ->