diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example index 443412215b..f7ab284dd9 100644 --- a/conf/chunkserver.conf.example +++ b/conf/chunkserver.conf.example @@ -1,18 +1,18 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 global.port=8200 global.subnet=127.0.0.0/24 global.enable_external_server=false global.external_ip=127.0.0.1 global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, typically 16MB global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, typically 4KB global.meta_page_size=4096 -# clone chunk允许的最长location长度 +# Maximum length allowed for the location of a clone chunk # chunk's block size, IO requests must align with it, supported value is |512| and |4096| # it should consist with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations # for clone chunk and snapshot chunk, it's also the minimum granularity that each bit represents @@ -23,34 +23,35 @@ global.location_limit=3000 # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Multiple MDS addresses are supported, separated by commas: 127.0.0.1:6666,127.0.0.1:7777 mds.listen.addr=127.0.0.1:6666 -# 向mds注册的最大重试次数 +# Maximum retry count for registering with MDS mds.register_retries=100 -# 向mds注册的rpc超时时间,一般1000ms +# RPC timeout for MDS registration, typically 1000ms mds.register_timeout=1000 -# 向mds发送心跳的间隔,一般10s +# Interval for sending heartbeats to MDS, usually 10s mds.heartbeat_interval=10 -# 向mds发送心跳的rpc超时间,一般1000ms +# RPC timeout for sending heartbeats to MDS, typically 1000ms mds.heartbeat_timeout=5000 # # Chunkserver settings # -# chunkserver主目录 +# Main directory for chunkserver chunkserver.stor_uri=local://./0/ -# chunkserver元数据文件 +# Metadata file for chunkserver chunkserver.meta_uri=local://./0/chunkserver.dat -# disk类型 +# Disk type chunkserver.disk_type=nvme -# raft内部install snapshot带宽上限,一般20MB +# Raft internal install snapshot bandwidth limit, usually 20MB chunkserver.snapshot_throttle_throughput_bytes=20971520 -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Throttle check cycles allow finer-grained bandwidth control. For example, +# with snapshotThroughputBytes=100MB and check cycles=10, they guarantee that +# the bandwidth is 10MB every 1/10 second, without accumulation: the first +# 1/10 second gets a 10MB budget, which then expires, so the second +# 1/10 second can still use only 10MB of bandwidth, not 20MB. chunkserver.snapshot_throttle_check_cycles=4
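A minimal editorial sketch of the check-cycle arithmetic just described (illustrative names, not part of the patch):

```cpp
#include <cstdint>

// Per-cycle budget implied by snapshot_throttle_check_cycles: with
// throughputBytes = 100 MB/s and checkCycles = 10, each 1/10 s window gets a
// 10 MB budget that expires with the window instead of accumulating.
uint64_t BytesAllowedPerCycle(uint64_t throughputBytes, uint64_t checkCycles) {
    return throughputBytes / checkCycles;
}
```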
-# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 # # Copyset settings # -# 是否检查任期,一般检查 +# Whether to check the term, usually enabled copyset.check_term=true -# 是否关闭raft配置变更的服务,一般不关闭 +# Whether to disable the raft configuration change service, generally not disabled copyset.disable_cli=false copyset.log_applied_task=false -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms=1000 -# raft打快照间隔,一般是1800s,也就是30分钟 +# Raft snapshot interval, usually 1800s, i.e., 30 minutes copyset.snapshot_interval_s=1800 -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# When a node is added, it first copies data in a role similar to a learner. +# Once it is within catchup_margin entries of the leader, the leader +# attempts to submit the configuration change entry (generally, a submitted +# entry will certainly be committed and applied). A small catchup_margin makes +# it very likely that the learner can join the replication group soon after. copyset.catchup_margin=1000 -# copyset chunk数据目录 +# Copyset chunk data directory copyset.chunk_data_uri=local://./0/copysets -# raft wal log目录 +# Raft WAL log directory copyset.raft_log_uri=curve://./0/copysets -# raft元数据目录 +# Raft metadata directory copyset.raft_meta_uri=local://./0/copysets -# raft snapshot目录 +# Raft snapshot directory copyset.raft_snapshot_uri=curve://./0/copysets -# copyset回收目录 +# Copyset recycling directory copyset.recycler_uri=local://./0/recycler -# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制 +# Threshold for loading copysets concurrently at chunkserver startup; 0 means no limit. copyset.load_concurrency=10 -# chunkserver use how many threads to use copyset complete sync. +# Number of threads the chunkserver uses for copyset complete sync. copyset.sync_concurrency=20 -# 检查copyset是否加载完成出现异常时的最大重试次数 +# Maximum number of retries when an error occurs while checking whether copysets have finished loading. copyset.check_retrytimes=3 -# 当前peer的applied_index与leader上的committed_index差距小于该值 -# 则判定copyset已经加载完成 +# If the difference between the applied_index of the current peer and the committed_index +# on the leader is less than this value, the copyset is considered loaded. copyset.finishload_margin=2000
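A minimal editorial sketch of the finishload_margin rule just described (illustrative names, not Curve's actual API):

```cpp
#include <cstdint>

// A copyset is considered loaded once this peer's applied index is within
// finishload_margin entries of the leader's committed index.
bool CopysetFinishedLoading(uint64_t leaderCommittedIndex,
                            uint64_t peerAppliedIndex,
                            uint64_t finishloadMargin) {
    return leaderCommittedIndex - peerAppliedIndex < finishloadMargin;
}
```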
-# 循环判定copyset是否加载完成的内部睡眠时间 +# Sleep interval inside the loop that checks whether copysets have finished loading. copyset.check_loadmargin_interval_ms=1000 # scan copyset interval copyset.scan_interval_sec=5 @@ -124,26 +125,26 @@ copyset.check_syncing_interval_ms=500 # # Clone settings # -# 禁止使用curveclient +# Whether to disable the curve client clone.disable_curve_client=false -# 禁止使用s3adapter +# Whether to disable the s3 adapter clone.disable_s3_adapter=false -# 克隆的分片大小,一般1MB +# Clone slice size, usually 1MB clone.slice_size=1048576 -# 读clone chunk时是否需要paste到本地 -# 该配置对recover chunk请求类型无效 +# Whether data read from a clone chunk needs to be pasted to the local chunk +# This option has no effect on recover chunk requests clone.enable_paste=false -# 克隆的线程数量 +# Number of clone threads clone.thread_num=10 -# 克隆的队列深度 +# Queue depth for cloning clone.queue_depth=6000 -# curve用户名 +# Curve username curve.root_username=root -# curve密码 +# Curve password curve.root_password=root_password -# client配置文件 +# Client configuration file curve.config_path=conf/cs_client.conf -# s3配置文件 +# S3 configuration file s3.config_path=conf/s3.conf # Curve File time to live curve.curve_file_timeout_s=30 @@ -151,7 +152,7 @@ curve.curve_file_timeout_s=30 # # Local FileSystem settings # -# 是否开启使用renameat2,ext4内核3.15以后开始支持 +# Whether to use renameat2, supported by ext4 since kernel 3.15 fs.enable_renameat2=true # @@ -171,27 +172,27 @@ storeng.sync_write=false # # Concurrent apply module -# 并发模块写线程的并发度,一般是10 +# Concurrency of the write threads in the concurrent apply module, usually 10 wconcurrentapply.size=10 -# 并发模块写线程的队列深度 +# Queue depth of the write threads in the concurrent apply module wconcurrentapply.queuedepth=1 -# 并发模块读线程的并发度,一般是5 +# Concurrency of the read threads in the concurrent apply module, usually 5 rconcurrentapply.size=5 -# 并发模块读线程的队列深度 +# Queue depth of the read threads in the concurrent apply module rconcurrentapply.queuedepth=1 # # Chunkfile pool # -# 是否开启从chunkfilepool获取chunk,一般是true +# Whether to obtain chunks from the chunkfilepool, usually true chunkfilepool.enable_get_chunk_from_pool=true -# chunkfilepool目录 +# chunkfilepool directory chunkfilepool.chunk_file_pool_dir=./0/chunks -# chunkfilepool meta文件路径 +# chunkfilepool meta file path #chunkfilepool.meta_path=./chunkfilepool.meta -# chunkfilepool meta文件大小 +# chunkfilepool meta file size chunkfilepool.cpmeta_file_size=4096 -# chunkfilepool get chunk最大重试次数 +# chunkfilepool get chunk maximum retry count chunkfilepool.retry_times=5 # Enable clean chunk chunkfilepool.clean.enable=true @@ -211,23 +212,23 @@ chunkfilepool.thread_num=1 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效 +# Whether walpool shares the chunkfilepool; if true,
the settings from the third entry onward are ignored walfilepool.use_chunk_file_pool=true -# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间 +# Takes effect when the WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation walfilepool.use_chunk_file_pool_reserve=15 -# 是否开启从walfilepool获取chunk,一般是true +# Whether to obtain chunks from the walfilepool, usually true walfilepool.enable_get_segment_from_pool=true -# walpool目录 +# Walpool directory walfilepool.file_pool_dir=./0/ -# walpool meta文件路径 +# Walpool meta file path walfilepool.meta_path=./walfilepool.meta -# walpool meta文件大小 +# Walpool meta file size walfilepool.segment_size=8388608 -# WAL metapage大小 +# WAL metapage size walfilepool.metapage_size=4096 -# WAL filepool 元数据文件大小 +# WAL filepool metadata file size walfilepool.meta_file_size=4096 -# WAL filepool get chunk最大重试次数 +# WAL filepool get chunk maximum retry count walfilepool.retry_times=5 # Whether allocate filePool by percent of disk size. walfilepool.allocated_by_percent=true @@ -241,14 +242,14 @@ walfilepool.thread_num=1 # # trash settings # -# chunkserver回收数据彻底删除的过期时间 +# Time after which the chunkserver permanently deletes recycled data trash.expire_afterSec=300 -# chunkserver检查回收数据过期时间的周期 +# Interval at which the chunkserver checks recycled data for expiration trash.scan_periodSec=120 # common option # -# chunkserver 日志存放文件夹 +# Chunkserver log storage folder chunkserver.common.logDir=./ -# 单元测试情况下 +# For unit tests # chunkserver.common.logDir=./runlog/ diff --git a/curvefs/test/volume/bitmap_allocator_test.cpp b/curvefs/test/volume/bitmap_allocator_test.cpp index 3eca470fec..88c324e9e4 100644 --- a/curvefs/test/volume/bitmap_allocator_test.cpp +++ b/curvefs/test/volume/bitmap_allocator_test.cpp @@ -18,9 +18,8 @@ #include -#include "curvefs/test/volume/common.h" - #include "absl/memory/memory.h" +#include "curvefs/test/volume/common.h" namespace curvefs { namespace volume { @@ -100,7 +99,7 @@ TEST_F(BitmapAllocatorTest, AllocFromBitmap) { Extents expected = { Extent(opt_.startOffset + opt_.length * opt_.smallAllocProportion, - allocSize)}; + allocSize)}; ASSERT_EQ(expected, exts); @@ -225,7 +224,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) { uint64_t off = opt_.startOffset; uint64_t usedSize = 0; - // 对于每一个 size per bit,随机其中一部分设置 + // For each size per bit, randomly set a portion of it auto select = [this, &usedSize](uint64_t startOffset) { auto off = rand_r(&seed) * 4096 % opt_.sizePerBit; auto len = rand_r(&seed) * 4096 % opt_.sizePerBit; diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index 64109ef8e5..a5415b26e3 100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -56,15 +56,17 @@ class CBDClient { int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); int Extend(const char* filename, UserInfo_t* info, uint64_t size); - // 同步读写 - int Read(int fd, char* buf, unsigned long offset, unsigned long length); // NOLINT - int Write(int fd, const char* buf, unsigned long offset, unsigned long length); // NOLINT + // Synchronous read and write + int Read(int fd, char* buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char* buf, unsigned long offset, + unsigned long length); // NOLINT - // 异步读写 + // Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx);
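A minimal editorial sketch of how the synchronous calls declared above might be used (not part of the patch; the fd is assumed to come from a prior open call on the client, and a non-negative return on success is an assumption):

```cpp
#include <vector>

// Read 1 MiB at offset 0 through the CBD client, then write it back at
// offset 1 MiB. Signatures follow the declarations above.
void CopyFirstMiB(CBDClient* client, int fd) {
    std::vector<char> buf(1 << 20);
    if (client->Read(fd, buf.data(), 0, buf.size()) >= 0) {
        client->Write(fd, buf.data(), buf.size(), buf.size());
    }
}
```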
- // 获取文件的基本信息 + // Obtain basic information about the file int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); diff --git a/curvefs_python/test.py b/curvefs_python/test.py index 0f0045fa62..eb77fd7f9e 100644 --- a/curvefs_python/test.py +++ b/curvefs_python/test.py @@ -19,11 +19,12 @@ import os + def exec_cmd(cmd): ret = os.system(cmd) if ret == 0: print cmd + " exec success" - else : + else: print cmd + " exec fail, ret = " + str(ret) @@ -37,10 +38,10 @@ def exec_cmd(cmd): exec_cmd(cmd) cmd = "curve list --user k8s --dirname /k8s" exec_cmd(cmd) -# 不是root,失败 +# Not root, fails cmd = "curve list --user k8s --dirname /" exec_cmd(cmd) -# root没有传入密码,失败 +# Root without a password, fails cmd = "curve list --user root --dirname /" exec_cmd(cmd) cmd = "curve list --user root --dirname / --password root_password" exec_cmd(cmd) diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0 index 1525855ebe..f7ac0f1f19 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.0 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.0 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -145,7 +145,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Whether walpool shares the chunkfilepool; if true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./0/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1 index d14fa15bb6..62719e0c30 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.1 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.1 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -143,7 +143,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Whether walpool shares the chunkfilepool; if true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./1/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2 index 2604423d6f..edc5750db7 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.2 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.2 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./2/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -142,7 +142,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Whether walpool shares the chunkfilepool; if true,
the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./2/walfilepool/ diff --git a/include/chunkserver/chunkserver_common.h b/include/chunkserver/chunkserver_common.h index c483dbea82..bb5ab9b87f 100644 --- a/include/chunkserver/chunkserver_common.h +++ b/include/chunkserver/chunkserver_common.h @@ -24,9 +24,9 @@ #define INCLUDE_CHUNKSERVER_CHUNKSERVER_COMMON_H_ #include +#include #include #include -#include #include #include @@ -35,16 +35,16 @@ namespace curve { namespace chunkserver { /* for IDs */ -using LogicPoolID = uint32_t; -using CopysetID = uint32_t; -using ChunkID = uint64_t; -using SnapshotID = uint64_t; -using SequenceNum = uint64_t; +using LogicPoolID = uint32_t; +using CopysetID = uint32_t; +using ChunkID = uint64_t; +using SnapshotID = uint64_t; +using SequenceNum = uint64_t; using ChunkSizeType = uint32_t; -using PageSizeType = uint32_t; +using PageSizeType = uint32_t; -using GroupNid = uint64_t; +using GroupNid = uint64_t; using ChunkServerID = uint32_t; // braft @@ -60,57 +60,57 @@ using PosixFileSystemAdaptor = braft::PosixFileSystemAdaptor; using SnapshotThrottle = braft::SnapshotThrottle; using ThroughputSnapshotThrottle = braft::ThroughputSnapshotThrottle; - -// TODO(lixiaocui): 考虑一下后续的单元测试或者校验要怎么做 +// TODO(lixiaocui): Consider how to proceed with subsequent unit testing or +// validation /* - * IO性能统计复合metric类型 + * Composite metric type for IO performance statistics */ struct IoPerfMetric { - uint64_t readCount; - uint64_t writeCount; - uint64_t readBytes; - uint64_t writeBytes; - uint64_t readIops; - uint64_t writeIops; - uint64_t readBps; - uint64_t writeBps; + uint64_t readCount; + uint64_t writeCount; + uint64_t readBytes; + uint64_t writeBytes; + uint64_t readIops; + uint64_t writeIops; + uint64_t readBps; + uint64_t writeBps; }; /** - * 将(LogicPoolID, CopysetID)二元组转换成数字格式的复制组ID,格式如下: + * Convert the (LogicPoolID, CopysetID) tuple into a replication group ID in + * numeric format, laid out as follows: * | group id | * | 32 | 32 | * | logic pool id | copyset id | */ -inline GroupNid ToGroupNid(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +inline GroupNid ToGroupNid(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { return (static_cast(logicPoolId) << 32) | copysetId; } /** - * 将(LogicPoolID, CopysetID)二元组转换成字符串格式的复制组ID + * Convert the (LogicPoolID, CopysetID) tuple into a replication group ID in + * string format */ -inline GroupId ToGroupId(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +inline GroupId ToGroupId(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { return std::to_string(ToGroupNid(logicPoolId, copysetId)); } -#define ToBraftGroupId ToGroupId +#define ToBraftGroupId ToGroupId /** - * 从数字格式的复制组ID中解析LogicPoolID + * Parse the LogicPoolID from a replication group ID in numeric format */ -inline LogicPoolID GetPoolID(const GroupNid &groupId) { - return groupId >> 32; -} +inline LogicPoolID GetPoolID(const GroupNid& groupId) { return groupId >> 32; } /** - * 从数字格式的复制组ID中解析CopysetID + * Parse the CopysetID from a replication group ID in numeric format */ -inline CopysetID GetCopysetID(const GroupNid &groupId) { +inline CopysetID GetCopysetID(const GroupNid& groupId) { return groupId & (((uint64_t)1 << 32) - 1); }
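An editorial round-trip sketch of the 64-bit group id layout documented above (pool id in the high 32 bits, copyset id in the low 32 bits; not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main() {
    uint32_t logicPoolId = 7;
    uint32_t copysetId = 42;
    // Pack as in ToGroupNid: pool id in the high 32 bits, copyset id low.
    uint64_t groupNid = (static_cast<uint64_t>(logicPoolId) << 32) | copysetId;
    assert(groupNid >> 32 == logicPoolId);            // GetPoolID
    assert((groupNid & 0xFFFFFFFFull) == copysetId);  // GetCopysetID
    return 0;
}
```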
-/* 格式输出 group id 的 字符串 (logicPoolId, copysetId) */ -inline std::string ToGroupIdString(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +/* Format the group id as the string (logicPoolId, copysetId) */ +inline std::string ToGroupIdString(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { std::string groupIdString; groupIdString.append("("); groupIdString.append(std::to_string(logicPoolId)); @@ -121,7 +121,7 @@ inline std::string ToGroupIdString(const LogicPoolID &logicPoolId, groupIdString.append(")"); return groupIdString; } -#define ToGroupIdStr ToGroupIdString +#define ToGroupIdStr ToGroupIdString // Meta page is header of chunkfile, and is used to store meta data of // chunkfile. diff --git a/nebd/src/common/timeutility.h b/nebd/src/common/timeutility.h index a80afb61b5..9e454f15a7 100644 --- a/nebd/src/common/timeutility.h +++ b/nebd/src/common/timeutility.h @@ -26,9 +26,10 @@ #include #include #include + +#include #include #include -#include namespace nebd { namespace common { @@ -53,7 +54,8 @@ class TimeUtility { return tm.tv_sec; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert a timestamp (in seconds) to standard time format and write it + // to standard static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -64,7 +66,7 @@ class TimeUtility { } }; -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd -#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ +#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ diff --git a/nebd/src/part1/async_request_closure.cpp b/nebd/src/part1/async_request_closure.cpp index 94d1a9f50f..c9ab8e873e 100644 --- a/nebd/src/part1/async_request_closure.cpp +++ b/nebd/src/part1/async_request_closure.cpp @@ -22,8 +22,8 @@ #include "nebd/src/part1/async_request_closure.h" -#include #include +#include #include #include @@ -40,11 +40,10 @@ void AsyncRequestClosure::Run() { int64_t sleepUs = GetRpcRetryIntervalUs(aioCtx->retryCount); LOG_EVERY_SECOND(WARNING) << OpTypeToString(aioCtx->op) << " rpc failed" - << ", error = " << cntl.ErrorText() - << ", fd = " << fd + << ", error = " << cntl.ErrorText() << ", fd = " << fd << ", log id = " << cntl.log_id() - << ", retryCount = " << aioCtx->retryCount - << ", sleep " << (sleepUs / 1000) << " ms"; + << ", retryCount = " << aioCtx->retryCount << ", sleep " + << (sleepUs / 1000) << " ms"; bthread_usleep(sleepUs); Retry(); } else { @@ -52,7 +51,7 @@ void AsyncRequestClosure::Run() { if (nebd::client::RetCode::kOK == retCode) { DVLOG(6) << OpTypeToString(aioCtx->op) << " success, fd = " << fd; - // 读请求复制数据 + // Copy the data for read requests if (aioCtx->op == LIBAIO_OP::LIBAIO_OP_READ) { cntl.response_attachment().copy_to( aioCtx->buf, cntl.response_attachment().size()); @@ -73,8 +72,8 @@ void AsyncRequestClosure::Run() { } int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { - // EHOSTDOWN: 找不到可用的server。 - // server可能停止服务了,也可能正在退出中(返回了ELOGOFF) + // EHOSTDOWN: Unable to find an available server. 
+ // The server may have stopped serving or may be exiting (returning ELOGOFF) if (cntl.ErrorCode() == EHOSTDOWN) { return requestOption_.rpcHostDownRetryIntervalUs; } @@ -83,10 +82,9 @@ int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { return requestOption_.rpcRetryIntervalUs; } - return std::max( - requestOption_.rpcRetryIntervalUs, - std::min(requestOption_.rpcRetryIntervalUs * retryCount, - requestOption_.rpcRetryMaxIntervalUs)); + return std::max(requestOption_.rpcRetryIntervalUs, + std::min(requestOption_.rpcRetryIntervalUs * retryCount, + requestOption_.rpcRetryMaxIntervalUs)); } void AsyncRequestClosure::Retry() const { diff --git a/nebd/src/part1/async_request_closure.h b/nebd/src/part1/async_request_closure.h index 27ab7f613d..0df2f03172 100644 --- a/nebd/src/part1/async_request_closure.h +++ b/nebd/src/part1/async_request_closure.h @@ -32,12 +32,9 @@ namespace nebd { namespace client { struct AsyncRequestClosure : public google::protobuf::Closure { - AsyncRequestClosure(int fd, - NebdClientAioContext* ctx, + AsyncRequestClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : fd(fd), - aioCtx(ctx), - requestOption_(option) {} + : fd(fd), aioCtx(ctx), requestOption_(option) {} void Run() override; @@ -47,94 +44,70 @@ struct AsyncRequestClosure : public google::protobuf::Closure { void Retry() const; - // 请求fd + // Request fd int fd; - // 请求上下文信息 + // Request Context Information NebdClientAioContext* aioCtx; - // brpc请求的controller + // Controller requested by brpc brpc::Controller cntl; RequestOption requestOption_; }; struct AioWriteClosure : public AsyncRequestClosure { - AioWriteClosure(int fd, - NebdClientAioContext* ctx, + AioWriteClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} WriteResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioReadClosure : public AsyncRequestClosure { - AioReadClosure(int fd, - NebdClientAioContext* ctx, + AioReadClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} ReadResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioDiscardClosure : public AsyncRequestClosure { - AioDiscardClosure(int fd, - NebdClientAioContext* ctx, + AioDiscardClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} DiscardResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioFlushClosure : public AsyncRequestClosure { - AioFlushClosure(int fd, - NebdClientAioContext* ctx, + AioFlushClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} FlushResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; inline const char* OpTypeToString(LIBAIO_OP opType) { switch 
(opType) { - case LIBAIO_OP::LIBAIO_OP_READ: - return "Read"; - case LIBAIO_OP::LIBAIO_OP_WRITE: - return "Write"; - case LIBAIO_OP::LIBAIO_OP_DISCARD: - return "Discard"; - case LIBAIO_OP::LIBAIO_OP_FLUSH: - return "Flush"; - default: - return "Unknown"; + case LIBAIO_OP::LIBAIO_OP_READ: + return "Read"; + case LIBAIO_OP::LIBAIO_OP_WRITE: + return "Write"; + case LIBAIO_OP::LIBAIO_OP_DISCARD: + return "Discard"; + case LIBAIO_OP::LIBAIO_OP_FLUSH: + return "Flush"; + default: + return "Unknown"; } } diff --git a/nebd/src/part2/util.h b/nebd/src/part2/util.h index f733a04577..0894d69ebe 100644 --- a/nebd/src/part2/util.h +++ b/nebd/src/part2/util.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART2_UTIL_H_ #define NEBD_SRC_PART2_UTIL_H_ -#include #include // NOLINT #include +#include #include "nebd/src/part2/define.h" @@ -51,9 +51,9 @@ class FdAllocator { FdAllocator() : fd_(0) {} ~FdAllocator() {} - // fd的有效值范围为[1, INT_MAX] + // The valid range of values for fd is [1, INT_MAX] int GetNext(); - // 初始化fd的值 + // Initialize the value of fd void InitFd(int fd); private: diff --git a/nebd/test/common/test_name_lock.cpp b/nebd/test/common/test_name_lock.cpp index 1f79ec5800..574667ad8b 100644 --- a/nebd/test/common/test_name_lock.cpp +++ b/nebd/test/common/test_name_lock.cpp @@ -21,6 +21,7 @@ */ #include + #include #include // NOLINT @@ -32,29 +33,27 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock, different strs: locking does not deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks, same str: locking does not deadlock lock2.Lock("str1"); - - - // 同锁同str TryLock失败 + // Same lock, same str: TryLock fails ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // Same lock, different str: TryLock succeeds ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // Different locks, same str: TryLock succeeds ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlocking a name that was never locked is OK lock2.Unlock("str2"); } @@ -64,12 +63,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Locked within the guard scope; cannot be locked again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocked outside the scope; can be locked + // again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); @@ -80,14 +80,14 @@ TEST(TestNameLock, TestNameLockConcurrent) { NameLock lock1; - auto worker = [&] (const std::string &str) { + auto worker = [&](const std::string& str) { for (int i = 0; i < 10000; i++) { NameLockGuard guard(lock1, str); } }; std::vector threadpool; - for (auto &t : threadpool) { + for (auto& t : threadpool) { std::string str1 = "aaaa"; std::string str2 = "bbbb"; std::srand(std::time(nullptr)); t = std::thread(worker, rstr); } - for (auto &t : threadpool) { + for (auto& t : threadpool) { t.join(); } } - - -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd
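An editorial sketch of the per-name locking pattern these tests exercise (not part of the patch): a NameLock serializes work on the same name while leaving different names independent, and NameLockGuard is its RAII wrapper.

```cpp
#include <string>

// Work that must be exclusive per name: the guard locks `name` on entry and
// unlocks it automatically when it goes out of scope.
void DoExclusiveByName(nebd::common::NameLock& lock, const std::string& name) {
    nebd::common::NameLockGuard guard(lock, name);
    // ... operate on the resource identified by `name` ...
}
```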
diff --git a/proto/cli.proto b/proto/cli.proto index 46981c967d..2d0f84696d 100755 --- a/proto/cli.proto +++ b/proto/cli.proto @@ -20,12 +20,12 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli"; -// 这里都用 logicPoolId, copysetId,进入 rpc service 之后,会转换成 string -// 类型的 groupId,在传给 raft +// logicPoolId and copysetId are used here. After entering the RPC service, +// they are converted into a string-typed groupId and passed on to raft. // | groupId | // | logicPoolId | copysetId | message AddPeerRequest { - required uint32 logicPoolId = 1; // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 1; // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 2; required string leader_id = 3; required string peer_id = 4; diff --git a/proto/cli2.proto b/proto/cli2.proto index 76416f7a9f..b41d00c322 100755 --- a/proto/cli2.proto +++ b/proto/cli2.proto @@ -23,17 +23,17 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli2"; -// cli.proto 供老的使用保证 +// cli.proto is kept as a compatibility guarantee for legacy users message AddPeerRequest2 { - required uint32 logicPoolId = 1; // 逻辑池id - required uint32 copysetId = 2; // 复制组id + required uint32 logicPoolId = 1; // Logical pool ID + required uint32 copysetId = 2; // Copyset (replication group) ID required common.Peer leader = 3; // leader - required common.Peer addPeer = 4; // 新增peer + required common.Peer addPeer = 4; // Peer to add } message AddPeerResponse2 { - repeated common.Peer oldPeers = 1; // 老配置 - repeated common.Peer newPeers = 2; // 新配置 + repeated common.Peer oldPeers = 1; // Old configuration + repeated common.Peer newPeers = 2; // New configuration } message RemovePeerRequest2 { @@ -87,11 +87,11 @@ message SnapshotAllResponse { message GetLeaderRequest2 { required uint32 logicPoolId = 1; required uint32 copysetId = 2; - optional common.Peer peer = 3; // 可以不指定peer查leader + optional common.Peer peer = 3; // The leader can be queried without specifying a peer } message GetLeaderResponse2 { - required common.Peer leader = 1; // 通过peer判空来判断是否返回leader + required common.Peer leader = 1; // Whether a leader is returned is determined by checking if this peer is empty } message ResetPeerRequest2 { diff --git a/proto/topology.proto b/proto/topology.proto index 6e88d4e102..f9864de5e9 100644 --- a/proto/topology.proto +++ b/proto/topology.proto @@ -415,7 +415,7 @@ message CreateLogicalPoolRequest { required LogicalPoolType type = 4; required bytes redundanceAndPlaceMentPolicy = 5; //json body required bytes userPolicy = 6; //json body - optional uint32 scatterWidth = 7; //生成copyset依据的scatterWidth平均值 + optional uint32 scatterWidth = 7; // Average scatterWidth on which copyset generation is based optional AllocateStatus status = 8; } diff --git a/src/chunkserver/chunk_closure.cpp b/src/chunkserver/chunk_closure.cpp index 09b259ae7e..ba547381d8 100644 --- a/src/chunkserver/chunk_closure.cpp +++ b/src/chunkserver/chunk_closure.cpp @@ -21,6 +21,7 @@ */ #include "src/chunkserver/chunk_closure.h" + #include namespace curve { @@ -28,21 +29,23 @@ namespace chunkserver { void ChunkClosure::Run() { /** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * After Run finishes, the closure automatically destructs + * itself, preventing any missed destructor calls. 
*/ std::unique_ptr selfGuard(this); /** - * 确保done能够被调用,目的是保证rpc一定会返回 + * Ensure that done is invoked, so that the rpc is guaranteed to return */ brpc::ClosureGuard doneGuard(request_->Closure()); /** - * 尽管在request propose给copyset的之前已经 - * 对leader身份进行了确认,但是在copyset处理 - * request的时候,当前copyset的身份还是有可能 - * 变成非leader,所以需要判断ChunkClosure被调 - * 用的时候,request的status,如果 ok,说明是 - * 正常的apply处理,否则将请求转发 + * Although the identity of the leader has been confirmed + * before proposing the request to the copyset, during the + * processing of the request by the copyset, the current + * identity of the copyset may still change to a non-leader. + * Therefore, it is necessary to check the status of the + * request when ChunkClosure is invoked. If it is 'ok', it + * indicates normal apply processing; otherwise, the + * request should be forwarded. */ if (status().ok()) { return; } @@ -61,13 +64,13 @@ void ScanChunkClosure::Run() { case CHUNK_OP_STATUS_CHUNK_NOTEXIST: LOG(WARNING) << "scan chunk failed, read chunk not exist. " << request_->ShortDebugString(); - break; + break; case CHUNK_OP_STATUS_FAILURE_UNKNOWN: LOG(ERROR) << "scan chunk failed, read chunk unknown failure. " << request_->ShortDebugString(); - break; - default: - break; + break; + default: + break; } } diff --git a/src/chunkserver/chunk_closure.h b/src/chunkserver/chunk_closure.h index e2d76b7174..6700527c26 100755 --- a/src/chunkserver/chunk_closure.h +++ b/src/chunkserver/chunk_closure.h @@ -24,20 +24,23 @@ #define SRC_CHUNKSERVER_CHUNK_CLOSURE_H_ #include + #include -#include "src/chunkserver/op_request.h" #include "proto/chunk.pb.h" +#include "src/chunkserver/op_request.h" namespace curve { namespace chunkserver { /** - * 携带op request的所有上下文的closure,通过braft::Task传递给raft处理, - * 调用会有两个地方: - * 1.op request正常的被raft处理,最后on apply的时候会调用返回 - * 2.op request被打包给raft处理之后,但是还没有来得及处理就出错了,例如leader - * step down变为了非leader,那么会明确的提前向client返回错误 + * A closure that carries the full context of an op request and is passed to + * raft for processing via a braft::Task. It is invoked in two places: + * 1. The op request is processed by raft normally; the closure is invoked + * when the request is finally applied + * 2. The op request has been handed to raft, but an error occurs before it
 + * can be processed, e.g. the leader steps down and becomes a non-leader; + * in that case an error is explicitly returned to the client in advance */ class ChunkClosure : public braft::Closure { public: @@ -49,37 +52,37 @@ class ChunkClosure : public braft::Closure { void Run() override; public: - // 包含了op request 的上下文信息 + // Contains the context of the op request std::shared_ptr request_; }; class ScanChunkClosure : public google::protobuf::Closure { public: - ScanChunkClosure(ChunkRequest *request, ChunkResponse *response) : - request_(request), response_(response) {} + ScanChunkClosure(ChunkRequest* request, ChunkResponse* response) + : request_(request), response_(response) {} ~ScanChunkClosure() = default; void Run() override; public: - ChunkRequest *request_; - ChunkResponse *response_; + ChunkRequest* request_; + ChunkResponse* response_; }; class SendScanMapClosure : public google::protobuf::Closure { public: - SendScanMapClosure(FollowScanMapRequest * request, - FollowScanMapResponse *response, - uint64_t timeout, - uint32_t retry, - uint64_t retryIntervalUs, - brpc::Controller* cntl, - brpc::Channel *channel) : - request_(request), response_(response), - rpcTimeoutMs_(timeout), retry_(retry), - retryIntervalUs_(retryIntervalUs), - cntl_(cntl), channel_(channel) {} + SendScanMapClosure(FollowScanMapRequest* request, + FollowScanMapResponse* response, uint64_t timeout, + uint32_t retry, uint64_t retryIntervalUs, + brpc::Controller* cntl, brpc::Channel* channel) + : request_(request), + response_(response), + rpcTimeoutMs_(timeout), + retry_(retry), + retryIntervalUs_(retryIntervalUs), + cntl_(cntl), + channel_(channel) {} ~SendScanMapClosure() = default; @@ -89,13 +92,13 @@ class SendScanMapClosure : public google::protobuf::Closure { void Guard(); public: - FollowScanMapRequest *request_; - FollowScanMapResponse *response_; + FollowScanMapRequest* request_; + FollowScanMapResponse* response_; uint64_t rpcTimeoutMs_; uint32_t retry_; uint64_t retryIntervalUs_; - brpc::Controller *cntl_; - brpc::Channel *channel_; + brpc::Controller* cntl_; + brpc::Channel* channel_; }; } // namespace chunkserver diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp index d6e9034641..85d3d241a5 100755 --- a/src/chunkserver/chunk_service.cpp +++ b/src/chunkserver/chunk_service.cpp @@ -22,31 +22,30 @@ #include "src/chunkserver/chunk_service.h" -#include #include #include +#include -#include #include +#include #include +#include "include/curve_compiler_specific.h" +#include "src/chunkserver/chunk_service_closure.h" +#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/op_request.h" -#include "src/chunkserver/chunk_service_closure.h" #include "src/common/fast_align.h" -#include "include/curve_compiler_specific.h" - namespace curve { namespace chunkserver { using ::curve::common::is_aligned; ChunkServiceImpl::ChunkServiceImpl( - const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr& epochMap) + const ChunkServiceOptions& chunkServiceOptions, + const std::shared_ptr& epochMap) : chunkServiceOptions_(chunkServiceOptions), copysetNodeManager_(chunkServiceOptions.copysetNodeManager), inflightThrottle_(chunkServiceOptions.inflightThrottle), @@ -55,15 +54,11 @@ ChunkServiceImpl::ChunkServiceImpl( maxChunkSize_ = 
copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; } -void ChunkServiceImpl::DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::DeleteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -76,7 +71,7 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -86,24 +81,17 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::WriteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -116,11 +104,11 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); DVLOG(9) << "Get write I/O request, op: " << request->optype() - << " offset: " << request->offset() - << " size: " << request->size() << " buf header: " - << *(unsigned int *) cntl->request_attachment().to_string().c_str() + << " offset: " << request->offset() << " size: " << request->size() + << " buf header: " + << *(unsigned int*)cntl->request_attachment().to_string().c_str() << " attachement size " << cntl->request_attachment().size(); if (request->has_epoch()) { @@ -134,7 +122,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, } } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "I/O request, op: " << request->optype() @@ -144,7 +132,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -154,24 +142,18 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void 
ChunkServiceImpl::CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -184,7 +166,8 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 请求创建的chunk大小和copyset配置的大小不一致 + // The chunk size requested for creation does not match the size configured + // for copyset if (request->size() != maxChunkSize_) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "Invalid chunk size: " << request->optype() @@ -193,7 +176,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -203,19 +186,15 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared( + nodePtr, controller, request, response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done) { +void ChunkServiceImpl::CreateS3CloneChunk( + RpcController* controller, const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, Closure* done) { (void)controller; (void)request; brpc::ClosureGuard doneGuard(done); @@ -223,15 +202,11 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, LOG(INFO) << "Invalid request, serverSide Not implement yet"; } -void ChunkServiceImpl::ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -244,7 +219,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -254,7 +229,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -264,25 +239,17 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, 
return; } - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::RecoverChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -295,7 +262,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -305,7 +272,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -315,26 +282,19 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // RecoverChunk请求和ReadChunk请求共用ReadChunkRequest - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + // RecoverChunk request and ReadChunk request share ReadChunkRequest + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -347,13 +307,13 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -363,25 +323,17 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + 
response, doneGuard.release()); req->Process(); } void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( - RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); + RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -401,7 +353,7 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -412,31 +364,26 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } /** - * 当前GetChunkInfo在rpc service层定义和Chunk Service分离的, - * 且其并不经过QoS或者raft一致性协议,所以这里没有让其继承 - * OpRequest或者QoSRequest来重新封装,而是直接原地处理掉了 + * GetChunkInfo is currently defined at the rpc service layer, separate from + * the Chunk Service, and it does not go through QoS or the raft consistency + * protocol, so instead of inheriting from OpRequest or QoSRequest and being + * re-encapsulated, it is processed directly in place */ -void ChunkServiceImpl::GetChunkInfo(RpcController *controller, - const GetChunkInfoRequest *request, - GetChunkInfoResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkInfo(RpcController* controller, + const GetChunkInfoRequest* request, + GetChunkInfoResponse* response, + Closure* done) { (void)controller; - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - nullptr, - nullptr, - done); + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, nullptr, nullptr, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -449,10 +396,9 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, return; } - // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkInfo failed, copyset node is not found: " @@ -460,7 +406,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, return; } - // 检查任期和自己是不是Leader + // Check the term and whether this node is the leader if (!nodePtr->IsLeaderTerm()) { PeerId leader = nodePtr->GetLeaderId(); if (!leader.is_empty()) { @@ -476,16 +422,15 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, ret = nodePtr->GetDataStore()->GetChunkInfo(request->chunkid(), &chunkInfo); if (CSErrorCode::Success == ret) { - // 1.成功,此时chunk文件肯定存在 + // 1. 
Success, the chunk file must exist at this time response->add_chunksn(chunkInfo.curSn); - if (chunkInfo.snapSn > 0) - response->add_chunksn(chunkInfo.snapSn); + if (chunkInfo.snapSn > 0) response->add_chunksn(chunkInfo.snapSn); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回的版本集合为空 + // 2. Chunk file does not exist, returned version set is empty response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. Other errors LOG(ERROR) << "get chunk info failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -497,14 +442,14 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, } } -void ChunkServiceImpl::GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->length())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "GetChunkHash illegal parameter:" @@ -517,10 +462,9 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, return; } - // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkHash failed, copyset node is not found: " @@ -531,21 +475,19 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, CSErrorCode ret; std::string hash; - ret = nodePtr->GetDataStore()->GetChunkHash(request->chunkid(), - request->offset(), - request->length(), - &hash); + ret = nodePtr->GetDataStore()->GetChunkHash( + request->chunkid(), request->offset(), request->length(), &hash); if (CSErrorCode::Success == ret) { - // 1.成功 + // 1. Success response->set_hash(hash); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回0的hash值 + // 2. Chunk file does not exist, return a hash value of 0 response->set_hash("0"); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. 
Other errors LOG(ERROR) << "get chunk hash failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -557,18 +499,17 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, } } -void ChunkServiceImpl::UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done) { +void ChunkServiceImpl::UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); LOG(INFO) << "Update fileId: " << request->fileid() - << " to epoch: " << request->epoch() - << " success."; + << " to epoch: " << request->epoch() << " success."; } else { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); LOG(WARNING) << "Update fileId: " << request->fileid() @@ -579,7 +520,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, bool ChunkServiceImpl::CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const { - // 检查offset+len是否越界 + // Check if offset+len is out of range if (CURVE_UNLIKELY(offset + len > maxChunkSize_)) { return false; } diff --git a/src/chunkserver/chunk_service.h b/src/chunkserver/chunk_service.h index e15aea389b..6792c230e1 100755 --- a/src/chunkserver/chunk_service.h +++ b/src/chunkserver/chunk_service.h @@ -23,9 +23,9 @@ #ifndef SRC_CHUNKSERVER_CHUNK_SERVICE_H_ #define SRC_CHUNKSERVER_CHUNK_SERVICE_H_ -#include #include #include +#include #include "proto/chunk.pb.h" #include "src/chunkserver/config_info.h" @@ -34,84 +34,71 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; class ChunkServiceImpl : public ChunkService { public: explicit ChunkServiceImpl(const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr &epochMap); + const std::shared_ptr& epochMap); ~ChunkServiceImpl() {} - void DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void DeleteChunkSnapshotOrCorrectSn(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); + void DeleteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void WriteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); + + void DeleteChunkSnapshotOrCorrectSn(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, 
Closure* done); + + void CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); void CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done); - void RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void GetChunkInfo(RpcController *controller, - const GetChunkInfoRequest *request, - GetChunkInfoResponse *response, - Closure *done); - - void GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done); - - void UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done); + const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, + Closure* done); + void RecoverChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void GetChunkInfo(RpcController* controller, + const GetChunkInfoRequest* request, + GetChunkInfoResponse* response, Closure* done); + + void GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, Closure* done); + + void UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, Closure* done); private: /** - * 验证op request的offset和length是否越界和对齐 - * @param offset[in]: op request' offset - * @param len[in]: op request' length - * @return true,说明合法,否则返回false + * Validates whether the offset and length of the op request are within + * bounds and aligned. + * @param offset[in]: Offset of the op request. + * @param len[in]: Length of the op request. + * @return true if valid, false otherwise. */ bool CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const; private: ChunkServiceOptions chunkServiceOptions_; - CopysetNodeManager *copysetNodeManager_; + CopysetNodeManager* copysetNodeManager_; std::shared_ptr inflightThrottle_; - uint32_t maxChunkSize_; + uint32_t maxChunkSize_; std::shared_ptr epochMap_; uint32_t blockSize_; diff --git a/src/chunkserver/chunk_service_closure.cpp b/src/chunkserver/chunk_service_closure.cpp index d680b37d93..32362d04d2 100644 --- a/src/chunkserver/chunk_service_closure.cpp +++ b/src/chunkserver/chunk_service_closure.cpp @@ -21,6 +21,7 @@ */ #include "src/chunkserver/chunk_service_closure.h" + #include #include "src/chunkserver/chunkserver_metrics.h" @@ -30,55 +31,53 @@ namespace chunkserver { void ChunkServiceClosure::Run() { /** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * After the completion of Run, automatically destructs itself to + * avoid missing the destructor call. */ std::unique_ptr selfGuard(this); { - // 所有brpcDone_调用之前要做的操作都放到这个生命周期内 + // All operations to be performed before any brpcDone_ invocation are + // placed within this lifecycle. brpc::ClosureGuard doneGuard(brpcDone_); - // 记录请求处理结果,收集到metric中 + // Record the request processing results and collect them in metric OnResonse(); } - // closure调用的时候减1,closure创建的什么加1 - // 这一行必须放在brpcDone_调用之后,ut里需要测试inflightio超过限制时的表现 - // 会在传进来的closure里面加一个sleep来控制inflightio个数 + // Decrement by 1 when the closure is invoked, and increment by 1 when the + // closure is created. This line must be placed after the brpcDone_ + // invocation. It is necessary to test the behavior when inflightio exceeds + // the limit in unit tests. 
A sleep is added in the provided closure to + // control the number of inflightio. if (nullptr != inflightThrottle_) { inflightThrottle_->Decrement(); } } void ChunkServiceClosure::OnRequest() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is empty, metric will not be counted + if (request_ == nullptr || response_ == nullptr) return; - // 根据request类型统计请求数量 + // Count the number of requests based on their type ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::READ_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::WRITE_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::RECOVER_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::PASTE_CHUNK); break; } @@ -88,62 +87,51 @@ void ChunkServiceClosure::OnRequest() { } void ChunkServiceClosure::OnResonse() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is empty, metric will not be counted + if (request_ == nullptr || response_ == nullptr) return; - // 可以根据response中的返回值来统计此次请求的处理结果 + // The processing result of this request can be calculated based on the + // return value in the response ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); bool hasError = false; uint64_t latencyUs = common::TimeUtility::GetTimeofDayUs() - receivedTimeUs_; switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - // 如果是read请求,返回CHUNK_OP_STATUS_CHUNK_NOTEXIST也认为是正确的 - hasError = (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && - (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + // For read requests, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST is + // also considered correct + hasError = (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && + (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::READ_CHUNK, - request_->size(), - latencyUs, - hasError); + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::READ_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::WRITE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::WRITE_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - 
metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::RECOVER_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::RECOVER_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::PASTE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::PASTE_CHUNK, request_->size(), + latencyUs, hasError); break; } default: diff --git a/src/chunkserver/chunk_service_closure.h b/src/chunkserver/chunk_service_closure.h index b6dc7d4d65..1d1e79c02a 100755 --- a/src/chunkserver/chunk_service_closure.h +++ b/src/chunkserver/chunk_service_closure.h @@ -24,66 +24,71 @@ #define SRC_CHUNKSERVER_CHUNK_SERVICE_CLOSURE_H_ #include + #include #include "proto/chunk.pb.h" -#include "src/chunkserver/op_request.h" #include "src/chunkserver/inflight_throttle.h" +#include "src/chunkserver/op_request.h" #include "src/common/timeutility.h" namespace curve { namespace chunkserver { -// chunk service层的闭包,对rpc的闭包再做一层封装,用于请求返回时统计metric信息 +// The closure of the chunk service layer encapsulates the closure of the rpc +// layer, and is used to collect metric information when a request returns class ChunkServiceClosure : public braft::Closure { public: explicit ChunkServiceClosure( - std::shared_ptr inflightThrottle, - const ChunkRequest *request, - ChunkResponse *response, - google::protobuf::Closure *done) - : inflightThrottle_(inflightThrottle) - , request_(request) - , response_(response) - , brpcDone_(done) - , receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { - // closure创建的什么加1,closure调用的时候减1 - if (nullptr != inflightThrottle_) { - inflightThrottle_->Increment(); - } - // 统计请求数量 - OnRequest(); + std::shared_ptr inflightThrottle, + const ChunkRequest* request, ChunkResponse* response, + google::protobuf::Closure* done) + : inflightThrottle_(inflightThrottle), + request_(request), + response_(response), + brpcDone_(done), + receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { + // Increment by 1 when the closure is created, and decrement by 1 when + // the closure is invoked. 
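// A minimal usage sketch of this pairing (hypothetical caller code, not part
// of this patch): the service wraps the brpc done-closure before dispatching
// an op request, so the inflight count rises exactly once per request here
// and falls exactly once when Run() fires, even on early-error paths.
//
//   ChunkServiceClosure* wrapped = new ChunkServiceClosure(
//       inflightThrottle_, request, response, done);  // inflight count +1
//   // ... dispatch the op request with `wrapped` as its done callback ...
//   wrapped->Run();  // replies via brpcDone_, records metrics, count -1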
+ if (nullptr != inflightThrottle_) { + inflightThrottle_->Increment(); } + // Count the number of requests + OnRequest(); + } ~ChunkServiceClosure() = default; /** - * 该闭包的guard生命周期结束时会调用该函数 - * 该函数内目前主要是对读写请求返回结果的一些metric统计 - * 后面如果有类似的场景(在service请求结束时做一些处理)可以在内部添加逻辑 + * This function is called when the guard lifecycle of the closure ends. + * Currently it mainly performs metric statistics on the results returned + * by read and write requests. If similar scenarios arise in the future + * (doing some processing at the end of a service request), the logic can + * be added here. */ void Run() override; private: /** - * 统计请求数量和速率 + * Count the number and rate of requests */ void OnRequest(); /** - * 记录请求处理的结果,例如请求是否出错、请求的延时等 + * Record the result of request processing, such as whether the request + * failed and the request latency. */ void OnResonse(); private: - // inflight流控 + // inflight flow control std::shared_ptr inflightThrottle_; - // rpc请求的request - const ChunkRequest *request_; - // rpc请求的response - ChunkResponse *response_; - // rpc请求回调 - google::protobuf::Closure *brpcDone_; - // 接受到请求的时间 + // The rpc request + const ChunkRequest* request_; + // The rpc response + ChunkResponse* response_; + // The rpc request callback + google::protobuf::Closure* brpcDone_; + // Time when the request was received uint64_t receivedTimeUs_; }; diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 22f302c9da..5a1911dd73 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -44,15 +44,15 @@ #include "src/common/bytes_convert.h" #include "src/common/concurrent/task_thread_pool.h" #include "src/common/curve_version.h" -#include "src/common/uri_parser.h" #include "src/common/log_util.h" +#include "src/common/uri_parser.h" +using ::curve::chunkserver::concurrent::ConcurrentApplyModule; +using ::curve::common::UriParser; +using ::curve::fs::FileSystemType; using ::curve::fs::LocalFileSystem; using ::curve::fs::LocalFileSystemOption; using ::curve::fs::LocalFsFactory; -using ::curve::fs::FileSystemType; -using ::curve::chunkserver::concurrent::ConcurrentApplyModule; -using ::curve::common::UriParser; DEFINE_string(conf, "ChunkServer.conf", "Path of configuration file"); DEFINE_string(chunkServerIp, "127.0.0.1", "chunkserver ip"); @@ -60,19 +60,19 @@ DEFINE_bool(enableExternalServer, false, "start external server or not"); DEFINE_string(chunkServerExternalIp, "127.0.0.1", "chunkserver external ip"); DEFINE_int32(chunkServerPort, 8200, "chunkserver port"); DEFINE_string(chunkServerStoreUri, "local://./0/", "chunkserver store uri"); -DEFINE_string(chunkServerMetaUri, - "local://./0/chunkserver.dat", "chunkserver meta uri"); +DEFINE_string(chunkServerMetaUri, "local://./0/chunkserver.dat", + "chunkserver meta uri"); DEFINE_string(copySetUri, "local://./0/copysets", "copyset data uri"); DEFINE_string(raftSnapshotUri, "curve://./0/copysets", "raft snapshot uri"); DEFINE_string(raftLogUri, "curve://./0/copysets", "raft log uri"); -DEFINE_string(recycleUri, "local://./0/recycler" , "recycle uri"); +DEFINE_string(recycleUri, "local://./0/recycler", "recycle uri"); DEFINE_string(chunkFilePoolDir, "./0/", "chunk file pool location"); DEFINE_int32(chunkFilePoolAllocatedPercent, 80, "format percent for chunkfillpool."); DEFINE_uint32(chunkFormatThreadNum, 1, "number of threads while file pool formatting"); -DEFINE_string(chunkFilePoolMetaPath, - "./chunkfilepool.meta", "chunk file pool meta path"); 
+DEFINE_string(chunkFilePoolMetaPath, "./chunkfilepool.meta", + "chunk file pool meta path"); DEFINE_string(logPath, "./0/chunkserver.log-", "log file path"); DEFINE_string(mdsListenAddr, "127.0.0.1:6666", "mds listen addr"); DEFINE_bool(enableChunkfilepool, true, "enable chunkfilepool"); @@ -80,8 +80,7 @@ DEFINE_uint32(copysetLoadConcurrency, 5, "copyset load concurrency"); DEFINE_bool(enableWalfilepool, true, "enable WAL filepool"); DEFINE_string(walFilePoolDir, "./0/", "WAL filepool location"); DEFINE_string(walFilePoolMetaPath, "./walfilepool.meta", - "WAL filepool meta path"); - + "WAL filepool meta path"); const char* kProtocalCurve = "curve"; @@ -93,57 +92,56 @@ int ChunkServer::Run(int argc, char** argv) { RegisterCurveSegmentLogStorageOrDie(); - // ==========================加载配置项===============================// + // =====================Load Configuration Items=======================// LOG(INFO) << "Loading Configuration."; common::Configuration conf; conf.SetConfigPath(FLAGS_conf.c_str()); - // 在从配置文件获取 + // Obtain settings from the configuration file LOG_IF(FATAL, !conf.LoadConfig()) << "load chunkserver configuration fail, conf path = " << conf.GetConfigPath(); - // 命令行可以覆盖配置文件中的参数 + // The command line can override parameters in the configuration file LoadConfigFromCmdline(&conf); - // 初始化日志模块 + // Initialize the logging module curve::common::DisableLoggingToStdErr(); google::InitGoogleLogging(argv[0]); - // 打印参数 + // Print parameters conf.PrintConfig(); curve::common::ExposeCurveVersion(); - // ============================初始化各模块==========================// + // ====================Initialize each module======================// LOG(INFO) << "Initializing ChunkServer modules"; - // 优先初始化 metric 收集模块 + // Initialize the metric collection module first ChunkServerMetricOptions metricOptions; InitMetricOptions(&conf, &metricOptions); ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); LOG_IF(FATAL, metric->Init(metricOptions) != 0) << "Failed to init chunkserver metric."; - // 初始化并发持久模块 + // Initialize the concurrent persistence module ConcurrentApplyModule concurrentapply; ConcurrentApplyOption concurrentApplyOptions; InitConcurrentApplyOptions(&conf, &concurrentApplyOptions); LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions)) << "Failed to initialize concurrentapply module!"; - // 初始化本地文件系统 + // Initialize the local file system std::shared_ptr fs( LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); LocalFileSystemOption lfsOption; - LOG_IF(FATAL, !conf.GetBoolValue( - "fs.enable_renameat2", &lfsOption.enableRenameat2)); + LOG_IF(FATAL, !conf.GetBoolValue("fs.enable_renameat2", + &lfsOption.enableRenameat2)); LOG_IF(FATAL, 0 != fs->Init(lfsOption)) << "Failed to initialize local filesystem module!"; - // 初始化chunk文件池 + // Initialize the chunk file pool FilePoolOptions chunkFilePoolOptions; InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions); - std::shared_ptr chunkfilePool = - std::make_shared(fs); + std::shared_ptr chunkfilePool = std::make_shared(fs); LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions)) << "Failed to init chunk file pool"; @@ -156,9 +154,8 @@ int ChunkServer::Run(int argc, char** argv) { bool useChunkFilePoolAsWalPool = true; uint32_t useChunkFilePoolAsWalPoolReserve = 15; if (raftLogProtocol == kProtocalCurve) { - LOG_IF(FATAL, !conf.GetBoolValue( - "walfilepool.use_chunk_file_pool", - &useChunkFilePoolAsWalPool)); + LOG_IF(FATAL, !conf.GetBoolValue("walfilepool.use_chunk_file_pool", + &useChunkFilePoolAsWalPool)); if (!useChunkFilePoolAsWalPool) { 
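// A sketch of the two WAL pool modes this branch separates (key names appear
// in this file; values are illustrative, taken from the example config):
//
//   walfilepool.use_chunk_file_pool=false     -> this branch: build a
//   walfilepool.file_pool_dir=./0/               dedicated WAL pool from the
//   walfilepool.meta_path=./walfilepool.meta     walfilepool.* options below
//
//   walfilepool.use_chunk_file_pool=true      -> else branch: WAL segments
//   walfilepool.use_chunk_file_pool_reserve=15   come from the chunk pool,
//                                                with this share (presumably
//                                                a percentage) held in reserve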
FilePoolOptions walFilePoolOptions; @@ -170,20 +167,20 @@ int ChunkServer::Run(int argc, char** argv) { } else { walFilePool = chunkfilePool; LOG_IF(FATAL, !conf.GetUInt32Value( - "walfilepool.use_chunk_file_pool_reserve", - &useChunkFilePoolAsWalPoolReserve)); + "walfilepool.use_chunk_file_pool_reserve", + &useChunkFilePoolAsWalPoolReserve)); LOG(INFO) << "initialize to use chunkfilePool as walpool success."; } } - // 远端拷贝管理模块选项 + // Remote Copy Management Module Options CopyerOptions copyerOptions; InitCopyerOptions(&conf, ©erOptions); auto copyer = std::make_shared(); LOG_IF(FATAL, copyer->Init(copyerOptions) != 0) << "Failed to initialize clone copyer."; - // 克隆管理模块初始化 + // Clone Management Module Initialization CloneOptions cloneOptions; InitCloneOptions(&conf, &cloneOptions); uint32_t sliceSize; @@ -195,11 +192,11 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0) << "Failed to initialize clone manager."; - // 初始化注册模块 + // Initialize registration module RegisterOptions registerOptions; InitRegisterOptions(&conf, ®isterOptions); registerOptions.useChunkFilePoolAsWalPoolReserve = - useChunkFilePoolAsWalPoolReserve; + useChunkFilePoolAsWalPoolReserve; registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool; registerOptions.fs = fs; registerOptions.chunkFilepool = chunkfilePool; @@ -208,40 +205,39 @@ int ChunkServer::Run(int argc, char** argv) { Register registerMDS(registerOptions); ChunkServerMetadata metadata; ChunkServerMetadata localMetadata; - // 从本地获取meta - std::string metaPath = UriParser::GetPathFromUri( - registerOptions.chunkserverMetaUri); + // Get Meta from Local + std::string metaPath = + UriParser::GetPathFromUri(registerOptions.chunkserverMetaUri); auto epochMap = std::make_shared(); if (fs->FileExists(metaPath)) { LOG_IF(FATAL, GetChunkServerMetaFromLocal( - registerOptions.chunserverStoreUri, - registerOptions.chunkserverMetaUri, - registerOptions.fs, &localMetadata) != 0) + registerOptions.chunserverStoreUri, + registerOptions.chunkserverMetaUri, + registerOptions.fs, &localMetadata) != 0) << "Failed to GetChunkServerMetaFromLocal."; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - &localMetadata, &metadata, epochMap) != 0) + LOG_IF(FATAL, registerMDS.RegisterToMDS(&localMetadata, &metadata, + epochMap) != 0) << "Failed to register to MDS."; } else { - // 如果本地获取不到,向mds注册 - LOG(INFO) << "meta file " - << metaPath << " do not exist, register to mds"; - LOG_IF(FATAL, registerMDS.RegisterToMDS( - nullptr, &metadata, epochMap) != 0) + // If it cannot be obtained locally, register with MDS + LOG(INFO) << "meta file " << metaPath + << " do not exist, register to mds"; + LOG_IF(FATAL, + registerMDS.RegisterToMDS(nullptr, &metadata, epochMap) != 0) << "Failed to register to MDS."; } - // trash模块初始化 + // Trash module initialization TrashOptions trashOptions; InitTrashOptions(&conf, &trashOptions); trashOptions.localFileSystem = fs; trashOptions.chunkFilePool = chunkfilePool; trashOptions.walPool = walFilePool; trash_ = std::make_shared(); - LOG_IF(FATAL, trash_->Init(trashOptions) != 0) - << "Failed to init Trash"; + LOG_IF(FATAL, trash_->Init(trashOptions) != 0) << "Failed to init Trash"; - // 初始化复制组管理模块 + // Initialize replication group management module CopysetNodeOptions copysetNodeOptions; InitCopysetNodeOptions(&conf, ©setNodeOptions); copysetNodeOptions.concurrentapply = &concurrentapply; @@ -262,23 +258,25 @@ int ChunkServer::Run(int argc, char** argv) { } } - // install snapshot的带宽限制 + // Bandwidth limitation 
of install snapshot int snapshotThroughputBytes; LOG_IF(FATAL, !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes", &snapshotThroughputBytes)); /** - * checkCycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, - * checkCycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 - * 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 - * 不是20MB的带宽 + * CheckCycles enables finer-grained bandwidth control. With + * snapshotThroughputBytes=100MB and checkCycles=10, it guarantees 10MB of + * bandwidth per 1/10 second, without accumulation: the first 1/10 second + * gets 10MB, which then expires, so the second 1/10 second can still use + * only 10MB, not 20MB. */ int checkCycles; LOG_IF(FATAL, !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles", &checkCycles)); - scoped_refptr snapshotThrottle - = new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); + scoped_refptr snapshotThrottle = + new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles); snapshotThrottle_ = snapshotThrottle; copysetNodeOptions.snapshotThrottle = &snapshotThrottle_; @@ -288,7 +286,7 @@ int ChunkServer::Run(int argc, char** argv) { return -1; } butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port); - // 注册curve snapshot storage + // Register curve snapshot storage RegisterCurveSnapshotStorageOrDie(); CurveSnapshotStorage::set_server_addr(endPoint); copysetNodeManager_ = &CopysetNodeManager::GetInstance(); @@ -302,7 +300,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0) << "Failed to init scan manager."; - // 心跳模块初始化 + // Heartbeat module initialization HeartbeatOptions heartbeatOptions; InitHeartbeatOptions(&conf, &heartbeatOptions); heartbeatOptions.copysetNodeManager = copysetNodeManager_; @@ -314,7 +312,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0) << "Failed to init Heartbeat manager."; - // 监控部分模块的metric指标 + // Monitor the metrics of some modules metric->MonitorTrash(trash_.get()); metric->MonitorChunkFilePool(chunkfilePool.get()); if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) { @@ -322,8 +320,8 @@ int ChunkServer::Run(int argc, char** argv) { } metric->ExposeConfigMetric(&conf); - // ========================添加rpc服务===============================// - // TODO(lixiaocui): rpc中各接口添加上延迟metric + // =====================Add RPC Service===================== // + // TODO(lixiaocui): Add latency metrics to each RPC interface brpc::Server server; brpc::Server externalServer; // We need call braft::add_service to add endPoint to braft::NodeManager // copyset service CopysetServiceImpl copysetService(copysetNodeManager_); - int ret = server.AddService(&copysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); + int ret = + server.AddService(&copysetService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService"; // inflight throttle int maxInflight; - LOG_IF(FATAL, - !conf.GetIntValue("chunkserver.max_inflight_requests", - &maxInflight)); - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + LOG_IF(FATAL, !conf.GetIntValue("chunkserver.max_inflight_requests", + &maxInflight)); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service @@ -351,8 +348,7 @@ int ChunkServer::Run(int 
argc, char** argv) { chunkServiceOptions.inflightThrottle = inflightThrottle; ChunkServiceImpl chunkService(chunkServiceOptions, epochMap); - ret = server.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = server.AddService(&chunkService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService"; // We need to replace braft::CliService with our own implementation @@ -360,14 +356,12 @@ int ChunkServer::Run(int argc, char** argv) { ret = server.RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::CliService"; BRaftCliServiceImpl braftCliService; - ret = server.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = server.AddService(&braftCliService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService"; // braftclient service BRaftCliServiceImpl2 braftCliService2; - ret = server.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = server.AddService(&braftCliService2, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService2"; // We need to replace braft::FileServiceImpl with our own implementation @@ -375,51 +369,53 @@ int ChunkServer::Run(int argc, char** argv) { ret = server.RemoveService(service); CHECK(0 == ret) << "Fail to remove braft::FileService"; kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs)); - ret = server.AddService(&kCurveFileService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = + server.AddService(&kCurveFileService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CurveFileService"; // chunkserver service ChunkServerServiceImpl chunkserverService(copysetNodeManager_); - ret = server.AddService(&chunkserverService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = + server.AddService(&chunkserverService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ChunkServerService"; // scan copyset service ScanServiceImpl scanCopysetService(&scanManager_); - ret = server.AddService(&scanCopysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); + ret = + server.AddService(&scanCopysetService, brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ScanCopysetService"; - // 启动rpc service + // Start rpc service LOG(INFO) << "Internal server is going to serve on: " << copysetNodeOptions.ip << ":" << copysetNodeOptions.port; if (server.Start(endPoint, NULL) != 0) { LOG(ERROR) << "Fail to start Internal Server"; return -1; } - /* 启动external server - external server用于向client和工具等外部提供服务 - 区别于mds和chunkserver之间的通信*/ + /* Start the external server. + The external server provides services to external clients and tools, + as opposed to the communication between MDS and chunkserver. + */ if (registerOptions.enableExternalServer) { ret = externalServer.AddService(&copysetService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add CopysetService at external server"; ret = externalServer.AddService(&chunkService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ChunkService at external server"; ret = externalServer.AddService(&braftCliService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService at external server"; ret = externalServer.AddService(&braftCliService2, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server"; braft::RaftStatImpl 
raftStatService; ret = externalServer.AddService(&raftStatService, - brpc::SERVER_DOESNT_OWN_SERVICE); + brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add RaftStatService at external server"; - std::string externalAddr = registerOptions.chunkserverExternalIp + ":" + - std::to_string(registerOptions.chunkserverPort); + std::string externalAddr = + registerOptions.chunkserverExternalIp + ":" + + std::to_string(registerOptions.chunkserverPort); LOG(INFO) << "External server is going to serve on: " << externalAddr; if (externalServer.Start(externalAddr.c_str(), NULL) != 0) { LOG(ERROR) << "Fail to start External Server"; @@ -427,30 +423,29 @@ int ChunkServer::Run(int argc, char** argv) { } } - // =======================启动各模块==================================// + // ===============Start each module=============== // LOG(INFO) << "ChunkServer starts."; /** - * 将模块启动放到rpc 服务启动后面,主要是为了解决内存增长的问题 - * 控制并发恢复的copyset数量,copyset恢复需要依赖rpc服务先启动 + * Module startup is placed after the RPC service starts mainly to address + * memory growth: it controls the number of copysets recovering + * concurrently, and copyset recovery requires the RPC service to be + * started first. */ - LOG_IF(FATAL, trash_->Run() != 0) - << "Failed to start trash."; - LOG_IF(FATAL, cloneManager_.Run() != 0) - << "Failed to start clone manager."; + LOG_IF(FATAL, trash_->Run() != 0) << "Failed to start trash."; + LOG_IF(FATAL, cloneManager_.Run() != 0) << "Failed to start clone manager."; LOG_IF(FATAL, heartbeat_.Run() != 0) << "Failed to start heartbeat manager."; LOG_IF(FATAL, copysetNodeManager_->Run() != 0) << "Failed to start CopysetNodeManager."; - LOG_IF(FATAL, scanManager_.Run() != 0) - << "Failed to start scan manager."; + LOG_IF(FATAL, scanManager_.Run() != 0) << "Failed to start scan manager."; LOG_IF(FATAL, !chunkfilePool->StartCleaning()) << "Failed to start file pool clean worker."; - // =======================等待进程退出==================================// + // ===============Wait for the process to exit=============== // while (!brpc::IsAskedToQuit()) { bthread_usleep(1000000L); } - // scanmanager stop maybe need a little while, so stop it first before stop service NOLINT + // scanmanager stop may take a little while, + // so stop it first before stopping the service LOG(INFO) << "ChunkServer is going to quit."; LOG_IF(ERROR, scanManager_.Fini() != 0) << "Failed to shutdown scan manager."; @@ -469,10 +464,8 @@ int ChunkServer::Run(int argc, char** argv) { << "Failed to shutdown CopysetNodeManager."; LOG_IF(ERROR, cloneManager_.Fini() != 0) << "Failed to shutdown clone manager."; - LOG_IF(ERROR, copyer->Fini() != 0) - << "Failed to shutdown clone copyer."; - LOG_IF(ERROR, trash_->Fini() != 0) - << "Failed to shutdown trash."; + LOG_IF(ERROR, copyer->Fini() != 0) << "Failed to shutdown clone copyer."; + LOG_IF(ERROR, trash_->Fini() != 0) << "Failed to shutdown trash."; LOG_IF(ERROR, !chunkfilePool->StopCleaning()) << "Failed to shutdown file pool clean worker."; concurrentapply.Stop(); @@ -481,14 +474,12 @@ int ChunkServer::Run(int argc, char** argv) { return 0; } -void ChunkServer::Stop() { - brpc::AskToQuit(); -} +void ChunkServer::Stop() { brpc::AskToQuit(); } void ChunkServer::InitChunkFilePoolOptions( - common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) { + common::Configuration* conf, FilePoolOptions* chunkFilePoolOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - &chunkFilePoolOptions->fileSize)); + &chunkFilePoolOptions->fileSize)); LOG_IF(FATAL, 
!conf->GetUInt32Value("global.meta_page_size", &chunkFilePoolOptions->metaPageSize)) @@ -499,24 +490,23 @@ void ChunkServer::InitChunkFilePoolOptions( << "Not found `global.block_size` in config file"; LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", - &chunkFilePoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "chunkfilepool.enable_get_chunk_from_pool", - &chunkFilePoolOptions->getFileFromPool)); + &chunkFilePoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + &chunkFilePoolOptions->getFileFromPool)); if (chunkFilePoolOptions->getFileFromPool == false) { std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_dir", &chunkFilePoolUri)); - ::memcpy(chunkFilePoolOptions->filePoolDir, - chunkFilePoolUri.c_str(), + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), chunkFilePoolUri.size()); } else { std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.meta_path", &metaUri)); - ::memcpy( - chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + LOG_IF(FATAL, + !conf->GetStringValue("chunkfilepool.meta_path", &metaUri)); + ::memcpy(chunkFilePoolOptions->metaPath, metaUri.c_str(), + metaUri.size()); std::string chunkFilePoolUri; LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", @@ -539,12 +529,12 @@ void ChunkServer::InitChunkFilePoolOptions( "chunkfilepool.chunk_file_pool_format_thread_num", &chunkFilePoolOptions->formatThreadNum)); LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", - &chunkFilePoolOptions->needClean)); + &chunkFilePoolOptions->needClean)); LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", &chunkFilePoolOptions->bytesPerWrite)); LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", - &chunkFilePoolOptions->iops4clean)); + &chunkFilePoolOptions->iops4clean)); std::string copysetUri; LOG_IF(FATAL, @@ -567,9 +557,9 @@ void ChunkServer::InitChunkFilePoolOptions( (useChunkFilePoolAsWalPool && Trash::IsWALFile(filename)); }; - if (0 == chunkFilePoolOptions->bytesPerWrite - || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 - || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { + if (0 == chunkFilePoolOptions->bytesPerWrite || + chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 || + 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " << "and should be aligned to 4K, " << "but now is: " << chunkFilePoolOptions->bytesPerWrite; @@ -577,41 +567,40 @@ void ChunkServer::InitChunkFilePoolOptions( } } -void ChunkServer::InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOptions) { - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.size", &concurrentApplyOptions->rconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.size", &concurrentApplyOptions->wconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.queuedepth", &concurrentApplyOptions->rqueuedepth)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.queuedepth", &concurrentApplyOptions->wqueuedepth)); +void ChunkServer::InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOptions) { + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.size", + 
&concurrentApplyOptions->rconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.size", + &concurrentApplyOptions->wconcurrentsize)); + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.queuedepth", + &concurrentApplyOptions->rqueuedepth)); + LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.queuedepth", + &concurrentApplyOptions->wqueuedepth)); } -void ChunkServer::InitWalFilePoolOptions( - common::Configuration *conf, FilePoolOptions *walPoolOptions) { +void ChunkServer::InitWalFilePoolOptions(common::Configuration* conf, + FilePoolOptions* walPoolOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", - &walPoolOptions->fileSize)); + &walPoolOptions->fileSize)); LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", - &walPoolOptions->metaPageSize)); + &walPoolOptions->metaPageSize)); LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", - &walPoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "walfilepool.enable_get_segment_from_pool", - &walPoolOptions->getFileFromPool)); + &walPoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("walfilepool.enable_get_segment_from_pool", + &walPoolOptions->getFileFromPool)); if (walPoolOptions->getFileFromPool == false) { std::string filePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "walfilepool.file_pool_dir", &filePoolUri)); - ::memcpy(walPoolOptions->filePoolDir, - filePoolUri.c_str(), + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.file_pool_dir", + &filePoolUri)); + ::memcpy(walPoolOptions->filePoolDir, filePoolUri.c_str(), filePoolUri.size()); } else { std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "walfilepool.meta_path", &metaUri)); + LOG_IF(FATAL, !conf->GetStringValue("walfilepool.meta_path", &metaUri)); std::string pool_size; LOG_IF(FATAL, !conf->GetStringValue("walfilepool.chunk_file_pool_size", @@ -642,94 +631,98 @@ void ChunkServer::InitWalFilePoolOptions( walPoolOptions->isAllocated = [](const string& filename) { return Trash::IsWALFile(filename); }; - ::memcpy( - walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + ::memcpy(walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); } } void ChunkServer::InitCopysetNodeOptions( - common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) { + common::Configuration* conf, CopysetNodeOptions* copysetNodeOptions) { LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", ©setNodeOptions->port)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", ©setNodeOptions->port)); if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535) { LOG(FATAL) << "Invalid server port provided: " << copysetNodeOptions->port; } LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms", - ©setNodeOptions->electionTimeoutMs)); + ©setNodeOptions->electionTimeoutMs)); LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s", - ©setNodeOptions->snapshotIntervalS)); + ©setNodeOptions->snapshotIntervalS)); bool ret = conf->GetBoolValue("copyset.enable_lease_read", - ©setNodeOptions->enbaleLeaseRead); + ©setNodeOptions->enbaleLeaseRead); LOG_IF(WARNING, ret == false) << "config no copyset.enable_lease_read info, using default value " << copysetNodeOptions->enbaleLeaseRead; LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin", - ©setNodeOptions->catchupMargin)); + ©setNodeOptions->catchupMargin)); LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri", - 
©setNodeOptions->chunkDataUri)); + ©setNodeOptions->chunkDataUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri", - ©setNodeOptions->logUri)); + ©setNodeOptions->logUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri", - ©setNodeOptions->raftMetaUri)); + ©setNodeOptions->raftMetaUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri", - ©setNodeOptions->raftSnapshotUri)); + ©setNodeOptions->raftSnapshotUri)); LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", - ©setNodeOptions->recyclerUri)); + ©setNodeOptions->recyclerUri)); LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size", - ©setNodeOptions->maxChunkSize)); + ©setNodeOptions->maxChunkSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - ©setNodeOptions->metaPageSize)); + ©setNodeOptions->metaPageSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size", - ©setNodeOptions->blockSize)); + ©setNodeOptions->blockSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit", - ©setNodeOptions->locationLimit)); + ©setNodeOptions->locationLimit)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency", - ©setNodeOptions->loadConcurrency)); + ©setNodeOptions->loadConcurrency)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes", - ©setNodeOptions->checkRetryTimes)); + ©setNodeOptions->checkRetryTimes)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin", - ©setNodeOptions->finishLoadMargin)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_loadmargin_interval_ms", - ©setNodeOptions->checkLoadMarginIntervalMs)); + ©setNodeOptions->finishLoadMargin)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "copyset.check_loadmargin_interval_ms", + ©setNodeOptions->checkLoadMarginIntervalMs)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_concurrency", - ©setNodeOptions->syncConcurrency)); + ©setNodeOptions->syncConcurrency)); LOG_IF(FATAL, !conf->GetBoolValue( - "copyset.enable_odsync_when_open_chunkfile", - ©setNodeOptions->enableOdsyncWhenOpenChunkFile)); + "copyset.enable_odsync_when_open_chunkfile", + ©setNodeOptions->enableOdsyncWhenOpenChunkFile)); if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile) { - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_chunk_limits", - ©setNodeOptions->syncChunkLimit)); - LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_threshold", - ©setNodeOptions->syncThreshold)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_syncing_interval_ms", - ©setNodeOptions->checkSyncingIntervalMs)); - LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_trigger_seconds", - ©setNodeOptions->syncTriggerSeconds)); + LOG_IF(FATAL, + !conf->GetUInt64Value("copyset.sync_chunk_limits", + ©setNodeOptions->syncChunkLimit)); + LOG_IF(FATAL, + !conf->GetUInt64Value("copyset.sync_threshold", + ©setNodeOptions->syncThreshold)); + LOG_IF(FATAL, !conf->GetUInt32Value( + "copyset.check_syncing_interval_ms", + ©setNodeOptions->checkSyncingIntervalMs)); + LOG_IF(FATAL, + !conf->GetUInt32Value("copyset.sync_trigger_seconds", + ©setNodeOptions->syncTriggerSeconds)); } } -void ChunkServer::InitCopyerOptions( - common::Configuration *conf, CopyerOptions *copyerOptions) { +void ChunkServer::InitCopyerOptions(common::Configuration* conf, + CopyerOptions* copyerOptions) { LOG_IF(FATAL, !conf->GetStringValue("curve.root_username", - ©erOptions->curveUser.owner)); + ©erOptions->curveUser.owner)); LOG_IF(FATAL, !conf->GetStringValue("curve.root_password", - ©erOptions->curveUser.password)); + ©erOptions->curveUser.password)); 
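// The LOG_IF(FATAL, !conf->Get*Value(...)) pattern used throughout makes a
// key mandatory: Get*Value returns false when the key is missing, which
// trips the FATAL log. A sketch of the optional-with-default variant,
// mirroring the copyset.enable_lease_read handling earlier in this file:
//
//   bool useLease = false;  // the default survives if the key is absent
//   if (!conf->GetBoolValue("copyset.enable_lease_read", &useLease)) {
//       LOG(WARNING) << "copyset.enable_lease_read not set, using default";
//   }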
LOG_IF(FATAL, !conf->GetStringValue("curve.config_path", - ©erOptions->curveConf)); + ©erOptions->curveConf)); LOG_IF(FATAL, - !conf->GetStringValue("s3.config_path", ©erOptions->s3Conf)); + !conf->GetStringValue("s3.config_path", ©erOptions->s3Conf)); bool disableCurveClient = false; bool disableS3Adapter = false; LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client", - &disableCurveClient)); - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_s3_adapter", - &disableS3Adapter)); + &disableCurveClient)); + LOG_IF(FATAL, + !conf->GetBoolValue("clone.disable_s3_adapter", &disableS3Adapter)); LOG_IF(FATAL, !conf->GetUInt64Value("curve.curve_file_timeout_s", - ©erOptions->curveFileTimeoutSec)); + ©erOptions->curveFileTimeoutSec)); if (disableCurveClient) { copyerOptions->curveClient = nullptr; @@ -744,105 +737,105 @@ void ChunkServer::InitCopyerOptions( } } -void ChunkServer::InitCloneOptions( - common::Configuration *conf, CloneOptions *cloneOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("clone.thread_num", - &cloneOptions->threadNum)); +void ChunkServer::InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions) { + LOG_IF(FATAL, + !conf->GetUInt32Value("clone.thread_num", &cloneOptions->threadNum)); LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", - &cloneOptions->queueCapacity)); + &cloneOptions->queueCapacity)); } -void ChunkServer::InitScanOptions( - common::Configuration *conf, ScanManagerOptions *scanOptions) { +void ChunkServer::InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", - &scanOptions->intervalSec)); + &scanOptions->intervalSec)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", - &scanOptions->scanSize)); + &scanOptions->scanSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &scanOptions->chunkMetaPageSize)); + &scanOptions->chunkMetaPageSize)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", - &scanOptions->timeoutMs)); + &scanOptions->timeoutMs)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", - &scanOptions->retry)); + &scanOptions->retry)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", - &scanOptions->retryIntervalUs)); + &scanOptions->retryIntervalUs)); } -void ChunkServer::InitHeartbeatOptions( - common::Configuration *conf, HeartbeatOptions *heartbeatOptions) { +void ChunkServer::InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions) { LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - &heartbeatOptions->storeUri)); + &heartbeatOptions->storeUri)); LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.port", - &heartbeatOptions->port)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", &heartbeatOptions->port)); LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - &heartbeatOptions->mdsListenAddr)); + &heartbeatOptions->mdsListenAddr)); LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", - &heartbeatOptions->intervalSec)); + &heartbeatOptions->intervalSec)); LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", - &heartbeatOptions->timeout)); + &heartbeatOptions->timeout)); } -void ChunkServer::InitRegisterOptions( - common::Configuration *conf, RegisterOptions *registerOptions) { +void ChunkServer::InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions) { LOG_IF(FATAL, 
!conf->GetStringValue("mds.listen.addr", - ®isterOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", - ®isterOptions->chunkserverInternalIp)); + ®isterOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetStringValue( + "global.ip", ®isterOptions->chunkserverInternalIp)); LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", - ®isterOptions->enableExternalServer)); - LOG_IF(FATAL, !conf->GetStringValue("global.external_ip", - ®isterOptions->chunkserverExternalIp)); + ®isterOptions->enableExternalServer)); + LOG_IF(FATAL, + !conf->GetStringValue("global.external_ip", + ®isterOptions->chunkserverExternalIp)); LOG_IF(FATAL, !conf->GetIntValue("global.port", - ®isterOptions->chunkserverPort)); + ®isterOptions->chunkserverPort)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - ®isterOptions->chunserverStoreUri)); + ®isterOptions->chunserverStoreUri)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", - ®isterOptions->chunkserverMetaUri)); + ®isterOptions->chunkserverMetaUri)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", - ®isterOptions->chunkserverDiskType)); + ®isterOptions->chunkserverDiskType)); LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", - ®isterOptions->registerRetries)); + ®isterOptions->registerRetries)); LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", - ®isterOptions->registerTimeout)); + ®isterOptions->registerTimeout)); } -void ChunkServer::InitTrashOptions( - common::Configuration *conf, TrashOptions *trashOptions) { - LOG_IF(FATAL, !conf->GetStringValue( - "copyset.recycler_uri", &trashOptions->trashPath)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.expire_afterSec", &trashOptions->expiredAfterSec)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.scan_periodSec", &trashOptions->scanPeriodSec)); +void ChunkServer::InitTrashOptions(common::Configuration* conf, + TrashOptions* trashOptions) { + LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", + &trashOptions->trashPath)); + LOG_IF(FATAL, !conf->GetIntValue("trash.expire_afterSec", + &trashOptions->expiredAfterSec)); + LOG_IF(FATAL, !conf->GetIntValue("trash.scan_periodSec", + &trashOptions->scanPeriodSec)); } -void ChunkServer::InitMetricOptions( - common::Configuration *conf, ChunkServerMetricOptions *metricOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", &metricOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue( - "global.ip", &metricOptions->ip)); - LOG_IF(FATAL, !conf->GetBoolValue( - "metric.onoff", &metricOptions->collectMetric)); +void ChunkServer::InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions) { + LOG_IF(FATAL, !conf->GetUInt32Value("global.port", &metricOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &metricOptions->ip)); + LOG_IF(FATAL, + !conf->GetBoolValue("metric.onoff", &metricOptions->collectMetric)); } -void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 +void ChunkServer::LoadConfigFromCmdline(common::Configuration* conf) { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) { conf->SetStringValue("global.ip", FLAGS_chunkServerIp); } else { LOG(FATAL) - << "chunkServerIp must be set when run chunkserver in command."; + << "chunkServerIp must be set when run chunkserver in command."; } if 
(GetCommandLineFlagInfo("enableExternalServer", &info) && - !info.is_default) { - conf->SetBoolValue( - "global.enable_external_server", FLAGS_enableExternalServer); + !info.is_default) { + conf->SetBoolValue("global.enable_external_server", + FLAGS_enableExternalServer); } if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && - !info.is_default) { + !info.is_default) { conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); } @@ -850,23 +843,23 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetIntValue("global.port", FLAGS_chunkServerPort); } else { LOG(FATAL) - << "chunkServerPort must be set when run chunkserver in command."; + << "chunkServerPort must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && !info.is_default) { conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); } else { - LOG(FATAL) - << "chunkServerStoreUri must be set when run chunkserver in command."; + LOG(FATAL) << "chunkServerStoreUri must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && !info.is_default) { conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); } else { - LOG(FATAL) - << "chunkServerMetaUri must be set when run chunkserver in command."; + LOG(FATAL) << "chunkServerMetaUri must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) { @@ -875,39 +868,33 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); conf->SetStringValue("copyset.raft_meta_uri", FLAGS_copySetUri); } else { - LOG(FATAL) - << "copySetUri must be set when run chunkserver in command."; + LOG(FATAL) << "copySetUri must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_snapshot_uri", FLAGS_raftSnapshotUri); + conf->SetStringValue("copyset.raft_snapshot_uri", + FLAGS_raftSnapshotUri); } else { LOG(FATAL) - << "raftSnapshotUri must be set when run chunkserver in command."; + << "raftSnapshotUri must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_log_uri", FLAGS_raftLogUri); + conf->SetStringValue("copyset.raft_log_uri", FLAGS_raftLogUri); } else { - LOG(FATAL) - << "raftLogUri must be set when run chunkserver in command."; + LOG(FATAL) << "raftLogUri must be set when run chunkserver in command."; } - if (GetCommandLineFlagInfo("recycleUri", &info) && - !info.is_default) { + if (GetCommandLineFlagInfo("recycleUri", &info) && !info.is_default) { conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); } else { - LOG(FATAL) - << "recycleUri must be set when run chunkserver in command."; + LOG(FATAL) << "recycleUri must be set when run chunkserver in command."; } - if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.chunk_file_pool_dir", FLAGS_chunkFilePoolDir); + if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && !info.is_default) { + conf->SetStringValue("chunkfilepool.chunk_file_pool_dir", + FLAGS_chunkFilePoolDir); } else { LOG(FATAL) - << "chunkFilePoolDir must be set when run chunkserver in command."; + << "chunkFilePoolDir must be set when run chunkserver in command."; } if 
(GetCommandLineFlagInfo("chunkFilePoolAllocatedPercent", &info)) { @@ -922,38 +909,37 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && !info.is_default) { - conf->SetStringValue( - "chunkfilepool.meta_path", FLAGS_chunkFilePoolMetaPath); + conf->SetStringValue("chunkfilepool.meta_path", + FLAGS_chunkFilePoolMetaPath); } else { - LOG(FATAL) - << "chunkFilePoolMetaPath must be set when run chunkserver in command."; + LOG(FATAL) << "chunkFilePoolMetaPath must be set when run chunkserver " + "in command."; } - if (GetCommandLineFlagInfo("walFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.file_pool_dir", FLAGS_walFilePoolDir); + if (GetCommandLineFlagInfo("walFilePoolDir", &info) && !info.is_default) { + conf->SetStringValue("walfilepool.file_pool_dir", FLAGS_walFilePoolDir); } else { LOG(FATAL) - << "walFilePoolDir must be set when run chunkserver in command."; + << "walFilePoolDir must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && !info.is_default) { - conf->SetStringValue( - "walfilepool.meta_path", FLAGS_walFilePoolMetaPath); + conf->SetStringValue("walfilepool.meta_path", + FLAGS_walFilePoolMetaPath); } else { - LOG(FATAL) - << "walFilePoolMetaPath must be set when run chunkserver in command."; + LOG(FATAL) << "walFilePoolMetaPath must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) { conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); } - // 设置日志存放文件夹 + // Set log storage folder if (FLAGS_log_dir.empty()) { - if (!conf->GetStringValue("chunkserver.common.logDir", &FLAGS_log_dir)) { // NOLINT + if (!conf->GetStringValue("chunkserver.common.logDir", + &FLAGS_log_dir)) { // NOLINT LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf << ", will log to /tmp"; } @@ -962,42 +948,40 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && !info.is_default) { conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", - FLAGS_enableChunkfilepool); + FLAGS_enableChunkfilepool); } if (GetCommandLineFlagInfo("enableWalfilepool", &info) && !info.is_default) { conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", - FLAGS_enableWalfilepool); + FLAGS_enableWalfilepool); } if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && !info.is_default) { conf->SetIntValue("copyset.load_concurrency", - FLAGS_copysetLoadConcurrency); + FLAGS_copysetLoadConcurrency); } } int ChunkServer::GetChunkServerMetaFromLocal( - const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata) { + const std::string& storeUri, const std::string& metaUri, + const std::shared_ptr& fs, ChunkServerMetadata* metadata) { std::string proto = UriParser::GetProtocolFromUri(storeUri); if (proto != "local") { LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; return -1; } - // 从配置文件中获取chunkserver元数据的文件路径 + // Obtain the file path for chunkserver metadata from the configuration file proto = UriParser::GetProtocolFromUri(metaUri); if (proto != "local") { - LOG(ERROR) << "Chunkserver meta protocal " - << proto << " is not supported yet"; + LOG(ERROR) << "Chunkserver meta protocal " << proto + << " is not supported yet"; return -1; } - // 元数据文件已经存在 + // The metadata file already exists if 
(fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) { - // 获取文件内容 + // Get the file content if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) { LOG(ERROR) << "Fail to read persisted chunkserver meta data"; return -1; @@ -1011,8 +995,9 @@ int ChunkServer::GetChunkServerMetaFromLocal( return -1; } -int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata) { +int ChunkServer::ReadChunkServerMeta(const std::shared_ptr& fs, + const std::string& metaUri, + ChunkServerMetadata* metadata) { int fd; std::string metaFile = UriParser::GetPathFromUri(metaUri); @@ -1022,7 +1007,7 @@ int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs, return -1; } - #define METAFILE_MAX_SIZE 4096 +#define METAFILE_MAX_SIZE 4096 int size; char json[METAFILE_MAX_SIZE] = {0}; diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h index b9e9005545..6698281fec 100644 --- a/src/chunkserver/chunkserver.h +++ b/src/chunkserver/chunkserver.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_H_ -#include #include -#include "src/common/configuration.h" +#include + +#include "src/chunkserver/chunkserver_metrics.h" +#include "src/chunkserver/clone_manager.h" +#include "src/chunkserver/concurrent_apply/concurrent_apply.h" #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/heartbeat.h" -#include "src/chunkserver/scan_manager.h" -#include "src/chunkserver/clone_manager.h" #include "src/chunkserver/register.h" -#include "src/chunkserver/trash.h" -#include "src/chunkserver/chunkserver_metrics.h" -#include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/scan_manager.h" #include "src/chunkserver/scan_service.h" +#include "src/chunkserver/trash.h" +#include "src/common/configuration.h" using ::curve::chunkserver::concurrent::ConcurrentApplyOption; @@ -43,81 +44,84 @@ namespace chunkserver { class ChunkServer { public: /** - * @brief 初始化Chunkserve各子模块 + * @brief Initialize the Chunkserver sub-modules * - * @param[in] argc 命令行参数总数 - * @param[in] argv 命令行参数列表 + * @param[in] argc Total number of command line arguments + * @param[in] argv Command line argument list * - * @return 0表示成功,非0失败 + * @return 0 indicates success, non-zero indicates failure */ int Run(int argc, char** argv); /** - * @brief 停止chunkserver,结束各子模块 + * @brief Stop the chunkserver and shut down each sub-module */ void Stop(); private: - void InitChunkFilePoolOptions(common::Configuration *conf, - FilePoolOptions *chunkFilePoolOptions); + void InitChunkFilePoolOptions(common::Configuration* conf, + FilePoolOptions* chunkFilePoolOptions); - void InitWalFilePoolOptions(common::Configuration *conf, - FilePoolOptions *walPoolOption); + void InitWalFilePoolOptions(common::Configuration* conf, + FilePoolOptions* walPoolOption); - void InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOption); + void InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOption); - void InitCopysetNodeOptions(common::Configuration *conf, - CopysetNodeOptions *copysetNodeOptions); + void InitCopysetNodeOptions(common::Configuration* conf, + CopysetNodeOptions* copysetNodeOptions); - void InitCopyerOptions(common::Configuration *conf, - CopyerOptions *copyerOptions); + void InitCopyerOptions(common::Configuration* conf, + CopyerOptions* copyerOptions); - void InitCloneOptions(common::Configuration *conf, - CloneOptions 
*cloneOptions); + void InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions); - void InitScanOptions(common::Configuration *conf, - ScanManagerOptions *scanOptions); + void InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions); - void InitHeartbeatOptions(common::Configuration *conf, - HeartbeatOptions *heartbeatOptions); + void InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions); - void InitRegisterOptions(common::Configuration *conf, - RegisterOptions *registerOptions); + void InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions); - void InitTrashOptions(common::Configuration *conf, - TrashOptions *trashOptions); + void InitTrashOptions(common::Configuration* conf, + TrashOptions* trashOptions); - void InitMetricOptions(common::Configuration *conf, - ChunkServerMetricOptions *metricOptions); + void InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions); - void LoadConfigFromCmdline(common::Configuration *conf); + void LoadConfigFromCmdline(common::Configuration* conf); - int GetChunkServerMetaFromLocal(const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata); + int GetChunkServerMetaFromLocal(const std::string& storeUri, + const std::string& metaUri, + const std::shared_ptr& fs, + ChunkServerMetadata* metadata); - int ReadChunkServerMeta(const std::shared_ptr &fs, - const std::string &metaUri, ChunkServerMetadata *metadata); + int ReadChunkServerMeta(const std::shared_ptr& fs, + const std::string& metaUri, + ChunkServerMetadata* metadata); private: - // copysetNodeManager_ 管理chunkserver上所有copysetNode + // copysetNodeManager_ Manage all copysetNodes on the chunkserver CopysetNodeManager* copysetNodeManager_; - // cloneManager_ 管理克隆任务 + // cloneManager_ Manage Clone Tasks CloneManager cloneManager_; // scan copyset manager ScanManager scanManager_; - // heartbeat_ 负责向mds定期发送心跳,并下发心跳中任务 + // heartbeat_ Responsible for regularly sending heartbeat to MDS and issuing + // tasks in the heartbeat Heartbeat heartbeat_; - // trash_ 定期回收垃圾站中的物理空间 + // trash_ Regularly recycle physical space in the garbage bin std::shared_ptr trash_; - // install snapshot流控 + // install snapshot flow control scoped_refptr snapshotThrottle_; }; @@ -125,4 +129,3 @@ class ChunkServer { } // namespace curve #endif // SRC_CHUNKSERVER_CHUNKSERVER_H_ - diff --git a/src/chunkserver/chunkserver_helper.cpp b/src/chunkserver/chunkserver_helper.cpp index cf12df7f67..96afcf39e8 100644 --- a/src/chunkserver/chunkserver_helper.cpp +++ b/src/chunkserver/chunkserver_helper.cpp @@ -20,19 +20,20 @@ * Author: lixiaocui */ -#include -#include +#include "src/chunkserver/chunkserver_helper.h" + #include +#include +#include #include "src/common/crc32.h" -#include "src/chunkserver/chunkserver_helper.h" namespace curve { namespace chunkserver { const uint64_t DefaultMagic = 0x6225929368674118; bool ChunkServerMetaHelper::EncodeChunkServerMeta( - const ChunkServerMetadata &meta, std::string *out) { + const ChunkServerMetadata& meta, std::string* out) { if (!out->empty()) { LOG(ERROR) << "out string must empty!"; return false; @@ -50,8 +51,8 @@ bool ChunkServerMetaHelper::EncodeChunkServerMeta( return true; } -bool ChunkServerMetaHelper::DecodeChunkServerMeta( - const std::string &meta, ChunkServerMetadata *out) { +bool ChunkServerMetaHelper::DecodeChunkServerMeta(const std::string& meta, + ChunkServerMetadata* out) { std::string 
jsonStr(meta); std::string err; json2pb::Json2PbOptions opt; @@ -63,7 +64,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return false; } - // 验证meta是否正确 + // Verify if the meta is correct uint32_t crc = MetadataCrc(*out); if (crc != out->checksum()) { LOG(ERROR) << "ChunkServer persisted metadata CRC dismatch." @@ -75,8 +76,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return true; } -uint32_t ChunkServerMetaHelper::MetadataCrc( - const ChunkServerMetadata &meta) { +uint32_t ChunkServerMetaHelper::MetadataCrc(const ChunkServerMetadata& meta) { uint32_t crc = 0; uint32_t ver = meta.version(); uint32_t id = meta.id(); @@ -87,7 +87,7 @@ uint32_t ChunkServerMetaHelper::MetadataCrc( crc = curve::common::CRC32(crc, reinterpret_cast(&id), sizeof(id)); crc = curve::common::CRC32(crc, token, meta.token().size()); crc = curve::common::CRC32(crc, reinterpret_cast(&magic), - sizeof(magic)); + sizeof(magic)); return crc; } diff --git a/src/chunkserver/chunkserver_main.cpp b/src/chunkserver/chunkserver_main.cpp index 5bc4cb8736..4c13bf64d8 100644 --- a/src/chunkserver/chunkserver_main.cpp +++ b/src/chunkserver/chunkserver_main.cpp @@ -26,7 +26,7 @@ int main(int argc, char* argv[]) { butil::AtExitManager atExitManager; ::curve::chunkserver::ChunkServer chunkserver; LOG(INFO) << "ChunkServer starting."; - // 这里不能用fork创建守护进程,bvar会存在一些问题 + // You cannot use fork to create daemons here, as bvar may have some issues // https://github.com/apache/incubator-brpc/issues/697 // https://github.com/apache/incubator-brpc/issues/208 chunkserver.Run(argc, argv); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 339ecbbe66..f8a361d94e 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -21,8 +21,9 @@ */ #include "src/chunkserver/chunkserver_metrics.h" -#include + #include +#include #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/passive_getfn.h" @@ -31,13 +32,15 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + : rps_(&reqNum_, 1), + iops_(&ioNum_, 1), + eps_(&errorNum_, 1), bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string &prefix) { - // 暴露所有的metric +int IOMetric::Init(const std::string& prefix) { + // Expose all metrics if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; return -1; @@ -94,9 +97,8 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } } - -int CSIOMetric::Init(const std::string &prefix) { - // 初始化io统计项metric +int CSIOMetric::Init(const std::string& prefix) { + // Initialize IO statistics item metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; std::string recoverPrefix = prefix + "_recover"; @@ -161,30 +163,30 @@ void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + 
break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; + break; + default: + result = nullptr; + break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); @@ -196,7 +198,7 @@ int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -209,30 +211,36 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage *logStorage) { + CurveSegmentLogStorage* logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), - walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : hasInited_(false), + leaderCount_(nullptr), + chunkLeft_(nullptr), + walSegmentLeft_(nullptr), + chunkTrashed_(nullptr), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} -ChunkServerMetric *ChunkServerMetric::self_ = nullptr; +ChunkServerMetric* ChunkServerMetric::self_ = nullptr; -ChunkServerMetric *ChunkServerMetric::GetInstance() { - // chunkserver metric 在chunkserver启动时初始化创建 - // 因此创建的时候不会存在竞争,不需要锁保护 +ChunkServerMetric* ChunkServerMetric::GetInstance() { + // The chunkserver metric is created and initialized when the chunkserver + // starts, so there is no contention during creation and no lock + // protection is needed if (self_ == nullptr) { self_ = new ChunkServerMetric; } return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { if (hasInited_) { LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -245,14 +253,14 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { return 0; } - // 初始化io统计项metric + // Initialize the IO statistics metrics int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { LOG(ERROR) << "Init chunkserver metric failed."; return -1; } - // 初始化资源统计 + // Initialize resource statistics std::string leaderCountPrefix = Prefix() + "_leader_count"; leaderCount_ = std::make_shared>(leaderCountPrefix); @@ -278,7 +286,7 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { } int ChunkServerMetric::Fini() { - // 释放资源,从而将暴露的metric从全局的map中移除 + // Release resources to remove the exposed metrics from the global map ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr;
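For reference, a minimal sketch of the intended lifecycle described above, with hypothetical option values (this is an illustration under stated assumptions, not part of the patch): the singleton is fetched and initialized on the single-threaded startup path, before any IO threads exist, and torn down with Fini().

// Sketch only; values are assumed for illustration.
ChunkServerMetricOptions metricOptions;
metricOptions.collectMetric = true;   // enable metric collection
metricOptions.ip = "127.0.0.1";       // hypothetical chunkserver ip
metricOptions.port = 8200;            // hypothetical chunkserver port

// GetInstance() is deliberately unlocked: it must only be called
// before worker threads are spawned.
ChunkServerMetric* metric = ChunkServerMetric::GetInstance();
if (metric->Init(metricOptions) != 0) {
    LOG(FATAL) << "init chunkserver metric failed";
}
// ... serve IO ...
metric->Fini();  // removes the exposed bvars from the global map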
@@ -293,8 +301,8 @@ -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { if (!option_.collectMetric) { return 0; } @@ -321,9 +329,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, return 0; } -CopysetMetricPtr -ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( + const LogicPoolID& logicPoolId, const CopysetID& copysetId) { if (!option_.collectMetric) { return nullptr; } @@ -332,18 +339,18 @@ ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 这里先保存copyset metric,等remove后再去释放 - // 防止在读写锁里面去操作metric,导致死锁 + // Keep a reference to the copyset metric first and release it after the + // removal, to avoid operating on the metric while inside the read-write + // lock, which could lead to a deadlock auto metric = copysetMetricMap_.Get(groupId); copysetMetricMap_.Remove(groupId); return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type) { if (!option_.collectMetric) { return; @@ -356,8 +363,8 @@ void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, +void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { if (!option_.collectMetric) { @@ -371,7 +378,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { if (!option_.collectMetric) { return; } @@ -381,7 +388,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { if (!option_.collectMetric) { return; } @@ -391,7 +398,7 @@ void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } -void ChunkServerMetric::MonitorTrash(Trash *trash) { +void ChunkServerMetric::MonitorTrash(Trash* trash) { if (!option_.collectMetric) { return; } @@ -417,7 +424,7 @@ void ChunkServerMetric::DecreaseLeaderCount() { *leaderCount_ << -1; } -void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) { +void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { if (!option_.collectMetric) { return; } diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index d4354d196f..c2fdb91823 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ -#include #include +#include + +#include #include #include -#include #include #include
"include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/configuration.h" -#include "src/chunkserver/datastore/file_pool.h" +#include "src/common/uncopyable.h" using curve::common::Configuration; using curve::common::ReadLockGuard; @@ -54,57 +55,59 @@ class Trash; template using PassiveStatusPtr = std::shared_ptr>; -template using AdderPtr = std::shared_ptr>; +template +using AdderPtr = std::shared_ptr>; -// 使用LatencyRecorder的实现来统计读写请求的size情况 -// 可以统计分位值、最大值、中位数、平均值等情况 +// Using the implementation of LatencyRecorder to count the size of read and +// write requests Statistics can be conducted on quantile values, maximum +// values, median values, mean values, and other factors using IOSizeRecorder = bvar::LatencyRecorder; -// io 相关的统计项 +// IO related statistical items class IOMetric { public: IOMetric(); virtual ~IOMetric(); /** - * 初始化 io metric - * 主要用于曝光各metric指标 - * @param prefix: 用于bvar曝光时使用的前缀 - * @return 成功返回0,失败返回-1 + * Initialize io metric + * Mainly used for exposing various metric indicators + * @param prefix: The prefix used for bvar exposure + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * IO请求到来时统计requestNum + * Count requestNum when IO requests arrive */ void OnRequest(); /** - * IO 完成以后,记录该次IO的指标 - * 错误的io不会计入iops和bps统计 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * After IO is completed, record the indicators for this IO + * Incorrect IO will not be included in iops and bps statistics + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(size_t size, int64_t latUs, bool hasError); public: - // io请求的数量 + // Number of IO requests bvar::Adder reqNum_; - // 成功io的数量 + // Number of successful IO bvar::Adder ioNum_; - // 失败的io个数 + // Number of failed IO bvar::Adder errorNum_; - // 所有io的数据量 + // The data volume of all IO bvar::Adder ioBytes_; - // io的延时情况(分位值、最大值、中位数、平均值) + // Delay situation of IO (quantile, maximum, median, average) bvar::LatencyRecorder latencyRecorder_; - // io大小的情况(分位值、最大值、中位数、平均值) + // The size of IO (quantile, maximum, median, average) IOSizeRecorder sizeRecorder_; - // 最近1秒请求的IO数量 + // Number of IO requests in the last 1 second bvar::PerSecond> rps_; - // 最近1秒的iops + // iops in the last 1 second bvar::PerSecond> iops_; - // 最近1秒的出错IO数量 + // Number of IO errors in the last 1 second bvar::PerSecond> eps_; - // 最近1秒的数据量 + // Data volume in the last 1 second bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -120,100 +123,109 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), - pasteMetric_(nullptr), downloadMetric_(nullptr) {} + : readMetric_(nullptr), + writeMetric_(nullptr), + recoverMetric_(nullptr), + pasteMetric_(nullptr), + downloadMetric_(nullptr) {} ~CSIOMetric() {} /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type); /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the 
request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type); /** - * 初始化各项op的metric统计项 - * @return 成功返回0,失败返回-1 + * Initialize metric statistics for each op + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * 释放各项op的metric资源 + * Release metric resources for various OPs */ void Fini(); protected: - // ReadChunk统计 + // ReadChunk statistics IOMetricPtr readMetric_; - // WriteChunk统计 + // WriteChunk statistics IOMetricPtr writeMetric_; - // RecoverChunk统计 + // RecoverChunk statistics IOMetricPtr recoverMetric_; - // PasteChunk信息 + // PasteChunk Information IOMetricPtr pasteMetric_; - // Download统计 + // Download statistics IOMetricPtr downloadMetric_; }; class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : logicPoolId_(0), + copysetId_(0), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} /** - * 初始化copyset级别的metric统计项 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Initialize metric statistics at the copyset level + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ - int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); + int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 监控DataStore指标,主要包括chunk的数量、快照的数量等 - * @param datastore: 该copyset下的datastore指针 + * Monitor DataStore indicators, mainly including the number of chunks, + * number of snapshots, etc + * @param datastore: The datastore pointer under this copyset */ - void MonitorDataStore(CSDataStore *datastore); + void MonitorDataStore(CSDataStore* datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The 
+ * @param hasError: Whether an error occurred in this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { @@ -221,9 +233,10 @@ class CSCopysetMetric { } /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Get the IOMetric of the specified type + * @param type: The metric type of the request + * @return the IOMetric pointer for the given type, or nullptr if the + * type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -264,27 +277,27 @@ class CSCopysetMetric { } private: - // 逻辑池id + // Logical pool ID LogicPoolID logicPoolId_; // copyset id CopysetID copysetId_; - // copyset上的 chunk 的数量 + // Number of chunks on the copyset PassiveStatusPtr chunkCount_; // The total number of WAL segment in copyset PassiveStatusPtr walSegmentCount_; - // copyset上的 快照文件 的数量 + // Number of snapshot files on the copyset PassiveStatusPtr snapshotCount_; - // copyset上的 clone chunk 的数量 + // Number of clone chunks on the copyset PassiveStatusPtr cloneChunkCount_; - // copyset上的IO类型的metric统计 + // Per-IO-type metric statistics on the copyset CSIOMetric ioMetrics_; }; struct ChunkServerMetricOptions { bool collectMetric; - // chunkserver的ip + // Chunkserver IP std::string ip; - // chunkserver的端口号 + // Chunkserver port uint32_t port; ChunkServerMetricOptions() : collectMetric(false), ip("127.0.0.1"), port(8888) {} @@ -344,173 +357,175 @@ class CopysetMetricMap { } private: - // 保护复制组metric map的读写锁 + // Read-write lock protecting the replication group metric map RWLock rwLock_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table of each replication group's metric, keyed by GroupId std::unordered_map map_; }; class ChunkServerMetric : public Uncopyable { public: - // 实现单例 - static ChunkServerMetric *GetInstance(); + // Singleton accessor + static ChunkServerMetric* GetInstance(); /** - * 初始化chunkserver统计项 - * @pa)ram option: 初始化配置项 - * @return 成功返回0,失败返回-1 + * Initialize the chunkserver statistics + * @param option: The configuration for initialization + * @return 0 on success, -1 on failure */ - int Init(const ChunkServerMetricOptions &option); + int Init(const ChunkServerMetricOptions& option); /** - * 释放metric资源 - * @return 成功返回0,失败返回-1 + * Release the metric resources + * @return 0 on success, -1 on failure */ int Fini(); /** - * 请求前记录metric - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 + * Record metrics before a request + * @param logicPoolId: The ID of the logical pool this IO operates on + * @param copysetId: The ID of the copyset this IO operates on + * @param type: Request type */ - void OnRequest(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnRequest(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type); /** - * 请求结束时记录该次IO指标 - * 错误的io不会计入iops和bps统计 - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record the metrics of an IO when the request finishes + * Failed IOs are not counted in the iops and bps statistics + * @param logicPoolId: The ID of the logical pool this IO operates on + * @param copysetId: The ID of the copyset this IO operates on + * @param type: Request type + * @param size: The amount of data in this IO + * @param latUs: The latency of this IO + * @param hasError: Whether an error occurred in this IO */ - void OnResponse(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnResponse(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError);
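As a usage illustration (a sketch under stated assumptions, not part of the patch), a request handler would typically bracket each IO with these two calls; the IDs and values below are hypothetical:

// Sketch: recording a 4 KB read that took 150 us on copyset (1, 100001).
ChunkServerMetric* metric = ChunkServerMetric::GetInstance();
metric->OnRequest(1, 100001, CSIOMetricType::READ_CHUNK);
// ... execute the read ...
bool hasError = false;  // failed IOs are kept out of the iops/bps counters
metric->OnResponse(1, 100001, CSIOMetricType::READ_CHUNK, 4096, 150, hasError);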
/** - * 创建指定copyset的metric - * 如果collectMetric为false,返回0,但实际并不会创建 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 + * Create the metric for the specified copyset + * If collectMetric is false, this returns 0 but does not actually + * create it + * @param logicPoolId: The ID of the logical pool the copyset belongs to + * @param copysetId: The ID of the copyset + * @return 0 on success, -1 on failure; fails if the specified metric + * already exists */ - int CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 获取指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回指定的copyset metric,失败返回nullptr + * Get the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool the copyset belongs to + * @param copysetId: The ID of the copyset + * @return the specified copyset metric on success, nullptr on failure */ - CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 删除指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Remove the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool the copyset belongs to + * @param copysetId: The ID of the copyset + * @return 0 on success, -1 on failure */ - int RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkFilePool: chunkfilePool的对象指针 + * Monitor the chunk allocation pool, mainly the number of chunks + * in the pool + * @param chunkFilePool: Pointer to the chunkFilePool object */ - void MonitorChunkFilePool(FilePool *chunkFilePool); + void MonitorChunkFilePool(FilePool* chunkFilePool); /** - * 监视wal segment分配池,主要监视池中segment的数量 - * @param walFilePool: walfilePool的对象指针 + * Monitor the WAL segment allocation pool, mainly the number of + * segments in the pool + * @param walFilePool: Pointer to the walFilePool object */ - void MonitorWalFilePool(FilePool *walFilePool); + void MonitorWalFilePool(FilePool* walFilePool); /** - * 监视回收站 - * @param trash: trash的对象指针 + * Monitor the trash + * @param trash: Pointer to the Trash object */ - void MonitorTrash(Trash *trash); + void MonitorTrash(Trash* trash); /** - * 增加 leader count 计数 + * Increment the leader count */ void IncreaseLeaderCount(); /** - * 减少 leader count 计数 + * Decrement the leader count */ void DecreaseLeaderCount(); /** - * 更新配置项数据 - * @param conf: 配置内容 + * Expose the configuration items as metrics + * @param conf: The configuration content */ - void ExposeConfigMetric(common::Configuration *conf); + void ExposeConfigMetric(common::Configuration* conf); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return
返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); } - CopysetMetricMap *GetCopysetMetricMap() { return ©setMetricMap_; } + CopysetMetricMap* GetCopysetMetricMap() { return ©setMetricMap_; } uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); } uint32_t GetLeaderCount() const { - if (leaderCount_ == nullptr) - return 0; + if (leaderCount_ == nullptr) return 0; return leaderCount_->get_value(); } uint32_t GetTotalChunkCount() { - if (chunkCount_ == nullptr) - return 0; + if (chunkCount_ == nullptr) return 0; return chunkCount_->get_value(); } uint32_t GetTotalSnapshotCount() { - if (snapshotCount_ == nullptr) - return 0; + if (snapshotCount_ == nullptr) return 0; return snapshotCount_->get_value(); } uint32_t GetTotalCloneChunkCount() { - if (cloneChunkCount_ == nullptr) - return 0; + if (cloneChunkCount_ == nullptr) return 0; return cloneChunkCount_->get_value(); } uint32_t GetTotalWalSegmentCount() { - if (nullptr == walSegmentCount_) - return 0; + if (nullptr == walSegmentCount_) return 0; return walSegmentCount_->get_value(); } uint32_t GetChunkLeftCount() const { - if (chunkLeft_ == nullptr) - return 0; + if (chunkLeft_ == nullptr) return 0; return chunkLeft_->get_value(); } uint32_t GetWalSegmentLeftCount() const { - if (nullptr == walSegmentLeft_) - return 0; + if (nullptr == walSegmentLeft_) return 0; return walSegmentLeft_->get_value(); } uint32_t GetChunkTrashedCount() const { - if (chunkTrashed_ == nullptr) - return 0; + if (chunkTrashed_ == nullptr) return 0; return chunkTrashed_->get_value(); } @@ -522,32 +537,32 @@ class ChunkServerMetric : public Uncopyable { } private: - // 初始化标志 + // Initialization flag bool hasInited_; - // 配置项 + // Configuration Item ChunkServerMetricOptions option_; - // leader 的数量 + // Number of leaders AdderPtr leaderCount_; - // chunkfilepool 中剩余的 chunk 的数量 + // The number of remaining chunks in the chunkfilepool PassiveStatusPtr chunkLeft_; - // walfilepool 中剩余的 wal segment 的数量 + // The number of remaining wal segments in the walfilepool PassiveStatusPtr walSegmentLeft_; - // trash 中的 chunk 的数量 + // Number of chunks in trash PassiveStatusPtr chunkTrashed_; - // chunkserver上的 chunk 的数量 + // Number of chunks on chunkserver PassiveStatusPtr chunkCount_; // The total number of WAL segment in chunkserver PassiveStatusPtr walSegmentCount_; - // chunkserver上的 快照文件 的数量 + // Number of snapshot files on chunkserver PassiveStatusPtr snapshotCount_; - // chunkserver上的 clone chunk 的数量 + // Number of clone chunks on chunkserver PassiveStatusPtr cloneChunkCount_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key CopysetMetricMap copysetMetricMap_; - // chunkserver上的IO类型的metric统计 + // Metric statistics of IO types on chunkserver CSIOMetric ioMetrics_; - // 用于单例模式的自指指针 - static ChunkServerMetric *self_; + // Self pointing pointer for singleton mode + static ChunkServerMetric* self_; }; } // namespace chunkserver diff --git a/src/chunkserver/cli.h b/src/chunkserver/cli.h index 3c8ecc6997..ed048dc460 100644 --- a/src/chunkserver/cli.h +++ b/src/chunkserver/cli.h @@ -33,41 +33,37 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of 
configuration change related interfaces, which is + * convenient to use and avoids direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - PeerId *leaderId); +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId); -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const PeerId& peer, + const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const PeerId& peer, + const braft::cli::CliOptions& options); } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/cli2.cpp b/src/chunkserver/cli2.cpp index 5328724316..bf76d58934 100644 --- a/src/chunkserver/cli2.cpp +++ b/src/chunkserver/cli2.cpp @@ -22,10 +22,10 @@ #include "src/chunkserver/cli2.h" -#include -#include #include #include +#include +#include #include @@ -34,16 +34,14 @@ namespace curve { namespace chunkserver { -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - Peer *leader) { +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader) { if (conf.empty()) { return butil::Status(EINVAL, "Empty group configuration"); } - butil::Status st(-1, - "Fail to get leader of copyset node %s", + butil::Status st(-1, "Fail to get leader of copyset node %s", ToGroupIdString(logicPoolId, copysetId).c_str()); PeerId leaderId; Configuration::const_iterator iter = conf.begin(); @@ -53,7 +51,7 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status(-1, "Fail to init channel to %s", iter->to_string().c_str()); } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -84,11 +82,9 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& 
copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -101,10 +97,10 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, AddPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *addPeer = new Peer(); + Peer* addPeer = new Peer(); request.set_allocated_addpeer(addPeer); *addPeer = peer; AddPeerResponse2 response; @@ -128,17 +124,15 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -151,10 +145,10 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, RemovePeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *removePeer = new Peer(); + Peer* removePeer = new Peer(); request.set_allocated_removepeer(removePeer); *removePeer = peer; RemovePeerResponse2 response; @@ -179,17 +173,15 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options) { +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -203,11 +195,11 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId, ChangePeersRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); *leaderPeer = leader; request.set_allocated_leader(leaderPeer); - for (Configuration::const_iterator - iter = newPeers.begin(); iter != newPeers.end(); ++iter) { + for (Configuration::const_iterator iter = newPeers.begin(); + iter != newPeers.end(); ++iter) { request.add_newpeers()->set_address(iter->to_string()); } ChangePeersResponse2 response; @@ -229,17 +221,15 @@ butil::Status ChangePeers(const LogicPoolID 
&logicPoolId, new_conf.add_peer(response.newpeers(i).address()); } LOG(INFO) << "Configuration of replication group `" - << ToGroupIdString(logicPoolId, copysetId) - << "' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << "' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID &copysetId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -256,10 +246,10 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, TransferLeaderRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *transfereePeer = new Peer(); + Peer* transfereePeer = new Peer(); request.set_allocated_transferee(transfereePeer); *transfereePeer = peer; TransferLeaderResponse2 response; @@ -274,18 +264,24 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -// reset peer不走一致性协议,直接将peers重置,因此存在一定的风险 -// 应用场景:大多数节点挂掉的极端情况。在这种情况下,该copyset将无法写入,直 -// 到半小时后mds将挂掉的副本上的copyset迁移,因此有一段时间不可用,为了应对这种场景,引入了 -// reset peer工具,直接将复制组成员reset成只包含存活的副本。 -// 注意事项: -// 1、reset peer之前,需要通过check-copyset工具确认复制组中的大多数副本确实挂掉 -// 2、reset peer的时候,要确保剩下的副本有最新的数据,不然存在丢数据的风险 -// 3、reset peer适用于其他两个副本不能恢复的情况,不然可能会扰乱集群 +// Reset peer bypasses the consensus protocol and resets the peers directly, +// so it carries a certain amount of risk. +// Use case: the extreme situation where most replicas are down. In that case +// the copyset cannot be written until, half an hour later, the MDS migrates +// the copysets on the dead replicas, so it is unavailable for a while. To +// handle this scenario the reset peer tool was introduced; it resets the +// replication group membership to contain only the surviving replicas. +// Precautions: +// 1. Before resetting peers, use the check-copyset tool to confirm that the +// majority of the replicas in the replication group are indeed down. +// 2. When resetting peers, make sure the remaining replicas have the latest +// data, otherwise there is a risk of data loss. +// 3. Reset peer is intended for the case where the other two replicas cannot +// be recovered; otherwise it may disrupt the cluster.
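To make the precautions above concrete, here is a hedged sketch of invoking ResetPeer against a single surviving replica, using only the signatures declared in this file; all addresses and IDs are hypothetical:

// Sketch: shrink copyset (logicPoolId=1, copysetId=100001) to one survivor.
Configuration newPeers;
newPeers.add_peer(PeerId("127.0.0.1:8200:0"));  // the surviving replica

Peer requestPeer;
requestPeer.set_address("127.0.0.1:8200:0");    // send the RPC to that peer

braft::cli::CliOptions options;
options.timeout_ms = 1000;
butil::Status st = ResetPeer(1, 100001, newPeers, requestPeer, options);
if (!st.ok()) {
    LOG(ERROR) << "reset peer failed: " << st.error_str();
}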
+butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options) { if (newPeers.empty()) { return butil::Status(EINVAL, "new_conf is empty"); @@ -294,7 +290,7 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, brpc::Channel channel; if (channel.Init(requestPeerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - requestPeerId.to_string().c_str()); + requestPeerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms); @@ -302,11 +298,11 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, ResetPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *requestPeerPtr = new Peer(); + Peer* requestPeerPtr = new Peer(); *requestPeerPtr = requestPeer; request.set_allocated_requestpeer(requestPeerPtr); - for (Configuration::const_iterator - iter = newPeers.begin(); iter != newPeers.end(); ++iter) { + for (Configuration::const_iterator iter = newPeers.begin(); + iter != newPeers.end(); ++iter) { request.add_newpeers()->set_address(iter->to_string()); } ResetPeerResponse2 response; @@ -318,15 +314,14 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Peer& peer, +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options) { brpc::Channel channel; PeerId peerId(peer.address()); if (channel.Init(peerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - peerId.to_string().c_str()); + peerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms); @@ -334,7 +329,7 @@ butil::Status Snapshot(const LogicPoolID &logicPoolId, SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(peer); + Peer* peerPtr = new Peer(peer); request.set_allocated_peer(peerPtr); SnapshotResponse2 response; CliService2_Stub stub(&channel); @@ -351,7 +346,7 @@ butil::Status SnapshotAll(const Peer& peer, PeerId peerId(peer.address()); if (channel.Init(peerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - peerId.to_string().c_str()); + peerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms); diff --git a/src/chunkserver/cli2.h b/src/chunkserver/cli2.h index ba60e057e7..512850b747 100644 --- a/src/chunkserver/cli2.h +++ b/src/chunkserver/cli2.h @@ -33,57 +33,50 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of configuration change related interfaces, which is + * convenient to use and avoids direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - Peer *leader); - -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 变更配置 -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration 
&conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options); - -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 重置复制组 -butil::Status ResetPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration& newPeers, - const Peer& requestPeer, +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader); + +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options); + +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options); + +// Change configuration +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options); + +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options); + +// Reset replication group +butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Peer& peer, +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options); -// 给chunkserver上全部copyset副本触发快照 +// Trigger a snapshot for all copyset replicas on the chunkserver butil::Status SnapshotAll(const Peer& peer, const braft::cli::CliOptions& options); diff --git a/src/chunkserver/clone_task.h b/src/chunkserver/clone_task.h index 48766bce9a..cd55f0b439 100644 --- a/src/chunkserver/clone_task.h +++ b/src/chunkserver/clone_task.h @@ -25,37 +25,33 @@ #include #include + #include #include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" #include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" +#include "src/common/uncopyable.h" namespace curve { namespace chunkserver { using curve::common::Uncopyable; -class CloneTask : public Uncopyable - , public std::enable_shared_from_this{ +class CloneTask : public Uncopyable, + public std::enable_shared_from_this { public: CloneTask(std::shared_ptr request, std::shared_ptr core, ::google::protobuf::Closure* done) - : core_(core) - , readRequest_(request) - , done_(done) - , isComplete_(false) {} + : core_(core), readRequest_(request), done_(done), isComplete_(false) {} virtual ~CloneTask() {} virtual std::function Closure() { auto sharedThis = shared_from_this(); - return [sharedThis] () { - sharedThis->Run(); - }; + return [sharedThis]() { sharedThis->Run(); }; } virtual void Run() { @@ -65,18 +61,16 @@ class CloneTask : public Uncopyable isComplete_ = true; } - virtual bool IsComplete() { - return isComplete_; - } + virtual bool IsComplete() { return isComplete_; } protected: - // 克隆核心逻辑 + // Clone Core Logic std::shared_ptr core_; - // 此次任务相关信息 + // Information related to this task std::shared_ptr readRequest_; - 
// 任务结束后要执行的Closure + // Closure to be executed after the task is completed ::google::protobuf::Closure* done_; - // 任务是否结束 + // Is the task completed bool isComplete_; }; diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index a00f7aaf9a..8267c268d8 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -22,33 +22,34 @@ #include "src/chunkserver/copyset_node.h" -#include -#include -#include #include -#include #include -#include -#include +#include +#include +#include +#include + #include #include -#include -#include -#include #include #include +#include +#include +#include +#include +#include -#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/chunk_closure.h" -#include "src/chunkserver/op_request.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/fs/fs_common.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/define.h" #include "src/chunkserver/datastore/datastore_file_helper.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/op_request.h" +#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" +#include "src/common/concurrent/task_thread_pool.h" #include "src/common/crc32.h" #include "src/common/fs_util.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" namespace braft { DECLARE_bool(raft_enable_leader_lease); @@ -59,37 +60,36 @@ namespace chunkserver { using curve::fs::FileSystemInfo; -const char *kCurveConfEpochFilename = "conf.epoch"; +const char* kCurveConfEpochFilename = "conf.epoch"; uint32_t CopysetNode::syncTriggerSeconds_ = 25; -std::shared_ptr> - CopysetNode::copysetSyncPool_ = nullptr; - -CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &initConf) : - logicPoolId_(logicPoolId), - copysetId_(copysetId), - conf_(initConf), - epoch_(0), - peerId_(), - nodeOptions_(), - raftNode_(nullptr), - chunkDataApath_(), - chunkDataRpath_(), - appliedIndex_(0), - leaderTerm_(-1), - configChange_(std::make_shared()), - lastSnapshotIndex_(0), - scaning_(false), - lastScanSec_(0), - enableOdsyncWhenOpenChunkFile_(false), - isSyncing_(false), - checkSyncingIntervalMs_(500) { -} +std::shared_ptr> CopysetNode::copysetSyncPool_ = + nullptr; + +CopysetNode::CopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& initConf) + : logicPoolId_(logicPoolId), + copysetId_(copysetId), + conf_(initConf), + epoch_(0), + peerId_(), + nodeOptions_(), + raftNode_(nullptr), + chunkDataApath_(), + chunkDataRpath_(), + appliedIndex_(0), + leaderTerm_(-1), + configChange_(std::make_shared()), + lastSnapshotIndex_(0), + scaning_(false), + lastScanSec_(0), + enableOdsyncWhenOpenChunkFile_(false), + isSyncing_(false), + checkSyncingIntervalMs_(500) {} CopysetNode::~CopysetNode() { - // 移除 copyset的metric + // Remove metric from copyset ChunkServerMetric::GetInstance()->RemoveCopysetMetric(logicPoolId_, copysetId_); metric_ = nullptr; @@ -98,17 +98,16 @@ CopysetNode::~CopysetNode() { delete nodeOptions_.snapshot_file_system_adaptor; nodeOptions_.snapshot_file_system_adaptor = nullptr; } - LOG(INFO) << "release copyset node: " - << GroupIdString(); + LOG(INFO) << "release copyset node: " << GroupIdString(); } -int CopysetNode::Init(const CopysetNodeOptions &options) { +int CopysetNode::Init(const CopysetNodeOptions& options) { std::string groupId = GroupId(); std::string 
protocol = curve::common::UriParser::ParseUri( options.chunkDataUri, &copysetDirPath_); if (protocol.empty()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add the necessary error codes and return them LOG(ERROR) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << options.chunkDataUri << ". Copyset: " << GroupIdString(); @@ -135,12 +134,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { dsOptions.locationLimit = options.locationLimit; dsOptions.enableOdsyncWhenOpenChunkFile = options.enableOdsyncWhenOpenChunkFile; - dataStore_ = std::make_shared(options.localFileSystem, - options.chunkFilePool, - dsOptions); + dataStore_ = std::make_shared( + options.localFileSystem, options.chunkFilePool, dsOptions); CHECK(nullptr != dataStore_); if (false == dataStore_->Initialize()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add the necessary error codes and return them LOG(ERROR) << "data store init failed. " << "Copyset: " << GroupIdString(); return -1; @@ -150,10 +148,10 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { syncThread_.Init(this); dataStore_->SetCacheCondPtr(syncThread_.cond_); dataStore_->SetCacheLimits(options.syncChunkLimit, - options.syncThreshold); + options.syncThreshold); LOG(INFO) << "init sync thread success limit = " - << options.syncChunkLimit << - "syncthreshold = " << options.syncThreshold; + << options.syncChunkLimit + << "syncthreshold = " << options.syncThreshold; recyclerUri_ = options.recyclerUri; @@ -166,21 +164,21 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { // initialize raft node options corresponding to the copy set node InitRaftNodeOptions(options); - /* 初始化 peer id */ + /* Initialize the peer id */ butil::ip_t ip; butil::str2ip(options.ip.c_str(), &ip); butil::EndPoint addr(ip, options.port); /** - * idx默认是零,在chunkserver不允许一个进程有同一个copyset的多副本, - * 这一点注意和不让braft区别开来 + * The default idx is zero. A chunkserver process is not allowed to hold + * multiple replicas of the same copyset; note that this is a point + * where it differs from braft */ peerId_ = PeerId(addr, 0); raftNode_ = std::make_shared(groupId, peerId_); concurrentapply_ = options.concurrentapply; - /* - * 初始化copyset性能metrics + * Initialize the copyset performance metrics */ int ret = ChunkServerMetric::GetInstance()->CreateCopysetMetric( logicPoolId_, copysetId_); @@ -189,10 +187,11 @@ << "Copyset: " << GroupIdString(); return -1; } - metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric( - logicPoolId_, copysetId_); + metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric(logicPoolId_, + copysetId_); if (metric_ != nullptr) { - // TODO(yyk) 后续考虑添加datastore层面的io metric + // TODO(yyk): consider adding datastore-level IO metrics in + // the future metric_->MonitorDataStore(dataStore_.get()); } @@ -213,7 +212,7 @@ } int CopysetNode::Run() { - // raft node的初始化实际上让起run起来 + // Initializing the raft node actually makes it start running if (0 != raftNode_->init(nodeOptions_)) { LOG(ERROR) << "Fail to init raft node. " << "Copyset: " << GroupIdString(); return -1; } @@ -237,19 +236,20 @@ void CopysetNode::Fini() { WaitSnapshotDone(); if (nullptr != raftNode_) { - // 关闭所有关于此raft node的服务 + // Close all services related to this raft node raftNode_->shutdown(nullptr); - // 等待所有的正在处理的task结束 + // Wait for all in-flight tasks to finish raftNode_->join(); } if (nullptr != concurrentapply_) { - // 将未刷盘的数据落盘,如果不刷盘 - // 迁移copyset时,copyset移除后再去执行WriteChunk操作可能出错 + // Flush the data that has not yet been persisted to disk; without + // this flush, a WriteChunk executed after the copyset is removed + // during copyset migration may fail concurrentapply_->Flush(); } } -void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { +void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions& options) { auto groupId = GroupId(); nodeOptions_.initial_conf = conf_; nodeOptions_.election_timeout_ms = options.electionTimeoutMs; @@ -257,20 +257,19 @@ nodeOptions_.node_owns_fsm = false; nodeOptions_.snapshot_interval_s = options.snapshotIntervalS; nodeOptions_.log_uri = options.logUri; - nodeOptions_.log_uri.append("/").append(groupId) .append("/").append(RAFT_LOG_DIR); + nodeOptions_.log_uri.append("/").append(groupId).append("/").append( RAFT_LOG_DIR); nodeOptions_.raft_meta_uri = options.raftMetaUri; - nodeOptions_.raft_meta_uri.append("/").append(groupId) .append("/").append(RAFT_META_DIR); + nodeOptions_.raft_meta_uri.append("/").append(groupId).append("/").append( RAFT_META_DIR); nodeOptions_.snapshot_uri = options.raftSnapshotUri; - nodeOptions_.snapshot_uri.append("/").append(groupId) .append("/").append(RAFT_SNAP_DIR); + nodeOptions_.snapshot_uri.append("/").append(groupId).append("/").append( RAFT_SNAP_DIR); nodeOptions_.usercode_in_pthread = options.usercodeInPthread; nodeOptions_.snapshot_throttle = options.snapshotThrottle; - CurveFilesystemAdaptor* cfa = - new CurveFilesystemAdaptor(options.chunkFilePool, - options.localFileSystem); + CurveFilesystemAdaptor* cfa = new CurveFilesystemAdaptor( options.chunkFilePool, options.localFileSystem); std::vector filterList; std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE); filterList.push_back(kCurveConfEpochFilename); @@ -282,37 +281,42 @@ new scoped_refptr(cfa); } -void CopysetNode::on_apply(::braft::Iterator &iter) { +void CopysetNode::on_apply(::braft::Iterator& iter) { for (; iter.valid(); iter.next()) { - // 放在bthread中异步执行,避免阻塞当前状态机的执行 + // Run asynchronously in a bthread to avoid blocking the execution of + // the current state machine braft::AsyncClosureGuard doneGuard(iter.done()); /** - * 获取向braft提交任务时候传递的ChunkClosure,里面包含了 - * Op的所有上下文 ChunkOpRequest + * Get the ChunkClosure passed in when the task was submitted to + * braft; it carries the ChunkOpRequest with the full context of the op */ braft::Closure *closure = iter.done(); if (nullptr != closure) { /** - * 1.closure不是null,那么说明当前节点正常,直接从内存中拿到Op - * context进行apply + * 1. If the closure is not null, the current node is healthy; take
If the closure is not null, it indicates that the current node + * is healthy, so the Op context is taken directly from memory + * and applied */ - ChunkClosure - *chunkClosure = dynamic_cast<ChunkClosure *>(iter.done()); + ChunkClosure* chunkClosure = + dynamic_cast<ChunkClosure*>(iter.done()); CHECK(nullptr != chunkClosure) << "ChunkClosure dynamic cast failed"; std::shared_ptr<ChunkOpRequest>& opRequest = chunkClosure->request_; - concurrentapply_->Push(opRequest->ChunkId(), ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT - &ChunkOpRequest::OnApply, opRequest, - iter.index(), doneGuard.release()); + concurrentapply_->Push( + opRequest->ChunkId(), + ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT + &ChunkOpRequest::OnApply, opRequest, iter.index(), + doneGuard.release()); } else { - // 获取log entry + // Obtain the log entry butil::IOBuf log = iter.data(); /** - * 2.closure是null,有两种情况: - * 2.1. 节点重启,回放apply,这里会将Op log entry进行反序列化, - * 然后获取Op信息进行apply + * 2. If the closure is null, there are two cases: + * 2.1. The node restarted and is replaying the log: the Op log + * entry is deserialized here, and the Op information is + * then extracted and applied * 2.2. follower apply */ ChunkRequest request; @@ -320,9 +324,10 @@ void CopysetNode::on_apply(::braft::Iterator &iter) { auto opReq = ChunkOpRequest::Decode(log, &request, &data, iter.index(), GetLeaderId()); auto chunkId = request.chunkid(); - concurrentapply_->Push(chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT - &ChunkOpRequest::OnApplyFromLog, opReq, - dataStore_, std::move(request), data); + concurrentapply_->Push( + chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT + &ChunkOpRequest::OnApplyFromLog, opReq, dataStore_, + std::move(request), data); } } } @@ -331,11 +336,11 @@ void CopysetNode::on_shutdown() { LOG(INFO) << GroupIdString() << " is shutdown"; } -void CopysetNode::on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) { +void CopysetNode::on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) { snapshotFuture_ = - std::async(std::launch::async, - &CopysetNode::save_snapshot_background, this, writer, done); + std::async(std::launch::async, &CopysetNode::save_snapshot_background, + this, writer, done); } void CopysetNode::WaitSnapshotDone() { @@ -345,12 +350,12 @@ } } -void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, - ::braft::Closure *done) { +void CopysetNode::save_snapshot_background(::braft::SnapshotWriter* writer, + ::braft::Closure* done) { brpc::ClosureGuard doneGuard(done); /** - * 1.flush I/O to disk,确保数据都落盘 + * 1. Flush I/O to disk to ensure all data is persisted */ concurrentapply_->Flush(); @@ -359,37 +364,41 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 2.保存配置版本: conf.epoch,注意conf.epoch是存放在data目录下 + * 2. Save the configuration version in conf.epoch; note that conf.epoch + * is stored in the data directory */ - std::string - filePathTemp = writer->get_path() + "/" + kCurveConfEpochFilename; + std::string filePathTemp = + writer->get_path() + "/" + kCurveConfEpochFilename; if (0 != SaveConfEpoch(filePathTemp)) { done->status().set_error(errno, "invalid: %s", strerror(errno)); LOG(ERROR) << "SaveConfEpoch failed. " - << "Copyset: " << GroupIdString() - << ", errno: " << errno << ", " + << "Copyset: " << GroupIdString() << ", errno: " << errno << ", error message: " << strerror(errno); return; } /** - * 3.保存chunk文件名的列表到快照元数据文件中 + * 3. 
Save the list of chunk file names to the snapshot metadata file */ std::vector<std::string> files; if (0 == fs_->List(chunkDataApath_, &files)) { for (const auto& fileName : files) { - // raft保存快照时,meta信息中不用保存快照文件列表 - // raft下载快照的时候,在下载完chunk以后,会单独获取snapshot列表 + // When raft saves a snapshot, the snapshot file list does not need + // to be stored in the meta information. + // When raft downloads a snapshot, the snapshot list is fetched + // separately after the chunks have been downloaded. bool isSnapshot = DatastoreFileHelper::IsSnapshotFile(fileName); if (isSnapshot) { continue; } std::string chunkApath; - // 通过绝对路径,算出相对于快照目录的路径 + // Compute the path relative to the snapshot directory from the + // absolute path chunkApath.append(chunkDataApath_); chunkApath.append("/").append(fileName); - std::string filePath = curve::common::CalcRelativePath( - writer->get_path(), chunkApath); + std::string filePath = + curve::common::CalcRelativePath(writer->get_path(), chunkApath); writer->add_file(filePath); } } else { @@ -401,16 +410,16 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 4. 保存conf.epoch文件到快照元数据文件中 + * 4. Save the conf.epoch file to the snapshot metadata file */ - writer->add_file(kCurveConfEpochFilename); + writer->add_file(kCurveConfEpochFilename); } -int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { +int CopysetNode::on_snapshot_load(::braft::SnapshotReader* reader) { /** - * 1. 加载快照数据 + * 1. Load snapshot data */ - // 打开的 snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 + // Opened snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 std::string snapshotPath = reader->get_path(); // /mnt/sda/1-10001/raft_snapshot/snapshot_0043/data @@ -419,15 +428,21 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { snapshotChunkDataDir.append("/").append(chunkDataRpath_); LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir << ", Copyset: " << GroupIdString(); - // 如果数据目录不存在,那么说明 load snapshot 数据部分就不需要处理 + // If the data directory does not exist, the snapshot data part does not + // need to be processed if (fs_->DirExists(snapshotChunkDataDir)) { - // 加载快照数据前,要先清理copyset data目录下的文件 - // 否则可能导致快照加载以后存在一些残留的数据 - // 如果delete_file失败或者rename失败,当前node状态会置为ERROR - // 如果delete_file或者rename期间进程重启,copyset起来后会加载快照 - // 由于rename可以保证原子性,所以起来加载快照后,data目录一定能还原 - bool ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - delete_file(chunkDataApath_, true); + // Before loading snapshot data, the files under the copyset data + // directory must be cleaned up first; otherwise some residual data + // may remain after the snapshot is loaded. + // If delete_file or rename fails, the current node status is set + // to ERROR. + // If the process restarts during delete_file or rename, the + // copyset will load the snapshot again after it comes up. Since + // rename is atomic, the data directory is guaranteed to be + // restored once the snapshot has been loaded. + bool ret = + nodeOptions_.snapshot_file_system_adaptor->get()->delete_file( + chunkDataApath_, true); if (!ret) { LOG(ERROR) << "delete chunk data dir failed. " << "Copyset: " << GroupIdString() @@ -437,8 +452,8 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { LOG(INFO) << "delete chunk data dir success. 
" << "Copyset: " << GroupIdString() << ", path: " << chunkDataApath_; - ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - rename(snapshotChunkDataDir, chunkDataApath_); + ret = nodeOptions_.snapshot_file_system_adaptor->get()->rename( + snapshotChunkDataDir, chunkDataApath_); if (!ret) { LOG(ERROR) << "rename snapshot data dir " << snapshotChunkDataDir << "to chunk data dir " << chunkDataApath_ << " failed. " @@ -449,13 +464,13 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { << "to chunk data dir " << chunkDataApath_ << " success. " << "Copyset: " << GroupIdString(); } else { - LOG(INFO) << "load snapshot data path: " - << snapshotChunkDataDir << " not exist. " + LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir + << " not exist. " << "Copyset: " << GroupIdString(); } /** - * 2. 加载配置版本文件 + * 2. Load Configuration Version File */ std::string filePath = reader->get_path() + "/" + kCurveConfEpochFilename; if (fs_->FileExists(filePath)) { @@ -468,20 +483,25 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 3.重新init data store,场景举例: + * 3. Reinitializing the data store, with examples: * - * (1) 例如一个add peer,之后立马read这个时候data store会返回chunk - * not exist,因为这个新增的peer在刚开始起来的时候,没有任何数据,这 - * 个时候data store init了,那么新增的peer在leader恢复了数据之后, - * data store并不感知; + * (1) For instance, when adding a new peer and immediately reading data, + * the data store may return "chunk not exist." This is because the newly + * added peer initially has no data, and when the data store is initialized, + * it is not aware of the data that the new peer receives after the leader + * recovers its data. * - * (2) peer通过install snapshot恢复了所有的数据是通过rename操作的, - * 如果某个file之前被data store打开了,那么rename能成功,但是老的 - * 文件只有等data store close老的文件才能删除,所以需要重新init data - * store,并且close的文件的fd,然后重新open新的文件,不然data store - * 会一直是操作的老的文件,而一旦data store close相应的fd一次之后, - * 后面的write的数据就会丢,除此之外,如果 datastore init没有重新open - * 文件,也将导致read不到恢复过来的数据,而是read到老的数据。 + * (2) When a peer recovers all of its data through an install snapshot + * operation, it is performed through a rename operation. If a file was + * previously open in the data store, the rename operation can succeed, but + * the old file can only be deleted after the data store closes it. + * Therefore, it is necessary to reinitialize the data store, close the + * file's file descriptor (fd), and then reopen the new file. Otherwise, the + * data store will continue to operate on the old file. Once the data store + * closes, the corresponding fd, any subsequent write operations will be + * lost. Additionally, if the datastore is not reinitialized and the new + * file is not reopened, it may result in reading the old data rather than + * the recovered data. */ if (!dataStore_->Initialize()) { LOG(ERROR) << "data store init failed in on snapshot load. " @@ -490,8 +510,9 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 4.如果snapshot中存 conf,那么加载初始化,保证不需要以来 - * on_configuration_committed。需要注意的是这里会忽略joint stage的日志。 + * 4. If conf is stored in the snapshot, load initialization to ensure that + * there is no need for on_configuration_committed. It should be noted + * that the log of the joint stage will be ignored here. */ braft::SnapshotMeta meta; reader->load_meta(&meta); @@ -510,7 +531,7 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { void CopysetNode::on_leader_start(int64_t term) { /* - * Invoke order in on_leader_start: + * Invoke order in on_leader_start: * 1. 
flush concurrent apply queue. * 2. set term in states machine. * @@ -536,7 +557,7 @@ void CopysetNode::on_leader_start(int64_t term) { << " become leader, term is: " << leaderTerm_; } -void CopysetNode::on_leader_stop(const butil::Status &status) { +void CopysetNode::on_leader_stop(const butil::Status& status) { (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); @@ -544,7 +565,7 @@ void CopysetNode::on_leader_stop(const butil::Status &status) { << ", peer id: " << peerId_.to_string() << " stepped down"; } -void CopysetNode::on_error(const ::braft::Error &e) { +void CopysetNode::on_error(const ::braft::Error& e) { LOG(FATAL) << "Copyset: " << GroupIdString() << ", peer id: " << peerId_.to_string() << " meet raft error: " << e; @@ -556,7 +577,7 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, // Loading snapshot should not increase epoch. When loading // snapshot, the index is equal with lastSnapshotIndex_. LOG(INFO) << "index: " << index - << ", lastSnapshotIndex_: " << lastSnapshotIndex_; + << ", lastSnapshotIndex_: " << lastSnapshotIndex_; if (index != lastSnapshotIndex_) { std::unique_lock lock_guard(confLock_); conf_ = conf; @@ -569,63 +590,47 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, << ", epoch: " << epoch_.load(std::memory_order_acquire); } -void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << " stops following" << ctx; + << ", peer id: " << peerId_.to_string() << " stops following" + << ctx; } -void CopysetNode::on_start_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_start_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << "start following" << ctx; + << ", peer id: " << peerId_.to_string() << "start following" + << ctx; } -LogicPoolID CopysetNode::GetLogicPoolId() const { - return logicPoolId_; -} +LogicPoolID CopysetNode::GetLogicPoolId() const { return logicPoolId_; } -CopysetID CopysetNode::GetCopysetId() const { - return copysetId_; -} +CopysetID CopysetNode::GetCopysetId() const { return copysetId_; } -void CopysetNode::SetScan(bool scan) { - scaning_ = scan; -} +void CopysetNode::SetScan(bool scan) { scaning_ = scan; } -bool CopysetNode::GetScan() const { - return scaning_; -} +bool CopysetNode::GetScan() const { return scaning_; } -void CopysetNode::SetLastScan(uint64_t time) { - lastScanSec_ = time; -} +void CopysetNode::SetLastScan(uint64_t time) { lastScanSec_ = time; } -uint64_t CopysetNode::GetLastScan() const { - return lastScanSec_; -} +uint64_t CopysetNode::GetLastScan() const { return lastScanSec_; } std::vector& CopysetNode::GetFailedScanMap() { return failedScanMaps_; } -std::string CopysetNode::GetCopysetDir() const { - return copysetDirPath_; -} +std::string CopysetNode::GetCopysetDir() const { return copysetDirPath_; } uint64_t CopysetNode::GetConfEpoch() const { std::lock_guard lockguard(confLock_); return epoch_.load(std::memory_order_relaxed); } -int CopysetNode::LoadConfEpoch(const std::string &filePath) { +int CopysetNode::LoadConfEpoch(const std::string& filePath) { LogicPoolID loadLogicPoolID = 0; CopysetID loadCopysetID = 0; uint64_t loadEpoch = 0; - int ret = epochFile_->Load(filePath, - &loadLogicPoolID, - 
&loadCopysetID, + int ret = epochFile_->Load(filePath, &loadLogicPoolID, &loadCopysetID, &loadEpoch); if (0 == ret) { if (logicPoolId_ != loadLogicPoolID || copysetId_ != loadCopysetID) { @@ -643,7 +648,7 @@ int CopysetNode::LoadConfEpoch(const std::string &filePath) { return ret; } -int CopysetNode::SaveConfEpoch(const std::string &filePath) { +int CopysetNode::SaveConfEpoch(const std::string& filePath) { return epochFile_->Save(filePath, logicPoolId_, copysetId_, epoch_); } @@ -678,17 +683,17 @@ void CopysetNode::SetCopysetNode(std::shared_ptr node) { raftNode_ = node; } -void CopysetNode::SetSnapshotFileSystem(scoped_refptr *fs) { +void CopysetNode::SetSnapshotFileSystem(scoped_refptr* fs) { nodeOptions_.snapshot_file_system_adaptor = fs; } bool CopysetNode::IsLeaderTerm() const { - if (0 < leaderTerm_.load(std::memory_order_acquire)) - return true; + if (0 < leaderTerm_.load(std::memory_order_acquire)) return true; return false; } -bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT /* * Why not use lease_status.state==LEASE_VALID directly to judge? * @@ -707,13 +712,12 @@ bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) co return term > 0 && term == lease_status.term; } -bool CopysetNode::IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT return lease_status.state == braft::LEASE_EXPIRED; } -PeerId CopysetNode::GetLeaderId() const { - return raftNode_->leader_id(); -} +PeerId CopysetNode::GetLeaderId() const { return raftNode_->leader_id(); } butil::Status CopysetNode::TransferLeader(const Peer& peer) { butil::Status status; @@ -722,15 +726,15 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { if (raftNode_->leader_id() == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << "Skipped transferring leader to leader itself. 
" - << "peerid: " << peerId - << ", Copyset: " << GroupIdString(); + << "peerid: " << peerId << ", Copyset: " << GroupIdString(); return status; } int rc = raftNode_->transfer_leadership_to(peerId); if (rc != 0) { - status = butil::Status(rc, "Failed to transfer leader of copyset " + status = butil::Status(rc, + "Failed to transfer leader of copyset " "%s to peer %s, error: %s", GroupIdString().c_str(), peerId.to_string().c_str(), berror(rc)); @@ -741,9 +745,8 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { transferee_ = peer; status = butil::Status::OK(); - LOG(INFO) << "Transferred leader of copyset " - << GroupIdString() - << " to peer " << peerId; + LOG(INFO) << "Transferred leader of copyset " << GroupIdString() + << " to peer " << peerId; return status; } @@ -761,14 +764,13 @@ butil::Status CopysetNode::AddPeer(const Peer& peer) { if (peer == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << peerId << " is already a member of copyset " - << GroupIdString() - << ", skip adding peer"; + << GroupIdString() << ", skip adding peer"; return status; } } ConfigurationChangeDone* addPeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::ADD_PEER, peer); addPeerDone->expectedCfgChange = expectedCfgChange; raftNode_->add_peer(peerId, addPeerDone); @@ -797,13 +799,13 @@ butil::Status CopysetNode::RemovePeer(const Peer& peer) { if (!peerValid) { butil::Status status = butil::Status::OK(); - DVLOG(6) << peerId << " is not a member of copyset " - << GroupIdString() << ", skip removing"; + DVLOG(6) << peerId << " is not a member of copyset " << GroupIdString() + << ", skip removing"; return status; } ConfigurationChangeDone* removePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::REMOVE_PEER, peer); removePeerDone->expectedCfgChange = expectedCfgChange; raftNode_->remove_peer(peerId, removePeerDone); @@ -831,7 +833,7 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { return st; } ConfigurationChangeDone* changePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange; expectedCfgChange.type = ConfigChangeType::CHANGE_PEER; expectedCfgChange.alterPeer.set_address(adding.begin()->to_string()); @@ -845,18 +847,22 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { void CopysetNode::UpdateAppliedIndex(uint64_t index) { uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire); - // 只更新比自己大的 index + // Only update indexes larger than oneself if (index > curIndex) { /** - * compare_exchange_strong解释: - * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人 - * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成; - * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前 - * 的appliedindex,并且返回false。整个过程都是原子的 + * Explanation of compare_exchange_strong: + * First, it compares whether curIndex is equal to appliedIndex. If it + * is equal, it means that no one has modified appliedindex. In this + * case, it tries to update appliedIndex with the value of index, and if + * the update is successful, it's done. If curIndex is not equal to + * appliedindex, it indicates that someone else has updated appliedIndex + * in the meantime. In this case, it returns the current value of + * appliedindex through curIndex and returns false. This entire process + * is atomic. 
*/ - while (!appliedIndex_.compare_exchange_strong(curIndex, - index, - std::memory_order_acq_rel)) { //NOLINT + while (!appliedIndex_.compare_exchange_strong( + curIndex, index, + std::memory_order_acq_rel)) { // NOLINT if (index <= curIndex) { break; } @@ -876,27 +882,29 @@ CurveSegmentLogStorage* CopysetNode::GetLogStorage() const { return logStorage_; } -ConcurrentApplyModule *CopysetNode::GetConcurrentApplyModule() const { +ConcurrentApplyModule* CopysetNode::GetConcurrentApplyModule() const { return concurrentapply_; } -void CopysetNode::Propose(const braft::Task &task) { - raftNode_->apply(task); -} +void CopysetNode::Propose(const braft::Task& task) { raftNode_->apply(task); } -int CopysetNode::GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer) { +int CopysetNode::GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer) { /** - * 避免new leader当选leader之后,提交noop entry之前,epoch和 - * 配置可能不一致的情况。考虑如下情形: + * To prevent inconsistencies between the epoch and configuration before + * a new leader is elected and a noop entry is committed, consider the + * following scenario: * - * 三个成员的复制组{ABC},当前epoch=5,A是leader,收到配置配置+D, - * 假设B收到了{ABC+D}的配置变更日志,然后leader A挂了,B当选为了 - * new leader,在B提交noop entry之前,B上查询到的epoch值最大可能为5, - * 而查询到的配置确实{ABCD}了,所以这里在new leader B在提交noop entry - * 之前,也就是实现隐公提交配置变更日志{ABC+D}之前,不允许向用户返回 - * 配置和配置变更信息,避免epoch和配置信息不一致 + * In a replication group with three members {ABC}, the current epoch is 5, + * and A is the leader. A receives a configuration change log that adds D, + * and assume that B also receives the configuration change log {ABC+D}. + * Then, leader A crashes, and B is elected as the new leader. Before B + * commits the noop entry, the maximum epoch value it can query on B is + * still 5, but the queried configuration is {ABCD}. Therefore, here, before + * the new leader B commits the noop entry, which is effectively committing + * the hidden configuration change log {ABC+D}, it does not allow returning + * the configuration and configuration change information to the user to + * avoid epoch and configuration information inconsistency. 
*/ if (leaderTerm_.load(std::memory_order_acquire) <= 0) { *type = ConfigChangeType::NONE; @@ -922,9 +930,9 @@ uint64_t CopysetNode::LeaderTerm() const { return leaderTerm_.load(std::memory_order_acquire); } -int CopysetNode::GetHash(std::string *hash) { +int CopysetNode::GetHash(std::string* hash) { int ret = 0; - int fd = 0; + int fd = 0; int len = 0; uint32_t crc32c = 0; std::vector files; @@ -934,7 +942,8 @@ int CopysetNode::GetHash(std::string *hash) { return -1; } - // 计算所有chunk文件crc需要保证计算的顺序是一样的 + // Calculating all chunk files' crc requires ensuring that the order of + // calculations is the same std::sort(files.begin(), files.end()); for (std::string file : files) { @@ -953,7 +962,7 @@ int CopysetNode::GetHash(std::string *hash) { } len = fileInfo.st_size; - char *buff = new (std::nothrow) char[len]; + char* buff = new (std::nothrow) char[len]; if (nullptr == buff) { return -1; } @@ -974,15 +983,15 @@ int CopysetNode::GetHash(std::string *hash) { return 0; } -void CopysetNode::GetStatus(NodeStatus *status) { +void CopysetNode::GetStatus(NodeStatus* status) { raftNode_->get_status(status); } -void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status) { +void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status) { raftNode_->get_leader_lease_status(status); } -bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { +bool CopysetNode::GetLeaderStatus(NodeStatus* leaderStaus) { NodeStatus status; GetStatus(&status); if (status.leader_id.is_empty()) { @@ -997,16 +1006,15 @@ bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { brpc::Controller cntl; cntl.set_timeout_ms(500); brpc::Channel channel; - if (channel.Init(status.leader_id.addr, nullptr) !=0) { - LOG(WARNING) << "can not create channel to " - << status.leader_id.addr + if (channel.Init(status.leader_id.addr, nullptr) != 0) { + LOG(WARNING) << "can not create channel to " << status.leader_id.addr << ", copyset " << GroupIdString(); return false; } CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(status.leader_id.to_string()); request.set_logicpoolid(logicPoolId_); request.set_copysetid(copysetId_); @@ -1016,16 +1024,15 @@ bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { CopysetService_Stub stub(&channel); stub.GetCopysetStatus(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(WARNING) << "get leader status failed: " - << cntl.ErrorText() + LOG(WARNING) << "get leader status failed: " << cntl.ErrorText() << ", copyset " << GroupIdString(); return false; } if (response.status() != COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { LOG(WARNING) << "get leader status failed" - << ", status: " << response.status() - << ", copyset " << GroupIdString(); + << ", status: " << response.status() << ", copyset " + << GroupIdString(); return false; } @@ -1078,9 +1085,8 @@ void CopysetNode::SyncAllChunks() { CSErrorCode r = dataStore_->SyncChunk(chunk); if (r != CSErrorCode::Success) { LOG(FATAL) << "Sync Chunk failed in Copyset: " - << GroupIdString() - << ", chunkid: " << chunk - << " data store return: " << r; + << GroupIdString() << ", chunkid: " << chunk + << " data store return: " << r; } }); } @@ -1093,11 +1099,11 @@ void SyncChunkThread::Init(CopysetNode* node) { } void SyncChunkThread::Run() { - syncThread_ = std::thread([this](){ + syncThread_ = std::thread([this]() { while (running_) { std::unique_lock lock(mtx_); - 
cond_->wait_for(lock, - std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); + cond_->wait_for( + lock, std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); node_->SyncAllChunks(); } }); @@ -1111,9 +1117,7 @@ void SyncChunkThread::Stop() { } } -SyncChunkThread::~SyncChunkThread() { - Stop(); -} +SyncChunkThread::~SyncChunkThread() { Stop(); } } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 2941261240..f4a8fc7965 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -20,21 +20,24 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/trash.h" + #include +#include + #include -#include "src/chunkserver/trash.h" -#include "src/common/string_util.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/copyset_node.h" + #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/raftlog/define.h" +#include "src/common/string_util.h" +#include "src/common/uri_parser.h" using ::curve::chunkserver::RAFT_DATA_DIR; +using ::curve::chunkserver::RAFT_LOG_DIR; using ::curve::chunkserver::RAFT_META_DIR; using ::curve::chunkserver::RAFT_SNAP_DIR; -using ::curve::chunkserver::RAFT_LOG_DIR; namespace curve { namespace chunkserver { @@ -60,13 +63,13 @@ int Trash::Init(TrashOptions options) { walPool_ = options.walPool; chunkNum_.store(0); - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; localFileSystem_->List(trashPath_, &files); - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -100,8 +103,8 @@ int Trash::Fini() { return 0; } -int Trash::RecycleCopySet(const std::string &dirPath) { - // 回收站目录不存在,需要创建 +int Trash::RecycleCopySet(const std::string& dirPath) { + // The recycle bin directory does not exist and needs to be created if (!localFileSystem_->DirExists(trashPath_)) { LOG(INFO) << "Copyset recyler directory " << trashPath_ << " does not exist, creating it"; @@ -113,10 +116,11 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } } - // 如果回收站已存在该目录,本次删除失败 - std::string dst = trashPath_ + "/" + - dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + - '.' + std::to_string(std::time(nullptr)); + // If the directory already exists in the recycle bin, this deletion failed + std::string dst = + trashPath_ + "/" + + dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + '.' 
+ + std::to_string(std::time(nullptr)); if (localFileSystem_->DirExists(dst)) { LOG(WARNING) << "recycle error: " << dst << " already exist in " << trashPath_; @@ -137,28 +141,28 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } void Trash::DeleteEligibleFileInTrashInterval() { - while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { - // 扫描回收站 - DeleteEligibleFileInTrash(); - } + while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { + // Scan the recycle bin + DeleteEligibleFileInTrash(); + } } void Trash::DeleteEligibleFileInTrash() { - // trash目录暂不存在 + // The trash directory does not exist yet if (!localFileSystem_->DirExists(trashPath_)) { return; } - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector<std::string> files; if (0 != localFileSystem_->List(trashPath_, &files)) { LOG(ERROR) << "Trash failed list files in " << trashPath_; return; } - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip it if (!IsCopysetInTrash(file)) { continue; } @@ -172,7 +176,7 @@ void Trash::DeleteEligibleFileInTrash() { continue; } - // 删除copyset目录 + // Delete the copyset directory if (0 != localFileSystem_->Delete(copysetDir)) { LOG(ERROR) << "Trash fail to delete " << copysetDir; return; @@ -180,10 +184,10 @@ } } -bool Trash::IsCopysetInTrash(const std::string &dirName) { - // 合法的copyset目录: 高32位PoolId(>0)组成, 低32位由copysetId(>0)组成 - // 目录是十进制形式 - // 例如:2860448220024 (poolId: 666, copysetId: 888) +bool Trash::IsCopysetInTrash(const std::string& dirName) { + // A legal copyset directory name is a decimal groupId whose high 32 bits + // are the PoolId (>0) and whose low 32 bits are the CopysetId (>0). + // For example: 2860448220024, i.e. (666 << 32) | 888 + // (poolId: 666, copysetId: 888) uint64_t groupId; auto n = dirName.find("."); if (n == std::string::npos) { @@ -196,7 +200,7 @@ return GetPoolID(groupId) >= 1 && GetCopysetID(groupId) >= 1; } -bool Trash::NeedDelete(const std::string &copysetDir) { +bool Trash::NeedDelete(const std::string& copysetDir) { int fd = localFileSystem_->Open(copysetDir, O_RDONLY); if (0 > fd) { LOG(ERROR) << "Trash fail open " << copysetDir; @@ -219,15 +223,15 @@ return true; } -bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) { +bool Trash::IsChunkOrSnapShotFile(const std::string& chunkName) { return FileNameOperator::FileType::UNKNOWN != - FileNameOperator::ParseFileName(chunkName).type; + FileNameOperator::ParseFileName(chunkName).type; } -bool Trash::RecycleChunksAndWALInDir( - const std::string &copysetPath, const std::string &filename) { +bool Trash::RecycleChunksAndWALInDir(const std::string& copysetPath, + const std::string& filename) { bool isDir = localFileSystem_->DirExists(copysetPath); - // 是文件看是否需要回收 + // If it is a file, check whether it needs to be recycled. 
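    // Editor's aside (not part of this patch): given the helpers in this
    // file, the recursion below implements a simple dispatch:
    //   - chunk/snapshot file -> RecycleChunkfile(), returning the space to
    //     the chunk FilePool instead of freeing it
    //   - WAL segment file    -> RecycleWAL(), returning it to the WALPool
    //   - directory           -> list it and recurse into each entry,
    //     continuing past per-file failures so one bad file does not stop
    //     the whole scan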
if (!isDir) { if (IsChunkOrSnapShotFile(filename)) { return RecycleChunkfile(copysetPath, filename); @@ -238,18 +242,18 @@ } } - // 是目录,继续list + // It's a directory; keep listing its contents std::vector<std::string> files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; return false; } - // 遍历子文件 + // Traverse the sub-files bool ret = true; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; - // recycle 失败不应该中断其他文件的recycle + // A recycle failure should not interrupt recycling the other files if (!RecycleChunksAndWALInDir(filePath, file)) { ret = false; } @@ -257,13 +261,13 @@ return ret; } -bool Trash::RecycleChunkfile( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleChunkfile(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath - << " to FilePool"; + << " to FilePool"; return false; } @@ -271,13 +275,12 @@ return true; } -bool Trash::RecycleWAL( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleWAL(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { - LOG(ERROR) << "Trash failed recycle WAL " << filepath - << " to WALPool"; + LOG(ERROR) << "Trash failed recycle WAL " << filepath << " to WALPool"; return false; } @@ -285,12 +288,12 @@ return true; } -bool Trash::IsWALFile(const std::string &fileName) { +bool Trash::IsWALFile(const std::string& fileName) { int match = 0; int64_t first_index = 0; int64_t last_index = 0; - match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, - &first_index, &last_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, &first_index, + &last_index); if (match == 2) { LOG(INFO) << "recycle closed segment wal file, path: " << fileName << " first_index: " << first_index @@ -298,8 +301,7 @@ return true; } - match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, - &first_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, &first_index); if (match == 1) { LOG(INFO) << "recycle open segment wal file, path: " << fileName << " first_index: " << first_index; @@ -308,7 +310,7 @@ return false; } -uint32_t Trash::CountChunkNumInCopyset(const std::string &copysetPath) { +uint32_t Trash::CountChunkNumInCopyset(const std::string& copysetPath) { std::vector<std::string> files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; @@ -317,15 +319,14 @@ // Traverse subdirectories uint32_t chunkNum = 0; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; bool isDir = localFileSystem_->DirExists(filePath); if (!isDir) { // valid: chunkfile, snapshotfile, walfile - if (!(IsChunkOrSnapShotFile(file) || - IsWALFile(file))) { - LOG(WARNING) << "Trash find a illegal file:" - << file << " in " << copysetPath; + if (!(IsChunkOrSnapShotFile(file) || IsWALFile(file))) { + LOG(WARNING) << "Trash found an illegal file:" << file << " in " + << copysetPath; continue; } ++chunkNum; diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h index a3a3c89d53..c6791c52c0 100644 --- a/src/chunkserver/trash.h +++ b/src/chunkserver/trash.h @@ -25,25 +25,27 @@ #include #include -#include "src/fs/local_filesystem.h" + #include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" +#include "src/fs/local_filesystem.h" -using ::curve::common::Thread; using ::curve::common::Atomic; -using ::curve::common::Mutex; -using ::curve::common::LockGuard; using ::curve::common::InterruptibleSleeper; +using ::curve::common::LockGuard; +using ::curve::common::Mutex; +using ::curve::common::Thread; namespace curve { namespace chunkserver { -struct TrashOptions{ - // copyset的trash路径 +struct TrashOptions { + // Trash path for copysets std::string trashPath; - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 + // The file can be physically recycled after it has been in the trash for + // expiredAfterSec seconds int expiredAfterSec; - // 扫描trash目录的时间间隔 + // Time interval for scanning the trash directory int scanPeriodSec; std::shared_ptr<LocalFileSystem> localFileSystem; @@ -60,18 +62,19 @@ class Trash { int Fini(); /* - * @brief DeleteEligibleFileInTrash 回收trash目录下的物理空间 - */ + * @brief DeleteEligibleFileInTrash recycles the physical space in the trash + * directory + */ void DeleteEligibleFileInTrash(); - int RecycleCopySet(const std::string &dirPath); + int RecycleCopySet(const std::string& dirPath); /* - * @brief 获取回收站中chunk的个数 - * - * @return chunk个数 - */ - uint32_t GetChunkNum() {return chunkNum_.load();} + * @brief Get the number of chunks in the recycle bin + * + * @return Number of chunks + */ + uint32_t GetChunkNum() { return chunkNum_.load(); } /** * @brief is WAL or not ? 
@@ -94,46 +97,49 @@ class Trash { private: /* - * @brief DeleteEligibleFileInTrashInterval 每隔一段时间进行trash物理空间回收 - */ + * @brief DeleteEligibleFileInTrashInterval periodically recycles the + * physical space under the trash directory + */ void DeleteEligibleFileInTrashInterval(); /* - * @brief NeedDelete 文件是否需要删除,放入trash的时间大于 - * trash中expiredAfterSec可以删除 - * - * @param[in] copysetDir copyset的目录路径 - * - * @return true-可以被删除 - */ - bool NeedDelete(const std::string &copysetDir); + * @brief NeedDelete Whether the file needs to be deleted; it can be + * deleted once it has been in the trash for longer than + * expiredAfterSec + * + * @param[in] copysetDir: copyset directory path + * + * @return true - can be deleted + */ + bool NeedDelete(const std::string& copysetDir); /* - * @brief IsCopysetInTrash 是否为回收站中的copyset的目录 - * - * @param[in] dirName 文目录路径 - * - * @return true-符合copyset目录命名规则 - */ - bool IsCopysetInTrash(const std::string &dirName); + * @brief IsCopysetInTrash Whether the directory is a copyset directory + * in the recycle bin + * + * @param[in] dirName: directory path + * + * @return true - complies with the copyset directory naming rule + */ + bool IsCopysetInTrash(const std::string& dirName); /* - * @brief Recycle Chunkfile and wal file in Copyset - * - * @param[in] copysetDir copyset dir - * @param[in] filename filename - */ - bool RecycleChunksAndWALInDir( - const std::string &copysetDir, const std::string &filename); + * @brief Recycle Chunkfile and wal file in Copyset + * + * @param[in] copysetDir: copyset dir + * @param[in] filename: filename + */ + bool RecycleChunksAndWALInDir(const std::string& copysetDir, + const std::string& filename); /* - * @brief Recycle Chunkfile - * - * @param[in] filepath 文件路径 - * @param[in] filename 文件名 - */ - bool RecycleChunkfile( - const std::string &filepath, const std::string &filename); + * @brief Recycle Chunkfile + * + * @param[in] filepath: file path + * @param[in] filename: file name + */ + bool RecycleChunkfile(const std::string& filepath, + const std::string& filename); /** * @brief Recycle WAL @@ -147,41 +153,42 @@ class Trash { bool RecycleWAL(const std::string& filepath, const std::string& filename); /* - * @brief 统计copyset目录中的chunk个数 - * - * @param[in] copysetPath chunk所在目录 - * @return 返回chunk个数 - */ - uint32_t CountChunkNumInCopyset(const std::string &copysetPath); + * @brief Counts the number of chunks in the copyset directory + * + * @param[in] copysetPath: directory containing the chunks + * @return the number of chunks + */ + uint32_t CountChunkNumInCopyset(const std::string& copysetPath); private: - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 + // The file can be physically recycled after it has been in the trash for + // expiredAfterSec seconds int expiredAfterSec_; - // 扫描trash目录的时间间隔 + // Time interval for scanning the trash directory int scanPeriodSec_; - // 回收站中chunk的个数 + // Number of chunks in the recycle bin Atomic<uint32_t> chunkNum_; Mutex mtx_; - // 本地文件系统 + // Local filesystem std::shared_ptr<LocalFileSystem> localFileSystem_; - // chunk池子 + // Chunk file pool std::shared_ptr<FilePool> chunkFilePool_; // wal pool std::shared_ptr<FilePool> walPool_; - // 回收站全路径 + // Full path of the recycle bin std::string trashPath_; - // 后台清理回收站的线程 + // Thread that cleans the recycle bin in the background Thread recycleThread_; - // false-开始后台任务,true-停止后台任务 + // false: background task started, true: background task stopped Atomic<bool> isStop_; InterruptibleSleeper sleeper_; @@ -190,4 +197,3 @@ } // namespace curve #endif // SRC_CHUNKSERVER_TRASH_H_ - diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp index 
592e9d2a06..00af4a73d5 100644 --- a/src/client/chunk_closure.cpp +++ b/src/client/chunk_closure.cpp @@ -22,48 +22,58 @@ #include "src/client/chunk_closure.h" -#include -#include #include +#include +#include #include "src/client/client_common.h" #include "src/client/copyset_client.h" +#include "src/client/io_tracker.h" #include "src/client/metacache.h" #include "src/client/request_closure.h" #include "src/client/request_context.h" #include "src/client/service_helper.h" -#include "src/client/io_tracker.h" -// TODO(tongguangxun) :优化重试逻辑,将重试逻辑与RPC返回逻辑拆开 +// TODO(tongguangxun): Optimize retry logic by separating the retry logic from +// the RPC return logic namespace curve { namespace client { -ClientClosure::BackoffParam ClientClosure::backoffParam_; -FailureRequestOption ClientClosure::failReqOpt_; +ClientClosure::BackoffParam ClientClosure::backoffParam_; +FailureRequestOption ClientClosure::failReqOpt_; void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { RequestClosure* reqDone = static_cast(done_); - // 如果对应的cooysetId leader可能发生变更 - // 那么设置这次重试请求超时时间为默认值 - // 这是为了尽快重试这次请求 - // 从copysetleader迁移到client GetLeader获取到新的leader会有1~2s的延迟 - // 对于一个请求来说,GetLeader仍然可能返回旧的Leader - // rpc timeout时间可能会被设置成2s/4s,等到超时后再去获取leader信息 - // 为了尽快在新的Leader上重试请求,将rpc timeout时间设置为默认值 + // If the leader of the corresponding copysetId may change, + // set the retry request timeout to the default value. + // This is done to retry this request as soon as possible. + // When migrating from the copyset leader to obtaining a new leader + // through client GetLeader, there may be a delay of 1~2 seconds. + // For a given request, GetLeader may still return the old Leader. + // The RPC timeout may be set to 2s/4s, and it will be only after + // the timeout that the leader information is retrieved again. + // To promptly retry the request on the new Leader, set the RPC timeout + // to the default value. 
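    // Editor's sketch (not part of this patch): using only the option names
    // visible in this function, the timeout policy below reduces to:
    //
    //   on RPC timeout:
    //       if retriedTimes < chunkserverMinRetryTimesForceTimeoutBackoff
    //               && leaderMayChange:
    //           nextTimeout = chunkserverRPCTimeoutMS       // retry quickly
    //       else:
    //           nextTimeout = TimeoutBackOff(retriedTimes)  // exponential
    //   on CHUNK_OP_STATUS_OVERLOAD:
    //       bthread_usleep(OverLoadBackOff(retriedTimes))   // backoff sleep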
if (cntlstatus == brpc::ERPCTIMEDOUT || cntlstatus == ETIMEDOUT) { uint64_t nextTimeout = 0; uint64_t retriedTimes = reqDone->GetRetriedTimes(); bool leaderMayChange = metaCache_->IsLeaderMayChange( chunkIdInfo_.lpid_, chunkIdInfo_.cpid_); - // 当某一个IO重试超过一定次数后,超时时间一定进行指数退避 - // 当底层chunkserver压力大时,可能也会触发unstable - // 由于copyset leader may change,会导致请求超时时间设置为默认值 - // 而chunkserver在这个时间内处理不了,导致IO hang - // 真正宕机的情况下,请求重试一定次数后会处理完成 - // 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 - if (retriedTimes < failReqOpt_.chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT + // Once an IO has been retried more than a certain number of times, the + // timeout must go through exponential backoff. Heavy pressure on the + // underlying chunkserver may also trigger the unstable state. Because + // the copyset leader may change, the request timeout is reset to the + // default value; if the chunkserver cannot finish within that time, + // the IO hangs. When the chunkserver is really down, the request + // completes after a limited number of retries; if the retries keep + // failing, it is not a downtime case, and the timeout must then enter + // the exponential backoff logic. + if (retriedTimes < + failReqOpt_ + .chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT leaderMayChange) { nextTimeout = failReqOpt_.chunkserverRPCTimeoutMS; } else { @@ -71,25 +81,23 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } reqDone->SetNextTimeOutMS(nextTimeout); - LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout << ", " + << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); return; } if (rpcstatus == CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD) { uint64_t nextsleeptime = OverLoadBackOff(reqDone->GetRetriedTimes()); LOG(WARNING) << "chunkserver overload, sleep(us) = " << nextsleeptime - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << ", " << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); bthread_usleep(nextsleeptime); return; } @@ -103,19 +111,19 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } } - LOG(WARNING) - << "Rpc failed " - << (retryDirectly_ ? 
"retry directly, " - : "sleep " + std::to_string(nextSleepUS) + " us, ") - << *reqCtx_ << ", cntl status = " << cntlstatus - << ", response status = " - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(rpcstatus)) - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "Rpc failed " + << (retryDirectly_ + ? "retry directly, " + : "sleep " + std::to_string(nextSleepUS) + " us, ") + << *reqCtx_ << ", cntl status = " << cntlstatus + << ", response status = " + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast( + rpcstatus)) + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (nextSleepUS != 0) { bthread_usleep(nextSleepUS); @@ -134,8 +142,11 @@ uint64_t ClientClosure::OverLoadBackOff(uint64_t currentRetryTimes) { random_time -= nextsleeptime / 10; nextsleeptime += random_time; - nextsleeptime = std::min(nextsleeptime, failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT - nextsleeptime = std::max(nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT + nextsleeptime = + std::min(nextsleeptime, + failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT + nextsleeptime = std::max( + nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT return nextsleeptime; } @@ -153,10 +164,11 @@ uint64_t ClientClosure::TimeoutBackOff(uint64_t currentRetryTimes) { return nextTimeout; } -// 统一请求回调函数入口 -// 整体处理逻辑与之前相同 -// 针对不同的请求类型和返回状态码,进行相应的处理 -// 各子类需要实现SendRetryRequest,进行重试请求 +// Unified entry point for request callback functions. +// The overall processing logic remains the same as before. +// Specific handling is performed based on different request types +// and response status codes. +// Subclasses need to implement SendRetryRequest for retrying requests. void ClientClosure::Run() { std::unique_ptr selfGuard(this); std::unique_ptr cntlGuard(cntl_); @@ -176,80 +188,81 @@ void ClientClosure::Run() { needRetry = true; OnRpcFailed(); } else { - // 只要rpc正常返回,就清空超时计数器 - metaCache_->GetUnstableHelper().ClearTimeout( - chunkserverID_, chunkserverEndPoint_); + // As long as RPC returns normally, clear the timeout counter + metaCache_->GetUnstableHelper().ClearTimeout(chunkserverID_, + chunkserverEndPoint_); status_ = GetResponseStatus(); switch (status_) { - // 1. 请求成功 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: - OnSuccess(); - break; - - // 2.1 不是leader - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: - MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_); - needRetry = true; - OnRedirected(); - break; - - // 2.2 Copyset不存在,大概率都是配置变更了 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: - needRetry = true; - OnCopysetNotExist(); - break; - - // 2.3 chunk not exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: - OnChunkNotExist(); - break; - - // 2.4 非法参数,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: - OnInvalidRequest(); - break; + // 1. 
Request successful + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: + OnSuccess(); + break; + + // 2.1 Not the leader + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: + MetricHelper::IncremRedirectRPCCount(fileMetric_, + reqCtx_->optype_); + needRetry = true; + OnRedirected(); + break; - // 2.5 返回backward - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: - if (reqCtx_->optype_ == OpType::WRITE) { + // 2.2 Copyset does not exist, most likely due to configuration + // changes + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: needRetry = true; - OnBackward(); - } else { - LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " return backward, " - << *reqCtx_ - << ", status=" << status_ + OnCopysetNotExist(); + break; + + // 2.3 Chunk not exist, return directly without retry + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: + OnChunkNotExist(); + break; + + // 2.4 Illegal parameter, returned directly without retry + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: + OnInvalidRequest(); + break; + + // 2.5 BACKWARD returned + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: + if (reqCtx_->optype_ == OpType::WRITE) { + needRetry = true; + OnBackward(); + } else { + LOG(ERROR) + << OpTypeToString(reqCtx_->optype_) + << " return backward, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); + } + break; + + // 2.6 Chunk already exists, return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: + OnChunkExist(); + break; + + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: + OnEpochTooOld(); + break; + + default: + needRetry = true; + LOG(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed for UNKNOWN reason, " << *reqCtx_ << ", status=" + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast<CHUNK_OP_STATUS>(status_)) << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); - } - break; - - // 2.6 返回chunk exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: - OnChunkExist(); - break; - - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: - OnEpochTooOld(); - break; - - default: - needRetry = true; - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed for UNKNOWN reason, " << *reqCtx_ - << ", status=" - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast<CHUNK_OP_STATUS>(status_)) - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); } } @@ -264,22 +277,22 @@ void ClientClosure::OnRpcFailed() { status_ = cntl_->ErrorCode(); - // 如果连接失败,再等一定时间再重试 + // If the connection fails, wait for a certain amount of time before + // retrying if (cntlstatus_ == brpc::ERPCTIMEDOUT) { - // 如果RPC超时, 对应的chunkserver超时请求次数+1 + // If the RPC timed out, increment the timeout-request counter of + // the corresponding chunkserver metaCache_->GetUnstableHelper().IncreTimeout(chunkserverID_); MetricHelper::IncremTimeOutRPCCount(fileMetric_, reqCtx_->optype_); } - LOG_EVERY_SECOND(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed, error code: " - 
<< cntl_->ErrorCode() - << ", error: " << cntl_->ErrorText() - << ", " << *reqCtx_ + LOG_EVERY_SECOND(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed, error code: " << cntl_->ErrorCode() + << ", error: " << cntl_->ErrorText() << ", " << *reqCtx_ << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); ProcessUnstableState(); @@ -291,26 +304,27 @@ void ClientClosure::ProcessUnstableState() { chunkserverID_, chunkserverEndPoint_); switch (state) { - case UnstableState::ServerUnstable: { - std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); - int ret = metaCache_->SetServerUnstable(ip); - if (ret != 0) { - LOG(WARNING) << "Set server(" << ip << ") unstable failed, " - << "now set chunkserver(" << chunkserverID_ << ") unstable"; + case UnstableState::ServerUnstable: { + std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); + int ret = metaCache_->SetServerUnstable(ip); + if (ret != 0) { + LOG(WARNING) + << "Set server(" << ip << ") unstable failed, " + << "now set chunkserver(" << chunkserverID_ << ") unstable"; + metaCache_->SetChunkserverUnstable(chunkserverID_); + } + break; + } + case UnstableState::ChunkServerUnstable: { metaCache_->SetChunkserverUnstable(chunkserverID_); + break; } - break; - } - case UnstableState::ChunkServerUnstable: { - metaCache_->SetChunkserverUnstable(chunkserverID_); - break; - } - case UnstableState::NoUnstable: { - RefreshLeader(); - break; - } - default: - break; + case UnstableState::NoUnstable: { + RefreshLeader(); + break; + } + default: + break; } } @@ -319,64 +333,58 @@ void ClientClosure::OnSuccess() { auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkNotExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " not exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " not exists, " + << *reqCtx_ << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << 
butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " exists, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnEpochTooOld() { reqDone_->SetFailed(status_); LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " epoch too old, reqCtx: " << *reqCtx_ - << ", status: " << status_ - << ", retried times: " << reqDone_->GetRetriedTimes() - << ", IO id: " << reqDone_->GetIOTracker()->GetID() - << ", request id: " << reqCtx_->id_ - << ", remote side: " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " epoch too old, reqCtx: " << *reqCtx_ + << ", status: " << status_ + << ", retried times: " << reqDone_->GetRetriedTimes() + << ", IO id: " << reqDone_->GetIOTracker()->GetID() + << ", request id: " << reqCtx_->id_ << ", remote side: " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnRedirected() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (response_->has_redirect() ? response_->redirect() : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (response_->has_redirect() ? response_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (response_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(response_->redirect()); @@ -390,13 +398,11 @@ void ClientClosure::OnRedirected() { void ClientClosure::OnCopysetNotExist() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " copyset not exists, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); RefreshLeader(); } @@ -443,23 +449,20 @@ void ClientClosure::RefreshLeader() { << ", IO id = " << reqDone_->GetIOTracker()->GetID() << ", request id = " << reqCtx_->id_; } else { - // 如果refresh leader获取到了新的leader信息 - // 则重试之前不进行睡眠 + // If refresh leader obtains new leader information, + // retry without sleeping before. 
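        // Editor's note (not part of this patch): retryDirectly_ becomes
        // true only when the refreshed leader differs from the chunkserver
        // that just failed; if the same chunkserver is still the leader,
        // the normal backoff sleep in PreProcessBeforeRetry still applies.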
retryDirectly_ = (leaderId != chunkserverID_); } } void ClientClosure::OnBackward() { const auto latestSn = metaCache_->GetLatestFileSn(); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " return BACKWARD, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " return BACKWARD, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); reqCtx_->seq_ = latestSn; } @@ -467,38 +470,26 @@ void ClientClosure::OnBackward() { void ClientClosure::OnInvalidRequest() { reqDone_->SetFailed(status_); LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " failed for invalid format, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " failed for invalid format, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); MetricHelper::IncremFailRPCCount(fileMetric_, reqCtx_->optype_); } void WriteChunkClosure::SendRetryRequest() { - client_->WriteChunk(reqCtx_->idinfo_, - reqCtx_->fileId_, - reqCtx_->epoch_, - reqCtx_->seq_, - reqCtx_->writeData_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->WriteChunk(reqCtx_->idinfo_, reqCtx_->fileId_, reqCtx_->epoch_, + reqCtx_->seq_, reqCtx_->writeData_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } -void WriteChunkClosure::OnSuccess() { - ClientClosure::OnSuccess(); -} +void WriteChunkClosure::OnSuccess() { ClientClosure::OnSuccess(); } void ReadChunkClosure::SendRetryRequest() { - client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } void ReadChunkClosure::OnSuccess() { @@ -516,9 +507,7 @@ void ReadChunkClosure::OnChunkNotExist() { void ReadChunkSnapClosure::SendRetryRequest() { client_->ReadChunkSnapshot(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + reqCtx_->offset_, reqCtx_->rawlength_, done_); } void ReadChunkSnapClosure::OnSuccess() { @@ -528,10 +517,8 @@ void ReadChunkSnapClosure::OnSuccess() { } void DeleteChunkSnapClosure::SendRetryRequest() { - client_->DeleteChunkSnapshotOrCorrectSn( - reqCtx_->idinfo_, - reqCtx_->correctedSeq_, - done_); + client_->DeleteChunkSnapshotOrCorrectSn(reqCtx_->idinfo_, + reqCtx_->correctedSeq_, done_); } void GetChunkInfoClosure::SendRetryRequest() { @@ -548,17 +535,16 @@ void GetChunkInfoClosure::OnSuccess() { } void GetChunkInfoClosure::OnRedirected() { - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " redirected, " << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << 
reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (chunkinforesponse_->has_redirect() ? chunkinforesponse_->redirect() - : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (chunkinforesponse_->has_redirect() + ? chunkinforesponse_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (chunkinforesponse_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(chunkinforesponse_->redirect()); @@ -571,19 +557,14 @@ void GetChunkInfoClosure::OnRedirected() { } void CreateCloneChunkClosure::SendRetryRequest() { - client_->CreateCloneChunk(reqCtx_->idinfo_, - reqCtx_->location_, - reqCtx_->seq_, - reqCtx_->correctedSeq_, - reqCtx_->chunksize_, - done_); + client_->CreateCloneChunk(reqCtx_->idinfo_, reqCtx_->location_, + reqCtx_->seq_, reqCtx_->correctedSeq_, + reqCtx_->chunksize_, done_); } void RecoverChunkClosure::SendRetryRequest() { - client_->RecoverChunk(reqCtx_->idinfo_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + client_->RecoverChunk(reqCtx_->idinfo_, reqCtx_->offset_, + reqCtx_->rawlength_, done_); } int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { @@ -601,7 +582,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->UpdateLeader(lpId, cpId, leaderAddr.addr_); if (ret != 0) { LOG(WARNING) << "Update leader of copyset (" << lpId << ", " << cpId - << ") in metaCache fail"; + << ") in metaCache fail"; return -1; } @@ -609,7 +590,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->GetLeader(lpId, cpId, &leaderId, &leaderEp); if (ret != 0) { LOG(INFO) << "Get leader of copyset (" << lpId << ", " << cpId - << ") from metaCache fail"; + << ") from metaCache fail"; return -1; } @@ -617,5 +598,5 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/chunk_closure.h b/src/client/chunk_closure.h index f5d9acd220..eb7e42221a 100644 --- a/src/client/chunk_closure.h +++ b/src/client/chunk_closure.h @@ -23,15 +23,16 @@ #ifndef SRC_CLIENT_CHUNK_CLOSURE_H_ #define SRC_CLIENT_CHUNK_CLOSURE_H_ -#include #include #include +#include + #include #include #include "proto/chunk.pb.h" -#include "src/client/client_config.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" #include "src/client/client_metric.h" #include "src/client/request_closure.h" #include "src/common/math_util.h" @@ -42,15 +43,16 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkResponse; using curve::chunkserver::GetChunkInfoResponse; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; class MetaCache; class CopysetClient; /** - * ClientClosure,负责保存Rpc上下文, - * 包含cntl和response已经重试次数 + * ClientClosure, responsible for maintaining the Rpc context, + * including the control (cntl) and response, as well as the + * retry count. 
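+ * Run() serves as the unified entry point: it inspects the RPC result
+ * and dispatches to one of the On*() handlers declared below, e.g.
+ * OnSuccess(), OnRedirected() or OnRetry().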
 */
 class ClientClosure : public Closure {
 public:
@@ -59,67 +61,59 @@ class ClientClosure {
     virtual ~ClientClosure() = default;
 
-    void SetCntl(brpc::Controller* cntl) {
-        cntl_ = cntl;
-    }
+    void SetCntl(brpc::Controller* cntl) { cntl_ = cntl; }
 
     virtual void SetResponse(Message* response) {
         response_.reset(static_cast<ChunkResponse*>(response));
     }
 
-    void SetChunkServerID(ChunkServerID csid) {
-        chunkserverID_ = csid;
-    }
+    void SetChunkServerID(ChunkServerID csid) { chunkserverID_ = csid; }
 
-    ChunkServerID GetChunkServerID() const {
-        return chunkserverID_;
-    }
+    ChunkServerID GetChunkServerID() const { return chunkserverID_; }
 
     void SetChunkServerEndPoint(const butil::EndPoint& endPoint) {
         chunkserverEndPoint_ = endPoint;
     }
 
-    EndPoint GetChunkServerEndPoint() const {
-        return chunkserverEndPoint_;
-    }
+    EndPoint GetChunkServerEndPoint() const { return chunkserverEndPoint_; }
 
-    // 统一Run函数入口
+    // Unified Run() entry point
     void Run() override;
 
-    // 重试请求
+    // Retry the request
     void OnRetry();
 
-    // Rpc Failed 处理函数
+    // Handler for RPC failure
     void OnRpcFailed();
 
-    // 返回成功 处理函数
+    // Handler for a success response
    virtual void OnSuccess();
 
-    // 返回重定向 处理函数
+    // Handler for a redirect response
    virtual void OnRedirected();
 
-    // copyset不存在
+    // Handler for copyset not exist
    void OnCopysetNotExist();
 
-    // 返回backward
+    // Handler for a BACKWARD response
    void OnBackward();
 
-    // 返回chunk不存在 处理函数
+    // Handler for chunk not exist
    virtual void OnChunkNotExist();
 
-    // 返回chunk存在 处理函数
+    // Handler for chunk already exists
    void OnChunkExist();
 
     // handle epoch too old
     void OnEpochTooOld();
 
-    // 非法参数
+    // Handler for invalid request parameters
    void OnInvalidRequest();
 
-    // 发送重试请求
+    // Send the retry request
    virtual void SendRetryRequest() = 0;
 
-    // 获取response返回的状态码
+    // Get the status code returned in the response
    virtual CHUNK_OP_STATUS GetResponseStatus() const {
        return response_->status();
    }
 
@@ -132,45 +126,43 @@
         SetBackoffParam();
 
         DVLOG(9) << "Client clousre conf info: "
-            << "chunkserverOPRetryIntervalUS = "
-            << failReqOpt_.chunkserverOPRetryIntervalUS
-            << ", chunkserverOPMaxRetry = "
-            << failReqOpt_.chunkserverOPMaxRetry;
+                 << "chunkserverOPRetryIntervalUS = "
+                 << failReqOpt_.chunkserverOPRetryIntervalUS
+                 << ", chunkserverOPMaxRetry = "
+                 << failReqOpt_.chunkserverOPMaxRetry;
     }
 
-    Closure* GetClosure() const {
-        return done_;
-    }
+    Closure* GetClosure() const { return done_; }
 
-    // 测试使用,设置closure
-    void SetClosure(Closure* done) {
-        done_ = done;
-    }
+    // For testing: set the closure
+    void SetClosure(Closure* done) { done_ = done; }
 
-    static FailureRequestOption GetFailOpt() {
-        return failReqOpt_;
-    }
+    static FailureRequestOption GetFailOpt() { return failReqOpt_; }
 
     /**
-     * 在重试之前根据返回值进行预处理
-     * 场景1: rpc timeout,那么这时候会指数增加当前rpc的超时时间,然后直接进行重试
-     * 场景2:底层OVERLOAD,那么需要在重试之前睡眠一段时间,睡眠时间根据重试次数指数增长
-     * @param: rpcstatue为rpc返回值
-     * @param: cntlstatus为本次rpc controller返回值
+     * Preprocess based on the return value before retrying.
+     * Scenario 1: on an RPC timeout, the current RPC timeout is increased
+     * exponentially and the request is retried immediately.
+     * Scenario 2: on an underlying OVERLOAD, the client sleeps for a while
+     * before retrying, and the sleep time grows exponentially with the
+     * retry count.
+     * @param: rpcstatus: Return value of the RPC.
+     * @param: cntlstatus: Return value of the RPC controller for this call.
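+     * For example, assuming a base RPC timeout of 1000ms, consecutive
+     * timeouts would use roughly 1000ms, 2000ms, 4000ms, ... as the next
+     * timeout, while consecutive OVERLOAD responses would sleep an
+     * exponentially growing interval before each retry (illustrative
+     * numbers only).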
*/ void PreProcessBeforeRetry(int rpcstatue, int cntlstatus); /** - * 底层chunkserver overload之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回当前的需要睡眠的时间 + * After underlying chunkserver overload, backoff is required + * based on the retry count. + * @param currentRetryTimes: The current number of retries. + * @return: Returns the current sleep time needed. */ static uint64_t OverLoadBackOff(uint64_t currentRetryTimes); /** - * rpc timeout之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回下一次RPC 超时时间 + * After RPC timeout, backoff is required based on the retry count. + * @param currentRetryTimes: The current number of retries. + * @return: Returns the next RPC timeout duration. */ static uint64_t TimeoutBackOff(uint64_t currentRetryTimes); @@ -207,32 +199,35 @@ class ClientClosure : public Closure { void RefreshLeader(); - static FailureRequestOption failReqOpt_; - - brpc::Controller* cntl_; - std::unique_ptr response_; - CopysetClient* client_; - Closure* done_; - // 这里保存chunkserverID,是为了区别当前这个rpc是发给哪个chunkserver的 - // 这样方便在rpc closure里直接找到,当前是哪个chunkserver返回的失败 - ChunkServerID chunkserverID_; - butil::EndPoint chunkserverEndPoint_; - - // 记录当前请求的相关信息 - MetaCache* metaCache_; - RequestClosure* reqDone_; - FileMetric* fileMetric_; - RequestContext* reqCtx_; - ChunkIDInfo chunkIdInfo_; - - // 发送重试请求前是否睡眠 + static FailureRequestOption failReqOpt_; + + brpc::Controller* cntl_; + std::unique_ptr response_; + CopysetClient* client_; + Closure* done_; + + // Saving the Chunkserver ID here is to distinguish which Chunkserver + // this RPC is sent to. This makes it convenient to identify, within + // the RPC closure, which Chunkserver returned a failure. + + ChunkServerID chunkserverID_; + butil::EndPoint chunkserverEndPoint_; + + // Record relevant information for the current request + MetaCache* metaCache_; + RequestClosure* reqDone_; + FileMetric* fileMetric_; + RequestContext* reqCtx_; + ChunkIDInfo chunkIdInfo_; + + // Whether to sleep before sending a retry request bool retryDirectly_ = false; - // response 状态码 - int status_; + // response status code + int status_; - // rpc 状态码 - int cntlstatus_; + // rpc status code + int cntlstatus_; }; class WriteChunkClosure : public ClientClosure { @@ -308,7 +303,7 @@ class RecoverChunkClosure : public ClientClosure { void SendRetryRequest() override; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CHUNK_CLOSURE_H_ diff --git a/src/client/client_common.h b/src/client/client_common.h index 8620f050d1..ab067e8114 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -28,8 +28,8 @@ #include #include -#include #include +#include #include "include/client/libcurve.h" #include "src/common/throttle.h" @@ -53,7 +53,7 @@ constexpr uint64_t KiB = 1024; constexpr uint64_t MiB = 1024 * KiB; constexpr uint64_t GiB = 1024 * MiB; -// 操作类型 +// Operation type enum class OpType { READ = 0, WRITE, @@ -67,7 +67,7 @@ enum class OpType { }; /** - * 与nameserver.proto中的FileStatus一一对应 + * Corresponds one-to-one with FileStatus in nameserver.proto */ enum class FileStatus { Created = 0, @@ -90,12 +90,10 @@ typedef struct ChunkIDInfo { ChunkIDInfo(ChunkID cid, LogicPoolID lpid, CopysetID cpid) : cid_(cid), cpid_(cpid), lpid_(lpid) {} - bool Valid() const { - return lpid_ > 0 && cpid_ > 0; - } + bool Valid() const { return lpid_ > 0 && cpid_ > 0; } } ChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef 
struct ChunkInfoDetail { std::vector chunkSn; } ChunkInfoDetail_t; @@ -106,7 +104,8 @@ typedef struct LeaseSession { uint64_t createTime; } LeaseSession_t; -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to +// the segment in the logicalpool typedef struct LogicalPoolCopysetIDInfo { LogicPoolID lpid; std::vector cpidVec; @@ -117,7 +116,7 @@ typedef struct LogicalPoolCopysetIDInfo { } } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct SegmentInfo { uint32_t segmentsize; uint32_t chunksize; @@ -147,9 +146,9 @@ typedef struct FInfo { uint64_t length; uint64_t ctime; uint64_t seqnum; - // userinfo是当前操作这个文件的用户信息 + // userinfo is the user information currently operating on this file UserInfo_t userinfo; - // owner是当前文件所属信息 + // owner is the information to which the current file belongs std::string owner; std::string filename; std::string fullPathName; @@ -162,7 +161,7 @@ typedef struct FInfo { uint64_t stripeCount; std::string poolset; - OpenFlags openflags; + OpenFlags openflags; common::ReadWriteThrottleParams throttleParams; FInfo() { @@ -187,10 +186,10 @@ typedef struct FileEpoch { } } FileEpoch_t; -// PeerAddr 代表一个copyset group里的一个chunkserver节点 -// 与braft中的PeerID对应 +// PeerAddr represents a chunkserver node in a copyset group +// Corresponds to PeerID in braft struct PeerAddr { - // 节点的地址信息 + // Address information of nodes EndPoint addr_; PeerAddr() = default; @@ -198,17 +197,17 @@ struct PeerAddr { bool IsEmpty() const { return (addr_.ip == butil::IP_ANY && addr_.port == 0) && - addr_.socket_file.empty(); + addr_.socket_file.empty(); } - // 重置当前地址信息 + // Reset current address information void Reset() { addr_.ip = butil::IP_ANY; addr_.port = 0; } - // 从字符串中将地址信息解析出来 - int Parse(const std::string &str) { + // Parse address information from a string + int Parse(const std::string& str) { int idx; char ip_str[64]; if (2 > sscanf(str.c_str(), "%[^:]%*[:]%d%*[:]%d", ip_str, &addr_.port, @@ -224,8 +223,9 @@ struct PeerAddr { return 0; } - // 将该节点地址信息转化为字符串形式 - // 在get leader调用中可以将该值直接传入request + // Convert the node address information into a string format. + // In the get leader call, this value can be directly passed + // into the request. 
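+    // For example (illustrative), Parse("127.0.0.1:8200:0") fills addr_
+    // with ip 127.0.0.1 and port 8200, and ToString() renders the address
+    // back into the same "ip:port:index" form used by braft peer IDs.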
std::string ToString() const { char str[128]; snprintf(str, sizeof(str), "%s:%d", butil::endpoint2str(addr_).c_str(), @@ -233,32 +233,32 @@ struct PeerAddr { return std::string(str); } - bool operator==(const PeerAddr &other) const { + bool operator==(const PeerAddr& other) const { return addr_ == other.addr_; } }; -inline const char *OpTypeToString(OpType optype) { +inline const char* OpTypeToString(OpType optype) { switch (optype) { - case OpType::READ: - return "Read"; - case OpType::WRITE: - return "Write"; - case OpType::READ_SNAP: - return "ReadSnapshot"; - case OpType::DELETE_SNAP: - return "DeleteSnapshot"; - case OpType::CREATE_CLONE: - return "CreateCloneChunk"; - case OpType::RECOVER_CHUNK: - return "RecoverChunk"; - case OpType::GET_CHUNK_INFO: - return "GetChunkInfo"; - case OpType::DISCARD: - return "Discard"; - case OpType::UNKNOWN: - default: - return "Unknown"; + case OpType::READ: + return "Read"; + case OpType::WRITE: + return "Write"; + case OpType::READ_SNAP: + return "ReadSnapshot"; + case OpType::DELETE_SNAP: + return "DeleteSnapshot"; + case OpType::CREATE_CLONE: + return "CreateCloneChunk"; + case OpType::RECOVER_CHUNK: + return "RecoverChunk"; + case OpType::GET_CHUNK_INFO: + return "GetChunkInfo"; + case OpType::DISCARD: + return "Discard"; + case OpType::UNKNOWN: + default: + return "Unknown"; } } @@ -279,16 +279,14 @@ class SnapCloneClosure : public google::protobuf::Closure { class ClientDummyServerInfo { public: - static ClientDummyServerInfo &GetInstance() { + static ClientDummyServerInfo& GetInstance() { static ClientDummyServerInfo clientInfo; return clientInfo; } - void SetIP(const std::string &ip) { localIP_ = ip; } + void SetIP(const std::string& ip) { localIP_ = ip; } - std::string GetIP() const { - return localIP_; - } + std::string GetIP() const { return localIP_; } void SetPort(uint32_t port) { localPort_ = port; } @@ -309,22 +307,22 @@ class ClientDummyServerInfo { inline void TrivialDeleter(void*) {} -inline const char *FileStatusToName(FileStatus status) { +inline const char* FileStatusToName(FileStatus status) { switch (status) { - case FileStatus::Created: - return "Created"; - case FileStatus::Deleting: - return "Deleting"; - case FileStatus::Cloning: - return "Cloning"; - case FileStatus::CloneMetaInstalled: - return "CloneMetaInstalled"; - case FileStatus::Cloned: - return "Cloned"; - case FileStatus::BeingCloned: - return "BeingCloned"; - default: - return "Unknown"; + case FileStatus::Created: + return "Created"; + case FileStatus::Deleting: + return "Deleting"; + case FileStatus::Cloning: + return "Cloning"; + case FileStatus::CloneMetaInstalled: + return "CloneMetaInstalled"; + case FileStatus::Cloned: + return "Cloned"; + case FileStatus::BeingCloned: + return "BeingCloned"; + default: + return "Unknown"; } } @@ -359,7 +357,7 @@ struct CreateFileContext { std::string poolset; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CLIENT_COMMON_H_ diff --git a/src/client/client_metric.h b/src/client/client_metric.h index 826b8b9b2d..a2b48f5a16 100644 --- a/src/client/client_metric.h +++ b/src/client/client_metric.h @@ -28,9 +28,9 @@ #include #include -#include "src/common/timeutility.h" #include "src/client/client_common.h" #include "src/common/string_util.h" +#include "src/common/timeutility.h" using curve::common::TimeUtility; @@ -48,11 +48,11 @@ struct SlowRequestMetric { : count(prefix, name + "_total") {} }; -// 秒级信息统计 +// Second-level information statistics struct 
PerSecondMetric { - // 当前persecond计数总数 + // Current total number of second counts bvar::Adder count; - // persecond真实数据,这个数据依赖于count + // persecond real data depends on the count bvar::PerSecond> value; PerSecondMetric(const std::string& prefix, const std::string& name) @@ -60,21 +60,21 @@ struct PerSecondMetric { value(prefix, name, &count, 1) {} }; -// 接口统计信息metric信息统计 +// Interface statistics information metric information statistics struct InterfaceMetric { - // 接口统计信息调用qps + // Call qps for interface statistics information PerSecondMetric qps; // error request persecond PerSecondMetric eps; // receive request persecond PerSecondMetric rps; - // 调用吞吐 + // Call throughput PerSecondMetric bps; - // 调用超时次数qps + // Call timeout count qps PerSecondMetric timeoutQps; - // 调用redirect次数qps + // Number of calls to redirect qps PerSecondMetric redirectQps; - // 调用latency + // Call latency bvar::LatencyRecorder latency; InterfaceMetric(const std::string& prefix, const std::string& name) @@ -102,33 +102,36 @@ struct DiscardMetric { bvar::Adder pending; }; -// 文件级别metric信息统计 +// File level metric information statistics struct FileMetric { const std::string prefix = "curve_client"; - // 当前metric归属于哪个文件 + // Which file does the current metric belong to std::string filename; - // 当前文件inflight io数量 + // Current file inflight io quantity bvar::Adder inflightRPCNum; - // 当前文件请求的最大请求字节数,这种统计方式可以很方便的看到最大值,分位值 + // The maximum number of request bytes for the current file request, which + // is a convenient statistical method to see the maximum and quantile values bvar::LatencyRecorder readSizeRecorder; bvar::LatencyRecorder writeSizeRecorder; bvar::LatencyRecorder discardSizeRecorder; - // libcurve最底层read rpc接口统计信息metric统计 + // Libcurve's lowest level read rpc interface statistics information metric + // statistics InterfaceMetric readRPC; - // libcurve最底层write rpc接口统计信息metric统计 + // Libcurve's lowest level write rpc interface statistics information metric + // statistics InterfaceMetric writeRPC; - // 用户读请求qps、eps、rps + // User Read Request QPS, EPS, RPS InterfaceMetric userRead; - // 用户写请求qps、eps、rps + // User write request QPS, EPS, RPS InterfaceMetric userWrite; // user's discard request InterfaceMetric userDiscard; - // get leader失败重试qps + // Get leader failed and retry qps PerSecondMetric getLeaderRetryQPS; // Number of slow requests @@ -153,52 +156,52 @@ struct FileMetric { discardMetric(prefix + filename) {} }; -// 用于全局mds接口统计信息调用信息统计 +// Used for global mds interface statistics, call information statistics struct MDSClientMetric { std::string prefix; - // mds的地址信息 + // Address information of mds std::string metaserverAddr; bvar::PassiveStatus metaserverAddress; - // openfile接口统计信息 + // Openfile interface statistics InterfaceMetric openFile; - // createFile接口统计信息 + // CreateFile interface statistics InterfaceMetric createFile; - // closeFile接口统计信息 + // CloseFile interface statistics InterfaceMetric closeFile; - // getFileInfo接口统计信息 + // GetFileInfo interface statistics InterfaceMetric getFile; - // RefreshSession接口统计信息 + // RefreshSession Interface Statistics InterfaceMetric refreshSession; - // GetServerList接口统计信息 + // GetServerList interface statistics InterfaceMetric getServerList; - // GetOrAllocateSegment接口统计信息 + // GetOrAllocateSegment interface statistics InterfaceMetric getOrAllocateSegment; - // DeAllocateSegment接口统计信息 + // DeAllocateSegment Interface Statistics InterfaceMetric deAllocateSegment; - // RenameFile接口统计信息 + // RenameFile Interface Statistics InterfaceMetric renameFile; - // 
Extend接口统计信息 + // Extend Interface Statistics InterfaceMetric extendFile; - // DeleteFile接口统计信息 + // DeleteFile interface statistics InterfaceMetric deleteFile; // RecoverFile interface metric InterfaceMetric recoverFile; - // changeowner接口统计信息 + // Changeowner Interface Statistics InterfaceMetric changeOwner; - // listdir接口统计信息 + // Listdir interface statistics InterfaceMetric listDir; - // register接口统计信息 + // Register Interface Statistics InterfaceMetric registerClient; - // GetChunkServerID接口统计 + // GetChunkServerID interface statistics InterfaceMetric getChunkServerId; - // ListChunkServerInServer接口统计 + // ListChunkServerInServer Interface Statistics InterfaceMetric listChunkserverInServer; // IncreaseEpoch InterfaceMetric increaseEpoch; - // 切换mds server总次数 + // Total number of switching MDS server bvar::Adder mdsServerChangeTimes; explicit MDSClientMetric(const std::string& prefix_ = "") @@ -245,8 +248,8 @@ struct LatencyGuard { class MetricHelper { public: /** - * 统计getleader重试次数 - * @param: fm为当前文件的metric指针 + * Count the number of retries for getleader + * @param: fm is the metric pointer of the current file */ static void IncremGetLeaderRetryTime(FileMetric* fm) { if (fm != nullptr) { @@ -255,13 +258,13 @@ class MetricHelper { } /** - * 统计用户当前读写请求次数,用于qps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of read and write requests from users for QPS + * calculation + * @param: fm: The metric pointer of the current file + * @param: length: The current request size + * @param: read: whether the current operation is a read or write operation */ - static void IncremUserQPSCount(FileMetric* fm, - uint64_t length, + static void IncremUserQPSCount(FileMetric* fm, uint64_t length, OpType type) { if (fm != nullptr) { switch (type) { @@ -286,9 +289,10 @@ class MetricHelper { } /** - * 统计用户当前读写请求失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of failed read/write requests by users for EPS + * calculation + * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremUserEPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -308,13 +312,19 @@ class MetricHelper { } /** - * 统计用户当前接收到的读写请求次数,用于rps计算 - * rps: receive request persecond, 就是当前接口每秒接收到的请求数量 - * qps: query request persecond, 就是当前接口每秒处理的请求数量 - * eps: error request persecond, 就是当前接口每秒出错的请求数量 - * rps减去qps就是当前client端每秒钟等待的请求数量,这部分请求会持久占用当前一秒内的内存 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the number of read and write requests currently received by the + * user for RPS calculation. + * rps: receive request persecond, which is the + * number of requests received by the current interface per second. + * qps:query request persecond, which is the number of requests processed by + * the current interface per second. + * eps: error request persecond, which is the number of requests that make + * errors per second on the current interface. + * rps minus qps is the number of requests that the current client is + * waiting for per second, which will persistently occupy the current memory + * for one second. 
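+     * For example (illustrative numbers): with rps = 1000 and qps = 800,
+     * about 200 requests per second are waiting inside the client.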
+ * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremUserRPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -334,9 +344,9 @@ class MetricHelper { } /** - * 统计当前rpc失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of RPC failures for EPS calculation + * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremFailRPCCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -354,9 +364,10 @@ class MetricHelper { } /** - * 统计用户当前读写请求超时次数,用于timeoutQps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Counts the number of times a user's current read/write request has timed + * out, used for timeoutQps calculation + * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremTimeOutRPCCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -374,9 +385,9 @@ class MetricHelper { } /** - * 统计请求被redirect的次数 - * @param fileMetric 当前文件的metric指针 - * @param opType 请求类型 + * Count the number of times requests have been redirected + * @param fileMetric: The metric pointer of the current file + * @param opType: request type */ static void IncremRedirectRPCCount(FileMetric* fileMetric, OpType opType) { if (fileMetric) { @@ -394,13 +405,13 @@ class MetricHelper { } /** - * 统计读写RPC接口统计信息请求次数及带宽统计,用于qps及bps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Statistics of the number of requests and bandwidth for reading and + * writing RPC interfaces, used for QPS and bps calculations + * @param: fm: The metric pointer of the current file + * @param: length: The current request size + * @param: read: whether the current operation is a read or write operation */ - static void IncremRPCQPSCount(FileMetric* fm, - uint64_t length, + static void IncremRPCQPSCount(FileMetric* fm, uint64_t length, OpType type) { if (fm != nullptr) { switch (type) { @@ -419,13 +430,13 @@ class MetricHelper { } /** - * 统计读写RPC接口统计信息请求次数及带宽统计,用于rps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Statistics of the number of requests and bandwidth for reading and writing + * RPC interfaces, used for RPS calculations + * @param: fm: The metric pointer of the current file + * @param: length: The current request size + * @param: read: whether the current operation is a read or write operation */ - static void IncremRPCRPSCount(FileMetric* fm, - OpType type) { + static void IncremRPCRPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { switch (type) { case OpType::READ: @@ -440,9 +451,7 @@ class MetricHelper { } } - static void LatencyRecord(FileMetric* fm, - uint64_t duration, - OpType type) { + static void LatencyRecord(FileMetric* fm, uint64_t duration, OpType type) { if (fm != nullptr) { switch (type) { case OpType::READ: @@ -457,8 +466,7 @@ class MetricHelper { } } - static void UserLatencyRecord(FileMetric* fm, - uint64_t duration, + static void UserLatencyRecord(FileMetric* fm, uint64_t duration, OpType type) { if (fm != nullptr) { switch (type) { @@ -502,7 +510,7 @@ class MetricHelper { } } }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CLIENT_METRIC_H_ diff --git a/src/client/splitor.h b/src/client/splitor.h index 
eaffa27a62..4dd95ddded 100644 --- a/src/client/splitor.h +++ b/src/client/splitor.h @@ -46,53 +46,51 @@ class Splitor { static void Init(const IOSplitOption& ioSplitOpt); /** - * 用户IO拆分成Chunk级别的IO - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: FileEpoch_t file epoch info + * Split user IO into Chunk level IO + * @param: iotracker: Big IO Context Information + * @param: metaCache: The cache information that needs to be used during + * the IO splitting process + * @param: targetlist: The storage list of small IO after the large IO is + * split + * @param: data: The data to be written + * @param: offset: The actual offset of IO issued by the user + * @param: length: Data length + * @param: mdsclient: Searches for information through mdsclient when + * searching for metaahe fails + * @param: fi: stores some basic information about the current IO, such as + * chunksize, etc + * @param: FileEpoch_t: file epoch information */ - static int IO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch); + static int IO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, size_t length, + MDSClient* mdsclient, const FInfo_t* fi, + const FileEpoch_t* fEpoch); /** - * 对单ChunkIO进行细粒度拆分 - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: cid是当前chunk的ID信息 - * @param: data是待写的数据 - * @param: offset是当前chunk内的偏移 - * @param: length数据长度 - * @param: seq是当前chunk的版本号 + * Fine grained splitting of single ChunkIO + * @param: iotracker: Big IO Context Information + * @param: metaCache: The cache information that needs to be used during + * the IO splitting process + * @param: targetlist: The storage list of small IO after the large IO is + * split + * @param: cid: The ID information of the current chunk + * @param: data: The data to be written + * @param: offset: The offset within the current chunk + * @param: length: Data length + * @param: seq: The version number of the current chunk */ - static int SingleChunkIO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - const ChunkIDInfo& cid, - butil::IOBuf* data, - off_t offset, - size_t length, - uint64_t seq); + static int SingleChunkIO2ChunkRequests( + IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, const ChunkIDInfo& cid, + butil::IOBuf* data, off_t offset, size_t length, uint64_t seq); /** - * @brief 计算请求的location信息 - * @param ioTracker io上下文信息 - * @param metaCache 文件缓存信息 - * @param chunkIdx 当前chunk信息 - * @return source信息 + * @brief Calculates the location information of the request + * @param ioTracker: IO Context Information + * @param metaCache: File cache information + * @param chunkIdx: Current chunk information + * @return source information */ static RequestSourceInfo CalcRequestSourceInfo(IOTracker* ioTracker, MetaCache* metaCache, @@ -105,34 +103,33 @@ class Splitor { private: /** - * IO2ChunkRequests内部会调用这个函数,进行真正的拆分操作 - * @param: iotracker大IO上下文信息 - * @param: mc是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: 
offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: chunkidx是当前chunk在vdisk中的索引值 + * IO2ChunkRequests will internally call this function for actual splitting + * operations + * @param: iotracker: Big IO Context Information + * @param: mc: The cache information that needs to be used during IO + * splitting process + * @param: targetlist: The storage list of small IO after the large IO is + * split + * @param: Data: The data to be written + * @param: offset: The actual offset of IO issued by the user + * @param: length: Data length + * @param: mdsclient: Searches for information through mdsclient when + * searching for metaahe fails + * @param: fi: Stores some basic information about the current IO, such as + * chunksize, etc + * @param: chunkidx: The index value of the current chunk in the vdisk */ - static bool AssignInternal(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - uint64_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch, - ChunkIndex chunkidx); - - static bool GetOrAllocateSegment(bool allocateIfNotExist, - uint64_t offset, - MDSClient* mdsClient, - MetaCache* metaCache, + static bool AssignInternal(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, + uint64_t length, MDSClient* mdsclient, + const FInfo_t* fi, const FileEpoch_t* fEpoch, + ChunkIndex chunkidx); + + static bool GetOrAllocateSegment(bool allocateIfNotExist, uint64_t offset, + MDSClient* mdsClient, MetaCache* metaCache, const FInfo* fileInfo, - const FileEpoch_t *fEpoch, + const FileEpoch_t* fEpoch, ChunkIndex chunkidx); static int SplitForNormal(IOTracker* iotracker, MetaCache* metaCache, @@ -149,14 +146,13 @@ class Splitor { static bool MarkDiscardBitmap(IOTracker* iotracker, FileSegment* fileSegment, - SegmentIndex segmentIndex, - uint64_t offset, + SegmentIndex segmentIndex, uint64_t offset, uint64_t len); private: - // IO拆分模块所使用的配置信息 + // Configuration information used for IO split modules static IOSplitOption iosplitopt_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_SPLITOR_H_ diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 5cc99945fe..ae330b1294 100644 --- a/src/client/unstable_helper.cpp +++ b/src/client/unstable_helper.cpp @@ -24,13 +24,13 @@ namespace curve { namespace client { -UnstableState -UnstableHelper::GetCurrentUnstableState(ChunkServerID csId, - const butil::EndPoint &csEndPoint) { +UnstableState UnstableHelper::GetCurrentUnstableState( + ChunkServerID csId, const butil::EndPoint& csEndPoint) { std::string ip = butil::ip2str(csEndPoint.ip).c_str(); mtx_.lock(); - // 如果当前ip已经超过阈值,则直接返回chunkserver unstable + // If the current IP has exceeded the threshold, it will directly return + // chunkserver unstable uint32_t unstabled = serverUnstabledChunkservers_[ip].size(); if (unstabled >= option_.serverUnstableThreshold) { serverUnstabledChunkservers_[ip].emplace(csId); diff --git a/src/client/unstable_helper.h b/src/client/unstable_helper.h index 89cc22be8e..ba23343501 100644 --- a/src/client/unstable_helper.h +++ b/src/client/unstable_helper.h @@ -35,20 +35,17 @@ namespace curve { namespace client { -enum class UnstableState { - NoUnstable, - ChunkServerUnstable, - ServerUnstable -}; - -// 如果chunkserver宕机或者网络不可达, 发往对应chunkserver的rpc会超时 -// 返回之后, 回去refresh 
leader然后再去发送请求
-// 这种情况下不同copyset上的请求,总会先rpc timedout然后重新refresh leader
-// 为了避免一次多余的rpc timedout
-// 记录一下发往同一个chunkserver上超时请求的次数
-// 如果超过一定的阈值,会发送http请求检查chunkserver是否健康
-// 如果不健康,则通知所有leader在这台chunkserver上的copyset
-// 主动去refresh leader,而不是根据缓存的leader信息直接发送rpc
+enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable };
+
+// If a chunkserver is down or unreachable, RPCs sent to it will time out,
+// after which the client refreshes the leader and resends the request. In
+// that case, requests on different copysets would each hit an RPC timeout
+// before refreshing the leader. To avoid such redundant timeouts, the
+// number of timed-out requests sent to the same chunkserver is recorded.
+// Once it exceeds a threshold, an HTTP request is sent to check whether the
+// chunkserver is healthy. If it is unhealthy, all copysets whose leader is
+// on this chunkserver are notified to refresh their leader actively,
+// instead of sending RPCs based on the cached leader information.
 class UnstableHelper {
 public:
     UnstableHelper() = default;
@@ -56,9 +53,7 @@
     UnstableHelper(const UnstableHelper&) = delete;
     UnstableHelper& operator=(const UnstableHelper&) = delete;
 
-    void Init(const ChunkServerUnstableOption& opt) {
-        option_ = opt;
-    }
+    void Init(const ChunkServerUnstableOption& opt) { option_ = opt; }
 
     void IncreTimeout(ChunkServerID csId) {
         std::unique_lock<bthread::Mutex> guard(mtx_);
@@ -78,10 +73,10 @@
 private:
     /**
-     * @brief 检查chunkserver状态
+     * @brief Check chunkserver status
      *
-     * @param: endPoint chunkserver的ip:port地址
-     * @return: true 健康 / false 不健康
+     * @param: endPoint: ip:port address of the chunkserver
+     * @return: true if healthy / false if unhealthy
      */
     bool CheckChunkServerHealth(const butil::EndPoint& endPoint) const {
         return ServiceHelper::CheckChunkServerHealth(
@@ -92,10 +87,10 @@
     bthread::Mutex mtx_;
 
-    // 同一chunkserver连续超时请求次数
+    // Number of consecutive timeout requests for the same chunkserver
     std::unordered_map<ChunkServerID, uint32_t> timeoutTimes_;
 
-    // 同一server上unstable chunkserver的id
+    // IDs of unstable chunkservers on the same server
     std::unordered_map<std::string, std::unordered_set<ChunkServerID>>
         serverUnstabledChunkservers_;
 };
diff --git a/src/common/authenticator.h b/src/common/authenticator.h
index 7d9ba319c3..f52560379a 100644
--- a/src/common/authenticator.h
+++ b/src/common/authenticator.h
@@ -30,31 +30,30 @@ namespace common {
 class Authenticator {
 public:
     /**
-     * bref: 获取要进行签名的字符串
-     * @param: date, 当前的时间
-     * @param: owner, 文件所有者
-     * @return: 返回需要进行加密的字符串
+     * brief: Get the string to be signed
+     * @param: date, the current time
+     * @param: owner, the file owner
+     * @return: Returns the string to be encrypted
      */
     static std::string GetString2Signature(uint64_t date,
-                                        const std::string& owner);
+                                           const std::string& owner);
 
     /**
-     * bref: 为字符串计算签名
-     * @param: String2Signature, 需要进行签名计算的字符串
-     * @param: secretKey, 为计算的秘钥
-     * @return: 返回需要进行签名过后的字符串
+     * brief: Calculate the signature for a string
+     * @param: String2Signature, the string to sign
+     * @param: secretKey, the secret key used for the calculation
+     * @return: Returns the signed string
      */
     static std::string CalcString2Signature(const std::string& String2Signature,
                                             const std::string& secretKey);
 
 private:
-    static int HMacSha256(const void* key, int key_size,
-                          const void* data, int data_size,
-                          void* digest);
+    static int HMacSha256(const void* key, int key_size, const void* data,
+                          int data_size, void* digest);
 
-    static
std::string Base64(const unsigned char *src, size_t sz); + static std::string Base64(const unsigned char* src, size_t sz); }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_AUTHENTICATOR_H_ diff --git a/src/common/bitmap.cpp b/src/common/bitmap.cpp index dbff32702c..50d33181d9 100644 --- a/src/common/bitmap.cpp +++ b/src/common/bitmap.cpp @@ -20,20 +20,22 @@ * Author: yangyaokai */ +#include "src/common/bitmap.h" + #include #include -#include + #include -#include "src/common/bitmap.h" +#include namespace curve { namespace common { -std::string BitRangeVecToString(const std::vector &ranges) { +std::string BitRangeVecToString(const std::vector& ranges) { std::stringstream ss; for (uint32_t i = 0; i < ranges.size(); ++i) { if (i != 0) { - ss << ", "; + ss << ", "; } ss << "(" << ranges[i].beginIndex << "," << ranges[i].endIndex << ")"; } @@ -44,14 +46,14 @@ const uint32_t Bitmap::NO_POS = 0xFFFFFFFF; Bitmap::Bitmap(uint32_t bits) : bits_(bits) { int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memset(bitmap_, 0, count); } Bitmap::Bitmap(uint32_t bits, const char* bitmap) : bits_(bits) { int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; if (bitmap != nullptr) { memcpy(bitmap_, bitmap, count); @@ -64,7 +66,7 @@ Bitmap::Bitmap(uint32_t bits, char* bitmap, bool transfer) : bits_(bits) { int count = unitCount(); if (!transfer) { - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; if (bitmap != nullptr) { memcpy(bitmap_, bitmap, count); @@ -87,18 +89,17 @@ Bitmap::~Bitmap() { Bitmap::Bitmap(const Bitmap& bitmap) { bits_ = bitmap.Size(); int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); } -Bitmap& Bitmap::operator = (const Bitmap& bitmap) { - if (this == &bitmap) - return *this; +Bitmap& Bitmap::operator=(const Bitmap& bitmap) { + if (this == &bitmap) return *this; delete[] bitmap_; bits_ = bitmap.Size(); int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); return *this; @@ -118,23 +119,19 @@ Bitmap& Bitmap::operator=(Bitmap&& other) noexcept { return *this; } -bool Bitmap::operator == (const Bitmap& bitmap) const { - if (bits_ != bitmap.Size()) - return false; +bool Bitmap::operator==(const Bitmap& bitmap) const { + if (bits_ != bitmap.Size()) return false; return 0 == memcmp(bitmap_, bitmap.GetBitmap(), unitCount()); } -bool Bitmap::operator != (const Bitmap& bitmap) const { +bool Bitmap::operator!=(const Bitmap& bitmap) const { return !(*this == bitmap); } -void Bitmap::Set() { - memset(bitmap_, 0xff, unitCount()); -} +void Bitmap::Set() { memset(bitmap_, 0xff, unitCount()); } void Bitmap::Set(uint32_t index) { - if (index < bits_) - bitmap_[indexOfUnit(index)] |= mask(index); + if (index < bits_) bitmap_[indexOfUnit(index)] |= mask(index); } void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) { @@ -144,13 +141,10 @@ void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) { } } -void 
Bitmap::Clear() { - memset(bitmap_, 0, unitCount()); -} +void Bitmap::Clear() { memset(bitmap_, 0, unitCount()); } void Bitmap::Clear(uint32_t index) { - if (index < bits_) - bitmap_[indexOfUnit(index)] &= ~mask(index); + if (index < bits_) bitmap_[indexOfUnit(index)] &= ~mask(index); } void Bitmap::Clear(uint32_t startIndex, uint32_t endIndex) { @@ -169,106 +163,93 @@ bool Bitmap::Test(uint32_t index) const { uint32_t Bitmap::NextSetBit(uint32_t index) const { for (; index < bits_; ++index) { - if (Test(index)) - break; + if (Test(index)) break; } - if (index >= bits_) - index = NO_POS; + if (index >= bits_) index = NO_POS; return index; } uint32_t Bitmap::NextSetBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; - // bitmap中最后一个bit的index值 + // The index value of the last bit in the bitmap uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex - if (endIndex > lastIndex) - endIndex = lastIndex; + // The endIndex value cannot exceed lastIndex + if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { - if (Test(index)) - break; + if (Test(index)) break; } - if (index > endIndex) - index = NO_POS; + if (index > endIndex) index = NO_POS; return index; } uint32_t Bitmap::NextClearBit(uint32_t index) const { for (; index < bits_; ++index) { - if (!Test(index)) - break; + if (!Test(index)) break; } - if (index >= bits_) - index = NO_POS; + if (index >= bits_) index = NO_POS; return index; } uint32_t Bitmap::NextClearBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex - if (endIndex > lastIndex) - endIndex = lastIndex; + // The endIndex value cannot exceed lastIndex + if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { - if (!Test(index)) - break; + if (!Test(index)) break; } - if (index > endIndex) - index = NO_POS; + if (index > endIndex) index = NO_POS; return index; } -void Bitmap::Divide(uint32_t startIndex, - uint32_t endIndex, +void Bitmap::Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const { - // endIndex的值不能小于startIndex - if (endIndex < startIndex) - return; + // The value of endIndex cannot be less than startIndex + if (endIndex < startIndex) return; - // endIndex值不能超过lastIndex + // The endIndex value cannot exceed lastIndex uint32_t lastIndex = bits_ - 1; - if (endIndex > lastIndex) - endIndex = lastIndex; + if (endIndex > lastIndex) endIndex = lastIndex; BitRange clearRange; BitRange setRange; vector tmpClearRanges; vector tmpSetRanges; - // 下一个位为0的index + // Next index with 0 bits uint32_t nextClearIndex; - // 下一个位为1的index + // Next index with bit 1 uint32_t nextSetIndex; - // 划分所有range + // Divide all ranges while (startIndex != NO_POS) { nextClearIndex = NextClearBit(startIndex, endIndex); - // 1.存放当前clear index之前的 set range - // nextClearIndex如果等于startIndex说明前面没有 set range + // 1. Store the set range before the current clear index + // If nextClearIndex is equal to startIndex, it indicates that there + // is no set range before it if (nextClearIndex != startIndex) { setRange.beginIndex = startIndex; - // nextClearIndex等于NO_POS说明已经找到末尾 - // 最后一块连续区域是 set range - setRange.endIndex = nextClearIndex == NO_POS - ? endIndex - : nextClearIndex - 1; + // nextClearIndex equals NO_POS description has found the end + // The last continuous area is set range + setRange.endIndex = + nextClearIndex == NO_POS ? 
endIndex : nextClearIndex - 1; tmpSetRanges.push_back(setRange); } - if (nextClearIndex == NO_POS) - break; + if (nextClearIndex == NO_POS) break; nextSetIndex = NextSetBit(nextClearIndex, endIndex); - // 2.存放当前set index之前的 clear range - // 能到这一步说明前面肯定存在clear range,所以不用像第1步一样做判断 + // 2. Store the clear range before the current set index + // Being able to reach this step indicates that there must be a clear + // range ahead, so there is no need to make a judgment like in step 1 clearRange.beginIndex = nextClearIndex; - clearRange.endIndex = nextSetIndex == NO_POS - ? endIndex - : nextSetIndex - 1; + clearRange.endIndex = + nextSetIndex == NO_POS ? endIndex : nextSetIndex - 1; tmpClearRanges.push_back(clearRange); startIndex = nextSetIndex; } - // 根据参数中的clearRanges和setRanges指针是否为空返回结果 + // Returns a result based on whether the clearRanges and setRanges pointers + // in the parameters are empty if (clearRanges != nullptr) { *clearRanges = std::move(tmpClearRanges); } @@ -277,13 +258,9 @@ void Bitmap::Divide(uint32_t startIndex, } } -uint32_t Bitmap::Size() const { - return bits_; -} +uint32_t Bitmap::Size() const { return bits_; } -const char* Bitmap::GetBitmap() const { - return bitmap_; -} +const char* Bitmap::GetBitmap() const { return bitmap_; } } // namespace common } // namespace curve diff --git a/src/common/bitmap.h b/src/common/bitmap.h index e7a0e1270d..370c55e070 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -24,8 +24,9 @@ #define SRC_COMMON_BITMAP_H_ #include -#include + #include +#include namespace curve { namespace common { @@ -36,30 +37,30 @@ const int BITMAP_UNIT_SIZE = 8; const int ALIGN_FACTOR = 3; // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE /** - * 表示bitmap中的一段连续区域,为闭区间 + * Represents a continuous region in a bitmap, which is a closed interval */ struct BitRange { - // 连续区域起始位置在bitmap中的索引 + // Index of the starting position of a continuous region in Bitmap uint32_t beginIndex; - // 连续区域结束位置在bitmap中的索引 + // Index of the end position of a continuous region in Bitmap uint32_t endIndex; }; - -std::string BitRangeVecToString(const std::vector &ranges); +std::string BitRangeVecToString(const std::vector& ranges); class Bitmap { public: /** - * 新建bitmap时的构造函数 - * @param bits: 要构造的bitmap的位数 + * Constructor when creating a new bitmap + * @param bits: The number of bits to construct the bitmap */ explicit Bitmap(uint32_t bits); /** - * 从已有的快照文件初始化时的构造函数 - * 构造函数内部会再new一个新的bitmap,然后从参数中的bitmap memcpy过去 - * @param bits: bitmap的位数 - * @param bitmap: 外部提供的用于初始化的bitmap + * Constructor when initializing from an existing snapshot file + * The constructor will create a new bitmap internally, and then use the + * bitmap memcpy in the parameters + * @param bits: Bitmap bits + * @param bitmap: An externally provided bitmap for initialization */ explicit Bitmap(uint32_t bits, const char* bitmap); @@ -70,142 +71,158 @@ class Bitmap { ~Bitmap(); /** - * 拷贝构造,使用深拷贝 - * @param bitmap:从该对象拷贝内容 + * Copy construction, using deep copy + * @param bitmap: Copy content from this object */ Bitmap(const Bitmap& bitmap); /** - * 赋值函数,使用深拷贝 - * @param bitmap:从该对象拷贝内容 - * @reutrn:返回拷贝后对象引用 + * Assignment function, using deep copy + * @param bitmap: Copy content from this object + * @reutrn: Returns the copied object reference */ - Bitmap& operator = (const Bitmap& bitmap); + Bitmap& operator=(const Bitmap& bitmap); Bitmap(Bitmap&& other) noexcept; Bitmap& operator=(Bitmap&& other) noexcept; /** - * 比较两个bitmap是否相同 - * @param bitmap:待比较的bitmap - * @return:如果相同返回true,如果不同返回false + * Compare whether two 
bitmaps are the same
+     * @param bitmap: The bitmap to compare against
+     * @return: true if they are the same, false otherwise
      */
-    bool operator == (const Bitmap& bitmap) const;
+    bool operator==(const Bitmap& bitmap) const;
 
     /**
-     * 比较两个bitmap是否不同
-     * @param bitmap:待比较的bitmap
-     * @return:如果不同返回true,如果相同返回false
+     * Compare whether two bitmaps are different
+     * @param bitmap: The bitmap to compare against
+     * @return: true if they are different, false otherwise
      */
-    bool operator != (const Bitmap& bitmap) const;
+    bool operator!=(const Bitmap& bitmap) const;
 
     /**
-     * 将所有位置1
+     * Set all bits to 1
      */
     void Set();
 
     /**
-     * 将指定位置1
-     * @param index: 指定位的位置
+     * Set the bit at the given position to 1
+     * @param index: Position of the bit
      */
     void Set(uint32_t index);
 
     /**
-     * 将指定范围的位置为1
-     * @param startIndex: 范围起始位置,包括此位置
-     * @param endIndex: 范围结束位置,包括此位置
+     * Set all bits in the given range to 1
+     * @param startIndex: Start of the range, inclusive
+     * @param endIndex: End of the range, inclusive
      */
     void Set(uint32_t startIndex, uint32_t endIndex);
 
     /**
-     * 将所有位置0
+     * Clear all bits to 0
      */
     void Clear();
 
     /**
-     * 将指定位置0
-     * @param index: 指定位的位置
+     * Clear the bit at the given position to 0
+     * @param index: Position of the bit
      */
     void Clear(uint32_t index);
 
     /**
-     * 将指定范围的位置为0
-     * @param startIndex: 范围起始位置,包括此位置
-     * @param endIndex: 范围结束位置,包括此位置
+     * Clear all bits in the given range to 0
+     * @param startIndex: Start of the range, inclusive
+     * @param endIndex: End of the range, inclusive
      */
     void Clear(uint32_t startIndex, uint32_t endIndex);
 
     /**
-     * 获取指定位置位的状态
-     * @param index: 指定位的位置
-     * @return: true表示当前位状态为1,false表示为0
+     * Get the state of the bit at the given position
+     * @param index: Position of the bit
+     * @return: true if the bit is 1, false if it is 0
      */
     bool Test(uint32_t index) const;
 
     /**
-     * 获取指定位置及之后的首个位为1的位置
-     * @param index: 指定位的位置,包含此位置
-     * @return: 首个位为1的位置,如果不存在返回NO_POS
+     * Find the first bit set to 1 at or after the given position
+     * @param index: Position to start searching from, inclusive
+     * @return: Position of the first set bit, or NO_POS if none exists
      */
     uint32_t NextSetBit(uint32_t index) const;
 
     /**
-     * 获取指定起始位置到结束位置之间的的首个位为1的位置
-     * @param startIndex: 起始位置,包含此位置
-     * @param endIndex: 结束位置,包含此位置
-     * @return: 首个位为1的位置,如果指定范围内不存在则返回NO_POS
+     * Find the first bit set to 1 between the given start and end positions
+     * @param startIndex: Start position, inclusive
+     * @param endIndex: End position, inclusive
+     * @return: Position of the first set bit, or NO_POS if none exists
+     * within the range
      */
     uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const;
 
     /**
-     * 获取指定位置及之后的首个位为0的位置
-     * @param index: 指定位的位置,包含此位置
-     * @return: 首个位为0的位置,如果不存在返回NO_POS
+     * Find the first bit set to 0 at or after the given position
+     * @param index: Position to start searching from, inclusive
+     * @return: The position where the first bit is 0.
If it does not exist, + * return NO_POS */ uint32_t NextClearBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为0的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为0的位置,如果指定范围内不存在则返回NO_POS + * Gets the position where the first bit between the specified start + * position and end position is 0 + * @param startIndex: The starting position, including this position + * @param endIndex: End position, including this position + * @return: The position where the first bit is 0. If it does not exist + * within the specified range, return NO_POS */ uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 将bitmap的指定区域分割成若干连续区域,划分依据为位状态,连续区域内的位状态一致 - * 例如:00011100会被划分为三个区域,[0,2]、[3,5]、[6,7] - * @param startIndex: 指定区域的起始索引 - * @param endIndex: 指定范围的结束索引 - * @param clearRanges: 存放位状态为0的连续区域的向量,可以指定为nullptr - * @param setRanges: 存放位状态为1的连续区域的向量,可以指定为nullptr - */ - void Divide(uint32_t startIndex, - uint32_t endIndex, + * Divide the designated area of the bitmap into several continuous areas + * based on bit states, with consistent bit states within the continuous + * areas For example, 00011100 will be divided into three regions: [0,2], + * [3,5], [6,7] + * @param startIndex: The starting index of the specified region + * @param endIndex: The end index of the specified range + * @param clearRanges: A vector that stores a continuous region with a bit + * state of 0, which can be specified as nullptr + * @param setRanges: A vector that stores a continuous region with a bit + * state of 1, which can be specified as nullptr + */ + void Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const; /** - * bitmap的有效位数 - * @return: 返回位数 + * Bitmap's significant digits + * @return: Returns the number of digits */ uint32_t Size() const; /** - * 获取bitmap的内存指针,用于持久化bitmap - * @return: bitmap的内存指针 + * Obtain a memory pointer to Bitmap for persisting Bitmap + * @return: Memory pointer to bitmap */ const char* GetBitmap() const; private: - // bitmap的字节数 + // Bytes of bitmap int unitCount() const { - // 同 (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE + // Same as (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR; } - // 指定位置的bit在其所在字节中的偏移 + // The offset of the bit at the specified position in its byte int indexOfUnit(uint32_t index) const { - // 同 index / BITMAP_UNIT_SIZE + // Same as index / BITMAP_UNIT_SIZE return index >> ALIGN_FACTOR; } - // 逻辑计算掩码值 + // Logical calculation mask value char mask(uint32_t index) const { - int indexInUnit = index % BITMAP_UNIT_SIZE; + int indexInUnit = index % BITMAP_UNIT_SIZE; char mask = 0x01 << indexInUnit; return mask; } public: - // 表示不存在的位置,值为0xffffffff + // Represents a non-existent position, with a value of 0xffffffff static const uint32_t NO_POS; private: - uint32_t bits_; - char* bitmap_; + uint32_t bits_; + char* bitmap_; }; } // namespace common diff --git a/src/common/channel_pool.h b/src/common/channel_pool.h index 458baa33d3..fb549023e9 100644 --- a/src/common/channel_pool.h +++ b/src/common/channel_pool.h @@ -24,9 +24,10 @@ #define SRC_COMMON_CHANNEL_POOL_H_ #include -#include -#include + #include +#include +#include #include #include "src/common/concurrent/concurrent.h" @@ -39,18 +40,18 @@ namespace common { class ChannelPool { public: /** - * @brief 从channelMap获取或创建并Init到指定地址的channel + * @brief Obtain or create a channel from channelMap and Init it to the + * specified address * - * @param addr 对端的地址 - * @param[out] 
channelPtr 到指定地址的channel + * @param addr The address of the opposite end + * @param[out] channelPtr to the specified channel address * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ - int GetOrInitChannel(const std::string& addr, - ChannelPtr* channelPtr); + int GetOrInitChannel(const std::string& addr, ChannelPtr* channelPtr); /** - * @brief 清空map + * @brief Clear map */ void Clear(); @@ -62,5 +63,4 @@ class ChannelPool { } // namespace common } // namespace curve -#endif // SRC_COMMON_CHANNEL_POOL_H_ - +#endif // SRC_COMMON_CHANNEL_POOL_H_ diff --git a/src/common/concurrent/bounded_blocking_queue.h b/src/common/concurrent/bounded_blocking_queue.h index 56c59fcfc1..7d8449d812 100644 --- a/src/common/concurrent/bounded_blocking_queue.h +++ b/src/common/concurrent/bounded_blocking_queue.h @@ -23,12 +23,12 @@ #ifndef SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ #define SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_ +#include #include +#include //NOLINT #include -#include //NOLINT #include -#include //NOLINT -#include +#include //NOLINT #include #include "src/common/uncopyable.h" @@ -36,18 +36,17 @@ namespace curve { namespace common { -template +template class BBQItem { public: - explicit BBQItem(const T &t, bool stop = false) - : item_(t) { + explicit BBQItem(const T& t, bool stop = false) : item_(t) { stop_.store(stop, std::memory_order_release); } - BBQItem(const BBQItem &bbqItem) { + BBQItem(const BBQItem& bbqItem) { item_ = bbqItem.item_; stop_.store(bbqItem.stop_, std::memory_order_release); } - BBQItem &operator=(const BBQItem &bbqItem) { + BBQItem& operator=(const BBQItem& bbqItem) { if (&bbqItem == this) { return *this; } @@ -56,13 +55,9 @@ class BBQItem { return *this; } - bool IsStop() const { - return stop_.load(std::memory_order_acquire); - } + bool IsStop() const { return stop_.load(std::memory_order_acquire); } - T Item() { - return item_; - } + T Item() { return item_; } private: T item_; @@ -70,18 +65,13 @@ class BBQItem { }; /** - * 有 capacity 限制的阻塞队列,线程安全 + * Blocking queues with capacity restrictions, thread safe */ -template +template class BoundedBlockingDeque : public Uncopyable { public: BoundedBlockingDeque() - : mutex_(), - notEmpty_(), - notFull_(), - deque_(), - capacity_(0) { - } + : mutex_(), notEmpty_(), notFull_(), deque_(), capacity_(0) {} int Init(const int capacity) { if (0 >= capacity) { @@ -91,7 +81,7 @@ class BoundedBlockingDeque : public Uncopyable { return 0; } - void PutBack(const T &x) { + void PutBack(const T& x) { std::unique_lock guard(mutex_); while (deque_.size() == capacity_) { notFull_.wait(guard); @@ -100,7 +90,7 @@ class BoundedBlockingDeque : public Uncopyable { notEmpty_.notify_one(); } - void PutFront(const T &x) { + void PutFront(const T& x) { std::unique_lock guard(mutex_); while (deque_.size() == capacity_) { notFull_.wait(guard); diff --git a/src/common/stringstatus.h b/src/common/stringstatus.h index 203b851bfc..9883f0d3ee 100644 --- a/src/common/stringstatus.h +++ b/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef SRC_COMMON_STRINGSTATUS_H_ -#define SRC_COMMON_STRINGSTATUS_H_ +#ifndef SRC_COMMON_STRINGSTATUS_H_ +#define SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace curve { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs: Used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix: Prefix + * @param[in] name: Name */ - void ExposeAs(const 
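The PutBack/PutFront hunks above reformat the classic two-condition-variable pattern. For reference, here is a minimal bounded deque sketch showing the same notFull_/notEmpty_ handshake in isolation, without the BBQItem stop flag; it is an illustration of the pattern, not the curve class.

```cpp
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

// Producers block while the deque is full; consumers block while it is empty.
template <typename T>
class BoundedDeque {
 public:
    explicit BoundedDeque(size_t capacity) : capacity_(capacity) {}

    void PutBack(const T& x) {
        std::unique_lock<std::mutex> guard(mutex_);
        notFull_.wait(guard, [this] { return deque_.size() < capacity_; });
        deque_.push_back(x);
        notEmpty_.notify_one();
    }

    T TakeFront() {
        std::unique_lock<std::mutex> guard(mutex_);
        notEmpty_.wait(guard, [this] { return !deque_.empty(); });
        T x = deque_.front();
        deque_.pop_front();
        notFull_.notify_one();
        return x;
    }

 private:
    std::mutex mutex_;
    std::condition_variable notFull_;
    std::condition_variable notEmpty_;
    std::deque<T> deque_;
    const size_t capacity_;
};

int main() {
    BoundedDeque<int> q(2);  // small capacity so the producer really blocks
    std::thread producer([&] {
        for (int i = 0; i < 5; ++i) q.PutBack(i);
    });
    for (int i = 0; i < 5; ++i) std::cout << q.TakeFront() << "\n";
    producer.join();
    return 0;
}
```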
std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 设置每项key-value信息 + * @brief Set: sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,30 +49,31 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update: Sets the key-value pairs in the current // NOLINT + * key-value map to status as JSON strings // NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey: Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key: Specify the key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody: obtains the JSON format string corresponding to the + * current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common } // namespace curve #endif // SRC_COMMON_STRINGSTATUS_H_ - diff --git a/src/common/timeutility.h b/src/common/timeutility.h index 1ba3483d34..d3fc2d244c 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -25,9 +25,10 @@ #include #include #include + +#include #include #include -#include namespace curve { namespace common { @@ -57,7 +58,8 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +69,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // The timestamp is converted to standard time and returned in seconds static inline std::string TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; @@ -85,13 +87,9 @@ class ExpiredTime { public: ExpiredTime() : startUs_(TimeUtility::GetTimeofDayUs()) {} - double ExpiredSec() const { - return ExpiredUs() / 1000000; - } + double ExpiredSec() const { return ExpiredUs() / 1000000; } - double ExpiredMs() const { - return ExpiredUs() / 1000; - } + double ExpiredMs() const { return ExpiredUs() / 1000; } double ExpiredUs() const { return TimeUtility::GetTimeofDayUs() - startUs_; @@ -101,7 +99,7 @@ class ExpiredTime { uint64_t startUs_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve -#endif // SRC_COMMON_TIMEUTILITY_H_ +#endif // SRC_COMMON_TIMEUTILITY_H_ diff --git a/src/common/uuid.h b/src/common/uuid.h index 8fbc41f61c..a68f865fcf 100644 --- a/src/common/uuid.h +++ b/src/common/uuid.h @@ -27,26 +27,29 @@ extern "C" { void uuid_generate(uuid_t out); void uuid_generate_random(uuid_t out); void uuid_generate_time(uuid_t out); -// 指明由uuid_generate_time生成的uuid是否使用了时间同步机制,不进行封装。 +// Indicate whether the UUID generated by uuid_generate_time utilizes a time +// synchronization mechanism without encapsulation int uuid_generate_time_safe(uuid_t out); } #include #define BUFF_LEN 36 namespace curve { namespace common { -// 生成uuid的生成器 +// Generator for generating uuid class UUIDGenerator { public: UUIDGenerator() {} /** - * @brief 生成uuid,优先采用的算法 - * 
如果存在一个高质量的随机数生成器(/dev/urandom), - UUID将基于其生成的随机数产生。 - 备用算法:在高质量的随机数生成器不可用的情况下,如果可以获取到MAC地址, - 则将利用由随机数生成器产生的随机数、当前时间、MAC地址生成UUID。 + * @brief Generate a UUID with a prioritized algorithm selection + * If a high-quality random number generator (/dev/urandom) is available, + * the UUID will be generated based on the random numbers it generates. + * Backup algorithm: If a high-quality random number generator is not + * available and the MAC address can be obtained, the UUID will be + * generated from the combination of a random number, the current time, + * and the MAC address. + * @param : + * @return The generated UUID */ std::string GenerateUUID() { uuid_t out; @@ -58,11 +61,14 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 使用全局时钟、MAC地址。有MAC地址泄露风险。为了保证唯一性还使用的时间同步机制, - * 如果,时间同步机制不可用,多台机器上生成的uuid可能会重复。 + * @brief Generate a time-based UUID. + * It utilizes the global clock and MAC address, but there is a risk of + * MAC address leakage. To ensure uniqueness, it also employs a time + * synchronization mechanism. However, if the time synchronization + * mechanism is not available, there is a possibility of UUID duplication + * when generated on multiple machines. + * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUIDTime() { uuid_t out; @@ -74,10 +80,11 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 强制完全使用随机数,优先使用(/dev/urandom),备用(伪随机数生成器)。 - * 在使用伪随机数生成器的情况下,uuid有重复的风险。 - * @return 生成的uuid + * @brief Generate a UUID + * Always use random numbers, preferring (/dev/urandom) with a + * pseudo-random number generator as the fallback. When the pseudo-random + * number generator is used, there is a risk of UUID duplication. + * @return The generated UUID */ std::string GenerateUUIDRandom() { uuid_t out; diff --git a/src/common/wait_interval.h b/src/common/wait_interval.h index 69c82143c2..a6f64d0c0e 100644 --- a/src/common/wait_interval.h +++ b/src/common/wait_interval.h @@ -20,36 +20,37 @@ * Author: lixiaocui */ -#ifndef SRC_COMMON_WAIT_INTERVAL_H_ -#define SRC_COMMON_WAIT_INTERVAL_H_ +#ifndef SRC_COMMON_WAIT_INTERVAL_H_ +#define SRC_COMMON_WAIT_INTERVAL_H_ #include "src/common/interruptible_sleeper.h" namespace curve { namespace common { -class WaitInterval { +class WaitInterval { public: /** - * Init 初始化任务的执行间隔 + * Init: Initialize the execution interval of the task * - * @param[in] intervalMs 执行间隔单位是ms + * @param[in] intervalMs: The execution interval, in ms */ void Init(uint64_t intervalMs); /** - * WaitForNextExcution 根据最近一次的执行时间点和周期确定需要等待多久之后再执行 + * WaitForNextExcution: Determines how long to wait before the next + * execution based on the last execution time and the interval */ void WaitForNextExcution(); /** - * StopWait 退出sleep等待 + * StopWait: Stop waiting and exit the sleep */ void StopWait(); private: - // 最近一次的执行时间 + // Last execution time uint64_t lastSend_; - // 任务的执行周期 + // Execution interval of the task uint64_t intevalMs_; InterruptibleSleeper sleeper_; diff --git a/src/mds/nameserver2/clean_task.h b/src/mds/nameserver2/clean_task.h index 9001312870..c865ff6271 100644 --- a/src/mds/nameserver2/clean_task.h +++ b/src/mds/nameserver2/clean_task.h @@ -23,24 +23,26 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#include //NOLINT +#include //NOLINT + #include #include //NOLINT #include -#include //NOLINT -#include //NOLINT + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/task_progress.h" -#include "src/mds/nameserver2/clean_core.h" -#include
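For context on the three generators wrapped above, a minimal libuuid usage sketch follows. It assumes only the documented libuuid calls (uuid_generate, uuid_generate_time, uuid_generate_random, uuid_unparse) and a 37-byte buffer for the 36-character text form (the header's BUFF_LEN) plus the terminating NUL.

```cpp
#include <uuid/uuid.h>  // libuuid; link with -luuid

#include <iostream>

int main() {
    uuid_t out;    // 16 raw bytes
    char buf[37];  // 36 chars + trailing '\0'

    uuid_generate(out);         // preferred: /dev/urandom when available
    uuid_unparse(out, buf);
    std::cout << "uuid_generate:        " << buf << "\n";

    uuid_generate_time(out);    // time + MAC based (leaks the MAC address)
    uuid_unparse(out, buf);
    std::cout << "uuid_generate_time:   " << buf << "\n";

    uuid_generate_random(out);  // always random; PRNG fallback may collide
    uuid_unparse(out, buf);
    std::cout << "uuid_generate_random: " << buf << "\n";
    return 0;
}
```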
"src/mds/nameserver2/async_delete_snapshot_entity.h" -#include "src/common/concurrent/dlock.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/concurrent/dlock.h" +#include "src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/task_progress.h" using curve::common::DLock; namespace curve { namespace mds { -typedef uint64_t TaskIDType; +typedef uint64_t TaskIDType; // default clean task retry times const uint32_t kDefaultTaskRetryTimes = 5; @@ -52,56 +54,40 @@ class Task { virtual void Run(void) = 0; std::function Closure() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } - TaskProgress GetTaskProgress(void) const { - return progress_; - } + TaskProgress GetTaskProgress(void) const { return progress_; } - void SetTaskProgress(TaskProgress progress) { - progress_ = progress; - } + void SetTaskProgress(TaskProgress progress) { progress_ = progress; } - TaskProgress* GetMutableTaskProgress(void) { - return &progress_; - } + TaskProgress* GetMutableTaskProgress(void) { return &progress_; } - void SetTaskID(TaskIDType taskID) { - taskID_ = taskID; - } + void SetTaskID(TaskIDType taskID) { taskID_ = taskID; } - TaskIDType GetTaskID(void) const { - return taskID_; - } + TaskIDType GetTaskID(void) const { return taskID_; } - void SetRetryTimes(uint32_t retry) { - retry_ = retry; - } + void SetRetryTimes(uint32_t retry) { retry_ = retry; } void Retry() { retry_--; progress_ = TaskProgress(); } - bool RetryTimesExceed() { - return retry_ == 0; - } + bool RetryTimesExceed() { return retry_ == 0; } protected: TaskIDType taskID_; TaskProgress progress_; - // 任务最大重试次数 + // Maximum number of task retries uint32_t retry_; }; -class SnapShotCleanTask: public Task { +class SnapShotCleanTask : public Task { public: - SnapShotCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo, - std::shared_ptr entity = nullptr) { + SnapShotCleanTask( + TaskIDType taskID, std::shared_ptr core, FileInfo fileInfo, + std::shared_ptr entity = nullptr) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -110,29 +96,29 @@ class SnapShotCleanTask: public Task { SetRetryTimes(kDefaultTaskRetryTimes); } void Run(void) override { - StatusCode ret = cleanCore_->CleanSnapShotFile(fileInfo_, - GetMutableTaskProgress()); + StatusCode ret = + cleanCore_->CleanSnapShotFile(fileInfo_, GetMutableTaskProgress()); if (asyncEntity_ != nullptr) { brpc::ClosureGuard doneGuard(asyncEntity_->GetClosure()); brpc::Controller* cntl = static_cast(asyncEntity_->GetController()); - DeleteSnapShotResponse *response = - asyncEntity_->GetDeleteResponse(); - const DeleteSnapShotRequest *request = - asyncEntity_->GetDeleteRequest(); + DeleteSnapShotResponse* response = + asyncEntity_->GetDeleteResponse(); + const DeleteSnapShotRequest* request = + asyncEntity_->GetDeleteRequest(); response->set_statuscode(ret); if (ret != StatusCode::kOK) { LOG(ERROR) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile fail, filename = " - << request->filename() - << ", sequencenum = " << request->seq() - << ", statusCode = " << ret; + << ", CleanSnapShotFile fail, filename = " + << request->filename() + << ", sequencenum = " << request->seq() + << ", statusCode = " << ret; } else { LOG(INFO) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile ok, filename = " - << request->filename() - << ", sequencenum = " << request->seq(); + << ", CleanSnapShotFile ok, filename = " + << request->filename() + << ", 
sequencenum = " << request->seq(); } } return; @@ -144,10 +130,10 @@ class SnapShotCleanTask: public Task { std::shared_ptr asyncEntity_; }; -class CommonFileCleanTask: public Task { +class CommonFileCleanTask : public Task { public: CommonFileCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo) { + FileInfo fileInfo) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -211,4 +197,4 @@ class SegmentCleanTask : public Task { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ diff --git a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 0cd66e9d09..6e085df318 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -22,18 +22,18 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ +#include #include #include -#include +#include "src/common/concurrent/dlock.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/wait_interval.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" namespace curve { namespace snapshotcloneserver { @@ -44,26 +44,16 @@ class TaskCloneInfo { public: TaskCloneInfo() = default; - TaskCloneInfo(const CloneInfo &cloneInfo, - uint32_t progress) - : cloneInfo_(cloneInfo), - cloneProgress_(progress) {} + TaskCloneInfo(const CloneInfo& cloneInfo, uint32_t progress) + : cloneInfo_(cloneInfo), cloneProgress_(progress) {} - void SetCloneInfo(const CloneInfo &cloneInfo) { - cloneInfo_ = cloneInfo; - } + void SetCloneInfo(const CloneInfo& cloneInfo) { cloneInfo_ = cloneInfo; } - CloneInfo GetCloneInfo() const { - return cloneInfo_; - } + CloneInfo GetCloneInfo() const { return cloneInfo_; } - void SetCloneProgress(uint32_t progress) { - cloneProgress_ = progress; - } + void SetCloneProgress(uint32_t progress) { cloneProgress_ = progress; } - uint32_t GetCloneProgress() const { - return cloneProgress_; - } + uint32_t GetCloneProgress() const { return cloneProgress_; } Json::Value ToJsonObj() const { Json::Value cloneTaskObj; @@ -72,88 +62,76 @@ class TaskCloneInfo { cloneTaskObj["User"] = info.GetUser(); cloneTaskObj["File"] = info.GetDest(); cloneTaskObj["Src"] = info.GetSrc(); - cloneTaskObj["TaskType"] = static_cast ( - info.GetTaskType()); - cloneTaskObj["TaskStatus"] = static_cast ( - info.GetStatus()); + cloneTaskObj["TaskType"] = static_cast(info.GetTaskType()); + cloneTaskObj["TaskStatus"] = static_cast(info.GetStatus()); cloneTaskObj["IsLazy"] = info.GetIsLazy(); - cloneTaskObj["NextStep"] = static_cast (info.GetNextStep()); + cloneTaskObj["NextStep"] = static_cast(info.GetNextStep()); cloneTaskObj["Time"] = info.GetTime(); cloneTaskObj["Progress"] = GetCloneProgress(); - cloneTaskObj["FileType"] = static_cast (info.GetFileType()); + cloneTaskObj["FileType"] = static_cast(info.GetFileType()); return cloneTaskObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { 
CloneInfo info; info.SetTaskId(jsonObj["UUID"].asString()); info.SetUser(jsonObj["User"].asString()); info.SetDest(jsonObj["File"].asString()); info.SetSrc(jsonObj["Src"].asString()); - info.SetTaskType(static_cast( - jsonObj["TaskType"].asInt())); - info.SetStatus(static_cast( - jsonObj["TaskStatus"].asInt())); + info.SetTaskType( + static_cast(jsonObj["TaskType"].asInt())); + info.SetStatus(static_cast(jsonObj["TaskStatus"].asInt())); info.SetIsLazy(jsonObj["IsLazy"].asBool()); info.SetNextStep(static_cast(jsonObj["NextStep"].asInt())); info.SetTime(jsonObj["Time"].asUInt64()); - info.SetFileType(static_cast( - jsonObj["FileType"].asInt())); + info.SetFileType( + static_cast(jsonObj["FileType"].asInt())); SetCloneInfo(info); } private: - CloneInfo cloneInfo_; - uint32_t cloneProgress_; + CloneInfo cloneInfo_; + uint32_t cloneProgress_; }; class CloneFilterCondition { public: CloneFilterCondition() - : uuid_(nullptr), - source_(nullptr), - destination_(nullptr), - user_(nullptr), - status_(nullptr), - type_(nullptr) {} - - CloneFilterCondition(const std::string *uuid, const std::string *source, - const std::string *destination, const std::string *user, - const std::string *status, const std::string *type) - : uuid_(uuid), - source_(source), - destination_(destination), - user_(user), - status_(status), - type_(type) {} - bool IsMatchCondition(const CloneInfo &cloneInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } - void SetSource(const std::string *source) { - source_ = source; - } - void SetDestination(const std::string *destination) { + : uuid_(nullptr), + source_(nullptr), + destination_(nullptr), + user_(nullptr), + status_(nullptr), + type_(nullptr) {} + + CloneFilterCondition(const std::string* uuid, const std::string* source, + const std::string* destination, + const std::string* user, const std::string* status, + const std::string* type) + : uuid_(uuid), + source_(source), + destination_(destination), + user_(user), + status_(status), + type_(type) {} + bool IsMatchCondition(const CloneInfo& cloneInfo); + + void SetUuid(const std::string* uuid) { uuid_ = uuid; } + void SetSource(const std::string* source) { source_ = source; } + void SetDestination(const std::string* destination) { destination_ = destination; } - void SetUser(const std::string *user) { - user_ = user; - } - void SetStatus(const std::string *status) { - status_ = status; - } - void SetType(const std::string *type) { - type_ = type; - } + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } + void SetType(const std::string* type) { type_ = type; } private: - const std::string *uuid_; - const std::string *source_; - const std::string *destination_; - const std::string *user_; - const std::string *status_; - const std::string *type_; + const std::string* uuid_; + const std::string* source_; + const std::string* destination_; + const std::string* user_; + const std::string* status_; + const std::string* type_; }; class CloneServiceManagerBackend { public: @@ -161,7 +139,8 @@ class CloneServiceManagerBackend { virtual ~CloneServiceManagerBackend() {} /** - * @brief 后台扫描线程执行函数,扫描克隆卷是否存在 + * @brief Background scan thread execution function to scan for the + * existence of cloned volumes * */ virtual void Func() = 0; @@ -177,12 +156,9 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { public: explicit CloneServiceManagerBackendImpl( std::shared_ptr cloneCore) - : cloneCore_(cloneCore), - isStop_(true) { - } 
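IsMatchCondition is declared above but its body is not part of this diff; given the null-initialized pointer members, a null field presumably means "no constraint". A small sketch under that assumption, with a simplified record standing in for CloneInfo:

```cpp
#include <iostream>
#include <string>

// Simplified stand-in for CloneInfo.
struct Record {
    std::string user;
    std::string status;
};

// Null pointer == "no constraint", mirroring the pointer members of
// CloneFilterCondition (an assumption based on its null defaults).
bool Matches(const Record& r, const std::string* user,
             const std::string* status) {
    if (user != nullptr && *user != r.user) return false;
    if (status != nullptr && *status != r.status) return false;
    return true;
}

int main() {
    Record r{"alice", "done"};
    std::string user = "alice";
    std::cout << Matches(r, &user, nullptr) << "\n";  // 1: status unconstrained
    std::string status = "error";
    std::cout << Matches(r, &user, &status) << "\n";  // 0: status mismatch
    return 0;
}
```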
+ : cloneCore_(cloneCore), isStop_(true) {} - ~CloneServiceManagerBackendImpl() { - } + ~CloneServiceManagerBackendImpl() {} void Func() override; void Init(uint32_t recordIntevalMs, uint32_t roundIntevalMs) override; @@ -191,13 +167,14 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { private: std::shared_ptr cloneCore_; - // 后台扫描线程,扫描clone卷是否存在 + // Background scan thread to check if clone volume exists std::thread backEndReferenceScanThread_; - // 当前后台扫描是否停止,用于支持start,stop功能 + // Is the current background scanning stopped? Used to + // support start and stop functions std::atomic_bool isStop_; - // 后台扫描线程记录使用定时器 + // Using a timer for background scanning thread records common::WaitInterval recordWaitInterval_; - // 后台扫描线程每轮使用定时器 + // The backend scanning thread uses a timer for each round common::WaitInterval roundWaitInterval_; }; @@ -207,250 +184,242 @@ class CloneServiceManager { std::shared_ptr cloneTaskMgr, std::shared_ptr cloneCore, std::shared_ptr cloneServiceManagerBackend) - : cloneTaskMgr_(cloneTaskMgr), - cloneCore_(cloneCore), - cloneServiceManagerBackend_(cloneServiceManagerBackend) { + : cloneTaskMgr_(cloneTaskMgr), + cloneCore_(cloneCore), + cloneServiceManagerBackend_(cloneServiceManagerBackend) { destFileLock_ = std::make_shared(); } virtual ~CloneServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 从文件或快照克隆出一个文件 + * @brief Clone a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source: Uuid of file or snapshot + * @param user: The user of the file or snapshot + * @param destination: Destination file + * @param lazyFlag: Is in lazy mode + * @param closure: Asynchronous callback entity + * @param[out] taskId: Task ID * - * @return 错误码 + * @return error code */ - virtual int CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int CloneFile(const UUID& source, const std::string& user, + const std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId); /** - * @brief 从文件或快照恢复一个文件 + * @brief Restore a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件名 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source: Uuid of file or snapshot + * @param user: The user of the file or snapshot + * @param destination: Destination file name + * @param lazyFlag: Is in lazy mode + * @param closure: Asynchronous callback entity + * @param[out] taskId: Task ID * - * @return 错误码 + * @return error code */ - virtual int RecoverFile(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int RecoverFile(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* 
taskId); /** - * @brief 安装克隆文件的数据,用于Lazy克隆 + * @brief Install data from clone files for Lazy cloning * - * @param user 用户 - * @param taskId 任务ID + * @param user: user + * @param taskId: Task ID * - * @return 错误码 + * @return error code */ - virtual int Flatten( - const std::string &user, - const TaskIdType &taskId); + virtual int Flatten(const std::string& user, const TaskIdType& taskId); /** - * @brief 查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user * - * @param user 用户名 - * @param info 克隆/恢复任务信息 + * @param user: username + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfo(const std::string &user, - std::vector *info); + virtual int GetCloneTaskInfo(const std::string& user, + std::vector* info); /** - * @brief 通过Id查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user + * through ID * - * @param user 用户名 - * @param taskId 指定的任务Id - * @param info 克隆/恢复任务信息 + * @param user: username + * @param taskId: Task Id specified + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info); + virtual int GetCloneTaskInfoById(const std::string& user, + const TaskIdType& taskId, + std::vector* info); /** - * @brief 通过文件名查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user through + * a file name * - * @param user 用户名 - * @param fileName 指定的文件名 - * @param info 克隆/恢复任务信息 + * @param user: username + * @param fileName: The file name specified + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info); + virtual int GetCloneTaskInfoByName(const std::string& user, + const std::string& fileName, + std::vector* info); /** - * @brief 通过过滤条件查询某个用户的克隆/恢复任务信息 + * @brief: Query a user's clone/restore task information through filtering + * criteria * - * @param filter 过滤条件 - * @param info 克隆/恢复任务信息 + * @param filter: filtering conditions + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition &filter, - std::vector *info); + virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition& filter, + std::vector* info); /** - * @brief 查询src是否有依赖 + * @brief: Check if src has dependencies * - * @param src 指定的文件名 - * @param refStatus 0表示没有依赖,1表示有依赖,2表示需要进一步确认 - * @param needCheckFiles 需要进一步确认的文件列表 + * @param src: specified file name + * @param refStatus: 0 indicates no dependencies, 1 indicates dependencies, + * and 2 indicates further confirmation is needed + * @param needCheckFiles: List of files that require further confirmation * - * @return 错误码 + * @return error code */ - virtual int GetCloneRefStatus(const std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles); + virtual int GetCloneRefStatus(const std::string& src, + CloneRefStatus* refStatus, + std::vector* needCheckFiles); /** - * @brief 清除失败的clone/Recover任务、状态、文件 + * @brief Clear failed clone/recover tasks, status, files * - * @param user 用户名 - * @param taskId 任务Id + * @param user: username + * @param taskId: Task Id * - * @return 错误码 + * @return error code */ - virtual int CleanCloneTask(const std::string &user, - const TaskIdType &taskId); + virtual int CleanCloneTask(const 
std::string& user, + const TaskIdType& taskId); /** - * @brief 重启后恢复未完成clone和recover任务 + * @brief Restore unfinished clone and recover tasks after restarting * - * @return 错误码 + * @return error code */ virtual int RecoverCloneTask(); // for test - void SetDLock(std::shared_ptr dlock) { - dlock_ = dlock; - } + void SetDLock(std::shared_ptr dlock) { dlock_ = dlock; } private: /** - * @brief 从给定的任务列表中获取指定用户的任务集 + * @brief Get the task set of the specified user from the given task list * - * @param cloneInfos 克隆/恢复信息 - * @param user 用户信息 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos: Clone/Restore Information + * @param user: User information + * @param[out] info: Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - const std::string &user, - std::vector *info); + const std::string& user, + std::vector* info); /** - * @brief 从给定的任务列表中获取符合过滤条件的任务集 + * @brief Retrieve task sets that meet the filtering criteria from the given + * task list * - * @param cloneInfos 克隆/恢复信息 - * @param filter 过滤条件 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos: Clone/Restore Information + * @param filter: Filtering conditions + * @param[out] info: Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info); + CloneFilterCondition filter, + std::vector* info); /** - * @brief 获取已经完成任务信息 + * @brief to obtain completed task information * - * @param taskId 任务ID - * @param taskCloneInfoOut 克隆任务信息 + * @param taskId: Task ID + * @param taskCloneInfoOut: Clone task information * - * @return 错误码 + * @return error code */ - int GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut); + int GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut); /** - * @brief 根据克隆任务信息恢复克隆任务 + * @brief Restore clone task based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo: Clone task information * - * @return 错误码 + * @return error code */ - int RecoverCloneTaskInternal(const CloneInfo &cloneInfo); + int RecoverCloneTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 根据克隆任务信息恢复清除克隆任务 + * @brief Restore and clear clone tasks based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo: Clone task information * - * @return 错误码 + * @return error code */ - int RecoverCleanTaskInternal(const CloneInfo &cloneInfo); + int RecoverCleanTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 构建和push Lazy的任务 + * @brief Task of building and pushing Lazy * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo: Clone task information + * @param closure: Asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); /** - * @brief 构建和push 非Lazy的任务 + * @brief Build and push non Lazy tasks * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo: Clone task information + * @param closure: Asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); private: std::shared_ptr dlockOpts_; @@ -461,8 +430,6 @@ class CloneServiceManager { std::shared_ptr cloneServiceManagerBackend_; }; - - } // namespace 
snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/clone/clone_task.h b/src/snapshotcloneserver/clone/clone_task.h index 8ea5c6be51..2ddc10976e 100644 --- a/src/snapshotcloneserver/clone/clone_task.h +++ b/src/snapshotcloneserver/clone/clone_task.h @@ -23,17 +23,17 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ -#include #include +#include -#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/common/concurrent/dlock.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" +#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" using ::curve::common::DLock; @@ -42,33 +42,23 @@ namespace snapshotcloneserver { class CloneTaskInfo : public TaskInfo { public: - CloneTaskInfo(const CloneInfo &cloneInfo, - std::shared_ptr metric, - std::shared_ptr closure) + CloneTaskInfo(const CloneInfo& cloneInfo, + std::shared_ptr metric, + std::shared_ptr closure) : TaskInfo(), cloneInfo_(cloneInfo), metric_(metric), closure_(closure) {} - CloneInfo& GetCloneInfo() { - return cloneInfo_; - } + CloneInfo& GetCloneInfo() { return cloneInfo_; } - const CloneInfo& GetCloneInfo() const { - return cloneInfo_; - } + const CloneInfo& GetCloneInfo() const { return cloneInfo_; } - TaskIdType GetTaskId() const { - return cloneInfo_.GetTaskId(); - } + TaskIdType GetTaskId() const { return cloneInfo_.GetTaskId(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } - std::shared_ptr GetClosure() { - return closure_; - } + std::shared_ptr GetClosure() { return closure_; } private: CloneInfo cloneInfo_; @@ -76,20 +66,16 @@ class CloneTaskInfo : public TaskInfo { std::shared_ptr closure_; }; -std::ostream& operator<<(std::ostream& os, const CloneTaskInfo &taskInfo); +std::ostream& operator<<(std::ostream& os, const CloneTaskInfo& taskInfo); class CloneTaskBase : public Task { public: - CloneTaskBase(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} - - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + CloneTaskBase(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} + + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: std::shared_ptr taskInfo_; @@ -98,9 +84,8 @@ class CloneTaskBase : public Task { class CloneTask : public CloneTaskBase { public: - CloneTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneTask(const TaskIdType& taskId, std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} void Run() override { @@ -121,17 +106,14 @@ class CloneTask : public CloneTaskBase { } }; - class CloneCleanTask : public CloneTaskBase { public: - CloneCleanTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneCleanTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + 
std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} - void Run() override { - core_->HandleCleanCloneOrRecoverTask(taskInfo_); - } + void Run() override { core_->HandleCleanCloneOrRecoverTask(taskInfo_); } }; struct SnapCloneCommonClosure : public SnapCloneClosure { @@ -145,9 +127,9 @@ struct SnapCloneCommonClosure : public SnapCloneClosure { }; struct CreateCloneChunkContext { - // 数据源 + // Data source std::string location; - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seqNum uint64_t sn; @@ -155,16 +137,16 @@ struct CreateCloneChunkContext { uint64_t csn; // chunk size uint64_t chunkSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; - // chunk信息 - struct CloneChunkInfo *cloneChunkInfo; + // Chunk Information + struct CloneChunkInfo* cloneChunkInfo; }; using CreateCloneChunkContextPtr = std::shared_ptr; @@ -173,21 +155,20 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { CreateCloneChunkClosure( std::shared_ptr tracker, CreateCloneChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "CreateCloneChunkClosure return fail" - << ", ret = " << context_->retCode - << ", location = " << context_->location - << ", logicalPoolId = " << context_->cidInfo.lpid_ - << ", copysetId = " << context_->cidInfo.cpid_ - << ", chunkId = " << context_->cidInfo.cid_ - << ", seqNum = " << context_->sn - << ", csn = " << context_->csn - << ", taskid = " << context_->taskid; + << ", ret = " << context_->retCode + << ", location = " << context_->location + << ", logicalPoolId = " << context_->cidInfo.lpid_ + << ", copysetId = " << context_->cidInfo.cpid_ + << ", chunkId = " << context_->cidInfo.cid_ + << ", seqNum = " << context_->sn + << ", csn = " << context_->csn + << ", taskid = " << context_->taskid; } tracker_->PushResultContext(context_); tracker_->HandleResponse(context_->retCode); @@ -197,21 +178,21 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { }; struct RecoverChunkContext { - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; - // chunk的分片index + // Chunk's sharding index uint64_t partIndex; - // 总的chunk分片数 + // Total Chunk Fragments uint64_t totalPartNum; - // 分片大小 + // Slice size uint64_t partSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -219,17 +200,15 @@ using RecoverChunkContextPtr = std::shared_ptr; struct RecoverChunkClosure : public SnapCloneClosure { RecoverChunkClosure(std::shared_ptr tracker, - RecoverChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + RecoverChunkContextPtr context) + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "RecoverChunkClosure return fail" << ", ret = " << context_->retCode - << ", logicalPoolId = " - << context_->cidInfo.lpid_ + << ", logicalPoolId = " << context_->cidInfo.lpid_ << ", copysetId = " << context_->cidInfo.cpid_ << ", chunkId = " << context_->cidInfo.cid_ << ", 
partIndex = " << context_->partIndex diff --git a/src/snapshotcloneserver/clone/clone_task_manager.h b/src/snapshotcloneserver/clone/clone_task_manager.h index d9607ccedc..56a030a8da 100644 --- a/src/snapshotcloneserver/clone/clone_task_manager.h +++ b/src/snapshotcloneserver/clone/clone_task_manager.h @@ -23,50 +23,46 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ -#include -#include #include -#include #include +#include +#include +#include #include // NOLINT -#include "src/snapshotcloneserver/clone/clone_task.h" -#include "src/snapshotcloneserver/common/thread_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/common/config.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/thread_pool.h" -using ::curve::common::RWLock; +using ::curve::common::LockGuard; +using ::curve::common::Mutex; using ::curve::common::ReadLockGuard; +using ::curve::common::RWLock; using ::curve::common::WriteLockGuard; -using ::curve::common::Mutex; -using ::curve::common::LockGuard; namespace curve { namespace snapshotcloneserver { class CloneTaskManager { public: - explicit CloneTaskManager( - std::shared_ptr core, - std::shared_ptr cloneMetric) + explicit CloneTaskManager(std::shared_ptr core, + std::shared_ptr cloneMetric) : isStop_(true), core_(core), cloneMetric_(cloneMetric), cloneTaskManagerScanIntervalMs_(0) {} - ~CloneTaskManager() { - Stop(); - } + ~CloneTaskManager() { Stop(); } int Init(std::shared_ptr stage1Pool, - std::shared_ptr stage2Pool, - std::shared_ptr commonPool, - const SnapshotCloneServerOptions &option) { - cloneTaskManagerScanIntervalMs_ = - option.cloneTaskManagerScanIntervalMs; + std::shared_ptr stage2Pool, + std::shared_ptr commonPool, + const SnapshotCloneServerOptions& option) { + cloneTaskManagerScanIntervalMs_ = option.cloneTaskManagerScanIntervalMs; stage1Pool_ = stage1Pool; stage2Pool_ = stage2Pool; commonPool_ = commonPool; @@ -78,40 +74,39 @@ class CloneTaskManager { void Stop(); /** - * @brief 往任务管理器中加入任务 + * @brief Add a task to the task manager * - * 用于非Lazy克隆及其他删除克隆等管控面的请求 + * Request for non Lazy clones and other deletion of control surfaces such + * as clones * - * @param task 任务 + * @param task: task * - * @return 错误码 + * @return error code */ - int PushCommonTask( - std::shared_ptr task); + int PushCommonTask(std::shared_ptr task); /** - * @brief 往任务管理器中加入LazyClone阶段一的的任务 + * @brief Add LazyClone Phase 1 tasks to the task manager * - * @param task 任务 + * @param task: task * - * @return 错误码 + * @return error code */ - int PushStage1Task( - std::shared_ptr task); + int PushStage1Task(std::shared_ptr task); /** - * @brief 往任务管理器中加入LazyClone阶段二的的任务 + * @brief: Add LazyClone Phase 2 tasks to the task manager * - * 目前只用于重启恢复时,将Lazy克隆恢复克隆数据阶段的任务加入任务管理器 + * At present, it is only used for adding tasks from the Lazy clone recovery + * clone data stage to the task manager during restart recovery * - * @param task 任务 + * @param task: task * - * @return 错误码 + * @return error code */ - int PushStage2Task( - std::shared_ptr task); + int PushStage2Task(std::shared_ptr task); - std::shared_ptr GetTask(const TaskIdType &taskId) const; + std::shared_ptr GetTask(const TaskIdType& taskId) const; private: 
void BackEndThreadFunc(); @@ -120,51 +115,52 @@ void ScanStage2Tasks(); /** - * @brief 往对应线程池和map中push任务 + * @brief Pushes a task to the corresponding thread pool and task map * - * @param task 任务 - * @param taskMap 任务表 - * @param taskMapMutex 任务表和线程池的锁 - * @param taskPool 线程池 + * @param task: Task + * @param taskMap: Task map + * @param taskMapMutex: Lock protecting the task map and thread pool + * @param taskPool: Thread pool * - * @return 错误码 + * @return error code */ int PushTaskInternal( std::shared_ptr task, - std::map > *taskMap, - Mutex *taskMapMutex, - std::shared_ptr taskPool); + std::map >* taskMap, + Mutex* taskMapMutex, std::shared_ptr taskPool); private: - // 后端线程 + // Background thread std::thread backEndThread; - // id->克隆任务表 + // Map from task id to clone task std::map > cloneTaskMap_; mutable RWLock cloneTaskMapLock_; - // 存放stage1Pool_池的当前任务,key为destination + // Current tasks of the stage1Pool_ pool, keyed by destination std::map > stage1TaskMap_; mutable Mutex stage1TasksLock_; - // 存放stage1Poo2_池的当前任务,key为destination + // Current tasks of the stage2Pool_ pool, keyed by destination std::map > stage2TaskMap_; mutable Mutex stage2TasksLock_; - // 存放commonPool_池的当前任务 + // Current tasks of the commonPool_ pool std::map > commonTaskMap_; mutable Mutex commonTasksLock_; - // 用于Lazy克隆元数据部分的线程池 + // Thread pool for the metadata stage of lazy clone std::shared_ptr stage1Pool_; - // 用于Lazy克隆数据部分的线程池 + // Thread pool for the data stage of lazy clone std::shared_ptr stage2Pool_; - // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池 + // Thread pool for non-lazy clones and other control-plane requests such + // as clone deletion std::shared_ptr commonPool_; - // 当前任务管理是否停止,用于支持start,stop功能 + // Whether the task manager is stopped; used to support the start and + // stop functions std::atomic_bool isStop_; // clone core @@ -173,16 +169,11 @@ // metric std::shared_ptr cloneMetric_; - // CloneTaskManager 后台线程扫描间隔 + // Scan interval of the CloneTaskManager background thread uint32_t cloneTaskManagerScanIntervalMs_; }; } // namespace snapshotcloneserver } // namespace curve - - - - - #endif // SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ diff --git a/src/snapshotcloneserver/common/task_info.h b/src/snapshotcloneserver/common/task_info.h index cc72201d52..e2835c5d59 100644 --- a/src/snapshotcloneserver/common/task_info.h +++ b/src/snapshotcloneserver/common/task_info.h @@ -23,11 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ - -#include -#include -#include //NOLINT #include +#include +#include //NOLINT +#include #include "src/common/concurrent/concurrent.h" @@ -36,10 +35,7 @@ namespace snapshotcloneserver { class TaskInfo { public: - TaskInfo() - : progress_(0), - isFinish_(false), - isCanceled_(false) {} + TaskInfo() : progress_(0), isFinish_(false), isCanceled_(false) {} virtual ~TaskInfo() {} TaskInfo(const TaskInfo&) = delete; @@ -48,59 +44,47 @@ TaskInfo& operator=(TaskInfo&&) = default; /** - * @brief 设置任务完成度百分比 + * @brief Set task completion percentage * - * @param persent 任务完成度百分比 + * @param persent: task completion percentage */ - void SetProgress(uint32_t persent) { - progress_ = persent; - } + void SetProgress(uint32_t persent) { progress_ = persent; } /** - * @brief 获取任务完成度百分比 + * @brief Get task completion percentage * - * @return 任务完成度百分比 + * @return Task completion percentage */ - uint32_t GetProgress() const { - return progress_; - } + uint32_t GetProgress()
const { return progress_; } /** - * @brief 完成任务 + * @brief Complete the task */ - void Finish() { - isFinish_.store(true); - } + void Finish() { isFinish_.store(true); } /** - * @brief 获取任务是否完成 + * @brief Check whether the task is finished * - * @retval true 任务完成 - * @retval false 任务未完成 + * @retval true: Task completed + * @retval false: Task not completed */ - bool IsFinish() const { - return isFinish_.load(); - } + bool IsFinish() const { return isFinish_.load(); } /** - * @brief 取消任务 + * @brief Cancel the task */ - void Cancel() { - isCanceled_ = true; - } + void Cancel() { isCanceled_ = true; } /** - * @brief 获取任务是否取消 + * @brief Check whether the task is canceled * - * @retval true 任务已取消 - * @retval false 任务未取消 + * @retval true: The task has been canceled + * @retval false: The task was not canceled */ - bool IsCanceled() const { - return isCanceled_; - } + bool IsCanceled() const { return isCanceled_; } /** - * @brief 重置任务 + * @brief Reset the task */ void Reset() { isFinish_.store(false); @@ -108,26 +92,24 @@ } /** - * @brief 获取任务锁的引用,以便使用LockGuard加锁解锁 + * @brief Obtain a reference to the task lock so that it can be locked and + * unlocked with LockGuard * - * 用于同步任务完成和取消功能 - * 1. 任务完成前,先锁定任务,然后判断任务是否取消, - * 若已取消,则释放锁, - * 否则执行任务完成逻辑之后释放锁。 - * 2. 任务取消前,先锁定任务,然后判断任务是否完成, - * 若已完成,则释放锁, - * 否则执行任务取消逻辑之后释放锁。 + * Used to synchronize the finish and cancel operations: + * 1. Before finishing the task, lock it and check whether it has been + * canceled. If so, release the lock; otherwise run the finish logic + * and then release the lock. + * 2. Before canceling the task, lock it and check whether it has + * finished. If so, release the lock; otherwise run the cancel logic + * and then release the lock.
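A compact sketch of the finish/cancel handshake described in this comment, using plain bools and a mutex instead of the class's atomics; the TryFinish/TryCancel helper names are hypothetical:

```cpp
#include <iostream>
#include <mutex>

// Whichever side takes the lock first wins; the other side sees the flag
// already set and backs off, exactly as the two rules above describe.
struct MiniTask {
    std::mutex lock;
    bool finished = false;
    bool canceled = false;
};

bool TryFinish(MiniTask* t) {
    std::lock_guard<std::mutex> guard(t->lock);
    if (t->canceled) return false;  // already canceled: skip finish logic
    // ... finish logic would run here, still under the lock ...
    t->finished = true;
    return true;
}

bool TryCancel(MiniTask* t) {
    std::lock_guard<std::mutex> guard(t->lock);
    if (t->finished) return false;  // already finished: skip cancel logic
    // ... cancel logic would run here, still under the lock ...
    t->canceled = true;
    return true;
}

int main() {
    MiniTask t;
    std::cout << "finish: " << TryFinish(&t) << "\n";  // 1
    std::cout << "cancel: " << TryCancel(&t) << "\n";  // 0, lost the race
    return 0;
}
```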
*/ - curve::common::Mutex& GetLockRef() { - return lock_; - } + curve::common::Mutex& GetLockRef() { return lock_; } private: - // 任务完成度百分比 + // Task completion percentage uint32_t progress_; - // 任务任务是否结束 + // Is the task completed std::atomic_bool isFinish_; - // 任务是否被取消 + // Has the task been canceled bool isCanceled_; mutable curve::common::Mutex lock_; }; diff --git a/src/snapshotcloneserver/common/thread_pool.h b/src/snapshotcloneserver/common/thread_pool.h index 1f7b4ea697..b9f553b671 100644 --- a/src/snapshotcloneserver/common/thread_pool.h +++ b/src/snapshotcloneserver/common/thread_pool.h @@ -24,6 +24,7 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_THREAD_POOL_H_ #include + #include "src/common/concurrent/task_thread_pool.h" #include "src/snapshotcloneserver/common/task.h" @@ -31,52 +32,49 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照线程池 + * @brief Snapshot thread pool */ class ThreadPool { public: - /** - * @brief 构造函数 - * - * @param threadNum 最大线程数 - */ - explicit ThreadPool(int threadNum) - : threadNum_(threadNum) {} /** - * @brief 启动线程池 + * @brief Constructor + * + * @param threadNum: Maximum number of threads + */ + explicit ThreadPool(int threadNum) : threadNum_(threadNum) {} + /** + * @brief Start Thread Pool */ int Start(); /** - * @brief 停止线程池 + * @brief Stop thread pool */ void Stop(); /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task: Snapshot task */ void PushTask(std::shared_ptr task) { threadPool_.Enqueue(task->clousre()); } /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task: Snapshot task */ - void PushTask(Task* task) { - threadPool_.Enqueue(task->clousre()); - } + void PushTask(Task* task) { threadPool_.Enqueue(task->clousre()); } private: /** - * @brief 通用线程池 + * @brief Universal Thread Pool */ curve::common::TaskThreadPool<> threadPool_; /** - * @brief 线程数 + * @brief Number of threads */ int threadNum_; }; diff --git a/src/tools/chunkserver_client.cpp b/src/tools/chunkserver_client.cpp index 69eb492d5c..40b1eb24aa 100644 --- a/src/tools/chunkserver_client.cpp +++ b/src/tools/chunkserver_client.cpp @@ -28,11 +28,10 @@ namespace curve { namespace tool { std::ostream& operator<<(std::ostream& os, const Chunk& chunk) { - uint64_t groupId = (static_cast(chunk.logicPoolId) << 32) | - chunk.copysetId; + uint64_t groupId = + (static_cast(chunk.logicPoolId) << 32) | chunk.copysetId; os << "logicalPoolId:" << chunk.logicPoolId - << ",copysetId:" << chunk.copysetId - << ",groupId:" << groupId + << ",copysetId:" << chunk.copysetId << ",groupId:" << groupId << ",chunkId:" << chunk.chunkId; return os; } @@ -40,8 +39,8 @@ std::ostream& operator<<(std::ostream& os, const Chunk& chunk) { int ChunkServerClient::Init(const std::string& csAddr) { csAddr_ = csAddr; if (channel_.Init(csAddr.c_str(), nullptr) != 0) { - std::cout << "Init channel to chunkserver: " << csAddr - << " failed!" << std::endl; + std::cout << "Init channel to chunkserver: " << csAddr << " failed!" + << std::endl; return -1; } return 0; @@ -69,7 +68,7 @@ int ChunkServerClient::GetRaftStatus(butil::IOBuf* iobuf) { } retryTimes++; } - // 只打最后一次失败的原因 + // Outputs only the reason for the last failure. 
std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -97,9 +96,8 @@ bool ChunkServerClient::CheckChunkServerOnline() { return false; } -int ChunkServerClient::GetCopysetStatus( - const CopysetStatusRequest& request, - CopysetStatusResponse* response) { +int ChunkServerClient::GetCopysetStatus(const CopysetStatusRequest& request, + CopysetStatusResponse* response) { brpc::Controller cntl; curve::chunkserver::CopysetService_Stub stub(&channel_); uint64_t retryTimes = 0; @@ -112,17 +110,16 @@ int ChunkServerClient::GetCopysetStatus( continue; } if (response->status() != - COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response->status() << std::endl; + << ", errCode: " << response->status() << std::endl; return -1; } else { return 0; } } - // 只打最后一次失败的原因 + // Outputs only the reason for the last failure. std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -151,15 +148,14 @@ int ChunkServerClient::GetChunkHash(const Chunk& chunk, if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response.status() << std::endl; + << ", errCode: " << response.status() << std::endl; return -1; } else { *chunkHash = response.hash(); return 0; } } - // 只打最后一次失败的原因 + // Outputs only the reason for the last failure. std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 5945737ae8..3ef9282239 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -23,30 +23,30 @@ #ifndef SRC_TOOLS_CHUNKSERVER_CLIENT_H_ #define SRC_TOOLS_CHUNKSERVER_CLIENT_H_ -#include -#include #include +#include +#include -#include #include +#include #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/tools/curve_tool_define.h" +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::CopysetStatusRequest; using curve::chunkserver::CopysetStatusResponse; -using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::GetChunkHashRequest; using curve::chunkserver::GetChunkHashResponse; -using curve::chunkserver::CHUNK_OP_STATUS; namespace curve { namespace tool { struct Chunk { - Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) : - logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} + Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) + : logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} uint32_t logicPoolId; uint32_t copysetId; uint64_t chunkId; @@ -58,40 +58,45 @@ class ChunkServerClient { public: virtual ~ChunkServerClient() = default; /** - * @brief 初始化channel,对一个地址,初始化一次就好 - * @param csAddr chunkserver地址 - * @return 成功返回0,失败返回-1 - */ + * @brief initializes the channel. For an address, just initialize it once + * @param csAddr chunkserver address + * @return returns 0 for success, -1 for failure + */ virtual int Init(const std::string& csAddr); /** - * @brief 调用braft的RaftStat接口获取复制组的详细信息,放到iobuf里面 - * @param iobuf 复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Invoke the RaftStat interface of braft to retrieve detailed + * information about the replication group and store it in the 'iobuf'. 
+ * @param iobuf: Replication group details; valid when the return value is + * 0. + * @return 0 on success, -1 on failure. + */ virtual int GetRaftStatus(butil::IOBuf* iobuf); /** - * @brief 检查chunkserver是否在线,只检查controller,不检查response - * @return 在线返回true,不在线返回false - */ + * @brief Check if the chunkserver is online, only check the controller, not + * the response. + * @return true if online, false if offline. + */ virtual bool CheckChunkServerOnline(); /** - * @brief 调用chunkserver的GetCopysetStatus接口 - & @param request 查询copyset的request - * @param response 返回的response,里面有复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Invoke the GetCopysetStatus interface of the chunkserver. + * @param request: The request to query the copyset. + * @param[out] response: The response containing detailed information about + * the replication group; valid when the return value is 0. + * @return 0 on success, -1 on failure. + */ virtual int GetCopysetStatus(const CopysetStatusRequest& request, CopysetStatusResponse* response); /** - * @brief 从chunkserver获取chunk的hash值 - & @param chunk 要查询的chunk - * @param[out] chunkHash chunk的hash值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Obtain the hash value of a chunk from the chunkserver. + * @param chunk: The chunk to be queried. + * @param[out] chunkHash: The hash value of the chunk; valid when the return + * value is 0. + * @return 0 on success, -1 on failure. + */ virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash); private: diff --git a/src/tools/chunkserver_tool_factory.h b/src/tools/chunkserver_tool_factory.h index 9a1e344b3c..a1f71c33c6 100644 --- a/src/tools/chunkserver_tool_factory.h +++ b/src/tools/chunkserver_tool_factory.h @@ -26,9 +26,9 @@ #include #include +#include "src/fs/ext4_filesystem_impl.h" #include "src/tools/curve_meta_tool.h" #include "src/tools/raft_log_tool.h" -#include "src/fs/ext4_filesystem_impl.h" namespace curve { namespace tool { @@ -38,20 +38,21 @@ using curve::fs::Ext4FileSystemImpl; class ChunkServerToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command: The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateChunkServerTool( - const std::string& command); + const std::string& command); + private: /** - * @brief 获取CurveMetaTool实例 + * @brief Get CurveMetaTool instance */ static std::shared_ptr GenerateCurveMetaTool(); /** - * @brief 获取RaftLogMetaTool实例 + * @brief Get RaftLogMetaTool instance */ static std::shared_ptr GenerateRaftLogTool(); }; diff --git a/src/tools/common.cpp b/src/tools/common.cpp index 35f29bf738..cdcdc369ba 100644 --- a/src/tools/common.cpp +++ b/src/tools/common.cpp @@ -29,11 +29,11 @@ namespace curve { namespace tool { void TrimMetricString(std::string* str) { - // 去掉头部空格 + // Remove header spaces str->erase(0, str->find_first_not_of(" ")); - // 去掉尾部回车 + // Remove the rear carriage return str->erase(str->find_last_not_of("\r\n") + 1); - // 去掉两边双引号 + // Remove double quotes from both sides str->erase(0, str->find_first_not_of("\"")); str->erase(str->find_last_not_of("\"") + 1); } diff --git a/src/tools/common.h b/src/tools/common.h index 1465a76ac7..132eec8360 100644 --- a/src/tools/common.h +++ b/src/tools/common.h @@ -24,8 +24,9 @@ #define SRC_TOOLS_COMMON_H_ #include -#include + #include +#include DECLARE_uint32(logicalPoolId); DECLARE_uint32(copysetId); @@ -34,9 +35,9 @@ namespace curve 
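The four erase calls in TrimMetricString() above can be exercised standalone; here is the same function body with a sample input showing each cleanup step:

```cpp
#include <iostream>
#include <string>

// Same three-step cleanup as TrimMetricString(): leading spaces, trailing
// CR/LF, then the surrounding double quotes.
void TrimMetricString(std::string* str) {
    str->erase(0, str->find_first_not_of(" "));
    str->erase(str->find_last_not_of("\r\n") + 1);
    str->erase(0, str->find_first_not_of("\""));
    str->erase(str->find_last_not_of("\"") + 1);
}

int main() {
    std::string raw = "  \"conf.value\"\r\n";  // typical raw metric line
    TrimMetricString(&raw);
    std::cout << "[" << raw << "]" << std::endl;  // prints [conf.value]
    return 0;
}
```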
{ namespace tool { /** - * @brief 格式化,从metric获取的string - * 去掉string两边的双引号以及空格和回车 - * @param[out] str 要格式化的string + * @brief Format a string obtained from metric: + * remove the double quotes, spaces, and carriage returns around it + * @param[out] str: The string to format */ void TrimMetricString(std::string* str); diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index 4444f51fd2..ea855bf094 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -20,17 +20,22 @@ * Author: charisu */ #include "src/tools/status_tool.h" + #include DEFINE_bool(offline, false, "if true, only list offline chunskervers"); -DEFINE_bool(unhealthy, false, "if true, only list chunkserver that unhealthy " - "ratio greater than 0"); -DEFINE_bool(checkHealth, true, "if true, it will check the health " - "state of chunkserver in chunkserver-list"); -DEFINE_bool(checkCSAlive, false, "if true, it will check the online state of " - "chunkservers with rpc in chunkserver-list"); -DEFINE_bool(listClientInRepo, true, "if true, list-client will list all clients" - " include that in repo"); +DEFINE_bool(unhealthy, false, + "if true, only list chunkserver that unhealthy " + "ratio greater than 0"); +DEFINE_bool(checkHealth, true, + "if true, it will check the health " + "state of chunkserver in chunkserver-list"); +DEFINE_bool(checkCSAlive, false, + "if true, it will check the online state of " + "chunkservers with rpc in chunkserver-list"); +DEFINE_bool(listClientInRepo, true, + "if true, list-client will list all clients" + " include that in repo"); DEFINE_uint64(walSegmentSize, 8388608, "wal segment size"); DECLARE_string(mdsAddr); DECLARE_string(etcdAddr); @@ -42,8 +47,7 @@ const char* kProtocalCurve = "curve"; namespace curve { namespace tool { -std::ostream& operator<<(std::ostream& os, - std::vector strs) { +std::ostream& operator<<(std::ostream& os, std::vector strs) { for (uint32_t i = 0; i < strs.size(); ++i) { if (i != 0) { os << ", "; } @@ -54,11 +58,10 @@ std::ostream& operator<<(std::ostream& os, } std::string ToString(ServiceName name) { - static std::map serviceNameMap = - {{ServiceName::kMds, "mds"}, - {ServiceName::kEtcd, "etcd"}, - {ServiceName::kSnapshotCloneServer, - "snapshot-clone-server"}}; + static std::map serviceNameMap = { + {ServiceName::kMds, "mds"}, + {ServiceName::kEtcd, "etcd"}, + {ServiceName::kSnapshotCloneServer, "snapshot-clone-server"}}; return serviceNameMap[name]; } @@ -83,7 +86,7 @@ int StatusTool::Init(const std::string& command) { } if (CommandNeedSnapshotClone(command)) { int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, - FLAGS_snapshotCloneDummyPort); + FLAGS_snapshotCloneDummyPort); switch (snapshotRet) { case 0: // success @@ -166,7 +169,7 @@ int StatusTool::SpaceCmd() { double physicalUsedRatio = 0; if (spaceInfo.totalChunkSize != 0) { physicalUsedRatio = static_cast(spaceInfo.usedChunkSize) / - spaceInfo.totalChunkSize; + spaceInfo.totalChunkSize; } double logicalUsedRatio = 0; @@ -175,28 +178,28 @@ int StatusTool::SpaceCmd() { double createdFileRatio = 0; if (spaceInfo.totalCapacity != 0) { logicalUsedRatio = static_cast(spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - logicalLeftRatio = static_cast( - spaceInfo.totalCapacity - spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; + spaceInfo.totalCapacity; + logicalLeftRatio = static_cast(spaceInfo.totalCapacity - + spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; createdFileRatio = static_cast(spaceInfo.currentFileSize) / - spaceInfo.totalCapacity; +
spaceInfo.totalCapacity; } if (spaceInfo.allocatedSize != 0) { canBeRecycledRatio = static_cast(spaceInfo.recycleAllocSize) / - spaceInfo.allocatedSize; + spaceInfo.allocatedSize; } - std:: cout.setf(std::ios::fixed); + std::cout.setf(std::ios::fixed); std::cout << std::setprecision(2); std::cout << "Space info:" << std::endl; - std::cout << "physical: total = " - << spaceInfo.totalChunkSize / mds::kGB << "GB" - << ", used = " << spaceInfo.usedChunkSize / mds::kGB - << "GB(" << physicalUsedRatio * 100 << "%), left = " + std::cout << "physical: total = " << spaceInfo.totalChunkSize / mds::kGB + << "GB" + << ", used = " << spaceInfo.usedChunkSize / mds::kGB << "GB(" + << physicalUsedRatio * 100 << "%), left = " << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; - std::cout << "logical: total = " - << spaceInfo.totalCapacity / mds::kGB << "GB" + std::cout << "logical: total = " << spaceInfo.totalCapacity / mds::kGB + << "GB" << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" << "(" << logicalUsedRatio * 100 << "%, can be recycled = " << spaceInfo.recycleAllocSize / mds::kGB << "GB(" @@ -205,18 +208,19 @@ int StatusTool::SpaceCmd() { << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB << "GB(" << logicalLeftRatio * 100 << "%)" << ", created file size = " - << spaceInfo.currentFileSize / mds::kGB - << "GB(" << createdFileRatio * 100 << "%)" << std::endl; + << spaceInfo.currentFileSize / mds::kGB << "GB(" + << createdFileRatio * 100 << "%)" << std::endl; std::cout << "Every Logicalpool Space info:" << std::endl; - for (const auto &i : spaceInfo.lpoolspaceinfo) { - std::cout << "logicalPool: name = "<< i.second.poolName - << ", poolid = " << i.first - << ", total = "<< i.second.totalCapacity / mds::kGB << "GB" - << ", used = " << i.second.allocatedSize / mds::kGB << "GB" - << ", left = " << (i.second.totalCapacity - - i.second.allocatedSize) / mds::kGB - << "GB"<< std::endl; + for (const auto& i : spaceInfo.lpoolspaceinfo) { + std::cout << "logicalPool: name = " << i.second.poolName + << ", poolid = " << i.first + << ", total = " << i.second.totalCapacity / mds::kGB << "GB" + << ", used = " << i.second.allocatedSize / mds::kGB << "GB" + << ", left = " + << (i.second.totalCapacity - i.second.allocatedSize) / + mds::kGB + << "GB" << std::endl; } return 0; } @@ -264,9 +268,9 @@ int StatusTool::ChunkServerListCmd() { double unhealthyRatio = 0.0; if (FLAGS_checkCSAlive) { - // 发RPC重置online状态 - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + // Send RPC to reset online status + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); if (isOnline) { chunkserver.set_onlinestate(OnlineState::ONLINE); @@ -290,7 +294,7 @@ int StatusTool::ChunkServerListCmd() { if (FLAGS_checkHealth) { copysetCheckCore_->CheckCopysetsOnChunkServer(csId); const auto& statistics = - copysetCheckCore_->GetCopysetStatistics(); + copysetCheckCore_->GetCopysetStatistics(); unhealthyRatio = statistics.unhealthyRatio; if (FLAGS_unhealthy && unhealthyRatio == 0) { continue; @@ -309,8 +313,7 @@ int StatusTool::ChunkServerListCmd() { std::cout << "chunkServerID = " << csId << ", diskType = " << chunkserver.disktype() << ", hostIP = " << chunkserver.hostip() - << ", port = " << chunkserver.port() - << ", rwStatus = " + << ", port = " << chunkserver.port() << ", rwStatus = " << 
ChunkServerStatus_Name(chunkserver.status()) << ", diskState = " << DiskState_Name(chunkserver.diskstatus()) @@ -318,13 +321,13 @@ int StatusTool::ChunkServerListCmd() { << OnlineState_Name(chunkserver.onlinestate()) << ", copysetNum = " << copysets.size() << ", mountPoint = " << chunkserver.mountpoint() - << ", diskCapacity = " << chunkserver.diskcapacity() - / curve::mds::kGB << " GB" - << ", diskUsed = " << chunkserver.diskused() - / curve::mds::kGB << " GB"; + << ", diskCapacity = " + << chunkserver.diskcapacity() / curve::mds::kGB << " GB" + << ", diskUsed = " << chunkserver.diskused() / curve::mds::kGB + << " GB"; if (FLAGS_checkHealth) { - std::cout << ", unhealthyCopysetRatio = " - << unhealthyRatio * 100 << "%"; + std::cout << ", unhealthyCopysetRatio = " << unhealthyRatio * 100 + << "%"; } if (chunkserver.has_externalip()) { std::cout << ", externalIP = " << chunkserver.externalip(); @@ -333,7 +336,7 @@ int StatusTool::ChunkServerListCmd() { } std::cout << "total: " << total << ", online: " << online; if (!FLAGS_checkCSAlive) { - std::cout <<", unstable: " << unstable; + std::cout << ", unstable: " << unstable; } std::cout << ", offline: " << offline << std::endl; @@ -378,8 +381,8 @@ int StatusTool::LogicalPoolListCmd() { uint64_t total = 0; uint64_t allocSize; AllocMap allocMap; - res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, - &allocSize, &allocMap); + res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &allocSize, + &allocMap); if (res != 0) { std::cout << "GetAllocatedSize of recycle bin fail!" << std::endl; return -1; @@ -417,15 +420,17 @@ int StatusTool::LogicalPoolListCmd() { << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) << ", scanEnable = " << lgPool.scanenable() << ", allocateStatus = " - << curve::mds::topology:: - AllocateStatus_Name(lgPool.allocatestatus()) + << curve::mds::topology::AllocateStatus_Name( + lgPool.allocatestatus()) << ", total space = " << totalSize / curve::mds::kGB << "GB" << ", used space = " << usedSize / curve::mds::kGB << "GB" - << "(" << usedRatio * 100 << "%, can be recycled = " - << canBeRecycle / curve::mds::kGB << "GB" - << "(" << recycleRatio * 100 << "%))" << ", left space = " - << (totalSize - usedSize) / curve::mds::kGB - << "GB(" << (1 - usedRatio) * 100 << "%)" << std::endl; + << "(" << usedRatio * 100 + << "%, can be recycled = " << canBeRecycle / curve::mds::kGB + << "GB" + << "(" << recycleRatio * 100 << "%))" + << ", left space = " + << (totalSize - usedSize) / curve::mds::kGB << "GB(" + << (1 - usedRatio) * 100 << "%)" << std::endl; } std::cout << "total: " << total << std::endl; return 0; @@ -469,9 +474,7 @@ int StatusTool::StatusCmd() { } } -int StatusTool::ChunkServerStatusCmd() { - return PrintChunkserverStatus(false); -} +int StatusTool::ChunkServerStatusCmd() { return PrintChunkserverStatus(false); } int StatusTool::PrintClusterStatus() { int ret = 0; @@ -486,8 +489,8 @@ int StatusTool::PrintClusterStatus() { const auto& statistics = copysetCheckCore_->GetCopysetStatistics(); std::cout << "total copysets: " << statistics.totalNum << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" + << std::endl; std::vector phyPools; std::vector lgPools; int res = GetPoolsInCluster(&phyPools, &lgPools); @@ -506,24 +509,24 @@ int StatusTool::PrintClusterStatus() { bool StatusTool::IsClusterHeatlhy() { bool ret = true; - // 1、检查copyset健康状态 + // 1. 
Check the health status of copyset int res = copysetCheckCore_->CheckCopysetsInCluster(); if (res != 0) { std::cout << "Copysets are not healthy!" << std::endl; ret = false; } - // 2、检查mds状态 + // 2. Check the mds status if (!CheckServiceHealthy(ServiceName::kMds)) { ret = false; } - // 3、检查etcd在线状态 + // 3. Check the online status of ETCD if (!CheckServiceHealthy(ServiceName::kEtcd)) { ret = false; } - // 4、检查snapshot clone server状态 + // 4. Check the status of the snapshot clone server if (!noSnapshotServer_ && !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) { ret = false; @@ -542,10 +545,10 @@ bool StatusTool::CheckServiceHealthy(const ServiceName& name) { break; } case ServiceName::kEtcd: { - int res = etcdClient_->GetEtcdClusterStatus(&leaderVec, - &onlineStatus); + int res = + etcdClient_->GetEtcdClusterStatus(&leaderVec, &onlineStatus); if (res != 0) { - std:: cout << "GetEtcdClusterStatus fail!" << std::endl; + std::cout << "GetEtcdClusterStatus fail!" << std::endl; return false; } break; @@ -579,8 +582,8 @@ bool StatusTool::CheckServiceHealthy(const ServiceName& name) { return ret; } -void StatusTool::PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus) { +void StatusTool::PrintOnlineStatus( + const std::string& name, const std::map& onlineStatus) { std::vector online; std::vector offline; for (const auto& item : onlineStatus) { @@ -674,8 +677,8 @@ int StatusTool::PrintSnapshotCloneStatus() { } std::string version; std::vector failedList; - int res = versionTool_->GetAndCheckSnapshotCloneVersion(&version, - &failedList); + int res = + versionTool_->GetAndCheckSnapshotCloneVersion(&version, &failedList); int ret = 0; if (res != 0) { std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl; @@ -710,7 +713,7 @@ int StatusTool::PrintClientStatus() { if (!first) { std::cout << ", "; } - std::cout << "version-" << item2.first << ": " + std::cout << "version-" << item2.first << ": " << item2.second.size(); first = false; } @@ -746,13 +749,12 @@ int StatusTool::ScanStatusCmd() { return -1; } - std::cout - << "Scan status for copyset(" - << lpid << "," << copysetId << "):" << std::endl - << " scaning=" << copysetInfo.scaning() - << " lastScanSec=" << copysetInfo.lastscansec() - << " lastScanConsistent=" << copysetInfo.lastscanconsistent() - << std::endl; + std::cout << "Scan status for copyset(" << lpid << "," << copysetId + << "):" << std::endl + << " scaning=" << copysetInfo.scaning() + << " lastScanSec=" << copysetInfo.lastscansec() + << " lastScanConsistent=" << copysetInfo.lastscanconsistent() + << std::endl; return 0; } @@ -769,8 +771,8 @@ int StatusTool::ScanStatusCmd() { if (count % 5 == 0) { std::cout << std::endl; } - std::cout << " (" << copysetInfo.logicalpoolid() - << "," << copysetInfo.copysetid() << ")"; + std::cout << " (" << copysetInfo.logicalpoolid() << "," + << copysetInfo.copysetid() << ")"; count++; } @@ -779,47 +781,47 @@ int StatusTool::ScanStatusCmd() { return 0; } -int CheckUseWalPool(const std::map> - &poolChunkservers, - bool *useWalPool, - bool *useChunkFilePoolAsWalPool, - std::shared_ptr metricClient) { +int CheckUseWalPool( + const std::map>& poolChunkservers, + bool* useWalPool, bool* useChunkFilePoolAsWalPool, + std::shared_ptr metricClient) { int ret = 0; if (!poolChunkservers.empty()) { ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0]; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + 
std::to_string(chunkserver.port()); // check whether use chunkfilepool std::string metricValue; std::string metricName = GetUseWalPoolName(csAddr); - MetricRet res = metricClient->GetConfValueFromMetric(csAddr, - metricName, &metricValue); + MetricRet res = metricClient->GetConfValueFromMetric(csAddr, metricName, + &metricValue); if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool conf " - << csAddr << " fail!" << std::endl; + std::cout << "Get use chunkfilepool conf " << csAddr << " fail!" + << std::endl; ret = -1; } std::string raftLogProtocol = curve::common::UriParser ::GetProtocolFromUri(metricValue); - *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; + *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; - // check whether use chunkfilepool as walpool from chunkserver conf metric // NOLINT + // check whether use chunkfilepool as walpool // NOLINT + // from chunkserver conf metric // NOLINT metricName = GetUseChunkFilePoolAsWalPoolName(csAddr); res = metricClient->GetConfValueFromMetric(csAddr, metricName, - &metricValue); + &metricValue); if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool as walpool conf " - << csAddr << " fail!" << std::endl; + std::cout << "Get use chunkfilepool as walpool conf " << csAddr + << " fail!" << std::endl; ret = -1; } - *useChunkFilePoolAsWalPool = StringToBool(metricValue, - useChunkFilePoolAsWalPool); + *useChunkFilePoolAsWalPool = + StringToBool(metricValue, useChunkFilePoolAsWalPool); } return ret; } int PrintChunkserverOnlineStatus( - const std::map> &poolChunkservers, + const std::map>& poolChunkservers, std::shared_ptr copysetCheckCore, std::shared_ptr mdsClient) { int ret = 0; @@ -830,8 +832,8 @@ int PrintChunkserverOnlineStatus( for (const auto& poolChunkserver : poolChunkservers) { for (const auto& chunkserver : poolChunkserver.second) { total++; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); if (copysetCheckCore->CheckChunkServerOnline(csAddr)) { online++; } else { @@ -844,11 +846,11 @@ int PrintChunkserverOnlineStatus( std::vector offlineRecover; if (offlineCs.size() > 0) { std::map statusMap; - int res = mdsClient->QueryChunkServerRecoverStatus( - offlineCs, &statusMap); + int res = + mdsClient->QueryChunkServerRecoverStatus(offlineCs, &statusMap); if (res != 0) { std::cout << "query offlinne chunkserver recover status fail"; - ret = -1; + ret = -1; } else { // Distinguish between recovering and unrecovered for (auto it = statusMap.begin(); it != statusMap.end(); ++it) { @@ -858,14 +860,13 @@ int PrintChunkserverOnlineStatus( } } } - std::cout << "chunkserver: total num = " << total - << ", online = " << online - << ", offline = " << offline - << "(recoveringout = " << offlineRecover.size() - << ", chunkserverlist: ["; + std::cout << "chunkserver: total num = " << total << ", online = " << online + << ", offline = " << offline + << "(recoveringout = " << offlineRecover.size() + << ", chunkserverlist: ["; int i = 0; - for (ChunkServerIdType csId : offlineRecover) { + for (ChunkServerIdType csId : offlineRecover) { i++; if (i == static_cast(offlineRecover.size())) { std::cout << csId; @@ -878,26 +879,25 @@ int PrintChunkserverOnlineStatus( } int GetChunkserverLeftSize( - const std::map> &poolChunkservers, - std::map> *poolChunkLeftSize, - std::map> *poolWalSegmentLeftSize, - bool useWalPool, - bool useChunkFilePoolAsWalPool, + const std::map>& 
poolChunkservers, + std::map>* poolChunkLeftSize, + std::map>* poolWalSegmentLeftSize, + bool useWalPool, bool useChunkFilePoolAsWalPool, std::shared_ptr metricClient) { int ret = 0; for (const auto& poolChunkserver : poolChunkservers) { std::vector chunkLeftSize; std::vector walSegmentLeftSize; for (const auto& chunkserver : poolChunkserver.second) { - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); std::string metricName = GetCSLeftChunkName(csAddr); uint64_t chunkNum; - MetricRet res = metricClient->GetMetricUint(csAddr, - metricName, &chunkNum); + MetricRet res = + metricClient->GetMetricUint(csAddr, metricName, &chunkNum); if (res != MetricRet::kOK) { std::cout << "Get left chunk size of chunkserver " << csAddr - << " fail!" << std::endl; + << " fail!" << std::endl; ret = -1; continue; } @@ -909,10 +909,10 @@ int GetChunkserverLeftSize( metricName = GetCSLeftWalSegmentName(csAddr); uint64_t walSegmentNum; res = metricClient->GetMetricUint(csAddr, metricName, - &walSegmentNum); + &walSegmentNum); if (res != MetricRet::kOK) { std::cout << "Get left wal segment size of chunkserver " - << csAddr << " fail!" << std::endl; + << csAddr << " fail!" << std::endl; ret = -1; continue; } @@ -922,7 +922,7 @@ int GetChunkserverLeftSize( } poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize); poolWalSegmentLeftSize->emplace(poolChunkserver.first, - walSegmentLeftSize); + walSegmentLeftSize); } return ret; } @@ -932,8 +932,8 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { std::cout << "ChunkServer status:" << std::endl; std::string version; std::vector failedList; - int res = versionTool_->GetAndCheckChunkServerVersion(&version, - &failedList); + int res = + versionTool_->GetAndCheckChunkServerVersion(&version, &failedList); int ret = 0; if (res != 0) { std::cout << "GetAndCheckChunkserverVersion fail" << std::endl; @@ -954,8 +954,7 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { } // get chunkserver online status - ret = PrintChunkserverOnlineStatus(poolChunkservers, - copysetCheckCore_, + ret = PrintChunkserverOnlineStatus(poolChunkservers, copysetCheckCore_, mdsClient_); if (!checkLeftSize) { return ret; @@ -970,12 +969,9 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { // get chunkserver left size std::map> poolChunkLeftSize; std::map> poolWalSegmentLeftSize; - ret = GetChunkserverLeftSize(poolChunkservers, - &poolChunkLeftSize, - &poolWalSegmentLeftSize, - useWalPool, - useChunkFilePoolAsWalPool, - metricClient_); + ret = GetChunkserverLeftSize(poolChunkservers, &poolChunkLeftSize, + &poolWalSegmentLeftSize, useWalPool, + useChunkFilePoolAsWalPool, metricClient_); if (0 != ret) { return ret; } @@ -993,9 +989,9 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { return ret; } -void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, - const std::map>& poolLeftSize) { +void StatusTool::PrintCsLeftSizeStatistics( + const std::string& name, + const std::map>& poolLeftSize) { if (poolLeftSize.empty()) { std::cout << "No " << name << " left size found!" 
<< std::endl; return; @@ -1024,19 +1020,19 @@ void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, } double var = sum / leftSize.second.size(); - std:: cout.setf(std::ios::fixed); - std::cout<< std::setprecision(2); - std::cout<< "pool" << leftSize.first << " " << name; + std::cout.setf(std::ios::fixed); + std::cout << std::setprecision(2); + std::cout << "pool" << leftSize.first << " " << name; std::cout << " left size: min = " << min << "GB" - << ", max = " << max << "GB" - << ", average = " << avg << "GB" - << ", range = " << range << "GB" - << ", variance = " << var << std::endl; + << ", max = " << max << "GB" + << ", average = " << avg << "GB" + << ", range = " << range << "GB" + << ", variance = " << var << std::endl; } } int StatusTool::GetPoolsInCluster(std::vector* phyPools, - std::vector* lgPools) { + std::vector* lgPools) { int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools); if (res != 0) { std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl; @@ -1044,7 +1040,7 @@ int StatusTool::GetPoolsInCluster(std::vector* phyPools, } for (const auto& phyPool : *phyPools) { int res = mdsClient_->ListLogicalPoolsInPhysicalPool( - phyPool.physicalpoolid(), lgPools) != 0; + phyPool.physicalpoolid(), lgPools) != 0; if (res != 0) { std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl; return -1; @@ -1066,9 +1062,9 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { std::cout << "Get root directory file size from mds fail!" << std::endl; return -1; } - // 从metric获取space信息 + // Obtain space information from metric for (const auto& lgPool : lgPools) { - LogicalpoolSpaceInfo lpinfo; + LogicalpoolSpaceInfo lpinfo; std::string poolName = lgPool.logicalpoolname(); lpinfo.poolName = poolName; std::string metricName = GetPoolTotalChunkSizeName(poolName); @@ -1079,7 +1075,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { return -1; } spaceInfo->totalChunkSize += size; - lpinfo.totalChunkSize +=size; + lpinfo.totalChunkSize += size; metricName = GetPoolUsedChunkSizeName(poolName); res = mdsClient_->GetMetric(metricName, &size); if (res != 0) { @@ -1105,10 +1101,10 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { spaceInfo->allocatedSize += size; lpinfo.allocatedSize += size; spaceInfo->lpoolspaceinfo.insert( - std::pair( - lgPool.logicalpoolid(), lpinfo)); + std::pair(lgPool.logicalpoolid(), + lpinfo)); } - // 获取RecycleBin的分配大小 + // Obtain the allocation size of RecycleBin res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &spaceInfo->recycleAllocSize); if (res != 0) { @@ -1118,7 +1114,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { return 0; } -int StatusTool::RunCommand(const std::string &cmd) { +int StatusTool::RunCommand(const std::string& cmd) { if (Init(cmd) != 0) { std::cout << "Init StatusTool failed" << std::endl; return -1; diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 82b776fa73..ea68db615c 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -23,31 +23,33 @@ #ifndef SRC_TOOLS_STATUS_TOOL_H_ #define SRC_TOOLS_STATUS_TOOL_H_ +#include #include #include -#include -#include + #include -#include -#include -#include +#include #include +#include +#include #include +#include + #include "proto/topology.pb.h" #include "src/common/timeutility.h" +#include "src/common/uri_parser.h" #include "src/mds/common/mds_define.h" -#include "src/tools/mds_client.h" #include "src/tools/chunkserver_client.h" -#include "src/tools/namespace_tool_core.h" #include "src/tools/copyset_check_core.h" 
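// PrintCsLeftSizeStatistics above reports min/max/average/range/variance of
// the per-chunkserver left sizes within each pool. A self-contained sketch
// of the same statistics follows (the sample numbers are invented; the
// variance is the population variance, matching the sum / n division in the
// code above):
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    // Hypothetical left sizes (GB) of the chunkservers in one pool.
    std::vector<uint64_t> leftSize = {90, 100, 110, 120};
    uint64_t min = *std::min_element(leftSize.begin(), leftSize.end());
    uint64_t max = *std::max_element(leftSize.begin(), leftSize.end());
    double avg = std::accumulate(leftSize.begin(), leftSize.end(), 0.0) /
                 leftSize.size();
    double sum = 0.0;
    for (uint64_t v : leftSize) {
        sum += (v - avg) * (v - avg);
    }
    double var = sum / leftSize.size();
    std::cout << "left size: min = " << min << "GB, max = " << max
              << "GB, average = " << avg << "GB, range = " << (max - min)
              << "GB, variance = " << var << std::endl;
    return 0;
}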
-#include "src/tools/etcd_client.h" -#include "src/tools/version_tool.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/etcd_client.h" +#include "src/tools/mds_client.h" #include "src/tools/metric_client.h" #include "src/tools/metric_name.h" +#include "src/tools/namespace_tool_core.h" #include "src/tools/snapshot_clone_client.h" -#include "src/common/uri_parser.h" +#include "src/tools/version_tool.h" using curve::mds::topology::ChunkServerInfo; using curve::mds::topology::ChunkServerStatus; @@ -63,22 +65,22 @@ struct LogicalpoolSpaceInfo { std::string poolName = ""; uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; }; struct SpaceInfo { uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; - // recycleBin的分配大小 + // Allocation size of recycleBin uint64_t recycleAllocSize = 0; - // 系统中存在的文件大小 + // File size present in the system uint64_t currentFileSize = 0; std::unordered_map lpoolspaceinfo; }; @@ -100,49 +102,54 @@ class StatusTool : public CurveTool { std::shared_ptr versionTool, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), - etcdClient_(etcdClient), metricClient_(metricClient), - snapshotClient_(snapshotClient), versionTool_(versionTool), - mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} + : mdsClient_(mdsClient), + copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), + metricClient_(metricClient), + snapshotClient_(snapshotClient), + versionTool_(versionTool), + mdsInited_(false), + etcdInited_(false), + noSnapshotServer_(false) {} ~StatusTool() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief Returns whether the command is supported + * @param command: The command executed + * @return true/false */ - static bool SupportCommand(const std::string &command); + static bool SupportCommand(const std::string& command); /** - * @brief 判断集群是否健康 + * @brief Determine whether the cluster is healthy */ bool IsClusterHeatlhy(); private: - int Init(const std::string &command); + int Init(const std::string& command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector *phyPools, - std::vector *lgPools); - int GetSpaceInfo(SpaceInfo *spaceInfo); + int GetPoolsInCluster(std::vector* phyPools, + std::vector* lgPools); + int GetSpaceInfo(SpaceInfo* spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -152,67 +159,68 @@ class StatusTool : public CurveTool { int ScanStatusCmd(); int FormatStatusCmd(); void 
PrintCsLeftSizeStatistics( - const std::string &name, - const std::map> &poolLeftSize); + const std::string& name, + const std::map>& poolLeftSize); int PrintSnapshotCloneStatus(); /** - * @brief 判断命令是否需要和etcd交互 - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine whether the command needs to interact with etcd + * @param command: The command to be executed + * @return Returns true if interaction is needed, otherwise returns false */ - bool CommandNeedEtcd(const std::string &command); - + bool CommandNeedEtcd(const std::string& command); /** - * @brief 判断命令是否需要mds - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine if the command requires mds + * @param command: The command to be executed + * @return Returns true if mds is needed, otherwise returns false */ - bool CommandNeedMds(const std::string &command); + bool CommandNeedMds(const std::string& command); /** - * @brief 判断命令是否需要snapshot clone server - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine if the command requires the snapshot clone server + * @param command: The command to be executed + * @return Returns true if the snapshot clone server is needed, otherwise + * returns false */ - bool CommandNeedSnapshotClone(const std::string &command); + bool CommandNeedSnapshotClone(const std::string& command); /** - * @brief 打印在线状态 - * @param name : 在线状态对应的名字 - * @param onlineStatus 在线状态的map + * @brief Print online status + * @param name: The name corresponding to the online status + * @param onlineStatus: Map of online status */ - void PrintOnlineStatus(const std::string &name, - const std::map &onlineStatus); + void PrintOnlineStatus(const std::string& name, + const std::map& onlineStatus); /** - * @brief 获取并打印mds version信息 + * @brief Get and print mds version information */ int GetAndPrintMdsVersion(); /** - * @brief 检查服务是否健康 - * @param name 服务名 + * @brief Check if the service is healthy + * @param name: Service name */ - bool CheckServiceHealthy(const ServiceName &name); + bool CheckServiceHealthy(const ServiceName& name); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // Copyset检查工具,用于检查集群和chunkserver的健康状态 + // Copyset checking tool, used to check the health status of clusters and + // chunkservers std::shared_ptr copysetCheckCore_; - // etcd client,用于调etcd API获取状态 + // ETCD client, used to call the ETCD API to obtain status std::shared_ptr etcdClient_; - // 用于获取metric + // Used to obtain metrics std::shared_ptr metricClient_; - // 用于获取snapshot clone的状态 + // Used to obtain the status of snapshot clones std::shared_ptr snapshotClient_; - // version client,用于获取version信息 + // Version client, used to obtain version information std::shared_ptr versionTool_; - // mds是否初始化过 + // Whether mds has been initialized bool mdsInited_; - // etcd是否初始化过 + // Whether ETCD has been initialized bool etcdInited_; // Is there a snapshot service or not bool noSnapshotServer_; diff --git a/src/tools/version_tool.cpp b/src/tools/version_tool.cpp index 6e519bab4a..42b1d3e9a5 100644 --- a/src/tools/version_tool.cpp +++ b/src/tools/version_tool.cpp @@ -48,8 +48,8 @@ int VersionTool::GetAndCheckMdsVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckChunkServerVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList) { std::vector chunkServers; int res = mdsClient_->ListChunkServersInCluster(&chunkServers); if (res != 0) { @@ -78,8 +78,8 @@ int
VersionTool::GetAndCheckChunkServerVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckSnapshotCloneVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList) { const auto& dummyServerMap = snapshotClient_->GetDummyServerMap(); std::vector dummyServers; for (const auto& item : dummyServerMap) { @@ -123,9 +123,8 @@ void VersionTool::FetchClientProcessMap(const std::vector& addrVec, ProcessMapType* processMap) { for (const auto& addr : addrVec) { std::string cmd; - MetricRet res = metricClient_->GetMetric(addr, - kProcessCmdLineMetricName, - &cmd); + MetricRet res = + metricClient_->GetMetric(addr, kProcessCmdLineMetricName, &cmd); if (res != MetricRet::kOK) { continue; } @@ -156,10 +155,11 @@ void VersionTool::GetVersionMap(const std::vector& addrVec, failedList->clear(); for (const auto& addr : addrVec) { std::string version; - MetricRet res = metricClient_->GetMetric(addr, kCurveVersionMetricName, - &version); + MetricRet res = + metricClient_->GetMetric(addr, kCurveVersionMetricName, &version); if (res != MetricRet::kOK) { - // 0.0.5.2版本之前没有curve_version的metric,因此再判断一下 + // Before version 0.0.5.2, there was no "curve_version" metric, so + // we check the error code further here. if (res == MetricRet::kNotFound) { version = kOldVersion; } else { diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 9231d1e4fc..01cd05a6c8 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -23,13 +23,14 @@ #ifndef SRC_TOOLS_VERSION_TOOL_H_ #define SRC_TOOLS_VERSION_TOOL_H_ -#include #include -#include #include +#include +#include + +#include "src/common/string_util.h" #include "src/tools/mds_client.h" #include "src/tools/metric_client.h" -#include "src/common/string_util.h" #include "src/tools/snapshot_clone_client.h" namespace curve { namespace tool { @@ -49,95 +50,97 @@ class VersionTool { public: explicit VersionTool(std::shared_ptr mdsClient, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), snapshotClient_(snapshotClient), + : mdsClient_(mdsClient), + snapshotClient_(snapshotClient), metricClient_(metricClient) {} virtual ~VersionTool() {} /** - * @brief 获取mds的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of mds and check version consistency + * @param[out] version: Version + * @return 0 for success, -1 for failure */ - virtual int GetAndCheckMdsVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckMdsVersion(std::string* version, + std::vector* failedList); /** - * @brief 获取chunkserver的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of chunkserver and check version consistency + * @param[out] version: Version + * @return 0 for success, -1 for failure */ - virtual int - GetAndCheckChunkServerVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取snapshot clone server的版本 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the snapshot clone server + * @param[out] version: Version + * @return 0 for success, -1 for failure */ - virtual int - GetAndCheckSnapshotCloneVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取client的版本 - * @param[out] versionMap
process->版本->地址的映射表 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the client + * @param[out] versionMap: process -> version -> address mapping table + * @return 0 for success, -1 for failure */ - virtual int GetClientVersion(ClientVersionMapType *versionMap); + virtual int GetClientVersion(ClientVersionMapType* versionMap); /** - * @brief 打印每个version对应的地址 - * @param versionMap version到地址列表的map + * @brief Print the addresses corresponding to each version + * @param versionMap: Version to address list map */ - static void PrintVersionMap(const VersionMapType &versionMap); + static void PrintVersionMap(const VersionMapType& versionMap); /** - * @brief 打印访问失败的地址 - * @param failedList 访问失败的地址列表 + * @brief Print the addresses that failed to be accessed + * @param failedList: List of addresses that failed to be accessed */ - static void PrintFailedList(const std::vector &failedList); + static void PrintFailedList(const std::vector& failedList); private: /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] versionMap version到地址的map - * @param[out] failedList 查询version失败的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and + * store the version and address correspondence in the map + * @param addrVec: Address list + * @param[out] versionMap: Version to address map + * @param[out] failedList: List of addresses for which the version query failed */ - void GetVersionMap(const std::vector &addrVec, - VersionMapType *versionMap, - std::vector *failedList); + void GetVersionMap(const std::vector& addrVec, + VersionMapType* versionMap, + std::vector* failedList); /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] processMap 不同的process对应的client的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and + * store the version and address correspondence in the map + * @param addrVec: Address list + * @param[out] processMap: The address list of clients corresponding to + * different processes */ - void FetchClientProcessMap(const std::vector &addrVec, - ProcessMapType *processMap); + void FetchClientProcessMap(const std::vector& addrVec, + ProcessMapType* processMap); /** - * @brief 从启动server的命令行获取对应的程序的名字 - * 比如nebd的命令行为 + * @brief Get the name of the corresponding program from the command line + * that started the server. For example, the command line of nebd is * process_cmdline : "/usr/bin/nebd-server * -confPath=/etc/nebd/nebd-server.conf * -log_dir=/data/log/nebd/server * -graceful_quit_on_sigterm=true * -stderrthreshold=3 * " - * 那么我们要解析出的名字是nebd-server - * @param addrVec 地址列表 - * @return 进程的名字 + * The name we want to parse out here is nebd-server + * @param cmd: The command line of the process + * @return The name of the process */ - std::string GetProcessNameFromCmd(const std::string &cmd); + std::string GetProcessNameFromCmd(const std::string& cmd); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // 用于获取snapshotClone状态 + // Used to obtain snapshotClone status std::shared_ptr snapshotClient_; - // 获取metric的client + // Client used to obtain metrics std::shared_ptr metricClient_; }; diff --git a/test/chunkserver/braft_cli_service2_test.cpp b/test/chunkserver/braft_cli_service2_test.cpp index cc97980aa2..1e1ca66d8c 100644 --- a/test/chunkserver/braft_cli_service2_test.cpp +++ b/test/chunkserver/braft_cli_service2_test.cpp @@ -20,25 +20,26 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service2.h" + #include #include #include
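// GetProcessNameFromCmd is documented above to derive the program name from
// a process_cmdline metric, e.g. "/usr/bin/nebd-server -confPath=..." yields
// "nebd-server". A hedged, self-contained sketch of that parsing follows
// (an illustration of the documented behavior, not the tool's actual code):
#include <cassert>
#include <string>

static std::string ProcessNameFromCmdSketch(const std::string& cmd) {
    // The first token of the command line is the executable path.
    std::string path = cmd.substr(0, cmd.find_first_of(" \n"));
    // Keep only the last path component.
    return path.substr(path.find_last_of('/') + 1);
}

int main() {
    std::string cmd =
        "/usr/bin/nebd-server -confPath=/etc/nebd/nebd-server.conf";
    assert(ProcessNameFromCmdSketch(cmd) == "nebd-server");
    return 0;
}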
+#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service2.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/common/timeutility.h" +#include "src/common/uuid.h" #include "src/fs/local_filesystem.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -48,10 +49,12 @@ using curve::common::UUIDGenerator; class BraftCliService2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { peer1.set_address("127.0.0.1:9310:0"); @@ -75,10 +78,10 @@ class BraftCliService2Test : public testing::Test { } public: - const char *ip = "127.0.0.1"; - int port = 9310; - const char *confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; - int snapshotInterval = 3600; // 防止自动打快照 + const char* ip = "127.0.0.1"; + int port = 9310; + const char* confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; + int snapshotInterval = 3600; // Prevent automatic snapshot taking int electionTimeoutMs = 3000; pid_t pid1; @@ -128,12 +131,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dirMap[peer1.address()]; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -143,12 +142,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dirMap[peer2.address()]; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -158,16 +153,12 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dirMap[peer3.address()]; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ WaitpidGuard waitpidGuard(pid1, pid2, pid3); ::usleep(1.2 * 1000 * electionTimeoutMs); @@ -182,15 +173,15 @@ TEST_F(BraftCliService2Test, basic2) { options.timeout_ms = 3000; options.max_retry = 3; - /* add peer - 非法copyset */ + /* Add peer - illegal copyset */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); AddPeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -210,10 +201,10 @@ TEST_F(BraftCliService2Test, basic2) { 
ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法peerid */ + /* Add peer - illegal peer id */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -223,7 +214,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_addpeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); AddPeerResponse2 response; @@ -237,13 +228,14 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* Add peer - sent to peers who are not leaders */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 + // Find a peer that is not a leader and send the configuration change + // request to it for processing if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; @@ -274,15 +266,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法copyset */ + /* Remove peer - illegal copyset */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); RemovePeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -302,10 +294,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法peer id */ + /* Remove peer - illegal peer id */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -315,7 +307,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_removepeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); RemovePeerResponse2 response; @@ -329,15 +321,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "remove peer: " << cntl.ErrorText(); } - /* remove peer - 发送给不是leader的peer */ + /* Remove peer - sent to peers who are not leaders */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader and send the configuration change + // request to it for processing + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; } else { @@ -367,15 +359,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法copyset */ + /* Transfer 
leader - illegal copyset */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); TransferLeaderRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -395,8 +387,8 @@ TEST_F(BraftCliService2Test, basic2) { } /* transfer leader to leader */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -417,10 +409,10 @@ TEST_F(BraftCliService2Test, basic2) { stub.TransferLeader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法peer */ + /* Transfer leader - illegal peer */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -430,7 +422,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_transferee(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); TransferLeaderResponse2 response; @@ -444,18 +436,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "Transfer leader peer: " << cntl.ErrorText(); } - /* get leader - 非法copyset */ + /* Get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); - GetLeaderRequest2 request; GetLeaderResponse2 response; brpc::Controller cntl; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); @@ -467,14 +458,13 @@ TEST_F(BraftCliService2Test, basic2) { /* remove peer then add peer */ { // 1 remove peer - Peer *removePeer = new Peer(); - Peer *leaderPeer1 = new Peer(); - Peer *leaderPeer2 = new Peer(); - Peer *addPeer = new Peer(); + Peer* removePeer = new Peer(); + Peer* leaderPeer1 = new Peer(); + Peer* leaderPeer2 = new Peer(); + Peer* addPeer = new Peer(); PeerId removePeerId; - // 找一个不是leader的peer,作为remove peer - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader as a remove peer + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { removePeerId.parse(peer2.address()); *removePeer = peer2; } else { @@ -508,7 +498,6 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl1.Failed()); ASSERT_EQ(0, cntl1.ErrorCode()); - // add peer AddPeerRequest2 request2; request2.set_logicpoolid(logicPoolId); @@ -529,17 +518,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl2.Failed()); ASSERT_EQ(0, cntl2.ErrorCode()); } - /* snapshot - 非法copyset */ + /* Snapshot - illegal copyset */ { PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); SnapshotRequest2 request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); *peerPtr = peer1; request.set_allocated_peer(peerPtr); @@ -557,11 +546,12 @@ TEST_F(BraftCliService2Test, basic2) { } /* snapshot - normal */ { - // 初始状态快照不为空 + // The initial state snapshot 
is not empty std::string copysetDataDir = dirMap[gLeader.address()] + "/" + - ToGroupId(logicPoolId, copysetId) + "/" + RAFT_LOG_DIR; + ToGroupId(logicPoolId, copysetId) + "/" + + RAFT_LOG_DIR; std::shared_ptr fs( - LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); + LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); std::vector files; fs->List(copysetDataDir.c_str(), &files); ASSERT_GE(files.size(), 1); @@ -574,7 +564,7 @@ TEST_F(BraftCliService2Test, basic2) { SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); peerPtr->set_address(leaderId.to_string()); request.set_allocated_peer(peerPtr); @@ -586,19 +576,20 @@ TEST_F(BraftCliService2Test, basic2) { LOG(INFO) << "Start do snapshot"; CliService2_Stub stub(&channel); stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); - // 需要连续打两次快照才能删除第一次快照时的log + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); + // Two consecutive snapshots are required to delete the log from the + // first snapshot sleep(5); cntl.Reset(); LOG(INFO) << "Start do snapshot"; stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); for (int i = 0; i < 60; ++i) { files.clear(); fs->List(copysetDataDir.c_str(), &files); - // 打完快照应该只剩下meta信息 + // After taking the snapshot, only meta information should be left if (files.size() == 1) { break; } @@ -619,18 +610,18 @@ TEST_F(BraftCliService2Test, basic2) { CliService2_Stub stub(&channel); stub.SnapshotAll(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot all fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot all fail, error: " << cntl.ErrorText(); } - /* reset peer - 非法 copyset */ + /* Reset peer - illegal copyset */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); ResetPeerRequest2 request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_requestpeer(targetPeer); @@ -646,9 +637,9 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* reset peer - new peer为空 */ + /* Reset peer - new peer is empty */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; @@ -669,7 +660,7 @@ TEST_F(BraftCliService2Test, basic2) { } /* reset peer - normal */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; diff --git a/test/chunkserver/braft_cli_service_test.cpp b/test/chunkserver/braft_cli_service_test.cpp index 50f04588af..29d65a9af1 100644 --- a/test/chunkserver/braft_cli_service_test.cpp +++ b/test/chunkserver/braft_cli_service_test.cpp @@ -20,21 +20,22 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include 
"src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" #include "test/chunkserver/chunkserver_test_util.h" namespace curve { @@ -43,10 +44,12 @@ namespace chunkserver { class BraftCliServiceTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { Exec("mkdir 6"); @@ -68,9 +71,9 @@ class BraftCliServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(BraftCliServiceTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9015; - const char *confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; + const char* confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; int snapshotInterval = 600; PeerId peer1("127.0.0.1:9015:0"); PeerId peer2("127.0.0.1:9016:0"); @@ -87,12 +90,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 1 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid1) { - const char *copysetdir = "local://./6"; - StartChunkserver(ip, - port + 0, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./6"; + StartChunkserver(ip, port + 0, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -102,12 +101,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 2 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid2) { - const char *copysetdir = "local://./7"; - StartChunkserver(ip, - port + 1, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./7"; + StartChunkserver(ip, port + 1, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -117,17 +112,13 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 3 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid3) { - const char *copysetdir = "local://./8"; - StartChunkserver(ip, - port + 2, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./8"; + StartChunkserver(ip, port + 2, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -144,6 +135,7 @@ TEST_F(BraftCliServiceTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -166,7 +158,7 @@ TEST_F(BraftCliServiceTest, basic) { options.timeout_ms = 1500; options.max_retry = 3; - /* add peer - 非法 copyset */ + /* Add peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -188,7 +180,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法 peerid */ + /* add peer - illegal peerid */ { PeerId leaderId = leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leaderId); @@ -210,12 +202,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* add peer - sent to peers who are not leader */ { PeerId leaderId; LOG(INFO) << "true leader is: " << 
leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -240,13 +232,13 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法 copyset */ + /* remove peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); RemovePeerRequest request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_leader_id(leaderId.to_string()); @@ -261,7 +253,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法 peer id */ + /* remove peer - illegal peer id */ { PeerId leaderId = leader; brpc::Channel channel; @@ -281,12 +273,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* remove peer - 发送给不是 leader 的 peer */ + /* remove peer - sent to peers who are not leaders */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -309,7 +301,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法 copyset */ + /* transfer leader - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -346,7 +338,7 @@ TEST_F(BraftCliServiceTest, basic) { stub.transfer_leader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法 peer */ + /* transfer leader - illegal peer */ { PeerId leaderId = leader; brpc::Channel channel; @@ -365,7 +357,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* get leader - 非法 copyset */ + /* get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 3968766d91..b62f02f9c8 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -20,24 +20,24 @@ * Author: wudemiao */ +#include "src/chunkserver/chunk_service.h" -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -75,11 +75,10 @@ class ChunkserverTest : public testing::Test { butil::AtExitManager atExitManager; - TEST_F(ChunkserverTest, normal_read_write_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9020; - const char *confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; + const char* confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; int 
rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -96,12 +95,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -111,12 +106,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -126,16 +117,12 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -152,6 +139,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -313,7 +301,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /* Delete a non-existent chunk (duplicate deletion) */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -329,7 +317,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response.status()); } - /* Read 一个不存在的 Chunk */ + /* Read a non-existent Chunk */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -347,7 +335,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response.status()); } - /* Applied index Read 一个不存在的 Chunk */ + /* Applied index Read a non-existent Chunk */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -416,9 +404,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -435,9 +421,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { request.set_copysetid(copysetId); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -467,7 +451,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(0, channel.Init(leader.addr, NULL)); ChunkService_Stub stub(&channel); - // get hash : 访问不存在的chunk + // Get hash: Access non-existent chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -485,7 +469,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_STREQ("0", response.hash().c_str()); } - // 
get hash : 非法的offset和length + // Get hash: illegal offset and length { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -560,7 +544,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(1, response.chunksn().size()); } - // get hash : 访问存在的chunk + // Get hash: Access existing chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -579,7 +563,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } - /* 多 chunk read/write/delete */ + /* Multi-chunk read/write/delete */ { brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { @@ -685,7 +669,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /* Delete a non-existent chunk (duplicate deletion) */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -703,7 +687,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } } - /* read 一个不存在的 chunk */ + /* Read a non-existent chunk */ { brpc::Channel channel; uint32_t requestSize = kOpRequestAlignSize; @@ -770,7 +754,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* read 一个不存在的 chunk */ + /* Read a non-existent chunk */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); diff --git a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 674220d91a..ef7ecf2ebd 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -20,24 +20,23 @@ * Author: wudemiao */ - -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -76,9 +75,9 @@ class ChunkService2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(ChunkService2Test, illegial_parameters_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9023; - const char *confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; + const char* confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -95,12 +94,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -110,12 +105,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -125,16 +116,12 @@ TEST_F(ChunkService2Test, 
illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -151,6 +138,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -177,13 +165,13 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); - /* 非法参数 request 测试 */ + /* Illegal parameter request test */ brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to " << leader; } ChunkService_Stub stub(&channel); - /* read 溢出 */ + /* Read overflow */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -201,7 +189,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read offset没对齐 */ + /* Read offset not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -219,7 +207,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read size没对齐 */ + /* Read size not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -237,7 +225,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read copyset 不存在 */ + /* Read copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -256,7 +244,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* read snapshot 溢出 */ + /* Read snapshot overflow */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -274,7 +262,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot offset没对齐 */ + /* Read snapshot offset not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -293,7 +281,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot size没对齐 */ + /* Read snapshot size not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -312,7 +300,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot copyset 不存在 */ + /* Read snapshot copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -331,7 +319,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* write 溢出 */ + /* Write overflow */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -350,7 +338,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write offset没对齐 */ + /* Write offset not 
aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -369,7 +357,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write size没对齐 */ + /* Write size not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -388,7 +376,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write copyset 不存在 */ + /* Write copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -407,7 +395,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete copyset 不存在*/ + /* Delete copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -423,7 +411,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete snapshot copyset 不存在*/ + /* Delete snapshot copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -434,9 +422,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { request.set_copysetid(copysetId + 1); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, @@ -456,7 +442,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* 不是 leader */ + /* Not a leader */ { PeerId peer1; PeerId peer2; @@ -562,13 +548,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { class ChunkServiceTestClosure : public ::google::protobuf::Closure { public: - explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~ChunkServiceTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep for a while to make it easier to test overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -580,13 +565,12 @@ class ChunkServiceTestClosure : public ::google::protobuf::Closure { class UpdateEpochTestClosure : public ::google::protobuf::Closure { public: - explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - } + explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) {} virtual ~UpdateEpochTestClosure() = default; void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + // Sleep for a while to make it easier to test overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -602,12 +586,12 @@ TEST_F(ChunkService2Test, overload_test) { // inflight throttle uint64_t maxInflight = 0; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -690,9 +674,7 @@ TEST_F(ChunkService2Test, overload_test) { request.set_logicpoolid(logicPoolId); 
request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -750,12 +732,12 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { // inflight throttle uint64_t maxInflight = 10; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -780,17 +762,17 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { }; std::vector threads; - // 启动10个线程,将chunkserver压满 + // Start 10 threads to saturate the chunkserver for (int i = 0; i < 10; ++i) { std::thread t1(writeFunc); threads.push_back(std::move(t1)); } - // 等待进程启动起来 + // Wait for the worker threads to start ::usleep(500 * 1000); ASSERT_FALSE(inflightThrottle->IsOverLoad()); - // 压满之后chunkserver后面收到的request都会被拒绝 + // Once the chunkserver is saturated, any further requests will be rejected // write chunk { brpc::Controller cntl; @@ -863,9 +845,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -916,7 +896,8 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } - // 等待request处理完成,之后chunkserver又重新可以接收新的request + // Wait for the requests to finish processing; after that the chunkserver + // can accept new requests again for (auto it = threads.begin(); it != threads.end(); ++it) { it->join(); } @@ -995,9 +976,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId); - chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); ASSERT_NE(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } @@ -1055,12 +1034,12 @@ TEST_F(ChunkService2Test, CheckEpochTest) { // inflight throttle uint64_t maxInflight = 10000; - std::shared_ptr inflightThrottle - = std::make_shared(maxInflight); + std::shared_ptr inflightThrottle = + std::make_shared(maxInflight); CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service - CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance(); + CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance(); ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle; @@ -1083,7 +1062,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_chunkid(chunkId); chunkService.WriteChunk(&cntl, &request, &response, &done); 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk request have epoch, but epoch map have no epoch @@ -1100,7 +1079,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_chunkid(chunkId); request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // update epoch map to {(1, 1) , (2, 2)} { @@ -1130,7 +1109,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); } // write chunk check epoch failed { @@ -1146,7 +1125,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } // update epoch map to {(1, 2) , (2, 2)} @@ -1174,7 +1153,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } } diff --git a/test/chunkserver/chunkserver_helper_test.cpp b/test/chunkserver/chunkserver_helper_test.cpp index e9d538bf0c..d401a22185 100644 --- a/test/chunkserver/chunkserver_helper_test.cpp +++ b/test/chunkserver/chunkserver_helper_test.cpp @@ -20,14 +20,16 @@ * Author: lixiaocui */ -#include #include "src/chunkserver/chunkserver_helper.h" + +#include + #include "src/chunkserver/register.h" namespace curve { namespace chunkserver { TEST(ChunkServerMeta, test_encode_and_decode) { - // 1. 正常编解码 + // 1. Normal encoding and decoding ChunkServerMetadata metadata; metadata.set_version(CURRENT_METADATA_VERSION); metadata.set_id(1); @@ -43,13 +45,13 @@ TEST(ChunkServerMeta, test_encode_and_decode) { ASSERT_EQ(metadata.id(), metaOut.id()); ASSERT_EQ(metadata.token(), metaOut.token()); - // 2. 编码异常 + // 2. Encoding failure metadata.clear_token(); strOut.clear(); ASSERT_FALSE( ChunkServerMetaHelper::EncodeChunkServerMeta(metadata, &strOut)); - // 3. 解码异常 + // 3. 
Decoding failure metadata.set_token("hello"); metadata.set_checksum(9999); ASSERT_TRUE( diff --git a/test/chunkserver/chunkserver_service_test.cpp b/test/chunkserver/chunkserver_service_test.cpp index 106501e267..4b834a5037 100644 --- a/test/chunkserver/chunkserver_service_test.cpp +++ b/test/chunkserver/chunkserver_service_test.cpp @@ -20,40 +20,41 @@ * Author: lixiaocui1 */ -#include -#include +#include "src/chunkserver/chunkserver_service.h" + #include +#include #include +#include #include -#include "src/chunkserver/chunkserver_service.h" -#include "test/chunkserver/mock_copyset_node_manager.h" + #include "proto/chunkserver.pb.h" +#include "test/chunkserver/mock_copyset_node_manager.h" namespace curve { namespace chunkserver { -using ::testing::Return; using ::testing::_; +using ::testing::Return; TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { - // 启动ChunkServerService + // Start ChunkServerService auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = new ChunkServerServiceImpl(copysetNodeManager); - ASSERT_EQ(0, - server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); + ASSERT_EQ( + 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); auto listenAddr = butil::endpoint2str(server->listen_address()).c_str(); - brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr, NULL)); ChunkServerService_Stub stub(&channel); ChunkServerStatusRequest request; ChunkServerStatusResponse response; - // 1. 指定chunkserver加载copyset完成 + // 1. The specified chunkserver has not finished loading copysets { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); @@ -63,23 +64,22 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_FALSE(response.copysetloadfin()); } - // 2. 指定chunkserver加载copyset未完成 + // 2. The specified chunkserver has finished loading copysets { - EXPECT_CALL(*copysetNodeManager, LoadFinished()) - .WillOnce(Return(true)); + EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true)); brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_TRUE(response.copysetloadfin()); } - // 停止chunkserver service + // Stop chunkserver service server->Stop(0); server->Join(); delete server; server = nullptr; - // 3. 未获取到指定chunkserver加载copyset状态 + // 3. 
Unable to obtain the specified chunkserver loading copyset status { brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); diff --git a/test/chunkserver/client.cpp b/test/chunkserver/client.cpp index 7f8c2e6243..dfc3ca4a99 100644 --- a/test/chunkserver/client.cpp +++ b/test/chunkserver/client.cpp @@ -20,49 +20,47 @@ * Author: wudemiao */ -#include -#include -#include #include #include +#include +#include +#include -#include "src/chunkserver/copyset_node.h" #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" #include "test/chunkserver/chunkserver_test_util.h" DEFINE_int32(request_size, 10, "Size of each requst"); DEFINE_int32(timeout_ms, 500, "Timeout for each request"); DEFINE_int32(election_timeout_ms, 3000, "election timeout ms"); DEFINE_int32(write_percentage, 100, "Percentage of fetch_add"); -DEFINE_string(confs, - "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", +DEFINE_string(confs, "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", "Configuration of the raft group"); -using curve::chunkserver::CopysetRequest; -using curve::chunkserver::CopysetResponse; -using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::CHUNK_OP_TYPE; using curve::chunkserver::ChunkRequest; using curve::chunkserver::ChunkResponse; using curve::chunkserver::ChunkService_Stub; -using curve::chunkserver::PeerId; -using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; using curve::chunkserver::Configuration; -using curve::chunkserver::CHUNK_OP_TYPE; -using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::COPYSET_OP_STATUS; +using curve::chunkserver::CopysetID; +using curve::chunkserver::CopysetRequest; +using curve::chunkserver::CopysetResponse; +using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::LogicPoolID; +using curve::chunkserver::PeerId; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - uint64_t chunkId = 1; - uint64_t sn = 1; - char fillCh = 'a'; + CopysetID copysetId = 100001; + uint64_t chunkId = 1; + uint64_t sn = 1; + char fillCh = 'a'; PeerId leader; curve::chunkserver::Configuration conf; @@ -70,9 +68,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "conf parse failed: " << FLAGS_confs; } - - - // 创建 copyset + // Create copyset { std::vector peers; conf.list_peers(&peers); @@ -105,8 +101,10 @@ int main(int argc, char *argv[]) { if (cntl.Failed()) { LOG(FATAL) << "create copyset fialed: " << cntl.ErrorText(); } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS //NOLINT - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT + if (response.status() == // NOLINT + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS // NOLINT + || response.status() == // NOLINT + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT LOG(INFO) << "create copyset success: " << response.status(); } else { LOG(FATAL) << "create copyset failed: "; @@ -116,11 +114,9 @@ int main(int argc, char *argv[]) { // wait leader ::usleep(1000 * FLAGS_election_timeout_ms); - butil::Status status = curve::chunkserver::WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - FLAGS_election_timeout_ms); //NOLINT + butil::Status status = + curve::chunkserver::WaitLeader(logicPoolId, copysetId, conf, &leader, + 
FLAGS_election_timeout_ms); // NOLINT LOG(INFO) << "leader is: " << leader.to_string(); if (0 != status.error_code()) { LOG(FATAL) << "Wait leader failed"; @@ -176,8 +172,5 @@ int main(int argc, char *argv[]) { } } - return 0; } - - diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3ddf32f27e..2c28a6015c 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - // 失败的情况下不应删除 + // Should not be deleted in case of failure EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); @@ -556,8 +556,7 @@ TEST_F(TrashTest, recycle_copyset_dir_list_err) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -569,8 +568,7 @@ TEST_F(TrashTest, recycle_copyset_dir_ok) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -607,18 +605,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // chunk_200_snap_1, abc +1 // log/ - using item4list = struct{ + using item4list = struct { std::string subdir; std::vector& names; }; std::vector action4List{ - { "", copysets }, - { "/4294967493.55555", dirs}, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, - { "/4294967494.55555", dirs}, - { "/4294967494.55555/data", chunks2 }, - { "/4294967494.55555/log", logfiles2 }, + {"", copysets}, + {"/4294967493.55555", dirs}, + {"/4294967493.55555/data", chunks1}, + {"/4294967493.55555/log", logfiles1}, + {"/4294967494.55555", dirs}, + {"/4294967494.55555/data", chunks2}, + {"/4294967494.55555/log", logfiles2}, }; for (auto& it : action4List) { @@ -627,18 +625,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { } EXPECT_CALL(*lfs, DirExists(_)) - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_100 - .WillOnce(Return(false)) // chunk_101 - .WillOnce(Return(true)) // log - .WillOnce(Return(false)) // curve_log_10086_10087 - .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 - .WillOnce(Return(false)) // log_10083_10084 - .WillOnce(Return(false)) // log_inprogress_10085 - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_200_snap_1 - .WillOnce(Return(false)) // abc - .WillOnce(Return(true)); // log + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_100 + .WillOnce(Return(false)) // chunk_101 + .WillOnce(Return(true)) // log + .WillOnce(Return(false)) // curve_log_10086_10087 + .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 + .WillOnce(Return(false)) // log_10083_10084 + .WillOnce(Return(false)) // log_inprogress_10085 + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_200_snap_1 + .WillOnce(Return(false)) // abc + .WillOnce(Return(true)); // log trash->Init(ops); ASSERT_EQ(5, trash->GetChunkNum()); @@ -657,14 +655,14 @@ TEST_F(TrashTest, test_chunk_num_statistic) { EXPECT_CALL(*lfs, DirExists(_)) .WillOnce(Return(true)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // data + 
.WillOnce(Return(true)) // data .WillOnce(Return(false)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // log + .WillOnce(Return(true)) // log .WillOnce(Return(false)) - .WillOnce(Return(true)) // raft_snapshot - .WillOnce(Return(true)) // temp - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // raft_snapshot + .WillOnce(Return(true)) // temp + .WillOnce(Return(true)) // data .WillOnce(Return(false)); std::string trashedCopysetDir = "/trash_test0/copysets/4294967495"; @@ -695,21 +693,21 @@ TEST_F(TrashTest, test_chunk_num_statistic) { std::vector raftfiles{RAFT_DATA_DIR, RAFT_LOG_DIR}; // DirExists - using item4dirExists = struct{ + using item4dirExists = struct { std::string subdir; bool exist; }; std::vector action4DirExists{ - { "", true }, - { "/4294967493.55555", true }, - { "/4294967493.55555/data", true }, - { "/4294967493.55555/log", true }, - { "/4294967493.55555/data/chunk_100", false }, - { "/4294967493.55555/data/chunk_101", false }, - { "/4294967493.55555/log/curve_log_10086_10087", false }, - { "/4294967493.55555/log/curve_log_inprogress_10088", false }, - { "/4294967493.55555/log/log_10083_10084", false }, - { "/4294967493.55555/log/log_inprogress_10085", false }, + {"", true}, + {"/4294967493.55555", true}, + {"/4294967493.55555/data", true}, + {"/4294967493.55555/log", true}, + {"/4294967493.55555/data/chunk_100", false}, + {"/4294967493.55555/data/chunk_101", false}, + {"/4294967493.55555/log/curve_log_10086_10087", false}, + {"/4294967493.55555/log/curve_log_inprogress_10088", false}, + {"/4294967493.55555/log/log_10083_10084", false}, + {"/4294967493.55555/log/log_inprogress_10085", false}, }; for (auto& it : action4DirExists) { @@ -719,10 +717,10 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // List std::vector action4List2{ - { "", copysets }, - { "/4294967493.55555", raftfiles }, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, + {"", copysets}, + {"/4294967493.55555", raftfiles}, + {"/4294967493.55555/data", chunks1}, + {"/4294967493.55555/log", logfiles1}, }; for (auto& it : action4List2) { @@ -735,16 +733,16 @@ TEST_F(TrashTest, test_chunk_num_statistic) { SetCopysetNeedDelete(trashPath + "/" + copysets[2], notNeedDelete); // RecycleFile - using item4CycleFile = struct{ + using item4CycleFile = struct { std::shared_ptr pool; std::string subdir; int ret; }; std::vector action4CycleFile{ - { pool, "/4294967493.55555/data/chunk_100", 0 }, - { pool, "/4294967493.55555/data/chunk_101", -1 }, - { walPool, "/4294967493.55555/log/curve_log_10086_10087", 0 }, - { walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1 }, + {pool, "/4294967493.55555/data/chunk_100", 0}, + {pool, "/4294967493.55555/data/chunk_101", -1}, + {walPool, "/4294967493.55555/log/curve_log_10086_10087", 0}, + {walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1}, }; for (auto& it : action4CycleFile) { diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index 2f092fc79f..4072bd60f4 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -20,37 +20,38 @@ * Author: tongguangxun */ -#include +#include "src/client/client_metric.h" + #include #include +#include -#include // NOLINT -#include // NOLINT -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT -#include "proto/nameserver2.pb.h" #include "include/client/libcurve.h" -#include "src/client/client_metric.h" -#include "src/client/file_instance.h" 
-#include "test/client/fake/mock_schedule.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" +#include "proto/nameserver2.pb.h" #include "src/client/client_common.h" #include "src/client/client_config.h" +#include "src/client/file_instance.h" +#include "src/client/libcurve_file.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" DECLARE_string(chunkserver_list); -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT namespace curve { namespace client { -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9150"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -64,7 +65,7 @@ const std::vector clientConf { }; TEST(MetricTest, ChunkServer_MetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -72,25 +73,26 @@ TEST(MetricTest, ChunkServer_MetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT std::string configpath("./test/client/configs/client_metric.conf"); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); ClientConfig cc; ASSERT_EQ(0, cc.Init(configpath.c_str())); - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -147,13 +149,13 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(8000); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -165,8 +167,8 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(-2, ret); - - // 4次正确读写,4次超时读写,超时会引起重试,重试次数为3,数据量最大是8192 + // 4 correct reads and writes, 4 timeout reads and writes, timeout will + // cause retries, retry count is 3, and the maximum data volume is 8192 ASSERT_EQ(fm->inflightRPCNum.get_value(), 0); ASSERT_EQ(fm->userRead.qps.count.get_value(), 2); ASSERT_EQ(fm->userWrite.qps.count.get_value(), 2); @@ -204,7 +206,7 @@ void cb(CurveAioContext* ctx) { } // namespace TEST(MetricTest, SlowRequestMetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -212,16 +214,17 @@ 
TEST(MetricTest, SlowRequestMetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -267,13 +270,13 @@ TEST(MetricTest, SlowRequestMetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(100); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -383,5 +386,5 @@ TEST(MetricTest, MetricHelperTest) { ASSERT_NO_THROW(MetricHelper::IncremSlowRequestNum(nullptr)); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 5606204b83..680d80ce93 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -20,30 +20,29 @@ * Author: tongguangxun */ -#include -#include +#include +#include #include #include -#include +#include +#include +#include #include #include -#include -#include - -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "src/client/client_config.h" -#include "test/client/fake/fakeMDS.h" #include "src/client/file_instance.h" #include "src/client/iomanager4file.h" #include "src/client/libcurve_file.h" #include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/fakeMDS.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -72,7 +71,7 @@ void sessioncallback(CurveAioContext* aioctx) { TEST(ClientSession, LeaseTaskTest) { FLAGS_chunkserver_list = - "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; + "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; std::string filename = "/1"; @@ -80,7 +79,7 @@ TEST(ClientSession, LeaseTaskTest) { FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid curve::client::EndPoint ep; butil::str2endpoint("127.0.0.1", 9176, &ep); PeerId pd(ep); @@ -104,7 +103,7 @@ TEST(ClientSession, LeaseTaskTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -117,8 +116,8 @@ TEST(ClientSession, LeaseTaskTest) { openresponse.set_allocated_protosession(se); openresponse.set_allocated_fileinfo(finfo); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice->SetOpenFile(openfakeret); // 2. 
set refresh response @@ -129,7 +128,7 @@ TEST(ClientSession, LeaseTaskTest) { std::unique_lock lk(mtx); refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename(filename); info->set_seqnum(2); info->set_id(1); @@ -143,8 +142,8 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); refreshresp.set_sessionid("1234"); refreshresp.set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice->SetRefreshSession(refreshfakeret, refresht); // 3. open the file @@ -253,10 +252,9 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_allocated_fileinfo(newFileInfo); refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* refreshFakeRetWithNewInodeId = new FakeReturn( - nullptr, static_cast(&refreshresp)); - curvefsservice->SetRefreshSession( - refreshFakeRetWithNewInodeId, refresht); + FakeReturn* refreshFakeRetWithNewInodeId = + new FakeReturn(nullptr, static_cast(&refreshresp)); + curvefsservice->SetRefreshSession(refreshFakeRetWithNewInodeId, refresht); { std::unique_lock lk(mtx); @@ -302,8 +300,8 @@ TEST(ClientSession, LeaseTaskTest) { // 11. set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice->SetCloseFile(closefileret); LOG(INFO) << "uninit fileinstance"; @@ -321,12 +319,12 @@ TEST(ClientSession, LeaseTaskTest) { } // namespace client } // namespace curve -std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9101,127.0.0.1:9102"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -337,18 +335,17 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.maxRetryMS=5000") -}; + std::string("mds.maxRetryMS=5000")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/client_unstable_helper_test.cpp b/test/client/client_unstable_helper_test.cpp index cbb62891a5..4ef1c6487c 100644 --- a/test/client/client_unstable_helper_test.cpp +++ b/test/client/client_unstable_helper_test.cpp @@ -20,10 +20,11 @@ * Author: wuhanqing */ -#include -#include -#include #include 
+#include +#include +#include + #include #include "src/client/unstable_helper.h" @@ -48,50 +49,51 @@ TEST(UnstableHelperTest, normal_test) { chunkservers.emplace_back(std::make_pair(i, ep)); } - // 先对每个chunkserver进行10次连续超时 + // First, perform 10 consecutive timeouts on each chunkserver for (const auto& cs : chunkservers) { for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(cs.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - cs.first, cs.second)); + helper.GetCurrentUnstableState(cs.first, cs.second)); } } - // 再对每个chunkserver增加一次超时 - // 前两个是chunkserver unstable状态,第三个是server unstable + // Add another timeout to each chunkserver + // The first two are in the chunkserver unstable state, and the third is in + // the server unstable state helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); ASSERT_EQ(UnstableState::ServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 继续增加超时次数 - // 这种情况下,每次都是chunkserver unstable + // Continue to increase the number of timeouts + // In this case, it is always chunkserver unstable helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 新chunkserver第一次超时,根据ip判断,可以直接设置为chunkserver unstable + // The first timeout of a new chunkserver can be directly set to chunkserver + // unstable based on the IP address butil::EndPoint ep; butil::str2endpoint("127.100.0.1:60999", &ep); auto chunkserver4 = std::make_pair(4, ep); @@ -99,22 +101,22 @@ TEST(UnstableHelperTest, normal_test) { helper.IncreTimeout(chunkserver4.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver4.first, chunkserver4.second)); + helper.GetCurrentUnstableState(chunkserver4.first, + chunkserver4.second)); - // 其他ip的chunkserver + // Chunkservers for other IPs butil::str2endpoint("127.200.0.1:60999", &ep); auto chunkserver5 = std::make_pair(5, ep); for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } 
helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } } // namespace client diff --git a/test/client/client_userinfo_unittest.cpp b/test/client/client_userinfo_unittest.cpp index 6153f23e5e..442af59c6f 100644 --- a/test/client/client_userinfo_unittest.cpp +++ b/test/client/client_userinfo_unittest.cpp @@ -20,23 +20,23 @@ * Author: tongguangxun */ -#include +#include #include #include -#include +#include -#include // NOLINT #include +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "include/client/libcurve.h" #include "src/client/client_common.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" #include "src/client/iomanager4chunk.h" +#include "src/client/libcurve_file.h" #include "src/client/libcurve_snapshot.h" +#include "test/client/fake/fakeMDS.h" extern std::string mdsMetaServerAddr; extern std::string configpath; @@ -70,8 +70,8 @@ class CurveClientUserAuthFail : public ::testing::Test { ASSERT_EQ(0, server.Join()); } - brpc::Server server; - MetaServerOption metaopt; + brpc::Server server; + MetaServerOption metaopt; FakeMDSCurveFSService curvefsservice; FakeMDSTopologyService topologyservice; }; @@ -102,7 +102,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -115,16 +115,16 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { openresponse.mutable_fileinfo()->set_seqnum(2); openresponse.mutable_fileinfo()->set_filename(filename); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret); // 1. 
create a File authfailed ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); size_t len = 4 * 1024 * 1024ul; @@ -138,7 +138,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { LOG(INFO) << "get refresh session request!"; refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::ReFreshSessionResponse refreshresp; refreshresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); refreshresp.set_sessionid("1234"); @@ -147,12 +147,13 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { refreshresp.mutable_fileinfo()->set_filename(filename); refreshresp.mutable_fileinfo()->set_id(1); refreshresp.mutable_fileinfo()->set_parentid(0); - refreshresp.mutable_fileinfo()->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + refreshresp.mutable_fileinfo()->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT refreshresp.mutable_fileinfo()->set_chunksize(4 * 1024 * 1024); refreshresp.mutable_fileinfo()->set_length(4 * 1024 * 1024 * 1024ul); refreshresp.mutable_fileinfo()->set_ctime(12345678); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, refresht); // 3. open the file auth failed @@ -161,47 +162,47 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // 4. open file success openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* openfakeret2 - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret2); openret = fileinstance.Open(); ASSERT_EQ(openret, LIBCURVE_ERROR::OK); -/* - // 5. wait for refresh - for (int i = 0; i < 4; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + /* + // 5. wait for refresh + for (int i = 0; i < 4; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - CurveAioContext aioctx; - aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; - aioctx.length = 4 * 1024 * 1024 + 8 * 1024; - aioctx.ret = LIBCURVE_ERROR::OK; - aioctx.cb = sessioncallback; - aioctx.buf = nullptr; - - fileinstance.AioRead(&aioctx); - fileinstance.AioWrite(&aioctx); - - for (int i = 0; i < 1; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + CurveAioContext aioctx; + aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; + aioctx.length = 4 * 1024 * 1024 + 8 * 1024; + aioctx.ret = LIBCURVE_ERROR::OK; + aioctx.cb = sessioncallback; + aioctx.buf = nullptr; + + fileinstance.AioRead(&aioctx); + fileinstance.AioWrite(&aioctx); + + for (int i = 0; i < 1; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - char buffer[10]; - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); -*/ + char buffer[10]; + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); + */ // 6. 
set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(closefileret); ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, fileinstance.Close()); @@ -235,12 +236,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -255,54 +255,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - emptyuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, emptyuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16 * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; 
LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - emptyuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, emptyuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -311,7 +308,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -319,20 +317,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - emptyuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, emptyuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); + cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; @@ -341,7 +338,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { delete delfakeret; } -// root user测试 +// Root user testing TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ClientConfigOption opt; opt.metaServerOpt.rpcRetryOpt.rpcTimeoutMs = 500; @@ -359,7 +356,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ASSERT_TRUE(!cl.Init(opt)); UserInfo_t rootuserinfo; - rootuserinfo.owner ="root"; + rootuserinfo.owner = "root"; rootuserinfo.password = "123"; std::string filename = "./1_usertest_.img"; @@ -370,12 +367,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -390,54 +386,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - 
ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - rootuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, rootuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16ull*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16ull * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - rootuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, rootuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -446,7 +439,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -454,21 +448,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - rootuserinfo, - seq, 
&sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, rootuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, rootuserinfo, - &seqvec, &fivec)); + cl.ListSnapShot(filename, rootuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; diff --git a/test/common/bitmap_test.cpp b/test/common/bitmap_test.cpp index 8bb85b01ad..8e80e255df 100644 --- a/test/common/bitmap_test.cpp +++ b/test/common/bitmap_test.cpp @@ -20,10 +20,10 @@ * Author: yangyaokai */ -#include - #include "src/common/bitmap.h" +#include + namespace curve { namespace common { @@ -62,7 +62,7 @@ TEST(BitmapTEST, constructor_test) { delete[] mem; } - // 测试拷贝构造 + // Test copy construction { Bitmap bitmap1(32); Bitmap bitmap2(bitmap1); @@ -72,7 +72,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试赋值操作 + // Test assignment operation { Bitmap bitmap1(32); Bitmap bitmap2(16); @@ -88,7 +88,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试比较操作符 + // Test comparison operator { Bitmap bitmap1(16); Bitmap bitmap2(16); @@ -229,7 +229,7 @@ TEST(BitmapTEST, divide_test) { vector clearRanges; vector setRanges; - // 所有位为0 + // All bits are 0 { bitmap.Clear(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -241,7 +241,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 所有位为1 + // All bits are 1 { bitmap.Set(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -253,7 +253,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为clear range,末尾为set range + // Two ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 16); bitmap.Set(17, 31); @@ -268,7 +268,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为 set range,末尾为 clear range + // Two ranges, starting with set range and ending with clear range { bitmap.Set(0, 16); bitmap.Clear(17, 31); @@ -283,7 +283,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 set range,中间为 clear range + // Three ranges, with set range at the beginning and end, and clear range + // in the middle { bitmap.Set(0, 8); bitmap.Clear(9, 25); @@ -301,7 +302,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 clear range,中间为 set range + // Three ranges, with clear range at the beginning and end, and set range + // in the middle { bitmap.Clear(0, 8); bitmap.Set(9, 25); @@ -319,7 +321,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 clear range,末尾为 set range + // Four ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 7); bitmap.Set(8, 15); @@ -340,7 +342,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 set range,末尾为 clear range + // Four ranges, starting with set range and ending with clear range { bitmap.Set(0, 7); bitmap.Clear(8, 15); @@ -361,7 +363,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 复杂场景随机偏移测试 + // Random offset testing for complex scenes { bitmap.Set(0, 5); bitmap.Clear(6, 9); diff --git a/test/common/channel_pool_test.cpp b/test/common/channel_pool_test.cpp index e327f6f82f..d573142cf0 100644 --- a/test/common/channel_pool_test.cpp +++ b/test/common/channel_pool_test.cpp @@ -20,30 +20,30 @@ * Author: charisu */ -#include - #include "src/common/channel_pool.h" +#include + namespace curve { namespace common { TEST(Common, ChannelPool) { ChannelPool channelPool; ChannelPtr channelPtr; - // 地址非法,init失败 + // Illegal address, init failed 
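The divide_test cases above are all instances of one rule: Divide(start, end, &clearRanges, &setRanges) partitions the bit interval [start, end] into maximal runs of 0-bits (clear ranges) and 1-bits (set ranges), in order. A self-contained sketch of that semantics (the BitRange field names are assumed here for illustration):

    #include <cstdint>
    #include <vector>

    struct BitRange { uint32_t beginIndex; uint32_t endIndex; };

    // Splits [start, end] of `bits` into maximal clear/set runs, mirroring
    // what the Divide assertions above expect. Assumes bits.size() > end.
    void Divide(const std::vector<bool>& bits, uint32_t start, uint32_t end,
                std::vector<BitRange>* clearRanges,
                std::vector<BitRange>* setRanges) {
        uint32_t runStart = start;
        for (uint32_t i = start; i <= end; ++i) {
            if (i == end || bits[i + 1] != bits[i]) {  // current run ends here
                BitRange range{runStart, i};
                (bits[i] ? setRanges : clearRanges)->push_back(range);
                runStart = i + 1;
            }
        }
    }
    // e.g. clear [0,8], set [9,25], clear [26,31] over [0,31] yields
    // clearRanges = {{0,8}, {26,31}} and setRanges = {{9,25}}.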
std::string addr = "127.0.0.1:80000"; ASSERT_EQ(-1, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_FALSE(channelPtr); - // 地址合法,init成功 + // The address is legal, init succeeded addr = "127.0.0.1:8000"; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_TRUE(channelPtr); - // 同一个地址应该返回同一个channelPtr + // The same address should return the same channelPtr ChannelPtr channelPtr2; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr2)); ASSERT_TRUE(channelPtr2); ASSERT_EQ(channelPtr, channelPtr2); - // 清空 + // Clear channelPool.Clear(); } diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp index cb44a36b09..ac658e9a90 100644 --- a/test/common/task_thread_pool_test.cpp +++ b/test/common/task_thread_pool_test.cpp @@ -20,26 +20,27 @@ * Author: wudemiao */ +#include "src/common/concurrent/task_thread_pool.h" + #include -#include #include +#include #include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace common { using curve::common::CountDownEvent; -void TestAdd1(int a, double b, CountDownEvent *cond) { +void TestAdd1(int a, double b, CountDownEvent* cond) { double c = a + b; (void)c; cond->Signal(); } -int TestAdd2(int a, double b, CountDownEvent *cond) { +int TestAdd2(int a, double b, CountDownEvent* cond) { double c = a + b; (void)c; cond->Signal(); @@ -47,7 +48,7 @@ int TestAdd2(int a, double b, CountDownEvent *cond) { } TEST(TaskThreadPool, basic) { - /* 测试线程池 start 入参 */ + /* Test thread pool start input parameter */ { TaskThreadPool<> taskThreadPool; ASSERT_EQ(-1, taskThreadPool.Start(2, 0)); @@ -74,7 +75,7 @@ TEST(TaskThreadPool, basic) { } { - /* 测试不设置,此时为 INT_MAX */ + /* Test not set, at this time it is INT_MAX */ TaskThreadPool<> taskThreadPool; ASSERT_EQ(0, taskThreadPool.Start(4)); ASSERT_EQ(INT_MAX, taskThreadPool.QueueCapacity()); @@ -92,7 +93,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond1(1); taskThreadPool.Enqueue(TestAdd1, 1, 1.234, &cond1); cond1.Wait(); - /* TestAdd2 是有返回值的 function */ + /* TestAdd2 is a function with a return value */ CountDownEvent cond2(1); taskThreadPool.Enqueue(TestAdd2, 1, 1.234, &cond2); cond2.Wait(); @@ -100,7 +101,7 @@ TEST(TaskThreadPool, basic) { taskThreadPool.Stop(); } - /* 基本运行 task 测试 */ + /* Basic task testing */ { std::atomic runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -133,14 +134,14 @@ TEST(TaskThreadPool, basic) { t2.join(); t3.join(); - /* 等待所有 task 执行完毕 */ + /* Wait for all tasks to complete execution */ cond.Wait(); ASSERT_EQ(3 * kMaxLoop, runTaskCount.load(std::memory_order_acquire)); taskThreadPool.Stop(); } - /* 测试队列满了,push会阻塞 */ + /* The test queue is full, push will block */ { std::atomic runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -157,8 +158,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond4(1); CountDownEvent startRunCond4(1); - auto waitTask = [&](CountDownEvent* sigCond, - CountDownEvent* waitCond) { + auto waitTask = [&](CountDownEvent* sigCond, CountDownEvent* waitCond) { sigCond->Signal(); waitCond->Wait(); runTaskCount.fetch_add(1, std::memory_order_acq_rel); @@ -169,12 +169,15 @@ TEST(TaskThreadPool, basic) { ASSERT_EQ(kQueueCapacity, taskThreadPool.QueueCapacity()); ASSERT_EQ(kThreadNums, taskThreadPool.ThreadOfNums()); - /* 把线程池的所有处理线程都卡住了 */ + /* Stuck all processing threads in the thread pool */ taskThreadPool.Enqueue(waitTask, &startRunCond1, &cond1); taskThreadPool.Enqueue(waitTask, &startRunCond2, &cond2); 
taskThreadPool.Enqueue(waitTask, &startRunCond3, &cond3); taskThreadPool.Enqueue(waitTask, &startRunCond4, &cond4); - /* 等待 waitTask1、waitTask2、waitTask3、waitTask4 都开始运行 */ + /* + * Wait for waitTask1, waitTask2, waitTask3, and waitTask4 + * to start running + */ startRunCond1.Wait(); startRunCond2.Wait(); startRunCond3.Wait(); @@ -186,7 +189,7 @@ TEST(TaskThreadPool, basic) { runTaskCount.fetch_add(1, std::memory_order_acq_rel); }; - /* 记录线程 push 到线程池 queue 的 task 数量 */ + /* Record the number of tasks each thread pushes to the pool queue */ std::atomic pushTaskCount1; std::atomic pushTaskCount2; std::atomic pushTaskCount3; @@ -208,7 +211,7 @@ TEST(TaskThreadPool, basic) { std::thread t2(std::bind(threadFunc, &pushTaskCount2)); std::thread t3(std::bind(threadFunc, &pushTaskCount3)); - /* 等待线程池 queue 被 push 满 */ + /* Wait for the thread pool queue to fill up */ int pushTaskCount; while (true) { ::usleep(50); @@ -222,32 +225,33 @@ } } - /* push 进去的 task 都没有被执行 */ + /* None of the pushed tasks have been executed yet */ ASSERT_EQ(0, runTaskCount.load(std::memory_order_acquire)); /** - * 此时,thread pool 的 queue 肯定 push 满了,且 push - * 满了之后就没法再 push 了 + * At this point the thread pool queue must be full, and once it is + * full nothing more can be pushed */ ASSERT_EQ(pushTaskCount, taskThreadPool.QueueCapacity()); ASSERT_EQ(taskThreadPool.QueueCapacity(), taskThreadPool.QueueSize()); - /* 将线程池中的线程都唤醒 */ + /* Wake up all threads in the thread pool */ cond1.Signal(); cond2.Signal(); cond3.Signal(); cond4.Signal(); - /* 等待所有 task 执行完成 */ + /* Wait for all tasks to finish executing */ while (true) { ::usleep(10); - if (runTaskCount.load(std::memory_order_acquire) - >= 4 + 3 * kMaxLoop) { + if (runTaskCount.load(std::memory_order_acquire) >= + 4 + 3 * kMaxLoop) { break; } } /** - * 等待所有的 push thread 退出,这样才能保证 pushThreadCount 计数更新了 + * Wait for all push threads to exit so that + * the pushThreadCount count is updated */ pushThreadCond.Wait(); diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index a36bfedcee..6181e40bfa 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -24,8 +24,8 @@ #include #include -#include #include +#include #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/chunkservice_op.h" @@ -49,24 +49,23 @@ static constexpr uint32_t kOpRequestAlignSize = 4096; const char* kFakeMdsAddr = "127.0.0.1:9079"; -static const char *chunkServerParams[1][16] = { - { "chunkserver", "-chunkServerIp=127.0.0.1", - "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, - "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", - "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkserver.dat", - "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", - "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", - "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkfilepool.meta", - "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", - "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/walfilepool.meta", - "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", -
"-raft_sync_segments=true", NULL }, +static const char* chunkServerParams[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", + "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, + "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", + "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkserver.dat", + "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", + "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", + "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkfilepool.meta", + "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", + "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool.meta", + "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", + "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -107,7 +106,7 @@ class ChunkServerIoTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; params_.push_back(const_cast(chunkServerParams[0])); - // 初始化chunkfilepool,这里会预先分配一些chunk + // Initialize chunkfilepool, where some chunks will be pre allocated lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + "/chunkfilepool/"; @@ -125,11 +124,11 @@ class ChunkServerIoTest : public testing::Test { ::system(rmdir1.c_str()); - // 等待进程结束 + // Waiting for the process to end ::usleep(100 * 1000); } - int InitCluster(PeerCluster *cluster) { + int InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs_); @@ -139,7 +138,7 @@ class ChunkServerIoTest : public testing::Test { return -1; } - // 等待leader产生 + // Waiting for the leader to be generated if (cluster->WaitLeader(&leaderPeer_)) { LOG(ERROR) << "WaiteLeader failed"; return -1; @@ -168,45 +167,50 @@ class ChunkServerIoTest : public testing::Test { std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - /* 场景一:新建的文件,Chunk文件不存在 */ + /* + * Scenario 1: Newly created file, Chunk file does not exist + */ ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, length, nullptr)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - /* 场景二:通过WriteChunk产生chunk文件后操作 */ + /* + * Scenario 2: After generating a chunk file through WriteChunk, + * perform the operation + */ data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); + data.c_str(), &chunkData)); ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunkId, sn1, NULL_SN, leader)); - ASSERT_EQ(0, verify->VerifyReadChunk( - chunkId, sn1, 0, 4 * KB, &chunkData)); + ASSERT_EQ(0, + verify->VerifyReadChunk(chunkId, sn1, 0, 4 * KB, &chunkData)); ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, kChunkSize - 4 * KB, - 4 * KB, nullptr)); + 4 * KB, nullptr)); data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - 
verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length * 2, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 8 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); - /* 场景三:用户删除文件 */ + /* Scenario 3: User deletes the file */ ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); } void TestSnapshotIO(std::shared_ptr<ChunkServiceVerify> verify) { @@ -217,150 +221,164 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn3 = 3; int length = kOpRequestAlignSize; std::string data(length * 4, 0); - std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 - std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 - std::string chunkData1c(kChunkSize, 0); // chunk1版本3预期数据 - std::string chunkData2(kChunkSize, 0); // chunk2预期数据 + std::string chunkData1a(kChunkSize, + 0); // chunk1 version 1 expected data + std::string chunkData1b(kChunkSize, + 0); // chunk1 version 2 expected data + std::string chunkData1c(kChunkSize, + 0); // chunk1 version 3 expected data + std::string chunkData2(kChunkSize, 0); // chunk2 expected data std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - // 构造初始环境 - // 写chunk1产生chunk1,chunk1版本为1,chunk2开始不存在。 + // Construct the initial environment: + // writing to chunk1 creates it at version 1; chunk2 does not exist + // initially.
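The snapshot scenarios below all exercise copy-on-write: the first write to a region after a snapshot must preserve the old data in the snapshot chunk, later reads at the old sequence number must still see it, and repeated writes to the same region must not copy again. A toy model of that invariant (illustrative only, not the datastore implementation):

    #include <cassert>
    #include <map>
    #include <string>

    // Toy chunk with one snapshot: curData holds the live version, snapData
    // holds bytes preserved by COW the first time a region is overwritten.
    struct ToyChunk {
        std::string curData = std::string(16, 'a');  // version sn=1 contents
        std::map<size_t, char> snapData;             // COW'd bytes by offset

        void WriteAt(size_t off, char c) {
            // First overwrite after the snapshot: preserve the old byte (COW).
            if (snapData.find(off) == snapData.end()) {
                snapData[off] = curData[off];
            }
            curData[off] = c;  // a second write to the same offset must not
                               // re-copy, or the snapshot would be corrupted
        }

        char ReadSnapAt(size_t off) const {
            auto it = snapData.find(off);
            return it != snapData.end() ? it->second : curData[off];
        }
    };

    int main() {
        ToyChunk chunk;
        chunk.WriteAt(0, 'b');               // triggers COW
        chunk.WriteAt(0, 'c');               // must not COW again
        assert(chunk.ReadSnapAt(0) == 'a');  // snapshot still sees version 1
    }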
data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn1, 0, 12 * KB, - data.c_str(), &chunkData1a)); + data.c_str(), &chunkData1a)); /* - * 场景一:第一次给文件打快照 - */ + * Scenario 1: Taking a snapshot of the file for the first time + */ - chunkData1b.assign(chunkData1a); // 模拟对chunk1数据进行COW + chunkData1b.assign(chunkData1a); // Simulate COW on chunk1 data data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); - // 重复写入同一区域,用于验证不会重复cow + data.c_str(), &chunkData1b)); + // Write repeatedly to the same area to verify that COW is not + // performed twice data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 读取chunk1快照,预期读到版本1数据 + // Reading chunk1 snapshot, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // chunk1写[0, 4KB] + // Chunk1 write [0, 4KB] data.assign(length, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 0, 4 * KB, - data.c_str(), &chunkData1b)); - // chunk1写[4KB, 16KB] + data.c_str(), &chunkData1b)); + // Chunk1 write [4KB, 16KB] data.assign(length, 'e'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 12 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b)); - // 获取chunk1信息,预期其版本为2,快照版本为1, + // Obtain chunk1 information, expecting version 2 and snapshot + // version 1 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, sn1, leader)); - // chunk1读[0, 12KB], 预期读到版本2数据 + // Chunk1 read [0, 12KB], expected to read version 2 data - ASSERT_EQ(0, - verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); - // 读取chunk1的快照, 预期读到版本1数据 + // Reading snapshot of chunk1, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a)); - // 读取chunk2的快照, 预期chunk不存在 + // Reading snapshot of chunk2, expected chunk not to exist - ASSERT_EQ(0, verify->VerifyReadChunkSnapshot( - chunk2, sn1, 0, 12 * KB, nullptr)); + ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn1, 0, 12 * KB, + nullptr)); /* - * 场景二:第一次快照结束,删除快照 - */ + * Scenario 2: The first snapshot ends and the snapshot is deleted + */ - // 删除chunk1快照 + // Delete chunk1 snapshot ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); - // 获取chunk1信息,预期其版本为2,无快照版本 + // Obtain chunk1 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, NULL_SN, leader)); - // 删chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); - // chunk2写[0, 8KB] + // Chunk2 write [0, 8KB] data.assign(length, 'f'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn2, 0, 8 * KB, - data.c_str(), &chunkData2)); + data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为2,无快照版本 + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); /* - * 场景三:第二次打快照 - */ - // chunk1写[0, 8KB] - chunkData1c.assign(chunkData1b); // 模拟对chunk1数据进行COW + * Scenario 3: Taking a second snapshot + */
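Scenario 3 repeats the cycle at the next sequence number: writing with sn3 while chunk1 sits at sn2 is what implicitly opens the second snapshot generation. Roughly, the decision a chunkserver makes for each write looks like the following (a simplified sketch under that assumption, not the actual datastore code):

    #include <cstdint>

    // Simplified decision logic for a write carrying sequence number reqSn,
    // against a chunk currently at curSn.
    enum class WriteAction { kDirect, kStartCowThenWrite, kReject };

    WriteAction OnWrite(uint64_t reqSn, uint64_t curSn) {
        if (reqSn < curSn) return WriteAction::kReject;   // stale writer
        if (reqSn == curSn) return WriteAction::kDirect;  // same generation
        // reqSn > curSn: a snapshot was taken; the first write to each
        // region must COW the old data before updating the live chunk.
        return WriteAction::kStartCowThenWrite;
    }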
+ // Chunk1 write [0, 8KB] + chunkData1c.assign(chunkData1b); // Simulate COW on chunk1 data data.assign(length, 'g'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn3, 0, 8 * KB, - data.c_str(), &chunkData1c)); - // 获取chunk1信息,预期其版本为3,快照版本为2 + data.c_str(), &chunkData1c)); + // Obtain chunk1 information, expect its version to be 3 and snapshot + // version to be 2 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn3, sn2, leader)); - // 读取chunk1的快照, 预期读到版本2数据 + // Reading snapshot of chunk1, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn2, 0, 12 * KB, - &chunkData1b)); + &chunkData1b)); - // 读取chunk2的快照, 预期读到版本2数据 + // Reading snapshot of chunk2, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn2, 0, 8 * KB, - &chunkData2)); + &chunkData2)); - // 删除chunk1文件,预期成功,本地快照存在的情况下,会将快照也一起删除 + // Delete chunk1 file, expected success. If the local snapshot exists, + // the snapshot will also be deleted together ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); + verify->VerifyDeleteChunk(chunk1, sn3)); /* - * 场景四:第二次快照结束,删除快照 - */ - // 删除chunk1快照,因为chunk1及其快照上一步已经删除,预期成功 + * Scenario 4: The second snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot because chunk1 and its snapshot have been + // deleted in the previous step and are expected to succeed ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); - // 获取chunk2信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'h'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'i'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); /* - * 场景五:用户删除文件 - */ - // 删除chunk1,已不存在,预期成功 + * Scenario 5: User deletes files + */ + // Delete chunk1, it no longer exists, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2,预期成功 + verify->VerifyDeleteChunk(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); + // Delete chunk2, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - 
verify->VerifyDeleteChunk(chunk2, sn3)); - // 获取chunk2信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk2, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunk(chunk2, sn3)); + // Obtaining chunk2 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk2, NULL_SN, NULL_SN, leader)); } public: @@ -370,7 +388,7 @@ class ChunkServerIoTest : public testing::Test { CopysetID copysetId_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; std::string externalIp_; private: @@ -391,8 +409,8 @@ class ChunkServerIoTest : public testing::Test { * */ TEST_F(ChunkServerIoTest, BasicIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } @@ -401,15 +419,15 @@ TEST_F(ChunkServerIoTest, BasicIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } TEST_F(ChunkServerIoTest, SnapshotIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } @@ -417,8 +435,8 @@ TEST_F(ChunkServerIoTest, SnapshotIO) { TEST_F(ChunkServerIoTest, SnapshotIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index 58ce282336..6b9da23b79 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -20,9 +20,9 @@ * Author: qinyi */ -#include -#include #include +#include +#include #include #include @@ -30,14 +30,14 @@ #include #include "include/client/libcurve.h" -#include "src/common/s3_adapter.h" -#include "src/common/timeutility.h" -#include "src/client/inflight_controller.h" #include "src/chunkserver/cli2.h" +#include "src/client/inflight_controller.h" #include "src/common/concurrent/count_down_event.h" -#include "test/integration/common/chunkservice_op.h" +#include "src/common/s3_adapter.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" +#include "test/integration/common/chunkservice_op.h" #include "test/util/config_generator.h" using curve::CurveCluster; @@ -91,11 +91,11 @@ const uint32_t kChunkSize = 16 * 1024 * 1024; const uint32_t kChunkServerMaxIoSize = 64 * 1024; const std::vector mdsConf0{ - { "--confPath=" + MDS0_CONF_PATH }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME }, - { "--sessionInterSec=20" }, - { "--etcdAddr=" + ETCD_CLIENT_IP_PORT }, + {"--confPath=" + MDS0_CONF_PATH}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME}, + 
{"--sessionInterSec=20"}, + {"--etcdAddr=" + ETCD_CLIENT_IP_PORT}, }; const std::vector mdsFileConf0{ @@ -129,73 +129,67 @@ const std::vector csCommonConf{ }; const std::vector chunkserverConf1{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER0_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER0_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER1_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + 
{"-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER1_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER2_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER2_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta"}}; namespace curve { namespace chunkserver { @@ -203,7 +197,9 @@ namespace chunkserver { class CSCloneRecoverTest : public ::testing::Test { public: CSCloneRecoverTest() - : logicPoolId_(1), copysetId_(1), chunkData1_(kChunkSize, 'X'), + : logicPoolId_(1), + copysetId_(1), + chunkData1_(kChunkSize, 'X'), chunkData2_(kChunkSize, 'Y') {} void SetUp() { @@ -217,11 +213,11 @@ class CSCloneRecoverTest : public ::testing::Test { s3Conf); cluster_->PrepareConfig(MDS0_CONF_PATH, mdsFileConf0); - // 生成chunkserver配置文件 + // Generate chunkserver configuration file cluster_->PrepareConfig(CHUNKSERVER_CONF_PATH, csCommonConf); - // 1. 启动etcd + // 1. 
Start etcd LOG(INFO) << "begin to start etcd"; pid_t pid = cluster_->StartSingleEtcd( 1, ETCD_CLIENT_IP_PORT, ETCD_PEER_IP_PORT, @@ -231,19 +227,20 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_GT(pid, 0); ASSERT_TRUE(cluster_->WaitForEtcdClusterAvalible(5)); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster_->StartSingleMDS(0, MDS0_IP_PORT, MDS0_DUMMY_PORT, - mdsConf0, true); + mdsConf0, true); LOG(INFO) << "mds 0 started on " + MDS0_IP_PORT + ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 生成topo.json + // Generate topo.json Json::Value topo; Json::Value servers; std::string chunkServerIpPort[] = {CHUNK_SERVER0_IP_PORT, - CHUNK_SERVER1_IP_PORT, - CHUNK_SERVER2_IP_PORT}; + CHUNK_SERVER1_IP_PORT, + CHUNK_SERVER2_IP_PORT}; for (int i = 0; i < 3; ++i) { Json::Value server; std::vector ipPort; @@ -278,7 +275,7 @@ class CSCloneRecoverTest : public ::testing::Test { topoConf << topo.toStyledString(); topoConf.close(); - // 3. 创建物理池 + // 3. Creating a physical pool string createPPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -291,13 +288,12 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createPPCmd: " << createPPCmd; ret = system(createPPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster_->StartSingleChunkServer(1, CHUNK_SERVER0_IP_PORT, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " + CHUNK_SERVER0_IP_PORT + @@ -319,7 +315,8 @@ class CSCloneRecoverTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first string createLPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -331,27 +328,26 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createLPCmd: " << createLPCmd; ret = system(createLPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 获取chunkserver主节点 + // Obtain the chunkserver master node logicPoolId_ = 1; copysetId_ = 1; ASSERT_EQ(0, chunkSeverGetLeader()); - struct ChunkServiceOpConf conf0 = { &leaderPeer_, logicPoolId_, - copysetId_, 5000 }; + struct ChunkServiceOpConf conf0 = {&leaderPeer_, logicPoolId_, + copysetId_, 5000}; opConf_ = conf0; - // 6. 初始化client配置 + // 6. Initialize client configuration LOG(INFO) << "init globalclient"; ret = Init(clientConfPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 先睡眠5s,让chunkserver选出leader + // 7. Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); s3Adapter_.Init(kS3ConfigPath); @@ -417,10 +413,11 @@ class CSCloneRecoverTest : public ::testing::Test { system(("mkdir " + CHUNKSERVER2_BASE_DIR + "/filepool").c_str())); } - /**下发一个写请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否成功完成 + /** + * Issue a write request and wait for its completion. + * @param offset: The offset for the current IO to be issued. + * @param size: The size of the IO to be issued. 
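+     * The helper follows the usual libcurve async pattern: fill in a
+     * CurveAioContext, issue AioWrite(), and block on a CountDownEvent that
+     * the completion callback signals. Sketch of the flow (same names as the
+     * code below; assume `buffer` holds `size` bytes):
+     *
+     *   gCond.Reset(1);                  // expect exactly one completion
+     *   auto* ctx = new CurveAioContext();
+     *   ctx->offset = offset;
+     *   ctx->length = size;
+     *   ctx->buf = buffer;
+     *   ctx->cb = writeCallBack;         // frees buffer/ctx, signals gCond
+     *   AioWrite(fd_, ctx);
+     *   gCond.Wait();                    // woken whether IO succeeded or not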
+ * @return: Whether the IO has been successfully completed. */ bool HandleAioWriteRequest(uint64_t offset, uint64_t size, const char* data) { @@ -432,7 +429,8 @@ class CSCloneRecoverTest : public ::testing::Test { char* buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -447,8 +445,7 @@ class CSCloneRecoverTest : public ::testing::Test { int ret; if ((ret = AioWrite(fd_, context))) { - LOG(ERROR) << "failed to send aio write request, err=" - << ret; + LOG(ERROR) << "failed to send aio write request, err=" << ret; return false; } @@ -460,11 +457,12 @@ class CSCloneRecoverTest : public ::testing::Test { return true; } - /**下发一个读请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @data: 读出的数据 - * @return: IO是否成功完成 + /** + * Issue a read request and wait for its completion. + * @param offset: The current offset for the IO to be issued. + * @param size: The size of the IO to be issued. + * @param data: The read data. + * @return Whether the IO is successfully completed. */ bool HandleAioReadRequest(uint64_t offset, uint64_t size, char* data) { gCond.Reset(1); @@ -473,7 +471,8 @@ class CSCloneRecoverTest : public ::testing::Test { auto readCallBack = [](CurveAioContext* context) { gIoRet = context->ret; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -485,8 +484,7 @@ class CSCloneRecoverTest : public ::testing::Test { context->cb = readCallBack; int ret; if ((ret = AioRead(fd_, context))) { - LOG(ERROR) << "failed to send aio read request, err=" - << ret; + LOG(ERROR) << "failed to send aio read request, err=" << ret; return false; } @@ -547,7 +545,7 @@ class CSCloneRecoverTest : public ::testing::Test { return -1; } - // 先睡眠5s,让chunkserver选出leader + // Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); status = curve::chunkserver::GetLeader(logicPoolId_, copysetId_, csConf, &leaderPeer_); @@ -559,26 +557,26 @@ class CSCloneRecoverTest : public ::testing::Test { } void prepareSourceDataInCurve() { - // 创建一个curveFS文件 + // Create a curveFS file LOG(INFO) << "create source curveFS file: " << CURVEFS_FILENAME; fd_ = curve::test::FileCommonOperation::Open(CURVEFS_FILENAME, "curve"); ASSERT_NE(fd_, -1); - // 写数据到curveFS的第1个chunk + // Write data to the first chunk of curveFS LOG(INFO) << "Write first 16MB of source curveFS file"; ASSERT_TRUE(HandleAioWriteRequest(0, kChunkSize, chunkData1_.c_str())); - // 读出数据进行验证 + // Read data for verification std::unique_ptr temp(new char[kChunkSize]); ASSERT_TRUE(HandleAioReadRequest(0, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData1_.c_str(), temp.get(), kChunkSize)); - // 写数据到curveFS的第2个chunk + // Write data to the second chunk of curveFS LOG(INFO) << "Write second 16MB of source curveFS file"; ASSERT_TRUE( HandleAioWriteRequest(kChunkSize, kChunkSize, chunkData2_.c_str())); - // 读出数据进行验证 + // Read data for verification ASSERT_TRUE(HandleAioReadRequest(kChunkSize, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData2_.c_str(), temp.get(), kChunkSize)); @@ -613,14 +611,14 @@ class CSCloneRecoverTest : public ::testing::Test { bool s3ObjExisted_; }; -// 场景一:通过ReadChunk从curve恢复克隆文件 +// Scenario 1: Restore clone files from curve through ReadChunk TEST_F(CSCloneRecoverTest, 
CloneFromCurveByReadChunk) { LOG(INFO) << "current case: CloneFromCurveByReadChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; ChunkID cloneChunk2 = 332; @@ -633,7 +631,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -647,7 +645,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -667,17 +665,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * After traversing a clone file, it will not be converted into a regular + * chunk file. Writing is performed by incrementing the version number: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -685,19 +684,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景二:通过RecoverChunk从curve恢复克隆文件 +// Scenario 2: Restore cloned files from curve through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { LOG(INFO) << "current case: CloneFromCurveByRecoverChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 333; ChunkID cloneChunk2 = 334; @@ -710,7 +709,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -724,7 +723,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. 
Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -750,16 +749,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件,写成功。 + * It is expected that the cloned file will be transformed into a regular + * chunk1 file. Writing is performed by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated, and the + * write will succeed. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -767,19 +768,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景三:lazy allocate场景下读克隆文件 +// Scenario 3: Lazy allocate scenario: Reading clone files TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { LOG(INFO) << "current case: CloneFromCurveByReadChunkWhenLazyAlloc"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. chunk文件不存在 + // 1. Chunk file does not exist ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; SequenceNum sn1 = 1; @@ -802,9 +803,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 将leader切换到follower + // Switch leader to follower ASSERT_EQ(0, TransferLeaderToFollower()); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); @@ -817,7 +818,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk string ioBuf(kChunkServerMaxIoSize, 'c'); for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyWriteChunk( @@ -828,9 +829,10 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { cloneData1.get(), CURVEFS_FILENAME, 0)); /** - * clone文件遍写后会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * After traversing and writing a clone file, it will be transformed into a + * regular chunk file. Writing is performed by incrementing the version: + * - If it is a clone chunk, the write operation will fail; + * - If it is a regular chunk, a snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -838,18 +840,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); } -// 场景四:通过ReadChunk从S3恢复克隆文件 +// Scenario 4: Restore cloned files from S3 through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { LOG(INFO) << "current case: CloneFromS3ByReadChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 335; ChunkID cloneChunk2 = 336; @@ -862,7 +864,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -875,7 +877,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -895,17 +897,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * It is expected that after a clone file is traversed, it will not be + * converted to a regular chunk file. Write operations are performed by + * increasing the version: + * - If it is a clone chunk, the write operation will fail. + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -913,19 +917,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景五:通过RecoverChunk从S3恢复克隆文件 +// Scenario 5: Restore cloned files from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { LOG(INFO) << "current case: CloneFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. 
Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 337; ChunkID cloneChunk2 = 338; @@ -938,7 +942,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -951,7 +955,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -977,16 +981,17 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -994,19 +999,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景六:通过ReadChunk从S3恢复 +// Scenario 6: Restore from S3 through ReadChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { LOG(INFO) << "current case: RecoverFromS3ByReadChunk"; - // 0. 构造数据上传到S3,模拟转储 + // 0. Upload construction data to S3 and simulate dump prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; SequenceNum sn2 = 2; @@ -1018,13 +1023,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, cloneData1.get())); @@ -1044,17 +1049,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file not to be converted to regular chunk1 file + * Write by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1062,18 +1068,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn3)); } -// 场景七:通过RecoverChunk从S3恢复 +// Scenario 7: Recovering from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { LOG(INFO) << "current case: RecoverFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; SequenceNum sn2 = 2; @@ -1085,13 +1091,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, @@ -1117,16 +1123,17 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. 
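 *
 * (A plausible reading, not something this test asserts directly: a clone
 * chunk records which pages have been written in its bitmap, so
 * RecoverChunk, which writes data into the chunk, can eventually mark
 * every page and trigger the conversion, while plain reads never mark
 * pages as written.)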
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1134,7 +1141,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn4)); } diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index a5ac75a823..a79c13eeaa 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -21,72 +21,60 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/common/concurrent/concurrent.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "src/common/concurrent/concurrent.h" -#include "test/integration/common/peer_cluster.h" #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::common::Thread; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::common::Thread; static const char* kFakeMdsAddr = "127.0.0.1:9329"; constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *chunkConcurrencyParams1[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9076", - "-chunkServerStoreUri=local://./9076/", - "-chunkServerMetaUri=local://./9076/chunkserver.dat", - "-copySetUri=local://./9076/copysets", - "-raftSnapshotUri=curve://./9076/copysets", - "-raftLogUri=curve://./9076/copysets", - "-recycleUri=local://./9076/recycler", - "-chunkFilePoolDir=./9076/chunkfilepool/", - "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", - "-walFilePoolDir=./9076/walfilepool/", - "-walFilePoolMetaPath=./9076/walfilepool.meta", - "-conf=./9076/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const char* chunkConcurrencyParams1[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9076", + "-chunkServerStoreUri=local://./9076/", + "-chunkServerMetaUri=local://./9076/chunkserver.dat", + "-copySetUri=local://./9076/copysets", + "-raftSnapshotUri=curve://./9076/copysets", + "-raftLogUri=curve://./9076/copysets", + "-recycleUri=local://./9076/recycler", + "-chunkFilePoolDir=./9076/chunkfilepool/", + "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", + "-walFilePoolDir=./9076/walfilepool/", + "-walFilePoolMetaPath=./9076/walfilepool.meta", + "-conf=./9076/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; -static const char *chunkConcurrencyParams2[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9077", - "-chunkServerStoreUri=local://./9077/", - "-chunkServerMetaUri=local://./9077/chunkserver.dat", - "-copySetUri=local://./9077/copysets", - "-raftSnapshotUri=curve://./9077/copysets", - "-raftLogUri=curve://./9077/copysets", - "-recycleUri=local://./9077/recycler", - "-chunkFilePoolDir=./9077/chunkfilepool/", - "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", - "-walFilePoolDir=./9077/walfilepool/", - "-walFilePoolMetaPath=./9077/walfilepool.meta", - "-conf=./9077/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const 
char* chunkConcurrencyParams2[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9077", + "-chunkServerStoreUri=local://./9077/", + "-chunkServerMetaUri=local://./9077/chunkserver.dat", + "-copySetUri=local://./9077/copysets", + "-raftSnapshotUri=curve://./9077/copysets", + "-raftLogUri=curve://./9077/copysets", + "-recycleUri=local://./9077/recycler", + "-chunkFilePoolDir=./9077/chunkfilepool/", + "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", + "-walFilePoolDir=./9077/walfilepool/", + "-walFilePoolMetaPath=./9077/walfilepool.meta", + "-conf=./9077/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -94,7 +82,7 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency testing for chunks not obtained from FilePool class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -136,14 +124,14 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -162,10 +150,10 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { CopysetID copysetId; std::map paramsIndexs; - std::vector params; + std::vector params; }; -// chunk从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test for chunks obtained from FilePool class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -182,7 +170,6 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { electionTimeoutMs = 3000; snapshotIntervalS = 60; - ASSERT_TRUE(cg1.Init("9077")); cg1.SetKV("copyset.election_timeout_ms", "3000"); cg1.SetKV("copyset.snapshot_interval_s", "60"); @@ -198,14 +185,12 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { params.push_back(const_cast(chunkConcurrencyParams2[0])); - // 初始化FilePool,这里会预先分配一些chunk + // Initialize FilePool, where some chunks will be pre allocated lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - poolDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool/"; - metaDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool.meta"; + poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool/"; + metaDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool.meta"; FilePoolMeta meta(kChunkSize, kPageSize, poolDir); FilePoolHelper::PersistEnCodeMetaInfo(lfs, meta, metaDir); @@ -213,7 +198,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // There maybe one chunk in cleaning, so you should allocate // (kChunkNum + 1) chunks in start if you want to use kChunkNum chunks.
// This situation will not occur in the production environment - allocateChunk(lfs, kChunkNum+1, poolDir, kChunkSize); + allocateChunk(lfs, kChunkNum + 1, poolDir, kChunkSize); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -224,14 +209,14 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // wait for process exit ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -243,28 +228,23 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { std::vector peers; PeerId leaderId; Peer leaderPeer; - int electionTimeoutMs; - int snapshotIntervalS; + int electionTimeoutMs; + int snapshotIntervalS; LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; - std::map paramsIndexs; - std::vector params; + std::map paramsIndexs; + std::vector params; std::string poolDir; std::string metaDir; - std::shared_ptr lfs; + std::shared_ptr lfs; }; -// 写chunk -int WriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - off_t offset, - size_t len, - const char *data, +// Write chunk +int WriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, off_t offset, size_t len, const char* data, const int sn = 1) { PeerId leaderId(leader.address()); brpc::Channel channel; @@ -299,13 +279,9 @@ int WriteChunk(Peer leader, return 0; } -// 随机选择一个chunk的随机offset进行read -void RandReadChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for read +void RandReadChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; uint64_t appliedIndex = 1; PeerId leaderId(leader.address()); @@ -314,7 +290,7 @@ void RandReadChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -330,7 +306,7 @@ void RandReadChunk(Peer leader, request.set_size(kOpRequestAlignSize); request.set_appliedindex(appliedIndex); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -342,7 +318,8 @@ void RandReadChunk(Peer leader, } if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS && - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { //NOLINT + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { // NOLINT LOG(INFO) << "read failed: " << CHUNK_OP_STATUS_Name(response.status()); ret = -1; @@ -352,13 +329,9 @@ void RandReadChunk(Peer leader, } } -// 随机选择一个chunk的随机offset进行write -void RandWriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for writing +void RandWriteChunk(Peer leader, LogicPoolID logicPoolId, 
CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; char data[kOpRequestAlignSize] = {'a'}; int length = kOpRequestAlignSize; @@ -369,7 +342,7 @@ void RandWriteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -385,7 +358,7 @@ void RandWriteChunk(Peer leader, request.set_size(kOpRequestAlignSize); cntl.request_attachment().append(data, length); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -406,12 +379,9 @@ void RandWriteChunk(Peer leader, } } -// 随机选择一个chunk删除 -void RandDeleteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop) { +// Randomly select a chunk to delete +void RandDeleteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop) { int ret = 0; PeerId leaderId(leader.address()); @@ -420,7 +390,7 @@ void RandDeleteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -450,12 +420,9 @@ void RandDeleteChunk(Peer leader, } } -// 创建clone chunk -void CreateCloneChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID start, - ChunkID end) { +// Create clone chunk +void CreateCloneChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID start, ChunkID end) { int ret = 0; SequenceNum sn = 2; SequenceNum correctedSn = 1; @@ -497,10 +464,10 @@ void CreateCloneChunk(Peer leader, } /** - * chunk不是事先在FilePool分配好的 + * Chunks are not pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -511,37 +478,21 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 3. 
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -549,33 +500,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -583,8 +525,9 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -592,29 +535,19 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -622,7 +555,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 3. 将数据read出来验证 + // 3. 
Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -646,7 +579,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data are >= 'a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -654,7 +587,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -664,50 +597,30 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机read write chunk + // 3. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -716,7 +629,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -726,38 +639,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated
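+    // (RandReadChunk tolerates CHUNK_OP_STATUS_CHUNK_NOTEXIST, so unwritten
+    // chunks would not fail the reads outright; writing every chunk first
+    // just makes the reads exercise real data rather than the not-exist
+    // path.)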
for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机read chunk + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -765,33 +663,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + RandReadMultiNotExistChunk) { // NOLINT const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -799,7 +690,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently perform random writes to multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -809,39 +700,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生,避免下面同时从 - // chunkfile pool生成new chunk导致write 超时失败 + // 2. Initiate a write to each chunk first to make sure the chunks have + // been generated, avoiding concurrent new-chunk allocation from the + // chunkfile pool below, which could cause writes to time out for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机write chunk + // 4.
Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -849,7 +727,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently perform random reads and writes on +// multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -857,38 +735,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -897,7 +761,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -907,38 +771,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. Starting multiple threads to execute random delete chunks
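+    // (Several threads may pick the same chunkId at random, so two deletes
+    // can race; RandDeleteChunk presumably cannot insist on
+    // CHUNK_OP_STATUS_SUCCESS for every request and has to treat deleting
+    // an already-deleted chunk as acceptable.)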
std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -946,30 +796,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrently create clone chunks on different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -979,10 +822,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { } } /** - * chunk是事先在FilePool分配好的 + * Chunks are pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -993,36 +836,21 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); + + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1030,33 +858,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1.
启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1064,8 +883,9 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -1073,29 +893,19 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -1103,7 +913,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 4. 将数据read出来验证 + // 4. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -1127,7 +937,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -1135,7 +945,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1143,38 +953,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. 
Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1183,7 +979,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1193,38 +989,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机read chunk + // 4. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1232,33 +1013,25 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. 
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1266,7 +1039,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently perform random writes to multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1274,26 +1047,17 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1301,7 +1065,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently perform random reads and writes on +// multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1309,38 +1073,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks
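+    // (butil::fast_rand_less_than(2) below returns 0 or 1 uniformly, so
+    // each worker has an even chance of becoming a read thread or a write
+    // thread.)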
std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1349,7 +1099,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1359,38 +1109,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 - threads.push_back(Thread(RandDeleteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop)); + // Start delete thread + threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1398,30 +1134,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrently create clone chunks on different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2.
Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(CreateCloneChunk, - leaderPeer, - logicPoolId, - copysetId, - i * chunksPerThread + 1, + threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId, + copysetId, i * chunksPerThread + 1, (i + 1) * chunksPerThread)); } @@ -1430,7 +1159,8 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { } } -// 多线程并发随机读写同多个chunk,同事伴随这并发的COW +// Multiple threads concurrently perform random reads and writes on multiple +// chunks, accompanied by concurrent COW TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1440,52 +1170,32 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { ChunkID chunkIdRange = kChunkNum / 2; int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 用低版本的sn写一遍chunk + // 2. Write each chunk once with a lower sn for (int k = 1; k <= chunkIdRange; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // sn加1,保证后面的write会产生COW + // Add 1 to sn to ensure that subsequent writes will generate COW sn += 1; - // 3. 起多个线程执行随机read write chunk + // 3.
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(10); if (read <= 1) { - // 起read线程,20%概率 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread with a 20% probability + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index aa676fc718..9653b4857a 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include -#include -#include // NOLINT -#include // NOLINT -#include -#include #include +#include #include // NOLINT +#include +#include // NOLINT +#include +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -48,12 +48,12 @@ curve::client::InflightControl inflightContl; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "--mdsDbName=module_exception_curve_chunkserver" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22233" }, - { "--updateToRepoSec=5" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"--mdsDbName=module_exception_curve_chunkserver"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22233"}, + {"--updateToRepoSec=5"}, }; const std::vector chunkserverConf4{ @@ -143,15 +143,16 @@ class CSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22233", "127.0.0.1:22234", std::vector{ - "--name=module_exception_test_chunkserver" }); + "--name=module_exception_test_chunkserver"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22233:22234, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(1, "127.0.0.1:22122", 22128, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22122, pid = " << pid; @@ -168,7 +169,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -190,7 +191,7 @@ class CSModuleException : public ::testing::Test { retry++; } - // 4. 创建chunkserver + // 4. 
Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; @@ -207,7 +208,8 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -228,15 +230,15 @@ class CSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } @@ -282,12 +284,15 @@ class CSModuleException : public ::testing::Test { } /** - * 监测client io能否在预期时间内正常下发 - * @param: off是当前需要下发IO的偏移 - * @param: size是下发io的大小 - * @param: predictTimeS是预期在多少秒内IO可以恢复 - * @param[out]: failCount为当前io下发中错误返回的数量 - * @return: 如果io在预期时间内能够正常下发,则返true,否则返回false + * Monitor whether client I/O can be issued within the expected time. + * @param off: The current offset for the I/O to be issued. + * @param size: The size of the I/O to be issued. + * @param predictTimeS: The expected time in seconds within which the I/O + * should recover. + * @param[out] failCount: The count of errors returned during the ongoing + * I/O. + * @return true if I/O can be issued normally within the expected time, + * false otherwise. */ bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS, uint64_t* failCount = nullptr) { @@ -335,7 +340,7 @@ class CSModuleException : public ::testing::Test { failCount == nullptr ? 0 : (*failCount = ioFailedCount); - // 唤醒io线程 + // Wake up IO thread iothread.join(); inflightContl.WaitInflightAllComeBack(); @@ -345,7 +350,7 @@ class CSModuleException : public ::testing::Test { int fd; - // 是否出现挂卸载失败 + // Whether there is a failure in mounting or unmounting. bool createOrOpenFailed; bool createDone; std::mutex createMtx; @@ -354,173 +359,186 @@ class CSModuleException : public ::testing::Test { CurveCluster* cluster; }; -// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd +// Test environment topology: Start one client, three chunkservers, three mds, +// and one etcd on a single node TEST_F(CSModuleException, ChunkserverException) { LOG(INFO) << "current case: KillOneChunkserverThenRestartTheChunkserver"; /********* KillOneChunkserverThenRestartTheChunkserver **********/ - // 1. 测试重启一个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. kill一台chunkserver:client 读写请求最多卡顿 - // election_timeout*2s可以正常读写 - // c. 恢复chunkserver:client 读写请求无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting a chunkserver. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write + // requests can be issued normally. b. Killing one chunkserver: Client + // read and write requests may experience at most a temporary delay. + // They should resume normal operation after election_timeout * 2s. + // c. Recovering the chunkserver: Client read and write requests should + // be unaffected. + // 1. Initial state of the cluster, with I/O being issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. 
kill掉一个chunkserver + // 2. Kill a chunkserver ASSERT_EQ(0, cluster->StopChunkServer(1)); - // 3. kill掉一个chunkserver之后,client的io预期最多会在2*electtime后恢复 + // 3. After killing a chunkserver, the client's IO is expected to recover + // within at most 2 * electtime ASSERT_TRUE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被kill的chunkserver + // 4. Bring up the chunkserver that was just killed pid_t pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 重新拉起对client IO没有影响 + // 5. Bringing it back up has no impact on client IO ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: HangOneChunkserverThenResumeTheChunkserver"; /********* HangOneChunkserverThenResumeTheChunkserver ***********/ - // 1. hang一台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台chunkserver:client - // 读写请求最多卡顿election_timeout*2s可以正常读写 - // c. 恢复chunkserver:client 读写请求无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang one chunkserver, then recover the hung chunkserver. + // 2. Expectations: + // a. When the cluster is in a normal state: client read and write + // requests can be issued normally. b. Hang one chunkserver: client read + // and write requests may stall for at most election_timeout*2s before + // proceeding normally. c. Recover the chunkserver: client read and + // write requests are not affected. + // 1. Initial state of the cluster, where I/O is issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一个chunkserver + // 2. Hang a chunkserver ASSERT_EQ(0, cluster->HangChunkServer(1)); - // 3. hang一个chunkserver之后,client的io预期最多会在2*electtime后恢复 + // 3. After hanging a chunkserver, the client's IO is expected to recover + // within at most 2 * electtime ASSERT_TRUE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被hang的chunkserver + // 4. Recover the chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 重新拉起对client IO没有影响 + // 5. Bringing it back up has no impact on client IO ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver"; /******** KillTwoChunkserverThenRestartTheChunkserver *********/ - // 1. 测试重启两个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang - // 拉起被kill中的一台chunkserver:client IO预期在最多在 - // (chunkserver启动回放数据+2*election_timeout)时间内恢复读写 - // c. 拉起另外一台kill的chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting two chunk servers. + // 2. Expectations: + // a. When the cluster is in a normal state: client read and write + // requests should be issued normally. b. Kill two chunk servers: Expect + // ongoing client I/O to hang, both for new writes and overwrite writes + // (with two of the three replicas down, the copyset loses its raft + // majority). Bring up one of the killed chunk servers: Expect client + // I/O to recover for read and write within the time of (chunk server + // startup replay data + 2 * election_timeout) at most. + // c. Bring up the other killed chunk server: No impact on ongoing client + // I/O. + // 1. Initial state of the cluster, I/O issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill掉两个chunkserver + // 2. Kill two chunkservers ASSERT_EQ(0, cluster->StopChunkServer(1)); ASSERT_EQ(0, cluster->StopChunkServer(2)); - // 3. kill掉两个chunkserver, io无法正常下发 + // 3. Kill two chunkservers, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 30)); - // 4. 拉起刚才被kill的chunkserver的第一个 + // 4.
    LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver";

    /******** KillTwoChunkserverThenRestartTheChunkserver *********/
-    // 1. 测试重启两个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang
-    //       拉起被kill中的一台chunkserver:client IO预期在最多在
-    //       (chunkserver启动回放数据+2*election_timeout)时间内恢复读写
-    //    c. 拉起另外一台kill的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting two chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Kill two chunkservers: ongoing client I/O is expected to hang,
+    //       for both new writes and overwrites.
+    //       Bring up one of the killed chunkservers: client I/O is expected
+    //       to recover within at most (chunkserver startup data replay +
+    //       2 * election_timeout).
+    //    c. Bring up the other killed chunkserver: no impact on client I/O.
+    // 1. Initial state of the cluster, I/O issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉两个chunkserver
+    // 2. Kill two chunkservers
    ASSERT_EQ(0, cluster->StopChunkServer(1));
    ASSERT_EQ(0, cluster->StopChunkServer(2));

-    // 3. kill掉两个chunkserver, io无法正常下发
+    // 3. With two chunkservers killed, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 30));

-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Bring up the first of the killed chunkservers
    pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                          chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 拉起刚才被kill的chunkserver的第一个,
-    // client的io预期最多会在2*electtime后恢复
-    // 如果配置了慢启动,则需要等待
+    // 5. After bringing up the first of the killed chunkservers, the
+    // client's IO is expected to recover within at most
+    // 2 * election_timeout. If slow start is configured, also wait
    // (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Bring up the second of the killed chunkservers
    pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126",
                                          chunkserverConf5);
    LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

    LOG(INFO) << "current case: HangTwoChunkserverThenResumeTheChunkserver";

    /******* HangTwoChunkserverThenResumeTheChunkserver **********/
-    // 1. hang两台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang两台chunkserver:client IO持续hang,新写IO和覆盖写都hang
-    //    c. 恢复其中的一台chunkserver:client IO 恢复读写,
-    //       从恢复chunkserver到client IO恢复时间在election_timeout*2
-    //    d. 恢复另外一台hang的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang two chunkservers and then recover the hung chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Hang two chunkservers: client I/O keeps hanging, for both new
+    //       writes and overwrites.
+    //    c. Recover one of the hung chunkservers: client I/O resumes reads
+    //       and writes; the time from recovering the chunkserver to client
+    //       I/O recovery is within election_timeout*2.
+    //    d. Recover the other hung chunkserver: no impact on client I/O.
+    // 1. Initial state of the cluster, I/O issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang掉两个个chunkserver
+    // 2. Hang two chunkservers
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));

-    // 3. hang两个chunkserver, io无法正常下发
+    // 3. With two chunkservers hung, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the hung chunkservers
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 拉起刚才被hang的chunkserver的第一个,
-    // client的io预期最多会在2*electtime后恢复
-    // 如果配置了慢启动,则需要等待
+    // 5. After recovering the first hung chunkserver, the client's I/O is
+    // expected to recover within at most 2 * election_timeout. If slow
+    // start is configured, also wait
    // (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the hung chunkservers
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));

-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
    ASSERT_TRUE(MonitorResume(0, 4096, 1));
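    // Step 5 in both cases above budgets 80s. Under assumed values the
    // slow-start bound works out comfortably below that; all constants here
    // are illustrative, not taken from this patch:
    constexpr int kCopysetNum = 100;      // copysets on the restarted server
    constexpr int kLoadConcurrency = 10;  // copyset.load_concurrency (assumed)
    constexpr int kElectionTimeoutS = 1;  // election timeout in seconds
    constexpr int kSlowStartWaitS =
        (kCopysetNum / kLoadConcurrency) * kElectionTimeoutS;  // = 10s
    static_assert(kSlowStartWaitS + 2 * kElectionTimeoutS <= 80,
                  "fits the 80s budget passed to MonitorResume()");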
    LOG(INFO) << "current case: KillThreeChunkserverThenRestartTheChunkserver";

    /******** KillThreeChunkserverThenRestartTheChunkserver ******/
-    // 1. 测试重启三个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭三台chunkserver:client IO hang
-    //    c. 重启一台chunkserver:client IO hang
-    //    d. 重启第二台chunkserver:client IO hang,
-    //       直到chunkserver完全恢复,IO恢复。
-    //       恢复时间约等于(chunkserver启动回放数据+2*election_timeout)
-    //    e. 重启第三台chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting three chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state, client read and write
+    //       requests can be issued normally.
+    //    b. Shut down three chunkservers: client I/O hangs.
+    //    c. Restart one chunkserver: client I/O still hangs.
+    //    d. Restart the second chunkserver: client I/O hangs until the
+    //       chunkserver has fully recovered; the recovery time is roughly
+    //       (chunkserver startup data replay + 2 * election_timeout).
+    //    e. Restart the third chunkserver: client I/O is unaffected.
+    // 1. Initial state of the cluster, I/O issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. kill掉三个chunkserver
+    // 2. Kill three chunkservers
    ASSERT_EQ(0, cluster->StopChunkServer(1));
    ASSERT_EQ(0, cluster->StopChunkServer(2));
    ASSERT_EQ(0, cluster->StopChunkServer(3));

-    // 3. kill掉三个chunkserver, io无法正常下发
+    // 3. With three chunkservers killed, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 2));

-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Bring up the first of the killed chunkservers
    pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                          chunkserverConf4);
    LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Bring up the second of the killed chunkservers
    pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126",
                                          chunkserverConf5);
    LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
    ASSERT_GT(pid, 0);

-    // 7. client的io恢复
+    // 7. The client's IO recovers
    ASSERT_TRUE(MonitorResume(0, 4096, 80));

-    // 8. 拉起其他被kil的chunkserver
+    // 8. Bring up the remaining killed chunkserver
    pid = cluster->StartSingleChunkServer(3, "127.0.0.1:22127",
                                          chunkserverConf6);
    LOG(INFO) << "chunkserver 3 started on 127.0.0.1:22127, pid = " << pid;
@@ -528,37 +546,37 @@ TEST_F(CSModuleException, ChunkserverException) {

    LOG(INFO) << "current case: HangThreeChunkserverThenResumeTheChunkserver";

    /******** HangThreeChunkserverThenResumeTheChunkserver **********/
-    // 1. hang三台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang三台chunkserver:client IO hang
-    //    c. 恢复一台chunkserver:client IO hang
-    //    d. 再恢复一台chunkserver:预期在
-    //       election_timeout*2左右的时间,client IO恢复
-    //    e. 恢复最后一台chunkserver:预期client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang three chunkservers and then recover the hung chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Hang three chunkservers: client I/O hangs.
+    //    c. Recover one chunkserver: client I/O still hangs.
+    //    d. Recover another chunkserver: client I/O is expected to recover
+    //       in roughly election_timeout*2.
+    //    e. Recover the last chunkserver: no impact on client I/O expected.
+    // 1. Initial state of the cluster, I/O issued normally.
    ASSERT_TRUE(MonitorResume(0, 4096, 1));

-    // 2. hang掉三个chunkserver
+    // 2. Hang three chunkservers
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));
    ASSERT_EQ(0, cluster->HangChunkServer(3));

-    // 3. hang三个chunkserver, io无法正常下发
+    // 3. With three chunkservers hung, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 30));

-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the hung chunkservers
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));

-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
    ASSERT_FALSE(MonitorResume(0, 4096, 80));

-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the hung chunkservers
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));
    ASSERT_EQ(0, cluster->RecoverHangChunkServer(3));

-    // 7. client的io预期最多会在2*electtime s内恢复
-    //    如果配置了慢启动,则需要等待
-    //    (copysetNum / load_concurrency) * election_timeout
+    // 7. The client's IO is expected to recover within at most
+    //    2 * election_timeout seconds. If slow start is configured, also
+    //    wait (copysetNum / load_concurrency) * election_timeout
    ASSERT_TRUE(MonitorResume(0, 4096, 80));
}

diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp
index ea5c7e4c37..3f10f365ae 100644
--- a/test/integration/client/unstable_chunkserver_exception_test.cpp
+++ b/test/integration/client/unstable_chunkserver_exception_test.cpp
@@ -20,26 +20,26 @@
 * Author: wuhanqing
 */

-#include
-#include
#include
+#include
+#include

-#include
-#include
-#include  // NOLINT
-#include  // NOLINT
+#include
#include
-#include
+#include
+#include  // NOLINT
+#include
#include
-#include
+#include  // NOLINT
#include
-#include
-#include  // NOLINT
+#include
+#include  // NOLINT
+#include

#include "include/client/libcurve.h"
-#include "src/common/timeutility.h"
#include "src/client/client_metric.h"
#include "src/client/inflight_controller.h"
+#include "src/common/timeutility.h"
#include "test/integration/client/common/file_operation.h"
#include "test/integration/cluster_common/cluster.h"
#include "test/util/config_generator.h"
@@ -58,16 +58,14 @@
const char* kLogPath = "./runlog/";

curve::client::PerSecondMetric iops("test", "iops");

-std::atomic running{ false };
+std::atomic running{false};

const std::vector chunkserverConfigOpts{
    "chunkfilepool.enable_get_chunk_from_pool=false",
-    "walfilepool.enable_get_segment_from_pool=false"
-};
+    "walfilepool.enable_get_segment_from_pool=false"};

-const std::vector mdsConfigOpts{
-    std::string("mds.etcd.endpoint=") + std::string(kEtcdClientIpPort)
-};
+const std::vector mdsConfigOpts{std::string("mds.etcd.endpoint=") +
+                                std::string(kEtcdClientIpPort)};

const std::vector clientConfigOpts{
    std::string("mds.listen.addr=") + kMdsIpPort,
@@ -81,9 +79,8 @@
const std::vector mdsConf{
    std::string("--confPath=") + kMdsConfPath,
    std::string("--mdsAddr=") + kMdsIpPort,
    std::string("--etcdAddr=") + kEtcdClientIpPort,
-    { "--log_dir=./runlog/mds" },
-    { "--stderrthreshold=3" }
-};
+    {"--log_dir=./runlog/mds"},
+    {"--stderrthreshold=3"}};

const std::vector chunkserverConfTemplate{
    {"-raft_sync_segments=true"},
@@ -138,20 +135,16 @@ std::vector GenChunkserverConf(int port) {
    return conf;
}

-off_t RandomWriteOffset() {
-    return rand() % 32 * (16 * 1024 * 1024);
-}
+off_t RandomWriteOffset() { return rand() % 32 * (16 * 1024 * 1024); }

-size_t RandomWriteLength() {
-    return rand() % 32 * 4096;
-}
+size_t RandomWriteLength() { return rand() % 32 * 4096; }

static char buffer[1024 * 4096];

struct ChunkserverParam {
    int id;
    int port;
-    std::string addr{ "127.0.0.1:" };
+    std::string addr{"127.0.0.1:"};
    std::vector conf;

    ChunkserverParam(int id, int port) {
@@ -165,7 +158,7 @@ struct ChunkserverParam
{

class UnstableCSModuleException : public ::testing::Test {
 protected:
    static void SetUpTestCase() {
-        // 清理文件夹
+        // Clean up folders
        system("rm -rf module_exception_curve_unstable_cs.etcd");
        system("rm -rf ttt");
        system("mkdir -p ttt");
@@ -175,7 +168,7 @@ class UnstableCSModuleException : public ::testing::Test {
        cluster.reset(new CurveCluster());
        ASSERT_NE(nullptr, cluster.get());

-        // 生成配置文件
+        // Generate configuration files
        cluster->PrepareConfig(kMdsConfPath, mdsConfigOpts);
        cluster->PrepareConfig(kCSConfPath,
@@ -183,50 +176,52 @@ class UnstableCSModuleException : public ::testing::Test {
        cluster->PrepareConfig(kClientConfPath, clientConfigOpts);

-        // 1. 启动etcd
+        // 1. Start etcd
        pid_t pid = cluster->StartSingleEtcd(
            1, kEtcdClientIpPort, kEtcdPeerIpPort,
            std::vector{
-                "--name=module_exception_curve_unstable_cs" });
+                "--name=module_exception_curve_unstable_cs"});
        LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << ":"
                  << kEtcdPeerIpPort << ", pid = " << pid;
        ASSERT_GT(pid, 0);

-        // 2. 启动一个mds
+        // 2. Start an mds
        pid = cluster->StartSingleMDS(1, kMdsIpPort, 30013, mdsConf, true);
        LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid;
        ASSERT_GT(pid, 0);
        std::this_thread::sleep_for(std::chrono::seconds(2));

-        // 3. 创建物理池
-        ASSERT_EQ(
-            0,
-            cluster->PreparePhysicalPool(
-                1,
-                "./test/integration/client/config/unstable/"
-                "topo_unstable.json"));
+        // 3. Create a physical pool
+        ASSERT_EQ(0, cluster->PreparePhysicalPool(
+                         1,
+                         "./test/integration/client/config/unstable/"
+                         "topo_unstable.json"));

-        // 4. 创建chunkserver
+        // 4. Create chunkservers
        StartAllChunkserver();
        std::this_thread::sleep_for(std::chrono::seconds(5));

-        // 5. 创建逻辑池,并睡眠一段时间让底层copyset先选主
-        ASSERT_EQ(0, cluster->PrepareLogicalPool(
-            1, "test/integration/client/config/unstable/topo_unstable.json"));
+        // 5. Create a logical pool and sleep for a while to let the
+        // underlying copysets elect leaders first
+        ASSERT_EQ(
+            0,
+            cluster->PrepareLogicalPool(
+                1,
+                "test/integration/client/config/unstable/topo_unstable.json"));
        std::this_thread::sleep_for(std::chrono::seconds(10));

-        // 6. 初始化client配置
+        // 6. Initialize client configuration
        int ret = Init(kClientConfPath);
        ASSERT_EQ(ret, 0);

-        // 7. 先睡眠10s,让chunkserver选出leader
+        // 7. Sleep for 10s first to let the chunkservers elect a leader
        std::this_thread::sleep_for(std::chrono::seconds(10));
    }

    static void TearDownTestCase() {
        UnInit();
        ASSERT_EQ(0, cluster->StopCluster());
-        // 清理文件夹
+        // Clean up folders
        system("rm -rf module_exception_curve_unstable_cs.etcd");
        system("rm -rf module_exception_curve_unstable_cs");
        system("rm -rf ttt");
@@ -300,7 +295,8 @@ class UnstableCSModuleException : public ::testing::Test {

int UnstableCSModuleException::fd = 0;
std::unique_ptr UnstableCSModuleException::cluster;
-std::unordered_map UnstableCSModuleException::chunkServers;  // NOLINT
+std::unordered_map
+    UnstableCSModuleException::chunkServers;  // NOLINT

TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) {
    const std::string filename = "/TestCommonReadAndWrite";
@@ -323,15 +319,15 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) {
    ::Close(fd);
}

-// 集群拓扑结构
-// 1个client
-// 1个etcd
-// 1个mds
-// 3个zone,每个里面2个chunkserver
+// Cluster topology
+// 1 client
+// 1 etcd
+// 1 mds
+// 3 zones, each with 2 chunkservers
TEST_F(UnstableCSModuleException, HangOneZone) {
    srand(time(nullptr));

-    // 开启多个线程写文件
+    // Start multiple threads writing to files
    LOG(INFO) << "starting write...";
    running = true;
    std::vector openAndWriteThreads;
@@ -341,7 +337,7 @@ TEST_F(UnstableCSModuleException, HangOneZone) {
                                        "/test" + std::to_string(i));
    }

-    // 正常写入60s, 并记录后30秒的iops
+    // Write normally for 60s and record the IOPS of the last 30 seconds
    std::vector beforeRecords;
    std::this_thread::sleep_for(std::chrono::seconds(30));
    for (int i = 1; i <= 30; ++i) {
@@ -353,18 +349,18 @@ TEST_F(UnstableCSModuleException, HangOneZone) {
                          beforeRecords.size();
    LOG(INFO) << "iops before hang: " << beforeAvgIOps;

-    // hang一个zone的chunkserver
+    // Hang the chunkservers of one zone
    LOG(INFO) << "hang one zone";
    ASSERT_EQ(0, cluster->HangChunkServer(1));
    ASSERT_EQ(0, cluster->HangChunkServer(2));

    std::vector afterRecords;
-    // 打印每一秒的iops情况
+    // Print the IOPS for each second
    for (int i = 1; i <= 10; ++i) {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        auto tmp = iops.value.get_value(1);
        LOG(INFO) << "after " << i << "s, iops: " << tmp;
-        // 记录后5s的iops值
+        // Record the IOPS values from the 5th second on
        if (i >= 5) {
            afterRecords.push_back(tmp);
        }

diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp
index 5d32ef8539..72410a5ca7 100644
--- a/test/integration/cluster_common/cluster.cpp
+++ b/test/integration/cluster_common/cluster.cpp
@@ -20,26 +20,28 @@
 * Author: lixiaocui
*/

-#include
+#include "test/integration/cluster_common/cluster.h"
+
#include
-#include
-#include
-#include
#include
#include
#include
-#include
-#include  //NOLINT
+#include
+#include
+#include
+#include
+
#include  //NOLINT
+#include
#include
+#include
+#include  //NOLINT
#include
#include
-#include
-#include "test/integration/cluster_common/cluster.h"
+#include "src/client/client_common.h"
#include "src/common/string_util.h"
#include "src/common/timeutility.h"
-#include "src/client/client_common.h"
#include "src/kvstorageclient/etcd_client.h"

using ::curve::client::UserInfo_t;
@@ -50,29 +52,29 @@ namespace curve {

using ::curve::client::CreateFileContext;

-int CurveCluster::InitMdsClient(const curve::client::MetaServerOption &op) {
+int CurveCluster::InitMdsClient(const curve::client::MetaServerOption& op) {
    mdsClient_ = std::make_shared();
    return mdsClient_->Initialize(op);
}

-std::vector VecStr2VecChar(std::vector args) {
-    std::vector argv(args.size() + 1);  // for the
NULL terminator +std::vector VecStr2VecChar(std::vector args) { + std::vector argv(args.size() + 1); // for the NULL terminator for (std::size_t i = 0; i < args.size(); ++i) { // not include cmd - argv[i] = new char[args[i].size()+1]; + argv[i] = new char[args[i].size() + 1]; snprintf(argv[i], args[i].size() + 1, "%s", args[i].c_str()); } argv[args.size()] = NULL; return argv; } -void ClearArgv(const std::vector &argv) { - for (auto const &item : argv) { - delete [] item; +void ClearArgv(const std::vector& argv) { + for (auto const& item : argv) { + delete[] item; } } int CurveCluster::InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints) { + const std::string& etcdEndpoints) { EtcdConf conf; conf.Endpoints = new char[etcdEndpoints.size()]; std::memcpy(conf.Endpoints, etcdEndpoints.c_str(), etcdEndpoints.size()); @@ -88,8 +90,8 @@ int CurveCluster::InitSnapshotCloneMetaStoreEtcd( } auto codec = std::make_shared(); - metaStore_ = std::make_shared(etcdClient, - codec); + metaStore_ = + std::make_shared(etcdClient, codec); if (metaStore_->Init() < 0) { LOG(ERROR) << "metaStore init fail."; return -1; @@ -106,17 +108,13 @@ int CurveCluster::StopCluster() { LOG(INFO) << "stop cluster begin..."; int ret = 0; - if (StopAllMDS() < 0) - ret = -1; + if (StopAllMDS() < 0) ret = -1; - if (StopAllChunkServer() < 0) - ret = -1; + if (StopAllChunkServer() < 0) ret = -1; - if (StopAllSnapshotCloneServer() < 0) - ret = -1; + if (StopAllSnapshotCloneServer() < 0) ret = -1; - if (StopAllEtcd() < 0) - ret = -1; + if (StopAllEtcd() < 0) ret = -1; if (!ret) LOG(INFO) << "success stop cluster"; @@ -125,9 +123,9 @@ int CurveCluster::StopCluster() { return ret; } -int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, +int CurveCluster::StartSingleMDS(int id, const std::string& ipPort, int dummyPort, - const std::vector &mdsConf, + const std::vector& mdsConf, bool expectLeader) { LOG(INFO) << "start mds " << ipPort << " begin..."; pid_t pid = ::fork(); @@ -135,20 +133,21 @@ int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, LOG(ERROR) << "start mds " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个mds + // Start an mds in a child process // ./bazel-bin/src/mds/main/curvemds std::vector args; args.emplace_back("./bazel-bin/src/mds/main/curvemds"); args.emplace_back("--mdsAddr=" + ipPort); args.emplace_back("--dummyPort=" + std::to_string(dummyPort)); - for (auto &item : mdsConf) { + for (auto& item : mdsConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execv("./bazel-bin/src/mds/main/curvemds", argv.data()); ClearArgv(argv); @@ -221,26 +220,27 @@ int CurveCluster::StopAllMDS() { } int CurveCluster::StartSnapshotCloneServer( - int id, const std::string &ipPort, - const std::vector &snapshotcloneConf) { + int id, const std::string& ipPort, + const std::vector& snapshotcloneConf) { LOG(INFO) << "start snapshotcloneserver " << ipPort << " begin ..."; pid_t pid = ::fork(); if (0 > pid) { LOG(ERROR) << "start snapshotcloneserver " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个snapshotcloneserver + // Starting a snapshotcloneserver in a child process std::vector args; args.emplace_back( "./bazel-bin/src/snapshotcloneserver/snapshotcloneserver"); args.emplace_back("--addr=" + ipPort); - for (auto &item : snapshotcloneConf) { + for (auto& item : snapshotcloneConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/snapshotcloneserver/snapshotcloneserver", argv.data()); @@ -317,19 +317,18 @@ int CurveCluster::StopAllSnapshotCloneServer() { int ret = 0; auto tempMap = snapPidMap_; for (auto pair : tempMap) { - if (StopSnapshotCloneServer(pair.first) < 0) - ret = -1; + if (StopSnapshotCloneServer(pair.first) < 0) ret = -1; } - // 等待进程完全退出 + // Wait for the process to completely exit ::sleep(2); LOG(INFO) << "stop all snapshotcloneservver end."; return ret; } -int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf) { +int CurveCluster::StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector& etcdConf) { LOG(INFO) << "start etcd " << clientIpPort << " begin..."; pid_t pid = ::fork(); @@ -337,7 +336,7 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, LOG(ERROR) << "start etcd " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个etcd + // Start an ETCD in a child process // ip netns exec integ_etcd1 etcd std::vector args{"etcd"}; args.emplace_back("--listen-peer-urls=http://" + peerIpPort); @@ -348,14 +347,15 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, args.emplace_back("--initial-cluster-token=etcd-cluster-1"); args.emplace_back("--election-timeout=3000"); args.emplace_back("--heartbeat-interval=300"); - for (auto &item : etcdConf) { + for (auto& item : etcdConf) { args.push_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execvp("etcd", argv.data()); ClearArgv(argv); @@ -380,7 +380,7 @@ bool CurveCluster::WaitForEtcdClusterAvalible(int waitSec) { return false; } else { int i = 0; - for (auto &item : etcdClientIpPort_) { + for (auto& item : etcdClientIpPort_) { i++; if (i == etcdClientIpPort_.size()) { endpoint += "http://" + item.second; @@ -464,9 +464,9 @@ int CurveCluster::StopAllEtcd() { return ret; } -int CurveCluster::FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystempath, +int CurveCluster::FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystempath, uint32_t size) { LOG(INFO) << "FormatFilePool begin..."; @@ -475,8 +475,7 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, " -filePoolMetaPath=" + filePoolmetapath + " -fileSystemPath=" + filesystempath + " -allocateByPercent=false -preAllocateNum=" + - std::to_string(size * 300) + - " -needWriteZero=false"; + std::to_string(size * 300) + " -needWriteZero=false"; RETURN_IF_NOT_ZERO(system(cmd.c_str())); @@ -485,8 +484,8 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, } int CurveCluster::StartSingleChunkServer( - int id, const std::string &ipPort, - const std::vector &chunkserverConf) { + int id, const std::string& ipPort, + const std::vector& chunkserverConf) { LOG(INFO) << "start chunkserver " << id << ", " << ipPort << " begin..."; std::vector split; ::curve::common::SplitString(ipPort, ":", &split); @@ -500,19 +499,20 @@ int CurveCluster::StartSingleChunkServer( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + split[0]); args.emplace_back("-chunkServerPort=" + split[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/chunkserver/chunkserver", argv.data()); ClearArgv(argv); @@ -530,7 +530,7 @@ int CurveCluster::StartSingleChunkServer( } int CurveCluster::StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf) { + int id, const std::vector& chunkserverConf) { std::vector ipPort; ::curve::common::SplitString(ChunkServerIpPortInBackground(id), ":", &ipPort); @@ -547,7 +547,7 @@ int CurveCluster::StartSingleChunkServerInBackground( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("netns"); args.emplace_back("exec"); @@ -555,13 +555,14 @@ int CurveCluster::StartSingleChunkServerInBackground( args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + ipPort[0]); args.emplace_back("-chunkServerPort=" + ipPort[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! 
+ * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execvp("ip", argv.data()); ClearArgv(argv); @@ -723,7 +724,7 @@ std::string CurveCluster::ChunkServerIpPortInBackground(int id) { } int CurveCluster::PreparePhysicalPool(int mdsId, - const std::string &clusterMap) { + const std::string& clusterMap) { LOG(INFO) << "create physicalpool begin..."; std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + @@ -741,15 +742,14 @@ int CurveCluster::PreparePhysicalPool(int mdsId, return 0; } -int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { +int CurveCluster::PrepareLogicalPool(int mdsId, const std::string& clusterMap) { LOG(INFO) << "create logicalpool begin..."; - std::string createLPCmd = - std::string("./bazel-bin/tools/curvefsTool") + - std::string(" -cluster_map=") + clusterMap + - std::string(" -mds_addr=") + MDSIpPort(mdsId) + - std::string(" -op=create_logicalpool") + - std::string(" -stderrthreshold=0 -minloglevel=0"); + std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + + std::string(" -cluster_map=") + clusterMap + + std::string(" -mds_addr=") + MDSIpPort(mdsId) + + std::string(" -op=create_logicalpool") + + std::string(" -stderrthreshold=0 -minloglevel=0"); LOG(INFO) << "exec cmd: " << createLPCmd; RETURN_IF_NOT_ZERO(system(createLPCmd.c_str())); @@ -758,7 +758,7 @@ int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { return 0; } -bool CurveCluster::CurrentServiceMDS(int *curId) { +bool CurveCluster::CurrentServiceMDS(int* curId) { for (auto mdsId : mdsPidMap_) { if (0 == ProbePort(mdsIpPort_[mdsId.first], 20000, true)) { *curId = mdsId.first; @@ -772,8 +772,8 @@ bool CurveCluster::CurrentServiceMDS(int *curId) { return false; } -int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize, +int CurveCluster::CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize, bool normalFile, const std::string& poolset) { LOG(INFO) << "create file: " << fileName << ", size: " << fileSize << " begin..."; @@ -785,13 +785,12 @@ int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, context.length = fileSize; context.poolset = poolset; - RETURN_IF_NOT_ZERO( - mdsClient_->CreateFile(context)); + RETURN_IF_NOT_ZERO(mdsClient_->CreateFile(context)); LOG(INFO) << "success create file"; return 0; } -int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, +int CurveCluster::ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen) { int socket_fd = socket(AF_INET, SOCK_STREAM, 0); if (-1 == socket_fd) { @@ -819,7 +818,7 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = - connect(socket_fd, (struct sockaddr *)&addr, sizeof(addr)); + connect(socket_fd, (struct sockaddr*)&addr, sizeof(addr)); if (expectOpen && connectRes == 0) { LOG(INFO) << "probe " << ipPort << " success."; close(socket_fd); diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index e5b04d30c7..4418f65ede 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -23,215 +23,222 @@ #ifndef TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ #define 
TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_

-#include
#include
-#include
#include
-#include "src/client/mds_client.h"
+#include
+#include
+
#include "src/client/config_info.h"
-#include "test/util/config_generator.h"
+#include "src/client/mds_client.h"
#include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h"
+#include "test/util/config_generator.h"

-using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd;
using ::curve::client::MDSClient;
+using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd;

namespace curve {

-#define RETURN_IF_NOT_ZERO(x)                              \
-    do {                                                   \
-        int ret = (x);                                     \
-        if (ret != 0) {                                    \
-            LOG(ERROR) << __FILE__ << ":" << __LINE__      \
-                       << "-> get non-ZERO, return -1";    \
-            return ret;                                    \
-        }                                                  \
+#define RETURN_IF_NOT_ZERO(x)                           \
+    do {                                                \
+        int ret = (x);                                  \
+        if (ret != 0) {                                 \
+            LOG(ERROR) << __FILE__ << ":" << __LINE__   \
+                       << "-> get non-ZERO, return -1"; \
+            return ret;                                 \
+        }                                               \
    } while (0)

-#define RETURN_IF_FALSE(x)                                 \
-    do {                                                   \
-        bool ret = (x);                                    \
-        if (!ret) {                                        \
-            LOG(ERROR) << __FILE__ << ":" << __LINE__      \
-                       << "-> get FALSE, return -1";       \
-            return -1;                                     \
-        }                                                  \
+#define RETURN_IF_FALSE(x)                            \
+    do {                                              \
+        bool ret = (x);                               \
+        if (!ret) {                                   \
+            LOG(ERROR) << __FILE__ << ":" << __LINE__ \
+                       << "-> get FALSE, return -1";  \
+            return -1;                                \
+        }                                             \
    } while (0)

class CurveCluster {
 public:
    /**
-     * CurveCluster 构造函数
+     * @brief CurveCluster constructor
     *
-     * @param[in] netWorkSegment 网桥的网络地址,默认为"192.168.200."
-     * @param[in] nsPrefix 网络命名空间的前缀,默认为"integ_"
+     * @param[in] netWorkSegment: The network address of the bridge, which
+     * defaults to "192.168.200."
+     * @param[in] nsPrefix: The prefix of the network namespace, which defaults
+     * to "integ_"
     */
-    CurveCluster(const std::string &netWorkSegment = "192.168.200.",
-                 const std::string &nsPrefix = "integ_")
+    CurveCluster(const std::string& netWorkSegment = "192.168.200.",
+                 const std::string& nsPrefix = "integ_")
        : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {}

    /**
-     * InitMdsClient 初始化mdsclient, 用于和mds交互
+     * @brief InitMdsClient initializes the mdsclient used to interact with mds
     *
-     * @param op 参数设置
-     * @return 0.成功; 非0.失败
+     * @param op: parameter settings
+     * @return 0 Success; Non 0 Failure
     */
-    int InitMdsClient(const curve::client::MetaServerOption &op);
-
+    int InitMdsClient(const curve::client::MetaServerOption& op);

    /**
-     * @brief 初始化metastore
+     * @brief Initialize metastore
     *
-     * @param[in] etcdEndpoints etcd client的ip port
+     * @param[in] etcdEndpoints: etcd client's IP port
     *
-     * @return 返回错误码
+     * @return returns an error code
     */
-    int InitSnapshotCloneMetaStoreEtcd(
-        const std::string &etcdEndpoints);
+    int InitSnapshotCloneMetaStoreEtcd(const std::string& etcdEndpoints);

    /**
-     * BuildNetWork 如果需要是用不同的ip来起chunkserver,
-     * 需要在测试用例的SetUp中先 调用该函数
-     * @return 0.成功; 非0.失败
+     * @brief BuildNetWork. If chunkservers need to be started with different
+     * IPs, this function must be called first in the test case's SetUp
+     *
+     * @return 0 Success; Non 0 Failure
     */
    int BuildNetWork();

    /**
-     * StopCluster 停止该集群中所有的进程
-     * @return 0.成功; -1.失败
+     * @brief StopCluster stops all processes in the cluster
+     *
+     * @return 0.Success; -1.Failure
     */
    int StopCluster();

    /**
-     * @brief 生成各模块配置文件
+     * @brief Generate configuration files for each module
     *
-     * @tparam T 任一ConfigGenerator
-     * @param configPath 配置文件路径
-     * @param options 修改的配置项
+     * @tparam T: any ConfigGenerator
+     * @param configPath: Configuration file path
+     * @param options: Configuration items to modify
     */
-    template
-    void PrepareConfig(const std::string &configPath,
-                       const std::vector &options) {
+    template
+    void PrepareConfig(const std::string& configPath,
+                       const std::vector& options) {
        T gentor(configPath);
        gentor.SetConfigOptions(options);
        gentor.Generate();
    }

    /**
-     * StartSingleMDS 启动一个mds
-     * 如果需要不同ip的chunkserver,ipPort请设置为192.168.200.1:XXXX
+     * @brief StartSingleMDS starts an mds
+     * If you need chunkservers with different IPs, set the ipPort to
+     * 192.168.200.1:XXXX
     *
-     * @param[in] id mdsId
-     * @param[in] ipPort 指定mds的ipPort
-     * @param[in] mdsConf mds启动参数项, 示例:
+     * @param[in] id: mdsId
+     * @param[in] ipPort: specifies the ipPort of the mds
+     * @param[in] mdsConf: mds startup parameter items, example:
     *   const std::vector mdsConf{
            {"--graceful_quit_on_sigterm"},
            {"--confPath=./test/integration/cluster_common/mds.basic.conf"},
        };
-     * @param[in] expectLeader 是否预期是leader
-     * @return 成功则返回pid; 失败则返回-1
+     * @param[in] expectLeader: whether this mds is expected to be the leader
+     * @return success returns pid; Failure returns -1
     */
-    int StartSingleMDS(int id, const std::string &ipPort, int dummyPort,
-                       const std::vector &mdsConf,
+    int StartSingleMDS(int id, const std::string& ipPort, int dummyPort,
+                       const std::vector& mdsConf,
                       bool expectLeader);

    /**
-     * StopMDS 停止指定id的mds
-     * @return 0.成功; -1.失败
+     * @brief StopMDS stops the mds with the specified id
+     * @return 0.Success; -1.Failure
     */
    int StopMDS(int id);

    /**
-     * StopAllMDS 停止所有mds
-     * @return 0.成功; -1.失败
+     * @brief StopAllMDS stops all mds
+     * @return 0.Success; -1.Failure
     */
    int StopAllMDS();

    /**
-     * @brief 启动一个snapshotcloneserver
+     * @brief Start a snapshotcloneserver
     *
-     * @param id snapshotcloneserver 的Id
-     * @param ipPort ip端口
-     * @param snapshotcloneConf 参数项
-     * @return 成功则返回pid; 失败则返回-1
+     * @param id: The ID of the snapshotcloneserver
+     * @param ipPort: IP Port
+     * @param snapshotcloneConf: parameter items
+     * @return success returns pid; Failure returns -1
     */
-    int
-    StartSnapshotCloneServer(int id, const std::string &ipPort,
-                             const std::vector &snapshotcloneConf);
+    int StartSnapshotCloneServer(
+        int id, const std::string& ipPort,
+        const std::vector& snapshotcloneConf);

    /**
-     * @brief 停止指定Id的snapshotcloneserver
+     * @brief Stop the snapshotcloneserver with the specified Id
     *
-     * @param id snapshotcloneserver的id
-     * @param force 为true时使用kill -9
-     * @return 成功返回0,失败返回-1
+     * @param id: The ID of the snapshotcloneserver
+     * @param force: Use kill -9 when it is true
+     * @return returns 0 for success, -1 for failure
     */
    int StopSnapshotCloneServer(int id, bool force = false);

    /**
-     * @brief 重启指定Id的snapshotcloneserver
+     * @brief Restart the snapshotcloneserver with the specified Id
     *
-     * @param id snapshotcloneserver的id
-     * @param force 为true时使用kill -9
-     * @return 成功则返回pid; 失败则返回-1
+     * @param id: The ID of the snapshotcloneserver
+     * @param force: Use kill -9 when it is true
+     * @return success returns pid; Failure returns -1
     */
    int RestartSnapshotCloneServer(int id, bool force = false);

    /**
-     * @brief 停止所有的snapshotcloneserver
-     * @return 成功返回0,失败返回-1
+     * @brief Stop all snapshotcloneservers
+     * @return returns 0 for success, -1 for failure
     */
    int StopAllSnapshotCloneServer();
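    /*
     * Usage sketch (hypothetical; the port numbers and flags below are
     * illustrative, not taken from this header): a minimal start/stop round
     * trip with the MDS lifecycle methods above.
     *
     *   CurveCluster cluster;
     *   const std::vector<std::string> conf{{"--graceful_quit_on_sigterm"}};
     *   pid_t pid = cluster.StartSingleMDS(1, "127.0.0.1:6666", 6667, conf,
     *                                      true);
     *   ASSERT_GT(pid, 0);                 // pid on success, -1 on failure
     *   ASSERT_EQ(0, cluster.StopMDS(1));
     */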
    /**
-     * StartSingleEtcd 启动一个etcd节点
+     * @brief StartSingleEtcd starts an etcd node
     *
     * @param clientIpPort
     * @param peerIpPort
-     * @param etcdConf etcd启动项参数, 建议按照模块指定name,防止并发运行时冲突
+     * @param etcdConf: etcd startup parameters; it is recommended to specify
+     * the name per module to prevent conflicts between concurrent runs
     *   std::vector{"--name basic_test_start_stop_module1"}
-     * @return 成功则返回pid; 失败则返回-1
+     * @return success returns pid; Failure returns -1
     */
-    int StartSingleEtcd(int id, const std::string &clientIpPort,
-                        const std::string &peerIpPort,
-                        const std::vector &etcdConf);
+    int StartSingleEtcd(int id, const std::string& clientIpPort,
+                        const std::string& peerIpPort,
+                        const std::vector& etcdConf);

    /**
-     * WaitForEtcdClusterAvalible
-     * 在一定时间内等待etcd集群leader选举成功,处于可用状态
+     * @brief WaitForEtcdClusterAvalible
+     * Wait up to a given period for the etcd cluster to elect a leader and
+     * become available
     */
    bool WaitForEtcdClusterAvalible(int waitSec = 20);

    /**
-     * StopEtcd 停止指定id的etcd节点
-     * @return 0.成功; -1.失败
+     * @brief StopEtcd stops the etcd node with the specified id
+     * @return 0.Success; -1.Failure
     */
    int StopEtcd(int id);

    /**
-     * StopAllEtcd 停止所有etcd节点
-     * @return 0.成功; -1.失败
+     * @brief StopAllEtcd stops all etcd nodes
+     * @return 0.Success; -1.Failure
     */
    int StopAllEtcd();

    /**
-     * @brief 格式化FilePool
+     * @brief FormatFilePool formats the FilePool
     *
-     * @param filePooldir FilePool目录
-     * @param filePoolmetapath FilePool元数据目录
-     * @param filesystemPath 文件系统目录
-     * @param size FilePool size (GB)
-     * @return 成功返回0,失败返回-1
+     * @param filePooldir: FilePool directory
+     * @param filePoolmetapath: FilePool metadata directory
+     * @param filesystemPath: file system directory
+     * @param size: FilePool size (GB)
+     * @return returns 0 for success, -1 for failure
     */
-    int FormatFilePool(const std::string &filePooldir,
-                       const std::string &filePoolmetapath,
-                       const std::string &filesystempath,
+    int FormatFilePool(const std::string& filePooldir,
+                       const std::string& filePoolmetapath,
+                       const std::string& filesystemPath, uint32_t size);
                       uint32_t size);

    /**
-     * StartSingleChunkServer 启动一个chunkserver节点
+     * @brief StartSingleChunkServer starts a chunkserver node
     *
     * @param[in] id
     * @param[in] ipPort
-     * @param[in] chunkserverConf chunkserver启动项,示例:
+     * @param[in] chunkserverConf chunkserver startup items, example:
     *   const std::vector chunkserverConf1{
            {"--graceful_quit_on_sigterm"},
            {"-chunkServerStoreUri=local://./basic1/"},
@@ -243,209 +250,219 @@ class CurveCluster {
            {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"},
            {"-raft_sync_segments=true"},
        };
-       建议文件名也按模块的缩写来,文件名不能太长,否则注册到数据库会失败
+       It is recommended to also name the file after the module's
+       abbreviation. The file name should not be too long, otherwise
+       registering it in the database will fail
-     * @return 成功则返回pid; 失败则返回-1
+     * @return success returns pid; Failure returns -1
     */
-    int StartSingleChunkServer(int id, const std::string &ipPort,
-                               const std::vector &chunkserverConf);
+    int StartSingleChunkServer(int id, const std::string& ipPort,
+                               const std::vector& chunkserverConf);

    /**
-     * StartSingleChunkServer 在网络命名空间内启动一个指定id的chunkserver
-     * 无需指定ipPort
+     * @brief StartSingleChunkServerInBackground starts a chunkserver with the
+     * specified id inside the network namespace; no ipPort needs to be given
     *
     * @param id
-     * @param chunkserverConf, 同StartSingleChunkServer的示例
-     * @return 成功则返回pid; 失败则返回-1
+     * @param chunkserverConf: same as the StartSingleChunkServer example
+     * @return success returns pid; Failure returns -1
     */
    int StartSingleChunkServerInBackground(
-        int id, const std::vector &chunkserverConf);
+        int id, const std::vector& chunkserverConf);

    /**
-     * StopChunkServer 停掉指定id的chunkserver进程
-     * @return 0.成功; -1.失败
+     * @brief StopChunkServer stops the chunkserver process with the specified
+     * id
+     * @return 0.Success; -1.Failure
     */
    int StopChunkServer(int id);

    /**
-     * StopAllChunkServer 停止所有chunkserver
-     * @return 0.成功; -1.失败
+     * @brief StopAllChunkServer stops all chunkservers
+     * @return 0.Success; -1.Failure
     */
    int StopAllChunkServer();

    /**
-     * PreparePhysicalPool 创建物理池
+     * @brief PreparePhysicalPool creates a physical pool
     *
-     * @param[in] id 给指定id的mds发送命令
-     * @param[in] clusterMap 拓扑信息,示例:
-     *   ./test/integration/cluster_common/cluster_common_topo_1.txt (不同ip)
+     * @param[in] id: Send the command to the mds with the specified id
+     * @param[in] clusterMap: topology information, example:
+     *   ./test/integration/cluster_common/cluster_common_topo_1.txt
+     *   (different IPs)
     *   ./test/integration/cluster_common/cluster_common_topo_2.txt
-     *   (相同ip, 一定要加上port加以区分,
-     *    chunkserver也必须和clusterMap中server的ipPort相同)
-     * @return 0.成功; -1.失败
+     *   (same IP; the port must be added to tell the servers apart, and each
+     *   chunkserver must match the ipPort of its server in the clusterMap)
+     * @return 0.Success; -1.Failure
     */
-    int PreparePhysicalPool(int mdsId, const std::string &clusterMap);
+    int PreparePhysicalPool(int mdsId, const std::string& clusterMap);

    /**
-     * @return 0.成功; -1.失败
+     * @return 0.Success; -1.Failure
     */
-    int PrepareLogicalPool(int mdsId, const std::string &clusterMap);
+    int PrepareLogicalPool(int mdsId, const std::string& clusterMap);

    /**
-     * MDSIpPort 获取指定id的mds地址
+     * MDSIpPort retrieves the mds address of the specified id
     */
    std::string MDSIpPort(int id);

    /**
-     * EtcdClientIpPort 获取指定id的etcd client地址
+     * EtcdClientIpPort retrieves the etcd client address of the specified id
     */
    std::string EtcdClientIpPort(int id);

    /**
-     * EtcdPeersIpPort 获取指定id的etcd peers地址
+     * EtcdPeersIpPort retrieves the etcd peers address of the specified id
     */
    std::string EtcdPeersIpPort(int id);

    /**
-     * ChunkServerIpPort 获取指定id的chunkserver地址
+     * ChunkServerIpPort retrieves the chunkserver address of the specified id
     */
    std::string ChunkServerIpPort(int id);

    /**
-     * HangMDS hang住指定mds进程
-     * @return 0.成功; -1.失败
+     * @brief HangMDS suspends the specified mds process
+     * @return 0.Success; -1.Failure
     */
    int HangMDS(int id);

    /**
-     * RecoverHangMDS 恢复hang住的mds进程
-     * @return 0.成功; -1.失败
+     * @brief RecoverHangMDS resumes the suspended mds process
+     * @return 0.Success; -1.Failure
     */
    int RecoverHangMDS(int id);
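    /*
     * Usage sketch (hypothetical): suspend an mds, verify that some mds is
     * still serving, then resume it. CurrentServiceMDS() is declared further
     * below; ids are illustrative.
     *
     *   ASSERT_EQ(0, cluster.HangMDS(1));
     *   int curId = -1;
     *   ASSERT_TRUE(cluster.CurrentServiceMDS(&curId));  // a serving mds exists
     *   ASSERT_EQ(0, cluster.RecoverHangMDS(1));
     */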
    /**
-     * HangEtcd hang住指定etcd进程
-     * @return 0.成功; -1.失败
+     * @brief HangEtcd suspends the specified etcd process
+     * @return 0.Success; -1.Failure
     */
    int HangEtcd(int id);

    /**
-     * RecoverHangEtcd 恢复hang住的mds进程
-     * @return 0.成功; -1.失败
+     * @brief RecoverHangEtcd resumes the suspended etcd process
+     * @return 0.Success; -1.Failure
     */
    int RecoverHangEtcd(int id);

    /**
-     * HangChunkServer hang住指定chunkserver进程
-     * @return 0.成功; -1.失败
+     * @brief HangChunkServer suspends the specified chunkserver process
+     * @return 0.Success; -1.Failure
     */
    int HangChunkServer(int id);

    /**
-     * RecoverHangChunkServer 恢复hang住的chunkserver进程
-     * @return 0.成功; -1.失败
+     * @brief RecoverHangChunkServer resumes the suspended chunkserver
+     * process
+     * @return 0.Success; -1.Failure
     */
    int RecoverHangChunkServer(int id);

    /**
-     * CurrentServiceMDS 获取当前正在提供服务的mds
+     * CurrentServiceMDS obtains the mds that is currently providing service
     *
-     * @param[out] curId 当前正在服务的mds编号
+     * @param[out] curId: the id of the currently serving mds
     *
-     * @return true表示有正在服务的mds, false表示没有正在服务的mds
+     * @return true if there is a serving mds, false if there is none
     */
-    bool CurrentServiceMDS(int *curId);
+    bool CurrentServiceMDS(int* curId);

    /**
-     * CreateFile 在curve中创建文件
+     * @brief CreateFile creates a file in Curve.
     *
-     * @param[in] user 用户
-     * @param[in] pwd 密码
-     * @param[in] fileName 文件名
-     * @param[in] fileSize 文件大小
-     * @param[in] normalFile 是否为normal file
-     * @return 0.成功; -1.失败
-     */
-    int CreateFile(const std::string &user, const std::string &pwd,
-                   const std::string &fileName, uint64_t fileSize = 0,
+     * @param[in] user: User
+     * @param[in] pwd: Password
+     * @param[in] fileName: File name
+     * @param[in] fileSize: File size
+     * @param[in] normalFile: Whether it is a normal file
+     * @return 0. Success; -1. Failure
+     */
+    int CreateFile(const std::string& user, const std::string& pwd,
+                   const std::string& fileName, uint64_t fileSize = 0,
                   bool normalFile = true, const std::string& poolset = "");

 private:
    /**
-     * ProbePort 探测指定ipPort是否处于监听状态
+     * @brief ProbePort checks whether the specified ipPort is in a listening
+     * state.
     *
-     * @param[in] ipPort 指定的ipPort值
-     * @param[in] timeoutMs 探测的超时时间,单位是ms
-     * @param[in] expectOpen 是否希望是监听状态
+     * @param[in] ipPort: The specified ipPort value.
+     * @param[in] timeoutMs: The timeout for probing in milliseconds.
+     * @param[in] expectOpen: Whether it is expected to be in a listening state.
     *
-     * @return 0表示指定时间内的探测符合预期. -1表示指定时间内的探测不符合预期
+     * @return 0 if the probe meets the expectation within the specified
+     * time; -1 if it does not.
     */
-    int ProbePort(const std::string &ipPort, int64_t timeoutMs,
+    int ProbePort(const std::string& ipPort, int64_t timeoutMs,
                  bool expectOpen);

    /**
-     * ChunkServerIpPortInBackground
-     * 在需要不同ip的chunkserver的情况下,用于生成chunkserver ipPort
+     * @brief ChunkServerIpPortInBackground
+     * Used to generate the chunkserver ipPort when chunkservers with
+     * different IPs are required
     */
    std::string ChunkServerIpPortInBackground(int id);

    /**
-     * HangProcess hang住一个进程
+     * @brief HangProcess suspends a process
     *
-     * @param pid 进程id
-     * @return 0.成功; -1.失败
+     * @param pid: process id
+     * @return 0.Success; -1.Failure
     */
    int HangProcess(pid_t pid);

    /**
-     * RecoverHangProcess 恢复hang住的进程
+     * @brief RecoverHangProcess resumes a suspended process.
     *
-     * @param pid 进程id
-     * @return 0.成功; -1.失败
+     * @param pid: Process ID
+     * @return 0. Success; -1.
Failure */ int RecoverHangProcess(pid_t pid); private: - // 网络号 + // Network number std::string networkSegment_; - // 网络命名空间前缀 + // Network namespace prefix std::string nsPrefix_; - // mds的id对应的进程号 + // The process number corresponding to the ID of the mds std::map mdsPidMap_; - // mds的id对应的ipport + // The ipport corresponding to the ID of the mds std::map mdsIpPort_; - // snapshotcloneserver id对应的pid + // The pid corresponding to the snapshotcloneserver id std::map snapPidMap_; - // snapshotcloneserver id对应的ipPort + // The ipPort corresponding to the snapshotcloneserver ID std::map snapIpPort_; - // snapshotcloneserver id对应的conf + // Conf corresponding to snapshotcloneserver id std::map> snapConf_; - // etcd的id对应的进程号 + // The process number corresponding to the id of ETCD std::map etcdPidMap_; - // etcd的id对应的client ipport + // The client ipport corresponding to the id of ETCD std::map etcdClientIpPort_; - // etcd的id对应的peer ipport + // Peer ipport corresponding to the id of ETCD std::map etcdPeersIpPort_; - // chunkserver的id对应的进程号 + // The process number corresponding to the id of chunkserver std::map chunkserverPidMap_; - // chunkserver的id对应的ipport + // The IP port corresponding to the ID of the chunkserver std::map chunkserverIpPort_; // mdsClient std::shared_ptr mdsClient_; public: - // SnapshotCloneMetaStore用于测试过程中灌数据 + // SnapshotCloneMetaStore for filling data during testing std::shared_ptr metaStore_; }; } // namespace curve diff --git a/test/integration/common/chunkservice_op.h b/test/integration/common/chunkservice_op.h index 28f32c6891..58322ea380 100644 --- a/test/integration/common/chunkservice_op.h +++ b/test/integration/common/chunkservice_op.h @@ -24,9 +24,11 @@ #define TEST_INTEGRATION_COMMON_CHUNKSERVICE_OP_H_ #include -#include -#include + #include +#include +#include + #include "include/chunkserver/chunkserver_common.h" #include "proto/common.pb.h" @@ -40,7 +42,7 @@ using std::string; #define NULL_SN -1 struct ChunkServiceOpConf { - Peer *leaderPeer; + Peer* leaderPeer; LogicPoolID logicPoolId; CopysetID copysetId; uint32_t rpcTimeout; @@ -49,221 +51,247 @@ struct ChunkServiceOpConf { class ChunkServiceOp { public: /** - * @brief 通过chunkService写chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Write a chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param data 待写数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data: The data to be written + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int WriteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int WriteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - const char *data, + const char* data, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn: Chunk version * @param offset * @param len - * @param data 读取内容 
- * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data: The reading content + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int ReadChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, - SequenceNum sn, off_t offset, size_t len, - string *data, + static int ReadChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, + SequenceNum sn, off_t offset, size_t len, string* data, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk快照 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk snapshot through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param data 读取内容 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data: The reading content + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, + static int ReadChunkSnapshot(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, std::string *data); + size_t len, std::string* data); /** - * @brief 通过chunkService删除chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Delete chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param sn: chunk version + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int DeleteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn); /** - * @brief 通过chunkService删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief: Delete the snapshot generated during this dump or historical + * legacy through chunkService If no snapshot is generated during + * the dump process, modify the correctedSn of the chunk + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId * @param correctedSn - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf *opConf, + static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 通过chunkService创建clone chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Create a clone chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param location 源chunk在源端的位置,可能在curve或S3上 + * @param location: The location of the source chunk on the source side, + * possibly on curve or S3 * @param correctedSn * @param sn * @param chunkSize - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int 
CreateCloneChunk(struct ChunkServiceOpConf *opConf, - ChunkID chunkId, const std::string &location, + static int CreateCloneChunk(struct ChunkServiceOpConf* opConf, + ChunkID chunkId, const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize); /** - * @brief 通过chunkService恢复chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Restore Chunk through ChunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int RecoverChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, off_t offset, size_t len); /** - * @brief 通过chunkService获取chunk元数据 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief: Obtain chunk metadata through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param curSn 返回当前chunk版本 - * @param snapSn 返回快照chunk版本 - * @param redirectedLeader 返回重定向主节点 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param curSn: returns the current chunk version + * @param snapSn: returns the snapshot chunk version + * @param redirectedLeader returns the redirected master node + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int GetChunkInfo(struct ChunkServiceOpConf *opConf, ChunkID chunkId, - SequenceNum *curSn, SequenceNum *snapSn, - string *redirectedLeader); + static int GetChunkInfo(struct ChunkServiceOpConf* opConf, ChunkID chunkId, + SequenceNum* curSn, SequenceNum* snapSn, + string* redirectedLeader); }; class ChunkServiceVerify { public: - explicit ChunkServiceVerify(struct ChunkServiceOpConf *opConf) + explicit ChunkServiceVerify(struct ChunkServiceOpConf* opConf) : opConf_(opConf) {} /** - * @brief 执行写chunk, 并将数据写入到chunkdata对应区域,以便于后续验证数据。 + * @brief Executes the write chunk and writes the data to the corresponding + * area of chunkdata for subsequent data validation. * @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param data 待写数据 - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 返回写操作的错误码 + * @param data: the data to be written + * @param chunkData: Expected data for the entire chunk + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return returns the error code for the write operation */ int VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, const char *data, string *chunkData, + size_t len, const char* data, string* chunkData, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 执行读chunk, 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Executes the read chunk and verifies whether the read content + * matches the expected data in the corresponding region of the + * chunkdata. 
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn: chunk version
     * @param offset
     * @param len
-     * @param chunkData 整个chunk的预期数据
-     * @param cloneFileSource clone源的文件路径
-     * @param cloneFileOffset clone chunk在clone源中的相对偏移
-     * @return 读请求结果符合预期返回0,否则返回-1
+     * @param chunkData: Expected data for the entire chunk
+     * @param cloneFileSource: The file path of the clone source
+     * @param cloneFileOffset: Relative offset of clone chunk in clone source
+     * @return 0 if the read result matches the expectation, -1 otherwise
     */
    int VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset,
-                        size_t len, string *chunkData,
+                        size_t len, string* chunkData,
                        const std::string& cloneFileSource = "",
                        off_t cloneFileOffset = 0);

    /**
-     * @brief 执行读chunk快照,
-     *        并验证读取内容是否与chunkdata对应区域的预期数据吻合。
+     * @brief Execute a read of the chunk snapshot and verify whether the
+     *        content matches the expected data in the corresponding area of
+     *        chunkdata.
     * @param chunkId
-     * @param sn chunk版本
+     * @param sn: chunk version
     * @param offset
     * @param len
-     * @param chunkData 整个chunk的预期数据
-     * @return 读请求结果符合预期返回0,否则返回-1
+     * @param chunkData: Expected data for the entire chunk
+     * @return 0 if the read result matches the expectation, -1 otherwise
     */
    int VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset,
-                                size_t len, string *chunkData);
+                                size_t len, string* chunkData);

    /**
-     * @brief 删除chunk
+     * @brief Delete a chunk
     * @param chunkId
-     * @param sn chunk版本
-     * @return 返回删除操作的错误码
+     * @param sn: chunk version
+     * @return returns the error code of the delete operation
     */
    int VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn);

    /**
-     * @brief 删除chunk的快照
+     * @brief Delete the snapshot of the chunk
     * @param chunkId
     * @param correctedSn
-     * @return 返回删除操作的错误码
+     * @return returns the error code of the delete operation
     */
    int VerifyDeleteChunkSnapshotOrCorrectSn(ChunkID chunkId,
                                             SequenceNum correctedSn);

    /**
-     * @brief 创建clone chunk
+     * @brief Create a clone chunk
     * @param chunkId
-     * @param location 源地址
+     * @param location: source address
     * @param correctedSn
     * @param sn
     * @param chunkSize
-     * @return 返回创建操作的错误码
+     * @return returns the error code of the creation operation
     */
-    int VerifyCreateCloneChunk(ChunkID chunkId, const std::string &location,
+    int VerifyCreateCloneChunk(ChunkID chunkId, const std::string& location,
                               uint64_t correctedSn, uint64_t sn,
                               uint64_t chunkSize);

    /**
-     * @brief 恢复chunk
+     * @brief Restore a chunk
     * @param chunkId
     * @param offset
     * @param len
-     * @return 请求执行失败则返回-1,否则返回错误码
+     * @return If the request fails to execute, -1 will be returned, otherwise
+     * an error code will be returned
     */
    int VerifyRecoverChunk(ChunkID chunkId, off_t offset, size_t len);

    /**
-     * @brief 获取chunk元数据,并检验结果是否符合预期
+     * @brief Obtain chunk metadata and verify whether the results meet
+     * expectations
     * @param chunkId
-     * @param expCurSn 预期chunk版本,-1表示不存在
-     * @param expSanpSn 预期快照版本,-1表示不存在
-     * @param expLeader 预期redirectedLeader
-     * @return 验证成功返回0,否则返回-1
+     * @param expCurSn: Expected chunk version, -1 indicates non-existent
+     * @param expSanpSn: Expected snapshot version, -1 indicates non-existent
+     * @param expLeader: Expected redirectedLeader
+     * @return returns 0 after successful verification, otherwise returns -1
     */
    int VerifyGetChunkInfo(ChunkID chunkId, SequenceNum expCurSn,
                           SequenceNum expSnapSn, string expLeader);

 private:
-    struct ChunkServiceOpConf *opConf_;
-    // 记录写过的chunkId(预期存在),用于判断请求的返回值是否符合预期
+    struct ChunkServiceOpConf* opConf_;
+    // Record the chunkId
(expected existence) that has been written, used to + // determine whether the return value of the request meets expectations std::set existChunks_; }; diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp index 5d09293287..ae597506bc 100644 --- a/test/integration/heartbeat/common.cpp +++ b/test/integration/heartbeat/common.cpp @@ -21,44 +21,44 @@ */ #include "test/integration/heartbeat/common.h" + #include "test/mds/mock/mock_alloc_statistic.h" namespace curve { namespace mds { -void HeartbeatIntegrationCommon::PrepareAddPoolset( - const Poolset &poolset) { +void HeartbeatIntegrationCommon::PrepareAddPoolset(const Poolset& poolset) { int ret = topology_->AddPoolset(poolset); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } void HeartbeatIntegrationCommon::PrepareAddLogicalPool( - const LogicalPool &lpool) { + const LogicalPool& lpool) { int ret = topology_->AddLogicalPool(lpool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; } void HeartbeatIntegrationCommon::PrepareAddPhysicalPool( - const PhysicalPool &ppool) { + const PhysicalPool& ppool) { int ret = topology_->AddPhysicalPool(ppool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } -void HeartbeatIntegrationCommon::PrepareAddZone(const Zone &zone) { +void HeartbeatIntegrationCommon::PrepareAddZone(const Zone& zone) { int ret = topology_->AddZone(zone); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } -void HeartbeatIntegrationCommon::PrepareAddServer(const Server &server) { +void HeartbeatIntegrationCommon::PrepareAddServer(const Server& server) { int ret = topology_->AddServer(server); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void HeartbeatIntegrationCommon::PrepareAddChunkServer( - const ChunkServer &chunkserver) { + const ChunkServer& chunkserver) { ChunkServer cs(chunkserver); cs.SetOnlineState(OnlineState::ONLINE); int ret = topology_->AddChunkServer(cs); @@ -68,7 +68,7 @@ void HeartbeatIntegrationCommon::PrepareAddChunkServer( void HeartbeatIntegrationCommon::PrepareAddCopySet( CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members) { + const std::set& members) { CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); int ret = topology_->AddCopySet(cs); @@ -78,10 +78,10 @@ void HeartbeatIntegrationCommon::PrepareAddCopySet( void HeartbeatIntegrationCommon::UpdateCopysetTopo( CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, - ChunkServerIdType leader, const std::set &members, + ChunkServerIdType leader, const std::set& members, ChunkServerIdType candidate) { ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE(topology_->GetCopySet(CopySetKey{ logicalPoolId, copysetId }, + ASSERT_TRUE(topology_->GetCopySet(CopySetKey{logicalPoolId, copysetId}, ©setInfo)); copysetInfo.SetEpoch(epoch); copysetInfo.SetLeader(leader); @@ -93,8 +93,8 @@ void HeartbeatIntegrationCommon::UpdateCopysetTopo( } void HeartbeatIntegrationCommon::SendHeartbeat( - const ChunkServerHeartbeatRequest &request, bool expectFailed, - ChunkServerHeartbeatResponse *response) { + const ChunkServerHeartbeatRequest& request, bool expectFailed, + ChunkServerHeartbeatResponse* response) { // init brpc client brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr_.c_str(), NULL)); @@ -109,7 +109,7 @@ void HeartbeatIntegrationCommon::SendHeartbeat( } void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( - ChunkServerIdType id, 
ChunkServerHeartbeatRequest *req) { + ChunkServerIdType id, ChunkServerHeartbeatRequest* req) { ChunkServer out; EXPECT_TRUE(topology_->GetChunkServer(id, &out)) << "get chunkserver: " << id << " fail"; @@ -139,7 +139,7 @@ void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( } void HeartbeatIntegrationCommon::AddCopySetToRequest( - ChunkServerHeartbeatRequest *req, const CopySetInfo &csInfo, + ChunkServerHeartbeatRequest* req, const CopySetInfo& csInfo, ConfigChangeType type) { auto info = req->add_copysetinfos(); info->set_logicalpoolid(csInfo.GetLogicalPoolId()); @@ -170,7 +170,7 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( << "get chunkserver: " << csInfo.GetCandidate() << " error"; std::string ipport = out.GetHostIp() + ":" + std::to_string(out.GetPort()) + ":0"; - ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); auto replica = new ::curve::common::Peer(); replica->set_address(ipport.c_str()); confChxInfo->set_allocated_peer(replica); @@ -180,13 +180,13 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( } } -void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator &op) { +void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator& op) { auto opController = coordinator_->GetOpController(); ASSERT_TRUE(opController->AddOperator(op)); } void HeartbeatIntegrationCommon::RemoveOperatorFromOpController( - const CopySetKey &id) { + const CopySetKey& id) { auto opController = coordinator_->GetOpController(); opController->RemoveOperator(id); } @@ -243,11 +243,11 @@ void HeartbeatIntegrationCommon::PrepareBasicCluseter() { PrepareAddChunkServer(cs3); // add copyset - PrepareAddCopySet(1, 1, std::set{ 1, 2, 3 }); + PrepareAddCopySet(1, 1, std::set{1, 2, 3}); } void HeartbeatIntegrationCommon::InitHeartbeatOption( - Configuration *conf, HeartbeatOption *heartbeatOption) { + Configuration* conf, HeartbeatOption* heartbeatOption) { heartbeatOption->heartbeatIntervalMs = conf->GetIntValue("mds.heartbeat.intervalMs"); heartbeatOption->heartbeatMissTimeOutMs = @@ -259,7 +259,7 @@ void HeartbeatIntegrationCommon::InitHeartbeatOption( } void HeartbeatIntegrationCommon::InitSchedulerOption( - Configuration *conf, ScheduleOption *scheduleOption) { + Configuration* conf, ScheduleOption* scheduleOption) { scheduleOption->enableCopysetScheduler = conf->GetBoolValue("mds.enable.copyset.scheduler"); scheduleOption->enableLeaderScheduler = @@ -305,22 +305,20 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { auto idGen = std::make_shared(); auto tokenGen = std::make_shared(); - auto topologyStorage = - std::make_shared(); + auto topologyStorage = std::make_shared(); topology_ = std::make_shared(idGen, tokenGen, topologyStorage); ASSERT_EQ(kTopoErrCodeSuccess, topology_->Init(topologyOption)); // init topology manager - topologyStat_ = - std::make_shared(topology_); + topologyStat_ = std::make_shared(topology_); topologyStat_->Init(); auto copysetManager = std::make_shared(CopysetOption()); auto allocStat = std::make_shared(); auto topologyServiceManager = std::make_shared( topology_, topologyStat_, nullptr, copysetManager, nullptr); - // 初始化basic集群 + // Initialize basic cluster PrepareBasicCluseter(); // init coordinator @@ -341,7 +339,7 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { heartbeatManager_->Init(); heartbeatManager_->Run(); - // 启动心跳rpc + // Start heartbeat rpc listenAddr_ = conf_.GetStringValue("mds.listen.addr"); heartbeatService_ = 
std::make_shared(heartbeatManager_); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index d4ccb66c65..8ed3364576 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -20,20 +20,20 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "src/common/uuid.h" -#include "src/common/location_operator.h" -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/common/location_operator.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/uuid.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; @@ -49,27 +49,27 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkSplitSize = 8388608; -// 测试文件只写2个segment +// Write only 2 segments in the test file const uint64_t testFile1AllocSegmentNum = 2; -// 一些常数定义 -const char *cloneTempDir_ = "/clone"; -const char *mdsRootUser_ = "root"; -const char *mdsRootPassword_ = "root_password"; +// Some constant definitions +const char* cloneTempDir_ = "/clone"; +const char* mdsRootUser_ = "root"; +const char* mdsRootPassword_ = "root_password"; constexpr uint32_t kProgressTransferSnapshotDataStart = 10; -const char *kEtcdClientIpPort = "127.0.0.1:10021"; -const char *kEtcdPeerIpPort = "127.0.0.1:10022"; -const char *kMdsIpPort = "127.0.0.1:10023"; -const char *kChunkServerIpPort1 = "127.0.0.1:10024"; -const char *kChunkServerIpPort2 = "127.0.0.1:10025"; -const char *kChunkServerIpPort3 = "127.0.0.1:10026"; -const char *kSnapshotCloneServerIpPort = "127.0.0.1:10027"; -const char *kSnapshotCloneServerDummyServerPort = "12002"; -const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; +const char* kEtcdClientIpPort = "127.0.0.1:10021"; +const char* kEtcdPeerIpPort = "127.0.0.1:10022"; +const char* kMdsIpPort = "127.0.0.1:10023"; +const char* kChunkServerIpPort1 = "127.0.0.1:10024"; +const char* kChunkServerIpPort2 = "127.0.0.1:10025"; +const char* kChunkServerIpPort3 = "127.0.0.1:10026"; +const char* kSnapshotCloneServerIpPort = "127.0.0.1:10027"; +const char* kSnapshotCloneServerDummyServerPort = "12002"; +const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; -static const char *kDefaultPoolset = "default"; +static const char* kDefaultPoolset = "default"; const int kMdsDummyPort = 10028; @@ -79,27 +79,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const 
std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -120,11 +119,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -151,66 +150,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, 
- { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -237,7 +233,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -246,8 +242,8 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char *testFile1_ = "/RcvItUser1/file1"; -const char *testUser1_ = "RcvItUser1"; +const char* testFile1_ = "/RcvItUser1/file1"; +const char* testUser1_ = "RcvItUser1"; int testFd1_ = 0; namespace curve { @@ -262,16 +258,16 @@ class SnapshotCloneServerTest : public ::testing::Test 
{ cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -281,13 +277,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -296,21 +292,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 2); - threadpool[1] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 2); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 2); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 2); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 2); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 2); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -324,7 +317,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -343,7 +336,8 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -381,9 +375,9 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(INFO) << "Write testFile1_ success."; } - static bool CreateAndWriteFile(const std::string &fileName, - const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool 
CreateAndWriteFile(const std::string& fileName, + const std::string& user, + const std::string& dataSample, int* fdOut) { UserInfo_t userinfo; userinfo.owner = user; int ret = fileClient_->Create(fileName, userinfo, testFile1Length); @@ -394,8 +388,8 @@ class SnapshotCloneServerTest : public ::testing::Test { return WriteFile(fileName, user, dataSample, fdOut); } - static bool WriteFile(const std::string &fileName, const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool WriteFile(const std::string& fileName, const std::string& user, + const std::string& dataSample, int* fdOut) { int ret = 0; UserInfo_t userinfo; userinfo.owner = user; @@ -404,7 +398,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << *fdOut; return false; } - // 2个segment,每个写第一个chunk + // 2 segments, each with the first chunk written for (uint64_t i = 0; i < testFile1AllocSegmentNum; i++) { ret = fileClient_->Write(*fdOut, dataSample.c_str(), i * segmentSize, dataSample.size()); @@ -421,14 +415,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - static bool CheckFileData(const std::string &fileName, - const std::string &user, - const std::string &dataSample) { + static bool CheckFileData(const std::string& fileName, + const std::string& user, + const std::string& dataSample) { UserInfo_t userinfo; userinfo.owner = user; int ret = 0; - // 检查文件状态 + // Check file status FInfo fileInfo; ret = snapClient_->GetFileInfo(fileName, userinfo, &fileInfo); if (ret < 0) { @@ -490,7 +484,7 @@ class SnapshotCloneServerTest : public ::testing::Test { void TearDown() {} - void PrepareSnapshotForTestFile1(std::string *uuid1) { + void PrepareSnapshotForTestFile1(std::string* uuid1) { if (!hasSnapshotForTestFile1_) { int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); ASSERT_EQ(0, ret); @@ -509,23 +503,23 @@ class SnapshotCloneServerTest : public ::testing::Test { } } - int PrepareCreateCloneFile(const std::string &fileName, FInfo *fInfoOut, + int PrepareCreateCloneFile(const std::string& fileName, FInfo* fInfoOut, bool IsRecover = false) { uint64_t seqNum = 1; if (IsRecover) { - seqNum = 2; // 恢复新文件使用版本号+1 + seqNum = 2; // Restore new files using version number+1 } else { - seqNum = 1; // 克隆新文件使用初始版本号1 + seqNum = 1; // Clone new files using initial version number 1 } int ret = snapClient_->CreateCloneFile( - testFile1_, fileName, - UserInfo_t(mdsRootUser_, mdsRootPassword_), testFile1Length, - seqNum, chunkSize, 0, 0, kDefaultPoolset, fInfoOut); + testFile1_, fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_), + testFile1Length, seqNum, chunkSize, 0, 0, kDefaultPoolset, + fInfoOut); return ret; } - int PrepareCreateCloneMeta(FInfo *fInfoOut, const std::string &newFileName, - std::vector *segInfoOutVec) { + int PrepareCreateCloneMeta(FInfo* fInfoOut, const std::string& newFileName, + std::vector* segInfoOutVec) { fInfoOut->fullPathName = newFileName; fInfoOut->userinfo = UserInfo_t(mdsRootUser_, mdsRootPassword_); for (int i = 0; i < testFile1AllocSegmentNum; i++) { @@ -540,7 +534,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCreateCloneChunk(const std::vector &segInfoVec, + int PrepareCreateCloneChunk(const std::vector& segInfoVec, bool IsRecover = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -555,13 +549,14 @@ class SnapshotCloneServerTest : public ::testing::Test { name.chunkIndex_ = i * segmentSize / chunkSize; 
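// With chunkSize = 16MB and segmentSize = 32MB (see the constants at the top of this file), segmentSize / chunkSize = 2 chunks per segment, so the first chunk of segment i gets chunkIndex_ = 2 * i.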
std::string location = LocationOperator::GenerateS3Location(name.ToDataChunkKey()); - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Create第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Create全部的chunk。 + // Each segment of the test file only has its first chunk + // written, and a snapshot dumps only the chunks that were + // actually written, so cloning from the snapshot creates + // only the first chunk of each segment. When cloning from + // a file, mds does not know which chunks have been written, + // so all chunks have to be created. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -571,8 +566,10 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 2; int ret = snapClient_->CreateCloneChunk( location, cidInfo, - 1, // 恢复使用快照中chunk的版本号 - 2, // 恢复使用新文件的版本号, 即原文件版本号+1 + 1, // recovery uses the version number of the chunk in + // the snapshot + 2, // recovery uses the version number of the new file, + // i.e. the original file's version number + 1 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -585,7 +582,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LocationOperator::GenerateCurveLocation( testFile1_, i * segmentSize + j * chunkSize); ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -593,11 +590,11 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", copysetId = " << cidInfo.cpid_ << ", chunkId = " << cidInfo.cid_ << ", seqNum = " << 1 << ", csn = " << 0; - int ret = - snapClient_->CreateCloneChunk(location, cidInfo, - 1, // 克隆使用初始版本号1 - 0, // 克隆使用0 - chunkSize, cb); + int ret = snapClient_->CreateCloneChunk( + location, cidInfo, + 1, // cloning uses the initial version number 1 + 0, // cloning uses 0 + chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -614,14 +611,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneMeta(const std::string &uuid) { + int PrepareCompleteCloneMeta(const std::string& uuid) { std::string fileName = std::string(cloneTempDir_) + "/" + uuid; int ret = snapClient_->CompleteCloneMeta( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); return ret; } - int PrepareRecoverChunk(const std::vector &segInfoVec, + int PrepareRecoverChunk(const std::vector& segInfoVec, bool IsSnapshot = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -630,14 +627,15 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum; i++) { - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Recover第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Recover全部的chunk。 + // Each segment of the test file only has its first chunk + // written, and a snapshot dumps only the chunks that were + // actually written, so cloning from the snapshot recovers + // only the first chunk of each segment. When cloning from + // a file, mds does not know which chunks have been written, + // so all chunks have to be recovered.
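+ // chunkSize / chunkSplitSize = 16MB / 8MB = 2 here, so the + // loop below recovers each chunk in two pieces, at offsets + // k * chunkSplitSize.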
ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -658,7 +656,7 @@ class SnapshotCloneServerTest : public ::testing::Test { for (uint64_t j = 0; j < segmentSize / chunkSize; j++) { ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -686,44 +684,42 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneFile(const std::string &fileName) { + int PrepareCompleteCloneFile(const std::string& fileName) { return snapClient_->CompleteCloneFile( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } - int PrepareChangeOwner(const std::string &fileName) { + int PrepareChangeOwner(const std::string& fileName) { return fileClient_->ChangeOwner( fileName, testUser1_, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } int PrepareRenameCloneFile(uint64_t originId, uint64_t destinationId, - const std::string &fileName, - const std::string &newFileName) { + const std::string& fileName, + const std::string& newFileName) { return snapClient_->RenameCloneFile( UserInfo_t(mdsRootUser_, mdsRootPassword_), originId, destinationId, fileName, newFileName); } - static CurveCluster *cluster_; - static FileClient *fileClient_; - static SnapshotClient *snapClient_; + static CurveCluster* cluster_; + static FileClient* fileClient_; + static SnapshotClient* snapClient_; bool hasSnapshotForTestFile1_ = false; std::string snapIdForTestFile1_; }; -CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; -FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; -SnapshotClient *SnapshotCloneServerTest::snapClient_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; +FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; +SnapshotClient* SnapshotCloneServerTest::snapClient_ = nullptr; -// 未在curve中创建快照阶段,重启恢复 +// Restart and recover at the stage where the snapshot has not yet been +// created on curvefs TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -740,19 +736,18 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,但成功结果未返回,重启恢复 +// A snapshot has been created on curvefs, but the success result has not +// been returned.
Restart for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsNotReturn) { - // 调用client接口创建快照 + // Calling the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -769,18 +764,18 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,结果已返回,重启恢复 +// A snapshot has been created in the curve, and the results have been returned. +// Restart to recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsReturn) { - // 调用client接口创建快照 + // Calling the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", seq, - chunkSize, segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -797,7 +792,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照阶段,nos上传部分快照,重启恢复 +// The snapshot phase has been created in the curve. 
NOS uploads some snapshots +// and restarts for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); @@ -812,7 +808,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { } if (info1.GetSnapshotInfo().GetStatus() == Status::pending) { if (info1.GetSnapProgress() > kProgressTransferSnapshotDataStart) { - // 当进度到达转储的百分比时重启 + // Restart when the progress reaches the percentage of the dump pid_t pid = cluster_->RestartSnapshotCloneServer(1, true); LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; ASSERT_GT(pid, 0); @@ -836,16 +832,14 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上未创建文件 +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -860,7 +854,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but +// not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasCreateCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -870,12 +865,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneHasCreateCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -890,7 +883,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -898,12 +891,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, 
CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -918,7 +909,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -932,12 +924,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -952,7 +942,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -964,12 +955,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -984,7 +973,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneChunkSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1000,12 +990,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneChunkSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1020,7 +1008,7 @@ 
TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1034,12 +1022,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1054,7 +1040,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1072,12 +1059,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1092,7 +1077,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1108,12 +1093,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRecoverChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1128,7 +1111,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart 
the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1147,12 +1131,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneRecoverChunkSuccssNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1167,7 +1149,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1185,12 +1167,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1205,7 +1185,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1227,12 +1208,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1247,7 +1226,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1267,12 +1246,10 @@ 
TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotChangeOwner"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1287,7 +1264,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1310,12 +1288,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneChangeOwnerSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1330,7 +1306,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1352,12 +1328,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRenameCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1372,7 +1346,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneRenameCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1400,12 +1375,10 @@ TEST_F(SnapshotCloneServerTest, LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, fInfoOut.id, fileName, dstFile)); - 
CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1420,18 +1393,16 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 以下为Lazy模式用例 -// CreateCloneFile阶段重启,mds上未创建文件 +// The following are the Lazy pattern use cases +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); std::string uuid1 = UUIDGenerator().GenerateUUID(); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1451,7 +1422,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but +// not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasCreateCloneFileSuccessNotReturn) { std::string snapId; @@ -1462,12 +1434,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1487,7 +1457,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1497,12 +1467,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1522,7 +1490,8 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneMetaSuccessNotReturn) { std::string snapId; @@ -1537,12 +1506,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1562,7 +1529,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1576,12 +1544,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1601,7 +1567,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneChunkSuccessNotReturn) { std::string snapId; @@ -1618,12 +1585,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1643,7 +1608,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1659,12 +1624,10 @@ TEST_F(SnapshotCloneServerTest, 
TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1684,7 +1647,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// Restart during the CompleteCloneMeta phase; the CompleteCloneMeta call on +// the mds succeeded but the response was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneMetaSuccessNotReturn) { std::string snapId; @@ -1703,12 +1667,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1728,7 +1690,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// Restart during the ChangeOwner phase TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1746,12 +1708,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1771,7 +1731,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// Restart during the ChangeOwner phase; ChangeOwner succeeded on the mds but +// the response was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyChangeOwnerSuccessNotReturn) { std::string snapId; @@ -1792,12 +1753,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true,
CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1817,7 +1776,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// Restart during the RenameCloneFile phase TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1837,12 +1796,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1862,7 +1819,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// Restart during the RenameCloneFile phase; RenameCloneFile succeeded on the +// mds but the response was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRenameCloneFileSuccessNotReturn) { std::string snapId; @@ -1886,12 +1844,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1911,7 +1867,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// Restart during the RecoverChunk phase; RecoverChunk was not yet called on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1934,12 +1890,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1954,7 +1908,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); }
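A reading of the split at this point: up to kRenameCloneFile the recovery work is metadata on the MDS, while the RecoverChunk and CompleteCloneFile cases that follow belong to the background data-copy stage of Lazy mode, after the clone file has already been renamed into place. That would explain why the remaining cases only need the final data check to pass once recovery finishes; this is an interpretation of the test structure, not something the diff itself states.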
-// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart during the RecoverChunk phase; RecoverChunk was already called for +// some chunks on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRecoverChunkSuccssNotReturn) { std::string snapId; @@ -1980,12 +1935,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2000,7 +1953,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// Restart during the CompleteCloneFile phase TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -2025,12 +1978,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2045,7 +1996,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// Restart during the CompleteCloneFile phase; CompleteCloneFile succeeded on +// the mds but the response was not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneFileSuccessNotReturn) { std::string snapId; @@ -2073,12 +2025,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); diff --git a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp index 7fa055321b..a2d71f4485 100644 --- a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp +++ b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp @@ -20,24 +20,26 @@ * Author: lixiaocui */ -#include -#include #include "src/mds/heartbeat/chunkserver_healthy_checker.h" + +#include +#include + #include "src/mds/topology/topology_item.h" #include "test/mds/mock/mock_topology.h" +using ::curve::mds::topology::MockTopology; +using ::testing::_; +using ::testing::DoAll; using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::DoAll; -using ::testing::_; -using ::curve::mds::topology::MockTopology; using ::curve::mds::topology::ChunkServer; using
::curve::mds::topology::ChunkServerStatus; -using ::curve::mds::topology::OnlineState; using ::curve::mds::topology::CopySetKey; -using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::kTopoErrCodeInternalError; +using ::curve::mds::topology::kTopoErrCodeSuccess; +using ::curve::mds::topology::OnlineState; namespace curve { namespace mds { @@ -53,7 +55,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { HeartbeatInfo info; { - // chunkserver首次更新heartbeatInfo + // Chunkserver updates heartbeatInfo for the first time checker->UpdateLastReceivedHeartbeatTime(1, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 2, steady_clock::now() - std::chrono::milliseconds(4000)); @@ -65,8 +67,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { 6, steady_clock::now() - std::chrono::milliseconds(10000)); checker->UpdateLastReceivedHeartbeatTime( 7, steady_clock::now() - std::chrono::milliseconds(10000)); - checker->UpdateLastReceivedHeartbeatTime( - 8, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(8, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 9, steady_clock::now() - std::chrono::milliseconds(4000)); checker->UpdateLastReceivedHeartbeatTime( @@ -94,30 +95,32 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver-1 更新为online - // chunkserver-2 心跳miss,保持unstable - // chunkserver-3,chunkserver-5,chunkserver-6心跳offline, - // chunkserver-3的retired状态会被更新, 从心跳map中移除 - // chunkserver-5已经是retired状态,无需更新 - // chunkserver-6 get info失败, 未成功更新状态 - // chunnkserver-7 update失败, 未成功更新状态 - // chunkserver-8, pendding && online, 更新为onLine - // chunkserver-9, pendding && unstable, 更新为retired - // chunkserver-10, pendding && offline, 更新为retired + // Chunkserver-1 is updated to online. + // Chunkserver-2 has a heartbeat miss and remains in the unstable state. + // Chunkserver-3, Chunkserver-5, and Chunkserver-6 have heartbeat + // offline. Chunkserver-3's retired status will be updated and removed + // from the heartbeat map. Chunkserver-5 is already in a retired state + // and does not need an update. Chunkserver-6 fails to get information, + // and the status update is unsuccessful. Chunkserver-7 fails to update, + // and the status update is unsuccessful. Chunkserver-8 is pending and + // online, updated to online. Chunkserver-9 is pending and unstable, + // updated to retired. Chunkserver-10 is pending and offline, updated to + // retired. 
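The expectations that follow pin down a three-way heartbeat classification plus a retire rule. A minimal sketch of the decision this test implies; the real logic lives in the chunkserver healthy checker's CheckHeartBeatInterval, and the 4s/10s boundaries below are this test's intervals, not production defaults:

    // Sketch: classifying a chunkserver by heartbeat age, as this test implies.
    OnlineState Classify(std::chrono::milliseconds sinceLastHeartbeat) {
        if (sinceLastHeartbeat < std::chrono::milliseconds(4000)) {
            return OnlineState::ONLINE;    // e.g. chunkservers 1 and 8
        }
        if (sinceLastHeartbeat < std::chrono::milliseconds(10000)) {
            return OnlineState::UNSTABLE;  // e.g. chunkservers 2 and 9
        }
        return OnlineState::OFFLINE;       // e.g. chunkservers 3, 5, 6, 7, 10
    }
    // On top of this classification, offline READWRITE servers (3) and
    // non-online PENDDING servers (9, 10) are moved to
    // ChunkServerStatus::RETIRED; an already RETIRED server (5) needs no
    // update, and a failed GetChunkServer (6) or a failed topology update (7)
    // leaves the recorded state unchanged.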
EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(7).WillRepeatedly(Return(kTopoErrCodeSuccess)); - ChunkServer cs2(2, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs3(3, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs5(5, "", "", 1, "", 0, "", - ChunkServerStatus::RETIRED, OnlineState::UNSTABLE); - ChunkServer cs7(7, "", "", 1, "", 0, "", - ChunkServerStatus::READWRITE, OnlineState::UNSTABLE); - ChunkServer cs9(9, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); - ChunkServer cs10(10, "", "", 1, "", 0, "", - ChunkServerStatus::PENDDING, OnlineState::UNSTABLE); + .Times(7) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); + ChunkServer cs2(2, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs3(3, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs5(5, "", "", 1, "", 0, "", ChunkServerStatus::RETIRED, + OnlineState::UNSTABLE); + ChunkServer cs7(7, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE, + OnlineState::UNSTABLE); + ChunkServer cs9(9, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); + ChunkServer cs10(10, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING, + OnlineState::UNSTABLE); EXPECT_CALL(*topology, GetChunkServer(2, _)) .WillOnce(DoAll(SetArgPointee<1>(cs2), Return(true))); EXPECT_CALL(*topology, GetChunkServer(3, _)) @@ -128,8 +131,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { .WillOnce(Return(std::vector{})); EXPECT_CALL(*topology, GetChunkServer(5, _)) .WillOnce(DoAll(SetArgPointee<1>(cs5), Return(true))); - EXPECT_CALL(*topology, GetChunkServer(6, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology, GetChunkServer(6, _)).WillOnce(Return(false)); EXPECT_CALL(*topology, GetChunkServer(7, _)) .WillOnce(DoAll(SetArgPointee<1>(cs7), Return(true))); EXPECT_CALL(*topology, GetChunkServer(9, _)) @@ -164,15 +166,13 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver 2, 6 ,7 收到心跳 - checker->UpdateLastReceivedHeartbeatTime( - 2, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 6, steady_clock::now()); - checker->UpdateLastReceivedHeartbeatTime( - 7, steady_clock::now()); + // Chunkservers 2, 6, and 7 received heartbeats + checker->UpdateLastReceivedHeartbeatTime(2, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(6, steady_clock::now()); + checker->UpdateLastReceivedHeartbeatTime(7, steady_clock::now()); EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) - .Times(3).WillRepeatedly(Return(kTopoErrCodeSuccess)); + .Times(3) + .WillRepeatedly(Return(kTopoErrCodeSuccess)); checker->CheckHeartBeatInterval(); ASSERT_TRUE(checker->GetHeartBeatInfo(2, &info)); ASSERT_EQ(OnlineState::ONLINE, info.state); diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp index 11c70f8572..2a388c8944 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp @@ -20,23 +20,26 @@ * Author: lixiaocui */ +#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include + #include -#include "src/mds/nameserver2/helper/namespace_helper.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" + #include "src/common/namespace_define.h"
+#include "src/mds/nameserver2/helper/namespace_helper.h" #include "test/mds/mock/mock_etcdclient.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; -using ::curve::common::SEGMENTINFOKEYPREFIX; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; +using ::curve::common::SEGMENTINFOKEYPREFIX; namespace curve { namespace mds { @@ -44,18 +47,18 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { auto mockEtcdClient = std::make_shared(); { - // 1. list失败 + // 1. list failed EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, Matcher*>(_))) .WillOnce(Return(EtcdErrCode::EtcdCanceled)); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); } { - // 2. list成功,解析失败 + // 2. list successful, parsing failed std::vector values{"hello"}; EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, @@ -64,10 +67,10 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); } { - // 3. 获取已有的segment alloc value成功 + // 3. Successfully obtained the existing segment alloc value std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient, @@ -77,7 +80,7 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues( - &out, mockEtcdClient)); + &out, mockEtcdClient)); ASSERT_EQ(1, out.size()); ASSERT_EQ(1024, out[1]); } @@ -89,32 +92,35 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { // 1. CalculateSegmentAlloc ok LOG(INFO) << "start test1......"; EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(Return(EtcdErrCode::EtcdUnknown)); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); } { - // 2. ListWithLimitAndRevision成功,但是解析失败 + // 2. ListWithLimitAndRevision succeeded, but parsing failed LOG(INFO) << "start test2......"; std::vector values{"hello"}; std::string lastKey = "021"; EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce( DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); } { - // 3. ListWithLimitAndRevision成功, 解析成功, bundle=1000, 获取个数为1 + // 3. 
ListWithLimitAndRevision succeeded and parsing succeeded; + // bundle=1000, 1 entry is obtained LOG(INFO) << "start test3......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; ASSERT_TRUE( @@ -123,23 +129,24 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { std::string lastKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0); EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey), - Return(EtcdErrCode::EtcdOK))); + Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); ASSERT_EQ(1, out.size()); ASSERT_EQ(1 << 30, out[1]); } { - // 4. ListWithLimitAndRevision成功, 解析成功 - // bundle=1000, 获取个数为1001 + // 4. ListWithLimitAndRevision succeeded and parsing succeeded; + // bundle=1000, 1001 entries are obtained LOG(INFO) << "start test4......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; std::vector values; @@ -160,20 +167,22 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { std::string lastKey2 = NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey1), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision( - lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>( - std::vector{encodeSegment, encodeSegment}), + EXPECT_CALL(*mockEtcdClient, + ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, + GETBUNDLE, 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(std::vector{ + encodeSegment, encodeSegment}), SetArgPointee<5>(lastKey2), Return(EtcdErrCode::EtcdOK))); std::map out; ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc( - 2, mockEtcdClient, &out)); + 2, mockEtcdClient, &out)); ASSERT_EQ(2, out.size()); ASSERT_EQ(500L * (1 << 30), out[1]); ASSERT_EQ(501L * (1 << 30), out[2]); @@ -181,5 +190,3 @@ } } // namespace mds } // namespace curve - -
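Taken together, cases 3 and 4 pin down the pagination contract of CalculateSegmentAlloc: segments are listed at one fixed revision in GETBUNDLE-sized pages (GETBUNDLE=1000 here), and each further page is requested starting from the last key of the previous one. They also complement cases 1 and 2 of test_GetExistSegmentAllocValues above, where a failed List aborts with -1 while an undecodable value is merely skipped. A sketch of the listing loop these mock expectations imply; this is assumption-level pseudocode for the helper, not its actual implementation:

    // Sketch: fixed-revision, paginated listing as the expectations describe it.
    std::string startKey = SEGMENTINFOKEYPREFIX;
    while (true) {
        std::vector<std::string> values;
        std::string lastKey;
        if (EtcdErrCode::EtcdOK !=
            client->ListWithLimitAndRevision(startKey, SEGMENTINFOKEYEND,
                                             GETBUNDLE, revision,
                                             &values, &lastKey)) {
            return -1;  // any failed page aborts the whole calculation
        }
        // Decode each PageFileSegment and accumulate
        // out[segment.logicalpoolid()] += segment.segmentsize().
        // A resumed page re-lists the boundary key, which must be counted only
        // once; that is why case 4 returns 1000 + 2 values yet counts 1001
        // segments.
        if (values.size() < GETBUNDLE) break;  // short page: listing finished
        startKey = lastKey;
    }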
diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp index c51e91587c..b260bb502e 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp @@ -20,21 +20,23 @@ * Author: lixiaocui */ +#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" + #include -#include "src/mds/nameserver2/helper/namespace_helper.h" + +#include "src/common/namespace_define.h" #include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h" +#include "src/mds/nameserver2/helper/namespace_helper.h" #include "test/mds/mock/mock_etcdclient.h" -#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h" -#include "src/common/namespace_define.h" using ::testing::_; -using ::testing::Return; -using ::testing::SetArgPointee; using ::testing::DoAll; using ::testing::Matcher; +using ::testing::Return; +using ::testing::SetArgPointee; -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; using ::curve::common::SEGMENTINFOKEYPREFIX; @@ -60,17 +62,18 @@ class AllocStatisticTest : public ::testing::Test { TEST_F(AllocStatisticTest, test_Init) { { - // 1. 从etcd中获取当前revision失败 + // 1. Failed to obtain the current revision from etcd LOG(INFO) << "test1......"; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(Return(EtcdErrCode::EtcdCanceled)); + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .WillOnce(Return(EtcdErrCode::EtcdCanceled)); ASSERT_EQ(-1, allocStatistic_->Init()); } { - // 2. 获取已经存在的logicalPool对应的alloc大小失败 + // 2. Failed to obtain the alloc size corresponding to the existing + // logicalPool LOG(INFO) << "test2......"; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(Return(EtcdErrCode::EtcdOK)); + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .WillOnce(Return(EtcdErrCode::EtcdOK)); EXPECT_CALL(*mockEtcdClient_, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, Matcher*>(_))) @@ -80,12 +83,12 @@ TEST_F(AllocStatisticTest, test_Init) { ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); } { - // 3. init成功 + // 3. Init succeeded LOG(INFO) << "test3......"; std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). - WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); EXPECT_CALL(*mockEtcdClient_, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, Matcher*>(_))) @@ -99,10 +102,10 @@ TEST_F(AllocStatisticTest, test_Init) { } TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { - // 初始化 allocStatistic - // 旧值: logicalPooId(1):1024 + // Initialize allocStatistic + // Old value: logicalPoolId(1):1024 std::vector values{ - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; + NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); EXPECT_CALL(*mockEtcdClient_, @@ -114,91 +117,96 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { PageFileSegment segment; segment.set_segmentsize(1 << 30); segment.set_logicalpoolid(1); - segment.set_chunksize(16*1024*1024); + segment.set_chunksize(16 * 1024 * 1024); segment.set_startoffset(0); std::string encodeSegment; values.clear(); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + ASSERT_TRUE(NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); for (int i = 1; i <= 500; i++) { values.emplace_back(encodeSegment); } - // 1. 在定期持久化线程和统计线程启动前,只能获取旧值 + // 1. Before the periodic persistence thread and the statistics thread + // start, only the old values can be obtained int64_t alloc; ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024, alloc); ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - // 2. 更新segment的值 + // 2.
Update the segment values allocStatistic_->DeAllocSpace(1, 64, 1); allocStatistic_->AllocSpace(1, 32, 1); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024 - 32, alloc); - // 设置mock的etcd中segment的值 + // Set the segment values in the mock etcd // logicalPoolId(1):500 * (1<<30) // logicalPoolId(2):501 * (1<<30) segment.set_logicalpoolid(2); - ASSERT_TRUE( - NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); + ASSERT_TRUE(NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment)); for (int i = 501; i <= 1000; i++) { values.emplace_back(encodeSegment); } - std::string lastKey1 = - NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); + std::string lastKey1 = NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500); std::string lastKey2 = NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000); - EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( - SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) + EXPECT_CALL(*mockEtcdClient_, + ListWithLimitAndRevision(SEGMENTINFOKEYPREFIX, + SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) .Times(2) .WillOnce(Return(EtcdErrCode::EtcdCanceled)) .WillOnce(DoAll(SetArgPointee<4>(values), - SetArgPointee<5>(lastKey1), + .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey1), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision( - lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _)) - .WillOnce(DoAll(SetArgPointee<4>( - std::vector{encodeSegment, encodeSegment}), + EXPECT_CALL(*mockEtcdClient_, + ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, + 2, _, _)) + .WillOnce(DoAll(SetArgPointee<4>(std::vector{ + encodeSegment, encodeSegment}), SetArgPointee<5>(lastKey2), Return(EtcdErrCode::EtcdOK))); - EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) + EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) .Times(2) .WillOnce(Return(EtcdErrCode::EtcdCanceled)) .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK)));
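The Put expectations that follow encode three generations of the counters; spelling out the arithmetic makes the magic numbers less opaque. First persist, before the scan finishes: pool 1 is 1024 - 32 + (1 << 30), that is, the old value 1024 minus the net DeAlloc/Alloc delta of 32 from above, plus the 1 GB allocated right before the threads start, and pool 2 is 1 << 30 for its own fresh allocation. After the background scan merges in the etcd totals: pool 1 becomes 501L * (1 << 30), the 500 scanned segments plus the one allocated at a revision newer than the scan's snapshot revision 2, and pool 2 becomes 502L * (1 << 30). After the final DeAllocSpace/AllocSpace calls: pool 1 is 500L * (1 << 30), pool 2 is 501L * (1 << 30), and pool 3 is 1L << 30. This is a walk-through of the test's own numbers, not a statement about the production merge algorithm.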
- // 设置mock的Put结果 - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue( - 1, 1024 - 32 + (1L << 30)))) + // Set the mock results for the Put calls + EXPECT_CALL(*mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 1, 1024 - 32 + (1L << 30)))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30))) + EXPECT_CALL( + *mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 501L *(1 << 30)))) + EXPECT_CALL(*mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 1, 501L * (1 << 30)))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 502L *(1 << 30)))) + EXPECT_CALL(*mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 2, 502L * (1 << 30)))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(1), - NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 500L *(1 << 30)))) + EXPECT_CALL(*mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(1), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 1, 500L * (1 << 30)))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(2), - NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 501L *(1 << 30)))) + EXPECT_CALL(*mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(2), + NameSpaceStorageCodec::EncodeSegmentAllocValue( + 2, 501L * (1 << 30)))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - EXPECT_CALL(*mockEtcdClient_, Put( - NameSpaceStorageCodec::EncodeSegmentAllocKey(3), - NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) + EXPECT_CALL( + *mockEtcdClient_, + Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(3), + NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - // 2. 启动定期持久化线程和统计线程 + // 2. Start the periodic persistence thread and the statistics thread for (int i = 1; i <= 2; i++) { allocStatistic_->AllocSpace(i, 1L << 30, i + 3); } @@ -206,21 +214,21 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { std::this_thread::sleep_for(std::chrono::seconds(6)); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(501L *(1 << 30), alloc); + ASSERT_EQ(501L * (1 << 30), alloc); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - ASSERT_EQ(502L *(1 << 30), alloc); + ASSERT_EQ(502L * (1 << 30), alloc); std::this_thread::sleep_for(std::chrono::milliseconds(30)); - // 再通过alloc进行更新 + // Update again through the alloc interface for (int i = 1; i <= 2; i++) { allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); } allocStatistic_->AllocSpace(3, 1L << 30, 10); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); - ASSERT_EQ(500L *(1 << 30), alloc); + ASSERT_EQ(500L * (1 << 30), alloc); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - ASSERT_EQ(501L *(1 << 30), alloc); + ASSERT_EQ(501L * (1 << 30), alloc); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(3, &alloc)); ASSERT_EQ(1L << 30, alloc); std::this_thread::sleep_for(std::chrono::milliseconds(30)); diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index c1e878deb4..d139e5c68e 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -22,25 +22,25 @@ #include -#include "test/mds/topology/mock_topology.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_item.h" #include "src/common/configuration.h" #include "src/common/namespace_define.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_item.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; -using ::testing::_; -using ::testing::Contains; -using ::testing::SetArgPointee; -using ::testing::SaveArg; -using ::testing::DoAll; using ::curve::common::Configuration; using ::curve::common::kDefaultPoolsetId; using ::curve::common::kDefaultPoolsetName; +using ::testing::_; +using ::testing::Contains; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestTopology : public ::testing::Test { protected: @@ -52,13 +52,11 @@ class TestTopology : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ =
std::make_shared(); topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); const std::unordered_map poolsetMap{ {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; ON_CALL(*storage_, LoadPoolset(_, _)) .WillByDefault(DoAll( @@ -80,128 +78,90 @@ class TestTopology : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(id, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 0) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 0) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - uint32_t internalPort = 0, - const std::string &externalHostIp = "testExternalIp", - uint32_t externalPort = 0, - ZoneIdType zoneId = 0x21, - PoolIdType 
physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - internalPort, - externalHostIp, - externalPort, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + uint32_t internalPort = 0, + const std::string& externalHostIp = "testExternalIp", + uint32_t externalPort = 0, ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, internalPort, + externalHostIp, externalPort, zoneId, physicalPoolId, + desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -218,16 +178,12 @@ class TestTopology : public ::testing::Test { TEST_F(TestTopology, test_init_success) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); const std::unordered_map poolsetMap{ - {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; 
std::unordered_map zoneMap_; @@ -235,40 +191,33 @@ TEST_F(TestTopology, test_init_success) { std::unordered_map chunkServerMap_; std::map copySetMap_; - logicalPoolMap_[0x01] = LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, false, true); + logicalPoolMap_[0x01] = + LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, false, true); physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", 0X61, "des1"); zoneMap_[0x21] = Zone(0x21, "zone1", 0x11, "desc1"); - serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, - "127.0.0.1", 8200, 0x21, 0x11, "desc1"); - chunkServerMap_[0x41] = ChunkServer(0x41, "token", "ssd", - 0x31, "127.0.0.1", 8200, "/"); + serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, "127.0.0.1", + 8200, 0x21, 0x11, "desc1"); + chunkServerMap_[0x41] = + ChunkServer(0x41, "token", "ssd", 0x31, "127.0.0.1", 8200, "/"); copySetMap_[std::pair(0x01, 0x51)] = CopySetInfo(0x01, 0x51); EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), Return(true))); EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), Return(true))); EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(serverMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(serverMap_), Return(true))); EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), Return(true))); EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), Return(true))); EXPECT_CALL(*idGenerator_, initPoolsetIdGenerator(_)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -278,10 +227,8 @@ TEST_F(TestTopology, test_init_success) { EXPECT_CALL(*idGenerator_, initChunkServerIdGenerator(_)); EXPECT_CALL(*idGenerator_, initCopySetIdGenerator(_)); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); TopologyOption option; int ret = topology_->Init(option); @@ -291,8 +238,7 @@ TEST_F(TestTopology, test_init_success) { TEST_F(TestTopology, test_init_loadClusterFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(false))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(false))); TopologyOption option; int ret = topology_->Init(option); @@ -302,11 +248,9 @@ TEST_F(TestTopology, test_init_loadClusterFail) { TEST_F(TestTopology, test_init_StorageClusterInfoFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + 
.WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -318,11 +262,9 @@ TEST_F(TestTopology, test_init_loadLogicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -334,13 +276,10 @@ TEST_F(TestTopology, test_init_LoadPhysicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -354,15 +293,11 @@ TEST_F(TestTopology, test_init_LoadZoneFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -377,16 +312,11 @@ TEST_F(TestTopology, test_init_LoadServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(false)); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -402,19 +332,13 @@ TEST_F(TestTopology, test_init_LoadChunkServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - 
EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -431,21 +355,14 @@ TEST_F(TestTopology, test_init_LoadCopysetFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -462,18 +379,11 @@ TEST_F(TestTopology, test_AddLogicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); @@ -487,15 +397,9 @@ TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "test1", physicalPoolId); - LogicalPool pool(id, - "test2", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(id, "test2", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -506,18 +410,11 @@ TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - 
LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddLogicalPool(pool); @@ -528,16 +425,9 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - ++physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); - + LogicalPool pool(0x01, "test1", ++physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -546,26 +436,18 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, test_AddPhysicalPool_success) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { PrepareAddPoolset(); PoolIdType id = 0x11; PoolsetIdType pid = 0x61; - PhysicalPool pool(id, - "test1", - pid, - "desc"); + PhysicalPool pool(id, "test1", pid, "desc"); PrepareAddPhysicalPool(id); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeIdDuplicated, ret); @@ -573,12 +455,8 @@ TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(false)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -590,13 +468,9 @@ TEST_F(TestTopology, test_AddZone_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); @@ -616,10 +490,7 @@ TEST_F(TestTopology, test_AddZone_IdDuplicated) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -631,13 +502,9 @@ TEST_F(TestTopology, test_AddZone_StorageFail) { PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(0x21, - "testZone", - physicalPoolId, - "desc"); + Zone zone(0x21, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(false)); int ret = topology_->AddZone(zone); @@ -649,11 +516,7 @@ 
TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); - + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -668,18 +531,10 @@ TEST_F(TestTopology, test_AddServer_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -701,15 +556,8 @@ TEST_F(TestTopology, test_AddServer_IdDuplicated) { PrepareAddZone(zoneId, "test", physicalPoolId); PrepareAddServer(id); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); @@ -724,46 +572,29 @@ TEST_F(TestTopology, test_AddServer_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(false)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, test_AddServer_ZoneNotFound) { PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); } - TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddPoolset(); ChunkServerIdType csId = 0x41; @@ -773,20 +604,13 @@ TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); ChunkServerState state; state.SetDiskCapacity(1024); state.SetDiskUsed(512); cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); @@ -812,18 +636,9 @@ TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token2", - "ssd", - serverId); - - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + PrepareAddChunkServer(csId, "token2", "ssd", serverId); + + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -839,16 +654,9 @@ TEST_F(TestTopology, test_AddChunkServer_StorageFail) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer 
cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(false)); int ret = topology_->AddChunkServer(cs); @@ -860,13 +668,7 @@ TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -880,8 +682,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_success) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemoveLogicalPool(id); @@ -904,8 +705,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemoveLogicalPool(id); @@ -917,8 +717,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_success) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemovePhysicalPool(poolId); @@ -939,8 +738,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemovePhysicalPool(poolId); @@ -952,12 +750,9 @@ TEST_F(TestTopology, test_RemoveZone_success) { ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - PrepareAddZone(zoneId, - "testZone", - poolId); + PrepareAddZone(zoneId, "testZone", poolId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(true)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -982,8 +777,7 @@ TEST_F(TestTopology, test_RemoveZone_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(false)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -995,16 +789,9 @@ TEST_F(TestTopology, test_RemoveServer_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(true)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1030,16 +817,9 @@ TEST_F(TestTopology, test_RemoveServer_StorageFail) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - 
EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(false)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1052,18 +832,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(true)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1075,7 +851,6 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { ASSERT_TRUE(it == csList.end()); } - TEST_F(TestTopology, test_RemoveChunkServer_ChunkSeverNotFound) { ChunkServerIdType csId = 0x41; @@ -1090,19 +865,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(false)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1113,26 +883,15 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdateLogicalPool(pool); @@ -1146,15 +905,9 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { TEST_F(TestTopology, UpdateLogicalPool_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->UpdateLogicalPool(pool); @@ -1166,26 +919,15 @@ TEST_F(TestTopology, UpdateLogicalPool_StorageFail) { PoolIdType 
logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdateLogicalPool(pool); @@ -1197,24 +939,19 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); LogicalPool pool2; topology_->GetLogicalPool(logicalPoolId, &pool2); ASSERT_EQ(AllocateStatus::ALLOW, pool2.GetStatus()); // update to deny - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1223,11 +960,10 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { ASSERT_EQ(AllocateStatus::DENY, pool3.GetStatus()); // update to allow - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1239,18 +975,12 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeLogicalPoolNotFound, ret); } @@ -1260,19 +990,14 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - 
physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1285,8 +1010,7 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { PrepareAddLogicalPool(lpid, "name", ppid); auto set_state = [&](PoolIdType lpid, bool scanEnable) { - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); auto retCode = topology_->UpdateLogicalPoolScanState(lpid, scanEnable); ASSERT_EQ(retCode, kTopoErrCodeSuccess); }; @@ -1309,14 +1033,12 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { check_state(lpid, true); // CASE 4: logical pool not found -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .Times(0); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).Times(0); auto retCode = topology_->UpdateLogicalPoolScanState(lpid + 1, true); ASSERT_EQ(retCode, kTopoErrCodeLogicalPoolNotFound); // CASE 5: update storage fail -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); retCode = topology_->UpdateLogicalPoolScanState(lpid, true); ASSERT_EQ(retCode, kTopoErrCodeStorgeFail); } @@ -1325,18 +1047,11 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1349,69 +1064,45 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; PoolIdType pid = 0x61; - PhysicalPool newPool(physicalPoolId, - "name1", - pid, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", pid, "desc2"); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, ret); } - TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdatePhysicalPool(newPool); 
ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - - TEST_F(TestTopology, UpdateZone_success) { PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(true)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(true)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, UpdateZone_ZoneNotFound) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); @@ -1422,18 +1113,11 @@ TEST_F(TestTopology, UpdateZone_StorageFail) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(false)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(false)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1445,28 +1129,13 @@ TEST_F(TestTopology, UpdateServer_success) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1477,15 +1146,8 @@ TEST_F(TestTopology, UpdateServer_ServerNotFound) { ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeServerNotFound, ret); @@ -1498,34 +1160,18 @@ TEST_F(TestTopology, UpdateServer_StorageFail) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(false)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, 
UpdateServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1535,24 +1181,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1566,28 +1199,15 @@ TEST_F(TestTopology, UpdateChunkServerTopo_UpdateServerSuccess) { ChunkServerIdType csId = 0x41; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, "server1", - "ip1", 0, "ip2", 0, zoneId, physicalPoolId); - PrepareAddServer(serverId2, "server2", - "ip3", 0, "ip4", 0, zoneId, physicalPoolId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId2, - "ip3", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "server1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); + PrepareAddServer(serverId2, "server2", "ip3", 0, "ip4", 0, zoneId, + physicalPoolId); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId2, "ip3", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1596,13 +1216,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_ChunkServerNotFound) { ServerIdType serverId = 0x31; ChunkServerIdType csId = 0x41; - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); @@ -1617,24 +1231,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(false)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1648,11 +1249,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; 
ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); @@ -1662,17 +1259,16 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(100, pool.GetDiskCapacity()); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Flush only once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1684,7 +1280,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_ChunkServerNotFound) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1697,22 +1293,17 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Flush only once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1726,60 +1317,50 @@ TEST_F(TestTopology, UpdateChunkServerRwStateTestPhysicalPoolCapacity_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, 
topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); @@ -1790,7 +1371,7 @@ TEST_F(TestTopology, UpdateChunkServerRwState_ChunkServerNotFound) { ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1803,13 +1384,9 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); uint64_t time = 0x1234567812345678; - int ret = topology_->UpdateChunkServerStartUpTime(time, csId); + int ret = topology_->UpdateChunkServerStartUpTime(time, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ChunkServer cs; @@ -1819,7 +1396,7 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_ChunkServerNotFound) { ChunkServerIdType csId = 0x41; - int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); + int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1831,19 +1408,18 @@ TEST_F(TestTopology, FindLogicalPool_success) { std::string physicalPoolName = "PhysiclPool1"; PrepareAddPhysicalPool(physicalPoolId, physicalPoolName); PrepareAddLogicalPool(logicalPoolId, logicalPoolName, physicalPoolId); - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); ASSERT_EQ(logicalPoolId, ret); } TEST_F(TestTopology, FindLogicalPool_LogicalPoolNotFound) { std::string logicalPoolName = "logicalPool1"; std::string physicalPoolName = "PhysiclPool1"; - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindPhysicalPool_success) { @@ -1858,11 +1434,9 @@ TEST_F(TestTopology, FindPhysicalPool_PhysicalPoolNotFound) { std::string physicalPoolName = "physicalPoolName"; PoolIdType ret = topology_->FindPhysicalPool(physicalPoolName); - ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast<PoolIdType>(UNINTIALIZE_ID), ret); } - TEST_F(TestTopology, FindZone_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1879,8 +1453,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound) { std::string physicalPoolName = 
"physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolName); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindZone_success2) { @@ -1900,8 +1473,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound2) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolId); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostName_success) { @@ -1910,8 +1482,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { std::string hostName = "host1"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName); + PrepareAddServer(serverId, hostName); ServerIdType ret = topology_->FindServerByHostName(hostName); ASSERT_EQ(serverId, ret); @@ -1920,8 +1491,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { TEST_F(TestTopology, FindServerByHostName_ServerNotFound) { std::string hostName = "host1"; ServerIdType ret = topology_->FindServerByHostName(hostName); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostIpPort_success) { @@ -1932,12 +1502,7 @@ TEST_F(TestTopology, FindServerByHostIpPort_success) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort(internalHostIp, 0); ASSERT_EQ(serverId, ret); @@ -1954,16 +1519,10 @@ TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort("ip3", 0); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindChunkServerNotRetired_success) { @@ -1977,21 +1536,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_success) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); - - ChunkServerIdType ret = topology_->FindChunkServerNotRetired( - internalHostIp, port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); + + ChunkServerIdType ret = + topology_->FindChunkServerNotRetired(internalHostIp, port); ASSERT_EQ(csId, ret); } @@ -2006,22 +1555,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); ChunkServerIdType ret = topology_->FindChunkServerNotRetired("ip3", port); - ASSERT_EQ(static_cast( - UNINTIALIZE_ID), ret); + 
ASSERT_EQ(static_cast<ChunkServerIdType>(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, GetLogicalPool_success) { @@ -2089,7 +1627,6 @@ TEST_F(TestTopology, GetServer_success) { ASSERT_EQ(true, ret); } - TEST_F(TestTopology, GetServer_GetServerNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -2133,7 +1670,6 @@ TEST_F(TestTopology, GetChunkServer_ChunkServerNotFound) { ASSERT_EQ(false, ret); } - TEST_F(TestTopology, GetChunkServerInCluster_success) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -2371,8 +1907,8 @@ TEST_F(TestTopology, GetChunkServerInLogicalPool_success) { PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "name", physicalPoolId); - PrepareAddServer( - serverId, "name2", "ip1", 0, "ip2", 0, zoneId, physicalPoolId); + PrepareAddServer(serverId, "name2", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); PrepareAddChunkServer(csId, "token", "ssd", serverId); PrepareAddChunkServer(csId2, "token", "ssd", serverId); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId); @@ -2452,12 +1988,12 @@ TEST_F(TestTopology, AddCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2470,8 +2006,7 @@ TEST_F(TestTopology, AddCopySet_success) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -2486,12 +2021,12 @@ TEST_F(TestTopology, AddCopySet_IdDuplicated) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, 
"127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2551,12 +2086,12 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2569,8 +2104,7 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(false)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -2585,12 +2119,12 @@ TEST_F(TestTopology, RemoveCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2601,8 +2135,7 @@ TEST_F(TestTopology, RemoveCopySet_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); int ret = topology_->RemoveCopySet( std::pair(logicalPoolId, copysetId)); @@ -2620,12 +2153,12 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - 
PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2636,8 +2169,7 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(false)); int ret = topology_->RemoveCopySet( std::pair<PoolIdType, CopySetIdType>(logicalPoolId, copysetId)); @@ -2655,12 +2187,12 @@ TEST_F(TestTopology, RemoveCopySet_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2687,12 +2219,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2716,11 +2248,10 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateCopySet(_)) - .WillOnce(Return(true)); + // Flush only once + EXPECT_CALL(*storage_, UpdateCopySet(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -2735,12 +2266,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, 
"127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2775,12 +2306,12 @@ TEST_F(TestTopology, GetCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2809,12 +2340,12 @@ TEST_F(TestTopology, GetCopySet_CopysetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2843,12 +2374,12 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2860,7 +2391,7 @@ TEST_F(TestTopology, 
GetCopySetsInLogicalPool_success) { PrepareAddCopySet(copysetId, logicalPoolId, replicas); std::vector csList = - topology_->GetCopySetsInLogicalPool(logicalPoolId); + topology_->GetCopySetsInLogicalPool(logicalPoolId); ASSERT_EQ(1, csList.size()); } @@ -2874,12 +2405,12 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2890,8 +2421,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector csList = - topology_->GetCopySetsInCluster(); + std::vector csList = topology_->GetCopySetsInCluster(); ASSERT_EQ(1, csList.size()); } @@ -2905,12 +2435,12 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2921,44 +2451,33 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector csList = - topology_->GetCopySetsInChunkServer(0x41); + std::vector csList = topology_->GetCopySetsInChunkServer(0x41); ASSERT_EQ(1, csList.size()); } TEST_F(TestTopology, test_create_default_poolset) { - EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadClusterInfo(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPoolset(_, _)).WillOnce(Return(true)); Poolset poolset; EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce( - DoAll(SaveArg<0>(&poolset), Return(true))); + .WillOnce(DoAll(SaveArg<0>(&poolset), Return(true))); std::unordered_map physicalPoolMap{ {1, {1, "pool1", UNINTIALIZE_ID, ""}}, {2, {2, "pool2", UNINTIALIZE_ID, ""}}, }; 
EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), - SetArgPointee<1>(2), + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), SetArgPointee<1>(2), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(true)); int rc = topology_->Init({}); ASSERT_EQ(kTopoErrCodeSuccess, rc); diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index a1ea8aa942..08289623f4 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -20,30 +20,28 @@ * Author: xuchaojie */ -#include #include +#include #include - -#include "src/mds/topology/topology_chunk_allocator.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/topology/mock_topology.h" -#include "test/mds/mock/mock_topology.h" #include "proto/nameserver2.pb.h" #include "src/common/timeutility.h" +#include "src/mds/common/mds_define.h" +#include "src/mds/topology/topology_chunk_allocator.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; using ::testing::Invoke; - +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyChunkAllocator : public ::testing::Test { protected: @@ -54,21 +52,17 @@ class TestTopologyChunkAllocator : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); TopologyOption option; topoStat_ = std::make_shared(topology_); - chunkFilePoolAllocHelp_ = - std::make_shared(); + chunkFilePoolAllocHelp_ = std::make_shared(); chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig(true, true, 15); option.PoolUsagePercentLimit = 85; option.enableLogicalPoolStatus = true; allocStatistic_ = std::make_shared(); - testObj_ = std::make_shared(topology_, - allocStatistic_, - topoStat_, - chunkFilePoolAllocHelp_, - option); + testObj_ = std::make_shared( + topology_, allocStatistic_, topoStat_, chunkFilePoolAllocHelp_, + option); } virtual void TearDown() { @@ -85,53 +79,37 @@ class TestTopologyChunkAllocator : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = 
PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 10240) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 10240) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) @@ -139,78 +117,56 @@ class TestTopologyChunkAllocator : public ::testing::Test { } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have 
PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ChunkServerStat stat; - stat.chunkFilepoolSize = diskCapacity-diskUsed; + stat.chunkFilepoolSize = diskCapacity - diskUsed; topoStat_->UpdateChunkServerStat(id, stat); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members, - bool availFlag = true) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members, + bool availFlag = true) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); cs.SetAvailableFlag(availFlag); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -228,7 +184,7 @@ class TestTopologyChunkAllocator : public ::testing::Test { }; TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_success) { + Test_AllocateChunkRandomInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -247,7 +203,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -258,12 +214,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -275,20 +227,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - 
testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { + Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -304,7 +252,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -315,12 +263,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); @@ -328,12 +272,8 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -343,18 +283,14 @@ TEST_F(TestTopologyChunkAllocator, topoStat_->UpdateChunkServerStat(0x42, stat); topoStat_->UpdateChunkServerStat(0x43, stat); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { + Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -370,7 +306,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -385,16 +321,16 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); std::map enoughsize; - std::vector pools ={0x01}; + std::vector pools = {0x01}; for (int i = 0; i < 10; i++) { - testObj_->GetRemainingSpaceInLogicalPool(pools, - &enoughsize, "testPoolset"); + testObj_->GetRemainingSpaceInLogicalPool(pools, &enoughsize, + "testPoolset"); ASSERT_EQ(enoughsize[logicalPoolId], 1109); } } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -412,7 +348,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); 
PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -423,16 +359,11 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_TRUE(ret); @@ -443,12 +374,8 @@ TEST_F(TestTopologyChunkAllocator, // second time std::vector infos2; - ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos2); + ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos2); ASSERT_TRUE(ret); @@ -493,20 +420,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -514,18 +437,14 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -542,7 +461,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -553,27 +472,23 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + 
INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_FALSE(ret); } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each + // time std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 2000; i++) { @@ -584,12 +499,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { for (int i = 0; i < 100000; i++) { int chunkNumber = 64; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( + copySetIds, 1, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(chunkNumber, infos.size()); for (int j = 0; j < chunkNumber; j++) { @@ -598,7 +509,7 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } int minCount = copySetMap[0]; int maxCount = copySetMap[0]; - for (auto &pair : copySetMap) { + for (auto& pair : copySetMap) { if (pair.second > maxCount) { maxCount = pair.second; } @@ -610,10 +521,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { double minPercent = static_cast(avg - minCount) / avg; double maxPercent = static_cast(maxCount - avg) / avg; LOG(INFO) << "AllocateChunkRandomInSingleLogicalPool poc" - <<", minCount = " << minCount - <<", maxCount = " << maxCount - << ", avg = " << avg - << ", minPercent = " << minPercent + << ", minCount = " << minCount << ", maxCount = " << maxCount + << ", avg = " << avg << ", minPercent = " << minPercent << ", maxPercent = " << maxPercent; ASSERT_TRUE(minPercent < 0.1); @@ -621,7 +530,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, + // with 64 chunks allocated each time std::vector copySetIds; for (int i = 0; i < 2000; i++) { copySetIds.push_back(i); @@ -632,23 +542,19 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { int chunkNumber = 64; std::vector infos; AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + copySetIds, 1, chunkNumber, &infos); } uint64_t stoptime = curve::common::TimeUtility::GetTimeofDayUs(); double usetime = stoptime - startime; - double tps = 1000000.0 * 100000.0/usetime; + double tps = 1000000.0 * 100000.0 / usetime; - std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " - << tps + std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " << tps << " * 64 chunk per second."; } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { + TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 20; i++) { @@ -657,13 +563,8 @@ TEST(TestAllocateChunkPolicy, uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(5, nextIndex); ASSERT_EQ(chunkNumber, infos.size()); @@ -680,26 +581,20 @@ TEST(TestAllocateChunkPolicy, } 
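The wrap-around assertions above (20 copysets, nextIndex starting at 15, 10 chunks requested, nextIndex ending at 5) are plain modular arithmetic. Below is a minimal sketch of that behavior; RoundRobinPick is a hypothetical helper for illustration, not the production AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool implementation.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in that mirrors only the index arithmetic the test
    // asserts on; not the real policy code.
    std::vector<uint32_t> RoundRobinPick(const std::vector<uint32_t>& copysets,
                                         uint32_t* nextIndex, int chunkNumber) {
        std::vector<uint32_t> chosen;
        for (int i = 0; i < chunkNumber; i++) {
            chosen.push_back(copysets[*nextIndex]);
            // advance and wrap to the front of the copyset list
            *nextIndex =
                (*nextIndex + 1) % static_cast<uint32_t>(copysets.size());
        }
        return chosen;
    }

Starting at index 15 over 20 copysets and picking 10 chunks visits indices 15 through 19 and then 0 through 4, which is why the test expects nextIndex to finish at 5.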
TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { + TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { std::vector copySetIds; std::map copySetMap; uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_FALSE(ret); ASSERT_EQ(15, nextIndex); ASSERT_EQ(0, infos.size()); } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc) { std::map poolWeightMap; std::map poolMap; for (int i = 0; i < 5; i++) { @@ -709,8 +604,8 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolWeightMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolWeightMap, + &pid); poolMap[pid]++; } @@ -719,7 +614,8 @@ TEST(TestAllocateChunkPolicy, ASSERT_TRUE(poolMap[1] < poolMap[2]); ASSERT_TRUE(poolMap[2] < poolMap[3]); ASSERT_TRUE(poolMap[3] < poolMap[4]); - // 5个池大概分布因该是0, 10000,20000,30000,40000 + // The approximate distribution of 5 pools should be + // 0, 10000, 20000, 30000, 40000 LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -727,8 +623,7 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc2) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc2) { std::map poolMap; poolMap[0] = 100000; poolMap[1] = 90000; @@ -738,12 +633,11 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolMap, &pid); poolMap[pid] -= 1; } - // 测试是否能逐渐拉平pool之间差距 + // Test to see if it is possible to gradually equalize the gap between pools LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -751,9 +645,8 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -// 测试能否随机到每个pool -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolRandom) { +// Test to see if random allocation to each pool is possible +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolRandom) { std::vector pools = {1, 2, 3, 4, 5}; std::map allocMap; allocMap[1] = 0; diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index 2a38263784..fd1112a4ec 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -21,24 +21,24 @@ */ #include -#include #include +#include #include "src/mds/topology/topology_metric.h" -#include "test/mds/topology/mock_topology.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class 
TestTopologyMetric : public ::testing::Test { public: @@ -48,10 +48,9 @@ class TestTopologyMetric : public ::testing::Test { idGenerator_ = std::make_shared(); tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); - // 使用真实的topology + // Using real topology topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); topologyStat_ = std::make_shared(); allocStatistic_ = std::make_shared(); @@ -76,122 +75,87 @@ class TestTopologyMetric : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool") { - PhysicalPool pool(id, - name, - pid, - desc); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool") { + PhysicalPool pool(id, name, pid, desc); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - 
ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/") { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState st; - st.SetDiskCapacity(100 * 1024); - st.SetDiskUsed(10 * 1024); - cs.SetChunkServerState(st); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + const std::string& token = "testToken", + const std::string& diskType = "nvme", + ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", + uint32_t port = 0, + const std::string& diskPath = "/") { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState st; + st.SetDiskCapacity(100 * 1024); + st.SetDiskUsed(10 * 1024); + cs.SetChunkServerState(st); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -207,7 +171,7 @@ class TestTopologyMetric : public ::testing::Test { std::shared_ptr testObj_; }; -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -229,14 +193,13 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { rap.pageFileRAP.replicaNum = 3; PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE, rap); + PAGEFILE, rap); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -258,12 +221,10 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - 
.WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), Return(true))); testObj_->UpdateTopologyMetrics(); @@ -283,9 +244,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x42]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x42]->copysetNum.get_value()); @@ -301,9 +262,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x43]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x43]->copysetNum.get_value()); @@ -319,43 +280,75 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(1, gLogicalPoolMetrics.size()); - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); //NOLINT - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->chunkServerNum.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthRange.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMin.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMax.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumRange.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMin.get_value()); //NOLINT - 
ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMax.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumRange.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMin.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMax.get_value()); //NOLINT - ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskCapacity.get_value()); //NOLINT - ASSERT_EQ(20 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); //NOLINT - ASSERT_EQ(10 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); //NOLINT - - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 3, + gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); // NOLINT + ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId] + ->chunkServerNum.get_value()); // NOLINT + ASSERT_EQ( + 1, + gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthRange.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMin.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMax.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumRange.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMin.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMax.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumRange.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMin.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMax.get_value()); // NOLINT + ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId] + ->diskCapacity.get_value()); // NOLINT + ASSERT_EQ( + 20 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); // NOLINT + ASSERT_EQ( + 10 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); // NOLINT + + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeUsedBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeLeftBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, 
gLogicalPoolMetrics[logicalPoolId]->chunkSizeTrashedBytes.get_value()); - ASSERT_EQ(1024 * 9, + ASSERT_EQ( + 1024 * 9, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->readIOPS.get_value()); @@ -372,7 +365,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1, gClusterMetrics->copysetNum.get_value()); } -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -396,7 +389,6 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -414,8 +406,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); testObj_->UpdateTopologyMetrics(); diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp index b4e79b17b0..fe3821011d 100644 --- a/test/snapshotcloneserver/test_curvefs_client.cpp +++ b/test/snapshotcloneserver/test_curvefs_client.cpp @@ -20,9 +20,8 @@ * Author: xuchaojie */ - -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" #include "test/util/config_generator.h" @@ -40,19 +39,14 @@ class TestCurveFsClientImpl : public ::testing::Test { static void SetUpTestCase() { ClientConfigGenerator gentor(kClientConfigPath); - // 把超时时间和重试次数改小,已使得测试尽快完成 + // Reduce the timeout and retry count so that the tests complete as + // quickly as possible std::vector options = { - {"mds.listen.addr=127.0.0.1:8888", - "mds.registerToMDS=false", - "mds.rpcTimeoutMS=1", - "mds.maxRPCTimeoutMS=1", - "mds.maxRetryMS=1", - "mds.rpcRetryIntervalUS=1", - "metacache.getLeaderTimeOutMS=1", - "metacache.getLeaderRetry=1", - "metacache.rpcRetryIntervalUS=1", - "chunkserver.opRetryIntervalUS=1", - "chunkserver.opMaxRetry=1", + {"mds.listen.addr=127.0.0.1:8888", "mds.registerToMDS=false", + "mds.rpcTimeoutMS=1", "mds.maxRPCTimeoutMS=1", "mds.maxRetryMS=1", + "mds.rpcRetryIntervalUS=1", "metacache.getLeaderTimeOutMS=1", + "metacache.getLeaderRetry=1", "metacache.rpcRetryIntervalUS=1", + "chunkserver.opRetryIntervalUS=1", "chunkserver.opMaxRetry=1", "chunkserver.rpcTimeoutMS=1", "chunkserver.maxRetrySleepIntervalUS=1", "chunkserver.maxRPCTimeoutMS=1"}, @@ -64,8 +58,7 @@ class TestCurveFsClientImpl : public ::testing::Test { virtual void SetUp() { std::shared_ptr snapClient = std::make_shared(); - std::shared_ptr fileClient = - std::make_shared(); + std::shared_ptr fileClient = std::make_shared(); client_ = std::make_shared(snapClient, fileClient); clientOption_.configPath = kClientConfigPath; clientOption_.mdsRootUser = "root"; @@ -75,9 +68,7 @@ class TestCurveFsClientImpl : public ::testing::Test { client_->Init(clientOption_); } - virtual void TearDown() { - client_->UnInit(); - } + virtual void TearDown() { client_->UnInit(); } protected: std::shared_ptr client_; @@ -85,9 +76,7 @@ class TestCurveFsClientImpl : public ::testing::Test { }; struct TestClosure : public SnapCloneClosure { - void Run() { - std::unique_ptr selfGuard(this); - } + void Run() {
std::unique_ptr selfGuard(this); } }; TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { @@ -111,35 +100,35 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { SegmentInfo segInfo; ret = client_->GetSnapshotSegmentInfo("file1", "user1", 1, 0, &segInfo); ASSERT_LT(ret, 0); - ret = client_->GetSnapshotSegmentInfo( - "file1", clientOption_.mdsRootUser, 1, 0, &segInfo); + ret = client_->GetSnapshotSegmentInfo("file1", clientOption_.mdsRootUser, 1, + 0, &segInfo); ASSERT_LT(ret, 0); ChunkIDInfo cidinfo; FileStatus fstatus; ret = client_->CheckSnapShotStatus("file1", "user1", 1, &fstatus); ASSERT_LT(ret, 0); - ret = client_->CheckSnapShotStatus( - "file1", clientOption_.mdsRootUser, 1, &fstatus); + ret = client_->CheckSnapShotStatus("file1", clientOption_.mdsRootUser, 1, + &fstatus); ASSERT_LT(ret, 0); ChunkInfoDetail chunkInfo; ret = client_->GetChunkInfo(cidinfo, &chunkInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", "user1", 1024, 1, 1024, 0, 0, "default", &fInfo); + ret = client_->CreateCloneFile("source1", "file1", "user1", 1024, 1, 1024, + 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", clientOption_.mdsRootUser, 1024, 1, 1024, - 0, 0, "default", &fInfo); + ret = + client_->CreateCloneFile("source1", "file1", clientOption_.mdsRootUser, + 1024, 1, 1024, 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - TestClosure *cb = new TestClosure(); + TestClosure* cb = new TestClosure(); ret = client_->CreateCloneChunk("", cidinfo, 1, 2, 1024, cb); ASSERT_EQ(ret, 0); - TestClosure *cb2 = new TestClosure(); + TestClosure* cb2 = new TestClosure(); ret = client_->RecoverChunk(cidinfo, 0, 1024, cb2); ASSERT_EQ(ret, 0); @@ -159,7 +148,10 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->GetFileInfo("file1", clientOption_.mdsRootUser, &fInfo); ASSERT_LT(ret, 0); - // client 对mds接口无限重试,这两个接口死循环,先注释掉 + // The client retries the MDS interface indefinitely, so these two + // calls would loop forever; they are commented out + // for now.
+ // ret = client_->GetOrAllocateSegmentInfo( // true, 0, &fInfo, "user1", &segInfo); // ASSERT_LT(ret, 0); @@ -169,8 +161,8 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->RenameCloneFile("user1", 1, 2, "file1", "file2"); ASSERT_LT(ret, 0); - ret = client_->RenameCloneFile( - clientOption_.mdsRootUser, 1, 2, "file1", "file2"); + ret = client_->RenameCloneFile(clientOption_.mdsRootUser, 1, 2, "file1", + "file2"); ASSERT_LT(ret, 0); ret = client_->DeleteFile("file1", "user1", 1); @@ -187,7 +179,5 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ASSERT_LT(ret, 0); } - - } // namespace snapshotcloneserver } // namespace curve diff --git a/test/snapshotcloneserver/test_snapshot_service_manager.cpp b/test/snapshotcloneserver/test_snapshot_service_manager.cpp index ba51d90f98..0af03c9315 100644 --- a/test/snapshotcloneserver/test_snapshot_service_manager.cpp +++ b/test/snapshotcloneserver/test_snapshot_service_manager.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" - -#include "test/snapshotcloneserver/mock_snapshot_server.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/snapshotcloneserver/mock_snapshot_server.h" using curve::common::CountDownEvent; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Property; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,21 +50,16 @@ class TestSnapshotServiceManager : public ::testing::Test { virtual void SetUp() { serverOption_.snapshotPoolThreadNum = 8; serverOption_.snapshotTaskManagerScanIntervalMs = 100; - core_ = - std::make_shared(); - auto metaStore_ = - std::shared_ptr(); + core_ = std::make_shared(); + auto metaStore_ = std::shared_ptr(); snapshotMetric_ = std::make_shared(metaStore_); - std::shared_ptr - taskMgr_ = + std::shared_ptr taskMgr_ = std::make_shared(core_, snapshotMetric_); manager_ = std::make_shared(taskMgr_, core_); - ASSERT_EQ(0, manager_->Init(serverOption_)) - << "manager init fail."; - ASSERT_EQ(0, manager_->Start()) - << "manager start fail."; + ASSERT_EQ(0, manager_->Init(serverOption_)) << "manager init fail."; + ASSERT_EQ(0, manager_->Start()) << "manager start fail."; } virtual void TearDown() { @@ -75,31 +69,22 @@ class TestSnapshotServiceManager : public ::testing::Test { snapshotMetric_ = nullptr; } - void PrepareCreateSnapshot( - const std::string &file, - const std::string &user, - const std::string &desc, - UUID uuid) { + void PrepareCreateSnapshot(const std::string& file, const std::string& user, + const std::string& desc, UUID uuid) { SnapshotInfo info(uuid, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); 
- task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); @@ -112,8 +97,7 @@ class TestSnapshotServiceManager : public ::testing::Test { SnapshotCloneServerOptions serverOption_; }; -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -122,32 +106,25 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -155,8 +132,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPreFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -165,21 +141,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeInternalError))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccessByTaskExist) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccessByTaskExist) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -188,20 +156,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeTaskExist))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeTaskExist))); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotServiceManager, - 
TestCreateSnapshotPushTaskFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPushTaskFail) { const std::string file1 = "file1"; const std::string user1 = "user1"; const std::string desc1 = "snap1"; @@ -209,33 +170,21 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuid1, user1, file1, desc1); EXPECT_CALL(*core_, CreateSnapshotPre(file1, user1, desc1, _)) - .WillRepeatedly(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([] (std::shared_ptr task) { - })); + .WillOnce(Invoke([](std::shared_ptr task) {})); UUID uuid; - int ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid); + int ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); UUID uuid2; - ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid2); + ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid2); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -243,8 +192,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotMultiThreadSuccess) { const std::string file1 = "file1"; const std::string file2 = "file2"; const std::string file3 = "file3"; @@ -264,15 +212,9 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); std::condition_variable cv; std::mutex m; @@ -281,43 +223,28 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cv, &m, &count] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - std::unique_lock lk(m); - count++; - task->Finish(); - cv.notify_all(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cv, &m, &count](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + std::unique_lock lk(m); + count++; + task->Finish(); + cv.notify_all(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file2, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file2, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file3, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file3, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - cv.wait(lk, [&count](){return count == 3;}); + cv.wait(lk, [&count]() { return count == 3; }); - - std::this_thread::sleep_for( - std::chrono::milliseconds( 
- serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); ASSERT_EQ(3, snapshotMetric_->snapshotSucceed.get_value()); @@ -325,7 +252,7 @@ } TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSameFileSuccess) { + TestCreateSnapshotMultiThreadSameFileSuccess) { const std::string file1 = "file1"; const std::string user = "user1"; const std::string desc1 = "snap1"; @@ -343,52 +270,32 @@ EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); CountDownEvent cond1(3); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cond1] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -408,19 +315,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -428,7 +334,7 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -// 删除转cancel用例 +// Test case: a delete that is converted into a cancel TEST_F(TestSnapshotServiceManager,
TestDeleteSnapshotByCancelSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -438,30 +344,23 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] ( - std::shared_ptr task) { - LOG(INFO) << "in HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - break; - } - } - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + LOG(INFO) << "in HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + break; + } + } + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); @@ -496,19 +395,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -516,8 +414,6 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - - TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; @@ -543,10 +439,10 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - cond1.Signal(); - })); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -555,9 +451,8 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -579,19 +474,18 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + 
.WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -599,7 +493,6 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -610,29 +503,22 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -659,8 +545,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -688,8 +573,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -702,8 +586,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeInternalError))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -722,8 +606,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail2) { snapInfo.push_back(snap1); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -740,29 +624,22 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { SnapshotInfo info(uuidOut, user, file, desc); 
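Throughout these tests the mocked core hands control back to the test thread via curve::common::CountDownEvent (cond1.Signal() in the mock, cond1.Wait() in the test body). A minimal sketch of the Signal()/Wait() semantics the tests rely on, assuming nothing beyond what is used here; the production class may differ:

    #include <condition_variable>
    #include <mutex>

    // Countdown latch sketch: Wait() blocks until Signal() has been called
    // `count` times in total.
    class CountDownEventSketch {
     public:
        explicit CountDownEventSketch(int count) : count_(count) {}

        void Signal() {
            std::lock_guard<std::mutex> lk(mutex_);
            if (--count_ <= 0) {
                cv_.notify_all();
            }
        }

        void Wait() {
            std::unique_lock<std::mutex> lk(mutex_);
            cv_.wait(lk, [this] { return count_ <= 0; });
        }

     private:
        int count_;
        std::mutex mutex_;
        std::condition_variable cv_;
    };

This is also why the tests assert on metrics only after Wait() returns plus a sleep of twice the scan interval: the task thread has signaled completion, and the task manager needs one more scan cycle to fold the finished task into the counters.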
     EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(1);
 
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke(
-            [&cond1, progress] (std::shared_ptr<SnapshotTaskInfo> task) {
-            task->SetProgress(progress);
-            cond1.Signal();
-        }));
-
-    int ret = manager_->CreateSnapshot(
-        file,
-        user,
-        desc,
-        &uuid);
+        .WillOnce(
+            Invoke([&cond1, progress](std::shared_ptr<SnapshotTaskInfo> task) {
+                task->SetProgress(progress);
+                cond1.Signal();
+            }));
+
+    int ret = manager_->CreateSnapshot(file, user, desc, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
     cond1.Wait();
 
-
     const std::string file2 = "file2";
     const std::string desc2 = "snap2";
     UUID uuid2 = "uuid2";
@@ -789,8 +666,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) {
     snapInfo.push_back(snap4);
 
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(snapInfo),
-                        Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess)));
 
     // empty filter
     SnapshotFilterCondition filter;
@@ -826,14 +702,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) {
             ASSERT_EQ(Status::error, s.GetStatus());
             ASSERT_EQ(0, v.GetSnapProgress());
         } else {
-            FAIL() << "should not exist this uuid = "
-                   << s.GetUuid();
+            FAIL() << "should not exist this uuid = " << s.GetUuid();
         }
     }
 
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(snapInfo),
-                        Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess)));
 
     // filter uuid
     SnapshotFilterCondition filter2;
@@ -852,14 +726,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) {
             ASSERT_EQ(Status::pending, s.GetStatus());
             ASSERT_EQ(progress, v.GetSnapProgress());
         } else {
-            FAIL() << "should not exist this uuid = "
-                   << s.GetUuid();
+            FAIL() << "should not exist this uuid = " << s.GetUuid();
         }
     }
 
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(snapInfo),
-                        Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess)));
 
     // filter by filename
     SnapshotFilterCondition filter3;
@@ -890,14 +762,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) {
             ASSERT_EQ(Status::error, s.GetStatus());
             ASSERT_EQ(0, v.GetSnapProgress());
         } else {
-            FAIL() << "should not exist this uuid = "
-                   << s.GetUuid();
+            FAIL() << "should not exist this uuid = " << s.GetUuid();
        }
     }
 
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(snapInfo),
-                        Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess)));
 
     // filter by status
     SnapshotFilterCondition filter4;
@@ -923,14 +793,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) {
             ASSERT_EQ(Status::done, s.GetStatus());
             ASSERT_EQ(100, v.GetSnapProgress());
         } else {
-            FAIL() << "should not exist this uuid = "
-                   << s.GetUuid();
+            FAIL() << "should not exist this uuid = " << s.GetUuid();
         }
     }
 
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(snapInfo),
-                        Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess)));
 
     // filter by user
     SnapshotFilterCondition filter5;
@@ -949,8 +817,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) {
             ASSERT_EQ(Status::done, s.GetStatus());
             ASSERT_EQ(100, v.GetSnapProgress());
         } else {
-            FAIL() << "should not exist this uuid = "
-                   << s.GetUuid();
+            FAIL() << "should not exist this uuid = " << s.GetUuid();
         }
     }
 }
@@ -964,8 +831,8 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterFail) {
     std::vector<SnapshotInfo> snapInfo;
 
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(snapInfo),
-                        Return(kErrCodeInternalError)));
+        .WillOnce(
+            DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeInternalError)));
 
     SnapshotFilterCondition filter;
     std::vector<FileSnapshotInfo> fileSnapInfo;
@@ -993,32 +860,30 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskSuccess) {
     list.push_back(snap2);
     list.push_back(snap3);
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(list),
-                        Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(2);
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke([&cond1] (std::shared_ptr<SnapshotTaskInfo> task) {
+        .WillOnce(Invoke([&cond1](std::shared_ptr<SnapshotTaskInfo> task) {
             task->GetSnapshotInfo().SetStatus(Status::done);
-                task->Finish();
-                cond1.Signal();
-            }));
+            task->Finish();
+            cond1.Signal();
+        }));
 
     EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_))
-        .WillOnce(Invoke([&cond1] (std::shared_ptr<SnapshotTaskInfo> task) {
+        .WillOnce(Invoke([&cond1](std::shared_ptr<SnapshotTaskInfo> task) {
             task->GetSnapshotInfo().SetStatus(Status::done);
-                task->Finish();
-                cond1.Signal();
-            }));
+            task->Finish();
+            cond1.Signal();
+        }));
 
     int ret = manager_->RecoverSnapshotTask();
     ASSERT_EQ(kErrCodeSuccess, ret);
 
     cond1.Wait();
-    std::this_thread::sleep_for(
-        std::chrono::milliseconds(
-            serverOption_.snapshotTaskManagerScanIntervalMs * 2));
+    std::this_thread::sleep_for(std::chrono::milliseconds(
+        serverOption_.snapshotTaskManagerScanIntervalMs * 2));
 
     ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value());
     ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value());
@@ -1041,15 +906,13 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskFail) {
     list.push_back(snap1);
     list.push_back(snap2);
     EXPECT_CALL(*core_, GetSnapshotList(_))
-        .WillOnce(DoAll(SetArgPointee<0>(list),
-                        Return(kErrCodeInternalError)));
+        .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeInternalError)));
 
     int ret = manager_->RecoverSnapshotTask();
     ASSERT_EQ(kErrCodeInternalError, ret);
 }
 
-TEST_F(TestSnapshotServiceManager,
-    TestCancelSnapshotSuccess) {
+TEST_F(TestSnapshotServiceManager, TestCancelSnapshotSuccess) {
     const std::string file = "file1";
     const std::string user = "user1";
     const std::string desc = "snap1";
@@ -1062,31 +925,27 @@ TEST_F(TestSnapshotServiceManager,
     SnapshotInfo info2(uuidOut2, user, file, desc);
     EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _))
         .Times(2)
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info),
-            Return(kErrCodeSuccess)))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info2),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess)))
+        .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(1);
     CountDownEvent cond2(1);
 
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke([&cond1, &cond2] (
-                std::shared_ptr<SnapshotTaskInfo> task) {
-            LOG(INFO) << "in mock HandleCreateSnapshotTask";
-            while (1) {
-                if (task->IsCanceled()) {
-                    cond1.Signal();
-                    break;
-                }
-            }
-            task->Finish();
-            cond2.Signal();
-        }));
-
-    // 取消排队的快照会调一次
+        .WillOnce(
+            Invoke([&cond1, &cond2](std::shared_ptr<SnapshotTaskInfo> task) {
+                LOG(INFO) << "in mock HandleCreateSnapshotTask";
+                while (1) {
+                    if (task->IsCanceled()) {
+                        cond1.Signal();
+                        break;
+                    }
+                }
+                task->Finish();
+                cond2.Signal();
+            }));
+
+    // Canceling the queued (not yet scheduled) snapshot invokes this once
     EXPECT_CALL(*core_, HandleCancelUnSchduledSnapshotTask(_))
         .WillOnce(Return(kErrCodeSuccess));
@@ -1099,32 +958,20 @@
     EXPECT_CALL(*core_, HandleCancelScheduledSnapshotTask(_))
         .WillOnce(Invoke(callback));
 
-    int ret = manager_->CreateSnapshot(
-        file,
-        user,
-        desc,
-        &uuid);
+    int ret = manager_->CreateSnapshot(file, user, desc, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
     ASSERT_EQ(uuid, uuidOut);
 
-    // 再打一个快照,覆盖排队的情况
-    ret = manager_->CreateSnapshot(
-        file,
-        user,
-        desc,
-        &uuid2);
+    // Take another snapshot to cover the queued case
+    ret = manager_->CreateSnapshot(file, user, desc, &uuid2);
     ASSERT_EQ(kErrCodeSuccess, ret);
     ASSERT_EQ(uuid2, uuidOut2);
 
-    // 先取消在排队的快照
-    ret = manager_->CancelSnapshot(uuidOut2,
-        user,
-        file);
+    // Cancel the queued snapshot first
+    ret = manager_->CancelSnapshot(uuidOut2, user, file);
 
     ASSERT_EQ(kErrCodeSuccess, ret);
 
-    ret = manager_->CancelSnapshot(uuidOut,
-        user,
-        file);
+    ret = manager_->CancelSnapshot(uuidOut, user, file);
 
     ASSERT_EQ(kErrCodeSuccess, ret);
@@ -1132,8 +979,7 @@ TEST_F(TestSnapshotServiceManager,
     cond2.Wait();
 }
 
-TEST_F(TestSnapshotServiceManager,
-    TestCancelSnapshotFailDiffUser) {
+TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffUser) {
     const std::string file = "file1";
     const std::string user = "user1";
     const std::string desc = "snap1";
@@ -1142,41 +988,32 @@ TEST_F(TestSnapshotServiceManager,
     SnapshotInfo info(uuidOut, user, file, desc);
 
     EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(1);
     CountDownEvent cond2(1);
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke([&cond1, &cond2] (
-                std::shared_ptr<SnapshotTaskInfo> task) {
-            cond2.Wait();
-            task->Finish();
-            cond1.Signal();
-        }));
-
-    int ret = manager_->CreateSnapshot(
-        file,
-        user,
-        desc,
-        &uuid);
+        .WillOnce(
+            Invoke([&cond1, &cond2](std::shared_ptr<SnapshotTaskInfo> task) {
+                cond2.Wait();
+                task->Finish();
+                cond1.Signal();
+            }));
+
+    int ret = manager_->CreateSnapshot(file, user, desc, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
     ASSERT_EQ(uuid, uuidOut);
 
     std::string user2 = "user2";
-    ret = manager_->CancelSnapshot(uuidOut,
-        user2,
-        file);
+    ret = manager_->CancelSnapshot(uuidOut, user2, file);
     cond2.Signal();
     ASSERT_EQ(kErrCodeInvalidUser, ret);
     cond1.Wait();
 }
 
-TEST_F(TestSnapshotServiceManager,
-    TestCancelSnapshotFailDiffFile) {
+TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffFile) {
     const std::string file = "file1";
     const std::string user = "user1";
     const std::string desc = "snap1";
@@ -1185,40 +1022,30 @@ TEST_F(TestSnapshotServiceManager,
     SnapshotInfo info(uuidOut, user, file, desc);
 
     EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _))
-        .WillOnce(DoAll(
-            SetArgPointee<3>(info),
-            Return(kErrCodeSuccess)));
+        .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess)));
 
     CountDownEvent cond1(1);
     CountDownEvent cond2(1);
     EXPECT_CALL(*core_, HandleCreateSnapshotTask(_))
-        .WillOnce(Invoke([&cond1, &cond2] (
-                std::shared_ptr<SnapshotTaskInfo> task) {
-            cond2.Wait();
-            task->Finish();
-            cond1.Signal();
-        }));
-
-    int ret = manager_->CreateSnapshot(
-        file,
-        user,
-        desc,
-        &uuid);
+        .WillOnce(
+            Invoke([&cond1, &cond2](std::shared_ptr<SnapshotTaskInfo> task) {
+                cond2.Wait();
+                task->Finish();
+                cond1.Signal();
+            }));
+
+    int ret = manager_->CreateSnapshot(file, user, desc, &uuid);
     ASSERT_EQ(kErrCodeSuccess, ret);
     ASSERT_EQ(uuid, uuidOut);
     std::string file2 = "file2";
-    ret = manager_->CancelSnapshot(uuidOut,
-        user,
-        file2);
+    ret = manager_->CancelSnapshot(uuidOut, user, file2);
     cond2.Signal();
     ASSERT_EQ(kErrCodeFileNameNotMatch, ret);
     cond1.Wait();
 }
 
-
 }  // namespace snapshotcloneserver
 }  // namespace curve
-
diff --git a/test/tools/chunkserver_client_test.cpp b/test/tools/chunkserver_client_test.cpp
index 9af94d01d3..b88d1fab08 100644
--- a/test/tools/chunkserver_client_test.cpp
+++ b/test/tools/chunkserver_client_test.cpp
@@ -20,14 +20,15 @@
  * Author: charisu
  */
 
-#include <gtest/gtest.h>
 #include "src/tools/chunkserver_client.h"
-#include "test/client/fake/mockMDS.h"
+
+#include <gtest/gtest.h>
+
 #include "test/client/fake/fakeMDS.h"
+#include "test/client/fake/mockMDS.h"
 
-using curve::chunkserver::GetChunkInfoResponse;
 using curve::chunkserver::CHUNK_OP_STATUS;
-
+using curve::chunkserver::GetChunkInfoResponse;
 DECLARE_string(chunkserver_list);
 
 namespace brpc {
@@ -46,9 +47,7 @@ class ChunkServerClientTest : public ::testing::Test {
         fakemds.Initialize();
         fakemds.CreateFakeChunkservers(false);
     }
-    void TearDown() {
-        fakemds.UnInitialize();
-    }
+    void TearDown() { fakemds.UnInitialize(); }
     ChunkServerClient client;
     FakeMDS fakemds;
 };
@@ -59,37 +58,36 @@ TEST_F(ChunkServerClientTest, Init) {
 }
 
 TEST_F(ChunkServerClientTest, GetRaftStatus) {
-    std::vector<RaftStateService *> statServices =
-        fakemds.GetRaftStateService();
-    // 正常情况
+    std::vector<RaftStateService*> statServices =
+        fakemds.GetRaftStateService();
+    // Normal case
     butil::IOBuf iobuf;
     iobuf.append("test");
     statServices[0]->SetBuf(iobuf);
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     ASSERT_EQ(0, client.GetRaftStatus(&iobuf));
-    // 传入空指针
+    // Pass in a null pointer
     ASSERT_EQ(-1, client.GetRaftStatus(nullptr));
-    // RPC失败的情况
+    // RPC failure case
     statServices[0]->SetFailed(true);
     ASSERT_EQ(-1, client.GetRaftStatus(&iobuf));
 }
 
 TEST_F(ChunkServerClientTest, CheckChunkServerOnline) {
-    std::vector<FakeChunkService *> chunkServices = fakemds.GetChunkservice();
+    std::vector<FakeChunkService*> chunkServices = fakemds.GetChunkservice();
     brpc::Controller cntl;
-    std::unique_ptr<GetChunkInfoResponse> response(
-        new GetChunkInfoResponse());
+    std::unique_ptr<GetChunkInfoResponse> response(new GetChunkInfoResponse());
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
     std::unique_ptr<FakeReturn> fakeret(
         new FakeReturn(&cntl, static_cast<void*>(response.get())));
     chunkServices[0]->SetGetChunkInfo(fakeret.get());
 
-    // 正常情况
+    // Normal case
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     ASSERT_EQ(true, client.CheckChunkServerOnline());
 
-    // RPC失败的情况
+    // RPC failure case
     cntl.SetFailed("fail for test");
     ASSERT_EQ(false, client.CheckChunkServerOnline());
 }
 
@@ -98,23 +96,23 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) {
     auto copysetServices = fakemds.GetCreateCopysetService();
     CopysetStatusRequest request;
     CopysetStatusResponse response;
-    curve::common::Peer *peer = new curve::common::Peer();
+    curve::common::Peer* peer = new curve::common::Peer();
     peer->set_address("127.0.0.1:9191");
     request.set_logicpoolid(1);
     request.set_copysetid(1001);
     request.set_allocated_peer(peer);
     request.set_queryhash(true);
 
-    // 正常情况
+    // Normal case
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     ASSERT_EQ(0, client.GetCopysetStatus(request, &response));
 
-    // 返回码不ok的情况
+    // Return code is not OK
     copysetServices[0]->SetStatus(
         COPYSET_OP_STATUS::COPYSET_OP_STATUS_COPYSET_NOTEXIST);
     ASSERT_EQ(-1, client.GetCopysetStatus(request, &response));
 
-    // RPC失败的情况
+    // RPC failure case
     brpc::Controller cntl;
     std::unique_ptr<FakeReturn> fakeret(new FakeReturn(&cntl, nullptr));
     copysetServices[0]->SetFakeReturn(fakeret.get());
@@ -122,27 +120,26 @@
 }
 
 TEST_F(ChunkServerClientTest, GetChunkHash) {
-    std::vector<FakeChunkService *> chunkServices = fakemds.GetChunkservice();
+    std::vector<FakeChunkService*> chunkServices = fakemds.GetChunkservice();
     brpc::Controller cntl;
-    std::unique_ptr<GetChunkHashResponse> response(
-        new GetChunkHashResponse());
+    std::unique_ptr<GetChunkHashResponse> response(new GetChunkHashResponse());
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
     response->set_hash("1234");
     std::unique_ptr<FakeReturn> fakeret(
         new FakeReturn(&cntl, static_cast<void*>(response.get())));
     chunkServices[0]->SetGetChunkHash(fakeret.get());
     Chunk chunk(1, 100, 1001);
 
-    // 正常情况
+    // Normal case
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     std::string hash;
     ASSERT_EQ(0, client.GetChunkHash(chunk, &hash));
     ASSERT_EQ("1234", hash);
 
-    // RPC失败的情况
+    // RPC failure case
     cntl.SetFailed("fail for test");
     ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash));
 
-    // 返回码不为ok
+    // Return code is not OK
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
     ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash));
 }
diff --git a/test/tools/version_tool_test.cpp b/test/tools/version_tool_test.cpp
index 64581f73ac..db40892f40 100644
--- a/test/tools/version_tool_test.cpp
+++ b/test/tools/version_tool_test.cpp
@@ -21,21 +21,23 @@
  * Copyright (c) 2018 netease
 */
 
-#include <gtest/gtest.h>
 #include "src/tools/version_tool.h"
+
+#include <gtest/gtest.h>
+
 #include "test/tools/mock/mock_mds_client.h"
 #include "test/tools/mock/mock_metric_client.h"
 #include "test/tools/mock/mock_snapshot_clone_client.h"
 
+using curve::mds::topology::ChunkServerStatus;
+using curve::mds::topology::DiskState;
+using curve::mds::topology::OnlineState;
 using ::testing::_;
+using ::testing::An;
+using ::testing::DoAll;
 using ::testing::Return;
 using ::testing::ReturnRef;
-using ::testing::DoAll;
 using ::testing::SetArgPointee;
-using ::testing::An;
-using curve::mds::topology::ChunkServerStatus;
-using curve::mds::topology::DiskState;
-using curve::mds::topology::OnlineState;
 
 namespace curve {
 namespace tool {
@@ -53,8 +55,8 @@ class VersionToolTest : public ::testing::Test {
         metricClient_ = nullptr;
     }
 
-    void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo,
-                          uint64_t csId) {
+    void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo,
+                          uint64_t csId) {
         csInfo->set_chunkserverid(csId);
         csInfo->set_disktype("ssd");
         csInfo->set_hostip("127.0.0.1");
@@ -73,64 +75,61 @@ TEST_F(VersionToolTest, GetAndCheckMdsVersion) {
     VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_);
-    std::map<std::string, std::string> dummyServerMap =
-        {{"127.0.0.1:6666", "127.0.0.1:6667"},
-         {"127.0.0.1:6668", "127.0.0.1:6669"},
-         {"127.0.0.1:6670", "127.0.0.1:6671"}};
+    std::map<std::string, std::string> dummyServerMap = {
+        {"127.0.0.1:6666", "127.0.0.1:6667"},
+        {"127.0.0.1:6668", "127.0.0.1:6669"},
+        {"127.0.0.1:6670", "127.0.0.1:6671"}};
 
-    // 1、正常情况
+    // 1. Normal case
     EXPECT_CALL(*mdsClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
     std::string version;
     std::vector<std::string> failedList;
     ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList));
     ASSERT_EQ("0.0.1", version);
     ASSERT_TRUE(failedList.empty());
 
-    // 2、获取部分mds curve_version失败
+    // 2. Failed to get curve_version from some MDS instances
     EXPECT_CALL(*mdsClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
         .WillOnce(Return(MetricRet::kOtherErr))
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
     ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList));
     ASSERT_EQ("0.0.1", version);
     std::vector<std::string> expectedList = {"127.0.0.1:6667"};
     ASSERT_EQ(expectedList, failedList);
 
-    // 3、dummyServerMap为空
+    // 3. dummyServerMap is empty
     std::map<std::string, std::string> dummyServerMap2;
     EXPECT_CALL(*mdsClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap2));
-    EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
-        .Times(0);
+    EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0);
     ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList));
     ASSERT_TRUE(failedList.empty());
 
-    // 4、version不一致
+    // 4. Inconsistent versions
     EXPECT_CALL(*mdsClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.2"),
-                        Return(MetricRet::kOK)))
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.1"),
-                        Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)))
         .WillOnce(Return(MetricRet::kNotFound));
     ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList));
     ASSERT_TRUE(failedList.empty());
 
-    // 5、老版本mds
+    // 5. Old MDS version
     EXPECT_CALL(*mdsClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
@@ -151,123 +150,112 @@ TEST_F(VersionToolTest, GetChunkServerVersion) {
         chunkservers.emplace_back(csInfo);
     }
 
-    // 1、正常情况
-    EXPECT_CALL(*mdsClient_, ListChunkServersInCluster(
-        An<std::vector<ChunkServerInfo>*>()))
+    // 1. Normal case
+    EXPECT_CALL(*mdsClient_,
+                ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>()))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(chunkservers),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0)));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(5)
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
     std::string version;
     std::vector<std::string> failedList;
-    ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version,
-                                                           &failedList));
+    ASSERT_EQ(0,
              versionTool.GetAndCheckChunkServerVersion(&version, &failedList));
     ASSERT_EQ("0.0.1", version);
     ASSERT_TRUE(failedList.empty());
 
-    // 2、ListChunkServersInCluster失败
-    EXPECT_CALL(*mdsClient_, ListChunkServersInCluster(
-        An<std::vector<ChunkServerInfo>*>()))
+    // 2. ListChunkServersInCluster failed
+    EXPECT_CALL(*mdsClient_,
+                ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>()))
         .Times(1)
         .WillOnce(Return(-1));
-    ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version,
-                                                            &failedList));
+    ASSERT_EQ(-1,
+              versionTool.GetAndCheckChunkServerVersion(&version, &failedList));
 
-    // 3、获取metric失败
-    EXPECT_CALL(*mdsClient_, ListChunkServersInCluster(
-        An<std::vector<ChunkServerInfo>*>()))
+    // 3. Failed to get the metric
+    EXPECT_CALL(*mdsClient_,
+                ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>()))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(chunkservers),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0)));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(5)
         .WillOnce(Return(MetricRet::kOtherErr))
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
-    ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version,
-                                                           &failedList));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
+    ASSERT_EQ(0,
+              versionTool.GetAndCheckChunkServerVersion(&version, &failedList));
     std::vector<std::string> expectList = {"127.0.0.1:9191"};
     ASSERT_EQ(expectList, failedList);
 
-    // 4、chunkserverList为空
-    EXPECT_CALL(*mdsClient_, ListChunkServersInCluster(
-        An<std::vector<ChunkServerInfo>*>()))
+    // 4. chunkserverList is empty
+    EXPECT_CALL(*mdsClient_,
+                ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>()))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(std::vector<ChunkServerInfo>()),
-                        Return(0)));
-    EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
-        .Times(0);
-    ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version,
-                                                            &failedList));
+        .WillOnce(
+            DoAll(SetArgPointee<0>(std::vector<ChunkServerInfo>()), Return(0)));
+    EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0);
+    ASSERT_EQ(-1,
+              versionTool.GetAndCheckChunkServerVersion(&version, &failedList));
    ASSERT_TRUE(failedList.empty());
 
-    // 5、version不一致
-    EXPECT_CALL(*mdsClient_, ListChunkServersInCluster(
-        An<std::vector<ChunkServerInfo>*>()))
+    // 5. Inconsistent versions
+    EXPECT_CALL(*mdsClient_,
+                ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>()))
        .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(chunkservers),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0)));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(5)
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.2"),
-                        Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK)))
         .WillOnce(Return(MetricRet::kNotFound))
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
-    ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version,
-                                                            &failedList));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
+    ASSERT_EQ(-1,
+              versionTool.GetAndCheckChunkServerVersion(&version, &failedList));
     ASSERT_TRUE(failedList.empty());
 
-    // 6、老版本
-    EXPECT_CALL(*mdsClient_, ListChunkServersInCluster(
-        An<std::vector<ChunkServerInfo>*>()))
+    // 6. Old version
+    EXPECT_CALL(*mdsClient_,
+                ListChunkServersInCluster(An<std::vector<ChunkServerInfo>*>()))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(chunkservers),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0)));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(5)
         .WillRepeatedly(Return(MetricRet::kNotFound));
-    ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version,
-                                                           &failedList));
+    ASSERT_EQ(0,
+              versionTool.GetAndCheckChunkServerVersion(&version, &failedList));
     ASSERT_EQ("before-0.0.5.2", version);
     ASSERT_TRUE(failedList.empty());
 }
 
 TEST_F(VersionToolTest, GetClientVersion) {
     VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_);
-    std::vector<std::string> clientAddrs =
-        {"127.0.0.1:8000", "127.0.0.1:8001", "127.0.0.1:8002",
-         "127.0.0.1:8003", "127.0.0.1:8004", "127.0.0.1:8005"};
+    std::vector<std::string> clientAddrs = {"127.0.0.1:8000", "127.0.0.1:8001",
+                                            "127.0.0.1:8002", "127.0.0.1:8003",
+                                            "127.0.0.1:8004", "127.0.0.1:8005"};
 
-    // 1、正常情况
+    // 1. Normal case
     EXPECT_CALL(*mdsClient_, ListClient(_, _))
         .Times(1)
-        .WillOnce(DoAll(SetArgPointee<0>(clientAddrs),
-                        Return(0)));
+        .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0)));
     EXPECT_CALL(*metricClient_, GetMetric(_, kProcessCmdLineMetricName, _))
         .Times(6)
         .WillOnce(Return(MetricRet::kOtherErr))
-        .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu),
-                        Return(MetricRet::kOK)))
-        .WillOnce(DoAll(SetArgPointee<2>(kProcessPython),
-                        Return(MetricRet::kOK)))
-        .WillOnce(DoAll(SetArgPointee<2>(kProcessOther),
-                        Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), Return(MetricRet::kOK)))
+        .WillOnce(
+            DoAll(SetArgPointee<2>(kProcessPython), Return(MetricRet::kOK)))
+        .WillOnce(
+            DoAll(SetArgPointee<2>(kProcessOther), Return(MetricRet::kOK)))
         .WillRepeatedly(DoAll(SetArgPointee<2>(kProcessNebdServer),
-                        Return(MetricRet::kOK)));
+                              Return(MetricRet::kOK)));
     EXPECT_CALL(*metricClient_, GetMetric(_, kCurveVersionMetricName, _))
         .Times(5)
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"),
-                        Return(MetricRet::kOK)))
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"),
-                        Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), Return(MetricRet::kOK)))
         .WillOnce(Return(MetricRet::kNotFound))
         .WillOnce(Return(MetricRet::kNotFound))
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"),
-                        Return(MetricRet::kOK)));
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK)));
     ClientVersionMapType clientVersionMap;
     ClientVersionMapType expected;
     VersionMapType versionMap = {{"0.0.5.2", {"127.0.0.1:8004"}},
@@ -282,85 +270,80 @@ TEST_F(VersionToolTest, GetClientVersion) {
     ASSERT_EQ(0, versionTool.GetClientVersion(&clientVersionMap));
     ASSERT_EQ(expected, clientVersionMap);
 
-    // 2、ListClient失败
-    EXPECT_CALL(*mdsClient_, ListClient(_, _))
-        .Times(1)
-        .WillOnce(Return(-1));
+    // 2. ListClient failed
+    EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1));
     ASSERT_EQ(-1, versionTool.GetClientVersion(&clientVersionMap));
 }
 
 TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) {
     VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_);
-    std::map<std::string, std::string> dummyServerMap =
-        {{"127.0.0.1:6666", "127.0.0.1:6667"},
-         {"127.0.0.1:6668", "127.0.0.1:6669"},
-         {"127.0.0.1:6670", "127.0.0.1:6671"}};
+    std::map<std::string, std::string> dummyServerMap = {
+        {"127.0.0.1:6666", "127.0.0.1:6667"},
+        {"127.0.0.1:6668", "127.0.0.1:6669"},
+        {"127.0.0.1:6670", "127.0.0.1:6671"}};
 
-    // 1、正常情况
+    // 1. Normal case
     EXPECT_CALL(*snapshotClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
     std::string version;
     std::vector<std::string> failedList;
-    ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version,
-                                                             &failedList));
+    ASSERT_EQ(
+        0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList));
     ASSERT_EQ("0.0.1", version);
     ASSERT_TRUE(failedList.empty());
 
-    // 2、获取部分curve_version失败
+    // 2. Failed to get curve_version from some instances
     EXPECT_CALL(*snapshotClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
         .WillOnce(Return(MetricRet::kOtherErr))
-        .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"),
-                              Return(MetricRet::kOK)));
-    ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version,
-                                                             &failedList));
+        .WillRepeatedly(
+            DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)));
+    ASSERT_EQ(
+        0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList));
     ASSERT_EQ("0.0.1", version);
     std::vector<std::string> expectedList = {"127.0.0.1:6667"};
     ASSERT_EQ(expectedList, failedList);
 
-    // 3、dummyServerMap为空
+    // 3. dummyServerMap is empty
    std::map<std::string, std::string> dummyServerMap2;
     EXPECT_CALL(*snapshotClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap2));
-    EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
-        .Times(0);
-    ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version,
-                                                              &failedList));
+    EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0);
+    ASSERT_EQ(
+        -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList));
     ASSERT_TRUE(failedList.empty());
 
-    // 4、version不一致
+    // 4. Inconsistent versions
     EXPECT_CALL(*snapshotClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.2"),
-                        Return(MetricRet::kOK)))
-        .WillOnce(DoAll(SetArgPointee<2>("0.0.1"),
-                        Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK)))
+        .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK)))
         .WillOnce(Return(MetricRet::kNotFound));
-    ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version,
-                                                              &failedList));
+    ASSERT_EQ(
+        -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList));
     ASSERT_TRUE(failedList.empty());
 
-    // 5、老版本mds
+    // 5. Old MDS version
     EXPECT_CALL(*snapshotClient_, GetDummyServerMap())
         .Times(1)
         .WillOnce(ReturnRef(dummyServerMap));
     EXPECT_CALL(*metricClient_, GetMetric(_, _, _))
         .Times(3)
         .WillRepeatedly(Return(MetricRet::kNotFound));
-    ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version,
-                                                             &failedList));
+    ASSERT_EQ(
+        0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList));
     ASSERT_EQ("before-0.0.5.2", version);
     ASSERT_TRUE(failedList.empty());
 }