translate ch-comments to en-comments #2927

Open · wants to merge 2 commits into base: master
145 changes: 73 additions & 72 deletions conf/chunkserver.conf.example
@@ -1,18 +1,18 @@
#
# Global settings
#
# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip=127.0.0.1
global.port=8200
global.subnet=127.0.0.0/24
global.enable_external_server=false
global.external_ip=127.0.0.1
global.external_subnet=127.0.0.0/24
# chunk大小,一般16MB
# Chunk size, typically 16MB
global.chunk_size=16777216
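# (Example: 16777216 bytes = 16 * 1024 * 1024, i.e. 16 MiB.)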
# chunk 元数据页大小,一般4KB
# Chunk metadata page size, typically 4KB
global.meta_page_size=4096
# clone chunk允许的最长location长度
# Maximum length allowed for the location of a clone chunk
# chunk's block size; IO requests must be aligned with it, supported values are 512 and 4096
# it should be consistent with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations
# for clone chunks and snapshot chunks, it is also the minimum granularity that each bit represents
@@ -23,34 +23,35 @@ global.location_limit=3000
#
# MDS settings
#
#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
# Multiple MDS addresses are supported, separated by commas, e.g. 127.0.0.1:6666,127.0.0.1:7777
mds.listen.addr=127.0.0.1:6666
# 向mds注册的最大重试次数
# Maximum retry count for registering with MDS
mds.register_retries=100
# 向mds注册的rpc超时时间,一般1000ms
# RPC timeout for MDS registration, typically 1000ms
mds.register_timeout=1000
# 向mds发送心跳的间隔,一般10s
# Interval for sending heartbeats to MDS, usually 10s
mds.heartbeat_interval=10
# 向mds发送心跳的rpc超时间,一般1000ms
# RPC timeout for sending heartbeats to MDS, typically 1000ms
mds.heartbeat_timeout=5000

#
# Chunkserver settings
#
# chunkserver主目录
# Main directory for chunkserver
chunkserver.stor_uri=local://./0/
# chunkserver元数据文件
# Metadata file for chunkserver
chunkserver.meta_uri=local://./0/chunkserver.dat
# disk类型
# Disk type
chunkserver.disk_type=nvme
# raft内部install snapshot带宽上限,一般20MB
# Raft internal install snapshot bandwidth limit, usually 20MB
chunkserver.snapshot_throttle_throughput_bytes=20971520
# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
# 不是20MB的带宽
# Throttle check cycles enable finer-grained bandwidth control. For example,
# with snapshotThroughputBytes=100MB and check cycles=10, the budget is 10MB
# per 1/10 second and does not accumulate: the first 1/10 second's 10MB budget
# expires when that window ends, so the second 1/10 second again gets 10MB of
# bandwidth, not 20MB.
chunkserver.snapshot_throttle_check_cycles=4
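# Worked example with the values configured above: a throughput limit of
# 20971520 bytes/s and check_cycles=4 give 20971520 / 4 = 5242880 bytes
# (~5 MiB) per 1/4-second window, with no carry-over between windows.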
# 限制inflight io数量,一般是5000
# Limit for the number of inflight IO requests, usually 5000
chunkserver.max_inflight_requests=5000

#
@@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
#
# Copyset settings
#
# 是否检查任期,一般检查
# Whether to check the Raft term, usually enabled
copyset.check_term=true
# 是否关闭raft配置变更的服务,一般不关闭
# Whether to disable the service for raft configuration changes, generally not disabled
copyset.disable_cli=false
copyset.log_applied_task=false
# raft选举超时时间,一般是5000ms
# Raft election timeout, usually 5000ms
copyset.election_timeout_ms=1000
# raft打快照间隔,一般是1800s,也就是30分钟
# Raft snapshot interval, usually 1800s, i.e., 30 minutes
copyset.snapshot_interval_s=1800
# add一个节点,add的节点首先以类似learner的角色拷贝数据
# 在跟leader差距catchup_margin个entry的时候,leader
# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
# 会commit&apply,catchup_margin较小可以大概率确保learner
# 后续很快可以加入复制组
# When adding a node, the new node first copies data in a learner-like role.
# Once it is within catchup_margin entries of the leader, the leader attempts
# to submit the configuration-change entry (generally, a submitted entry will
# be committed and applied). A small catchup_margin makes it very likely that
# the learner can join the replication group soon afterwards.
copyset.catchup_margin=1000
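# Illustrative example: with catchup_margin=1000, a joining peer that is no
# more than 1000 log entries behind the leader triggers the configuration
# change, so it can become a full replica shortly afterwards.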
# copyset chunk数据目录
# Copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets
# raft wal log目录
# Raft WAL log directory
copyset.raft_log_uri=curve://./0/copysets
# raft元数据目录
# Raft metadata directory
copyset.raft_meta_uri=local://./0/copysets
# raft snapshot目录
# Raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets
# copyset回收目录
# Copyset recycling directory
copyset.recycler_uri=local://./0/recycler
# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
# Threshold for concurrently loading copysets at chunkserver startup; 0 means no limit
copyset.load_concurrency=10
# chunkserver use how many threads to use copyset complete sync.
# Number of threads the chunkserver uses for full copyset synchronization
copyset.sync_concurrency=20
# 检查copyset是否加载完成出现异常时的最大重试次数
# Maximum number of retries when an error occurs while checking whether copysets have finished loading
copyset.check_retrytimes=3
# 当前peer的applied_index与leader上的committed_index差距小于该值
# 则判定copyset已经加载完成
# If the difference between the applied_index of the current peer and the committed_index
# on the leader is less than this value, the copyset is considered loaded.
copyset.finishload_margin=2000
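# Worked example: if the leader's committed_index is 10000 and this peer's
# applied_index is 8500, the gap is 1500 < 2000, so the copyset counts as loaded.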
# 循环判定copyset是否加载完成的内部睡眠时间
# Sleep interval in the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
@@ -124,34 +125,34 @@ copyset.check_syncing_interval_ms=500
#
# Clone settings
#
# 禁止使用curveclient
# Whether to disable the use of curveclient
clone.disable_curve_client=false
# 禁止使用s3adapter
# Whether to disable the use of s3adapter
clone.disable_s3_adapter=false
# 克隆的分片大小,一般1MB
# Slice size used for cloning, usually 1MB
clone.slice_size=1048576
# 读clone chunk时是否需要paste到本地
# 该配置对recover chunk请求类型无效
# Whether data read from a clone chunk needs to be pasted to the local chunk
# This configuration has no effect on recover-chunk requests
clone.enable_paste=false
# 克隆的线程数量
# Number of threads for cloning
clone.thread_num=10
# 克隆的队列深度
# Queue depth for cloning
clone.queue_depth=6000
# curve用户名
# Curve username
curve.root_username=root
# curve密码
# Curve password
curve.root_password=root_password
# client配置文件
# Client configuration file
curve.config_path=conf/cs_client.conf
# s3配置文件
# S3 configuration file
s3.config_path=conf/s3.conf
# Curve File time to live
curve.curve_file_timeout_s=30

#
# Local FileSystem settings
#
# 是否开启使用renameat2,ext4内核3.15以后开始支持
# Whether to use renameat2; ext4 supports it since Linux kernel 3.15
fs.enable_renameat2=true
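# (Background note: renameat2 extends rename() with flags such as
# RENAME_NOREPLACE and RENAME_EXCHANGE, available since Linux 3.15.)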

#
@@ -171,27 +172,27 @@ storeng.sync_write=false

#
# Concurrent apply module
# 并发模块写线程的并发度,一般是10
# Concurrency (number of write threads) of the concurrent apply module, generally 10
wconcurrentapply.size=10
# 并发模块写线程的队列深度
# Queue depth of the write threads in the concurrent apply module
wconcurrentapply.queuedepth=1
# 并发模块读线程的并发度,一般是5
# Concurrency (number of read threads) of the concurrent apply module, generally 5
rconcurrentapply.size=5
# 并发模块读线程的队列深度
# Queue depth of the read threads in the concurrent apply module
rconcurrentapply.queuedepth=1

#
# Chunkfile pool
#
# 是否开启从chunkfilepool获取chunk,一般是true
# Whether to enable obtaining chunks from chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool=true
# chunkfilepool目录
# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/chunks
# chunkfilepool meta文件路径
# chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
# chunkfilepool meta文件大小
# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
# chunkfilepool get chunk最大重试次数
# Maximum number of retries when getting a chunk from the chunkfilepool
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
@@ -211,23 +212,23 @@ chunkfilepool.thread_num=1
#
# WAL file pool
#
# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
# Whether the walpool shares the chunkfilepool; if true, the configuration entries from the third one onward have no effect
walfilepool.use_chunk_file_pool=true
# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
# Takes effect when the WAL pool and chunkfilepool are shared; reserves space for the WAL pool during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
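# Illustrative example (assuming the value is a percentage): with reserve=15,
# roughly 15% of the shared pool's capacity is set aside for the WAL pool.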
# 是否开启从walfilepool获取chunk,一般是true
# Whether to enable obtaining chunks from walfilepool, usually true
walfilepool.enable_get_segment_from_pool=true
# walpool目录
# Walpool directory
walfilepool.file_pool_dir=./0/
# walpool meta文件路径
# Walpool meta file path
walfilepool.meta_path=./walfilepool.meta
# walpool meta文件大小
# Walpool meta file size
walfilepool.segment_size=8388608
# WAL metapage大小
# WAL metapage size
walfilepool.metapage_size=4096
# WAL filepool 元数据文件大小
# WAL filepool metadata file size
walfilepool.meta_file_size=4096
# WAL filepool get chunk最大重试次数
# Maximum number of retries when getting a chunk from the WAL filepool
walfilepool.retry_times=5
# Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true
@@ -241,14 +242,14 @@ walfilepool.thread_num=1
#
# trash settings
#
# chunkserver回收数据彻底删除的过期时间
# Time after which recycled data on the chunkserver is permanently deleted
trash.expire_afterSec=300
# chunkserver检查回收数据过期时间的周期
# Interval at which the chunkserver checks recycled data for expiration
trash.scan_periodSec=120
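# Illustrative example: with expire_afterSec=300 and scan_periodSec=120,
# recycled data is removed 300-420s after entering the trash, since the
# scanner may notice an expired item up to one scan period late.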

# common option
#
# chunkserver 日志存放文件夹
# Chunkserver log storage folder
chunkserver.common.logDir=./
# 单元测试情况下
# For unit tests
# chunkserver.common.logDir=./runlog/
7 changes: 3 additions & 4 deletions curvefs/test/volume/bitmap_allocator_test.cpp
@@ -18,9 +18,8 @@

#include <gtest/gtest.h>

#include "curvefs/test/volume/common.h"

#include "absl/memory/memory.h"
#include "curvefs/test/volume/common.h"

namespace curvefs {
namespace volume {
@@ -100,7 +99,7 @@ TEST_F(BitmapAllocatorTest, AllocFromBitmap) {

Extents expected = {
Extent(opt_.startOffset + opt_.length * opt_.smallAllocProportion,
allocSize)};
allocSize)};

ASSERT_EQ(expected, exts);

@@ -225,7 +224,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) {
uint64_t off = opt_.startOffset;
uint64_t usedSize = 0;

// 对于每一个 size per bit,随机其中一部分设置
// For each size-per-bit region, randomly mark a portion of it as used
auto select = [this, &usedSize](uint64_t startOffset) {
auto off = rand_r(&seed) * 4096 % opt_.sizePerBit;
auto len = rand_r(&seed) * 4096 % opt_.sizePerBit;
12 changes: 7 additions & 5 deletions curvefs_python/cbd_client.h
@@ -56,15 +56,17 @@ class CBDClient {
int Rename(UserInfo_t* info, const char* oldpath, const char* newpath);
int Extend(const char* filename, UserInfo_t* info, uint64_t size);

// 同步读写
int Read(int fd, char* buf, unsigned long offset, unsigned long length); // NOLINT
int Write(int fd, const char* buf, unsigned long offset, unsigned long length); // NOLINT
// Synchronous read and write
int Read(int fd, char* buf, unsigned long offset,
unsigned long length); // NOLINT
int Write(int fd, const char* buf, unsigned long offset,
unsigned long length); // NOLINT

// 异步读写
// Asynchronous read and write
int AioRead(int fd, AioContext* aioctx);
int AioWrite(int fd, AioContext* aioctx);
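// Illustrative note (assumed semantics, not part of this change): the caller
// fills an AioContext with buffer, offset, length and a completion callback;
// AioRead/AioWrite return immediately and the callback runs when the IO
// completes.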

// 获取文件的基本信息
// Obtain basic information about the file
int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo);
int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info);

7 changes: 4 additions & 3 deletions curvefs_python/test.py
@@ -19,11 +19,12 @@

import os


def exec_cmd(cmd):
ret = os.system(cmd)
if ret == 0:
print cmd + " exec success"
else :
else:
print cmd + " exec fail, ret = " + str(ret)


@@ -37,10 +38,10 @@ def exec_cmd(cmd):
exec_cmd(cmd)
cmd = "curve list --user k8s --dirname /k8s"
exec_cmd(cmd)
# 不是root,失败
# Not root, so this fails
cmd = "curve list --user k8s --dirname /"
exec_cmd(cmd)
# root没有传入密码,失败
# root without the password, so this fails
cmd = "curve list --user root --dirname /"
exec_cmd(cmd)
cmd = "curve list --user root --dirname / --password root_password"
4 changes: 2 additions & 2 deletions deploy/local/chunkserver/conf/chunkserver.conf.0
@@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat
chunkserver.disk_type=nvme
chunkserver.snapshot_throttle_throughput_bytes=41943040
chunkserver.snapshot_throttle_check_cycles=4
# 限制inflight io数量,一般是5000
# Limit the number of inflight IOs, usually 5000
chunkserver.max_inflight_requests=5000

#
@@ -145,7 +145,7 @@ chunkfilepool.retry_times=5
#
# WAL file pool
#
# walpool是否共用chunkfilepool,如果为true,则以下配置无效
# Whether the walpool shares the chunkfilepool; if true, the following configuration entries have no effect
walfilepool.use_chunk_file_pool=true
walfilepool.enable_get_segment_from_pool=false
walfilepool.file_pool_dir=./0/walfilepool/
4 changes: 2 additions & 2 deletions deploy/local/chunkserver/conf/chunkserver.conf.1
@@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat
chunkserver.disk_type=nvme
chunkserver.snapshot_throttle_throughput_bytes=41943040
chunkserver.snapshot_throttle_check_cycles=4
# 限制inflight io数量,一般是5000
# Limit the number of inflight IOs, usually 5000
chunkserver.max_inflight_requests=5000

#
@@ -143,7 +143,7 @@ chunkfilepool.retry_times=5
#
# WAL file pool
#
# walpool是否共用chunkfilepool,如果为true,则以下配置无效
# Whether the walpool shares the chunkfilepool; if true, the following configuration entries have no effect
walfilepool.use_chunk_file_pool=true
walfilepool.enable_get_segment_from_pool=false
walfilepool.file_pool_dir=./1/walfilepool/