curvefs: support curvebs as data backend
wu-hanqing committed Apr 5, 2022
1 parent fccfc6c commit a155b56
Showing 196 changed files with 10,963 additions and 5,793 deletions.
21 changes: 12 additions & 9 deletions WORKSPACE
@@ -20,7 +20,7 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
 
 git_repository(
     name = "com_github_baidu_braft",
-    remote = "https://github.com/baidu/braft",
+    remote = "https://gitee.com/baidu/braft",
     commit = "e255c0e4b18d1a8a5d484d4b647f41ff1385ef1e",
 )
 
@@ -36,7 +36,7 @@ http_archive(
     name = "com_google_protobuf",
     sha256 = "cef7f1b5a7c5fba672bec2a319246e8feba471f04dcebfe362d55930ee7c1c30",
     strip_prefix = "protobuf-3.5.0",
-    urls = ["https://github.com/google/protobuf/archive/v3.5.0.zip"],
+    urls = ["https://curve-build.nos-eastchina1.126.net/protobuf-3.5.0.zip"],
 )
 
 bind(
@@ -48,7 +48,7 @@ bind(
 new_git_repository(
     name = "com_google_googletest",
     build_file = "bazel/gmock.BUILD",
-    remote = "https://github.com/google/googletest",
+    remote = "https://gitee.com/mirrors/googletest",
     tag = "release-1.8.0",
 )
 
@@ -61,7 +61,7 @@ bind(
 # when brpc's BUILD file depends on glog, it names the dependency directly as "@com_github_google_glog//:glog"
 git_repository(
     name = "com_github_google_glog",
-    remote = "https://github.com/google/glog",
+    remote = "https://gitee.com/mirrors/glog",
     commit = "4cc89c9e2b452db579397887c37f302fb28f6ca1",
     patch_args = ["-p1"],
     patches = ["//:thirdparties/glog/glog.patch"],
@@ -77,7 +77,7 @@ http_archive(
     name = "com_github_gflags_gflags",
     strip_prefix = "gflags-2.2.2",
     urls = [
-        "https://mirror.bazel.build/github.com/gflags/gflags/archive/v2.2.2.tar.gz",
+        "https://curve-build.nos-eastchina1.126.net/gflags-2.2.2.tar.gz",
         "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz",
     ],
 )
@@ -91,7 +91,7 @@ new_http_archive(
     name = "com_github_google_leveldb",
     build_file = "bazel/leveldb.BUILD",
     strip_prefix = "leveldb-a53934a3ae1244679f812d998a4f16f2c7f309a6",
-    url = "https://github.com/google/leveldb/archive/a53934a3ae1244679f812d998a4f16f2c7f309a6.tar.gz",
+    url = "https://curve-build.nos-eastchina1.126.net/leveldb-a53934a3ae1244679f812d998a4f16f2c7f309a6.tar.gz",
 )
 
 bind(
@@ -101,7 +101,7 @@ bind(
 
 git_repository(
     name = "com_github_apache_brpc",
-    remote = "https://github.com/apache/incubator-brpc",
+    remote = "https://gitee.com/baidu/BRPC",
     commit = "1b9e00641cbec1c8803da6a1f7f555398c954cb0",
     patches = ["//:thirdparties/brpc/brpc.patch"],
     patch_args = ["-p1"],
@@ -131,7 +131,7 @@ bind(
 new_git_repository(
     name = "jsoncpp",
     build_file = "bazel/jsoncpp.BUILD",
-    remote = "https://github.com/open-source-parsers/jsoncpp.git",
+    remote = "https://gitee.com/mirrors/jsoncpp",
     tag = "1.8.4",
 )
 
@@ -149,7 +149,7 @@ new_local_repository(
 new_http_archive(
     name = "aws",
     urls = [
-        "https://github.com/aws/aws-sdk-cpp/archive/1.7.340.tar.gz",
+        "https://curve-build.nos-eastchina1.126.net/aws-sdk-cpp-1.7.340.tar.gz",
         "https://mirror.bazel.build/github.com/aws/aws-sdk-cpp/archive/1.7.340.tar.gz",
     ],
     sha256 = "2e82517045efb55409cff1408c12829d9e8aea22c1e2888529cb769b7473b0bf",
@@ -160,6 +160,7 @@ new_http_archive(
 new_http_archive(
     name = "aws_c_common",
     urls = [
+        "https://curve-build.nos-eastchina1.126.net/aws-c-common-0.4.29.tar.gz",
         "https://github.com/awslabs/aws-c-common/archive/v0.4.29.tar.gz",
         "https://mirror.tensorflow.org/github.com/awslabs/aws-c-common/archive/v0.4.29.tar.gz",
     ],
@@ -171,6 +172,7 @@ new_http_archive(
 new_http_archive(
     name = "aws_c_event_stream",
     urls = [
+        "https://curve-build.nos-eastchina1.126.net/aws-c-event-stream-0.1.4.tar.gz",
         "https://github.com/awslabs/aws-c-event-stream/archive/v0.1.4.tar.gz",
         "https://mirror.tensorflow.org/github.com/awslabs/aws-c-event-stream/archive/v0.1.4.tar.gz",
     ],
@@ -182,6 +184,7 @@ new_http_archive(
 new_http_archive(
     name = "aws_checksums",
     urls = [
+        "https://curve-build.nos-eastchina1.126.net/aws-checksums-0.1.5.tar.gz",
         "https://github.com/awslabs/aws-checksums/archive/v0.1.5.tar.gz",
         "https://mirror.tensorflow.org/github.com/awslabs/aws-checksums/archive/v0.1.5.tar.gz",
     ],
19 changes: 19 additions & 0 deletions curvefs/conf/client.conf
@@ -50,7 +50,13 @@ spaceserver.spaceaddr=127.0.0.1:19999 # __ANSIBLE_TEMPLATE__ {{ groups.space |
 spaceserver.rpcTimeoutMs=1000
 
 #### bdev
+# curve client's config file
 bdev.confpath=/etc/curve/client.conf
+# currently, we only use the synchronous read/write interface of curve client,
+# so we use a dedicated thread pool to issue sync IO requests to curve client,
+# to avoid performance degradation when a single fs IO request fans out into
+# more than one request to curve client
+# TODO(wuhanqing): remove this thread pool and use the asynchronous interface
+bdev.threadnum=10
 
 #### extentManager
 extentManager.preAllocSize=65536
@@ -79,6 +85,19 @@ volume.bigFileSize=1048576
 volume.volBlockSize=4096
 volume.fsBlockSize=4096
 
+# allocator type, supported: {bitmap}
+volume.allocator.type=bitmap
+
+## for bitmap allocator
+# size of each bit, default is 4 MiB
+volume.bitmapallocator.size_per_bit=4194304
+
+# small allocation proportion [0-1]
+volume.bitmapallocator.small_alloc_proportion=0.2
+
+# number of block groups allocated at once
+volume.blockgroup.allocate_once=4
+
 #### s3
 # the max size that fuse send
 s3.fuseMaxSize=131072
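The bdev comments in the diff above describe fanning a single filesystem request out into several synchronous curve-client calls on a dedicated pool of workers (sized by bdev.threadnum). A minimal C++ sketch of that fan-out idea follows; BlockDeviceClient and its Read signature are hypothetical stand-ins for the real curve client API, and std::async stands in for the fixed-size pool:

// Sketch: fan one fs-level read out into parallel synchronous extent reads.
#include <cstdint>
#include <future>
#include <vector>

struct Extent {
    uint64_t offset;   // offset on the curvebs volume
    uint64_t length;   // length of this extent in bytes
};

// Stand-in for the real curve client; Read blocks until the data arrives.
class BlockDeviceClient {
 public:
    bool Read(uint64_t /*offset*/, char* /*buf*/, uint64_t /*length*/) {
        return true;   // stub: the real client would issue an RPC to curvebs
    }
};

// One fs-level read that maps to several extents: each extent becomes a
// synchronous Read on its own worker so the extents proceed in parallel,
// instead of blocking the caller serially on each one.
bool ReadExtents(BlockDeviceClient* bdev, const std::vector<Extent>& extents,
                 char* buf) {
    std::vector<std::future<bool>> parts;
    uint64_t bufOffset = 0;
    for (const auto& ext : extents) {
        parts.push_back(std::async(std::launch::async,
                                   [bdev, ext, buf, bufOffset] {
            return bdev->Read(ext.offset, buf + bufOffset, ext.length);
        }));
        bufOffset += ext.length;
    }
    bool ok = true;
    for (auto& part : parts) {
        ok = part.get() && ok;   // any failed extent fails the whole read
    }
    return ok;
}

With the asynchronous interface the TODO mentions, the extra workers (and their thread switches) could be dropped entirely.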
5 changes: 4 additions & 1 deletion curvefs/conf/tools.conf
@@ -14,11 +14,14 @@ etcdAddr=127.0.0.1:12379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEAD
 blockSize=1048576
 fsType=s3
 # volume
-volumeSize=1048576
+volumeSize=0
 volumeBlockSize=4096
 volumeName=volume
 volumeUser=user
 volumePassword=password
+volumeBlockGroupSize=134217728
+# support |AtStart| and |AtEnd|
+volumeBitmapLocation=AtStart
 # s3
 s3.ak=ak
 s3.sk=sk
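A quick sanity check of how these sizes interact: with volumeBlockGroupSize at 128 MiB and the 4 MiB size_per_bit from client.conf above, each block group's allocation bitmap tracks 32 bits. The arithmetic below assumes the bitmap is a plain bits/8-byte array, which the configs themselves do not state:

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t kBlockGroupSize = 134217728;  // volumeBlockGroupSize, 128 MiB
    const uint64_t kSizePerBit = 4194304;        // size_per_bit, 4 MiB

    const uint64_t bitsPerGroup = kBlockGroupSize / kSizePerBit;  // 32 bits
    const uint64_t bitmapBytes = (bitsPerGroup + 7) / 8;          // 4 bytes

    std::printf("bits per block group: %llu, bitmap bytes: %llu\n",
                static_cast<unsigned long long>(bitsPerGroup),
                static_cast<unsigned long long>(bitmapBytes));
    return 0;
}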
10 changes: 10 additions & 0 deletions curvefs/proto/common.proto
@@ -23,12 +23,22 @@ enum FSType {
     TYPE_S3 = 2;
 }
 
+// bitmap location for each block group
+enum BitmapLocation {
+    AtStart = 1;
+    AtEnd = 2;
+}
+
 message Volume {
     required uint64 volumeSize = 1;
     required uint64 blockSize = 2;
     required string volumeName = 3;
     required string user = 4;
     optional string password = 5;
+    required uint64 blockGroupSize = 6;
+    required BitmapLocation bitmapLocation = 7;
+
+    // TODO(all): maybe we need the curvebs cluster's mds ip and port here
 }
 
 message S3Info {
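The new blockGroupSize and bitmapLocation fields describe block groups whose allocation bitmap sits at one end of the group, selected by AtStart or AtEnd. Below is a hedged sketch of how a reader might compute the bitmap's byte offset for a given group; the exact on-disk layout is an assumption drawn from the enum names, not something this commit spells out:

#include <cstdint>

enum class BitmapLocation { AtStart = 1, AtEnd = 2 };

// Byte offset of the allocation bitmap of the i-th block group, assuming the
// bitmap is flush against whichever end of the group bitmapLocation selects.
uint64_t BitmapOffset(uint64_t groupIndex, uint64_t blockGroupSize,
                      uint64_t bitmapBytes, BitmapLocation loc) {
    const uint64_t groupStart = groupIndex * blockGroupSize;
    if (loc == BitmapLocation::AtStart) {
        return groupStart;                             // bitmap leads the group
    }
    return groupStart + blockGroupSize - bitmapBytes;  // bitmap trails the group
}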
13 changes: 7 additions & 6 deletions curvefs/proto/metaserver.proto
@@ -146,10 +146,11 @@ message GetInodeRequest {
 }
 
 enum FsFileType {
-    TYPE_DIRECTORY = 1;
-    TYPE_FILE = 2;
-    TYPE_SYM_LINK = 3;
-    TYPE_S3 = 4;
+    TYPE_DIRECTORY = 1;  // 1 << 0
+    TYPE_FILE = 2;       // 1 << 1
+    TYPE_SYM_LINK = 4;   // 1 << 2
+    TYPE_S3 = 18;        // (1 << 4) | TYPE_FILE
+    TYPE_VOLUME = 34;    // (1 << 5) | TYPE_FILE
 };
 
 message VolumeExtent {
@@ -194,7 +195,7 @@ message Inode {
     required FsFileType type = 14;
     optional string symlink = 15;  // TYPE_SYM_LINK only
     optional uint64 rdev = 16;
-    optional VolumeExtentList volumeExtentList = 17;  // TYPE_FILE only
+    map<uint64, VolumeExtentList> volumeExtentMap = 17;  // TYPE_FILE only, key is file's offset
     map<uint64, S3ChunkInfoList> s3ChunkInfoMap = 18;  // TYPE_S3 only, first is chunk index
     optional uint32 dtime = 19;
     optional uint32 openmpcount = 20;  // openmpcount mount points had the file open
@@ -266,7 +267,7 @@ message UpdateInodeRequest {
     optional uint32 uid = 13;
     optional uint32 gid = 14;
     optional uint32 mode = 15;
-    optional VolumeExtentList volumeExtentList = 16;
+    map<uint64, VolumeExtentList> volumeExtentMap = 16;
     map<uint64, S3ChunkInfoList> s3ChunkInfoMap = 17;
     optional uint32 nlink = 18;
     optional InodeOpenStatusChange inodeOpenstatusChange = 19;
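Two things stand out in this diff. First, FsFileType is re-numbered so the values double as bit flags: TYPE_S3 (18) and TYPE_VOLUME (34) both contain the TYPE_FILE bit, so "is this a regular file?" reduces to a bit test, as the sketch below illustrates (whether the metaserver actually tests it this way is an assumption; the encoding is taken from the diff's own comments). Second, the single volumeExtentList becomes a map keyed by file offset, mirroring s3ChunkInfoMap, so an update can replace one slot of extents rather than rewriting the whole list.

#include <cassert>
#include <cstdint>

enum FsFileType : uint32_t {
    TYPE_DIRECTORY = 1,  // 1 << 0
    TYPE_FILE = 2,       // 1 << 1
    TYPE_SYM_LINK = 4,   // 1 << 2
    TYPE_S3 = 18,        // (1 << 4) | TYPE_FILE
    TYPE_VOLUME = 34,    // (1 << 5) | TYPE_FILE
};

// Both data backends (S3 and curvebs volume) carry the TYPE_FILE bit.
inline bool IsRegularFile(FsFileType t) { return (t & TYPE_FILE) != 0; }

int main() {
    assert(IsRegularFile(TYPE_S3));      // data backed by S3
    assert(IsRegularFile(TYPE_VOLUME));  // data backed by a curvebs volume
    assert(!IsRegularFile(TYPE_DIRECTORY));
    assert(!IsRegularFile(TYPE_SYM_LINK));
    return 0;
}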
(diffs for the remaining 191 changed files are not shown here)
