diff --git a/CMakeLists.txt b/CMakeLists.txt index 60ee127a88..6bdfcf27f4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -891,6 +891,7 @@ set(SOURCES util/comparator.cc util/compression.cc util/compression_context_cache.cc + util/compressor.cc util/concurrent_task_limiter_impl.cc util/crc32c.cc util/data_structure.cc @@ -1493,6 +1494,7 @@ if(WITH_TESTS) util/autovector_test.cc util/bloom_test.cc util/coding_test.cc + util/compression_test.cc util/crc32c_test.cc util/defer_test.cc util/dynamic_bloom_test.cc diff --git a/Makefile b/Makefile index 081c11a75d..0561960b6c 100644 --- a/Makefile +++ b/Makefile @@ -1284,6 +1284,9 @@ cache_test: $(OBJ_DIR)/cache/cache_test.o $(TEST_LIBRARY) $(LIBRARY) coding_test: $(OBJ_DIR)/util/coding_test.o $(TEST_LIBRARY) $(LIBRARY) $(AM_LINK) +compression_test: $(OBJ_DIR)/util/compression_test.o $(TEST_LIBRARY) $(LIBRARY) + $(AM_LINK) + hash_test: $(OBJ_DIR)/util/hash_test.o $(TEST_LIBRARY) $(LIBRARY) $(AM_LINK) diff --git a/TARGETS b/TARGETS index f7bb513ea3..3057f2b1ce 100644 --- a/TARGETS +++ b/TARGETS @@ -252,6 +252,7 @@ cpp_library_wrapper(name="rocksdb_lib", srcs=[ "util/comparator.cc", "util/compression.cc", "util/compression_context_cache.cc", + "util/compressor.cc", "util/concurrent_task_limiter_impl.cc", "util/crc32c.cc", "util/crc32c_arm64.cc", @@ -4667,6 +4668,12 @@ cpp_unittest_wrapper(name="compressed_secondary_cache_test", extra_compiler_flags=[]) +cpp_unittest_wrapper(name="compression_test", + srcs=["util/compression_test.cc"], + deps=[":rocksdb_test_lib"], + extra_compiler_flags=[]) + + cpp_unittest_wrapper(name="configurable_test", srcs=["options/configurable_test.cc"], deps=[":rocksdb_test_lib"], diff --git a/cache/compressed_secondary_cache.cc b/cache/compressed_secondary_cache.cc index 4a35b8d1e6..e5190e6f36 100644 --- a/cache/compressed_secondary_cache.cc +++ b/cache/compressed_secondary_cache.cc @@ -73,15 +73,16 @@ std::unique_ptr CompressedSecondaryCache::Lookup( s = helper->create_cb(Slice(ptr->get(), 
handle_value_charge), create_context, allocator, &value, &charge); } else { - UncompressionContext uncompression_context(cache_options_.compression_type); - UncompressionInfo uncompression_info(uncompression_context, - UncompressionDict::GetEmptyDict(), - cache_options_.compression_type); + auto compressor = + BuiltinCompressor::GetCompressor(cache_options_.compression_type); + UncompressionInfo uncompression_info(UncompressionDict::GetEmptyDict(), + cache_options_.compress_format_version, + allocator); size_t uncompressed_size{0}; - CacheAllocationPtr uncompressed = UncompressData( - uncompression_info, (char*)ptr->get(), handle_value_charge, - &uncompressed_size, cache_options_.compress_format_version, allocator); + CacheAllocationPtr uncompressed = uncompression_info.UncompressData( + compressor.get(), (char*)ptr->get(), handle_value_charge, + &uncompressed_size); if (!uncompressed) { cache_->Release(lru_handle, /*erase_if_last_ref=*/true); @@ -151,17 +152,15 @@ Status CompressedSecondaryCache::Insert(const Slice& key, if (cache_options_.compression_type != kNoCompression && !cache_options_.do_not_compress_roles.Contains(helper->role)) { PERF_COUNTER_ADD(compressed_sec_cache_uncompressed_bytes, size); - CompressionOptions compression_opts; - CompressionContext compression_context(cache_options_.compression_type, - compression_opts); + auto compressor = + BuiltinCompressor::GetCompressor(cache_options_.compression_type); uint64_t sample_for_compression{0}; - CompressionInfo compression_info( - compression_opts, compression_context, CompressionDict::GetEmptyDict(), - cache_options_.compression_type, sample_for_compression); + CompressionInfo compression_info(CompressionDict::GetEmptyDict(), + cache_options_.compress_format_version, + sample_for_compression); bool success = - CompressData(val, compression_info, - cache_options_.compress_format_version, &compressed_val); + compression_info.CompressData(compressor.get(), val, &compressed_val); if (!success) { return 
Status::Corruption("Error compressing value."); diff --git a/db/arena_wrapped_db_iter.cc b/db/arena_wrapped_db_iter.cc index b101fbbc75..c738f1bb08 100644 --- a/db/arena_wrapped_db_iter.cc +++ b/db/arena_wrapped_db_iter.cc @@ -15,6 +15,7 @@ #include "rocksdb/options.h" #include "table/internal_iterator.h" #include "table/iterator_wrapper.h" +#include "util/string_util.h" #include "util/user_comparator_wrapper.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/blob/blob_file_builder.cc b/db/blob/blob_file_builder.cc index 35269fdb50..70d4dcb93e 100644 --- a/db/blob/blob_file_builder.cc +++ b/db/blob/blob_file_builder.cc @@ -66,7 +66,7 @@ BlobFileBuilder::BlobFileBuilder( immutable_options_(immutable_options), min_blob_size_(mutable_cf_options->min_blob_size), blob_file_size_(mutable_cf_options->blob_file_size), - blob_compression_type_(mutable_cf_options->blob_compression_type), + blob_compressor_(mutable_cf_options->blob_compressor), prepopulate_blob_cache_(mutable_cf_options->prepopulate_blob_cache), file_options_(file_options), db_id_(std::move(db_id)), @@ -91,6 +91,10 @@ BlobFileBuilder::BlobFileBuilder( assert(blob_file_paths_->empty()); assert(blob_file_additions_); assert(blob_file_additions_->empty()); + + if (blob_compressor_ == nullptr) { + blob_compressor_ = BuiltinCompressor::GetCompressor(kNoCompression); + } } BlobFileBuilder::~BlobFileBuilder() = default; @@ -150,7 +154,7 @@ Status BlobFileBuilder::Add(const Slice& key, const Slice& value, } BlobIndex::EncodeBlob(blob_index, blob_file_number, blob_offset, blob.size(), - blob_compression_type_); + blob_compressor_->GetCompressionType()); return Status::OK(); } @@ -227,7 +231,8 @@ Status BlobFileBuilder::OpenBlobFileIfNeeded() { constexpr bool has_ttl = false; constexpr ExpirationRange expiration_range; - BlobLogHeader header(column_family_id_, blob_compression_type_, has_ttl, + BlobLogHeader header(column_family_id_, + blob_compressor_->GetCompressionType(), has_ttl, expiration_range); { @@ -255,27 
+260,18 @@ Status BlobFileBuilder::CompressBlobIfNeeded( assert(compressed_blob->empty()); assert(immutable_options_); - if (blob_compression_type_ == kNoCompression) { + if (blob_compressor_->GetCompressionType() == kNoCompression) { return Status::OK(); } - // TODO: allow user CompressionOptions, including max_compressed_bytes_per_kb - CompressionOptions opts; - CompressionContext context(blob_compression_type_, opts); - constexpr uint64_t sample_for_compression = 0; - - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), - blob_compression_type_, sample_for_compression); - - constexpr uint32_t compression_format_version = 2; + CompressionInfo info; bool success = false; { StopWatch stop_watch(immutable_options_->clock, immutable_options_->stats, BLOB_DB_COMPRESSION_MICROS); - success = - CompressData(*blob, info, compression_format_version, compressed_blob); + success = info.CompressData(blob_compressor_.get(), *blob, compressed_blob); } if (!success) { diff --git a/db/blob/blob_file_builder.h b/db/blob/blob_file_builder.h index 8e7aab502d..86bf359c8c 100644 --- a/db/blob/blob_file_builder.h +++ b/db/blob/blob_file_builder.h @@ -15,6 +15,7 @@ #include "rocksdb/env.h" #include "rocksdb/rocksdb_namespace.h" #include "rocksdb/types.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -89,7 +90,7 @@ class BlobFileBuilder { const ImmutableOptions* immutable_options_; uint64_t min_blob_size_; uint64_t blob_file_size_; - CompressionType blob_compression_type_; + std::shared_ptr blob_compressor_; PrepopulateBlobCache prepopulate_blob_cache_; const FileOptions* file_options_; const std::string db_id_; diff --git a/db/blob/blob_file_builder_test.cc b/db/blob/blob_file_builder_test.cc index 5882e219fe..4a4e644a0f 100644 --- a/db/blob/blob_file_builder_test.cc +++ b/db/blob/blob_file_builder_test.cc @@ -405,16 +405,12 @@ TEST_F(BlobFileBuilderTest, Compression) { ASSERT_EQ(blob_file_addition.GetBlobFileNumber(), blob_file_number); 
ASSERT_EQ(blob_file_addition.GetTotalBlobCount(), 1); - CompressionOptions opts; - CompressionContext context(kSnappyCompression, opts); - constexpr uint64_t sample_for_compression = 0; - - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), - kSnappyCompression, sample_for_compression); + auto compressor = BuiltinCompressor::GetCompressor(kSnappyCompression); + ASSERT_NE(compressor, nullptr); std::string compressed_value; - ASSERT_TRUE(Snappy_Compress(info, uncompressed_value.data(), - uncompressed_value.size(), &compressed_value)); + ASSERT_OK(compressor->Compress(CompressionInfo(), uncompressed_value, + &compressed_value)); ASSERT_EQ(blob_file_addition.GetTotalBlobBytes(), BlobLogRecord::kHeaderSize + key_size + compressed_value.size()); diff --git a/db/blob/blob_file_reader.cc b/db/blob/blob_file_reader.cc index bdab3ae68e..02721d301d 100644 --- a/db/blob/blob_file_reader.cc +++ b/db/blob/blob_file_reader.cc @@ -50,12 +50,11 @@ Status BlobFileReader::Create( Statistics* const statistics = immutable_options.stats; - CompressionType compression_type = kNoCompression; + std::shared_ptr compressor; { - const Status s = - ReadHeader(file_reader.get(), read_options, column_family_id, - statistics, &compression_type); + const Status s = ReadHeader(file_reader.get(), read_options, + column_family_id, statistics, &compressor); if (!s.ok()) { return s; } @@ -70,7 +69,7 @@ Status BlobFileReader::Create( } blob_file_reader->reset( - new BlobFileReader(std::move(file_reader), file_size, compression_type, + new BlobFileReader(std::move(file_reader), file_size, compressor, immutable_options.clock, statistics)); return Status::OK(); @@ -140,9 +139,9 @@ Status BlobFileReader::ReadHeader(const RandomAccessFileReader* file_reader, const ReadOptions& read_options, uint32_t column_family_id, Statistics* statistics, - CompressionType* compression_type) { + std::shared_ptr* compressor) { assert(file_reader); - assert(compression_type); + assert(compressor); Slice 
header_slice; Buffer buf; @@ -184,7 +183,7 @@ Status BlobFileReader::ReadHeader(const RandomAccessFileReader* file_reader, return Status::Corruption("Column family ID mismatch"); } - *compression_type = header.compression; + *compressor = BuiltinCompressor::GetCompressor(header.compression); return Status::OK(); } @@ -281,11 +280,11 @@ Status BlobFileReader::ReadFromFile(const RandomAccessFileReader* file_reader, BlobFileReader::BlobFileReader( std::unique_ptr&& file_reader, uint64_t file_size, - CompressionType compression_type, SystemClock* clock, + const std::shared_ptr& compressor, SystemClock* clock, Statistics* statistics) : file_reader_(std::move(file_reader)), file_size_(file_size), - compression_type_(compression_type), + compressor_(compressor), clock_(clock), statistics_(statistics) { assert(file_reader_); @@ -295,7 +294,7 @@ BlobFileReader::~BlobFileReader() = default; Status BlobFileReader::GetBlob( const ReadOptions& read_options, const Slice& user_key, uint64_t offset, - uint64_t value_size, CompressionType compression_type, + uint64_t value_size, const std::shared_ptr& compressor, FilePrefetchBuffer* prefetch_buffer, MemoryAllocator* allocator, std::unique_ptr* result, uint64_t* bytes_read) const { assert(result); @@ -306,7 +305,7 @@ Status BlobFileReader::GetBlob( return Status::Corruption("Invalid blob offset"); } - if (compression_type != compression_type_) { + if (compressor->GetCompressionType() != compressor_->GetCompressionType()) { return Status::Corruption("Compression type mismatch when reading blob"); } @@ -374,7 +373,7 @@ Status BlobFileReader::GetBlob( { const Status s = UncompressBlobIfNeeded( - value_slice, compression_type, allocator, clock_, statistics_, result); + value_slice, compressor.get(), allocator, clock_, statistics_, result); if (!s.ok()) { return s; } @@ -420,7 +419,8 @@ void BlobFileReader::MultiGetBlob( *req->status = Status::Corruption("Invalid blob offset"); continue; } - if (req->compression != compression_type_) { + 
if (req->compressor->GetCompressionType() != + compressor_->GetCompressionType()) { *req->status = Status::Corruption("Compression type mismatch when reading a blob"); continue; @@ -522,7 +522,7 @@ void BlobFileReader::MultiGetBlob( // Uncompress blob if needed Slice value_slice(record_slice.data() + adjustments[i], req->len); *req->status = - UncompressBlobIfNeeded(value_slice, compression_type_, allocator, + UncompressBlobIfNeeded(value_slice, compressor_.get(), allocator, clock_, statistics_, &blob_reqs[i].second); if (req->status->ok()) { total_bytes += record_slice.size(); @@ -579,31 +579,28 @@ Status BlobFileReader::VerifyBlob(const Slice& record_slice, } Status BlobFileReader::UncompressBlobIfNeeded( - const Slice& value_slice, CompressionType compression_type, + const Slice& value_slice, Compressor* compressor, MemoryAllocator* allocator, SystemClock* clock, Statistics* statistics, std::unique_ptr* result) { + assert(compressor); assert(result); - if (compression_type == kNoCompression) { + if (compressor->GetCompressionType() == kNoCompression) { BlobContentsCreator::Create(result, nullptr, value_slice, allocator); return Status::OK(); } - UncompressionContext context(compression_type); - UncompressionInfo info(context, UncompressionDict::GetEmptyDict(), - compression_type); + UncompressionInfo info; size_t uncompressed_size = 0; - constexpr uint32_t compression_format_version = 2; CacheAllocationPtr output; { PERF_TIMER_GUARD(blob_decompress_time); StopWatch stop_watch(clock, statistics, BLOB_DB_DECOMPRESSION_MICROS); - output = UncompressData(info, value_slice.data(), value_slice.size(), - &uncompressed_size, compression_format_version, - allocator); + output = info.UncompressData(compressor, value_slice.data(), + value_slice.size(), &uncompressed_size); } TEST_SYNC_POINT_CALLBACK( diff --git a/db/blob/blob_file_reader.h b/db/blob/blob_file_reader.h index fa8aa501d4..4dec3f5a91 100644 --- a/db/blob/blob_file_reader.h +++ b/db/blob/blob_file_reader.h @@ 
-13,6 +13,7 @@ #include "rocksdb/compression_type.h" #include "rocksdb/rocksdb_namespace.h" #include "util/autovector.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -44,7 +45,7 @@ class BlobFileReader { Status GetBlob(const ReadOptions& read_options, const Slice& user_key, uint64_t offset, uint64_t value_size, - CompressionType compression_type, + const std::shared_ptr& compressor, FilePrefetchBuffer* prefetch_buffer, MemoryAllocator* allocator, std::unique_ptr* result, @@ -57,13 +58,16 @@ class BlobFileReader { blob_reqs, uint64_t* bytes_read) const; - CompressionType GetCompressionType() const { return compression_type_; } + const std::shared_ptr& GetCompressor() const { + return compressor_; + } uint64_t GetFileSize() const { return file_size_; } private: BlobFileReader(std::unique_ptr&& file_reader, - uint64_t file_size, CompressionType compression_type, + uint64_t file_size, + const std::shared_ptr& compressor, SystemClock* clock, Statistics* statistics); static Status OpenFile(const ImmutableOptions& immutable_options, @@ -77,7 +81,7 @@ class BlobFileReader { static Status ReadHeader(const RandomAccessFileReader* file_reader, const ReadOptions& read_options, uint32_t column_family_id, Statistics* statistics, - CompressionType* compression_type); + std::shared_ptr* compressor); static Status ReadFooter(const RandomAccessFileReader* file_reader, const ReadOptions& read_options, uint64_t file_size, @@ -95,7 +99,7 @@ class BlobFileReader { uint64_t value_size); static Status UncompressBlobIfNeeded(const Slice& value_slice, - CompressionType compression_type, + Compressor* compressor, MemoryAllocator* allocator, SystemClock* clock, Statistics* statistics, @@ -103,7 +107,7 @@ class BlobFileReader { std::unique_ptr file_reader_; uint64_t file_size_; - CompressionType compression_type_; + std::shared_ptr compressor_; SystemClock* clock_; Statistics* statistics_; }; diff --git a/db/blob/blob_file_reader_test.cc b/db/blob/blob_file_reader_test.cc 
index b6049d1ef5..8b85af0ad0 100644 --- a/db/blob/blob_file_reader_test.cc +++ b/db/blob/blob_file_reader_test.cc @@ -73,17 +73,14 @@ void WriteBlobFile(const ImmutableOptions& immutable_options, blob_sizes[i] = blobs[i].size(); } } else { - CompressionOptions opts; - CompressionContext context(compression, opts); - constexpr uint64_t sample_for_compression = 0; - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), - compression, sample_for_compression); - - constexpr uint32_t compression_format_version = 2; + auto compressor = + BuiltinCompressor::GetCompressor(compression, CompressionOptions()); + ASSERT_NE(compressor, nullptr); + CompressionInfo info; for (size_t i = 0; i < num; ++i) { - ASSERT_TRUE(CompressData(blobs[i], info, compression_format_version, - &compressed_blobs[i])); + ASSERT_TRUE( + info.CompressData(compressor.get(), blobs[i], &compressed_blobs[i])); blobs_to_write[i] = compressed_blobs[i]; blob_sizes[i] = compressed_blobs[i].size(); } @@ -183,12 +180,15 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { constexpr FilePrefetchBuffer* prefetch_buffer = nullptr; constexpr MemoryAllocator* allocator = nullptr; + std::shared_ptr no_compressor = + BuiltinCompressor::GetCompressor(kNoCompression); + { std::unique_ptr value; uint64_t bytes_read = 0; ASSERT_OK(reader->GetBlob(read_options, keys[0], blob_offsets[0], - blob_sizes[0], kNoCompression, prefetch_buffer, + blob_sizes[0], no_compressor, prefetch_buffer, allocator, &value, &bytes_read)); ASSERT_NE(value, nullptr); ASSERT_EQ(value->data(), blobs[0]); @@ -206,7 +206,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { for (size_t i = 0; i < num_blobs; ++i) { requests_buf[i] = BlobReadRequest(keys[i], blob_offsets[i], blob_sizes[i], - kNoCompression, nullptr, &statuses_buf[i]); + no_compressor.get(), nullptr, &statuses_buf[i]); blob_reqs.emplace_back(&requests_buf[i], std::unique_ptr()); } @@ -230,7 +230,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { uint64_t 
bytes_read = 0; ASSERT_OK(reader->GetBlob(read_options, keys[1], blob_offsets[1], - blob_sizes[1], kNoCompression, prefetch_buffer, + blob_sizes[1], no_compressor, prefetch_buffer, allocator, &value, &bytes_read)); ASSERT_NE(value, nullptr); ASSERT_EQ(value->data(), blobs[1]); @@ -248,7 +248,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { ASSERT_TRUE(reader ->GetBlob(read_options, keys[0], blob_offsets[0] - 1, - blob_sizes[0], kNoCompression, prefetch_buffer, + blob_sizes[0], no_compressor, prefetch_buffer, allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); @@ -262,7 +262,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { ASSERT_TRUE(reader ->GetBlob(read_options, keys[2], blob_offsets[2] + 1, - blob_sizes[2], kNoCompression, prefetch_buffer, + blob_sizes[2], no_compressor, prefetch_buffer, allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); @@ -276,8 +276,9 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { ASSERT_TRUE(reader ->GetBlob(read_options, keys[0], blob_offsets[0], - blob_sizes[0], kZSTD, prefetch_buffer, allocator, - &value, &bytes_read) + blob_sizes[0], + BuiltinCompressor::GetCompressor(kZSTD), + prefetch_buffer, allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); ASSERT_EQ(bytes_read, 0); @@ -293,7 +294,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { ->GetBlob(read_options, shorter_key, blob_offsets[0] - (keys[0].size() - sizeof(shorter_key) + 1), - blob_sizes[0], kNoCompression, prefetch_buffer, + blob_sizes[0], no_compressor, prefetch_buffer, allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); @@ -320,7 +321,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { for (size_t i = 0; i < num_blobs; ++i) { requests_buf[i] = BlobReadRequest(key_refs[i], offsets[i], blob_sizes[i], - kNoCompression, nullptr, &statuses_buf[i]); + no_compressor.get(), nullptr, &statuses_buf[i]); blob_reqs.emplace_back(&requests_buf[i], 
std::unique_ptr()); } @@ -343,7 +344,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { ASSERT_TRUE(reader ->GetBlob(read_options, incorrect_key, blob_offsets[0], - blob_sizes[0], kNoCompression, prefetch_buffer, + blob_sizes[0], no_compressor, prefetch_buffer, allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); @@ -365,7 +366,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { for (size_t i = 0; i < num_blobs; ++i) { requests_buf[i] = BlobReadRequest(key_refs[i], blob_offsets[i], blob_sizes[i], - kNoCompression, nullptr, &statuses_buf[i]); + no_compressor.get(), nullptr, &statuses_buf[i]); blob_reqs.emplace_back(&requests_buf[i], std::unique_ptr()); } @@ -387,8 +388,8 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { ASSERT_TRUE(reader ->GetBlob(read_options, keys[1], blob_offsets[1], - blob_sizes[1] + 1, kNoCompression, - prefetch_buffer, allocator, &value, &bytes_read) + blob_sizes[1] + 1, no_compressor, prefetch_buffer, + allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); ASSERT_EQ(bytes_read, 0); @@ -404,13 +405,13 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { requests_buf[0] = BlobReadRequest(key_refs[0], blob_offsets[0], blob_sizes[0], - kNoCompression, nullptr, &statuses_buf[0]); + no_compressor.get(), nullptr, &statuses_buf[0]); requests_buf[1] = BlobReadRequest(key_refs[1], blob_offsets[1], blob_sizes[1] + 1, - kNoCompression, nullptr, &statuses_buf[1]); + no_compressor.get(), nullptr, &statuses_buf[1]); requests_buf[2] = BlobReadRequest(key_refs[2], blob_offsets[2], blob_sizes[2], - kNoCompression, nullptr, &statuses_buf[2]); + no_compressor.get(), nullptr, &statuses_buf[2]); autovector>> blob_reqs; @@ -687,8 +688,8 @@ TEST_F(BlobFileReaderTest, BlobCRCError) { ASSERT_TRUE(reader ->GetBlob(ReadOptions(), key, blob_offset, blob_size, - kNoCompression, prefetch_buffer, allocator, &value, - &bytes_read) + BuiltinCompressor::GetCompressor(kNoCompression), + prefetch_buffer, 
allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); ASSERT_EQ(bytes_read, 0); @@ -698,7 +699,9 @@ TEST_F(BlobFileReaderTest, BlobCRCError) { } TEST_F(BlobFileReaderTest, Compression) { - if (!Snappy_Supported()) { + std::shared_ptr snappy_compressor = + BuiltinCompressor::GetCompressor(kSnappyCompression); + if (!snappy_compressor->Supported()) { return; } @@ -744,7 +747,7 @@ TEST_F(BlobFileReaderTest, Compression) { uint64_t bytes_read = 0; ASSERT_OK(reader->GetBlob(read_options, key, blob_offset, blob_size, - kSnappyCompression, prefetch_buffer, allocator, + snappy_compressor, prefetch_buffer, allocator, &value, &bytes_read)); ASSERT_NE(value, nullptr); ASSERT_EQ(value->data(), blob); @@ -758,7 +761,7 @@ TEST_F(BlobFileReaderTest, Compression) { uint64_t bytes_read = 0; ASSERT_OK(reader->GetBlob(read_options, key, blob_offset, blob_size, - kSnappyCompression, prefetch_buffer, allocator, + snappy_compressor, prefetch_buffer, allocator, &value, &bytes_read)); ASSERT_NE(value, nullptr); ASSERT_EQ(value->data(), blob); @@ -824,11 +827,12 @@ TEST_F(BlobFileReaderTest, UncompressionError) { std::unique_ptr value; uint64_t bytes_read = 0; - ASSERT_TRUE(reader - ->GetBlob(ReadOptions(), key, blob_offset, blob_size, - kSnappyCompression, prefetch_buffer, allocator, - &value, &bytes_read) - .IsCorruption()); + ASSERT_TRUE( + reader + ->GetBlob(ReadOptions(), key, blob_offset, blob_size, + BuiltinCompressor::GetCompressor(kSnappyCompression), + prefetch_buffer, allocator, &value, &bytes_read) + .IsCorruption()); ASSERT_EQ(value, nullptr); ASSERT_EQ(bytes_read, 0); @@ -915,8 +919,8 @@ TEST_P(BlobFileReaderIOErrorTest, IOError) { ASSERT_TRUE(reader ->GetBlob(ReadOptions(), key, blob_offset, blob_size, - kNoCompression, prefetch_buffer, allocator, - &value, &bytes_read) + BuiltinCompressor::GetCompressor(kNoCompression), + prefetch_buffer, allocator, &value, &bytes_read) .IsIOError()); ASSERT_EQ(value, nullptr); ASSERT_EQ(bytes_read, 0); @@ -1003,8 
+1007,8 @@ TEST_P(BlobFileReaderDecodingErrorTest, DecodingError) { ASSERT_TRUE(reader ->GetBlob(ReadOptions(), key, blob_offset, blob_size, - kNoCompression, prefetch_buffer, allocator, - &value, &bytes_read) + BuiltinCompressor::GetCompressor(kNoCompression), + prefetch_buffer, allocator, &value, &bytes_read) .IsCorruption()); ASSERT_EQ(value, nullptr); ASSERT_EQ(bytes_read, 0); diff --git a/db/blob/blob_read_request.h b/db/blob/blob_read_request.h index f9668ca2ef..94bffb9c74 100644 --- a/db/blob/blob_read_request.h +++ b/db/blob/blob_read_request.h @@ -11,6 +11,7 @@ #include "rocksdb/slice.h" #include "rocksdb/status.h" #include "util/autovector.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -27,7 +28,7 @@ struct BlobReadRequest { size_t len = 0; // Blob compression type - CompressionType compression = kNoCompression; + Compressor* compressor; // Output parameter set by MultiGetBlob() to point to the data buffer, and // the number of valid bytes @@ -37,12 +38,12 @@ struct BlobReadRequest { Status* status = nullptr; BlobReadRequest(const Slice& _user_key, uint64_t _offset, size_t _len, - CompressionType _compression, PinnableSlice* _result, + Compressor* _compressor, PinnableSlice* _result, Status* _status) : user_key(&_user_key), offset(_offset), len(_len), - compression(_compression), + compressor(_compressor), result(_result), status(_status) {} diff --git a/db/blob/blob_source.cc b/db/blob/blob_source.cc index b524982e53..7e125f73a2 100644 --- a/db/blob/blob_source.cc +++ b/db/blob/blob_source.cc @@ -158,7 +158,7 @@ Status BlobSource::GetBlob(const ReadOptions& read_options, const Slice& user_key, uint64_t file_number, uint64_t offset, uint64_t file_size, uint64_t value_size, - CompressionType compression_type, + const std::shared_ptr& compressor, FilePrefetchBuffer* prefetch_buffer, PinnableSlice* value, uint64_t* bytes_read) { assert(value); @@ -217,7 +217,8 @@ Status BlobSource::GetBlob(const ReadOptions& read_options, 
assert(blob_file_reader.GetValue()); - if (compression_type != blob_file_reader.GetValue()->GetCompressionType()) { + if (compressor->GetCompressionType() != + blob_file_reader.GetValue()->GetCompressor()->GetCompressionType()) { return Status::Corruption("Compression type mismatch when reading blob"); } @@ -228,8 +229,8 @@ Status BlobSource::GetBlob(const ReadOptions& read_options, uint64_t read_size = 0; s = blob_file_reader.GetValue()->GetBlob( - read_options, user_key, offset, value_size, compression_type, - prefetch_buffer, allocator, &blob_contents, &read_size); + read_options, user_key, offset, value_size, compressor, prefetch_buffer, + allocator, &blob_contents, &read_size); if (!s.ok()) { return s; } diff --git a/db/blob/blob_source.h b/db/blob/blob_source.h index d5e009b54d..232362bec7 100644 --- a/db/blob/blob_source.h +++ b/db/blob/blob_source.h @@ -17,6 +17,7 @@ #include "rocksdb/rocksdb_namespace.h" #include "table/block_based/cachable_entry.h" #include "util/autovector.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -51,7 +52,8 @@ class BlobSource { // record. 
Status GetBlob(const ReadOptions& read_options, const Slice& user_key, uint64_t file_number, uint64_t offset, uint64_t file_size, - uint64_t value_size, CompressionType compression_type, + uint64_t value_size, + const std::shared_ptr& compressor, FilePrefetchBuffer* prefetch_buffer, PinnableSlice* value, uint64_t* bytes_read); diff --git a/db/blob/blob_source_test.cc b/db/blob/blob_source_test.cc index c0e1aba6ec..42baff0088 100644 --- a/db/blob/blob_source_test.cc +++ b/db/blob/blob_source_test.cc @@ -36,7 +36,8 @@ void WriteBlobFile(const ImmutableOptions& immutable_options, const ExpirationRange& expiration_range_header, const ExpirationRange& expiration_range_footer, uint64_t blob_file_number, const std::vector& keys, - const std::vector& blobs, CompressionType compression, + const std::vector& blobs, + const std::shared_ptr& compressor, std::vector& blob_offsets, std::vector& blob_sizes) { assert(!immutable_options.cf_paths.empty()); @@ -62,30 +63,27 @@ void WriteBlobFile(const ImmutableOptions& immutable_options, statistics, blob_file_number, use_fsync, do_flush); - BlobLogHeader header(column_family_id, compression, has_ttl, - expiration_range_header); + BlobLogHeader header(column_family_id, compressor->GetCompressionType(), + has_ttl, expiration_range_header); ASSERT_OK(blob_log_writer.WriteHeader(header)); std::vector compressed_blobs(num); std::vector blobs_to_write(num); - if (kNoCompression == compression) { + if (kNoCompression == compressor->GetCompressionType()) { for (size_t i = 0; i < num; ++i) { blobs_to_write[i] = blobs[i]; blob_sizes[i] = blobs[i].size(); } } else { - CompressionOptions opts; - CompressionContext context(compression, opts); constexpr uint64_t sample_for_compression = 0; - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), - compression, sample_for_compression); - constexpr uint32_t compression_format_version = 2; + CompressionInfo info(CompressionDict::GetEmptyDict(), + compression_format_version, 
sample_for_compression); for (size_t i = 0; i < num; ++i) { - ASSERT_TRUE(CompressData(blobs[i], info, compression_format_version, - &compressed_blobs[i])); + ASSERT_TRUE( + info.CompressData(compressor.get(), blobs[i], &compressed_blobs[i])); blobs_to_write[i] = compressed_blobs[i]; blob_sizes[i] = compressed_blobs[i].size(); } @@ -144,6 +142,9 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { Statistics* statistics = options_.statistics.get(); assert(statistics); + std::shared_ptr no_compressor = + BuiltinCompressor::GetCompressor(kNoCompression); + DestroyAndReopen(options_); ImmutableOptions immutable_options(options_); @@ -177,7 +178,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { std::vector blob_sizes(keys.size()); WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range, - expiration_range, blob_file_number, keys, blobs, kNoCompression, + expiration_range, blob_file_number, keys, blobs, no_compressor, blob_offsets, blob_sizes); constexpr size_t capacity = 1024; @@ -216,7 +217,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, &values[i], + no_compressor, prefetch_buffer, &values[i], &bytes_read)); ASSERT_EQ(values[i], blobs[i]); ASSERT_TRUE(values[i].IsPinned()); @@ -254,7 +255,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, &values[i], + no_compressor, prefetch_buffer, &values[i], &bytes_read)); ASSERT_EQ(values[i], blobs[i]); ASSERT_TRUE(values[i].IsPinned()); @@ -298,7 +299,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, &values[i], + no_compressor, prefetch_buffer, &values[i], &bytes_read)); 
ASSERT_EQ(values[i], blobs[i]); ASSERT_TRUE(values[i].IsPinned()); @@ -337,7 +338,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, &values[i], + no_compressor, prefetch_buffer, &values[i], &bytes_read)); ASSERT_EQ(values[i], blobs[i]); ASSERT_TRUE(values[i].IsPinned()); @@ -383,7 +384,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { ASSERT_TRUE(blob_source .GetBlob(read_options, keys[i], blob_file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, &values[i], + no_compressor, prefetch_buffer, &values[i], &bytes_read) .IsIncomplete()); ASSERT_TRUE(values[i].empty()); @@ -425,7 +426,7 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { ASSERT_TRUE(blob_source .GetBlob(read_options, keys[i], file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, &values[i], + no_compressor, prefetch_buffer, &values[i], &bytes_read) .IsIOError()); ASSERT_TRUE(values[i].empty()); @@ -455,7 +456,8 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) { return; } - const CompressionType compression = kSnappyCompression; + std::shared_ptr snappy_compressor = + BuiltinCompressor::GetCompressor(kSnappyCompression); options_.cf_paths.emplace_back( test::PerThreadDBPath(env_, "BlobSourceTest_GetCompressedBlobs"), 0); @@ -514,7 +516,7 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) { WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range, expiration_range, file_number, keys, blobs, - compression, blob_offsets, blob_sizes); + snappy_compressor, blob_offsets, blob_sizes); CacheHandleGuard blob_file_reader; ASSERT_OK(blob_source.GetBlobFileReader(read_options, file_number, @@ -522,7 +524,9 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) { ASSERT_NE(blob_file_reader.GetValue(), nullptr); const uint64_t file_size = blob_file_reader.GetValue()->GetFileSize(); - 
ASSERT_EQ(blob_file_reader.GetValue()->GetCompressionType(), compression); + ASSERT_EQ( + blob_file_reader.GetValue()->GetCompressor()->GetCompressionType(), + snappy_compressor->GetCompressionType()); for (size_t i = 0; i < num_blobs; ++i) { ASSERT_NE(blobs[i].size() /*uncompressed size*/, @@ -536,10 +540,10 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) { for (size_t i = 0; i < num_blobs; ++i) { ASSERT_FALSE(blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[i])); - ASSERT_OK(blob_source.GetBlob(read_options, keys[i], file_number, - blob_offsets[i], file_size, blob_sizes[i], - compression, nullptr /*prefetch_buffer*/, - &values[i], &bytes_read)); + ASSERT_OK(blob_source.GetBlob( + read_options, keys[i], file_number, blob_offsets[i], file_size, + blob_sizes[i], snappy_compressor, nullptr /*prefetch_buffer*/, + &values[i], &bytes_read)); ASSERT_EQ(values[i], blobs[i] /*uncompressed blob*/); ASSERT_NE(values[i].size(), blob_sizes[i] /*compressed size*/); ASSERT_EQ(bytes_read, @@ -559,10 +563,10 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) { blob_offsets[i])); // Compressed blob size is passed in GetBlob - ASSERT_OK(blob_source.GetBlob(read_options, keys[i], file_number, - blob_offsets[i], file_size, blob_sizes[i], - compression, nullptr /*prefetch_buffer*/, - &values[i], &bytes_read)); + ASSERT_OK(blob_source.GetBlob( + read_options, keys[i], file_number, blob_offsets[i], file_size, + blob_sizes[i], snappy_compressor, nullptr /*prefetch_buffer*/, + &values[i], &bytes_read)); ASSERT_EQ(values[i], blobs[i] /*uncompressed blob*/); ASSERT_NE(values[i].size(), blob_sizes[i] /*compressed size*/); ASSERT_EQ(bytes_read, @@ -621,13 +625,16 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) { std::vector blob_offsets(keys.size()); std::vector blob_sizes(keys.size()); + std::shared_ptr no_compressor = + BuiltinCompressor::GetCompressor(kNoCompression); + { // Write key/blob pairs to multiple blob files. 
for (size_t i = 0; i < blob_files; ++i) { const uint64_t file_number = i + 1; WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range, expiration_range, file_number, keys, - blobs, kNoCompression, blob_offsets, blob_sizes); + blobs, no_compressor, blob_offsets, blob_sizes); } } @@ -665,7 +672,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) { const uint64_t file_number = i + 1; for (size_t j = 0; j < num_blobs; ++j) { blob_reqs_in_file[i].emplace_back( - keys[j], blob_offsets[j], blob_sizes[j], kNoCompression, + keys[j], blob_offsets[j], blob_sizes[j], no_compressor.get(), &value_buf[i * num_blobs + j], &statuses_buf[i * num_blobs + j]); } blob_reqs.emplace_back(file_number, file_size, blob_reqs_in_file[i]); @@ -718,7 +725,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) { const uint64_t fake_file_number = 100; for (size_t i = 0; i < num_blobs; ++i) { fake_blob_reqs_in_file.emplace_back( - keys[i], blob_offsets[i], blob_sizes[i], kNoCompression, + keys[i], blob_offsets[i], blob_sizes[i], no_compressor.get(), &fake_value_buf[i], &fake_statuses_buf[i]); } @@ -811,8 +818,9 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { std::vector blob_sizes(keys.size()); WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range, - expiration_range, blob_file_number, keys, blobs, kNoCompression, - blob_offsets, blob_sizes); + expiration_range, blob_file_number, keys, blobs, + BuiltinCompressor::GetCompressor(kNoCompression), blob_offsets, + blob_sizes); constexpr size_t capacity = 10; std::shared_ptr backing_cache = @@ -834,6 +842,9 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { constexpr FilePrefetchBuffer* prefetch_buffer = nullptr; + std::shared_ptr no_compressor = + BuiltinCompressor::GetCompressor(kNoCompression); + { // MultiGetBlobFromOneFile uint64_t bytes_read = 0; @@ -843,7 +854,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { for (size_t i = 0; i < num_blobs; i += 2) { // even index 
blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i], - kNoCompression, &value_buf[i], &statuses_buf[i]); + no_compressor.get(), &value_buf[i], + &statuses_buf[i]); ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size, blob_offsets[i])); } @@ -902,7 +914,7 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number, blob_offsets[i], file_size, blob_sizes[i], - kNoCompression, prefetch_buffer, + no_compressor, prefetch_buffer, &value_buf[i], &bytes_read)); ASSERT_EQ(value_buf[i], blobs[i]); ASSERT_TRUE(value_buf[i].IsPinned()); @@ -921,7 +933,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { blob_reqs.clear(); for (size_t i = 0; i < num_blobs; ++i) { blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i], - kNoCompression, &value_buf[i], &statuses_buf[i]); + no_compressor.get(), &value_buf[i], + &statuses_buf[i]); } blob_source.MultiGetBlobFromOneFile(read_options, blob_file_number, @@ -966,7 +979,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { for (size_t i = 0; i < num_blobs; i++) { blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i], - kNoCompression, &value_buf[i], &statuses_buf[i]); + no_compressor.get(), &value_buf[i], + &statuses_buf[i]); ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size, blob_offsets[i])); } @@ -1010,7 +1024,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { for (size_t i = 0; i < num_blobs; i++) { blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i], - kNoCompression, &value_buf[i], &statuses_buf[i]); + no_compressor.get(), &value_buf[i], + &statuses_buf[i]); ASSERT_FALSE(blob_source.TEST_BlobInCache(non_existing_file_number, file_size, blob_offsets[i])); } @@ -1122,8 +1137,11 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { std::vector blob_offsets(keys.size()); std::vector blob_sizes(keys.size()); + std::shared_ptr no_compressor = + 
BuiltinCompressor::GetCompressor(kNoCompression); + WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range, - expiration_range, file_number, keys, blobs, kNoCompression, + expiration_range, file_number, keys, blobs, no_compressor, blob_offsets, blob_sizes); constexpr size_t capacity = 1024; @@ -1145,7 +1163,8 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { blob_source.GetBlobFileReader(read_options, file_number, &file_reader)); ASSERT_NE(file_reader.GetValue(), nullptr); const uint64_t file_size = file_reader.GetValue()->GetFileSize(); - ASSERT_EQ(file_reader.GetValue()->GetCompressionType(), kNoCompression); + ASSERT_EQ(file_reader.GetValue()->GetCompressor()->GetCompressionType(), + kNoCompression); read_options.verify_checksums = true; @@ -1162,7 +1181,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { // key0 should be filled to the primary cache from the blob file. ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number, blob_offsets[0], file_size, blob_sizes[0], - kNoCompression, nullptr /* prefetch_buffer */, + no_compressor, nullptr /* prefetch_buffer */, &values[0], nullptr /* bytes_read */)); // Release cache handle values[0].Reset(); @@ -1171,7 +1190,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { // cache. key1 should be filled to the primary cache from the blob file. ASSERT_OK(blob_source.GetBlob(read_options, keys[1], file_number, blob_offsets[1], file_size, blob_sizes[1], - kNoCompression, nullptr /* prefetch_buffer */, + no_compressor, nullptr /* prefetch_buffer */, &values[1], nullptr /* bytes_read */)); // Release cache handle @@ -1181,7 +1200,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { // should be evicted and key1's dummy item is inserted into secondary cache. 
ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number, blob_offsets[0], file_size, blob_sizes[0], - kNoCompression, nullptr /* prefetch_buffer */, + no_compressor, nullptr /* prefetch_buffer */, &values[0], nullptr /* bytes_read */)); ASSERT_EQ(values[0], blobs[0]); ASSERT_TRUE( @@ -1194,7 +1213,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { // key1 should be filled to the primary cache from the blob file. ASSERT_OK(blob_source.GetBlob(read_options, keys[1], file_number, blob_offsets[1], file_size, blob_sizes[1], - kNoCompression, nullptr /* prefetch_buffer */, + no_compressor, nullptr /* prefetch_buffer */, &values[1], nullptr /* bytes_read */)); ASSERT_EQ(values[1], blobs[1]); ASSERT_TRUE( @@ -1261,7 +1280,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { // key1 is evicted and inserted into the secondary cache. ASSERT_OK(blob_source.GetBlob( read_options, keys[0], file_number, blob_offsets[0], file_size, - blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */, + blob_sizes[0], no_compressor, nullptr /* prefetch_buffer */, &values[0], nullptr /* bytes_read */)); ASSERT_EQ(values[0], blobs[0]); @@ -1410,9 +1429,12 @@ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) { std::vector blob_offsets(keys_.size()); std::vector blob_sizes(keys_.size()); + std::shared_ptr no_compressor = + BuiltinCompressor::GetCompressor(kNoCompression); + WriteBlobFile(immutable_options, kColumnFamilyId, kHasTTL, expiration_range, - expiration_range, kBlobFileNumber, keys_, blobs_, - kNoCompression, blob_offsets, blob_sizes); + expiration_range, kBlobFileNumber, keys_, blobs_, no_compressor, + blob_offsets, blob_sizes); constexpr size_t capacity = 10; std::shared_ptr backing_cache = NewLRUCache(capacity); @@ -1444,7 +1466,7 @@ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) { for (size_t i = 0; i < kNumBlobs; ++i) { ASSERT_OK(blob_source.GetBlob( read_options, keys_[i], kBlobFileNumber, blob_offsets[i], - 
blob_file_size_, blob_sizes[i], kNoCompression, + blob_file_size_, blob_sizes[i], no_compressor, nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */)); ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0); ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), 0); @@ -1463,7 +1485,7 @@ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) { for (size_t i = 0; i < kNumBlobs; ++i) { ASSERT_OK(blob_source.GetBlob( read_options, keys_[i], kBlobFileNumber, blob_offsets[i], - blob_file_size_, blob_sizes[i], kNoCompression, + blob_file_size_, blob_sizes[i], no_compressor, nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */)); size_t charge = 0; @@ -1529,10 +1551,13 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservation) { std::vector blob_offsets(keys_.size()); std::vector blob_sizes(keys_.size()); + std::shared_ptr no_compressor = + BuiltinCompressor::GetCompressor(kNoCompression); + constexpr ExpirationRange expiration_range; WriteBlobFile(immutable_options, kColumnFamilyId, kHasTTL, expiration_range, - expiration_range, kBlobFileNumber, keys_, blobs_, - kNoCompression, blob_offsets, blob_sizes); + expiration_range, kBlobFileNumber, keys_, blobs_, no_compressor, + blob_offsets, blob_sizes); constexpr size_t capacity = 10; std::shared_ptr backing_cache = NewLRUCache(capacity); @@ -1564,7 +1589,7 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservation) { for (size_t i = 0; i < kNumBlobs; ++i) { ASSERT_OK(blob_source.GetBlob( read_options, keys_[i], kBlobFileNumber, blob_offsets[i], - blob_file_size_, blob_sizes[i], kNoCompression, + blob_file_size_, blob_sizes[i], no_compressor, nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */)); ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0); ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), 0); @@ -1580,7 +1605,7 @@ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservation) { for (size_t i = 0; i < kNumBlobs; ++i) { ASSERT_OK(blob_source.GetBlob( 
read_options, keys_[i], kBlobFileNumber, blob_offsets[i], - blob_file_size_, blob_sizes[i], kNoCompression, + blob_file_size_, blob_sizes[i], no_compressor, nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */)); // Release cache handle diff --git a/db/column_family.cc b/db/column_family.cc index 7be55f5569..fdfca69b76 100644 --- a/db/column_family.cc +++ b/db/column_family.cc @@ -128,24 +128,30 @@ void GetIntTblPropCollectorFactory( } Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) { - if (!cf_options.compression_per_level.empty()) { - for (size_t level = 0; level < cf_options.compression_per_level.size(); - ++level) { - if (!CompressionTypeSupported(cf_options.compression_per_level[level])) { - return Status::InvalidArgument( - "Compression type " + - CompressionTypeToString(cf_options.compression_per_level[level]) + - " is not linked with the binary."); + MutableCFOptions moptions(cf_options); + ImmutableCFOptions ioptions(cf_options); + if (moptions.compressor && !moptions.compressor->Supported()) { + return Status::InvalidArgument("Compression type " + + moptions.compressor->GetId() + + " is not linked with the binary."); + } else if (moptions.bottommost_compressor && + !moptions.bottommost_compressor->Supported()) { + return Status::InvalidArgument("Compression type " + + moptions.bottommost_compressor->GetId() + + " is not linked with the binary."); + } else if (!moptions.compressor_per_level.empty()) { + for (const auto& compressor : moptions.compressor_per_level) { + if (compressor == nullptr) { + return Status::InvalidArgument("Compression type is invalid."); + } else if (!compressor->Supported()) { + return Status::InvalidArgument("Compression type " + + compressor->GetId() + + " is not linked with the binary."); } } - } else { - if (!CompressionTypeSupported(cf_options.compression)) { - return Status::InvalidArgument( - "Compression type " + - CompressionTypeToString(cf_options.compression) + - " is not linked with 
the binary."); - } } + + // TODO: Move this into ValidateOptions if (cf_options.compression_opts.zstd_max_train_bytes > 0) { if (cf_options.compression_opts.use_zstd_dict_trainer) { if (!ZSTD_TrainDictionarySupported()) { @@ -164,16 +170,11 @@ Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) { "should be nonzero if we're using zstd's dictionary generator."); } } - - if (!CompressionTypeSupported(cf_options.blob_compression_type)) { - std::ostringstream oss; - oss << "The specified blob compression type " - << CompressionTypeToString(cf_options.blob_compression_type) - << " is not available."; - - return Status::InvalidArgument(oss.str()); + if (moptions.blob_compressor && !moptions.blob_compressor->Supported()) { + return Status::InvalidArgument("Blob compression type " + + moptions.blob_compressor->GetId() + + " is not linked with the binary."); } - return Status::OK(); } diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index 5d7e64dc01..6604394a69 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -30,6 +30,7 @@ #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "util/cast_util.h" +#include "util/compressor.h" #include "util/string_util.h" namespace ROCKSDB_NAMESPACE { @@ -402,11 +403,11 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) { } TEST_F(CompactFilesTest, SentinelCompressionType) { - if (!Zlib_Supported()) { + if (!BuiltinCompressor::TypeSupported(kZlibCompression)) { fprintf(stderr, "zlib compression not supported, skip this test\n"); return; } - if (!Snappy_Supported()) { + if (!BuiltinCompressor::TypeSupported(kSnappyCompression)) { fprintf(stderr, "snappy compression not supported, skip this test\n"); return; } @@ -445,9 +446,10 @@ TEST_F(CompactFilesTest, SentinelCompressionType) { ROCKSDB_NAMESPACE::TablePropertiesCollection all_tables_props; ASSERT_OK(db->GetPropertiesOfAllTables(&all_tables_props)); + std::string zlib = + 
BuiltinCompressor::TypeToString(CompressionType::kZlibCompression); for (const auto& name_and_table_props : all_tables_props) { - ASSERT_EQ(CompressionTypeToString(CompressionType::kZlibCompression), - name_and_table_props.second->compression_name); + ASSERT_EQ(zlib, name_and_table_props.second->compression_name); } delete db; } diff --git a/db/compaction/compaction.cc b/db/compaction/compaction.cc index e28257d656..cc5238f1b7 100644 --- a/db/compaction/compaction.cc +++ b/db/compaction/compaction.cc @@ -238,12 +238,11 @@ Compaction::Compaction( const MutableDBOptions& _mutable_db_options, std::vector _inputs, int _output_level, uint64_t _target_file_size, uint64_t _max_compaction_bytes, - uint32_t _output_path_id, CompressionType _compression, - CompressionOptions _compression_opts, Temperature _output_temperature, - uint32_t _max_subcompactions, std::vector _grandparents, - bool _manual_compaction, const std::string& _trim_ts, double _score, - bool _deletion_compaction, bool l0_files_might_overlap, - CompactionReason _compaction_reason, + uint32_t _output_path_id, const std::shared_ptr& _compressor, + Temperature _output_temperature, uint32_t _max_subcompactions, + std::vector _grandparents, bool _manual_compaction, + const std::string& _trim_ts, double _score, bool _deletion_compaction, + bool l0_files_might_overlap, CompactionReason _compaction_reason, BlobGarbageCollectionPolicy _blob_garbage_collection_policy, double _blob_garbage_collection_age_cutoff) : input_vstorage_(vstorage), @@ -258,8 +257,7 @@ Compaction::Compaction( number_levels_(vstorage->num_levels()), cfd_(nullptr), output_path_id_(_output_path_id), - output_compression_(_compression), - output_compression_opts_(_compression_opts), + output_compressor_(_compressor), output_temperature_(_output_temperature), deletion_compaction_(_deletion_compaction), l0_files_might_overlap_(l0_files_might_overlap), @@ -454,8 +452,9 @@ bool Compaction::WithinPenultimateLevelOutputRange(const Slice& key) const { 
bool Compaction::InputCompressionMatchesOutput() const { int base_level = input_vstorage_->base_level(); bool matches = - (GetCompressionType(input_vstorage_, mutable_cf_options_, start_level_, - base_level) == output_compression_); + (GetCompressor(input_vstorage_, mutable_cf_options_, start_level_, + base_level) + ->GetCompressionType() == output_compressor_->GetCompressionType()); if (matches) { TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:Matches"); return true; diff --git a/db/compaction/compaction.h b/db/compaction/compaction.h index fcb0f3003d..cf4a644271 100644 --- a/db/compaction/compaction.h +++ b/db/compaction/compaction.h @@ -86,8 +86,8 @@ class Compaction { const MutableDBOptions& mutable_db_options, std::vector inputs, int output_level, uint64_t target_file_size, uint64_t max_compaction_bytes, - uint32_t output_path_id, CompressionType compression, - CompressionOptions compression_opts, + uint32_t output_path_id, + const std::shared_ptr& compressor, Temperature output_temperature, uint32_t max_subcompactions, std::vector grandparents, bool manual_compaction = false, const std::string& trim_ts = "", @@ -187,11 +187,8 @@ class Compaction { uint64_t target_output_file_size() const { return target_output_file_size_; } // What compression for output - CompressionType output_compression() const { return output_compression_; } - - // What compression options for output - const CompressionOptions& output_compression_opts() const { - return output_compression_opts_; + const std::shared_ptr& output_compressor() const { + return output_compressor_; } // Whether need to write output file to second DB path. 
@@ -480,8 +477,7 @@ class Compaction { Arena arena_; // Arena used to allocate space for file_levels_ const uint32_t output_path_id_; - CompressionType output_compression_; - CompressionOptions output_compression_opts_; + std::shared_ptr output_compressor_; Temperature output_temperature_; // If true, then the compaction can be done by simply deleting input files. const bool deletion_compaction_; diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc index e9a2b969d2..a9d5e069a9 100644 --- a/db/compaction/compaction_job.cc +++ b/db/compaction/compaction_job.cc @@ -58,6 +58,7 @@ #include "options/configurable_helper.h" #include "options/options_helper.h" #include "port/port.h" +#include "rocksdb/convenience.h" #include "rocksdb/db.h" #include "rocksdb/env.h" #include "rocksdb/options.h" @@ -933,8 +934,7 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) { stats.bytes_written / kMB, stats.bytes_written_blob / kMB, read_write_amp, write_amp, status.ToString().c_str(), stats.num_input_records, stats.num_dropped_records, - CompressionTypeToString(compact_->compaction->output_compression()) - .c_str()); + compact_->compaction->output_compressor()->GetId().c_str()); const auto& blob_files = vstorage->GetBlobFiles(); if (!blob_files.empty()) { @@ -982,7 +982,7 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) { << "num_output_records" << stats.num_output_records << "num_subcompactions" << compact_->sub_compact_states.size() << "output_compression" - << CompressionTypeToString(compact_->compaction->output_compression()); + << compact_->compaction->output_compressor()->GetId(); stream << "num_single_delete_mismatches" << compaction_job_stats_->num_single_del_mismatch; @@ -1945,8 +1945,7 @@ Status CompactionJob::OpenCompactionOutputFile(SubcompactionState* sub_compact, TableBuilderOptions tboptions( *cfd->ioptions(), *(sub_compact->compaction->mutable_cf_options()), cfd->internal_comparator(), 
cfd->int_tbl_prop_collector_factories(), - sub_compact->compaction->output_compression(), - sub_compact->compaction->output_compression_opts(), cfd->GetID(), + sub_compact->compaction->output_compressor(), cfd->GetID(), cfd->GetName(), sub_compact->compaction->output_level(), bottommost_level_, last_level_with_data_, TableFileCreationReason::kCompaction, 0 /* oldest_key_time */, diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc index 61ed273269..cf2abd7e47 100644 --- a/db/compaction/compaction_job_test.cc +++ b/db/compaction/compaction_job_test.cc @@ -314,8 +314,9 @@ class CompactionJobTestBase : public testing::Test { TableBuilderOptions(*cfd_->ioptions(), mutable_cf_options_, cfd_->internal_comparator(), cfd_->int_tbl_prop_collector_factories(), - CompressionType::kNoCompression, - CompressionOptions(), 0 /* column_family_id */, + BuiltinCompressor::GetCompressor( + CompressionType::kNoCompression), + 0 /* column_family_id */, kDefaultColumnFamilyName, -1 /* level */), file_writer.get())); // Build table. 
@@ -659,9 +660,9 @@ class CompactionJobTestBase : public testing::Test { *cfd->GetLatestMutableCFOptions(), mutable_db_options_, compaction_input_files, output_level, mutable_cf_options_.target_file_size_base, - mutable_cf_options_.max_compaction_bytes, 0, kNoCompression, - cfd->GetLatestMutableCFOptions()->compression_opts, - Temperature::kUnknown, max_subcompactions, grandparents, true); + mutable_cf_options_.max_compaction_bytes, 0, + BuiltinCompressor::GetCompressor(kNoCompression), Temperature::kUnknown, + max_subcompactions, grandparents, true); compaction.SetInputVersion(cfd->current()); assert(db_options_.info_log); diff --git a/db/compaction/compaction_picker.cc b/db/compaction/compaction_picker.cc index 4d40ab5034..fe9f94afc1 100644 --- a/db/compaction/compaction_picker.cc +++ b/db/compaction/compaction_picker.cc @@ -75,57 +75,50 @@ bool FindIntraL0Compaction(const std::vector& level_files, // If enable_compression is false, then compression is always disabled no // matter what the values of the other two parameters are. // Otherwise, the compression type is determined based on options and level. -CompressionType GetCompressionType(const VersionStorageInfo* vstorage, - const MutableCFOptions& mutable_cf_options, - int level, int base_level, - const bool enable_compression) { +std::shared_ptr GetCompressor(const VersionStorageInfo* vstorage, + const MutableCFOptions& moptions, + int level, int base_level, + const bool enable_compression) { if (!enable_compression) { // disable compression - return kNoCompression; + return BuiltinCompressor::GetCompressor(kNoCompression); } // If bottommost_compression is set and we are compacting to the // bottommost level then we should use it. 
- if (mutable_cf_options.bottommost_compression != kDisableCompressionOption && - level >= (vstorage->num_non_empty_levels() - 1)) { - return mutable_cf_options.bottommost_compression; + bool bottom_level = (level >= (vstorage->num_non_empty_levels() - 1)); + if (moptions.bottommost_compressor != nullptr && bottom_level) { + return moptions.bottommost_compressor; } // If the user has specified a different compression level for each level, // then pick the compression for that level. - if (!mutable_cf_options.compression_per_level.empty()) { - assert(level == 0 || level >= base_level); - int idx = (level == 0) ? 0 : level - base_level + 1; - - const int n = - static_cast(mutable_cf_options.compression_per_level.size()) - 1; + if (!moptions.compressor_per_level.empty()) { // It is possible for level_ to be -1; in that case, we use level // 0's compression. This occurs mostly in backwards compatibility // situations when the builder doesn't know what level the file // belongs to. Likewise, if level is beyond the end of the // specified compression levels, use the last value. - return mutable_cf_options - .compression_per_level[std::max(0, std::min(idx, n))]; + assert(level == 0 || level >= base_level); + int lvl = std::max(0, level - base_level + 1); + int idx = std::min( + static_cast(moptions.compressor_per_level.size()) - 1, lvl); + // If not specified directly by the user, compressors in + // compressor_per_level are instantiated using compression_opts. If the user + // enabled bottommost_compression_opts, we need to create a new compressor + // with those options. 
+ if (bottom_level && moptions.bottommost_compression_opts.enabled && + static_cast(moptions.compression_per_level.size()) > idx) { + return BuiltinCompressor::GetCompressor( + moptions.compression_per_level[idx], + moptions.bottommost_compression_opts); + } else { + return moptions.compressor_per_level[idx]; + } } else { - return mutable_cf_options.compression; + return moptions.compressor; } } -CompressionOptions GetCompressionOptions(const MutableCFOptions& cf_options, - const VersionStorageInfo* vstorage, - int level, - const bool enable_compression) { - if (!enable_compression) { - return cf_options.compression_opts; - } - // If bottommost_compression_opts is enabled and we are compacting to the - // bottommost level then we should use the specified compression options. - if (level >= (vstorage->num_non_empty_levels() - 1) && - cf_options.bottommost_compression_opts.enabled) { - return cf_options.bottommost_compression_opts; - } - return cf_options.compression_opts; -} - CompactionPicker::CompactionPicker(const ImmutableOptions& ioptions, const InternalKeyComparator* icmp) : ioptions_(ioptions), icmp_(icmp) {} @@ -357,26 +350,34 @@ Compaction* CompactionPicker::CompactFiles( start_level, output_level))); #endif /* !NDEBUG */ - CompressionType compression_type; - if (compact_options.compression == kDisableCompressionOption) { + std::shared_ptr compressor; + if (compact_options.compression != kDisableCompressionOption) { + bool bottom_level = + (output_level >= (vstorage->num_non_empty_levels() - 1)); + if (bottom_level && + mutable_cf_options.bottommost_compression_opts.enabled) { + compressor = BuiltinCompressor::GetCompressor( + compact_options.compression, + mutable_cf_options.bottommost_compression_opts); + } else { + compressor = BuiltinCompressor::GetCompressor( + compact_options.compression, mutable_cf_options.compression_opts); + } + } else { int base_level; if (ioptions_.compaction_style == kCompactionStyleLevel) { base_level = vstorage->base_level(); 
} else { base_level = 1; } - compression_type = GetCompressionType(vstorage, mutable_cf_options, - output_level, base_level); - } else { - // TODO(ajkr): `CompactionOptions` offers configurable `CompressionType` - // without configurable `CompressionOptions`, which is inconsistent. - compression_type = compact_options.compression; + compressor = + GetCompressor(vstorage, mutable_cf_options, output_level, base_level); } + auto c = new Compaction( vstorage, ioptions_, mutable_cf_options, mutable_db_options, input_files, output_level, compact_options.output_file_size_limit, - mutable_cf_options.max_compaction_bytes, output_path_id, compression_type, - GetCompressionOptions(mutable_cf_options, vstorage, output_level), + mutable_cf_options.max_compaction_bytes, output_path_id, compressor, Temperature::kUnknown, compact_options.max_subcompactions, /* grandparents */ {}, true); RegisterCompaction(c); @@ -668,8 +669,7 @@ Compaction* CompactionPicker::CompactRange( ioptions_.compaction_style), /* max_compaction_bytes */ LLONG_MAX, compact_range_options.target_path_id, - GetCompressionType(vstorage, mutable_cf_options, output_level, 1), - GetCompressionOptions(mutable_cf_options, vstorage, output_level), + GetCompressor(vstorage, mutable_cf_options, output_level, 1), Temperature::kUnknown, compact_range_options.max_subcompactions, /* grandparents */ {}, /* is manual */ true, trim_ts, /* score */ -1, /* deletion_compaction */ false, /* l0_files_might_overlap */ true, @@ -855,9 +855,8 @@ Compaction* CompactionPicker::CompactRange( ioptions_.level_compaction_dynamic_level_bytes), mutable_cf_options.max_compaction_bytes, compact_range_options.target_path_id, - GetCompressionType(vstorage, mutable_cf_options, output_level, - vstorage->base_level()), - GetCompressionOptions(mutable_cf_options, vstorage, output_level), + GetCompressor(vstorage, mutable_cf_options, output_level, + vstorage->base_level()), Temperature::kUnknown, compact_range_options.max_subcompactions, 
std::move(grandparents), /* is manual */ true, trim_ts, /* score */ -1, /* deletion_compaction */ false, /* l0_files_might_overlap */ true, diff --git a/db/compaction/compaction_picker.h b/db/compaction/compaction_picker.h index 11fdb1d937..6b169c3ef3 100644 --- a/db/compaction/compaction_picker.h +++ b/db/compaction/compaction_picker.h @@ -319,14 +319,9 @@ bool FindIntraL0Compaction(const std::vector& level_files, uint64_t max_compaction_bytes, CompactionInputFiles* comp_inputs); -CompressionType GetCompressionType(const VersionStorageInfo* vstorage, - const MutableCFOptions& mutable_cf_options, - int level, int base_level, - const bool enable_compression = true); - -CompressionOptions GetCompressionOptions( - const MutableCFOptions& mutable_cf_options, - const VersionStorageInfo* vstorage, int level, +std::shared_ptr GetCompressor( + const VersionStorageInfo* vstorage, + const MutableCFOptions& mutable_cf_options, int level, int base_level, const bool enable_compression = true); } // namespace ROCKSDB_NAMESPACE diff --git a/db/compaction/compaction_picker_fifo.cc b/db/compaction/compaction_picker_fifo.cc index 9aa24302e2..32c1d12fb9 100644 --- a/db/compaction/compaction_picker_fifo.cc +++ b/db/compaction/compaction_picker_fifo.cc @@ -112,8 +112,8 @@ Compaction* FIFOCompactionPicker::PickTTLCompaction( Compaction* c = new Compaction( vstorage, ioptions_, mutable_cf_options, mutable_db_options, - std::move(inputs), 0, 0, 0, 0, kNoCompression, - mutable_cf_options.compression_opts, Temperature::kUnknown, + std::move(inputs), 0, 0, 0, 0, + BuiltinCompressor::GetCompressor(kNoCompression), Temperature::kUnknown, /* max_subcompactions */ 0, {}, /* is manual */ false, /* trim_ts */ "", vstorage->CompactionScore(0), /* is deletion compaction */ true, /* l0_files_might_overlap */ true, @@ -181,10 +181,10 @@ Compaction* FIFOCompactionPicker::PickSizeCompaction( vstorage, ioptions_, mutable_cf_options, mutable_db_options, {comp_inputs}, 0, 16 * 1024 * 1024 /* output file 
size limit */, 0 /* max compaction bytes, not applicable */, - 0 /* output path ID */, mutable_cf_options.compression, - mutable_cf_options.compression_opts, Temperature::kUnknown, - 0 /* max_subcompactions */, {}, /* is manual */ false, - /* trim_ts */ "", vstorage->CompactionScore(0), + 0 /* output path ID */, mutable_cf_options.compressor, + Temperature::kUnknown, 0 /* max_subcompactions */, {}, + /* is manual */ false, /* trim_ts */ "", + vstorage->CompactionScore(0), /* is deletion compaction */ false, /* l0_files_might_overlap */ true, CompactionReason::kFIFOReduceNumFiles); @@ -276,8 +276,8 @@ Compaction* FIFOCompactionPicker::PickSizeCompaction( std::move(inputs), last_level, /* target_file_size */ 0, /* max_compaction_bytes */ 0, - /* output_path_id */ 0, kNoCompression, - mutable_cf_options.compression_opts, Temperature::kUnknown, + /* output_path_id */ 0, BuiltinCompressor::GetCompressor(kNoCompression), + Temperature::kUnknown, /* max_subcompactions */ 0, {}, /* is manual */ false, /* trim_ts */ "", vstorage->CompactionScore(0), /* is deletion compaction */ true, @@ -416,8 +416,7 @@ Compaction* FIFOCompactionPicker::PickTemperatureChangeCompaction( vstorage, ioptions_, mutable_cf_options, mutable_db_options, std::move(inputs), 0, 0 /* output file size limit */, 0 /* max compaction bytes, not applicable */, 0 /* output path ID */, - mutable_cf_options.compression, mutable_cf_options.compression_opts, - compaction_target_temp, + mutable_cf_options.compressor, compaction_target_temp, /* max_subcompactions */ 0, {}, /* is manual */ false, /* trim_ts */ "", vstorage->CompactionScore(0), /* is deletion compaction */ false, /* l0_files_might_overlap */ true, diff --git a/db/compaction/compaction_picker_level.cc b/db/compaction/compaction_picker_level.cc index c436689bb6..2dddb39743 100644 --- a/db/compaction/compaction_picker_level.cc +++ b/db/compaction/compaction_picker_level.cc @@ -521,9 +521,8 @@ Compaction* LevelCompactionBuilder::GetCompaction() { 
ioptions_.level_compaction_dynamic_level_bytes), mutable_cf_options_.max_compaction_bytes, GetPathId(ioptions_, mutable_cf_options_, output_level_), - GetCompressionType(vstorage_, mutable_cf_options_, output_level_, - vstorage_->base_level()), - GetCompressionOptions(mutable_cf_options_, vstorage_, output_level_), + GetCompressor(vstorage_, mutable_cf_options_, output_level_, + vstorage_->base_level()), Temperature::kUnknown, /* max_subcompactions */ 0, std::move(grandparents_), is_manual_, /* trim_ts */ "", start_level_score_, false /* deletion_compaction */, diff --git a/db/compaction/compaction_picker_universal.cc b/db/compaction/compaction_picker_universal.cc index 9eaf395467..90ad2a2795 100644 --- a/db/compaction/compaction_picker_universal.cc +++ b/db/compaction/compaction_picker_universal.cc @@ -765,10 +765,8 @@ Compaction* UniversalCompactionBuilder::PickCompactionToReduceSortedRuns( MaxFileSizeForLevel(mutable_cf_options_, output_level, kCompactionStyleUniversal), GetMaxOverlappingBytes(), path_id, - GetCompressionType(vstorage_, mutable_cf_options_, - output_level, 1, enable_compression), - GetCompressionOptions(mutable_cf_options_, vstorage_, - output_level, enable_compression), + GetCompressor(vstorage_, mutable_cf_options_, + output_level, 1, enable_compression), Temperature::kUnknown, /* max_subcompactions */ 0, grandparents, /* is manual */ false, /* trim_ts */ "", score_, @@ -1118,10 +1116,8 @@ Compaction* UniversalCompactionBuilder::PickIncrementalForReduceSizeAmp( MaxFileSizeForLevel(mutable_cf_options_, output_level, kCompactionStyleUniversal), GetMaxOverlappingBytes(), path_id, - GetCompressionType(vstorage_, mutable_cf_options_, output_level, 1, - true /* enable_compression */), - GetCompressionOptions(mutable_cf_options_, vstorage_, output_level, - true /* enable_compression */), + GetCompressor(vstorage_, mutable_cf_options_, output_level, 1, + true /* enable_compression */), Temperature::kUnknown, /* max_subcompactions */ 0, /* grandparents 
*/ {}, /* is manual */ false, /* trim_ts */ "", score_, false /* deletion_compaction */, @@ -1264,8 +1260,7 @@ Compaction* UniversalCompactionBuilder::PickDeleteTriggeredCompaction() { MaxFileSizeForLevel(mutable_cf_options_, output_level, kCompactionStyleUniversal), /* max_grandparent_overlap_bytes */ GetMaxOverlappingBytes(), path_id, - GetCompressionType(vstorage_, mutable_cf_options_, output_level, 1), - GetCompressionOptions(mutable_cf_options_, vstorage_, output_level), + GetCompressor(vstorage_, mutable_cf_options_, output_level, 1), Temperature::kUnknown, /* max_subcompactions */ 0, grandparents, /* is manual */ false, /* trim_ts */ "", score_, false /* deletion_compaction */, @@ -1355,10 +1350,8 @@ Compaction* UniversalCompactionBuilder::PickCompactionWithSortedRunRange( MaxFileSizeForLevel(mutable_cf_options_, output_level, kCompactionStyleUniversal), GetMaxOverlappingBytes(), path_id, - GetCompressionType(vstorage_, mutable_cf_options_, output_level, 1, - true /* enable_compression */), - GetCompressionOptions(mutable_cf_options_, vstorage_, output_level, - true /* enable_compression */), + GetCompressor(vstorage_, mutable_cf_options_, output_level, 1, + true /* enable_compression */), Temperature::kUnknown, /* max_subcompactions */ 0, /* grandparents */ {}, /* is manual */ false, /* trim_ts */ "", score_, false /* deletion_compaction */, diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index aa92e26e83..9bf5d2bdcd 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -35,6 +35,7 @@ #include "table/block_based/block_based_table_reader.h" #include "table/block_based/block_builder.h" #include "test_util/sync_point.h" +#include "util/compressor.h" #include "util/file_checksum_helper.h" #include "util/random.h" #include "utilities/counted_fs.h" @@ -3524,13 +3525,13 @@ class DBBasicTestMultiGet : public DBTestBase { BlockBasedTableOptions table_options; if (compression_enabled_) { - std::vector compression_types; - compression_types = 
GetSupportedCompressions(); // Not every platform may have compression libraries available, so // dynamically pick based on what's available CompressionType tmp_type = kNoCompression; - for (auto c_type : compression_types) { - if (c_type != kNoCompression) { + for (auto c : Compressor::GetSupported()) { + CompressionType c_type; + if (BuiltinCompressor::StringToType(c, &c_type) && + c_type != kNoCompression) { tmp_type = c_type; break; } diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 8550f9b195..abcabd6a23 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -40,7 +40,7 @@ #include "rocksdb/table_properties.h" #include "table/block_based/block_based_table_reader.h" #include "table/unique_id_impl.h" -#include "util/compression.h" +#include "util/compressor.h" #include "util/defer.h" #include "util/hash.h" #include "util/math.h" @@ -1017,21 +1017,11 @@ TEST_F(DBBlockCacheTest, CacheCompressionDict) { const int kNumBytesPerEntry = 1024; // Try all the available libraries that support dictionary compression - std::vector compression_types; - if (Zlib_Supported()) { - compression_types.push_back(kZlibCompression); - } - if (LZ4_Supported()) { - compression_types.push_back(kLZ4Compression); - compression_types.push_back(kLZ4HCCompression); - } - if (ZSTD_Supported()) { - compression_types.push_back(kZSTD); - } else if (ZSTDNotFinal_Supported()) { - compression_types.push_back(kZSTDNotFinalCompression); - } + auto compressors = Compressor::GetDictSupported(); Random rnd(301); - for (auto compression_type : compression_types) { + for (auto c : compressors) { + CompressionType compression_type; + ASSERT_TRUE(BuiltinCompressor::StringToType(c, &compression_type)); Options options = CurrentOptions(); options.bottommost_compression = compression_type; options.bottommost_compression_opts.max_dict_bytes = 4096; @@ -2198,10 +2188,15 @@ TEST_P(DBBlockCachePinningTest, TwoLevelDB) { const int kNumKeysPerFile = kBlockSize * 
kNumBlocksPerFile / kKeySize; Options options = CurrentOptions(); - // `kNoCompression` makes the unit test more portable. But it relies on the - // current behavior of persisting/accessing dictionary even when there's no - // (de)compression happening, which seems fairly likely to change over time. - options.compression = kNoCompression; + // Select one of the available libraries that support dictionary compression + std::vector dict_compressions = Compressor::GetDictSupported(); + if (!dict_compressions.empty()) { + BuiltinCompressor::StringToType(dict_compressions[0], &options.compression); + } else { + ROCKSDB_GTEST_BYPASS( + "TwoLevelDB requires a library that supports dictionary compression"); + return; + } options.compression_opts.max_dict_bytes = 4 << 10; options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics(); BlockBasedTableOptions table_options; @@ -2269,7 +2264,8 @@ TEST_P(DBBlockCachePinningTest, TwoLevelDB) { ++expected_index_misses; } } - if (unpartitioned_pinning_ == PinningTier::kNone) { + if (unpartitioned_pinning_ == PinningTier::kNone && + options.compression != kNoCompression) { ++expected_compression_dict_misses; } ASSERT_EQ(expected_filter_misses, diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc index 75d8895a8a..10cd313ec5 100644 --- a/db/db_impl/db_impl.cc +++ b/db/db_impl/db_impl.cc @@ -131,22 +131,21 @@ const std::string kPersistentStatsColumnFamilyName( "___rocksdb_stats_history___"); void DumpRocksDBBuildVersion(Logger* log); -CompressionType GetCompressionFlush( - const ImmutableCFOptions& ioptions, - const MutableCFOptions& mutable_cf_options) { +std::shared_ptr GetCompressionFlush( + const ImmutableCFOptions& ioptions, const MutableCFOptions& moptions) { // Compressing memtable flushes might not help unless the sequential load // optimization is used for leveled compaction. Otherwise the CPU and // latency overhead is not offset by saving much space. 
- if (ioptions.compaction_style == kCompactionStyleUniversal && - mutable_cf_options.compaction_options_universal - .compression_size_percent >= 0) { - return kNoCompression; - } - if (mutable_cf_options.compression_per_level.empty()) { - return mutable_cf_options.compression; + if (ioptions.compaction_style == kCompactionStyleUniversal) { + if (moptions.compaction_options_universal.compression_size_percent < 0) { + return moptions.compressor; + } else { + return BuiltinCompressor::GetCompressor(kNoCompression); + } + } else if (moptions.compressor_per_level.empty()) { + return moptions.compressor; } else { - // For leveled compress when min_level_to_compress != 0. - return mutable_cf_options.compression_per_level[0]; + return moptions.compressor_per_level[0]; } } diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h index b041844c60..7622135275 100644 --- a/db/db_impl/db_impl.h +++ b/db/db_impl/db_impl.h @@ -2852,7 +2852,7 @@ extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src, bool read_only = false, Status* logger_creation_s = nullptr); -extern CompressionType GetCompressionFlush( +extern std::shared_ptr GetCompressionFlush( const ImmutableCFOptions& ioptions, const MutableCFOptions& mutable_cf_options); diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc index 8baf079565..295d232e4a 100644 --- a/db/db_impl/db_impl_compaction_flush.cc +++ b/db/db_impl/db_impl_compaction_flush.cc @@ -1931,9 +1931,8 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) { ->compaction_style) /* output file size limit, not applicable */ , LLONG_MAX /* max compaction bytes, not applicable */, - 0 /* output path ID, not applicable */, mutable_cf_options.compression, - mutable_cf_options.compression_opts, Temperature::kUnknown, - 0 /* max_subcompactions, not applicable */, + 0 /* output path ID, not applicable */, mutable_cf_options.compressor, + Temperature::kUnknown, 0 /* 
max_subcompactions, not applicable */, {} /* grandparents, not applicable */, false /* is manual */, "" /* trim_ts */, -1 /* score, not applicable */, false /* is deletion compaction, not applicable */, @@ -4130,7 +4129,8 @@ void DBImpl::BuildCompactionJobInfo( compaction_job_info->stats = compaction_job_stats; compaction_job_info->table_properties = c->GetTableProperties(); compaction_job_info->compaction_reason = c->compaction_reason(); - compaction_job_info->compression = c->output_compression(); + compaction_job_info->compression = + c->output_compressor()->GetCompressionType(); const ReadOptions read_options(Env::IOActivity::kCompaction); for (size_t i = 0; i < c->num_input_levels(); ++i) { @@ -4155,7 +4155,7 @@ void DBImpl::BuildCompactionJobInfo( newf.first, file_number, meta.oldest_blob_file_number}); } compaction_job_info->blob_compression_type = - c->mutable_cf_options()->blob_compression_type; + c->mutable_cf_options()->blob_compressor->GetCompressionType(); // Update BlobFilesInfo. 
for (const auto& blob_file : c->edit()->GetBlobFileAdditions()) { diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc index 1722e6652e..bd2ad10d6f 100644 --- a/db/db_impl/db_impl_open.cc +++ b/db/db_impl/db_impl_open.cc @@ -40,6 +40,7 @@ #include "rocksdb/wal_filter.h" #include "test_util/sync_point.h" #include "util/rate_limiter_impl.h" +#include "util/string_util.h" #include "util/udt_util.h" namespace ROCKSDB_NAMESPACE { @@ -1672,9 +1673,8 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd, *cfd->ioptions(), mutable_cf_options, cfd->internal_comparator(), cfd->int_tbl_prop_collector_factories(), GetCompressionFlush(*cfd->ioptions(), mutable_cf_options), - mutable_cf_options.compression_opts, cfd->GetID(), cfd->GetName(), - 0 /* level */, false /* is_bottommost */, - false /* is_last_level_with_data */, + cfd->GetID(), cfd->GetName(), 0 /* level */, + false /* is_bottommost */, false /* is_last_level_with_data */, TableFileCreationReason::kRecovery, 0 /* oldest_key_time */, 0 /* file_creation_time */, db_id_, db_session_id_, 0 /* target_file_size */, meta.fd.GetNumber()); diff --git a/db/db_options_test.cc b/db/db_options_test.cc index 6fec87010c..b3a2d5ad82 100644 --- a/db/db_options_test.cc +++ b/db/db_options_test.cc @@ -1166,8 +1166,9 @@ TEST_F(DBOptionsTest, ChangeCompression) { SyncPoint::GetInstance()->SetCallBack( "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) { Compaction* c = reinterpret_cast(arg); - compression_used = c->output_compression(); - compression_opt_used = c->output_compression_opts(); + compression_used = c->output_compressor()->GetCompressionType(); + compression_opt_used = + *(c->output_compressor()->GetOptions()); compacted = true; }); SyncPoint::GetInstance()->EnableProcessing(); @@ -1199,7 +1200,8 @@ TEST_F(DBOptionsTest, ChangeCompression) { ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_TRUE(compacted); ASSERT_EQ(CompressionType::kSnappyCompression, compression_used); 
- ASSERT_EQ(6, compression_opt_used.level); + // Snappy compressor does not define level option. Default is returned. + ASSERT_EQ(32767, compression_opt_used.level); // Right now parallel_level is not yet allowed to be changed. SyncPoint::GetInstance()->DisableProcessing(); @@ -1515,14 +1517,15 @@ TEST_F(DBOptionsTest, BottommostCompressionOptsWithFallbackType) { options.bottommost_compression_opts.enabled = true; Reopen(options); - CompressionType compression_used = CompressionType::kDisableCompressionOption; + std::string compression_used; + const std::string lz4 = + BuiltinCompressor::TypeToString(CompressionType::kLZ4Compression); CompressionOptions compression_opt_used; bool compacted = false; SyncPoint::GetInstance()->SetCallBack( "CompactionPicker::RegisterCompaction:Registered", [&](void* arg) { Compaction* c = static_cast(arg); - compression_used = c->output_compression(); - compression_opt_used = c->output_compression_opts(); + compression_used = c->output_compressor()->GetId(); compacted = true; }); SyncPoint::GetInstance()->EnableProcessing(); @@ -1534,15 +1537,12 @@ TEST_F(DBOptionsTest, BottommostCompressionOptsWithFallbackType) { ASSERT_OK(Flush()); } ASSERT_OK(dbfull()->TEST_WaitForCompact()); - ASSERT_TRUE(compacted); - ASSERT_EQ(CompressionType::kLZ4Compression, compression_used); - ASSERT_EQ(kBottommostCompressionLevel, compression_opt_used.level); + ASSERT_EQ(lz4, compression_used); // Second, verify for manual compaction. 
compacted = false; compression_used = CompressionType::kDisableCompressionOption; - compression_opt_used = CompressionOptions(); CompactRangeOptions cro; cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized; ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr)); @@ -1551,8 +1551,7 @@ TEST_F(DBOptionsTest, BottommostCompressionOptsWithFallbackType) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks(); ASSERT_TRUE(compacted); - ASSERT_EQ(CompressionType::kLZ4Compression, compression_used); - ASSERT_EQ(kBottommostCompressionLevel, compression_opt_used.level); + ASSERT_EQ(lz4, compression_used); } TEST_F(DBOptionsTest, FIFOTemperatureAgeThresholdValidation) { diff --git a/db/db_test.cc b/db/db_test.cc index 409eccc5fd..373deec915 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -5263,7 +5263,8 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) { "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) { Compaction* compaction = reinterpret_cast(arg); if (compaction->output_level() == 4) { - ASSERT_TRUE(compaction->output_compression() == kLZ4Compression); + ASSERT_EQ(compaction->output_compressor()->GetCompressionType(), + kLZ4Compression); num_lz4.fetch_add(1); } }); @@ -5305,10 +5306,12 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) { "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) { Compaction* compaction = reinterpret_cast(arg); if (compaction->output_level() == 4 && compaction->start_level() == 3) { - ASSERT_TRUE(compaction->output_compression() == kZlibCompression); + ASSERT_EQ(compaction->output_compressor()->GetCompressionType(), + kZlibCompression); num_zlib.fetch_add(1); } else { - ASSERT_TRUE(compaction->output_compression() == kLZ4Compression); + ASSERT_EQ(compaction->output_compressor()->GetCompressionType(), + kLZ4Compression); num_lz4.fetch_add(1); } }); diff --git a/db/db_test2.cc b/db/db_test2.cc index dc771e87a4..7b58ee79ed 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -40,6 
+40,7 @@ #include "rocksdb/utilities/replayer.h" #include "rocksdb/wal_filter.h" #include "test_util/testutil.h" +#include "util/compressor.h" #include "util/random.h" #include "utilities/fault_injection_env.h" @@ -1445,21 +1446,25 @@ TEST_F(DBTest2, PresetCompressionDictLocality) { class PresetCompressionDictTest : public DBTestBase, - public testing::WithParamInterface> { + public testing::WithParamInterface> { public: PresetCompressionDictTest() : DBTestBase("db_test2", false /* env_do_fsync */), - compression_type_(std::get<0>(GetParam())), - bottommost_(std::get<1>(GetParam())) {} + compression_name_(std::get<0>(GetParam())), + bottommost_(std::get<1>(GetParam())) { + EXPECT_TRUE( + BuiltinCompressor::StringToType(compression_name_, &compression_type_)); + } protected: - const CompressionType compression_type_; + const std::string compression_name_; + CompressionType compression_type_; const bool bottommost_; }; INSTANTIATE_TEST_CASE_P( DBTest2, PresetCompressionDictTest, - ::testing::Combine(::testing::ValuesIn(GetSupportedDictCompressions()), + ::testing::Combine(::testing::ValuesIn(Compressor::GetDictSupported()), ::testing::Bool())); TEST_P(PresetCompressionDictTest, Flush) { @@ -1707,16 +1712,19 @@ enum CompressionFailureType { class CompressionFailuresTest : public DBTest2, - public testing::WithParamInterface> { + public testing::WithParamInterface< + std::tuple> { public: CompressionFailuresTest() { - std::tie(compression_failure_type_, compression_type_, + std::tie(compression_failure_type_, compression_name_, compression_max_dict_bytes_, compression_parallel_threads_) = GetParam(); + EXPECT_TRUE( + BuiltinCompressor::StringToType(compression_name_, &compression_type_)); } CompressionFailureType compression_failure_type_ = kTestCompressionFail; + std::string compression_name_; CompressionType compression_type_ = kNoCompression; uint32_t compression_max_dict_bytes_ = 0; uint32_t compression_parallel_threads_ = 0; @@ -1727,7 +1735,7 @@ 
INSTANTIATE_TEST_CASE_P( ::testing::Combine(::testing::Values(kTestCompressionFail, kTestDecompressionFail, kTestDecompressionCorruption), - ::testing::ValuesIn(GetSupportedCompressions()), + ::testing::ValuesIn(Compressor::GetSupported()), ::testing::Values(0, 10), ::testing::Values(1, 4))); TEST_P(CompressionFailuresTest, CompressionFailures) { diff --git a/db/db_test_util.h b/db/db_test_util.h index eb80139c57..8b5158991d 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -59,7 +59,6 @@ #include "test_util/sync_point.h" #include "test_util/testharness.h" #include "util/cast_util.h" -#include "util/compression.h" #include "util/mutexlock.h" #include "util/string_util.h" #include "utilities/merge_operators.h" diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc index 44821521c5..bbd02dd7dd 100644 --- a/db/external_sst_file_ingestion_job.cc +++ b/db/external_sst_file_ingestion_job.cc @@ -550,9 +550,8 @@ void ExternalSstFileIngestionJob::CreateEquivalentFileIngestingCompactions() { */ , LLONG_MAX /* max compaction bytes, not applicable */, - 0 /* output path ID, not applicable */, mutable_cf_options.compression, - mutable_cf_options.compression_opts, Temperature::kUnknown, - 0 /* max_subcompaction, not applicable */, + 0 /* output path ID, not applicable */, mutable_cf_options.compressor, + Temperature::kUnknown, 0 /* max_subcompaction, not applicable */, {} /* grandparents, not applicable */, false /* is manual */, "" /* trim_ts */, -1 /* score, not applicable */, false /* is deletion compaction, not applicable */, diff --git a/db/flush_job.cc b/db/flush_job.cc index 827523d908..2e5328d56c 100644 --- a/db/flush_job.cc +++ b/db/flush_job.cc @@ -60,6 +60,7 @@ #include "util/coding.h" #include "util/mutexlock.h" #include "util/stop_watch.h" +#include "util/string_util.h" namespace ROCKSDB_NAMESPACE { @@ -112,8 +113,9 @@ FlushJob::FlushJob( SequenceNumber earliest_write_conflict_snapshot, SnapshotChecker* snapshot_checker, 
JobContext* job_context, FlushReason flush_reason, LogBuffer* log_buffer, FSDirectory* db_directory, - FSDirectory* output_file_directory, CompressionType output_compression, - Statistics* stats, EventLogger* event_logger, bool measure_io_stats, + FSDirectory* output_file_directory, + const std::shared_ptr& output_compressor, Statistics* stats, + EventLogger* event_logger, bool measure_io_stats, const bool sync_output_directory, const bool write_manifest, Env::Priority thread_pri, const std::shared_ptr& io_tracer, const SeqnoToTimeMapping& seqno_time_mapping, const std::string& db_id, @@ -138,7 +140,7 @@ FlushJob::FlushJob( log_buffer_(log_buffer), db_directory_(db_directory), output_file_directory_(output_file_directory), - output_compression_(output_compression), + output_compressor_(output_compressor), stats_(stats), event_logger_(event_logger), measure_io_stats_(measure_io_stats), @@ -354,8 +356,7 @@ Status FlushJob::Run(LogsWithPrepTracker* prep_tracker, FileMetaData* file_meta, auto stream = event_logger_->LogToBuffer(log_buffer_, 1024); stream << "job" << job_context_->job_id << "event" << "flush_finished"; - stream << "output_compression" - << CompressionTypeToString(output_compression_); + stream << "output_compression" << output_compressor_->GetId(); stream << "lsm_state"; stream.StartArray(); auto vstorage = cfd_->current()->storage_info(); @@ -940,8 +941,11 @@ Status FlushJob::WriteLevel0Table() { cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber()); + CompressionType output_compression_type = + output_compressor_->GetCompressionType(); + (void)output_compression_type; TEST_SYNC_POINT_CALLBACK("FlushJob::WriteLevel0Table:output_compression", - &output_compression_); + &output_compression_type); int64_t _current_time = 0; auto status = clock_->GetCurrentTime(&_current_time); // Safe to proceed even if GetCurrentTime fails. So, log and proceed. @@ -975,12 +979,12 @@ Status FlushJob::WriteLevel0Table() { (full_history_ts_low_.empty()) ? 
nullptr : &full_history_ts_low_; TableBuilderOptions tboptions( *cfd_->ioptions(), mutable_cf_options_, cfd_->internal_comparator(), - cfd_->int_tbl_prop_collector_factories(), output_compression_, - mutable_cf_options_.compression_opts, cfd_->GetID(), cfd_->GetName(), - 0 /* level */, false /* is_bottommost */, - false /* is_last_level_with_data */, TableFileCreationReason::kFlush, - oldest_key_time, current_time, db_id_, db_session_id_, - 0 /* target_file_size */, meta_.fd.GetNumber()); + cfd_->int_tbl_prop_collector_factories(), output_compressor_, + cfd_->GetID(), cfd_->GetName(), 0 /* level */, + false /* is_bottommost */, false /* is_last_level_with_data */, + TableFileCreationReason::kFlush, oldest_key_time, current_time, + db_id_, db_session_id_, 0 /* target_file_size */, + meta_.fd.GetNumber()); const SequenceNumber job_snapshot_seq = job_context_->GetJobSnapshotSequence(); const ReadOptions read_options(Env::IOActivity::kFlush); @@ -1129,7 +1133,8 @@ std::unique_ptr FlushJob::GetFlushJobInfo() const { info->largest_seqno = meta_.fd.largest_seqno; info->table_properties = table_properties_; info->flush_reason = flush_reason_; - info->blob_compression_type = mutable_cf_options_.blob_compression_type; + info->blob_compression_type = + mutable_cf_options_.blob_compressor->GetCompressionType(); // Update BlobFilesInfo. 
for (const auto& blob_file : edit_->GetBlobFileAdditions()) { diff --git a/db/flush_job.h b/db/flush_job.h index 12a8f2fdf6..5816051152 100644 --- a/db/flush_job.h +++ b/db/flush_job.h @@ -83,8 +83,8 @@ class FlushJob { SnapshotChecker* snapshot_checker, JobContext* job_context, FlushReason flush_reason, LogBuffer* log_buffer, FSDirectory* db_directory, FSDirectory* output_file_directory, - CompressionType output_compression, Statistics* stats, - EventLogger* event_logger, bool measure_io_stats, + const std::shared_ptr& output_compressor, + Statistics* stats, EventLogger* event_logger, bool measure_io_stats, const bool sync_output_directory, const bool write_manifest, Env::Priority thread_pri, const std::shared_ptr& io_tracer, const SeqnoToTimeMapping& seq_time_mapping, @@ -183,7 +183,7 @@ class FlushJob { LogBuffer* log_buffer_; FSDirectory* db_directory_; FSDirectory* output_file_directory_; - CompressionType output_compression_; + std::shared_ptr output_compressor_; Statistics* stats_; EventLogger* event_logger_; TableProperties table_properties_; diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc index 16796ac354..f9140e4c48 100644 --- a/db/flush_job_test.cc +++ b/db/flush_job_test.cc @@ -186,15 +186,16 @@ TEST_F(FlushJobTest, Empty) { auto cfd = versions_->GetColumnFamilySet()->GetDefault(); EventLogger event_logger(db_options_.info_log.get()); SnapshotChecker* snapshot_checker = nullptr; // not relavant - FlushJob flush_job( - dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_, - *cfd->GetLatestMutableCFOptions(), - std::numeric_limits::max() /* memtable_id */, env_options_, - versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber, - snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, nullptr, &event_logger, false, - true /* sync_output_directory */, true /* write_manifest */, - Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_); + FlushJob flush_job(dbname_, 
versions_->GetColumnFamilySet()->GetDefault(), + db_options_, *cfd->GetLatestMutableCFOptions(), + std::numeric_limits::max() /* memtable_id */, + env_options_, versions_.get(), &mutex_, &shutting_down_, + {}, kMaxSequenceNumber, snapshot_checker, &job_context, + FlushReason::kTest, nullptr, nullptr, nullptr, + BuiltinCompressor::GetCompressor(kNoCompression), nullptr, + &event_logger, false, true /* sync_output_directory */, + true /* write_manifest */, Env::Priority::USER, + nullptr /*IOTracer*/, empty_seqno_to_time_mapping_); { InstrumentedMutexLock l(&mutex_); flush_job.PickMemTable(); @@ -278,8 +279,9 @@ TEST_F(FlushJobTest, NonEmpty) { std::numeric_limits::max() /* memtable_id */, env_options_, versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, db_options_.statistics.get(), &event_logger, - true, true /* sync_output_directory */, true /* write_manifest */, + nullptr, BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, + true /* sync_output_directory */, true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_); HistogramData hist; @@ -341,8 +343,9 @@ TEST_F(FlushJobTest, FlushMemTablesSingleColumnFamily) { *cfd->GetLatestMutableCFOptions(), flush_memtable_id, env_options_, versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, db_options_.statistics.get(), &event_logger, - true, true /* sync_output_directory */, true /* write_manifest */, + nullptr, BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, + true /* sync_output_directory */, true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_); HistogramData hist; FileMetaData file_meta; @@ -414,7 +417,8 @@ 
TEST_F(FlushJobTest, FlushMemtablesMultipleColumnFamilies) { memtable_ids[k], env_options_, versions_.get(), &mutex_, &shutting_down_, snapshot_seqs, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, nullptr, - kNoCompression, db_options_.statistics.get(), &event_logger, true, + BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, false /* sync_output_directory */, false /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_)); @@ -541,8 +545,9 @@ TEST_F(FlushJobTest, Snapshots) { std::numeric_limits::max() /* memtable_id */, env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, db_options_.statistics.get(), &event_logger, - true, true /* sync_output_directory */, true /* write_manifest */, + nullptr, BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, + true /* sync_output_directory */, true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_); mutex_.Lock(); flush_job.PickMemTable(); @@ -597,8 +602,9 @@ TEST_F(FlushJobTest, GetRateLimiterPriorityForWrite) { *cfd->GetLatestMutableCFOptions(), flush_memtable_id, env_options_, versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, db_options_.statistics.get(), &event_logger, - true, true /* sync_output_directory */, true /* write_manifest */, + nullptr, BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, + true /* sync_output_directory */, true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_); // When the state from WriteController is normal. 
@@ -715,8 +721,9 @@ TEST_P(FlushJobTimestampTest, AllKeysExpired) { std::numeric_limits::max() /* memtable_id */, env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, db_options_.statistics.get(), &event_logger, - true, true /* sync_output_directory */, true /* write_manifest */, + nullptr, BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, + true /* sync_output_directory */, true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_, /*db_id=*/"", /*db_session_id=*/"", full_history_ts_low); @@ -777,8 +784,9 @@ TEST_P(FlushJobTimestampTest, NoKeyExpired) { std::numeric_limits::max() /* memtable_id */, env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker, &job_context, FlushReason::kTest, nullptr, nullptr, - nullptr, kNoCompression, db_options_.statistics.get(), &event_logger, - true, true /* sync_output_directory */, true /* write_manifest */, + nullptr, BuiltinCompressor::GetCompressor(kNoCompression), + db_options_.statistics.get(), &event_logger, true, + true /* sync_output_directory */, true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/, empty_seqno_to_time_mapping_, /*db_id=*/"", /*db_session_id=*/"", full_history_ts_low); diff --git a/db/repair.cc b/db/repair.cc index 4c3d390de3..af01673fce 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -476,12 +476,11 @@ class Repairer { } IOStatus io_s; - CompressionOptions default_compression; TableBuilderOptions tboptions( *cfd->ioptions(), *cfd->GetLatestMutableCFOptions(), cfd->internal_comparator(), cfd->int_tbl_prop_collector_factories(), - kNoCompression, default_compression, cfd->GetID(), cfd->GetName(), - -1 /* level */, false /* is_bottommost */, + BuiltinCompressor::GetCompressor(kNoCompression), cfd->GetID(), + 
cfd->GetName(), -1 /* level */, false /* is_bottommost */, false /* is_last_level_with_data */, TableFileCreationReason::kRecovery, 0 /* oldest_key_time */, 0 /* file_creation_time */, "DB Repairer" /* db_id */, db_session_id_, diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc index 437b7e3090..a230d0ab2f 100644 --- a/db/table_properties_collector_test.cc +++ b/db/table_properties_collector_test.cc @@ -43,8 +43,7 @@ static const std::string kTestColumnFamilyName = "test_column_fam"; static const int kTestLevel = 1; void MakeBuilder( - const Options& options, const ImmutableOptions& ioptions, - const MutableCFOptions& moptions, + const ImmutableOptions& ioptions, const MutableCFOptions& moptions, const InternalKeyComparator& internal_comparator, const IntTblPropCollectorFactories* int_tbl_prop_collector_factories, std::unique_ptr* writable, @@ -52,10 +51,10 @@ void MakeBuilder( std::unique_ptr wf(new test::StringSink); writable->reset( new WritableFileWriter(std::move(wf), "" /* don't care */, EnvOptions())); - TableBuilderOptions tboptions( - ioptions, moptions, internal_comparator, int_tbl_prop_collector_factories, - options.compression, options.compression_opts, kTestColumnFamilyId, - kTestColumnFamilyName, kTestLevel); + TableBuilderOptions tboptions(ioptions, moptions, internal_comparator, + int_tbl_prop_collector_factories, + moptions.compressor, kTestColumnFamilyId, + kTestColumnFamilyName, kTestLevel); builder->reset(NewTableBuilder(tboptions, writable->get())); } } // namespace @@ -272,7 +271,7 @@ void TestCustomizedTablePropertiesCollector( } else { GetIntTblPropCollectorFactory(ioptions, &int_tbl_prop_collector_factories); } - MakeBuilder(options, ioptions, moptions, internal_comparator, + MakeBuilder(ioptions, moptions, internal_comparator, &int_tbl_prop_collector_factories, &writer, &builder); SequenceNumber seqNum = 0U; @@ -413,8 +412,8 @@ void TestInternalKeyPropertiesCollector( MutableCFOptions 
moptions(options); for (int iter = 0; iter < 2; ++iter) { - MakeBuilder(options, ioptions, moptions, pikc, - &int_tbl_prop_collector_factories, &writable, &builder); + MakeBuilder(ioptions, moptions, pikc, &int_tbl_prop_collector_factories, + &writable, &builder); for (const auto& k : keys) { builder->Add(k.Encode(), "val"); } diff --git a/db/version_edit.h b/db/version_edit.h index e6d54d31d1..ee894c4a88 100644 --- a/db/version_edit.h +++ b/db/version_edit.h @@ -25,6 +25,7 @@ #include "rocksdb/advanced_options.h" #include "table/table_reader.h" #include "table/unique_id_impl.h" +#include "test_util/sync_point.h" #include "util/autovector.h" namespace ROCKSDB_NAMESPACE { diff --git a/db/version_set.cc b/db/version_set.cc index 7db376d168..7b3cecbe87 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -2264,7 +2264,8 @@ Status Version::GetBlob(const ReadOptions& read_options, const Slice& user_key, const Status s = blob_source_->GetBlob( read_options, user_key, blob_file_number, blob_index.offset(), blob_file_meta->GetBlobFileSize(), blob_index.size(), - blob_index.compression(), prefetch_buffer, value, bytes_read); + BuiltinCompressor::GetCompressor(blob_index.compression()), + prefetch_buffer, value, bytes_read); return s; } @@ -2307,9 +2308,11 @@ void Version::MultiGetBlob( continue; } + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(blob_index.compression()); blob_reqs_in_file.emplace_back( key_context->get_context->ukey_to_get_blob_value(), - blob_index.offset(), blob_index.size(), blob_index.compression(), + blob_index.offset(), blob_index.size(), compressor.get(), &blob.result, key_context->s); } if (blob_reqs_in_file.size() > 0) { diff --git a/db/version_set_test.cc b/db/version_set_test.cc index 0a01c1fb21..330636c6d7 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -3302,8 +3302,8 @@ class VersionSetTestMissingFiles : public VersionSetTestBase, std::unique_ptr builder(table_factory_->NewTableBuilder( 
TableBuilderOptions( immutable_options_, mutable_cf_options_, *internal_comparator_, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), + &int_tbl_prop_collector_factories, + BuiltinCompressor::GetCompressor(kNoCompression), TablePropertiesCollectorFactory::Context::kUnknownColumnFamily, info.column_family, info.level), fwriter.get())); diff --git a/env/env.cc b/env/env.cc index a3956324c7..1710988ee3 100644 --- a/env/env.cc +++ b/env/env.cc @@ -40,6 +40,7 @@ #include "rocksdb/utilities/object_registry.h" #include "rocksdb/utilities/options_type.h" #include "util/autovector.h" +#include "util/string_util.h" namespace ROCKSDB_NAMESPACE { namespace { diff --git a/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h index 6542ca42a9..5d41a84676 100644 --- a/include/rocksdb/advanced_options.h +++ b/include/rocksdb/advanced_options.h @@ -75,6 +75,8 @@ enum CompactionPri : char { // Compression options for different compression algorithms like Zlib struct CompressionOptions { + static const char* kName() { return "CompressionOptions"; } + // ==> BEGIN options that can be set by deprecated configuration syntax, <== // ==> e.g. compression_opts=5:6:7:8:9:10:true:11:false <== // ==> Please use compression_opts={level=6;strategy=7;} form instead. <== diff --git a/include/rocksdb/memory_allocator.h b/include/rocksdb/memory_allocator.h index d126abfe6d..fc0d3a81a8 100644 --- a/include/rocksdb/memory_allocator.h +++ b/include/rocksdb/memory_allocator.h @@ -39,6 +39,30 @@ class MemoryAllocator : public Customizable { std::string GetId() const override { return GenerateIndividualId(); } }; +// Allocate a buffer using the specified or default memory allocator. +// @param size Buffer size. +// @param allocator MemoryAllocator to use. If nullptr, the default allocator is +// used (new operator). Returns a pointer to the buffer. 
+inline char* Allocate(size_t size, MemoryAllocator* allocator) { + if (allocator) { + return reinterpret_cast(allocator->Allocate(size)); + } + return new char[size]; +} + +// Deallocate a buffer allocated by a specified or default memory allocator. +// @param p Pointer to the buffer to deallocate. This is the pointer that was +// returned by Allocate. +// @param allocator MemoryAllocator that was used for allocation. If nullptr, +// the default allocator was used. +inline void Deallocate(char* p, MemoryAllocator* allocator) { + if (allocator) { + allocator->Deallocate(p); + } else { + delete[] p; + } +} + struct JemallocAllocatorOptions { static const char* kName() { return "JemallocAllocatorOptions"; } // Jemalloc tcache cache allocations by size class. For each size class, diff --git a/options/cf_options.cc b/options/cf_options.cc index 86a524f515..365c084aee 100644 --- a/options/cf_options.cc +++ b/options/cf_options.cc @@ -43,6 +43,9 @@ #include "rocksdb/utilities/object_registry.h" #include "rocksdb/utilities/options_type.h" #include "util/cast_util.h" +#include "util/compression.h" +#include "util/compressor.h" +#include "util/string_util.h" // NOTE: in this file, many option flags that were deprecated // and removed from the rest of the code have to be kept here @@ -1046,6 +1049,129 @@ size_t MaxFileSizeForL0MetaPin(const MutableCFOptions& cf_options) { return cf_options.write_buffer_size / 2 * 3; } +MutableCFOptions::MutableCFOptions(const ColumnFamilyOptions& options) + : write_buffer_size(options.write_buffer_size), + max_write_buffer_number(options.max_write_buffer_number), + arena_block_size(options.arena_block_size), + memtable_prefix_bloom_size_ratio( + options.memtable_prefix_bloom_size_ratio), + memtable_whole_key_filtering(options.memtable_whole_key_filtering), + memtable_huge_page_size(options.memtable_huge_page_size), + max_successive_merges(options.max_successive_merges), + inplace_update_num_locks(options.inplace_update_num_locks), + 
prefix_extractor(options.prefix_extractor), + experimental_mempurge_threshold(options.experimental_mempurge_threshold), + disable_auto_compactions(options.disable_auto_compactions), + soft_pending_compaction_bytes_limit( + options.soft_pending_compaction_bytes_limit), + hard_pending_compaction_bytes_limit( + options.hard_pending_compaction_bytes_limit), + level0_file_num_compaction_trigger( + options.level0_file_num_compaction_trigger), + level0_slowdown_writes_trigger(options.level0_slowdown_writes_trigger), + level0_stop_writes_trigger(options.level0_stop_writes_trigger), + max_compaction_bytes(options.max_compaction_bytes), + ignore_max_compaction_bytes_for_input( + options.ignore_max_compaction_bytes_for_input), + target_file_size_base(options.target_file_size_base), + target_file_size_multiplier(options.target_file_size_multiplier), + max_bytes_for_level_base(options.max_bytes_for_level_base), + max_bytes_for_level_multiplier(options.max_bytes_for_level_multiplier), + ttl(options.ttl), + periodic_compaction_seconds(options.periodic_compaction_seconds), + max_bytes_for_level_multiplier_additional( + options.max_bytes_for_level_multiplier_additional), + compaction_options_fifo(options.compaction_options_fifo), + compaction_options_universal(options.compaction_options_universal), + enable_blob_files(options.enable_blob_files), + min_blob_size(options.min_blob_size), + blob_file_size(options.blob_file_size), + blob_compression_type(options.blob_compression_type), + enable_blob_garbage_collection(options.enable_blob_garbage_collection), + blob_garbage_collection_age_cutoff( + options.blob_garbage_collection_age_cutoff), + blob_garbage_collection_force_threshold( + options.blob_garbage_collection_force_threshold), + blob_compaction_readahead_size(options.blob_compaction_readahead_size), + blob_file_starting_level(options.blob_file_starting_level), + prepopulate_blob_cache(options.prepopulate_blob_cache), + max_sequential_skip_in_iterations( + 
options.max_sequential_skip_in_iterations), + check_flush_compaction_key_order( + options.check_flush_compaction_key_order), + paranoid_file_checks(options.paranoid_file_checks), + report_bg_io_stats(options.report_bg_io_stats), + compression(options.compression), + bottommost_compression(options.bottommost_compression), + compression_opts(options.compression_opts), + bottommost_compression_opts(options.bottommost_compression_opts), + last_level_temperature(options.last_level_temperature == + Temperature::kUnknown + ? options.bottommost_temperature + : options.last_level_temperature), + memtable_protection_bytes_per_key( + options.memtable_protection_bytes_per_key), + block_protection_bytes_per_key(options.block_protection_bytes_per_key), + sample_for_compression( + options.sample_for_compression), // TODO: is 0 fine here? + compression_per_level(options.compression_per_level), + memtable_max_range_deletions(options.memtable_max_range_deletions), + bottommost_file_compaction_delay( + options.bottommost_file_compaction_delay) { + RefreshDerivedOptions(options.num_levels, options.compaction_style); +} + +MutableCFOptions::MutableCFOptions() + : write_buffer_size(0), + max_write_buffer_number(0), + arena_block_size(0), + memtable_prefix_bloom_size_ratio(0), + memtable_whole_key_filtering(false), + memtable_huge_page_size(0), + max_successive_merges(0), + inplace_update_num_locks(0), + prefix_extractor(nullptr), + experimental_mempurge_threshold(0.0), + disable_auto_compactions(false), + soft_pending_compaction_bytes_limit(0), + hard_pending_compaction_bytes_limit(0), + level0_file_num_compaction_trigger(0), + level0_slowdown_writes_trigger(0), + level0_stop_writes_trigger(0), + max_compaction_bytes(0), + ignore_max_compaction_bytes_for_input(true), + target_file_size_base(0), + target_file_size_multiplier(0), + max_bytes_for_level_base(0), + max_bytes_for_level_multiplier(0), + ttl(0), + periodic_compaction_seconds(0), + compaction_options_fifo(), + 
enable_blob_files(false), + min_blob_size(0), + blob_file_size(0), + blob_compression_type(kNoCompression), + blob_compressor(nullptr), + enable_blob_garbage_collection(false), + blob_garbage_collection_age_cutoff(0.0), + blob_garbage_collection_force_threshold(0.0), + blob_compaction_readahead_size(0), + blob_file_starting_level(0), + prepopulate_blob_cache(PrepopulateBlobCache::kDisable), + max_sequential_skip_in_iterations(0), + check_flush_compaction_key_order(true), + paranoid_file_checks(false), + report_bg_io_stats(false), + compression(Snappy_Supported() ? kSnappyCompression : kNoCompression), + compressor(nullptr), + bottommost_compression(kDisableCompressionOption), + bottommost_compressor(nullptr), + last_level_temperature(Temperature::kUnknown), + memtable_protection_bytes_per_key(0), + block_protection_bytes_per_key(0), + sample_for_compression(0), + memtable_max_range_deletions(0) {} + void MutableCFOptions::RefreshDerivedOptions(int num_levels, CompactionStyle compaction_style) { max_file_size.resize(num_levels); @@ -1059,6 +1185,28 @@ void MutableCFOptions::RefreshDerivedOptions(int num_levels, max_file_size[i] = target_file_size_base; } } + compressor = BuiltinCompressor::GetCompressor(compression, compression_opts); + + if (bottommost_compression != kDisableCompressionOption) { + if (bottommost_compression_opts.enabled) { + bottommost_compressor = BuiltinCompressor::GetCompressor( + bottommost_compression, bottommost_compression_opts); + } else { + bottommost_compressor = BuiltinCompressor::GetCompressor( + bottommost_compression, compression_opts); + } + } + + if (blob_compression_type != kDisableCompressionOption) { + blob_compressor = BuiltinCompressor::GetCompressor(blob_compression_type, + compression_opts); + } + if (compressor_per_level.empty()) { + for (auto type : compression_per_level) { + compressor_per_level.push_back( + BuiltinCompressor::GetCompressor(type, compression_opts)); + } + } } void MutableCFOptions::Dump(Logger* log) const 
{ diff --git a/options/cf_options.h b/options/cf_options.h index ca6511c4a9..ddce608a79 100644 --- a/options/cf_options.h +++ b/options/cf_options.h @@ -25,9 +25,9 @@ #include "db/dbformat.h" #include "options/db_options.h" #include "rocksdb/options.h" -#include "util/compression.h" namespace ROCKSDB_NAMESPACE { +class Compressor; // ImmutableCFOptions is a data struct used by RocksDB internal. It contains a // subset of Options that should not be changed during the entire lifetime @@ -125,127 +125,8 @@ struct ImmutableOptions : public ImmutableDBOptions, public ImmutableCFOptions { struct MutableCFOptions { static const char* kName() { return "MutableCFOptions"; } - explicit MutableCFOptions(const ColumnFamilyOptions& options) - : write_buffer_size(options.write_buffer_size), - max_write_buffer_number(options.max_write_buffer_number), - arena_block_size(options.arena_block_size), - memtable_prefix_bloom_size_ratio( - options.memtable_prefix_bloom_size_ratio), - memtable_whole_key_filtering(options.memtable_whole_key_filtering), - memtable_huge_page_size(options.memtable_huge_page_size), - max_successive_merges(options.max_successive_merges), - inplace_update_num_locks(options.inplace_update_num_locks), - prefix_extractor(options.prefix_extractor), - experimental_mempurge_threshold( - options.experimental_mempurge_threshold), - disable_auto_compactions(options.disable_auto_compactions), - soft_pending_compaction_bytes_limit( - options.soft_pending_compaction_bytes_limit), - hard_pending_compaction_bytes_limit( - options.hard_pending_compaction_bytes_limit), - level0_file_num_compaction_trigger( - options.level0_file_num_compaction_trigger), - level0_slowdown_writes_trigger(options.level0_slowdown_writes_trigger), - level0_stop_writes_trigger(options.level0_stop_writes_trigger), - max_compaction_bytes(options.max_compaction_bytes), - ignore_max_compaction_bytes_for_input( - options.ignore_max_compaction_bytes_for_input), - 
target_file_size_base(options.target_file_size_base), - target_file_size_multiplier(options.target_file_size_multiplier), - max_bytes_for_level_base(options.max_bytes_for_level_base), - max_bytes_for_level_multiplier(options.max_bytes_for_level_multiplier), - ttl(options.ttl), - periodic_compaction_seconds(options.periodic_compaction_seconds), - max_bytes_for_level_multiplier_additional( - options.max_bytes_for_level_multiplier_additional), - compaction_options_fifo(options.compaction_options_fifo), - compaction_options_universal(options.compaction_options_universal), - enable_blob_files(options.enable_blob_files), - min_blob_size(options.min_blob_size), - blob_file_size(options.blob_file_size), - blob_compression_type(options.blob_compression_type), - enable_blob_garbage_collection(options.enable_blob_garbage_collection), - blob_garbage_collection_age_cutoff( - options.blob_garbage_collection_age_cutoff), - blob_garbage_collection_force_threshold( - options.blob_garbage_collection_force_threshold), - blob_compaction_readahead_size(options.blob_compaction_readahead_size), - blob_file_starting_level(options.blob_file_starting_level), - prepopulate_blob_cache(options.prepopulate_blob_cache), - max_sequential_skip_in_iterations( - options.max_sequential_skip_in_iterations), - check_flush_compaction_key_order( - options.check_flush_compaction_key_order), - paranoid_file_checks(options.paranoid_file_checks), - report_bg_io_stats(options.report_bg_io_stats), - compression(options.compression), - bottommost_compression(options.bottommost_compression), - compression_opts(options.compression_opts), - bottommost_compression_opts(options.bottommost_compression_opts), - last_level_temperature(options.last_level_temperature == - Temperature::kUnknown - ? 
options.bottommost_temperature - : options.last_level_temperature), - memtable_protection_bytes_per_key( - options.memtable_protection_bytes_per_key), - block_protection_bytes_per_key(options.block_protection_bytes_per_key), - sample_for_compression( - options.sample_for_compression), // TODO: is 0 fine here? - compression_per_level(options.compression_per_level), - memtable_max_range_deletions(options.memtable_max_range_deletions), - bottommost_file_compaction_delay( - options.bottommost_file_compaction_delay) { - RefreshDerivedOptions(options.num_levels, options.compaction_style); - } - - MutableCFOptions() - : write_buffer_size(0), - max_write_buffer_number(0), - arena_block_size(0), - memtable_prefix_bloom_size_ratio(0), - memtable_whole_key_filtering(false), - memtable_huge_page_size(0), - max_successive_merges(0), - inplace_update_num_locks(0), - prefix_extractor(nullptr), - experimental_mempurge_threshold(0.0), - disable_auto_compactions(false), - soft_pending_compaction_bytes_limit(0), - hard_pending_compaction_bytes_limit(0), - level0_file_num_compaction_trigger(0), - level0_slowdown_writes_trigger(0), - level0_stop_writes_trigger(0), - max_compaction_bytes(0), - ignore_max_compaction_bytes_for_input(true), - target_file_size_base(0), - target_file_size_multiplier(0), - max_bytes_for_level_base(0), - max_bytes_for_level_multiplier(0), - ttl(0), - periodic_compaction_seconds(0), - compaction_options_fifo(), - enable_blob_files(false), - min_blob_size(0), - blob_file_size(0), - blob_compression_type(kNoCompression), - enable_blob_garbage_collection(false), - blob_garbage_collection_age_cutoff(0.0), - blob_garbage_collection_force_threshold(0.0), - blob_compaction_readahead_size(0), - blob_file_starting_level(0), - prepopulate_blob_cache(PrepopulateBlobCache::kDisable), - max_sequential_skip_in_iterations(0), - check_flush_compaction_key_order(true), - paranoid_file_checks(false), - report_bg_io_stats(false), - compression(Snappy_Supported() ? 
kSnappyCompression : kNoCompression), - bottommost_compression(kDisableCompressionOption), - last_level_temperature(Temperature::kUnknown), - memtable_protection_bytes_per_key(0), - block_protection_bytes_per_key(0), - sample_for_compression(0), - memtable_max_range_deletions(0) {} - + MutableCFOptions(); + explicit MutableCFOptions(const ColumnFamilyOptions& options); explicit MutableCFOptions(const Options& options); // Must be called after any change to MutableCFOptions @@ -317,6 +198,7 @@ struct MutableCFOptions { uint64_t min_blob_size; uint64_t blob_file_size; CompressionType blob_compression_type; + std::shared_ptr blob_compressor; bool enable_blob_garbage_collection; double blob_garbage_collection_age_cutoff; double blob_garbage_collection_force_threshold; @@ -330,7 +212,9 @@ struct MutableCFOptions { bool paranoid_file_checks; bool report_bg_io_stats; CompressionType compression; + std::shared_ptr compressor; CompressionType bottommost_compression; + std::shared_ptr bottommost_compressor; CompressionOptions compression_opts; CompressionOptions bottommost_compression_opts; Temperature last_level_temperature; @@ -339,6 +223,7 @@ struct MutableCFOptions { uint64_t sample_for_compression; std::vector compression_per_level; + std::vector> compressor_per_level; uint32_t memtable_max_range_deletions; uint32_t bottommost_file_compaction_delay; diff --git a/options/options.cc b/options/options.cc index 20cd6d08fc..748b8faebe 100644 --- a/options/options.cc +++ b/options/options.cc @@ -28,6 +28,7 @@ #include "logging/logging.h" #include "monitoring/statistics_impl.h" +#include "options/cf_options.h" #include "options/db_options.h" #include "options/options_helper.h" #include "rocksdb/cache.h" @@ -182,73 +183,21 @@ void ColumnFamilyOptions::Dump(Logger* log) const { write_buffer_size); ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number: %d", max_write_buffer_number); - if (!compression_per_level.empty()) { - for (unsigned int i = 0; i < 
compression_per_level.size(); i++) { - ROCKS_LOG_HEADER( - log, " Options.compression[%d]: %s", i, - CompressionTypeToString(compression_per_level[i]).c_str()); - } - } else { - ROCKS_LOG_HEADER(log, " Options.compression: %s", - CompressionTypeToString(compression).c_str()); + MutableCFOptions moptions(*this); + ConfigOptions config_options; + + if (!moptions.compressor_per_level.empty()) { + for (unsigned int i = 0; i < moptions.compressor_per_level.size(); i++) { + ROCKS_LOG_HEADER(log, " Options.compression[%d]: %s", i, + moptions.compressor_per_level[i]->GetId().c_str()); } + } else if (moptions.compressor) { + ROCKS_LOG_HEADER(log, " Options.compression: %s", + moptions.compressor->ToString(config_options).c_str()); + } else { ROCKS_LOG_HEADER( - log, " Options.bottommost_compression: %s", - bottommost_compression == kDisableCompressionOption - ? "Disabled" - : CompressionTypeToString(bottommost_compression).c_str()); - ROCKS_LOG_HEADER( - log, " Options.prefix_extractor: %s", - prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name()); - ROCKS_LOG_HEADER(log, - " Options.memtable_insert_with_hint_prefix_extractor: %s", - memtable_insert_with_hint_prefix_extractor == nullptr - ? 
"nullptr" - : memtable_insert_with_hint_prefix_extractor->Name()); - ROCKS_LOG_HEADER(log, " Options.num_levels: %d", num_levels); - ROCKS_LOG_HEADER(log, " Options.min_write_buffer_number_to_merge: %d", - min_write_buffer_number_to_merge); - ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number_to_maintain: %d", - max_write_buffer_number_to_maintain); - ROCKS_LOG_HEADER(log, - " Options.max_write_buffer_size_to_maintain: %" PRIu64, - max_write_buffer_size_to_maintain); - ROCKS_LOG_HEADER( - log, " Options.bottommost_compression_opts.window_bits: %d", - bottommost_compression_opts.window_bits); - ROCKS_LOG_HEADER( - log, " Options.bottommost_compression_opts.level: %d", - bottommost_compression_opts.level); - ROCKS_LOG_HEADER( - log, " Options.bottommost_compression_opts.strategy: %d", - bottommost_compression_opts.strategy); - ROCKS_LOG_HEADER( - log, - " Options.bottommost_compression_opts.max_dict_bytes: " - "%" PRIu32, - bottommost_compression_opts.max_dict_bytes); - ROCKS_LOG_HEADER( - log, - " Options.bottommost_compression_opts.zstd_max_train_bytes: " - "%" PRIu32, - bottommost_compression_opts.zstd_max_train_bytes); - ROCKS_LOG_HEADER( - log, - " Options.bottommost_compression_opts.parallel_threads: " - "%" PRIu32, - bottommost_compression_opts.parallel_threads); - ROCKS_LOG_HEADER( - log, " Options.bottommost_compression_opts.enabled: %s", - bottommost_compression_opts.enabled ? "true" : "false"); - ROCKS_LOG_HEADER( - log, - " Options.bottommost_compression_opts.max_dict_buffer_bytes: " - "%" PRIu64, - bottommost_compression_opts.max_dict_buffer_bytes); - ROCKS_LOG_HEADER( - log, - " Options.bottommost_compression_opts.use_zstd_dict_trainer: %s", - bottommost_compression_opts.use_zstd_dict_trainer ? 
"true" : "false"); + log, " Options.compression: %s", + BuiltinCompressor::TypeToString(moptions.compression).c_str()); ROCKS_LOG_HEADER(log, " Options.compression_opts.window_bits: %d", compression_opts.window_bits); ROCKS_LOG_HEADER(log, " Options.compression_opts.level: %d", @@ -256,8 +205,7 @@ void ColumnFamilyOptions::Dump(Logger* log) const { ROCKS_LOG_HEADER(log, " Options.compression_opts.strategy: %d", compression_opts.strategy); ROCKS_LOG_HEADER( - log, - " Options.compression_opts.max_dict_bytes: %" PRIu32, + log, " Options.compression_opts.max_dict_bytes: %" PRIu32, compression_opts.max_dict_bytes); ROCKS_LOG_HEADER(log, " Options.compression_opts.zstd_max_train_bytes: " @@ -277,211 +225,264 @@ void ColumnFamilyOptions::Dump(Logger* log) const { " Options.compression_opts.max_dict_buffer_bytes: " "%" PRIu64, compression_opts.max_dict_buffer_bytes); - ROCKS_LOG_HEADER(log, " Options.level0_file_num_compaction_trigger: %d", - level0_file_num_compaction_trigger); - ROCKS_LOG_HEADER(log, " Options.level0_slowdown_writes_trigger: %d", - level0_slowdown_writes_trigger); - ROCKS_LOG_HEADER(log, " Options.level0_stop_writes_trigger: %d", - level0_stop_writes_trigger); - ROCKS_LOG_HEADER( - log, " Options.target_file_size_base: %" PRIu64, - target_file_size_base); - ROCKS_LOG_HEADER(log, " Options.target_file_size_multiplier: %d", - target_file_size_multiplier); - ROCKS_LOG_HEADER( - log, " Options.max_bytes_for_level_base: %" PRIu64, - max_bytes_for_level_base); - ROCKS_LOG_HEADER(log, "Options.level_compaction_dynamic_level_bytes: %d", - level_compaction_dynamic_level_bytes); - ROCKS_LOG_HEADER(log, " Options.max_bytes_for_level_multiplier: %f", - max_bytes_for_level_multiplier); - for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size(); - i++) { - ROCKS_LOG_HEADER( - log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt - "]: %d", - i, max_bytes_for_level_multiplier_additional[i]); - } - ROCKS_LOG_HEADER( - log, " 
Options.max_sequential_skip_in_iterations: %" PRIu64, - max_sequential_skip_in_iterations); - ROCKS_LOG_HEADER( - log, " Options.max_compaction_bytes: %" PRIu64, - max_compaction_bytes); - ROCKS_LOG_HEADER(log, " Options.ignore_max_compaction_bytes_for_input: %s", - ignore_max_compaction_bytes_for_input ? "true" : "false"); - ROCKS_LOG_HEADER( - log, - " Options.arena_block_size: %" ROCKSDB_PRIszt, - arena_block_size); - ROCKS_LOG_HEADER(log, - " Options.soft_pending_compaction_bytes_limit: %" PRIu64, - soft_pending_compaction_bytes_limit); - ROCKS_LOG_HEADER(log, - " Options.hard_pending_compaction_bytes_limit: %" PRIu64, - hard_pending_compaction_bytes_limit); - ROCKS_LOG_HEADER(log, " Options.disable_auto_compactions: %d", - disable_auto_compactions); - - const auto& it_compaction_style = - compaction_style_to_string.find(compaction_style); - std::string str_compaction_style; - if (it_compaction_style == compaction_style_to_string.end()) { - assert(false); - str_compaction_style = "unknown_" + std::to_string(compaction_style); - } else { - str_compaction_style = it_compaction_style->second; - } - ROCKS_LOG_HEADER(log, - " Options.compaction_style: %s", - str_compaction_style.c_str()); - - const auto& it_compaction_pri = - compaction_pri_to_string.find(compaction_pri); - std::string str_compaction_pri; - if (it_compaction_pri == compaction_pri_to_string.end()) { - assert(false); - str_compaction_pri = "unknown_" + std::to_string(compaction_pri); - } else { - str_compaction_pri = it_compaction_pri->second; - } - ROCKS_LOG_HEADER(log, - " Options.compaction_pri: %s", - str_compaction_pri.c_str()); - ROCKS_LOG_HEADER(log, - "Options.compaction_options_universal.size_ratio: %u", - compaction_options_universal.size_ratio); - ROCKS_LOG_HEADER(log, - "Options.compaction_options_universal.min_merge_width: %u", - compaction_options_universal.min_merge_width); - ROCKS_LOG_HEADER(log, - "Options.compaction_options_universal.max_merge_width: %u", - 
compaction_options_universal.max_merge_width); + } + if (moptions.bottommost_compressor) { ROCKS_LOG_HEADER( - log, - "Options.compaction_options_universal." - "max_size_amplification_percent: %u", - compaction_options_universal.max_size_amplification_percent); + log, " Options.bottommost_compression: %s", + moptions.bottommost_compressor->ToString(config_options).c_str()); + } else { ROCKS_LOG_HEADER( - log, - "Options.compaction_options_universal.compression_size_percent: %d", - compaction_options_universal.compression_size_percent); - const auto& it_compaction_stop_style = compaction_stop_style_to_string.find( - compaction_options_universal.stop_style); - std::string str_compaction_stop_style; - if (it_compaction_stop_style == compaction_stop_style_to_string.end()) { - assert(false); - str_compaction_stop_style = - "unknown_" + std::to_string(compaction_options_universal.stop_style); - } else { - str_compaction_stop_style = it_compaction_stop_style->second; - } - ROCKS_LOG_HEADER(log, - "Options.compaction_options_universal.stop_style: %s", - str_compaction_stop_style.c_str()); + log, " Options.bottommost_compression: %s", + BuiltinCompressor::TypeToString(moptions.bottommost_compression) + .c_str()); ROCKS_LOG_HEADER( - log, "Options.compaction_options_fifo.max_table_files_size: %" PRIu64, - compaction_options_fifo.max_table_files_size); - ROCKS_LOG_HEADER(log, - "Options.compaction_options_fifo.allow_compaction: %d", - compaction_options_fifo.allow_compaction); - std::ostringstream collector_info; - for (const auto& collector_factory : table_properties_collector_factories) { - collector_info << collector_factory->ToString() << ';'; + log, " Options.bottommost_compression_opts.enabled: %s", + bottommost_compression_opts.enabled ? 
"true" : "false"); + if (bottommost_compression_opts.enabled) { + ROCKS_LOG_HEADER( + log, " Options.bottommost_compression_opts.window_bits: %d", + bottommost_compression_opts.window_bits); + ROCKS_LOG_HEADER( + log, " Options.bottommost_compression_opts.level: %d", + bottommost_compression_opts.level); + ROCKS_LOG_HEADER( + log, " Options.bottommost_compression_opts.strategy: %d", + bottommost_compression_opts.strategy); + ROCKS_LOG_HEADER( + log, + " Options.bottommost_compression_opts.max_dict_bytes: " + "%" PRIu32, + bottommost_compression_opts.max_dict_bytes); + ROCKS_LOG_HEADER( + log, + " Options.bottommost_compression_opts.zstd_max_train_bytes: " + "%" PRIu32, + bottommost_compression_opts.zstd_max_train_bytes); + ROCKS_LOG_HEADER( + log, + " Options.bottommost_compression_opts.parallel_threads: " + "%" PRIu32, + bottommost_compression_opts.parallel_threads); + ROCKS_LOG_HEADER( + log, + " Options.bottommost_compression_opts.max_dict_buffer_bytes: " + "%" PRIu64, + bottommost_compression_opts.max_dict_buffer_bytes); } - ROCKS_LOG_HEADER( - log, " Options.table_properties_collectors: %s", - collector_info.str().c_str()); - ROCKS_LOG_HEADER(log, - " Options.inplace_update_support: %d", - inplace_update_support); + } + ROCKS_LOG_HEADER( + log, " Options.prefix_extractor: %s", + prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name()); + ROCKS_LOG_HEADER(log, + " Options.memtable_insert_with_hint_prefix_extractor: %s", + memtable_insert_with_hint_prefix_extractor == nullptr + ? 
"nullptr" + : memtable_insert_with_hint_prefix_extractor->Name()); + ROCKS_LOG_HEADER(log, " Options.num_levels: %d", num_levels); + ROCKS_LOG_HEADER(log, " Options.min_write_buffer_number_to_merge: %d", + min_write_buffer_number_to_merge); + ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number_to_maintain: %d", + max_write_buffer_number_to_maintain); + ROCKS_LOG_HEADER(log, + " Options.max_write_buffer_size_to_maintain: %" PRIu64, + max_write_buffer_size_to_maintain); + ROCKS_LOG_HEADER(log, " Options.level0_file_num_compaction_trigger: %d", + level0_file_num_compaction_trigger); + ROCKS_LOG_HEADER(log, " Options.level0_slowdown_writes_trigger: %d", + level0_slowdown_writes_trigger); + ROCKS_LOG_HEADER(log, " Options.level0_stop_writes_trigger: %d", + level0_stop_writes_trigger); + ROCKS_LOG_HEADER(log, + " Options.target_file_size_base: %" PRIu64, + target_file_size_base); + ROCKS_LOG_HEADER(log, " Options.target_file_size_multiplier: %d", + target_file_size_multiplier); + ROCKS_LOG_HEADER(log, + " Options.max_bytes_for_level_base: %" PRIu64, + max_bytes_for_level_base); + ROCKS_LOG_HEADER(log, "Options.level_compaction_dynamic_level_bytes: %d", + level_compaction_dynamic_level_bytes); + ROCKS_LOG_HEADER(log, " Options.max_bytes_for_level_multiplier: %f", + max_bytes_for_level_multiplier); + for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size(); + i++) { ROCKS_LOG_HEADER( log, - " Options.inplace_update_num_locks: %" ROCKSDB_PRIszt, - inplace_update_num_locks); - // TODO: easier config for bloom (maybe based on avg key/value size) - ROCKS_LOG_HEADER( - log, " Options.memtable_prefix_bloom_size_ratio: %f", - memtable_prefix_bloom_size_ratio); - ROCKS_LOG_HEADER(log, - " Options.memtable_whole_key_filtering: %d", - memtable_whole_key_filtering); - - ROCKS_LOG_HEADER(log, " Options.memtable_huge_page_size: %" ROCKSDB_PRIszt, - memtable_huge_page_size); - ROCKS_LOG_HEADER(log, - " Options.bloom_locality: %d", - bloom_locality); + 
"Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt "]: %d", + i, max_bytes_for_level_multiplier_additional[i]); + } + ROCKS_LOG_HEADER(log, + " Options.max_sequential_skip_in_iterations: %" PRIu64, + max_sequential_skip_in_iterations); + ROCKS_LOG_HEADER(log, + " Options.max_compaction_bytes: %" PRIu64, + max_compaction_bytes); + ROCKS_LOG_HEADER(log, " Options.ignore_max_compaction_bytes_for_input: %s", + ignore_max_compaction_bytes_for_input ? "true" : "false"); + ROCKS_LOG_HEADER( + log, " Options.arena_block_size: %" ROCKSDB_PRIszt, + arena_block_size); + ROCKS_LOG_HEADER(log, + " Options.soft_pending_compaction_bytes_limit: %" PRIu64, + soft_pending_compaction_bytes_limit); + ROCKS_LOG_HEADER(log, + " Options.hard_pending_compaction_bytes_limit: %" PRIu64, + hard_pending_compaction_bytes_limit); + ROCKS_LOG_HEADER(log, " Options.disable_auto_compactions: %d", + disable_auto_compactions); + + const auto& it_compaction_style = + compaction_style_to_string.find(compaction_style); + std::string str_compaction_style; + if (it_compaction_style == compaction_style_to_string.end()) { + assert(false); + str_compaction_style = "unknown_" + std::to_string(compaction_style); + } else { + str_compaction_style = it_compaction_style->second; + } + ROCKS_LOG_HEADER(log, " Options.compaction_style: %s", + str_compaction_style.c_str()); + + const auto& it_compaction_pri = compaction_pri_to_string.find(compaction_pri); + std::string str_compaction_pri; + if (it_compaction_pri == compaction_pri_to_string.end()) { + assert(false); + str_compaction_pri = "unknown_" + std::to_string(compaction_pri); + } else { + str_compaction_pri = it_compaction_pri->second; + } + ROCKS_LOG_HEADER(log, " Options.compaction_pri: %s", + str_compaction_pri.c_str()); + ROCKS_LOG_HEADER(log, "Options.compaction_options_universal.size_ratio: %u", + compaction_options_universal.size_ratio); + ROCKS_LOG_HEADER(log, + "Options.compaction_options_universal.min_merge_width: %u", + 
compaction_options_universal.min_merge_width); + ROCKS_LOG_HEADER(log, + "Options.compaction_options_universal.max_merge_width: %u", + compaction_options_universal.max_merge_width); + ROCKS_LOG_HEADER(log, + "Options.compaction_options_universal." + "max_size_amplification_percent: %u", + compaction_options_universal.max_size_amplification_percent); + ROCKS_LOG_HEADER( + log, "Options.compaction_options_universal.compression_size_percent: %d", + compaction_options_universal.compression_size_percent); + const auto& it_compaction_stop_style = compaction_stop_style_to_string.find( + compaction_options_universal.stop_style); + std::string str_compaction_stop_style; + if (it_compaction_stop_style == compaction_stop_style_to_string.end()) { + assert(false); + str_compaction_stop_style = + "unknown_" + std::to_string(compaction_options_universal.stop_style); + } else { + str_compaction_stop_style = it_compaction_stop_style->second; + } + ROCKS_LOG_HEADER(log, "Options.compaction_options_universal.stop_style: %s", + str_compaction_stop_style.c_str()); + ROCKS_LOG_HEADER( + log, "Options.compaction_options_fifo.max_table_files_size: %" PRIu64, + compaction_options_fifo.max_table_files_size); + ROCKS_LOG_HEADER(log, "Options.compaction_options_fifo.allow_compaction: %d", + compaction_options_fifo.allow_compaction); + std::ostringstream collector_info; + for (const auto& collector_factory : table_properties_collector_factories) { + collector_info << collector_factory->ToString() << ';'; + } + ROCKS_LOG_HEADER(log, + " Options.table_properties_collectors: %s", + collector_info.str().c_str()); + ROCKS_LOG_HEADER(log, " Options.inplace_update_support: %d", + inplace_update_support); + ROCKS_LOG_HEADER( + log, " Options.inplace_update_num_locks: %" ROCKSDB_PRIszt, + inplace_update_num_locks); + // TODO: easier config for bloom (maybe based on avg key/value size) + ROCKS_LOG_HEADER(log, + " Options.memtable_prefix_bloom_size_ratio: %f", + memtable_prefix_bloom_size_ratio); + 
ROCKS_LOG_HEADER(log, + " Options.memtable_whole_key_filtering: %d", + memtable_whole_key_filtering); + + ROCKS_LOG_HEADER(log, " Options.memtable_huge_page_size: %" ROCKSDB_PRIszt, + memtable_huge_page_size); + ROCKS_LOG_HEADER(log, " Options.bloom_locality: %d", + bloom_locality); + ROCKS_LOG_HEADER( + log, " Options.max_successive_merges: %" ROCKSDB_PRIszt, + max_successive_merges); + ROCKS_LOG_HEADER(log, " Options.optimize_filters_for_hits: %d", + optimize_filters_for_hits); + ROCKS_LOG_HEADER(log, " Options.paranoid_file_checks: %d", + paranoid_file_checks); + ROCKS_LOG_HEADER(log, " Options.force_consistency_checks: %d", + force_consistency_checks); + ROCKS_LOG_HEADER(log, " Options.report_bg_io_stats: %d", + report_bg_io_stats); + ROCKS_LOG_HEADER(log, " Options.ttl: %" PRIu64, + ttl); + ROCKS_LOG_HEADER(log, + " Options.periodic_compaction_seconds: %" PRIu64, + periodic_compaction_seconds); + const auto& it_temp = temperature_to_string.find(default_temperature); + std::string str_default_temperature; + if (it_temp == temperature_to_string.end()) { + assert(false); + str_default_temperature = "unknown_temperature"; + } else { + str_default_temperature = it_temp->second; + } + ROCKS_LOG_HEADER(log, + " Options.default_temperature: %s", + str_default_temperature.c_str()); + ROCKS_LOG_HEADER(log, " Options.preclude_last_level_data_seconds: %" PRIu64, + preclude_last_level_data_seconds); + ROCKS_LOG_HEADER(log, " Options.preserve_internal_time_seconds: %" PRIu64, + preserve_internal_time_seconds); + ROCKS_LOG_HEADER(log, " Options.enable_blob_files: %s", + enable_blob_files ? 
"true" : "false"); + ROCKS_LOG_HEADER(log, + " Options.min_blob_size: %" PRIu64, + min_blob_size); + ROCKS_LOG_HEADER(log, + " Options.blob_file_size: %" PRIu64, + blob_file_size); + if (moptions.blob_compressor) { + ROCKS_LOG_HEADER(log, " Options.blob_compression: %s", + moptions.blob_compressor->GetId().c_str()); + } else { ROCKS_LOG_HEADER( - log, - " Options.max_successive_merges: %" ROCKSDB_PRIszt, - max_successive_merges); - ROCKS_LOG_HEADER(log, - " Options.optimize_filters_for_hits: %d", - optimize_filters_for_hits); - ROCKS_LOG_HEADER(log, " Options.paranoid_file_checks: %d", - paranoid_file_checks); - ROCKS_LOG_HEADER(log, " Options.force_consistency_checks: %d", - force_consistency_checks); - ROCKS_LOG_HEADER(log, " Options.report_bg_io_stats: %d", - report_bg_io_stats); - ROCKS_LOG_HEADER(log, " Options.ttl: %" PRIu64, - ttl); - ROCKS_LOG_HEADER(log, - " Options.periodic_compaction_seconds: %" PRIu64, - periodic_compaction_seconds); - const auto& it_temp = temperature_to_string.find(default_temperature); - std::string str_default_temperature; - if (it_temp == temperature_to_string.end()) { - assert(false); - str_default_temperature = "unknown_temperature"; - } else { - str_default_temperature = it_temp->second; - } + log, " Options.blob_compression_type: %s", + BuiltinCompressor::TypeToString(blob_compression_type).c_str()); + } + ROCKS_LOG_HEADER(log, " Options.enable_blob_garbage_collection: %s", + enable_blob_garbage_collection ? 
"true" : "false"); + ROCKS_LOG_HEADER(log, " Options.blob_garbage_collection_age_cutoff: %f", + blob_garbage_collection_age_cutoff); + ROCKS_LOG_HEADER(log, "Options.blob_garbage_collection_force_threshold: %f", + blob_garbage_collection_force_threshold); + ROCKS_LOG_HEADER(log, + " Options.blob_compaction_readahead_size: %" PRIu64, + blob_compaction_readahead_size); + ROCKS_LOG_HEADER(log, " Options.blob_file_starting_level: %d", + blob_file_starting_level); + if (blob_cache) { + ROCKS_LOG_HEADER(log, " Options.blob_cache: %s", + blob_cache->Name()); + ROCKS_LOG_HEADER(log, " blob_cache options: %s", + blob_cache->GetPrintableOptions().c_str()); ROCKS_LOG_HEADER(log, - " Options.default_temperature: %s", - str_default_temperature.c_str()); - ROCKS_LOG_HEADER(log, " Options.preclude_last_level_data_seconds: %" PRIu64, - preclude_last_level_data_seconds); - ROCKS_LOG_HEADER(log, " Options.preserve_internal_time_seconds: %" PRIu64, - preserve_internal_time_seconds); - ROCKS_LOG_HEADER(log, " Options.enable_blob_files: %s", - enable_blob_files ? "true" : "false"); - ROCKS_LOG_HEADER( - log, " Options.min_blob_size: %" PRIu64, - min_blob_size); - ROCKS_LOG_HEADER( - log, " Options.blob_file_size: %" PRIu64, - blob_file_size); - ROCKS_LOG_HEADER(log, " Options.blob_compression_type: %s", - CompressionTypeToString(blob_compression_type).c_str()); - ROCKS_LOG_HEADER(log, " Options.enable_blob_garbage_collection: %s", - enable_blob_garbage_collection ? 
"true" : "false"); - ROCKS_LOG_HEADER(log, " Options.blob_garbage_collection_age_cutoff: %f", - blob_garbage_collection_age_cutoff); - ROCKS_LOG_HEADER(log, "Options.blob_garbage_collection_force_threshold: %f", - blob_garbage_collection_force_threshold); - ROCKS_LOG_HEADER( - log, " Options.blob_compaction_readahead_size: %" PRIu64, - blob_compaction_readahead_size); - ROCKS_LOG_HEADER(log, " Options.blob_file_starting_level: %d", - blob_file_starting_level); - if (blob_cache) { - ROCKS_LOG_HEADER(log, " Options.blob_cache: %s", - blob_cache->Name()); - ROCKS_LOG_HEADER(log, " blob_cache options: %s", - blob_cache->GetPrintableOptions().c_str()); - ROCKS_LOG_HEADER( - log, " blob_cache prepopulated: %s", - prepopulate_blob_cache == PrepopulateBlobCache::kFlushOnly - ? "flush only" - : "disabled"); - } - ROCKS_LOG_HEADER(log, " Options.experimental_mempurge_threshold: %f", - experimental_mempurge_threshold); - ROCKS_LOG_HEADER(log, " Options.memtable_max_range_deletions: %d", - memtable_max_range_deletions); + " blob_cache prepopulated: %s", + prepopulate_blob_cache == PrepopulateBlobCache::kFlushOnly + ? 
"flush only" + : "disabled"); + } + ROCKS_LOG_HEADER(log, " Options.experimental_mempurge_threshold: %f", + experimental_mempurge_threshold); + ROCKS_LOG_HEADER(log, " Options.memtable_max_range_deletions: %d", + memtable_max_range_deletions); } // ColumnFamilyOptions::Dump void Options::Dump(Logger* log) const { diff --git a/options/options_helper.cc b/options/options_helper.cc index 00672728bc..d8b7d867a1 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -425,31 +425,6 @@ std::unordered_map {"kZSTDNotFinalCompression", kZSTDNotFinalCompression}, {"kDisableCompressionOption", kDisableCompressionOption}}; -std::vector GetSupportedCompressions() { - // std::set internally to deduplicate potential name aliases - std::set supported_compressions; - for (const auto& comp_to_name : OptionsHelper::compression_type_string_map) { - CompressionType t = comp_to_name.second; - if (t != kDisableCompressionOption && CompressionTypeSupported(t)) { - supported_compressions.insert(t); - } - } - return std::vector(supported_compressions.begin(), - supported_compressions.end()); -} - -std::vector GetSupportedDictCompressions() { - std::set dict_compression_types; - for (const auto& comp_to_name : OptionsHelper::compression_type_string_map) { - CompressionType t = comp_to_name.second; - if (t != kDisableCompressionOption && DictCompressionTypeSupported(t)) { - dict_compression_types.insert(t); - } - } - return std::vector(dict_compression_types.begin(), - dict_compression_types.end()); -} - std::vector GetSupportedChecksums() { std::set checksum_types; for (const auto& e : OptionsHelper::checksum_type_string_map) { diff --git a/options/options_helper.h b/options/options_helper.h index 914114c67e..08a9b00ad8 100644 --- a/options/options_helper.h +++ b/options/options_helper.h @@ -33,10 +33,6 @@ struct MutableDBOptions; struct MutableCFOptions; struct Options; -std::vector GetSupportedCompressions(); - -std::vector GetSupportedDictCompressions(); - std::vector 
GetSupportedChecksums(); inline bool IsSupportedChecksumType(ChecksumType type) { diff --git a/options/options_settable_test.cc b/options/options_settable_test.cc index d671581f6e..b7404a6e36 100644 --- a/options/options_settable_test.cc +++ b/options/options_settable_test.cc @@ -634,6 +634,8 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) { sizeof(struct CompactionOptionsFIFO)}, {offsetof(struct MutableCFOptions, compression_per_level), sizeof(std::vector)}, + {offsetof(struct MutableCFOptions, compressor_per_level), + sizeof(std::vector>)}, {offsetof(struct MutableCFOptions, max_file_size), sizeof(std::vector)}, }; diff --git a/src.mk b/src.mk index b6570278ae..41e259c752 100644 --- a/src.mk +++ b/src.mk @@ -239,6 +239,7 @@ LIB_SOURCES = \ util/comparator.cc \ util/compression.cc \ util/compression_context_cache.cc \ + util/compressor.cc \ util/concurrent_task_limiter_impl.cc \ util/crc32c.cc \ util/crc32c_arm64.cc \ @@ -584,6 +585,7 @@ TEST_MAIN_SOURCES = \ util/autovector_test.cc \ util/bloom_test.cc \ util/coding_test.cc \ + util/compression_test.cc \ util/crc32c_test.cc \ util/defer_test.cc \ util/dynamic_bloom_test.cc \ diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc index 051f9d87b4..d5ab7d2ff1 100644 --- a/table/block_based/block_based_table_builder.cc +++ b/table/block_based/block_based_table_builder.cc @@ -117,8 +117,8 @@ bool GoodCompressionRatio(size_t compressed_size, size_t uncomp_size, } // namespace // format_version is the block format as defined in include/rocksdb/table.h -Slice CompressBlock(const Slice& uncompressed_data, const CompressionInfo& info, - CompressionType* type, uint32_t format_version, +Slice CompressBlock(Compressor* compressor, const Slice& uncompressed_data, + const CompressionInfo& info, CompressionType* type, bool allow_sample, std::string* compressed_output, std::string* sampled_output_fast, std::string* sampled_output_slow) { @@ -138,43 
+138,38 @@ Slice CompressBlock(const Slice& uncompressed_data, const CompressionInfo& info, if (sampled_output_fast && (LZ4_Supported() || Snappy_Supported())) { CompressionType c = LZ4_Supported() ? kLZ4Compression : kSnappyCompression; - CompressionOptions options; - CompressionContext context(c, options); - CompressionInfo info_tmp(options, context, - CompressionDict::GetEmptyDict(), c, + auto sampler = BuiltinCompressor::GetCompressor(c, CompressionOptions()); + CompressionInfo info_tmp(CompressionDict::GetEmptyDict(), + info.CompressFormatVersion(), info.SampleForCompression()); - CompressData(uncompressed_data, info_tmp, - GetCompressFormatForVersion(format_version), - sampled_output_fast); + info_tmp.CompressData(sampler.get(), uncompressed_data, + sampled_output_fast); } // Sampling with a slow but high-compression algorithm if (sampled_output_slow && (ZSTD_Supported() || Zlib_Supported())) { CompressionType c = ZSTD_Supported() ? kZSTD : kZlibCompression; - CompressionOptions options; - CompressionContext context(c, options); - CompressionInfo info_tmp(options, context, - CompressionDict::GetEmptyDict(), c, + auto sampler = BuiltinCompressor::GetCompressor(c, CompressionOptions()); + CompressionInfo info_tmp(CompressionDict::GetEmptyDict(), + info.CompressFormatVersion(), info.SampleForCompression()); - CompressData(uncompressed_data, info_tmp, - GetCompressFormatForVersion(format_version), - sampled_output_slow); + info_tmp.CompressData(sampler.get(), uncompressed_data, + sampled_output_slow); } } - int max_compressed_bytes_per_kb = info.options().max_compressed_bytes_per_kb; - if (info.type() == kNoCompression || max_compressed_bytes_per_kb <= 0) { + *type = compressor->GetCompressionType(); + int max_compressed_bytes_per_kb = compressor->GetMaxCompressedBytesPerKb(); + if (*type == kNoCompression || max_compressed_bytes_per_kb <= 0) { *type = kNoCompression; return uncompressed_data; } // Actually compress the data; if the compression method is not 
supported, // or the compression fails etc., just fall back to uncompressed - if (!CompressData(uncompressed_data, info, - GetCompressFormatForVersion(format_version), - compressed_output)) { + if (!info.CompressData(compressor, uncompressed_data, compressed_output)) { *type = kNoCompression; return uncompressed_data; } @@ -187,7 +182,6 @@ Slice CompressBlock(const Slice& uncompressed_data, const CompressionInfo& info, return uncompressed_data; } - *type = info.type(); return *compressed_output; } @@ -298,17 +292,14 @@ struct BlockBasedTableBuilder::Rep { std::string last_key; const Slice* first_key_in_next_block = nullptr; - CompressionType compression_type; + std::shared_ptr compressor; uint64_t sample_for_compression; std::atomic compressible_input_data_bytes; std::atomic uncompressible_input_data_bytes; std::atomic sampled_input_data_bytes; std::atomic sampled_output_slow_data_bytes; std::atomic sampled_output_fast_data_bytes; - CompressionOptions compression_opts; std::unique_ptr compression_dict; - std::vector> compression_ctxs; - std::vector> verify_ctxs; std::unique_ptr verify_dict; size_t data_begin_offset = 0; @@ -370,7 +361,7 @@ struct BlockBasedTableBuilder::Rep { void set_offset(uint64_t o) { offset.store(o, std::memory_order_relaxed); } bool IsParallelCompressionEnabled() const { - return compression_opts.parallel_threads > 1; + return compressor->GetParallelThreads() > 1; } Status GetStatus() { @@ -468,29 +459,22 @@ struct BlockBasedTableBuilder::Rep { 0.75 /* data_block_hash_table_util_ratio */, ts_sz, persist_user_defined_timestamps), internal_prefix_transform(prefix_extractor.get()), - compression_type(tbo.compression_type), + compressor(tbo.compressor), sample_for_compression(tbo.moptions.sample_for_compression), compressible_input_data_bytes(0), uncompressible_input_data_bytes(0), sampled_input_data_bytes(0), sampled_output_slow_data_bytes(0), sampled_output_fast_data_bytes(0), - compression_opts(tbo.compression_opts), compression_dict(), - 
compression_ctxs(tbo.compression_opts.parallel_threads), - verify_ctxs(tbo.compression_opts.parallel_threads), verify_dict(), - state((tbo.compression_opts.max_dict_bytes > 0) ? State::kBuffered - : State::kUnbuffered), use_delta_encoding_for_index_values(table_opt.format_version >= 4 && !table_opt.block_align), reason(tbo.reason), flush_block_policy( table_options.flush_block_policy_factory->NewFlushBlockPolicy( table_options, data_block)), - create_context(&table_options, ioptions.stats, - compression_type == kZSTD || - compression_type == kZSTDNotFinalCompression, + create_context(&table_options, ioptions.stats, tbo.compressor, tbo.moptions.block_protection_bytes_per_key, tbo.internal_comparator.user_comparator(), !use_delta_encoding_for_index_values, @@ -499,13 +483,15 @@ struct BlockBasedTableBuilder::Rep { tail_size(0), status_ok(true), io_status_ok(true) { + state = compressor->IsDictEnabled() ? State::kBuffered : State::kUnbuffered; + + uint64_t max_dict_buffer_bytes = compressor->GetMaxDictBufferBytes(); if (tbo.target_file_size == 0) { - buffer_limit = compression_opts.max_dict_buffer_bytes; - } else if (compression_opts.max_dict_buffer_bytes == 0) { + buffer_limit = max_dict_buffer_bytes; + } else if (max_dict_buffer_bytes == 0) { buffer_limit = tbo.target_file_size; } else { - buffer_limit = std::min(tbo.target_file_size, - compression_opts.max_dict_buffer_bytes); + buffer_limit = std::min(tbo.target_file_size, max_dict_buffer_bytes); } const auto compress_dict_build_buffer_charged = @@ -524,12 +510,6 @@ struct BlockBasedTableBuilder::Rep { } else { compression_dict_buffer_cache_res_mgr = nullptr; } - - assert(compression_ctxs.size() >= compression_opts.parallel_threads); - for (uint32_t i = 0; i < compression_opts.parallel_threads; i++) { - compression_ctxs[i].reset( - new CompressionContext(compression_type, compression_opts)); - } if (table_options.index_type == BlockBasedTableOptions::kTwoLevelIndexSearch) { p_index_builder_ = 
PartitionedIndexBuilder::CreateIndexBuilder( @@ -592,11 +572,6 @@ struct BlockBasedTableBuilder::Rep { new TimestampTablePropertiesCollector( tbo.internal_comparator.user_comparator())); } - if (table_options.verify_compression) { - for (uint32_t i = 0; i < compression_opts.parallel_threads; i++) { - verify_ctxs[i].reset(new UncompressionContext(compression_type)); - } - } // These are only needed for populating table properties props.column_family_id = tbo.column_family_id; @@ -1102,8 +1077,9 @@ void BlockBasedTableBuilder::Flush() { if (r->IsParallelCompressionEnabled() && r->state == Rep::State::kUnbuffered) { r->data_block.Finish(); - ParallelCompressionRep::BlockRep* block_rep = r->pc_rep->PrepareBlock( - r->compression_type, r->first_key_in_next_block, &(r->data_block)); + ParallelCompressionRep::BlockRep* block_rep = + r->pc_rep->PrepareBlock(r->compressor->GetCompressionType(), + r->first_key_in_next_block, &(r->data_block)); assert(block_rep != nullptr); r->pc_rep->file_size_estimator.EmitBlock(block_rep->data->size(), r->get_offset()); @@ -1139,7 +1115,6 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& uncompressed_block_data, Status compress_status; bool is_data_block = block_type == BlockType::kData; CompressAndVerifyBlock(uncompressed_block_data, is_data_block, - *(r->compression_ctxs[0]), r->verify_ctxs[0].get(), &(r->compressed_output), &(block_contents), &type, &compress_status); r->SetStatus(compress_status); @@ -1159,14 +1134,11 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& uncompressed_block_data, } } -void BlockBasedTableBuilder::BGWorkCompression( - const CompressionContext& compression_ctx, - UncompressionContext* verify_ctx) { +void BlockBasedTableBuilder::BGWorkCompression() { ParallelCompressionRep::BlockRep* block_rep = nullptr; while (rep_->pc_rep->compress_queue.pop(block_rep)) { assert(block_rep != nullptr); CompressAndVerifyBlock(block_rep->contents, true, /* is_data_block*/ - compression_ctx, verify_ctx, 
block_rep->compressed_data.get(), &block_rep->compressed_contents, &(block_rep->compression_type), &block_rep->status); @@ -1176,7 +1148,6 @@ void BlockBasedTableBuilder::BGWorkCompression( void BlockBasedTableBuilder::CompressAndVerifyBlock( const Slice& uncompressed_block_data, bool is_data_block, - const CompressionContext& compression_ctx, UncompressionContext* verify_ctx, std::string* compressed_output, Slice* block_contents, CompressionType* type, Status* out_status) { Rep* r = rep_; @@ -1201,16 +1172,17 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock( compression_dict = r->compression_dict.get(); } assert(compression_dict != nullptr); - CompressionInfo compression_info(r->compression_opts, compression_ctx, - *compression_dict, r->compression_type, - r->sample_for_compression); + CompressionInfo compression_info( + *compression_dict, + GetCompressFormatForVersion(r->table_options.format_version), + r->sample_for_compression); std::string sampled_output_fast; std::string sampled_output_slow; *block_contents = CompressBlock( - uncompressed_block_data, compression_info, type, - r->table_options.format_version, is_data_block /* allow_sample */, - compressed_output, &sampled_output_fast, &sampled_output_slow); + r->compressor.get(), uncompressed_block_data, compression_info, type, + is_data_block /* allow_sample */, compressed_output, + &sampled_output_fast, &sampled_output_slow); if (sampled_output_slow.size() > 0 || sampled_output_fast.size() > 0) { // Currently compression sampling is only enabled for data block. 
@@ -1240,11 +1212,12 @@ void BlockBasedTableBuilder::CompressAndVerifyBlock( } assert(verify_dict != nullptr); BlockContents contents; - UncompressionInfo uncompression_info(*verify_ctx, *verify_dict, - r->compression_type); + UncompressionInfo uncompression_info( + *verify_dict, + GetCompressFormatForVersion(r->table_options.format_version)); Status uncompress_status = UncompressBlockData( - uncompression_info, block_contents->data(), block_contents->size(), - &contents, r->table_options.format_version, r->ioptions); + r->compressor.get(), uncompression_info, block_contents->data(), + block_contents->size(), &contents, r->ioptions); if (uncompress_status.ok()) { bool data_match = contents.data.compare(uncompressed_block_data) == 0; @@ -1460,14 +1433,12 @@ void BlockBasedTableBuilder::BGWorkWriteMaybeCompressedBlock() { void BlockBasedTableBuilder::StartParallelCompression() { rep_->pc_rep.reset( - new ParallelCompressionRep(rep_->compression_opts.parallel_threads)); + new ParallelCompressionRep(rep_->compressor->GetParallelThreads())); rep_->pc_rep->compress_thread_pool.reserve( - rep_->compression_opts.parallel_threads); - for (uint32_t i = 0; i < rep_->compression_opts.parallel_threads; i++) { - rep_->pc_rep->compress_thread_pool.emplace_back([this, i] { - BGWorkCompression(*(rep_->compression_ctxs[i]), - rep_->verify_ctxs[i].get()); - }); + rep_->compressor->GetParallelThreads()); + for (uint32_t i = 0; i < rep_->compressor->GetParallelThreads(); i++) { + rep_->pc_rep->compress_thread_pool.emplace_back( + [this] { BGWorkCompression(); }); } rep_->pc_rep->write_thread.reset( new port::Thread([this] { BGWorkWriteMaybeCompressedBlock(); })); @@ -1654,10 +1625,7 @@ void BlockBasedTableBuilder::WritePropertiesBlock( rep_->ioptions.merge_operator != nullptr ? 
rep_->ioptions.merge_operator->Name() : "nullptr"; - rep_->props.compression_name = - CompressionTypeToString(rep_->compression_type); - rep_->props.compression_options = - CompressionOptionsToString(rep_->compression_opts); + rep_->props.compression_name = rep_->compressor->GetId(); rep_->props.prefix_extractor_name = rep_->prefix_extractor ? rep_->prefix_extractor->AsString() : "nullptr"; std::string property_collectors_names = "["; @@ -1804,72 +1772,13 @@ void BlockBasedTableBuilder::EnterUnbuffered() { Rep* r = rep_; assert(r->state == Rep::State::kBuffered); r->state = Rep::State::kUnbuffered; - const size_t kSampleBytes = r->compression_opts.zstd_max_train_bytes > 0 - ? r->compression_opts.zstd_max_train_bytes - : r->compression_opts.max_dict_bytes; - const size_t kNumBlocksBuffered = r->data_block_buffers.size(); - if (kNumBlocksBuffered == 0) { - // The below code is neither safe nor necessary for handling zero data - // blocks. - return; - } - // Abstract algebra teaches us that a finite cyclic group (such as the - // additive group of integers modulo N) can be generated by a number that is - // coprime with N. Since N is variable (number of buffered data blocks), we - // must then pick a prime number in order to guarantee coprimeness with any N. - // - // One downside of this approach is the spread will be poor when - // `kPrimeGeneratorRemainder` is close to zero or close to - // `kNumBlocksBuffered`. - // - // Picked a random number between one and one trillion and then chose the - // next prime number greater than or equal to it. - const uint64_t kPrimeGenerator = 545055921143ull; - // Can avoid repeated division by just adding the remainder repeatedly. 
- const size_t kPrimeGeneratorRemainder = static_cast( - kPrimeGenerator % static_cast(kNumBlocksBuffered)); - const size_t kInitSampleIdx = kNumBlocksBuffered / 2; - - std::string compression_dict_samples; - std::vector compression_dict_sample_lens; - size_t buffer_idx = kInitSampleIdx; - for (size_t i = 0; - i < kNumBlocksBuffered && compression_dict_samples.size() < kSampleBytes; - ++i) { - size_t copy_len = std::min(kSampleBytes - compression_dict_samples.size(), - r->data_block_buffers[buffer_idx].size()); - compression_dict_samples.append(r->data_block_buffers[buffer_idx], 0, - copy_len); - compression_dict_sample_lens.emplace_back(copy_len); - - buffer_idx += kPrimeGeneratorRemainder; - if (buffer_idx >= kNumBlocksBuffered) { - buffer_idx -= kNumBlocksBuffered; - } - } - - // final data block flushed, now we can generate dictionary from the samples. - // OK if compression_dict_samples is empty, we'll just get empty dictionary. - std::string dict; - if (r->compression_opts.zstd_max_train_bytes > 0) { - if (r->compression_opts.use_zstd_dict_trainer) { - dict = ZSTD_TrainDictionary(compression_dict_samples, - compression_dict_sample_lens, - r->compression_opts.max_dict_bytes); - } else { - dict = ZSTD_FinalizeDictionary( - compression_dict_samples, compression_dict_sample_lens, - r->compression_opts.max_dict_bytes, r->compression_opts.level); - } - } else { - dict = std::move(compression_dict_samples); - } - r->compression_dict.reset(new CompressionDict(dict, r->compression_type, - r->compression_opts.level)); - r->verify_dict.reset(new UncompressionDict( - dict, r->compression_type == kZSTD || - r->compression_type == kZSTDNotFinalCompression)); + Status s = + r->compressor->CreateDict(r->data_block_buffers, &r->compression_dict); + assert(s.ok()); + assert(r->compression_dict != nullptr); + auto dict = r->compression_dict->GetRawDict(); + r->verify_dict.reset(r->compressor->NewUncompressionDict(dict.ToString())); auto get_iterator_for_block = [&r](size_t i) { 
auto& data_block = r->data_block_buffers[i]; @@ -1915,7 +1824,8 @@ void BlockBasedTableBuilder::EnterUnbuffered() { } ParallelCompressionRep::BlockRep* block_rep = r->pc_rep->PrepareBlock( - r->compression_type, first_key_in_next_block_ptr, &data_block, &keys); + r->compressor->GetCompressionType(), first_key_in_next_block_ptr, + &data_block, &keys); assert(block_rep != nullptr); r->pc_rep->file_size_estimator.EmitBlock(block_rep->data->size(), @@ -1949,7 +1859,7 @@ void BlockBasedTableBuilder::EnterUnbuffered() { r->data_begin_offset = 0; // Release all reserved cache for data block buffers if (r->compression_dict_buffer_cache_res_mgr != nullptr) { - Status s = r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation( + s = r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation( r->data_begin_offset); s.PermitUncheckedError(); } diff --git a/table/block_based/block_based_table_builder.h b/table/block_based/block_based_table_builder.h index 3949474c58..d3e6793d62 100644 --- a/table/block_based/block_based_table_builder.h +++ b/table/block_based/block_based_table_builder.h @@ -24,12 +24,12 @@ #include "rocksdb/table.h" #include "table/meta_blocks.h" #include "table/table_builder.h" -#include "util/compression.h" namespace ROCKSDB_NAMESPACE { class BlockBuilder; class BlockHandle; +class Compressor; class WritableFile; struct BlockBasedTableOptions; @@ -175,15 +175,12 @@ class BlockBasedTableBuilder : public TableBuilder { // Get blocks from mem-table walking thread, compress them and // pass them to the write thread. 
Used in parallel compression mode only - void BGWorkCompression(const CompressionContext& compression_ctx, - UncompressionContext* verify_ctx); + void BGWorkCompression(); // Given uncompressed block content, try to compress it and return result and // compression type void CompressAndVerifyBlock(const Slice& uncompressed_block_data, bool is_data_block, - const CompressionContext& compression_ctx, - UncompressionContext* verify_ctx, std::string* compressed_output, Slice* result_block_contents, CompressionType* result_compression_type, @@ -200,8 +197,8 @@ class BlockBasedTableBuilder : public TableBuilder { void StopParallelCompression(); }; -Slice CompressBlock(const Slice& uncompressed_data, const CompressionInfo& info, - CompressionType* type, uint32_t format_version, +Slice CompressBlock(Compressor* compressor, const Slice& uncompressed_data, + const CompressionInfo& info, CompressionType* type, bool do_sample, std::string* compressed_output, std::string* sampled_output_fast, std::string* sampled_output_slow); diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc index 498bdfab14..7b1d9cc6c5 100644 --- a/table/block_based/block_based_table_reader.cc +++ b/table/block_based/block_based_table_reader.cc @@ -163,7 +163,8 @@ Status ReadAndParseBlockFromFile( BlockCreateContext& create_context, bool maybe_compressed, const UncompressionDict& uncompression_dict, const PersistentCacheOptions& cache_options, - MemoryAllocator* memory_allocator, bool for_compaction, bool async_read) { + MemoryAllocator* memory_allocator, bool for_compaction, + const std::shared_ptr& compressor, bool async_read) { assert(result); BlockContents contents; @@ -171,7 +172,7 @@ Status ReadAndParseBlockFromFile( file, prefetch_buffer, footer, options, handle, &contents, ioptions, /*do_uncompress*/ maybe_compressed, maybe_compressed, TBlocklike::kBlockType, uncompression_dict, cache_options, - memory_allocator, nullptr, for_compaction); + 
memory_allocator, nullptr, for_compaction, compressor.get()); Status s; // If prefetch_buffer is not allocated, it will fallback to synchronous // reading of block contents. @@ -692,17 +693,11 @@ Status BlockBasedTable::Open( } // Populate BlockCreateContext - bool blocks_definitely_zstd_compressed = - rep->table_properties && - (rep->table_properties->compression_name == - CompressionTypeToString(kZSTD) || - rep->table_properties->compression_name == - CompressionTypeToString(kZSTDNotFinalCompression)); - rep->create_context = BlockCreateContext( - &rep->table_options, rep->ioptions.stats, - blocks_definitely_zstd_compressed, block_protection_bytes_per_key, - rep->internal_comparator.user_comparator(), rep->index_value_is_full, - rep->index_has_first_key); + rep->create_context = + BlockCreateContext(&rep->table_options, rep->ioptions.stats, + rep->compressor, block_protection_bytes_per_key, + rep->internal_comparator.user_comparator(), + rep->index_value_is_full, rep->index_has_first_key); // Check expected unique id if provided if (expected_unique_id != kNullUniqueId64x2) { @@ -939,9 +934,17 @@ Status BlockBasedTable::ReadPropertiesBlock( } else { assert(table_properties != nullptr); rep_->table_properties = std::move(table_properties); - rep_->blocks_maybe_compressed = - rep_->table_properties->compression_name != - CompressionTypeToString(kNoCompression); + ConfigOptions config_options; + s = Compressor::CreateFromString(config_options, + rep_->table_properties->compression_name, + &rep_->compressor); + if (!s.ok() || rep_->compressor == nullptr) { + ROCKS_LOG_ERROR(rep_->ioptions.logger, + "Compression type not supported"); + } else { + rep_->blocks_maybe_compressed = + (rep_->compressor->GetCompressionType() != kNoCompression); + } } } else { ROCKS_LOG_ERROR(rep_->ioptions.logger, @@ -1256,7 +1259,7 @@ Status BlockBasedTable::ReadMetaIndexBlock( rep_->create_context, true /*maybe_compressed*/, UncompressionDict::GetEmptyDict(), 
rep_->persistent_cache_options, GetMemoryAllocator(rep_->table_options), false /* for_compaction */, - false /* async_read */); + rep_->compressor, false /* async_read */); if (!s.ok()) { ROCKS_LOG_ERROR(rep_->ioptions.logger, @@ -1349,13 +1352,15 @@ WithBlocklikeCheck BlockBasedTable::PutDataBlockToCache( std::unique_ptr block_holder; if (block_comp_type != kNoCompression) { // Retrieve the uncompressed contents into a new buffer + std::shared_ptr compressor = + rep_->GetCompressor(block_comp_type); BlockContents uncompressed_block_contents; - UncompressionContext context(block_comp_type); - UncompressionInfo info(context, uncompression_dict, block_comp_type); - s = UncompressBlockData(info, block_contents.data.data(), + UncompressionInfo info(uncompression_dict, + GetCompressFormatForVersion(format_version), + memory_allocator); + s = UncompressBlockData(compressor.get(), info, block_contents.data.data(), block_contents.data.size(), - &uncompressed_block_contents, format_version, - ioptions, memory_allocator); + &uncompressed_block_contents, ioptions); if (!s.ok()) { return s; } @@ -1536,7 +1541,7 @@ BlockBasedTable::MaybeReadBlockAndLoadToCache( TBlocklike::kBlockType, uncompression_dict, rep_->persistent_cache_options, GetMemoryAllocator(rep_->table_options), - /*allocator=*/nullptr); + /*allocator=*/nullptr, false, rep_->compressor.get()); // If prefetch_buffer is not allocated, it will fallback to synchronous // reading of block contents. 
@@ -1733,7 +1738,8 @@ WithBlocklikeCheck BlockBasedTable::RetrieveBlock( rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle, &block, rep_->ioptions, rep_->create_context, maybe_compressed, uncompression_dict, rep_->persistent_cache_options, - GetMemoryAllocator(rep_->table_options), for_compaction, async_read); + GetMemoryAllocator(rep_->table_options), for_compaction, + rep_->compressor, async_read); if (get_context) { switch (TBlocklike::kBlockType) { diff --git a/table/block_based/block_based_table_reader.h b/table/block_based/block_based_table_reader.h index 34ebf5aa52..e93bd002af 100644 --- a/table/block_based/block_based_table_reader.h +++ b/table/block_based/block_based_table_reader.h @@ -589,7 +589,11 @@ struct BlockBasedTable::Rep { level(_level), immortal_table(_immortal_table), user_defined_timestamps_persisted(_user_defined_timestamps_persisted), - cache_owner_id(_cache_owner_id) {} + cache_owner_id(_cache_owner_id) { + // Initialize as no compression. The actual compressor is determined when + // reading table properties. + compressor = BuiltinCompressor::GetCompressor(kNoCompression); + } ~Rep() { status.PermitUncheckedError(); } const ImmutableOptions& ioptions; const EnvOptions& env_options; @@ -657,6 +661,9 @@ struct BlockBasedTable::Rep { // before reading individual blocks enables certain optimizations. bool blocks_maybe_compressed = true; + // Compressor used for this table (obtained from SST properties block) + std::shared_ptr compressor; + // These describe how index is encoded. 
bool index_has_first_key = false; bool index_key_includes_seq = true; @@ -745,6 +752,14 @@ struct BlockBasedTable::Rep { #endif // ROCKSDB_MALLOC_USABLE_SIZE return usage; } + + std::shared_ptr GetCompressor(CompressionType compression_type) { + if (compression_type == compressor->GetCompressionType()) { + return compressor; + } else { + return BuiltinCompressor::GetCompressor(compression_type); + } + } }; // This is an adapter class for `WritableFile` to be used for `std::ostream`. diff --git a/table/block_based/block_based_table_reader_sync_and_async.h b/table/block_based/block_based_table_reader_sync_and_async.h index ab3ee01bb7..3d60aa4bde 100644 --- a/table/block_based/block_based_table_reader_sync_and_async.h +++ b/table/block_based/block_based_table_reader_sync_and_async.h @@ -285,11 +285,15 @@ DEFINE_SYNC_AND_ASYNC(void, BlockBasedTable::RetrieveMultipleBlocks) GetBlockCompressionType(serialized_block); BlockContents contents; if (compression_type != kNoCompression) { - UncompressionContext context(compression_type); - UncompressionInfo info(context, uncompression_dict, compression_type); - s = UncompressSerializedBlock( - info, req.result.data() + req_offset, handle.size(), &contents, - footer.format_version(), rep_->ioptions, memory_allocator); + std::shared_ptr compressor = + rep_->GetCompressor(compression_type); + UncompressionInfo info( + uncompression_dict, + GetCompressFormatForVersion(footer.format_version()), + memory_allocator); + s = UncompressSerializedBlock(compressor.get(), info, + req.result.data() + req_offset, + handle.size(), &contents, rep_->ioptions); } else { // There are two cases here: // 1) caller uses the shared buffer (scratch or direct io buffer); diff --git a/table/block_based/block_based_table_reader_test.cc b/table/block_based/block_based_table_reader_test.cc index 73db8007af..0fc870344d 100644 --- a/table/block_based/block_based_table_reader_test.cc +++ b/table/block_based/block_based_table_reader_test.cc @@ -25,6 +25,7 @@ 
#include "table/format.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/compressor.h" #include "util/random.h" namespace ROCKSDB_NAMESPACE { @@ -104,12 +105,13 @@ class BlockBasedTableReaderBaseTest : public testing::Test { // as each block's size. compression_opts.max_dict_bytes = compression_dict_bytes; compression_opts.max_dict_buffer_bytes = compression_dict_bytes; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(compression_type, compression_opts); IntTblPropCollectorFactories factories; std::unique_ptr table_builder( options_.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, comparator, &factories, - compression_type, compression_opts, - 0 /* column_family_id */, + compressor, 0 /* column_family_id */, kDefaultColumnFamilyName, -1 /* level */), writer.get())); @@ -214,12 +216,14 @@ class BlockBasedTableReaderBaseTest : public testing::Test { // compression dictionary. class BlockBasedTableReaderTest : public BlockBasedTableReaderBaseTest, - public testing::WithParamInterface> { + public testing::WithParamInterface< + std::tuple> { protected: void SetUp() override { - compression_type_ = std::get<0>(GetParam()); + compression_name_ = std::get<0>(GetParam()); + ASSERT_TRUE( + BuiltinCompressor::StringToType(compression_name_, &compression_type_)); use_direct_reads_ = std::get<1>(GetParam()); test::UserDefinedTimestampTestMode udt_test_mode = std::get<4>(GetParam()); udt_enabled_ = test::IsUDTEnabled(udt_test_mode); @@ -243,6 +247,7 @@ class BlockBasedTableReaderTest std::shared_ptr(NewFixedPrefixTransform(3)); } + std::string compression_name_; CompressionType compression_type_; bool use_direct_reads_; bool udt_enabled_; @@ -293,7 +298,7 @@ TEST_P(BlockBasedTableReaderTest, MultiGet) { } std::string table_name = "BlockBasedTableReaderTest_MultiGet" + - CompressionTypeToString(compression_type_); + BuiltinCompressor::TypeToString(compression_type_); ImmutableOptions ioptions(options); 
CreateTable(table_name, ioptions, compression_type_, kv, @@ -649,8 +654,8 @@ TEST_P(BlockBasedTableReaderTestVerifyChecksum, ChecksumMismatch) { options.statistics = CreateDBStatistics(); ImmutableOptions ioptions(options); - std::string table_name = - "BlockBasedTableReaderTest" + CompressionTypeToString(compression_type_); + std::string table_name = "BlockBasedTableReaderTest" + + BuiltinCompressor::TypeToString(compression_type_); CreateTable(table_name, ioptions, compression_type_, kv, compression_parallel_threads_, compression_dict_bytes_); @@ -709,7 +714,7 @@ TEST_P(BlockBasedTableReaderTestVerifyChecksum, ChecksumMismatch) { INSTANTIATE_TEST_CASE_P( BlockBasedTableReaderTest, BlockBasedTableReaderTest, ::testing::Combine( - ::testing::ValuesIn(GetSupportedCompressions()), ::testing::Bool(), + ::testing::ValuesIn(Compressor::GetSupported()), ::testing::Bool(), ::testing::Values( BlockBasedTableOptions::IndexType::kBinarySearch, BlockBasedTableOptions::IndexType::kHashSearch, @@ -720,7 +725,7 @@ INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P( VerifyChecksum, BlockBasedTableReaderTestVerifyChecksum, ::testing::Combine( - ::testing::ValuesIn(GetSupportedCompressions()), + ::testing::ValuesIn(Compressor::GetSupported()), ::testing::Values(false), ::testing::Values( BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch), diff --git a/table/block_based/block_cache.cc b/table/block_based/block_cache.cc index a252899d24..5c7c64d7bf 100644 --- a/table/block_based/block_cache.cc +++ b/table/block_based/block_cache.cc @@ -52,8 +52,13 @@ void BlockCreateContext::Create( void BlockCreateContext::Create(std::unique_ptr* parsed_out, BlockContents&& block) { - parsed_out->reset(new UncompressionDict( - block.data, std::move(block.allocation), using_zstd)); + if (compressor != nullptr) { + parsed_out->reset(compressor->NewUncompressionDict( + block.data, std::move(block.allocation))); + } else { + parsed_out->reset( + new UncompressionDict(block.data, 
std::move(block.allocation))); + } } namespace { diff --git a/table/block_based/block_cache.h b/table/block_based/block_cache.h index 00eaface37..f720cc904c 100644 --- a/table/block_based/block_cache.h +++ b/table/block_based/block_cache.h @@ -16,6 +16,7 @@ #include "table/block_based/block_type.h" #include "table/block_based/parsed_full_filter_block.h" #include "table/format.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -70,14 +71,15 @@ class Block_kMetaIndex : public Block { struct BlockCreateContext : public Cache::CreateContext { BlockCreateContext() {} BlockCreateContext(const BlockBasedTableOptions* _table_options, - Statistics* _statistics, bool _using_zstd, + Statistics* _statistics, + const std::shared_ptr& _compressor, uint8_t _protection_bytes_per_key, const Comparator* _raw_ucmp, bool _index_value_is_full = false, bool _index_has_first_key = false) : table_options(_table_options), statistics(_statistics), - using_zstd(_using_zstd), + compressor(_compressor), protection_bytes_per_key(_protection_bytes_per_key), raw_ucmp(_raw_ucmp), index_value_is_full(_index_value_is_full), @@ -85,7 +87,7 @@ struct BlockCreateContext : public Cache::CreateContext { const BlockBasedTableOptions* table_options = nullptr; Statistics* statistics = nullptr; - bool using_zstd = false; + std::shared_ptr compressor; uint8_t protection_bytes_per_key = 0; const Comparator* raw_ucmp = nullptr; bool index_value_is_full; diff --git a/table/block_based/block_test.cc b/table/block_based/block_test.cc index 3264371c19..4bad23be68 100644 --- a/table/block_based/block_test.cc +++ b/table/block_based/block_test.cc @@ -847,10 +847,12 @@ TEST_F(BlockPerKVChecksumTest, EmptyBlock) { std::unique_ptr data_block; Options options = Options(); BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); uint8_t protection_bytes_per_key = 8; - BlockCreateContext create_context{ - &tbo, nullptr /* statistics */, false /* 
using_zstd */, - protection_bytes_per_key, options.comparator}; + BlockCreateContext create_context{&tbo, nullptr /* statistics */, compressor, + protection_bytes_per_key, + options.comparator}; create_context.Create(&data_block, std::move(contents)); std::unique_ptr biter{data_block->NewDataIterator( options.comparator, kDisableGlobalSequenceNumber)}; @@ -884,10 +886,12 @@ TEST_F(BlockPerKVChecksumTest, InitializeProtectionInfo) { // when the block is itself already corrupted. Options options = Options(); BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); uint8_t protection_bytes_per_key = 8; - BlockCreateContext create_context{ - &tbo, nullptr /* statistics */, false /* using_zstd */, - protection_bytes_per_key, options.comparator}; + BlockCreateContext create_context{&tbo, nullptr /* statistics */, compressor, + protection_bytes_per_key, + options.comparator}; { std::string invalid_content = "1"; @@ -946,16 +950,18 @@ TEST_F(BlockPerKVChecksumTest, ApproximateMemory) { Options options = Options(); BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); uint8_t protection_bytes_per_key = 8; BlockCreateContext with_checksum_create_context{ &tbo, nullptr /* statistics */, - false /* using_zstd */, + compressor, protection_bytes_per_key, options.comparator, true /* index_value_is_full */}; BlockCreateContext create_context{ - &tbo, nullptr /* statistics */, false /* using_zstd */, + &tbo, nullptr /* statistics */, compressor, 0, options.comparator, true /* index_value_is_full */}; { @@ -1045,8 +1051,10 @@ class DataBlockKVChecksumTest std::vector &keys, std::vector &values, int num_record) { BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); BlockCreateContext create_context{&tbo, nullptr /* statistics */, - false /* using_zstd */, GetChecksumLen(), + compressor, GetChecksumLen(), 
Options().comparator}; builder_ = std::make_unique( static_cast(GetRestartInterval()), @@ -1169,11 +1177,13 @@ class IndexBlockKVChecksumTest std::vector &first_keys, int num_record) { Options options = Options(); BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); uint8_t protection_bytes_per_key = GetChecksumLen(); BlockCreateContext create_context{ &tbo, nullptr /* statistics */, - false /* _using_zstd */, + compressor, protection_bytes_per_key, options.comparator, !UseValueDeltaEncoding() /* value_is_full */, @@ -1311,10 +1321,12 @@ class MetaIndexBlockKVChecksumTest int num_record) { Options options = Options(); BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); uint8_t protection_bytes_per_key = GetChecksumLen(); - BlockCreateContext create_context{ - &tbo, nullptr /* statistics */, false /* using_zstd */, - protection_bytes_per_key, options.comparator}; + BlockCreateContext create_context{&tbo, nullptr /* statistics */, + compressor, protection_bytes_per_key, + options.comparator}; builder_ = std::make_unique(static_cast(GetRestartInterval())); // add a bunch of records to a block @@ -1343,10 +1355,12 @@ INSTANTIATE_TEST_CASE_P(P, MetaIndexBlockKVChecksumTest, TEST_P(MetaIndexBlockKVChecksumTest, ChecksumConstructionAndVerification) { Options options = Options(); BlockBasedTableOptions tbo; + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(kNoCompression); uint8_t protection_bytes_per_key = GetChecksumLen(); - BlockCreateContext create_context{ - &tbo, nullptr /* statistics */, false /* using_zstd */, - protection_bytes_per_key, options.comparator}; + BlockCreateContext create_context{&tbo, nullptr /* statistics */, compressor, + protection_bytes_per_key, + options.comparator}; std::vector num_restart_intervals = {1, 16}; for (const auto num_restart_interval : num_restart_intervals) { const int kNumRecords = 
num_restart_interval * GetRestartInterval(); diff --git a/table/block_based/data_block_hash_index_test.cc b/table/block_based/data_block_hash_index_test.cc index 2841b271de..3fb9aab8ca 100644 --- a/table/block_based/data_block_hash_index_test.cc +++ b/table/block_based/data_block_hash_index_test.cc @@ -556,8 +556,7 @@ void TestBoundary(InternalKey& ik1, std::string& v1, InternalKey& ik2, builder.reset(ioptions.table_factory->NewTableBuilder( TableBuilderOptions( ioptions, moptions, internal_comparator, - &int_tbl_prop_collector_factories, options.compression, - CompressionOptions(), + &int_tbl_prop_collector_factories, moptions.compressor, TablePropertiesCollectorFactory::Context::kUnknownColumnFamily, column_family_name, level_), file_writer.get())); diff --git a/table/block_based/uncompression_dict_reader.h b/table/block_based/uncompression_dict_reader.h index c69fd48c9f..3fc7ead55f 100644 --- a/table/block_based/uncompression_dict_reader.h +++ b/table/block_based/uncompression_dict_reader.h @@ -25,6 +25,7 @@ #include "rocksdb/table_pinning_policy.h" #include "table/block_based/cachable_entry.h" #include "table/format.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -33,7 +34,7 @@ struct BlockCacheLookupContext; class FilePrefetchBuffer; class GetContext; struct ReadOptions; -struct UncompressionDict; +class UncompressionDict; // Provides access to the uncompression dictionary regardless of whether // it is owned by the reader or stored in the cache, or whether it is pinned diff --git a/table/block_fetcher.cc b/table/block_fetcher.cc index 34d3e23e9a..0b46284713 100644 --- a/table/block_fetcher.cc +++ b/table/block_fetcher.cc @@ -328,11 +328,18 @@ IOStatus BlockFetcher::ReadBlockContents() { if (do_uncompress_ && compression_type_ != kNoCompression) { PERF_TIMER_GUARD(block_decompress_time); // compressed page, uncompress, update cache - UncompressionContext context(compression_type_); - UncompressionInfo info(context, uncompression_dict_, 
compression_type_); + Compressor* compressor = table_compressor_; + if (table_compressor_ == nullptr || + table_compressor_->GetCompressionType() != compression_type_) { + compressor_ = BuiltinCompressor::GetCompressor(compression_type_); + compressor = compressor_.get(); + } + UncompressionInfo info( + uncompression_dict_, + GetCompressFormatForVersion(footer_.format_version()), + memory_allocator_); io_status_ = status_to_io_status(UncompressSerializedBlock( - info, slice_.data(), block_size_, contents_, footer_.format_version(), - ioptions_, memory_allocator_)); + compressor, info, slice_.data(), block_size_, contents_, ioptions_)); #ifndef NDEBUG num_heap_buf_memcpy_++; #endif @@ -378,12 +385,19 @@ IOStatus BlockFetcher::ReadAsyncBlockContents() { if (do_uncompress_ && compression_type_ != kNoCompression) { PERF_TIMER_GUARD(block_decompress_time); // compressed page, uncompress, update cache - UncompressionContext context(compression_type_); - UncompressionInfo info(context, uncompression_dict_, - compression_type_); - io_status_ = status_to_io_status(UncompressSerializedBlock( - info, slice_.data(), block_size_, contents_, - footer_.format_version(), ioptions_, memory_allocator_)); + Compressor* compressor = table_compressor_; + if (table_compressor_ == nullptr || + table_compressor_->GetCompressionType() != compression_type_) { + compressor_ = BuiltinCompressor::GetCompressor(compression_type_); + compressor = compressor_.get(); + } + UncompressionInfo info( + uncompression_dict_, + GetCompressFormatForVersion(footer_.format_version()), + memory_allocator_); + io_status_ = status_to_io_status( + UncompressSerializedBlock(compressor, info, slice_.data(), + block_size_, contents_, ioptions_)); #ifndef NDEBUG num_heap_buf_memcpy_++; #endif diff --git a/table/block_fetcher.h b/table/block_fetcher.h index da6c352d0a..b6f9216135 100644 --- a/table/block_fetcher.h +++ b/table/block_fetcher.h @@ -13,6 +13,7 @@ #include "table/block_based/block_type.h" #include 
"table/format.h" #include "table/persistent_cache_options.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -49,7 +50,7 @@ class BlockFetcher { const PersistentCacheOptions& cache_options /* ref retained */, MemoryAllocator* memory_allocator = nullptr, MemoryAllocator* memory_allocator_compressed = nullptr, - bool for_compaction = false) + bool for_compaction = false, Compressor* compressor = nullptr) : file_(file), prefetch_buffer_(prefetch_buffer), footer_(footer), @@ -66,7 +67,8 @@ class BlockFetcher { cache_options_(cache_options), memory_allocator_(memory_allocator), memory_allocator_compressed_(memory_allocator_compressed), - for_compaction_(for_compaction) { + for_compaction_(for_compaction), + table_compressor_(compressor) { io_status_.PermitUncheckedError(); // TODO(AR) can we improve on this? } @@ -121,8 +123,15 @@ class BlockFetcher { CacheAllocationPtr compressed_buf_; char stack_buf_[kDefaultStackBufferSize]; bool got_from_prefetch_buffer_ = false; - CompressionType compression_type_; bool for_compaction_ = false; + // Compression type used for the block + CompressionType compression_type_; + // If the compressor used for the block is different from the one used for the + // table, we need to hold a reference to the corresponding compressor while + // it's in use + std::shared_ptr compressor_ = nullptr; + // Compressor used for the table. It may not be the one used for the block. 
+ Compressor* table_compressor_; // return true if found bool TryGetUncompressBlockFromPersistentCache(); diff --git a/table/block_fetcher_test.cc b/table/block_fetcher_test.cc index e686eac801..9811fdef9c 100644 --- a/table/block_fetcher_test.cc +++ b/table/block_fetcher_test.cc @@ -24,6 +24,7 @@ #include "options/options_helper.h" #include "port/port.h" #include "port/stack_trace.h" +#include "rocksdb/convenience.h" #include "rocksdb/db.h" #include "rocksdb/file_system.h" #include "rocksdb/table_pinning_policy.h" @@ -33,6 +34,7 @@ #include "table/block_based/block_based_table_reader.h" #include "table/format.h" #include "test_util/testharness.h" +#include "util/compressor.h" #include "utilities/memory_allocators.h" namespace ROCKSDB_NAMESPACE { @@ -94,7 +96,7 @@ class BlockFetcherTest : public testing::Test { IntTblPropCollectorFactories factories; std::unique_ptr table_builder(table_factory_.NewTableBuilder( TableBuilderOptions(ioptions, moptions, comparator, &factories, - compression_type, CompressionOptions(), + BuiltinCompressor::GetCompressor(compression_type), 0 /* column_family_id */, kDefaultColumnFamilyName, -1 /* level */), writer.get())); @@ -147,13 +149,16 @@ class BlockFetcherTest : public testing::Test { void TestFetchDataBlock( const std::string& table_name_prefix, bool compressed, bool do_uncompress, std::array expected_stats_by_mode) { - for (CompressionType compression_type : GetSupportedCompressions()) { + for (const auto& compression_str : Compressor::GetSupported()) { + CompressionType compression_type; + if (!BuiltinCompressor::StringToType(compression_str, + &compression_type)) { + continue; + } bool do_compress = compression_type != kNoCompression; if (compressed != do_compress) continue; - std::string compression_type_str = - CompressionTypeToString(compression_type); - std::string table_name = table_name_prefix + compression_type_str; + std::string table_name = table_name_prefix + compression_str; CreateTable(table_name, 
compression_type); CompressionType expected_compression_type_after_fetch = @@ -387,22 +392,24 @@ class BlockFetcherTest : public testing::Test { // Expects: // the index block contents are the same for both read modes. TEST_F(BlockFetcherTest, FetchIndexBlock) { - for (CompressionType compression : GetSupportedCompressions()) { - std::string table_name = - "FetchIndexBlock" + CompressionTypeToString(compression); - CreateTable(table_name, compression); - - CountedMemoryAllocator allocator; - MemcpyStats memcpy_stats; - BlockContents indexes[NumModes]; - std::string index_datas[NumModes]; - for (int i = 0; i < NumModes; ++i) { - SetMode(static_cast(i)); - FetchIndexBlock(table_name, &allocator, &allocator, &memcpy_stats, - &indexes[i], &index_datas[i]); - } - for (int i = 0; i < NumModes - 1; ++i) { - AssertSameBlock(index_datas[i], index_datas[i + 1]); + for (const auto& compression : Compressor::GetSupported()) { + CompressionType type; + if (BuiltinCompressor::StringToType(compression, &type)) { + std::string table_name = "FetchIndexBlock" + compression; + CreateTable(table_name, type); + + CountedMemoryAllocator allocator; + MemcpyStats memcpy_stats; + BlockContents indexes[NumModes]; + std::string index_datas[NumModes]; + for (int i = 0; i < NumModes; ++i) { + SetMode(static_cast(i)); + FetchIndexBlock(table_name, &allocator, &allocator, &memcpy_stats, + &indexes[i], &index_datas[i]); + } + for (int i = 0; i < NumModes - 1; ++i) { + AssertSameBlock(index_datas[i], index_datas[i + 1]); + } } } } diff --git a/table/format.cc b/table/format.cc index a72dcc835d..9f0708508d 100644 --- a/table/format.cc +++ b/table/format.cc @@ -43,7 +43,7 @@ #include "unique_id_impl.h" #include "util/cast_util.h" #include "util/coding.h" -#include "util/compression.h" +#include "util/compressor.h" #include "util/crc32c.h" #include "util/hash.h" #include "util/stop_watch.h" @@ -644,36 +644,38 @@ uint32_t ComputeBuiltinChecksumWithLastByte(ChecksumType type, const char* data, } } 
-Status UncompressBlockData(const UncompressionInfo& uncompression_info, +Status UncompressBlockData(Compressor* uncompressor, + const UncompressionInfo& uncompression_info, const char* data, size_t size, - BlockContents* out_contents, uint32_t format_version, - const ImmutableOptions& ioptions, - MemoryAllocator* allocator) { + BlockContents* out_contents, + const ImmutableOptions& ioptions) { Status ret = Status::OK(); - assert(uncompression_info.type() != kNoCompression && + assert((uncompressor == nullptr || + uncompressor->GetCompressionType() != kNoCompression) && "Invalid compression type"); StopWatchNano timer(ioptions.clock, ShouldReportDetailedTime(ioptions.env, ioptions.stats)); size_t uncompressed_size = 0; - const char* error_msg = nullptr; - CacheAllocationPtr ubuf = UncompressData( - uncompression_info, data, size, &uncompressed_size, - GetCompressFormatForVersion(format_version), allocator, &error_msg); + Status uncompress_status; + CacheAllocationPtr ubuf = uncompression_info.UncompressData( + uncompressor, data, size, &uncompressed_size, &uncompress_status); if (!ubuf) { - if (!CompressionTypeSupported(uncompression_info.type())) { - ret = Status::NotSupported( - "Unsupported compression method for this build", - CompressionTypeToString(uncompression_info.type())); + if (uncompressor == nullptr) { + return Status::NotSupported( + "Unsupported compression method for this build "); + } else if (!uncompressor->Supported()) { + return Status::NotSupported( + "Unsupported compression method for this build ", + uncompressor->GetId()); } else { std::ostringstream oss; oss << "Corrupted compressed block contents"; - if (error_msg) { - oss << ": " << error_msg; + if (uncompress_status.getState()) { + oss << ": " << uncompress_status.getState(); } - ret = Status::Corruption( - oss.str(), CompressionTypeToString(uncompression_info.type())); + ret = Status::Corruption(oss.str(), uncompressor->GetId()); } return ret; } @@ -698,16 +700,15 @@ Status 
UncompressBlockData(const UncompressionInfo& uncompression_info, return ret; } -Status UncompressSerializedBlock(const UncompressionInfo& uncompression_info, +Status UncompressSerializedBlock(Compressor* uncompressor, + const UncompressionInfo& uncompression_info, const char* data, size_t size, BlockContents* out_contents, - uint32_t format_version, - const ImmutableOptions& ioptions, - MemoryAllocator* allocator) { + const ImmutableOptions& ioptions) { assert(data[size] != kNoCompression); - assert(data[size] == static_cast(uncompression_info.type())); - return UncompressBlockData(uncompression_info, data, size, out_contents, - format_version, ioptions, allocator); + assert(data[size] == static_cast(uncompressor->GetCompressionType())); + return UncompressBlockData(uncompressor, uncompression_info, data, size, + out_contents, ioptions); } // Replace the contents of db_host_id with the actual hostname, if db_host_id diff --git a/table/format.h b/table/format.h index 73675381ed..5b3b396b03 100644 --- a/table/format.h +++ b/table/format.h @@ -25,7 +25,8 @@ #include "util/hash.h" namespace ROCKSDB_NAMESPACE { - +class Compressor; +class UncompressionInfo; class RandomAccessFile; struct ReadOptions; @@ -400,20 +401,18 @@ struct BlockContents { // contents are returned in `out_contents`. // format_version is as defined in include/rocksdb/table.h, which is // used to determine compression format version. -Status UncompressSerializedBlock(const UncompressionInfo& info, +Status UncompressSerializedBlock(Compressor* uncompressor, + const UncompressionInfo& info, const char* data, size_t size, BlockContents* out_contents, - uint32_t format_version, - const ImmutableOptions& ioptions, - MemoryAllocator* allocator = nullptr); + const ImmutableOptions& ioptions); // This is a variant of UncompressSerializedBlock that does not expect a // block trailer beyond `size`. (CompressionType is taken from `info`.) 
-Status UncompressBlockData(const UncompressionInfo& info, const char* data, +Status UncompressBlockData(Compressor* uncompressor, + const UncompressionInfo& info, const char* data, size_t size, BlockContents* out_contents, - uint32_t format_version, - const ImmutableOptions& ioptions, - MemoryAllocator* allocator = nullptr); + const ImmutableOptions& ioptions); // Replace db_host_id contents with the real hostname if necessary Status ReifyDbHostIdProperty(Env* env, std::string* db_host_id); diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc index a081f9b918..c272374731 100644 --- a/table/sst_file_dumper.cc +++ b/table/sst_file_dumper.cc @@ -299,9 +299,10 @@ Status SstFileDumper::ShowCompressionSize( std::string column_family_name; int unknown_level = -1; + auto compressor = + BuiltinCompressor::GetCompressor(compress_type, compress_opt); TableBuilderOptions tb_opts( - imoptions, moptions, ikc, &block_based_table_factories, compress_type, - compress_opt, + imoptions, moptions, ikc, &block_based_table_factories, compressor, TablePropertiesCollectorFactory::Context::kUnknownColumnFamily, column_family_name, unknown_level); uint64_t num_data_blocks = 0; diff --git a/table/sst_file_writer.cc b/table/sst_file_writer.cc index 61212ee316..56c94a675f 100644 --- a/table/sst_file_writer.cc +++ b/table/sst_file_writer.cc @@ -310,23 +310,15 @@ Status SstFileWriter::Open(const std::string& file_path) { sst_file->SetIOPriority(r->io_priority); - CompressionType compression_type; - CompressionOptions compression_opts; - if (r->mutable_cf_options.bottommost_compression != - kDisableCompressionOption) { - compression_type = r->mutable_cf_options.bottommost_compression; - if (r->mutable_cf_options.bottommost_compression_opts.enabled) { - compression_opts = r->mutable_cf_options.bottommost_compression_opts; - } else { - compression_opts = r->mutable_cf_options.compression_opts; - } - } else if (!r->mutable_cf_options.compression_per_level.empty()) { - // Use the 
compression of the last level if we have per level compression - compression_type = *(r->mutable_cf_options.compression_per_level.rbegin()); - compression_opts = r->mutable_cf_options.compression_opts; + std::shared_ptr compressor; + if (r->mutable_cf_options.bottommost_compressor != nullptr) { + compressor = r->mutable_cf_options.bottommost_compressor; + } else if (r->mutable_cf_options.compressor_per_level.empty()) { + compressor = r->mutable_cf_options.compressor; } else { - compression_type = r->mutable_cf_options.compression; - compression_opts = r->mutable_cf_options.compression_opts; + // Use the compression of the last level if we have per level compression + auto levels = r->mutable_cf_options.compressor_per_level.size(); + compressor = r->mutable_cf_options.compressor_per_level[levels - 1]; } IntTblPropCollectorFactories int_tbl_prop_collector_factories; @@ -361,8 +353,8 @@ Status SstFileWriter::Open(const std::string& file_path) { // approximate time of ingested keys. TableBuilderOptions table_builder_options( r->ioptions, r->mutable_cf_options, r->internal_comparator, - &int_tbl_prop_collector_factories, compression_type, compression_opts, - cf_id, r->column_family_name, unknown_level, false /* is_bottommost */, + &int_tbl_prop_collector_factories, compressor, cf_id, + r->column_family_name, unknown_level, false /* is_bottommost */, false /* is_last_level_with_data */, TableFileCreationReason::kMisc, 0 /* oldest_key_time */, 0 /* file_creation_time */, "SST Writer" /* db_id */, r->db_session_id, 0 /* target_file_size */, diff --git a/table/table_builder.h b/table/table_builder.h index c235102652..2930db0d7b 100644 --- a/table/table_builder.h +++ b/table/table_builder.h @@ -129,10 +129,10 @@ struct TableBuilderOptions { const ImmutableOptions& _ioptions, const MutableCFOptions& _moptions, const InternalKeyComparator& _internal_comparator, const IntTblPropCollectorFactories* _int_tbl_prop_collector_factories, - CompressionType _compression_type, - const 
CompressionOptions& _compression_opts, uint32_t _column_family_id, - const std::string& _column_family_name, int _level, - bool _is_bottommost = false, bool _is_last_level_with_data = false, + const std::shared_ptr& _compressor, + uint32_t _column_family_id, const std::string& _column_family_name, + int _level, bool _is_bottommost = false, + bool _is_last_level_with_data = false, TableFileCreationReason _reason = TableFileCreationReason::kMisc, const int64_t _oldest_key_time = 0, const uint64_t _file_creation_time = 0, const std::string& _db_id = "", @@ -142,8 +142,7 @@ struct TableBuilderOptions { moptions(_moptions), internal_comparator(_internal_comparator), int_tbl_prop_collector_factories(_int_tbl_prop_collector_factories), - compression_type(_compression_type), - compression_opts(_compression_opts), + compressor(_compressor), column_family_id(_column_family_id), column_family_name(_column_family_name), oldest_key_time(_oldest_key_time), @@ -161,8 +160,7 @@ struct TableBuilderOptions { const MutableCFOptions& moptions; const InternalKeyComparator& internal_comparator; const IntTblPropCollectorFactories* int_tbl_prop_collector_factories; - const CompressionType compression_type; - const CompressionOptions& compression_opts; + std::shared_ptr compressor; const uint32_t column_family_id; const std::string& column_family_name; const int64_t oldest_key_time; diff --git a/table/table_reader_bench.cc b/table/table_reader_bench.cc index 60c84d7bf0..00ebb7928e 100644 --- a/table/table_reader_bench.cc +++ b/table/table_reader_bench.cc @@ -101,7 +101,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options, tb = opts.table_factory->NewTableBuilder( TableBuilderOptions( ioptions, moptions, ikc, &int_tbl_prop_collector_factories, - CompressionType::kNoCompression, CompressionOptions(), + BuiltinCompressor::GetCompressor(CompressionType::kNoCompression), 0 /* column_family_id */, kDefaultColumnFamilyName, unknown_level), file_writer.get()); } else { diff --git 
a/table/table_test.cc b/table/table_test.cc index 4cbc4df398..05205d32f5 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -375,7 +375,8 @@ class TableConstructor : public Constructor { } ~TableConstructor() override { Reset(); } - Status FinishImpl(const Options& options, const ImmutableOptions& ioptions, + Status FinishImpl(const Options& /* options */, + const ImmutableOptions& ioptions, const MutableCFOptions& moptions, const BlockBasedTableOptions& /*table_options*/, const InternalKeyComparator& internal_comparator, @@ -399,8 +400,8 @@ class TableConstructor : public Constructor { builder.reset(ioptions.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, internal_comparator, &int_tbl_prop_collector_factories, - options.compression, options.compression_opts, - kUnknownColumnFamily, column_family_name, level_), + moptions.compressor, kUnknownColumnFamily, + column_family_name, level_), file_writer_.get())); for (const auto& kv : kv_map) { @@ -3994,10 +3995,9 @@ TEST_P(BlockBasedTableTest, NoFileChecksum) { f.CreateWritableFile(); std::unique_ptr builder; builder.reset(ioptions.table_factory->NewTableBuilder( - TableBuilderOptions(ioptions, moptions, *comparator, - &int_tbl_prop_collector_factories, - options.compression, options.compression_opts, - kUnknownColumnFamily, column_family_name, level), + TableBuilderOptions( + ioptions, moptions, *comparator, &int_tbl_prop_collector_factories, + moptions.compressor, kUnknownColumnFamily, column_family_name, level), f.GetFileWriter())); ASSERT_OK(f.ResetTableBuilder(std::move(builder))); f.AddKVtoKVMap(1000); @@ -4030,10 +4030,9 @@ TEST_P(BlockBasedTableTest, Crc32cFileChecksum) { f.SetFileChecksumGenerator(checksum_crc32c_gen1.release()); std::unique_ptr builder; builder.reset(ioptions.table_factory->NewTableBuilder( - TableBuilderOptions(ioptions, moptions, *comparator, - &int_tbl_prop_collector_factories, - options.compression, options.compression_opts, - kUnknownColumnFamily, 
column_family_name, level), + TableBuilderOptions( + ioptions, moptions, *comparator, &int_tbl_prop_collector_factories, + moptions.compressor, kUnknownColumnFamily, column_family_name, level), f.GetFileWriter())); ASSERT_OK(f.ResetTableBuilder(std::move(builder))); f.AddKVtoKVMap(1000); @@ -4076,10 +4075,10 @@ TEST_F(PlainTableTest, BasicPlainTableProperties) { std::string column_family_name; int unknown_level = -1; std::unique_ptr builder(factory.NewTableBuilder( - TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), kUnknownColumnFamily, - column_family_name, unknown_level), + TableBuilderOptions( + ioptions, moptions, ikc, &int_tbl_prop_collector_factories, + BuiltinCompressor::GetCompressor(kNoCompression), + kUnknownColumnFamily, column_family_name, unknown_level), file_writer.get())); for (char c = 'a'; c <= 'z'; ++c) { @@ -4131,10 +4130,10 @@ TEST_F(PlainTableTest, NoFileChecksum) { f.CreateWritableFile(); std::unique_ptr builder(factory.NewTableBuilder( - TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), kUnknownColumnFamily, - column_family_name, unknown_level), + TableBuilderOptions( + ioptions, moptions, ikc, &int_tbl_prop_collector_factories, + BuiltinCompressor::GetCompressor(kNoCompression), + kUnknownColumnFamily, column_family_name, unknown_level), f.GetFileWriter())); ASSERT_OK(f.ResetTableBuilder(std::move(builder))); f.AddKVtoKVMap(1000); @@ -4171,10 +4170,10 @@ TEST_F(PlainTableTest, Crc32cFileChecksum) { f.SetFileChecksumGenerator(checksum_crc32c_gen1.release()); std::unique_ptr builder(factory.NewTableBuilder( - TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), kUnknownColumnFamily, - column_family_name, unknown_level), + TableBuilderOptions( + ioptions, moptions, ikc, &int_tbl_prop_collector_factories, + 
BuiltinCompressor::GetCompressor(kNoCompression), + kUnknownColumnFamily, column_family_name, unknown_level), f.GetFileWriter())); ASSERT_OK(f.ResetTableBuilder(std::move(builder))); f.AddKVtoKVMap(1000); @@ -4780,9 +4779,9 @@ TEST_P(BlockBasedTableTest, DISABLED_TableWithGlobalSeqno) { std::string column_family_name; std::unique_ptr builder(options.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), kUnknownColumnFamily, - column_family_name, -1), + &int_tbl_prop_collector_factories, + BuiltinCompressor::GetCompressor(kNoCompression), + kUnknownColumnFamily, column_family_name, -1), file_writer.get())); for (char c = 'a'; c <= 'z'; ++c) { @@ -4962,9 +4961,9 @@ TEST_P(BlockBasedTableTest, BlockAlignTest) { std::string column_family_name; std::unique_ptr builder(options.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), kUnknownColumnFamily, - column_family_name, -1), + &int_tbl_prop_collector_factories, + BuiltinCompressor::GetCompressor(kNoCompression), + kUnknownColumnFamily, column_family_name, -1), file_writer.get())); for (int i = 1; i <= 10000; ++i) { @@ -5055,9 +5054,9 @@ TEST_P(BlockBasedTableTest, PropertiesBlockRestartPointTest) { std::unique_ptr builder(options.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kNoCompression, - CompressionOptions(), kUnknownColumnFamily, - column_family_name, -1), + &int_tbl_prop_collector_factories, + BuiltinCompressor::GetCompressor(kNoCompression), + kUnknownColumnFamily, column_family_name, -1), file_writer.get())); for (int i = 1; i <= 10000; ++i) { @@ -5628,7 +5627,13 @@ TEST_F(ChargeCompressionDictionaryBuildingBufferTest, Basic) { {CacheEntryRole::kCompressionDictionaryBuildingBuffer, {/*.charged = */ 
charge_compression_dictionary_building_buffer}}); Options options; - options.compression = kSnappyCompression; + std::vector dict_compressions = Compressor::GetDictSupported(); + if (!dict_compressions.empty()) { + BuiltinCompressor::StringToType(dict_compressions[0], + &options.compression); + } else { + return; + } options.compression_opts.max_dict_bytes = kMaxDictBytes; options.compression_opts.max_dict_buffer_bytes = kMaxDictBufferBytes; options.table_factory.reset(NewBlockBasedTableFactory(table_options)); @@ -5643,12 +5648,14 @@ TEST_F(ChargeCompressionDictionaryBuildingBufferTest, Basic) { InternalKeyComparator ikc(options.comparator); IntTblPropCollectorFactories int_tbl_prop_collector_factories; + auto compressor = BuiltinCompressor::GetCompressor( + options.compression, options.compression_opts); + ASSERT_NE(compressor, nullptr); std::unique_ptr builder( options.table_factory->NewTableBuilder( TableBuilderOptions( ioptions, moptions, ikc, &int_tbl_prop_collector_factories, - kSnappyCompression, options.compression_opts, - kUnknownColumnFamily, "test_cf", -1 /* level */), + compressor, kUnknownColumnFamily, "test_cf", -1 /* level */), file_writer.get())); std::string key1 = "key1"; @@ -5704,7 +5711,12 @@ TEST_F(ChargeCompressionDictionaryBuildingBufferTest, std::make_shared(); Options options; - options.compression = kSnappyCompression; + std::vector dict_compressions = Compressor::GetDictSupported(); + if (!dict_compressions.empty()) { + BuiltinCompressor::StringToType(dict_compressions[0], &options.compression); + } else { + return; + } options.compression_opts.max_dict_bytes = kMaxDictBytes; options.compression_opts.max_dict_buffer_bytes = kMaxDictBufferBytes; options.table_factory.reset(NewBlockBasedTableFactory(table_options)); @@ -5719,11 +5731,13 @@ TEST_F(ChargeCompressionDictionaryBuildingBufferTest, InternalKeyComparator ikc(options.comparator); IntTblPropCollectorFactories int_tbl_prop_collector_factories; + auto compressor = 
BuiltinCompressor::GetCompressor(options.compression, + options.compression_opts); + ASSERT_NE(compressor, nullptr); std::unique_ptr builder(options.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kSnappyCompression, - options.compression_opts, kUnknownColumnFamily, - "test_cf", -1 /* level */), + &int_tbl_prop_collector_factories, compressor, + kUnknownColumnFamily, "test_cf", -1 /* level */), file_writer.get())); std::string key1 = "key1"; @@ -5789,7 +5803,12 @@ TEST_F(ChargeCompressionDictionaryBuildingBufferTest, BasicWithCacheFull) { std::make_shared(); Options options; - options.compression = kSnappyCompression; + std::vector dict_compressions = Compressor::GetDictSupported(); + if (!dict_compressions.empty()) { + BuiltinCompressor::StringToType(dict_compressions[0], &options.compression); + } else { + return; + } options.compression_opts.max_dict_bytes = kMaxDictBytes; options.compression_opts.max_dict_buffer_bytes = kMaxDictBufferBytes; options.table_factory.reset(NewBlockBasedTableFactory(table_options)); @@ -5804,11 +5823,13 @@ TEST_F(ChargeCompressionDictionaryBuildingBufferTest, BasicWithCacheFull) { InternalKeyComparator ikc(options.comparator); IntTblPropCollectorFactories int_tbl_prop_collector_factories; + auto compressor = BuiltinCompressor::GetCompressor(options.compression, + options.compression_opts); + ASSERT_NE(compressor, nullptr); std::unique_ptr builder(options.table_factory->NewTableBuilder( TableBuilderOptions(ioptions, moptions, ikc, - &int_tbl_prop_collector_factories, kSnappyCompression, - options.compression_opts, kUnknownColumnFamily, - "test_cf", -1 /* level */), + &int_tbl_prop_collector_factories, compressor, + kUnknownColumnFamily, "test_cf", -1 /* level */), file_writer.get())); std::string key1 = "key1"; diff --git a/test_util/testutil.cc b/test_util/testutil.cc index 8a454366fc..6408f21169 100644 --- a/test_util/testutil.cc +++ b/test_util/testutil.cc @@ -42,7 
+42,9 @@ #include "rocksdb/utilities/object_registry.h" #include "test_util/mock_time_env.h" #include "test_util/sync_point.h" +#include "util/compressor.h" #include "util/random.h" +#include "util/string_util.h" #ifndef ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {} @@ -234,7 +236,7 @@ std::string RandomName(Random* rnd, const size_t len) { CompressionType RandomCompressionType(Random* rnd) { auto ret = static_cast(rnd->Uniform(6)); - while (!CompressionTypeSupported(ret)) { + while (!BuiltinCompressor::TypeSupported(ret)) { ret = static_cast((static_cast(ret) + 1) % 6); } return ret; diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc index 46746d0dc2..e19a1674fd 100644 --- a/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc +++ b/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc @@ -40,6 +40,7 @@ int main() { #include "test_util/testutil.h" #include "tools/block_cache_analyzer/block_cache_trace_analyzer.h" #include "trace_replay/block_cache_tracer.h" +#include "util/string_util.h" namespace ROCKSDB_NAMESPACE { diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index 0fbee58055..d2bb298e81 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -3121,12 +3121,9 @@ class Benchmark { return true; } - inline bool CompressSlice(const CompressionInfo& compression_info, + inline bool CompressSlice(Compressor* compressor, const CompressionInfo& info, const Slice& input, std::string* compressed) { - constexpr uint32_t compress_format_version = 2; - - return CompressData(input, compression_info, compress_format_version, - compressed); + return info.CompressData(compressor, input, compressed); } void PrintHeader(bool first_group) { @@ -3210,12 +3207,11 @@ class Benchmark { const int len = FLAGS_block_size; std::string input_str(len, 'y'); std::string compressed; - 
CompressionOptions opts; - CompressionContext context(FLAGS_compression_type_e, opts); - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), - FLAGS_compression_type_e, - FLAGS_sample_for_compression); - bool result = CompressSlice(info, Slice(input_str), &compressed); + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(FLAGS_compression_type_e); + CompressionInfo info(FLAGS_sample_for_compression); + bool result = + info.CompressData(compressor.get(), Slice(input_str), &compressed); if (!result) { fprintf(stdout, "WARNING: %s compression is not enabled\n", @@ -4447,24 +4443,22 @@ class Benchmark { Slice input = gen.Generate(FLAGS_block_size); int64_t bytes = 0; int64_t produced = 0; - bool ok = true; + Status s; std::string compressed; - CompressionOptions opts; - opts.level = FLAGS_compression_level; - CompressionContext context(FLAGS_compression_type_e, opts); - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), - FLAGS_compression_type_e, - FLAGS_sample_for_compression); + std::shared_ptr compressor = + BuiltinCompressor::GetCompressor(FLAGS_compression_type_e); + auto raw_compressor = compressor.get(); + CompressionInfo info(FLAGS_sample_for_compression); // Compress 1G - while (ok && bytes < int64_t(1) << 30) { + while (s.ok() && bytes < int64_t(1) << 30) { compressed.clear(); - ok = CompressSlice(info, input, &compressed); + s = raw_compressor->Compress(info, input, &compressed); produced += compressed.size(); bytes += input.size(); thread->stats.FinishedOps(nullptr, nullptr, 1, kCompress); } - if (!ok) { + if (!s.ok()) { thread->stats.AddMessage("(compression failure)"); } else { char buf[340]; @@ -4482,25 +4476,19 @@ class Benchmark { CompressionOptions compression_opts; compression_opts.level = FLAGS_compression_level; - CompressionContext compression_ctx(FLAGS_compression_type_e, - compression_opts); - CompressionInfo compression_info( - compression_opts, compression_ctx, CompressionDict::GetEmptyDict(), - 
FLAGS_compression_type_e, FLAGS_sample_for_compression); - UncompressionContext uncompression_ctx(FLAGS_compression_type_e); - UncompressionInfo uncompression_info(uncompression_ctx, - UncompressionDict::GetEmptyDict(), - FLAGS_compression_type_e); - - bool ok = CompressSlice(compression_info, input, &compressed); + std::shared_ptr compressor = BuiltinCompressor::GetCompressor( + FLAGS_compression_type_e, compression_opts); + CompressionInfo compression_info(FLAGS_sample_for_compression); + UncompressionInfo uncompression_info; + + bool ok = + compression_info.CompressData(compressor.get(), input, &compressed); int64_t bytes = 0; size_t uncompressed_size = 0; while (ok && bytes < 1024 * 1048576) { - constexpr uint32_t compress_format_version = 2; - - CacheAllocationPtr uncompressed = UncompressData( - uncompression_info, compressed.data(), compressed.size(), - &uncompressed_size, compress_format_version); + CacheAllocationPtr uncompressed = uncompression_info.UncompressData( + compressor.get(), compressed.data(), compressed.size(), + &uncompressed_size); ok = uncompressed.get() != nullptr; bytes += input.size(); diff --git a/tools/sst_dump_test.cc b/tools/sst_dump_test.cc index 481c4b7220..2f68d8053e 100644 --- a/tools/sst_dump_test.cc +++ b/tools/sst_dump_test.cc @@ -18,6 +18,7 @@ #include "table/table_builder.h" #include "test_util/testharness.h" #include "test_util/testutil.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -113,7 +114,7 @@ class SSTDumpToolTest : public testing::Test { tb.reset(opts.table_factory->NewTableBuilder( TableBuilderOptions( imoptions, moptions, ikc, &int_tbl_prop_collector_factories, - CompressionType::kNoCompression, CompressionOptions(), + BuiltinCompressor::GetCompressor(CompressionType::kNoCompression), TablePropertiesCollectorFactory::Context::kUnknownColumnFamily, column_family_name, unknown_level), file_writer.get())); diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc index 1b269043ab..2594e38326 
100644 --- a/tools/sst_dump_tool.cc +++ b/tools/sst_dump_tool.cc @@ -14,6 +14,7 @@ #include "rocksdb/convenience.h" #include "rocksdb/utilities/ldb_cmd.h" #include "table/sst_file_dumper.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { @@ -246,12 +247,24 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { [&compression_type](std::pair curr) { return curr.second == compression_type; }); - if (iter == kCompressions.end()) { - fprintf(stderr, "%s is not a valid CompressionType\n", - compression_type.c_str()); - exit(1); + if (iter != kCompressions.end()) { + compression_types.emplace_back(*iter); + } else { + ConfigOptions config_options; + std::shared_ptr compressor; + Compressor::CreateFromString(config_options, compression_type, + &compressor); + if (compressor == nullptr) { + fprintf(stderr, "%s is not a valid CompressionType\n", + compression_type.c_str()); + exit(1); + } + compression_types.emplace_back( + std::pair( + static_cast( + compressor->GetCompressionType()), + compression_type.c_str())); } - compression_types.emplace_back(*iter); } } else if (strncmp(argv[i], "--parse_internal_key=", 21) == 0) { std::string in_key(argv[i] + 21); diff --git a/util/compression.cc b/util/compression.cc index 2a0bc38d4f..9b2b863db9 100644 --- a/util/compression.cc +++ b/util/compression.cc @@ -5,7 +5,673 @@ #include "util/compression.h" +#include "rocksdb/configurable.h" +#include "rocksdb/utilities/options_type.h" +#include "table/format.h" + namespace ROCKSDB_NAMESPACE { +#define PARALLEL_THREADS_TYPE_INFO \ + {"parallel_threads", \ + {offsetof(struct CompressionOptions, parallel_threads), \ + OptionType::kUInt32T, OptionVerificationType::kNormal, \ + OptionTypeFlags::kMutable}}, +#define DICTIONARY_TYPE_INFO \ + {"max_dict_bytes", \ + {offsetof(struct CompressionOptions, max_dict_bytes), OptionType::kInt, \ + OptionVerificationType::kNormal, OptionTypeFlags::kMutable}}, \ + {"max_train_bytes", \ + {offsetof(struct 
CompressionOptions, zstd_max_train_bytes), \ + OptionType::kUInt32T, OptionVerificationType::kNormal, \ + OptionTypeFlags::kMutable}}, \ + {"max_dict_buffer_bytes", \ + {offsetof(struct CompressionOptions, max_dict_buffer_bytes), \ + OptionType::kUInt64T, OptionVerificationType::kNormal, \ + OptionTypeFlags::kMutable}}, \ + {"use_zstd_dict_trainer", \ + {offsetof(struct CompressionOptions, use_zstd_dict_trainer), \ + OptionType::kBoolean, OptionVerificationType::kNormal, \ + OptionTypeFlags::kMutable}}, +#define LEVEL_TYPE_INFO \ + {"level", \ + {offsetof(struct CompressionOptions, level), OptionType::kInt, \ + OptionVerificationType::kNormal, OptionTypeFlags::kMutable}}, +#define WINDOW_BITS_TYPE_INFO \ + {"window_bits", \ + {offsetof(struct CompressionOptions, window_bits), OptionType::kInt, \ + OptionVerificationType::kNormal, OptionTypeFlags::kMutable}}, +#define STRATEGY_TYPE_INFO \ + {"strategy", \ + {offsetof(struct CompressionOptions, strategy), OptionType::kInt, \ + OptionVerificationType::kNormal, OptionTypeFlags::kMutable}}, +#define CHECKSUM_INFO \ + {"checksum", \ + {offsetof(struct CompressionOptions, checksum), OptionType::kBoolean, \ + OptionVerificationType::kNormal, OptionTypeFlags::kMutable}}, + +static std::unordered_map + compressor_parallel_type_info = {PARALLEL_THREADS_TYPE_INFO}; + +static std::unordered_map + compressor_level_type_info = {LEVEL_TYPE_INFO}; + +static std::unordered_map + compressor_dict_type_info = {DICTIONARY_TYPE_INFO}; + +static std::unordered_map + compressor_strategy_type_info = {STRATEGY_TYPE_INFO}; + +static std::unordered_map + compressor_window_type_info = {WINDOW_BITS_TYPE_INFO}; + +static std::unordered_map + compressor_checksum_type_info = {CHECKSUM_INFO}; + +static std::unordered_map> + builtin_compression_types = { + {kNoCompression, + std::make_pair(NoCompressor::kClassName(), NoCompressor::kNickName())}, + {kSnappyCompression, std::make_pair(SnappyCompressor::kClassName(), + SnappyCompressor::kNickName())}, + 
{kZlibCompression, std::make_pair(ZlibCompressor::kClassName(), + ZlibCompressor::kNickName())}, + {kBZip2Compression, std::make_pair(BZip2Compressor::kClassName(), + BZip2Compressor::kNickName())}, + {kLZ4Compression, std::make_pair(LZ4Compressor::kClassName(), + LZ4Compressor::kNickName())}, + {kLZ4HCCompression, std::make_pair(LZ4HCCompressor::kClassName(), + LZ4HCCompressor::kNickName())}, + {kXpressCompression, std::make_pair(XpressCompressor::kClassName(), + XpressCompressor::kNickName())}, + {kZSTD, std::make_pair(ZSTDCompressor::kClassName(), + ZSTDCompressor::kNickName())}, + {kZSTDNotFinalCompression, + std::make_pair(ZSTDNotFinalCompressor::kClassName(), + ZSTDNotFinalCompressor::kNickName())}, + {kDisableCompressionOption, + std::make_pair("DisableOption", "kDisableCompressionOption")}, +}; + +std::vector Compressor::GetSupported() { + std::vector supported; + for (const auto& cit : builtin_compression_types) { + if (BuiltinCompressor::TypeSupported(cit.first)) { + supported.push_back(cit.second.first); + } + } + return supported; +} + +std::vector Compressor::GetDictSupported() { + std::vector supported; + for (const auto& cit : builtin_compression_types) { + if (BuiltinCompressor::TypeSupported(cit.first) && + BuiltinCompressor::TypeSupportsDict(cit.first)) { + supported.push_back(cit.second.first); + } + } + return supported; +} + +std::vector GetSupportedCompressions() { + std::vector supported; + for (const auto& cit : builtin_compression_types) { + if (BuiltinCompressor::TypeSupported(cit.first)) { + supported.push_back(cit.first); + } + } + return supported; +} + +BuiltinCompressor::BuiltinCompressor() { + RegisterOptions(&compression_opts_, &compressor_parallel_type_info); +} + +bool BuiltinCompressor::TypeToString(CompressionType type, bool as_class, + std::string* result) { + const auto cit = builtin_compression_types.find(type); + if (cit != builtin_compression_types.end()) { + *result = (as_class) ? 
cit->second.first : cit->second.second; + return true; + } else { + *result = ""; + return false; + } +} + +std::string BuiltinCompressor::TypeToString(CompressionType type) { + std::string result; + bool okay __attribute__((__unused__)); + okay = TypeToString(type, true, &result); + assert(okay); + return result; +} + +bool BuiltinCompressor::StringToType(const std::string& id, + CompressionType* type) { + for (const auto& cit : builtin_compression_types) { + if (id == cit.second.first || id == cit.second.second) { + *type = cit.first; + return true; + } + } + return false; +} + +bool BuiltinCompressor::TypeSupported(CompressionType type) { + switch (type) { + case kNoCompression: + return true; + case kSnappyCompression: + return Snappy_Supported(); + case kZlibCompression: + return Zlib_Supported(); + case kBZip2Compression: + return BZip2_Supported(); + case kLZ4Compression: + return LZ4_Supported(); + case kLZ4HCCompression: + return LZ4_Supported(); + case kXpressCompression: + return XPRESS_Supported(); + case kZSTDNotFinalCompression: + return ZSTDNotFinal_Supported(); + case kZSTD: + return ZSTD_Supported(); + default: + return false; + } +} + +bool BuiltinCompressor::TypeSupportsDict(CompressionType type) { + switch (type) { + case kNoCompression: + return false; + case kSnappyCompression: + return false; + case kZlibCompression: + return Zlib_Supported(); + case kBZip2Compression: + return false; + case kLZ4Compression: + case kLZ4HCCompression: +#if LZ4_VERSION_NUMBER >= 10400 // r124+ + return LZ4_Supported(); +#else + return false; +#endif + case kXpressCompression: + return false; + case kZSTDNotFinalCompression: +#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ + return ZSTDNotFinal_Supported(); +#else + return false; +#endif + case kZSTD: +#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ + return ZSTD_Supported(); +#else + return false; +#endif + default: + return false; + } +} + +bool BuiltinCompressor::MatchesOptions(const CompressionOptions& opts) const { + 
return compression_opts_.parallel_threads == opts.parallel_threads; +} + +bool BuiltinCompressor::IsInstanceOf(const std::string& id) const { + if (id == kClassName()) { + return true; + } else { + return Compressor::IsInstanceOf(id); + } +} + +const void* BuiltinCompressor::GetOptionsPtr(const std::string& name) const { + if (name == CompressionOptions::kName()) { + return &compression_opts_; + } else { + return Compressor::GetOptionsPtr(name); + } +} + +BuiltinDictCompressor::BuiltinDictCompressor() { + RegisterOptions("dictionary", &compression_opts_, &compressor_dict_type_info); +} + +bool BuiltinDictCompressor::IsInstanceOf(const std::string& id) const { + if (id == kClassName()) { + return true; + } else { + return BuiltinCompressor::IsInstanceOf(id); + } +} + +bool BuiltinDictCompressor::MatchesOptions( + const CompressionOptions& opts) const { + if (compression_opts_.max_dict_bytes != opts.max_dict_bytes || + compression_opts_.zstd_max_train_bytes != opts.zstd_max_train_bytes || + compression_opts_.max_dict_buffer_bytes != opts.max_dict_buffer_bytes || + compression_opts_.use_zstd_dict_trainer != opts.use_zstd_dict_trainer) { + return false; + } else { + return BuiltinCompressor::MatchesOptions(opts); + } +} + +BZip2Compressor::BZip2Compressor() { + // No additional options +} + +#ifdef BZIP2 +Status BZip2Compressor::Compress(const CompressionInfo& info, + const Slice& input, std::string* output) { + bool success = BZip2_Compress(info, info.CompressFormatVersion(), + input.data(), input.size(), output); + if (!success) { + return Status::Corruption(); + } + return Status::OK(); +} + +Status BZip2Compressor::Uncompress(const UncompressionInfo& info, + const char* input, size_t input_length, + char** output, size_t* output_length) { + *output = BZip2_Uncompress(info, input, input_length, output_length); + if (!*output) { + return Status::Corruption(); + } + return Status::OK(); +} + +#endif // BZIP2 + +bool LZ4Compressor::DictCompressionSupported() const { 
+#if LZ4_VERSION_NUMBER >= 10400 // r124+ + return true; +#else + return false; +#endif +} + +#ifdef LZ4 +Status LZ4Compressor::Compress(const CompressionInfo& info, const Slice& input, + std::string* output) { + bool success = LZ4_Compress(info, input.data(), input.size(), output); + if (UNLIKELY(!success)) { + return Status::Corruption(); + } + return Status::OK(); +} + +Status LZ4Compressor::Uncompress(const UncompressionInfo& info, + const char* input, size_t input_length, + char** output, size_t* output_length) { + *output = LZ4_Uncompress(info, input, input_length, output_length); + if (UNLIKELY(!*output)) { + return Status::Corruption(); + } + return Status::OK(); +} +#endif // LZ4 + +LZ4HCCompressor::LZ4HCCompressor() { + RegisterOptions("level", &compression_opts_, &compressor_level_type_info); +} + +bool LZ4HCCompressor::MatchesOptions(const CompressionOptions& opts) const { + if (compression_opts_.level != opts.level) { + return false; + } else { + return BuiltinDictCompressor::MatchesOptions(opts); + } +} + +bool LZ4HCCompressor::DictCompressionSupported() const { +#if LZ4_VERSION_NUMBER >= 10400 // r124+ + return true; +#else + return false; +#endif +} + +#ifdef LZ4 +Status LZ4HCCompressor::Compress(const CompressionInfo& info, + const Slice& input, std::string* output) { + bool success = LZ4HC_Compress(info, compression_opts_, input.data(), + input.size(), output); + if (UNLIKELY(!success)) { + return Status::Corruption(); + } + return Status::OK(); +} + +Status LZ4HCCompressor::Uncompress(const UncompressionInfo& info, + const char* input, size_t input_length, + char** output, size_t* output_length) { + *output = LZ4_Uncompress(info, input, input_length, output_length); + if (UNLIKELY(!*output)) { + return Status::Corruption(); + } + return Status::OK(); +} +#endif // LZ4 + +SnappyCompressor::SnappyCompressor() { + // No additional options +} + +#ifdef SNAPPY +Status SnappyCompressor::Compress(const CompressionInfo& info, + const Slice& input, 
std::string* output) { + bool success = Snappy_Compress(info, input.data(), input.size(), output); + if (!success) { + return Status::Corruption(); + } + return Status::OK(); +} + +Status SnappyCompressor::Uncompress(const UncompressionInfo& info, + const char* input, size_t input_length, + char** output, size_t* output_length) { + *output = Snappy_Uncompress(input, input_length, output_length, + info.GetMemoryAllocator()); + if (!*output) { + return Status::Corruption(); + } else { + return Status::OK(); + } +} +#endif // SNAPPY + +XpressCompressor::XpressCompressor() { + // No additional options +} + +#ifdef XPRESS +Status XpressCompressor::Compress(const CompressionInfo& /*info*/, + const Slice& input, std::string* output) { + bool success = XPRESS_Compress(input.data(), input.size(), output); + if (!success) { + return Status::Corruption(); + } + return Status::OK(); +} + +Status XpressCompressor::Uncompress(const UncompressionInfo& /*info*/, + const char* input, size_t input_length, + char** output, size_t* output_length) { + // XPRESS allocates memory internally, thus no support for custom allocator. 
+ *output = XPRESS_Uncompress(input, input_length, output_length); + if (!*output) { + return Status::Corruption(); + } + return Status::OK(); +} +#endif // XPRESS + +ZlibCompressor::ZlibCompressor() { + RegisterOptions("level", &compression_opts_, &compressor_level_type_info); + RegisterOptions("window", &compression_opts_, &compressor_window_type_info); + RegisterOptions("strategy", &compression_opts_, + &compressor_strategy_type_info); +} + +bool ZlibCompressor::MatchesOptions(const CompressionOptions& opts) const { + if (compression_opts_.level != opts.level) { + return false; + } else if (compression_opts_.window_bits != opts.window_bits || + compression_opts_.strategy != opts.strategy) { + return false; + } else { + return BuiltinDictCompressor::MatchesOptions(opts); + } +} +#ifdef ZLIB +Status ZlibCompressor::Compress(const CompressionInfo& info, const Slice& input, + std::string* output) { + bool success = Zlib_Compress(info, compression_opts_, input.data(), + input.size(), output); + if (!success) { + return Status::Corruption(); + } + return Status::OK(); +} + +Status ZlibCompressor::Uncompress(const UncompressionInfo& info, + const char* input, size_t input_length, + char** output, size_t* output_length) { + *output = Zlib_Uncompress(info, input, input_length, output_length); + if (!*output) { + return Status::Corruption(); + } + return Status::OK(); +} +#endif // ZLIB + +ZSTDCompressor::ZSTDCompressor() { + RegisterOptions("level", &compression_opts_, &compressor_level_type_info); + RegisterOptions("checksum", &compression_opts_, + &compressor_checksum_type_info); +} + +bool ZSTDCompressor::MatchesOptions(const CompressionOptions& opts) const { + if (compression_opts_.level != opts.level) { + return false; + } else { + return BuiltinDictCompressor::MatchesOptions(opts); + } +} + +bool ZSTDCompressor::DictCompressionSupported() const { +#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ + return true; +#else + return false; +#endif +} + +#ifdef ZSTD +thread_local 
ZSTDContext ZSTDCompressor::zstd_context_; + +Status ZSTDCompressor::Compress(const CompressionInfo& info, const Slice& input, + std::string* output) { + auto length = input.size(); + if (length > std::numeric_limits::max()) { + // Can't compress more than 4GB + return Status::Corruption("ZSTD: Cannot compress more than 4GB"); + } + + size_t output_header_len = compression::PutDecompressedSizeInfo( + output, static_cast(length)); + + size_t compressBound = ZSTD_compressBound(length); + output->resize(static_cast(output_header_len + compressBound)); + size_t outlen = 0; +#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ + ZSTD_CCtx* context = zstd_context_.GetCompressionContext( + this, compression_opts_.level, compression_opts_.checksum); + assert(context != nullptr); +#ifdef ZSTD_ADVANCED + if (info.dict().GetProcessedDict() != nullptr) { + ZSTD_CCtx_refCDict( + context, reinterpret_cast(info.dict().GetProcessedDict())); + } else { + ZSTD_CCtx_loadDictionary(context, info.dict().GetRawDict().data(), + info.dict().GetRawDict().size()); + } + + // Compression level is already set on `context` by GetCompressionContext() + outlen = ZSTD_compress2(context, &(*output)[output_header_len], compressBound, + input.data(), length); +#else // ZSTD_ADVANCED +#if ZSTD_VERSION_NUMBER >= 700 // v0.7.0+ + if (info.dict().GetProcessedDict() != nullptr) { + outlen = ZSTD_compress_usingCDict( + context, &(*output)[output_header_len], compressBound, input.data(), + length, reinterpret_cast(info.dict().GetProcessedDict())); + } +#endif // ZSTD_VERSION_NUMBER >= 700 + // TODO (cbi): error handling for compression. 
+ if (outlen == 0) { + int level; + if (info.options().level == CompressionOptions::kDefaultCompressionLevel) { + // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see + // https://github.com/facebook/zstd/issues/1148 + level = 3; + } else { + level = info.options().level; + } + outlen = ZSTD_compress_usingDict(context, &(*output)[output_header_len], + compressBound, input.data(), length, + info.dict().GetRawDict().data(), + info.dict().GetRawDict().size(), level); + } +#endif // ZSTD_ADVANCED +#else // up to v0.4.x + outlen = ZSTD_compress(&(*output)[output_header_len], compressBound, + input.data(), length, compression_opts_.level); +#endif // ZSTD_VERSION_NUMBER >= 500 + if (outlen == 0) { + return Status::Corruption(); + } else { + output->resize(output_header_len + outlen); + return Status::OK(); + } +} + +Status ZSTDCompressor::Uncompress(const UncompressionInfo& info, + const char* input, size_t input_length, + char** uncompressed, + size_t* uncompressed_length) { + static const char* const kErrorDecodeOutputSize = + "Cannot decode output size."; + static const char* const kErrorOutputLenMismatch = + "Decompressed size does not match header."; + uint32_t output_len = 0; + if (!compression::GetDecompressedSizeInfo(&input, &input_length, + &output_len)) { + return Status::Corruption(kErrorDecodeOutputSize); + } + + auto output = Allocate(output_len, info.GetMemoryAllocator()); + size_t actual_output_length = 0; +#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ + ZSTD_DCtx* context = zstd_context_.GetUncompressionContext(this); + assert(context != nullptr); +#ifdef ROCKSDB_ZSTD_DDICT + if (info.dict().GetProcessedDict() != nullptr) { + actual_output_length = ZSTD_decompress_usingDDict( + context, output, output_len, input, input_length, + reinterpret_cast(info.dict().GetProcessedDict())); + } else { +#endif // ROCKSDB_ZSTD_DDICT + actual_output_length = ZSTD_decompress_usingDict( + context, output, output_len, input, input_length, + info.dict().GetRawDict().data(), 
info.dict().GetRawDict().size()); +#ifdef ROCKSDB_ZSTD_DDICT + } +#endif // ROCKSDB_ZSTD_DDICT +#else // up to v0.4.x + actual_output_length = + ZSTD_decompress(output, output_len, input, input_length); +#endif // ZSTD_VERSION_NUMBER >= 500 + if (ZSTD_isError(actual_output_length)) { + return Status::Corruption(ZSTD_getErrorName(actual_output_length)); + } else if (actual_output_length != output_len) { + return Status::Corruption(kErrorOutputLenMismatch); + } + + *uncompressed_length = actual_output_length; + *uncompressed = output; + return Status::OK(); +} +#endif // ZSTD + +#if ZSTD_VERSION_NUMBER >= 700 +class ZSTDCompressionDict : public ProcessedDict { + private: + ZSTD_CDict* cdict_ = nullptr; + + public: + ZSTDCompressionDict(const std::string& dict, int level) { + if (!dict.empty()) { + if (level == CompressionOptions::kDefaultCompressionLevel) { + // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see + // https://github.com/facebook/zstd/issues/1148 + level = 3; + } + // Should be safe (but slower) if below call fails as we'll use the + // raw dictionary to compress. 
+ cdict_ = ZSTD_createCDict(dict.data(), dict.size(), level); + assert(cdict_ != nullptr); + } + } + + ~ZSTDCompressionDict() override { + if (cdict_ != nullptr) { + auto res = ZSTD_freeCDict(cdict_); + assert(res == 0); // Last I checked they can't fail + (void)res; // prevent unused var warning + } + } + void* Data() const override { return cdict_; } +}; + +std::unique_ptr ZSTDCompressor::NewCompressionDict( + const std::string& dict) { + std::unique_ptr zstd_cdict( + new ZSTDCompressionDict(dict, compression_opts_.level)); + return std::make_unique(dict, std::move(zstd_cdict)); +} + +class ZSTDUncompressionDict : public ProcessedDict { + private: + ZSTD_DDict* ddict_ = nullptr; + + public: + explicit ZSTDUncompressionDict(const Slice& slice) { + if (!slice.empty()) { +#ifdef ROCKSDB_ZSTD_DDICT + ddict_ = ZSTD_createDDict_byReference(slice.data(), slice.size()); + assert(ddict_ != nullptr); +#else + //**TODO: Should this use ZSTD_CreateDDict? + // ddict_ = ZSTD_createDDict(slice.data(), slice.size()); + // assert(ddict_ != nullptr); +#endif + } + } + + ~ZSTDUncompressionDict() override { + if (ddict_ != nullptr) { + auto res = ZSTD_freeDDict(ddict_); + assert(res == 0); // Last I checked they can't fail + (void)res; // prevent unused var warning + } + } + void* Data() const override { return ddict_; } +#ifdef ROCKSDB_ZSTD_DDICT + size_t Size() const override { return ZSTD_sizeof_DDict(ddict_); } +#endif +}; + +UncompressionDict* ZSTDCompressor::NewUncompressionDict( + const std::string& dict) { + std::unique_ptr processed(new ZSTDUncompressionDict(dict)); + return new UncompressionDict(dict, std::move(processed)); +} + +UncompressionDict* ZSTDCompressor::NewUncompressionDict( + const Slice& slice, CacheAllocationPtr&& allocation) { + std::unique_ptr processed(new ZSTDUncompressionDict(slice)); + return new UncompressionDict(slice, std::move(allocation), + std::move(processed)); +} +#endif // ZSTD_VERSION_NUMBER >= 700 StreamingCompress* 
StreamingCompress::Create(CompressionType compression_type, const CompressionOptions& opts, diff --git a/util/compression.h b/util/compression.h index 3e21a669b5..d0385ea846 100644 --- a/util/compression.h +++ b/util/compression.h @@ -19,14 +19,17 @@ #endif // OS_FREEBSD #endif // ROCKSDB_MALLOC_USABLE_SIZE #include +#include #include "memory/memory_allocator_impl.h" +#include "rocksdb/convenience.h" #include "rocksdb/options.h" #include "rocksdb/table.h" #include "table/block_based/block_type.h" #include "test_util/sync_point.h" #include "util/coding.h" #include "util/compression_context_cache.h" +#include "util/compressor.h" #include "util/string_util.h" #ifdef SNAPPY @@ -48,6 +51,9 @@ #if defined(ZSTD) #include +#if (ZSTD_VERSION_NUMBER >= 500) +#include +#endif // v1.1.3+ #if ZSTD_VERSION_NUMBER >= 10103 #include @@ -153,6 +159,7 @@ class ZSTDUncompressCachedData { private: void ignore_padding__() { padding = nullptr; } }; + } // namespace ROCKSDB_NAMESPACE #endif @@ -161,349 +168,6 @@ class ZSTDUncompressCachedData { #endif namespace ROCKSDB_NAMESPACE { - -// Holds dictionary and related data, like ZSTD's digested compression -// dictionary. 
-struct CompressionDict { -#if ZSTD_VERSION_NUMBER >= 700 - ZSTD_CDict* zstd_cdict_ = nullptr; -#endif // ZSTD_VERSION_NUMBER >= 700 - std::string dict_; - - public: -#if ZSTD_VERSION_NUMBER >= 700 - CompressionDict(std::string dict, CompressionType type, int level) { -#else // ZSTD_VERSION_NUMBER >= 700 - CompressionDict(std::string dict, CompressionType /*type*/, int /*level*/) { -#endif // ZSTD_VERSION_NUMBER >= 700 - dict_ = std::move(dict); -#if ZSTD_VERSION_NUMBER >= 700 - zstd_cdict_ = nullptr; - if (!dict_.empty() && (type == kZSTD || type == kZSTDNotFinalCompression)) { - if (level == CompressionOptions::kDefaultCompressionLevel) { - // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see - // https://github.com/facebook/zstd/issues/1148 - // TODO(cbi): ZSTD_CLEVEL_DEFAULT is exposed after - // https://github.com/facebook/zstd/pull/1174. Use ZSTD_CLEVEL_DEFAULT - // instead of hardcoding 3. - level = 3; - } - // Should be safe (but slower) if below call fails as we'll use the - // raw dictionary to compress. 
- zstd_cdict_ = ZSTD_createCDict(dict_.data(), dict_.size(), level); - assert(zstd_cdict_ != nullptr); - } -#endif // ZSTD_VERSION_NUMBER >= 700 - } - - ~CompressionDict() { -#if ZSTD_VERSION_NUMBER >= 700 - size_t res = 0; - if (zstd_cdict_ != nullptr) { - res = ZSTD_freeCDict(zstd_cdict_); - } - assert(res == 0); // Last I checked they can't fail - (void)res; // prevent unused var warning -#endif // ZSTD_VERSION_NUMBER >= 700 - } - -#if ZSTD_VERSION_NUMBER >= 700 - const ZSTD_CDict* GetDigestedZstdCDict() const { return zstd_cdict_; } -#endif // ZSTD_VERSION_NUMBER >= 700 - - Slice GetRawDict() const { return dict_; } - - static const CompressionDict& GetEmptyDict() { - static CompressionDict empty_dict{}; - return empty_dict; - } - - CompressionDict() = default; - // Disable copy/move - CompressionDict(const CompressionDict&) = delete; - CompressionDict& operator=(const CompressionDict&) = delete; - CompressionDict(CompressionDict&&) = delete; - CompressionDict& operator=(CompressionDict&&) = delete; -}; - -// Holds dictionary and related data, like ZSTD's digested uncompression -// dictionary. -struct UncompressionDict { - // Block containing the data for the compression dictionary in case the - // constructor that takes a string parameter is used. - std::string dict_; - - // Block containing the data for the compression dictionary in case the - // constructor that takes a Slice parameter is used and the passed in - // CacheAllocationPtr is not nullptr. - CacheAllocationPtr allocation_; - - // Slice pointing to the compression dictionary data. Can point to - // dict_, allocation_, or some other memory location, depending on how - // the object was constructed. - Slice slice_; - -#ifdef ROCKSDB_ZSTD_DDICT - // Processed version of the contents of slice_ for ZSTD compression. 
- ZSTD_DDict* zstd_ddict_ = nullptr; -#endif // ROCKSDB_ZSTD_DDICT - -#ifdef ROCKSDB_ZSTD_DDICT - UncompressionDict(std::string dict, bool using_zstd) -#else // ROCKSDB_ZSTD_DDICT - UncompressionDict(std::string dict, bool /* using_zstd */) -#endif // ROCKSDB_ZSTD_DDICT - : dict_(std::move(dict)), slice_(dict_) { -#ifdef ROCKSDB_ZSTD_DDICT - if (!slice_.empty() && using_zstd) { - zstd_ddict_ = ZSTD_createDDict_byReference(slice_.data(), slice_.size()); - assert(zstd_ddict_ != nullptr); - } -#endif // ROCKSDB_ZSTD_DDICT - } - -#ifdef ROCKSDB_ZSTD_DDICT - UncompressionDict(Slice slice, CacheAllocationPtr&& allocation, - bool using_zstd) -#else // ROCKSDB_ZSTD_DDICT - UncompressionDict(Slice slice, CacheAllocationPtr&& allocation, - bool /* using_zstd */) -#endif // ROCKSDB_ZSTD_DDICT - : allocation_(std::move(allocation)), slice_(std::move(slice)) { -#ifdef ROCKSDB_ZSTD_DDICT - if (!slice_.empty() && using_zstd) { - zstd_ddict_ = ZSTD_createDDict_byReference(slice_.data(), slice_.size()); - assert(zstd_ddict_ != nullptr); - } -#endif // ROCKSDB_ZSTD_DDICT - } - - UncompressionDict(UncompressionDict&& rhs) - : dict_(std::move(rhs.dict_)), - allocation_(std::move(rhs.allocation_)), - slice_(std::move(rhs.slice_)) -#ifdef ROCKSDB_ZSTD_DDICT - , - zstd_ddict_(rhs.zstd_ddict_) -#endif - { -#ifdef ROCKSDB_ZSTD_DDICT - rhs.zstd_ddict_ = nullptr; -#endif - } - - ~UncompressionDict() { -#ifdef ROCKSDB_ZSTD_DDICT - size_t res = 0; - if (zstd_ddict_ != nullptr) { - res = ZSTD_freeDDict(zstd_ddict_); - } - assert(res == 0); // Last I checked they can't fail - (void)res; // prevent unused var warning -#endif // ROCKSDB_ZSTD_DDICT - } - - UncompressionDict& operator=(UncompressionDict&& rhs) { - if (this == &rhs) { - return *this; - } - - dict_ = std::move(rhs.dict_); - allocation_ = std::move(rhs.allocation_); - slice_ = std::move(rhs.slice_); - -#ifdef ROCKSDB_ZSTD_DDICT - zstd_ddict_ = rhs.zstd_ddict_; - rhs.zstd_ddict_ = nullptr; -#endif - - return *this; - } - - // The object 
is self-contained if the string constructor is used, or the - // Slice constructor is invoked with a non-null allocation. Otherwise, it - // is the caller's responsibility to ensure that the underlying storage - // outlives this object. - bool own_bytes() const { return !dict_.empty() || allocation_; } - - const Slice& GetRawDict() const { return slice_; } - - // For TypedCacheInterface - const Slice& ContentSlice() const { return slice_; } - static constexpr CacheEntryRole kCacheEntryRole = CacheEntryRole::kOtherBlock; - static constexpr BlockType kBlockType = BlockType::kCompressionDictionary; - -#ifdef ROCKSDB_ZSTD_DDICT - const ZSTD_DDict* GetDigestedZstdDDict() const { return zstd_ddict_; } -#endif // ROCKSDB_ZSTD_DDICT - - static const UncompressionDict& GetEmptyDict() { - static UncompressionDict empty_dict{}; - return empty_dict; - } - - size_t ApproximateMemoryUsage() const { - size_t usage = sizeof(struct UncompressionDict); - usage += dict_.size(); - if (allocation_) { - auto allocator = allocation_.get_deleter().allocator; - if (allocator) { - usage += allocator->UsableSize(allocation_.get(), slice_.size()); - } else { - usage += slice_.size(); - } - } -#ifdef ROCKSDB_ZSTD_DDICT - usage += ZSTD_sizeof_DDict(zstd_ddict_); -#endif // ROCKSDB_ZSTD_DDICT - return usage; - } - - UncompressionDict() = default; - // Disable copy - UncompressionDict(const CompressionDict&) = delete; - UncompressionDict& operator=(const CompressionDict&) = delete; -}; - -class CompressionContext { - private: -#if defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500) - ZSTD_CCtx* zstd_ctx_ = nullptr; - - ZSTD_CCtx* CreateZSTDContext() { -#ifdef ROCKSDB_ZSTD_CUSTOM_MEM - return ZSTD_createCCtx_advanced(port::GetJeZstdAllocationOverrides()); -#else // ROCKSDB_ZSTD_CUSTOM_MEM - return ZSTD_createCCtx(); -#endif // ROCKSDB_ZSTD_CUSTOM_MEM - } - - void CreateNativeContext(CompressionType type, int level, bool checksum) { - if (type == kZSTD || type == kZSTDNotFinalCompression) { - zstd_ctx_ = 
CreateZSTDContext(); -#ifdef ZSTD_ADVANCED - if (level == CompressionOptions::kDefaultCompressionLevel) { - // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see - // https://github.com/facebook/zstd/issues/1148 - level = 3; - } - size_t err = - ZSTD_CCtx_setParameter(zstd_ctx_, ZSTD_c_compressionLevel, level); - if (ZSTD_isError(err)) { - assert(false); - ZSTD_freeCCtx(zstd_ctx_); - zstd_ctx_ = CreateZSTDContext(); - } - if (checksum) { - err = ZSTD_CCtx_setParameter(zstd_ctx_, ZSTD_c_checksumFlag, 1); - if (ZSTD_isError(err)) { - assert(false); - ZSTD_freeCCtx(zstd_ctx_); - zstd_ctx_ = CreateZSTDContext(); - } - } -#else - (void)level; - (void)checksum; -#endif - } - } - void DestroyNativeContext() { - if (zstd_ctx_ != nullptr) { - ZSTD_freeCCtx(zstd_ctx_); - } - } - - public: - // callable inside ZSTD_Compress - ZSTD_CCtx* ZSTDPreallocCtx() const { - assert(zstd_ctx_ != nullptr); - return zstd_ctx_; - } - -#else // ZSTD && (ZSTD_VERSION_NUMBER >= 500) - private: - void CreateNativeContext(CompressionType /* type */, int /* level */, - bool /* checksum */) {} - void DestroyNativeContext() {} -#endif // ZSTD && (ZSTD_VERSION_NUMBER >= 500) - public: - explicit CompressionContext(CompressionType type, - const CompressionOptions& options) { - CreateNativeContext(type, options.level, options.checksum); - } - ~CompressionContext() { DestroyNativeContext(); } - CompressionContext(const CompressionContext&) = delete; - CompressionContext& operator=(const CompressionContext&) = delete; -}; - -class CompressionInfo { - const CompressionOptions& opts_; - const CompressionContext& context_; - const CompressionDict& dict_; - const CompressionType type_; - const uint64_t sample_for_compression_; - - public: - CompressionInfo(const CompressionOptions& _opts, - const CompressionContext& _context, - const CompressionDict& _dict, CompressionType _type, - uint64_t _sample_for_compression) - : opts_(_opts), - context_(_context), - dict_(_dict), - type_(_type), - 
sample_for_compression_(_sample_for_compression) {} - - const CompressionOptions& options() const { return opts_; } - const CompressionContext& context() const { return context_; } - const CompressionDict& dict() const { return dict_; } - CompressionType type() const { return type_; } - uint64_t SampleForCompression() const { return sample_for_compression_; } -}; - -class UncompressionContext { - private: - CompressionContextCache* ctx_cache_ = nullptr; - ZSTDUncompressCachedData uncomp_cached_data_; - - public: - explicit UncompressionContext(CompressionType type) { - if (type == kZSTD || type == kZSTDNotFinalCompression) { - ctx_cache_ = CompressionContextCache::Instance(); - uncomp_cached_data_ = ctx_cache_->GetCachedZSTDUncompressData(); - } - } - ~UncompressionContext() { - if (uncomp_cached_data_.GetCacheIndex() != -1) { - assert(ctx_cache_ != nullptr); - ctx_cache_->ReturnCachedZSTDUncompressData( - uncomp_cached_data_.GetCacheIndex()); - } - } - UncompressionContext(const UncompressionContext&) = delete; - UncompressionContext& operator=(const UncompressionContext&) = delete; - - ZSTDUncompressCachedData::ZSTDNativeContext GetZSTDContext() const { - return uncomp_cached_data_.Get(); - } -}; - -class UncompressionInfo { - const UncompressionContext& context_; - const UncompressionDict& dict_; - const CompressionType type_; - - public: - UncompressionInfo(const UncompressionContext& _context, - const UncompressionDict& _dict, CompressionType _type) - : context_(_context), dict_(_dict), type_(_type) {} - - const UncompressionContext& context() const { return context_; } - const UncompressionDict& dict() const { return dict_; } - CompressionType type() const { return type_; } -}; - inline bool Snappy_Supported() { #ifdef SNAPPY return true; @@ -582,94 +246,15 @@ inline bool StreamingCompressionTypeSupported( } inline bool CompressionTypeSupported(CompressionType compression_type) { - switch (compression_type) { - case kNoCompression: - return true; - case 
kSnappyCompression: - return Snappy_Supported(); - case kZlibCompression: - return Zlib_Supported(); - case kBZip2Compression: - return BZip2_Supported(); - case kLZ4Compression: - return LZ4_Supported(); - case kLZ4HCCompression: - return LZ4_Supported(); - case kXpressCompression: - return XPRESS_Supported(); - case kZSTDNotFinalCompression: - return ZSTDNotFinal_Supported(); - case kZSTD: - return ZSTD_Supported(); - default: - assert(false); - return false; - } + return BuiltinCompressor::TypeSupported(compression_type); } inline bool DictCompressionTypeSupported(CompressionType compression_type) { - switch (compression_type) { - case kNoCompression: - return false; - case kSnappyCompression: - return false; - case kZlibCompression: - return Zlib_Supported(); - case kBZip2Compression: - return false; - case kLZ4Compression: - case kLZ4HCCompression: -#if LZ4_VERSION_NUMBER >= 10400 // r124+ - return LZ4_Supported(); -#else - return false; -#endif - case kXpressCompression: - return false; - case kZSTDNotFinalCompression: -#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ - return ZSTDNotFinal_Supported(); -#else - return false; -#endif - case kZSTD: -#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ - return ZSTD_Supported(); -#else - return false; -#endif - default: - assert(false); - return false; - } + return BuiltinCompressor::TypeSupportsDict(compression_type); } inline std::string CompressionTypeToString(CompressionType compression_type) { - switch (compression_type) { - case kNoCompression: - return "NoCompression"; - case kSnappyCompression: - return "Snappy"; - case kZlibCompression: - return "Zlib"; - case kBZip2Compression: - return "BZip2"; - case kLZ4Compression: - return "LZ4"; - case kLZ4HCCompression: - return "LZ4HC"; - case kXpressCompression: - return "Xpress"; - case kZSTD: - return "ZSTD"; - case kZSTDNotFinalCompression: - return "ZSTDNotFinal"; - case kDisableCompressionOption: - return "DisableOption"; - default: - assert(false); - return ""; - } + 
return BuiltinCompressor::TypeToString(compression_type); } inline std::string CompressionOptionsToString( @@ -703,55 +288,68 @@ inline std::string CompressionOptionsToString( return result; } +// TODO: xxx_Compress/Uncompress functions should move to compression.cc + +#ifdef SNAPPY // compress_format_version can have two values: // 1 -- decompressed sizes for BZip2 and Zlib are not included in the compressed // block. Also, decompressed sizes for LZ4 are encoded in platform-dependent // way. // 2 -- Zlib, BZip2 and LZ4 encode decompressed size as Varint32 just before the // start of compressed block. Snappy format is the same as version 1. - inline bool Snappy_Compress(const CompressionInfo& /*info*/, const char* input, size_t length, ::std::string* output) { -#ifdef SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; snappy::RawCompress(input, length, &(*output)[0], &outlen); output->resize(outlen); return true; -#else - (void)input; - (void)length; - (void)output; - return false; -#endif } -inline CacheAllocationPtr Snappy_Uncompress( - const char* input, size_t length, size_t* uncompressed_size, - MemoryAllocator* allocator = nullptr) { -#ifdef SNAPPY +inline char* Snappy_Uncompress(const char* input, size_t length, + size_t* uncompressed_size, + MemoryAllocator* allocator = nullptr) { size_t uncompressed_length = 0; if (!snappy::GetUncompressedLength(input, length, &uncompressed_length)) { return nullptr; } - CacheAllocationPtr output = AllocateBlock(uncompressed_length, allocator); + auto output = Allocate(uncompressed_length, allocator); - if (!snappy::RawUncompress(input, length, output.get())) { + if (!snappy::RawUncompress(input, length, output)) { + Deallocate(output, allocator); return nullptr; } *uncompressed_size = uncompressed_length; return output; -#else - (void)input; - (void)length; - (void)uncompressed_size; - (void)allocator; - return nullptr; -#endif } +#endif // SNAPPY + +class SnappyCompressor : public 
BuiltinCompressor { + public: + SnappyCompressor(); + + static const char* kClassName() { return "Snappy"; } + const char* Name() const override { return kClassName(); } + CompressionType GetCompressionType() const override { + return kSnappyCompression; + } + + static const char* kNickName() { return "kSnappyCompression"; } + const char* NickName() const override { return kNickName(); } + + bool Supported() const override { return Snappy_Supported(); } +#ifdef SNAPPY + Status Compress(const CompressionInfo& info, const Slice& input, + std::string* output) override; + + Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) override; +#endif +}; namespace compression { // returns size @@ -774,6 +372,7 @@ inline bool GetDecompressedSizeInfo(const char** input_data, } } // namespace compression +#ifdef ZLIB // compress_format_version == 1 -- decompressed size is not included in the // block header // compress_format_version == 2 -- decompressed size is included in the block @@ -781,16 +380,15 @@ inline bool GetDecompressedSizeInfo(const char** input_data, // @param compression_dict Data for presetting the compression library's // dictionary. inline bool Zlib_Compress(const CompressionInfo& info, - uint32_t compress_format_version, const char* input, + const CompressionOptions& opts, const char* input, size_t length, ::std::string* output) { -#ifdef ZLIB if (length > std::numeric_limits::max()) { // Can't compress more than 4GB return false; } size_t output_header_len = 0; - if (compress_format_version == 2) { + if (info.CompressFormatVersion() == 2) { output_header_len = compression::PutDecompressedSizeInfo( output, static_cast(length)); } @@ -802,15 +400,15 @@ inline bool Zlib_Compress(const CompressionInfo& info, // The default value is 8. See zconf.h for more details. 
static const int memLevel = 8; int level; - if (info.options().level == CompressionOptions::kDefaultCompressionLevel) { + if (opts.level == CompressionOptions::kDefaultCompressionLevel) { level = Z_DEFAULT_COMPRESSION; } else { - level = info.options().level; + level = opts.level; } z_stream _stream; memset(&_stream, 0, sizeof(z_stream)); - int st = deflateInit2(&_stream, level, Z_DEFLATED, info.options().window_bits, - memLevel, info.options().strategy); + int st = deflateInit2(&_stream, level, Z_DEFLATED, opts.window_bits, memLevel, + opts.strategy); if (st != Z_OK) { return false; } @@ -852,14 +450,6 @@ inline bool Zlib_Compress(const CompressionInfo& info, deflateEnd(&_stream); return compressed; -#else - (void)info; - (void)compress_format_version; - (void)input; - (void)length; - (void)output; - return false; -#endif } // compress_format_version == 1 -- decompressed size is not included in the @@ -868,13 +458,11 @@ inline bool Zlib_Compress(const CompressionInfo& info, // header in varint32 format // @param compression_dict Data for presetting the compression library's // dictionary. 
-inline CacheAllocationPtr Zlib_Uncompress( - const UncompressionInfo& info, const char* input_data, size_t input_length, - size_t* uncompressed_size, uint32_t compress_format_version, - MemoryAllocator* allocator = nullptr, int windowBits = -14) { -#ifdef ZLIB +inline char* Zlib_Uncompress(const UncompressionInfo& info, + const char* input_data, size_t input_length, + size_t* uncompressed_size, int windowBits = -14) { uint32_t output_len = 0; - if (compress_format_version == 2) { + if (info.CompressFormatVersion() == 2) { if (!compression::GetDecompressedSizeInfo(&input_data, &input_length, &output_len)) { return nullptr; @@ -914,9 +502,9 @@ inline CacheAllocationPtr Zlib_Uncompress( _stream.next_in = (Bytef*)input_data; _stream.avail_in = static_cast(input_length); - auto output = AllocateBlock(output_len, allocator); + auto output = Allocate(output_len, info.GetMemoryAllocator()); - _stream.next_out = (Bytef*)output.get(); + _stream.next_out = (Bytef*)output; _stream.avail_out = static_cast(output_len); bool done = false; @@ -930,16 +518,17 @@ inline CacheAllocationPtr Zlib_Uncompress( // No output space. Increase the output space by 20%. // We should never run out of output space if // compress_format_version == 2 - assert(compress_format_version != 2); + assert(info.CompressFormatVersion() != 2); size_t old_sz = output_len; uint32_t output_len_delta = output_len / 5; output_len += output_len_delta < 10 ? 10 : output_len_delta; - auto tmp = AllocateBlock(output_len, allocator); - memcpy(tmp.get(), output.get(), old_sz); - output = std::move(tmp); + auto tmp = Allocate(output_len, info.GetMemoryAllocator()); + memcpy(tmp, output, old_sz); + Deallocate(output, info.GetMemoryAllocator()); + output = tmp; // Set more output. 
- _stream.next_out = (Bytef*)(output.get() + old_sz); + _stream.next_out = (Bytef*)(output + old_sz); _stream.avail_out = static_cast(output_len - old_sz); break; } @@ -951,23 +540,44 @@ inline CacheAllocationPtr Zlib_Uncompress( } // If we encoded decompressed block size, we should have no bytes left - assert(compress_format_version != 2 || _stream.avail_out == 0); + assert(info.CompressFormatVersion() != 2 || _stream.avail_out == 0); assert(output_len >= _stream.avail_out); *uncompressed_size = output_len - _stream.avail_out; inflateEnd(&_stream); return output; -#else - (void)info; - (void)input_data; - (void)input_length; - (void)uncompressed_size; - (void)compress_format_version; - (void)allocator; - (void)windowBits; - return nullptr; -#endif } +#endif // ZLIB + +class ZlibCompressor : public BuiltinDictCompressor { + public: + ZlibCompressor(); + + static const char* kClassName() { return "Zlib"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kZlibCompression"; } + const char* NickName() const override { return kNickName(); } + + CompressionType GetCompressionType() const override { + return kZlibCompression; + } + bool Supported() const override { return Zlib_Supported(); } + bool DictCompressionSupported() const override { return true; } + +#ifdef ZLIB + Status Compress(const CompressionInfo& info, const Slice& slice, + std::string* output) override; + + Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) override; +#endif // ZLIB + protected: + bool MatchesOptions(const CompressionOptions& opts) const override; +}; + +#ifdef BZIP2 // compress_format_version == 1 -- decompressed size is not included in the // block header // compress_format_version == 2 -- decompressed size is included in the block @@ -975,7 +585,6 @@ inline CacheAllocationPtr Zlib_Uncompress( inline bool BZip2_Compress(const CompressionInfo& /*info*/, 
uint32_t compress_format_version, const char* input, size_t length, ::std::string* output) { -#ifdef BZIP2 if (length > std::numeric_limits::max()) { // Can't compress more than 4GB return false; @@ -1020,25 +629,17 @@ inline bool BZip2_Compress(const CompressionInfo& /*info*/, BZ2_bzCompressEnd(&_stream); return compressed; -#else - (void)compress_format_version; - (void)input; - (void)length; - (void)output; - return false; -#endif } // compress_format_version == 1 -- decompressed size is not included in the // block header // compress_format_version == 2 -- decompressed size is included in the block // header in varint32 format -inline CacheAllocationPtr BZip2_Uncompress( - const char* input_data, size_t input_length, size_t* uncompressed_size, - uint32_t compress_format_version, MemoryAllocator* allocator = nullptr) { -#ifdef BZIP2 +inline char* BZip2_Uncompress(const UncompressionInfo& info, + const char* input_data, size_t input_length, + size_t* uncompressed_size) { uint32_t output_len = 0; - if (compress_format_version == 2) { + if (info.CompressFormatVersion() == 2) { if (!compression::GetDecompressedSizeInfo(&input_data, &input_length, &output_len)) { return nullptr; @@ -1063,9 +664,9 @@ inline CacheAllocationPtr BZip2_Uncompress( _stream.next_in = (char*)input_data; _stream.avail_in = static_cast(input_length); - auto output = AllocateBlock(output_len, allocator); + auto output = Allocate(output_len, info.GetMemoryAllocator()); - _stream.next_out = (char*)output.get(); + _stream.next_out = (char*)output; _stream.avail_out = static_cast(output_len); bool done = false; @@ -1079,15 +680,16 @@ inline CacheAllocationPtr BZip2_Uncompress( // No output space. Increase the output space by 20%. 
// We should never run out of output space if // compress_format_version == 2 - assert(compress_format_version != 2); + assert(info.CompressFormatVersion() != 2); uint32_t old_sz = output_len; output_len = output_len * 1.2; - auto tmp = AllocateBlock(output_len, allocator); - memcpy(tmp.get(), output.get(), old_sz); - output = std::move(tmp); + auto tmp = Allocate(output_len, info.GetMemoryAllocator()); + memcpy(tmp, output, old_sz); + Deallocate(output, info.GetMemoryAllocator()); + output = tmp; // Set more output. - _stream.next_out = (char*)(output.get() + old_sz); + _stream.next_out = (char*)(output + old_sz); _stream.avail_out = static_cast(output_len - old_sz); break; } @@ -1098,38 +700,54 @@ inline CacheAllocationPtr BZip2_Uncompress( } // If we encoded decompressed block size, we should have no bytes left - assert(compress_format_version != 2 || _stream.avail_out == 0); + assert(info.CompressFormatVersion() != 2 || _stream.avail_out == 0); assert(output_len >= _stream.avail_out); *uncompressed_size = output_len - _stream.avail_out; BZ2_bzDecompressEnd(&_stream); return output; -#else - (void)input_data; - (void)input_length; - (void)uncompressed_size; - (void)compress_format_version; - (void)allocator; - return nullptr; -#endif } +#endif // BZIP2 + +class BZip2Compressor : public BuiltinCompressor { + public: + BZip2Compressor(); + + static const char* kClassName() { return "BZip2"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kBZip2Compression"; } + const char* NickName() const override { return kNickName(); } + + CompressionType GetCompressionType() const override { + return kBZip2Compression; + } + bool Supported() const override { return BZip2_Supported(); } +#ifdef BZIP2 + Status Compress(const CompressionInfo& info, const Slice& input, + std::string* output) override; + + Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* 
output_length) override; +#endif // BZIP2 +}; +#ifdef LZ4 // compress_format_version == 1 -- decompressed size is included in the // block header using memcpy, which makes database non-portable) // compress_format_version == 2 -- decompressed size is included in the block // header in varint32 format // @param compression_dict Data for presetting the compression library's // dictionary. -inline bool LZ4_Compress(const CompressionInfo& info, - uint32_t compress_format_version, const char* input, +inline bool LZ4_Compress(const CompressionInfo& info, const char* input, size_t length, ::std::string* output) { -#ifdef LZ4 if (length > std::numeric_limits::max()) { // Can't compress more than 4GB return false; } size_t output_header_len = 0; - if (compress_format_version == 2) { + if (info.CompressFormatVersion() == 2) { // new encoding, using varint32 to store size information output_header_len = compression::PutDecompressedSizeInfo( output, static_cast(length)); @@ -1172,14 +790,6 @@ inline bool LZ4_Compress(const CompressionInfo& info, } output->resize(static_cast(output_header_len + outlen)); return true; -#else // LZ4 - (void)info; - (void)compress_format_version; - (void)input; - (void)length; - (void)output; - return false; -#endif } // compress_format_version == 1 -- decompressed size is included in the @@ -1188,15 +798,11 @@ inline bool LZ4_Compress(const CompressionInfo& info, // header in varint32 format // @param compression_dict Data for presetting the compression library's // dictionary. 
-inline CacheAllocationPtr LZ4_Uncompress(const UncompressionInfo& info, - const char* input_data, - size_t input_length, - size_t* uncompressed_size, - uint32_t compress_format_version, - MemoryAllocator* allocator = nullptr) { -#ifdef LZ4 +inline char* LZ4_Uncompress(const UncompressionInfo& info, + const char* input_data, size_t input_length, + size_t* uncompressed_size) { uint32_t output_len = 0; - if (compress_format_version == 2) { + if (info.CompressFormatVersion() == 2) { // new encoding, using varint32 to store size information if (!compression::GetDecompressedSizeInfo(&input_data, &input_length, &output_len)) { @@ -1217,7 +823,7 @@ inline CacheAllocationPtr LZ4_Uncompress(const UncompressionInfo& info, input_data += 8; } - auto output = AllocateBlock(output_len, allocator); + auto output = Allocate(output_len, info.GetMemoryAllocator()); int decompress_bytes = 0; @@ -1229,32 +835,50 @@ inline CacheAllocationPtr LZ4_Uncompress(const UncompressionInfo& info, static_cast(compression_dict.size())); } decompress_bytes = LZ4_decompress_safe_continue( - stream, input_data, output.get(), static_cast(input_length), + stream, input_data, output, static_cast(input_length), static_cast(output_len)); LZ4_freeStreamDecode(stream); #else // up to r123 - decompress_bytes = LZ4_decompress_safe(input_data, output.get(), - static_cast(input_length), - static_cast(output_len)); + decompress_bytes = + LZ4_decompress_safe(input_data, output, static_cast(input_length), + static_cast(output_len)); #endif // LZ4_VERSION_NUMBER >= 10400 if (decompress_bytes < 0) { + Deallocate(output, info.GetMemoryAllocator()); return nullptr; } assert(decompress_bytes == static_cast(output_len)); *uncompressed_size = decompress_bytes; return output; -#else // LZ4 - (void)info; - (void)input_data; - (void)input_length; - (void)uncompressed_size; - (void)compress_format_version; - (void)allocator; - return nullptr; -#endif } +#endif // LZ4 + +class LZ4Compressor : public BuiltinDictCompressor { + 
public: + static const char* kClassName() { return "LZ4"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kLZ4Compression"; } + const char* NickName() const override { return kNickName(); } + CompressionType GetCompressionType() const override { + return kLZ4Compression; + } + bool Supported() const override { return LZ4_Supported(); } + + bool DictCompressionSupported() const override; +#ifdef LZ4 + Status Compress(const CompressionInfo& info, const Slice& input, + std::string* output) override; + + Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) override; +#endif // LZ4 +}; + +#ifdef LZ4 // compress_format_version == 1 -- decompressed size is included in the // block header using memcpy, which makes database non-portable) // compress_format_version == 2 -- decompressed size is included in the block @@ -1262,16 +886,15 @@ inline CacheAllocationPtr LZ4_Uncompress(const UncompressionInfo& info, // @param compression_dict Data for presetting the compression library's // dictionary. 
inline bool LZ4HC_Compress(const CompressionInfo& info, - uint32_t compress_format_version, const char* input, + const CompressionOptions& opts, const char* input, size_t length, ::std::string* output) { -#ifdef LZ4 if (length > std::numeric_limits::max()) { // Can't compress more than 4GB return false; } size_t output_header_len = 0; - if (compress_format_version == 2) { + if (info.CompressFormatVersion() == 2) { // new encoding, using varint32 to store size information output_header_len = compression::PutDecompressedSizeInfo( output, static_cast(length)); @@ -1288,10 +911,10 @@ inline bool LZ4HC_Compress(const CompressionInfo& info, int outlen; int level; - if (info.options().level == CompressionOptions::kDefaultCompressionLevel) { + if (opts.level == CompressionOptions::kDefaultCompressionLevel) { level = 0; // lz4hc.h says any value < 1 will be sanitized to default } else { - level = info.options().level; + level = opts.level; } #if LZ4_VERSION_NUMBER >= 10400 // r124+ LZ4_streamHC_t* stream = LZ4_createStreamHC(); @@ -1331,181 +954,75 @@ inline bool LZ4HC_Compress(const CompressionInfo& info, } output->resize(static_cast(output_header_len + outlen)); return true; -#else // LZ4 - (void)info; - (void)compress_format_version; - (void)input; - (void)length; - (void)output; - return false; -#endif } +#endif // LZ4 + +class LZ4HCCompressor : public BuiltinDictCompressor { + public: + LZ4HCCompressor(); + + static const char* kClassName() { return "LZ4HC"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kLZ4HCCompression"; } + const char* NickName() const override { return kNickName(); } + + CompressionType GetCompressionType() const override { + return kLZ4HCCompression; + } + + bool Supported() const override { return LZ4_Supported(); } + + bool DictCompressionSupported() const override; + +#ifdef LZ4 + Status Compress(const CompressionInfo& info, const Slice& slice, + std::string* output) override; + + 
Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) override; +#endif // LZ4 + protected: + bool MatchesOptions(const CompressionOptions& opts) const override; +}; #ifdef XPRESS inline bool XPRESS_Compress(const char* input, size_t length, std::string* output) { return port::xpress::Compress(input, length, output); } -#else -inline bool XPRESS_Compress(const char* /*input*/, size_t /*length*/, - std::string* /*output*/) { - return false; -} -#endif -#ifdef XPRESS inline char* XPRESS_Uncompress(const char* input_data, size_t input_length, size_t* uncompressed_size) { return port::xpress::Decompress(input_data, input_length, uncompressed_size); } -#else -inline char* XPRESS_Uncompress(const char* /*input_data*/, - size_t /*input_length*/, - size_t* /*uncompressed_size*/) { - return nullptr; -} -#endif +#endif // XPRESS -inline bool ZSTD_Compress(const CompressionInfo& info, const char* input, - size_t length, ::std::string* output) { -#ifdef ZSTD - if (length > std::numeric_limits::max()) { - // Can't compress more than 4GB - return false; - } +class XpressCompressor : public BuiltinCompressor { + public: + XpressCompressor(); - size_t output_header_len = compression::PutDecompressedSizeInfo( - output, static_cast(length)); + static const char* kClassName() { return "Xpress"; } + const char* Name() const override { return kClassName(); } - size_t compressBound = ZSTD_compressBound(length); - output->resize(static_cast(output_header_len + compressBound)); - size_t outlen = 0; -#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ - ZSTD_CCtx* context = info.context().ZSTDPreallocCtx(); - assert(context != nullptr); -#ifdef ZSTD_ADVANCED - if (info.dict().GetDigestedZstdCDict() != nullptr) { - ZSTD_CCtx_refCDict(context, info.dict().GetDigestedZstdCDict()); - } else { - ZSTD_CCtx_loadDictionary(context, info.dict().GetRawDict().data(), - info.dict().GetRawDict().size()); - } + static const char* 
kNickName() { return "kXpressCompression"; } + const char* NickName() const override { return kNickName(); } - // Compression level is set in `contex` during CreateNativeContext() - outlen = ZSTD_compress2(context, &(*output)[output_header_len], compressBound, - input, length); -#else // ZSTD_ADVANCED -#if ZSTD_VERSION_NUMBER >= 700 // v0.7.0+ - if (info.dict().GetDigestedZstdCDict() != nullptr) { - outlen = ZSTD_compress_usingCDict(context, &(*output)[output_header_len], - compressBound, input, length, - info.dict().GetDigestedZstdCDict()); + CompressionType GetCompressionType() const override { + return kXpressCompression; } -#endif // ZSTD_VERSION_NUMBER >= 700 - // TODO (cbi): error handling for compression. - if (outlen == 0) { - int level; - if (info.options().level == CompressionOptions::kDefaultCompressionLevel) { - // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see - // https://github.com/facebook/zstd/issues/1148 - level = 3; - } else { - level = info.options().level; - } - outlen = ZSTD_compress_usingDict(context, &(*output)[output_header_len], - compressBound, input, length, - info.dict().GetRawDict().data(), - info.dict().GetRawDict().size(), level); - } -#endif // ZSTD_ADVANCED -#else // up to v0.4.x - outlen = ZSTD_compress(&(*output)[output_header_len], compressBound, input, - length, level); -#endif // ZSTD_VERSION_NUMBER >= 500 - if (outlen == 0) { - return false; - } - output->resize(output_header_len + outlen); - return true; -#else // ZSTD - (void)info; - (void)input; - (void)length; - (void)output; - return false; -#endif -} + bool Supported() const override { return XPRESS_Supported(); } -// @param compression_dict Data for presetting the compression library's -// dictionary. -// @param error_message If not null, will be set if decompression fails. -// -// Returns nullptr if decompression fails. 
-inline CacheAllocationPtr ZSTD_Uncompress( - const UncompressionInfo& info, const char* input_data, size_t input_length, - size_t* uncompressed_size, MemoryAllocator* allocator = nullptr, - const char** error_message = nullptr) { -#ifdef ZSTD - static const char* const kErrorDecodeOutputSize = - "Cannot decode output size."; - static const char* const kErrorOutputLenMismatch = - "Decompressed size does not match header."; - uint32_t output_len = 0; - if (!compression::GetDecompressedSizeInfo(&input_data, &input_length, - &output_len)) { - if (error_message) { - *error_message = kErrorDecodeOutputSize; - } - return nullptr; - } - - CacheAllocationPtr output = AllocateBlock(output_len, allocator); - size_t actual_output_length = 0; -#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ - ZSTD_DCtx* context = info.context().GetZSTDContext(); - assert(context != nullptr); -#ifdef ROCKSDB_ZSTD_DDICT - if (info.dict().GetDigestedZstdDDict() != nullptr) { - actual_output_length = ZSTD_decompress_usingDDict( - context, output.get(), output_len, input_data, input_length, - info.dict().GetDigestedZstdDDict()); - } else { -#endif // ROCKSDB_ZSTD_DDICT - actual_output_length = ZSTD_decompress_usingDict( - context, output.get(), output_len, input_data, input_length, - info.dict().GetRawDict().data(), info.dict().GetRawDict().size()); -#ifdef ROCKSDB_ZSTD_DDICT - } -#endif // ROCKSDB_ZSTD_DDICT -#else // up to v0.4.x - (void)info; - actual_output_length = - ZSTD_decompress(output.get(), output_len, input_data, input_length); -#endif // ZSTD_VERSION_NUMBER >= 500 - if (ZSTD_isError(actual_output_length)) { - if (error_message) { - *error_message = ZSTD_getErrorName(actual_output_length); - } - return nullptr; - } else if (actual_output_length != output_len) { - if (error_message) { - *error_message = kErrorOutputLenMismatch; - } - return nullptr; - } +#ifdef XPRESS + Status Compress(const CompressionInfo& info, const Slice& input, + std::string* output) override; - *uncompressed_size = 
actual_output_length; - return output; -#else // ZSTD - (void)info; - (void)input_data; - (void)input_length; - (void)uncompressed_size; - (void)allocator; - (void)error_message; - return nullptr; -#endif -} + Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) override; +#endif // XPRESS +}; inline bool ZSTD_TrainDictionarySupported() { #ifdef ZSTD @@ -1614,85 +1131,159 @@ inline std::string ZSTD_FinalizeDictionary( #endif // ZSTD_VERSION_NUMBER >= 10405 } -inline bool CompressData(const Slice& raw, - const CompressionInfo& compression_info, - uint32_t compress_format_version, - std::string* compressed_output) { - bool ret = false; - - // Will return compressed block contents if (1) the compression method is - // supported in this platform and (2) the compression rate is "good enough". - switch (compression_info.type()) { - case kSnappyCompression: - ret = Snappy_Compress(compression_info, raw.data(), raw.size(), - compressed_output); - break; - case kZlibCompression: - ret = Zlib_Compress(compression_info, compress_format_version, raw.data(), - raw.size(), compressed_output); - break; - case kBZip2Compression: - ret = BZip2_Compress(compression_info, compress_format_version, - raw.data(), raw.size(), compressed_output); - break; - case kLZ4Compression: - ret = LZ4_Compress(compression_info, compress_format_version, raw.data(), - raw.size(), compressed_output); - break; - case kLZ4HCCompression: - ret = LZ4HC_Compress(compression_info, compress_format_version, - raw.data(), raw.size(), compressed_output); - break; - case kXpressCompression: - ret = XPRESS_Compress(raw.data(), raw.size(), compressed_output); - break; - case kZSTD: - case kZSTDNotFinalCompression: - ret = ZSTD_Compress(compression_info, raw.data(), raw.size(), - compressed_output); - break; - default: - // Do not recognize this compression type - break; +#if defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500) +class 
ZSTDCompressor; + +class ZSTDContext { + public: + ZSTDContext() { ctx_cache_ = CompressionContextCache::Instance(); } + + ~ZSTDContext() { + for (auto& uncomp_cached_data : uncomp_cached_data_) { + if (uncomp_cached_data.second.GetCacheIndex() != -1) { + assert(ctx_cache_ != nullptr); + ctx_cache_->ReturnCachedZSTDUncompressData( + uncomp_cached_data.second.GetCacheIndex()); + } + } + + for (auto& cctx : cctx_) { + ZSTD_freeCCtx(cctx.second); + } } - TEST_SYNC_POINT_CALLBACK("CompressData:TamperWithReturnValue", - static_cast(&ret)); + ZSTD_DCtx* GetUncompressionContext(ZSTDCompressor* compressor) { + auto iter = uncomp_cached_data_.find(compressor); + if (iter == uncomp_cached_data_.end()) { + uncomp_cached_data_[compressor] = + ctx_cache_->GetCachedZSTDUncompressData(); + } + return uncomp_cached_data_[compressor].Get(); + } - return ret; -} + ZSTD_CCtx* GetCompressionContext(ZSTDCompressor* compressor, int level, + bool checksum) { + auto cctx = cctx_.find(compressor); + if (cctx != cctx_.end()) { + return cctx->second; + } else { + return CreateNativeContext(compressor, level, checksum); + } + } -inline CacheAllocationPtr UncompressData( - const UncompressionInfo& uncompression_info, const char* data, size_t n, - size_t* uncompressed_size, uint32_t compress_format_version, - MemoryAllocator* allocator = nullptr, - const char** error_message = nullptr) { - switch (uncompression_info.type()) { - case kSnappyCompression: - return Snappy_Uncompress(data, n, uncompressed_size, allocator); - case kZlibCompression: - return Zlib_Uncompress(uncompression_info, data, n, uncompressed_size, - compress_format_version, allocator); - case kBZip2Compression: - return BZip2_Uncompress(data, n, uncompressed_size, - compress_format_version, allocator); - case kLZ4Compression: - case kLZ4HCCompression: - return LZ4_Uncompress(uncompression_info, data, n, uncompressed_size, - compress_format_version, allocator); - case kXpressCompression: - // XPRESS allocates memory internally, 
thus no support for custom - // allocator. - return CacheAllocationPtr(XPRESS_Uncompress(data, n, uncompressed_size)); - case kZSTD: - case kZSTDNotFinalCompression: - // TODO(cbi): error message handling for other compression algorithms. - return ZSTD_Uncompress(uncompression_info, data, n, uncompressed_size, - allocator, error_message); - default: - return CacheAllocationPtr(); + private: + ZSTD_CCtx* CreateZSTDContext() { +#ifdef ROCKSDB_ZSTD_CUSTOM_MEM + return ZSTD_createCCtx_advanced(port::GetJeZstdAllocationOverrides()); +#else // ROCKSDB_ZSTD_CUSTOM_MEM + return ZSTD_createCCtx(); +#endif // ROCKSDB_ZSTD_CUSTOM_MEM } -} + + ZSTD_CCtx* CreateNativeContext(ZSTDCompressor* compressor, int level, + bool checksum) { + ZSTD_CCtx* cctx = CreateZSTDContext(); +#ifdef ZSTD_ADVANCED + if (level == CompressionOptions::kDefaultCompressionLevel) { + // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see + // https://github.com/facebook/zstd/issues/1148 + level = 3; + } + size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level); + if (ZSTD_isError(err)) { + assert(false); + ZSTD_freeCCtx(cctx); + cctx = CreateZSTDContext(); + } + if (checksum) { + err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1); + if (ZSTD_isError(err)) { + assert(false); + ZSTD_freeCCtx(cctx); + cctx = CreateZSTDContext(); + } + } +#else + (void)level; + (void)checksum; +#endif + cctx_[compressor] = cctx; + return cctx; + } + + CompressionContextCache* ctx_cache_ = nullptr; + std::unordered_map + uncomp_cached_data_; + std::unordered_map cctx_; +}; +#endif // defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500) + +class ZSTDCompressor : public BuiltinDictCompressor { + public: + ZSTDCompressor(); + + static const char* kClassName() { return "ZSTD"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kZSTD"; } + const char* NickName() const override { return kNickName(); } + + CompressionType GetCompressionType() 
const override { return kZSTD; } + bool Supported() const override { return ZSTD_Supported(); } + + bool DictCompressionSupported() const override; + +#ifdef ZSTD + Status Compress(const CompressionInfo& info, const Slice& input, + std::string* output) override; + + Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) override; + +#if ZSTD_VERSION_NUMBER >= 700 + std::unique_ptr NewCompressionDict( + const std::string& dict) override; + UncompressionDict* NewUncompressionDict(const std::string& dict) override; + UncompressionDict* NewUncompressionDict( + const Slice& slice, CacheAllocationPtr&& allocation) override; +#endif // ZSTD_VERSION_NUMBER >= 700 + +#endif // ZSTD + protected: + bool MatchesOptions(const CompressionOptions& opts) const override; +#if defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500) + static thread_local ZSTDContext zstd_context_; +#endif // defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500) +}; + +class ZSTDNotFinalCompressor : public ZSTDCompressor { + public: + static const char* kClassName() { return "ZSTDNotFinal"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kZSTDNotFinalCompression"; } + const char* NickName() const override { return kNickName(); } + + CompressionType GetCompressionType() const override { + return kZSTDNotFinalCompression; + } + bool Supported() const override { return ZSTDNotFinal_Supported(); } +}; + +class NoCompressor : public BuiltinCompressor { + public: + static const char* kClassName() { return "NoCompression"; } + const char* Name() const override { return kClassName(); } + + static const char* kNickName() { return "kNoCompression"; } + const char* NickName() const override { return kNickName(); } + + CompressionType GetCompressionType() const override { return kNoCompression; } + bool Supported() const override { return true; } + + bool DictCompressionSupported() const override 
{ return false; } +}; // Records the compression type for subsequent WAL records. class CompressionTypeRecord { diff --git a/util/compression_test.cc b/util/compression_test.cc new file mode 100644 index 0000000000..5cb0c0e3cc --- /dev/null +++ b/util/compression_test.cc @@ -0,0 +1,200 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// + +#include "util/compression.h" + +#include "port/stack_trace.h" +#include "rocksdb/configurable.h" +#include "rocksdb/convenience.h" +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/utilities/options_type.h" +#include "rocksdb/utilities/options_util.h" +#include "test_util/testharness.h" +#include "util/compressor.h" + +namespace ROCKSDB_NAMESPACE { + +TEST(Compression, CreateFromString) { + ConfigOptions config_options; + config_options.ignore_unsupported_options = false; + config_options.ignore_unknown_options = false; + + for (auto type : {kSnappyCompression, kZlibCompression, kBZip2Compression, + kLZ4Compression, kLZ4HCCompression, kXpressCompression, + kZSTD, kZSTDNotFinalCompression, kNoCompression}) { + std::shared_ptr base, copy; + std::string name, nickname; + ASSERT_TRUE(BuiltinCompressor::TypeToString(type, true, &name)); + ASSERT_OK(Compressor::CreateFromString(config_options, name, &base)); + // Was compressor created? 
+ ASSERT_NE(base, nullptr); + ASSERT_EQ(base->GetCompressionType(), type); + ASSERT_TRUE(base->IsInstanceOf(name)); + if (BuiltinCompressor::TypeToString(type, false, &nickname)) { + ASSERT_OK(Compressor::CreateFromString(config_options, nickname, ©)); + ASSERT_NE(copy, nullptr); + ASSERT_EQ(base.get(), copy.get()); + } + std::string value = base->ToString(config_options); + ASSERT_OK(Compressor::CreateFromString(config_options, value, ©)); + ASSERT_NE(copy, nullptr); + ASSERT_EQ(base.get(), copy.get()); + } +} + +TEST(Compression, TestBuiltinCompressors) { + std::string mismatch; + ConfigOptions config_options; + config_options.ignore_unsupported_options = false; + config_options.ignore_unknown_options = false; + CompressionOptions compression_opts{1, 2, 3, 4, 5, 6, false, 7, false, 8}; + + for (auto type : {kSnappyCompression, kZlibCompression, kBZip2Compression, + kLZ4Compression, kLZ4HCCompression, kXpressCompression, + kZSTD, kZSTDNotFinalCompression}) { + std::shared_ptr copy; + auto compressor1 = BuiltinCompressor::GetCompressor(type); + ASSERT_NE(compressor1, nullptr); + ASSERT_EQ(compressor1->GetCompressionType(), type); + auto compressor2 = BuiltinCompressor::GetCompressor(type, compression_opts); + ASSERT_NE(compressor2, nullptr); + ASSERT_EQ(compressor2->GetCompressionType(), type); + ASSERT_EQ(compressor2->GetCompressionType(), + compressor1->GetCompressionType()); + ASSERT_NE(compressor1.get(), compressor2.get()); + ASSERT_FALSE(compressor1->AreEquivalent(config_options, compressor2.get(), + &mismatch)); + std::string value = compressor1->ToString(config_options); + ASSERT_OK(Compressor::CreateFromString(config_options, value, ©)); + ASSERT_EQ(compressor1.get(), copy.get()); + + value = compressor2->ToString(config_options); + ASSERT_OK(Compressor::CreateFromString(config_options, value, ©)); + ASSERT_EQ(compressor2.get(), copy.get()); + } +} + +TEST(Compression, GetSupportedCompressions) { + std::vector types = GetSupportedCompressions(); + 
std::vector names = Compressor::GetSupported(); + for (const auto& n : names) { + CompressionType type; + if (BuiltinCompressor::StringToType(n, &type)) { + bool found = false; + for (auto& t : types) { + if (t == type) { + found = true; + break; + } + } + ASSERT_TRUE(found) << "Missing Compression Type: " << n; + } + } +} + +static void WriteDBAndFlush(DB* db, int num_keys, const std::string& val) { + WriteOptions wo; + for (int i = 0; i < num_keys; i++) { + std::string key = std::to_string(i); + Status s = db->Put(wo, Slice(key), Slice(val)); + ASSERT_OK(s); + } + // Flush all data from memtable so that an SST file is written + ASSERT_OK(db->Flush(FlushOptions())); +} + +static void CloseDB(DB* db) { + Status s = db->Close(); + ASSERT_OK(s); + delete db; +} + +TEST(Compression, DBWithZlibAndCompressionOptions) { + if (!BuiltinCompressor::TypeSupported(kZlibCompression)) { + ROCKSDB_GTEST_BYPASS("Test requires ZLIB compression"); + return; + } + + Options options; + std::string dbname = test::PerThreadDBPath("compression_test"); + ASSERT_OK(DestroyDB(dbname, options)); + + // Select Zlib through options.compression and options.compression_opts + options.create_if_missing = true; + options.compression = kZlibCompression; + options.compression_opts.window_bits = -13; + + // Open database + DB* db = nullptr; + Status s = DB::Open(options, dbname, &db); + ASSERT_OK(s); + ASSERT_NE(db, nullptr); + + // Write 200 values, each 20 bytes + WriteDBAndFlush(db, 200, "aaaaaaaaaabbbbbbbbbb"); + + // Verify table properties + TablePropertiesCollection all_tables_props; + s = db->GetPropertiesOfAllTables(&all_tables_props); + ASSERT_OK(s); + for (auto it = all_tables_props.begin(); it != all_tables_props.end(); ++it) { + ASSERT_EQ(it->second->compression_name, + BuiltinCompressor::TypeToString(kZlibCompression)); + } + // Verify options file + DBOptions db_options; + std::vector cf_descs; + ConfigOptions config_options; + s = LoadLatestOptions(config_options, db->GetName(), 
&db_options, &cf_descs); + ASSERT_OK(s); + ASSERT_EQ(cf_descs[0].options.compression, kZlibCompression); + ASSERT_EQ(cf_descs[0].options.compression_opts.window_bits, -13); + CloseDB(db); + ASSERT_OK(DestroyDB(dbname, options)); +} + +TEST(Compression, DBWithCompressionPerLevel) { + if (!BuiltinCompressor::TypeSupported(kSnappyCompression)) { + ROCKSDB_GTEST_BYPASS("Test requires Snappy compression"); + return; + } + + Options options; + std::string dbname = test::PerThreadDBPath("compression_test"); + ASSERT_OK(DestroyDB(dbname, options)); + + options.create_if_missing = true; + options.compression_per_level.push_back(kNoCompression); + options.compression_per_level.push_back(kSnappyCompression); + + DB* db = nullptr; + Status s = DB::Open(options, dbname, &db); + ASSERT_OK(s); + ASSERT_NE(db, nullptr); + + CloseDB(db); + + // Test an invalid selection for compression_per_level + options.compression_per_level.push_back(static_cast(254)); + s = DB::Open(options, dbname, &db); + ASSERT_NOK(s); + ASSERT_EQ(s.ToString(), "Invalid argument: Compression type is invalid."); + + ASSERT_OK(DestroyDB(dbname, options)); +} + +} // namespace ROCKSDB_NAMESPACE + +int main(int argc, char** argv) { + ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/compressor.cc b/util/compressor.cc new file mode 100644 index 0000000000..306763e558 --- /dev/null +++ b/util/compressor.cc @@ -0,0 +1,337 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+// +#include "options/cf_options.h" +#include "rocksdb/utilities/customizable_util.h" +#include "util/compression.h" +#include "util/random.h" +#include "util/string_util.h" + +namespace ROCKSDB_NAMESPACE { +UncompressionDict::UncompressionDict(const std::string& dict, + std::unique_ptr&& processed) + : dict_(std::move(dict)), slice_(dict_), processed_(std::move(processed)) {} + +UncompressionDict::UncompressionDict(const Slice& slice, + CacheAllocationPtr&& allocation, + std::unique_ptr&& processed) + : allocation_(std::move(allocation)), + slice_(std::move(slice)), + processed_(std::move(processed)) {} + +UncompressionDict::UncompressionDict(UncompressionDict&& rhs) noexcept + : dict_(std::move(rhs.dict_)), + allocation_(std::move(rhs.allocation_)), + slice_(std::move(rhs.slice_)), + processed_(std::move(rhs.processed_)) {} + +UncompressionDict& UncompressionDict::operator=(UncompressionDict&& rhs) { + if (this == &rhs) { + return *this; + } + + dict_ = std::move(rhs.dict_); + allocation_ = std::move(rhs.allocation_); + slice_ = std::move(rhs.slice_); + processed_ = std::move(rhs.processed_); + return *this; +} + +size_t UncompressionDict::ApproximateMemoryUsage() const { + size_t usage = sizeof(UncompressionDict); + usage += dict_.size(); + if (allocation_) { + auto allocator = allocation_.get_deleter().allocator; + if (allocator) { + usage += allocator->UsableSize(allocation_.get(), slice_.size()); + } else { + usage += slice_.size(); + } + } + if (processed_) { + usage += processed_->Size(); + } + return usage; +} + +// Map built-in Compressor names to constants in CompressionType +static std::unordered_map builtin_compressors{ + {NoCompressor::kClassName(), kNoCompression}, + {SnappyCompressor::kClassName(), kSnappyCompression}, + {ZlibCompressor::kClassName(), kZlibCompression}, + {BZip2Compressor::kClassName(), kBZip2Compression}, + {LZ4Compressor::kClassName(), kLZ4Compression}, + {LZ4HCCompressor::kClassName(), kLZ4HCCompression}, + 
{XpressCompressor::kClassName(), kXpressCompression}, + {ZSTDCompressor::kClassName(), kZSTD}, + {ZSTDNotFinalCompressor::kClassName(), kZSTDNotFinalCompression}}; + +std::mutex Compressor::mutex_; +std::unordered_map>> + Compressor::compressors_; + +template +bool CreateIfMatches(const std::string& id, std::shared_ptr* c) { + if (id == T::kClassName() || id == T::kNickName()) { + c->reset(new T()); + return true; + } else { + return false; + } +} + +static Status NewCompressor(const ConfigOptions& /*config_options*/, + const std::string& id, + std::shared_ptr* result) { + if (CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result) || + CreateIfMatches(id, result)) { + return Status::OK(); + } else { + return Status::NotSupported("Cannot find compressor ", id); + } +} + +Status Compressor::CreateFromString(const ConfigOptions& config_options, + const std::string& value, + std::shared_ptr* result) { + std::string id; + OptionProperties props; + Status status = Customizable::GetOptionsMap(config_options, result->get(), + value, &id, &props); + if (!status.ok()) { // GetOptionsMap failed + return status; + } else if (value.empty() || value == kNullptrString) { + result->reset(); + return Status::OK(); + } else if (id.empty()) { + return Status::NotSupported("Cannot reset object ", value); + } else { + // For the builtins, always try to create based on the class name, + // not the nickname + CompressionType type; + if (BuiltinCompressor::StringToType(id, &type)) { + id = BuiltinCompressor::TypeToString(type); + } + std::unique_lock lock(mutex_); + auto vit = compressors_.find(id); + std::shared_ptr compressor; + // There are three case. 
+ // 1 - When there are no existing compressors of this type + // In this case, we create a new compressor and add it to the collection + // 2 - When the compressor has no options + // In this case, we return the first valid compressor, creating/adding + // a new one if none found + // 3 - When the compressor has options + // In this case, we create a new compressor and see if it matches + // and existing one. If so, we return the existing one. + // If not, we create a new one and add it to the lsit + if (!props.empty() || vit == compressors_.end() || vit->second.empty()) { + // Either there are none in the list or there are options. Create one + status = NewCompressor(config_options, id, &compressor); + if (status.ok()) { + status = Customizable::ConfigureNewObject(config_options, + compressor.get(), props); + } + } + if (vit != compressors_.end() && !vit->second.empty()) { + if (props.empty()) { + // Case 2: There are no options + for (const auto& cit : vit->second) { + auto other = cit.lock(); + if (other) { + // Found a valid one. Return it + *result = other; + return Status::OK(); + } + } + // Looped through all of them and did not find any. Create one + status = NewCompressor(config_options, id, &compressor); + } else if (status.ok()) { + // Case 3: Compressor has options + for (const auto& cit : vit->second) { + std::string mismatch; + auto other = cit.lock(); + if (other && other->AreEquivalent(config_options, compressor.get(), + &mismatch)) { + // Found a matching one. 
Return it + *result = other; + return Status::OK(); + } + } + } + } + if (status.ok()) { + compressors_[id].push_back(compressor); + *result = compressor; + } + } + return status; +} + +std::shared_ptr BuiltinCompressor::GetCompressor( + CompressionType type) { + ConfigOptions config_options; + config_options.ignore_unknown_options = false; + config_options.ignore_unsupported_options = false; + config_options.invoke_prepare_options = false; + std::string id; + // Simple case of looking for any compressor that matches the type + // Convert the type to an ID and then go through the create w/o options case + if (BuiltinCompressor::TypeToString(type, true, &id)) { + std::shared_ptr result; + Status s = Compressor::CreateFromString(config_options, id, &result); + if (s.ok()) { + return result; + } + } + return nullptr; +} + +std::shared_ptr BuiltinCompressor::GetCompressor( + CompressionType type, const CompressionOptions& opts) { + std::string id; + if (BuiltinCompressor::TypeToString(type, true, &id)) { + // Looking for any compressor with specific options. + // Find the ones of the proper type and compare their options to the + // requested. If they match, return the existing one. If not, create a new + // one + std::unique_lock lock(mutex_); + auto vit = compressors_.find(id); + if (vit != compressors_.end()) { + for (const auto& cit : vit->second) { + std::shared_ptr c = cit.lock(); + if (c) { + auto bc = c->CheckedCast(); + if (bc != nullptr && bc->MatchesOptions(opts)) { + return c; + } + } + } + } + // We did not find an appropriate compressor in the list. 
Create a new one + std::shared_ptr builtin; + ConfigOptions config_options; + config_options.ignore_unknown_options = false; + config_options.ignore_unsupported_options = false; + config_options.invoke_prepare_options = false; + Status s = NewCompressor(config_options, id, &builtin); + if (s.ok()) { + auto bc_opts = builtin->GetOptions(); + assert(bc_opts != nullptr); + if (bc_opts != nullptr) { + *bc_opts = opts; + } + compressors_[id].push_back(builtin); + return builtin; + } + } + return nullptr; +} + +void Compressor::SampleDict(std::vector& data_block_buffers, + std::string* compression_dict_samples, + std::vector* compression_dict_sample_lens) { + uint32_t max_dict_bytes = GetMaxDictBytes(); + uint32_t max_train_bytes = GetMaxTrainBytes(); + + const size_t kSampleBytes = + max_train_bytes > 0 ? max_train_bytes : max_dict_bytes; + const size_t kNumBlocksBuffered = data_block_buffers.size(); + + // Abstract algebra teaches us that a finite cyclic group (such as the + // additive group of integers modulo N) can be generated by a number that is + // coprime with N. Since N is variable (number of buffered data blocks), we + // must then pick a prime number in order to guarantee coprimeness with any N. + // + // One downside of this approach is the spread will be poor when + // `kPrimeGeneratorRemainder` is close to zero or close to + // `kNumBlocksBuffered`. + // + // Picked a random number between one and one trillion and then chose the + // next prime number greater than or equal to it. + const uint64_t kPrimeGenerator = 545055921143ull; + // Can avoid repeated division by just adding the remainder repeatedly. 
+ const size_t kPrimeGeneratorRemainder = static_cast( + kPrimeGenerator % static_cast(kNumBlocksBuffered)); + const size_t kInitSampleIdx = kNumBlocksBuffered / 2; + + size_t buffer_idx = kInitSampleIdx; + for (size_t i = 0; i < kNumBlocksBuffered && + compression_dict_samples->size() < kSampleBytes; + ++i) { + size_t copy_len = std::min(kSampleBytes - compression_dict_samples->size(), + data_block_buffers[buffer_idx].size()); + compression_dict_samples->append(data_block_buffers[buffer_idx], 0, + copy_len); + compression_dict_sample_lens->emplace_back(copy_len); + + buffer_idx += kPrimeGeneratorRemainder; + if (buffer_idx >= kNumBlocksBuffered) { + buffer_idx -= kNumBlocksBuffered; + } + } +} + +std::string Compressor::TrainDict( + const std::string& compression_dict_samples, + const std::vector& compression_dict_sample_lens) { + uint32_t max_dict_bytes = GetMaxDictBytes(); + uint32_t max_train_bytes = GetMaxTrainBytes(); + int level = GetLevel(); + bool use_dict_trainer = UseDictTrainer(); + + // final data block flushed, now we can generate dictionary from the samples. + // OK if compression_dict_samples is empty, we'll just get empty dictionary. 
+ if (max_train_bytes > 0) { + if (use_dict_trainer && ZSTD_TrainDictionarySupported()) { + return ZSTD_TrainDictionary(compression_dict_samples, + compression_dict_sample_lens, max_dict_bytes); + } else if (ZSTD_FinalizeDictionarySupported()) { + return ZSTD_FinalizeDictionary(compression_dict_samples, + compression_dict_sample_lens, + max_dict_bytes, level); + } else { + return compression_dict_samples; + } + } else { + return compression_dict_samples; + } +} + +Status Compressor::CreateDict( + std::vector& data_block_buffers, + std::unique_ptr* compression_dict) { + if (!DictCompressionSupported()) { + return Status::NotSupported(); + } + + std::string compression_dict_samples; + std::vector compression_dict_sample_lens; + SampleDict(data_block_buffers, &compression_dict_samples, + &compression_dict_sample_lens); + + std::string dict = + TrainDict(compression_dict_samples, compression_dict_sample_lens); + + *compression_dict = NewCompressionDict(dict); + return Status::OK(); +} + +bool Compressor::IsDictEnabled() const { + return DictCompressionSupported() && (GetMaxDictBytes() > 0); +} + +} // namespace ROCKSDB_NAMESPACE diff --git a/util/compressor.h b/util/compressor.h new file mode 100644 index 0000000000..bd0aa390df --- /dev/null +++ b/util/compressor.h @@ -0,0 +1,493 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+// +#pragma once + +#include +#include + +#include "memory/memory_allocator_impl.h" +#include "rocksdb/advanced_options.h" +#include "rocksdb/customizable.h" +#include "rocksdb/utilities/object_registry.h" +#include "table/block_based/block_type.h" +#include "test_util/sync_point.h" + +namespace ROCKSDB_NAMESPACE { +class CompressionInfo; +class UncompressionInfo; +class Compressor; +class MemoryAllocator; + +struct CompressionOptions; + +// A Compressor-specific processed dictionary. +// Compressor implementations should extend this class +// for Compression and Uncompression-specific dictionary data. +// For example, this class can be extended to hold the ZSTD digested +// compression-related dictionaries. +class ProcessedDict { + public: + virtual ~ProcessedDict() = default; + // Returns a pointer to the processed dictionary data + virtual void* Data() const { return nullptr; } + // Returns the size of the processed dictionary data + virtual size_t Size() const { return 0; } +}; + +// Holds dictionary and related data, like ZSTD's digested compression +// dictionary. 
+class CompressionDict { + private: + // Raw dictionary + std::string dict_; + // Processed dictionary + std::unique_ptr processed_; + + public: + explicit CompressionDict(const std::string& dict, + std::unique_ptr&& processed = nullptr) + : dict_(std::move(dict)), processed_(std::move(processed)) {} + + Slice GetRawDict() const { return dict_; } + void* GetProcessedDict() const { + if (processed_) { + return processed_->Data(); + } else { + return nullptr; + } + } + + static const CompressionDict& GetEmptyDict() { + static CompressionDict empty_dict{}; + return empty_dict; + } + + CompressionDict() = default; + // Disable copy/move + CompressionDict(const CompressionDict&) = delete; + CompressionDict& operator=(const CompressionDict&) = delete; + CompressionDict(CompressionDict&&) = delete; + CompressionDict& operator=(CompressionDict&&) = delete; +}; + +// Holds dictionary and related data, like ZSTD's digested uncompression +// dictionary. +class UncompressionDict { + private: + // Block containing the data for the compression dictionary in case the + // constructor that takes a string parameter is used. + std::string dict_; + + // Block containing the data for the compression dictionary in case the + // constructor that takes a Slice parameter is used and the passed in + // CacheAllocationPtr is not nullptr. + CacheAllocationPtr allocation_; + + public: + // Slice pointing to the compression dictionary data. Can point to + // dict_, allocation_, or some other memory location, depending on how + // the object was constructed. 
+ Slice slice_; + + private: + std::unique_ptr processed_; + + public: + explicit UncompressionDict( + const std::string& dict, + std::unique_ptr&& processed = nullptr); + UncompressionDict(const Slice& slice, CacheAllocationPtr&& allocation, + std::unique_ptr&& processed = nullptr); + UncompressionDict(UncompressionDict&& rhs) noexcept; + UncompressionDict& operator=(UncompressionDict&& rhs); + + // The object is self-contained if the string constructor is used, or the + // Slice constructor is invoked with a non-null allocation. Otherwise, it + // is the caller's responsibility to ensure that the underlying storage + // outlives this object. + bool own_bytes() const { return !dict_.empty() || allocation_; } + const Slice& GetRawDict() const { return slice_; } + const void* GetProcessedDict() const { + if (processed_) { + return processed_->Data(); + } else { + return nullptr; + } + } + + // For TypedCacheInterface + const Slice& ContentSlice() const { return slice_; } + static constexpr CacheEntryRole kCacheEntryRole = CacheEntryRole::kOtherBlock; + static constexpr BlockType kBlockType = BlockType::kCompressionDictionary; + + static const UncompressionDict& GetEmptyDict() { + static UncompressionDict empty_dict{}; + return empty_dict; + } + size_t ApproximateMemoryUsage() const; + + UncompressionDict() = default; + // Disable copy + explicit UncompressionDict(const CompressionDict&) = delete; + UncompressionDict& operator=(const CompressionDict&) = delete; +}; + +// Interface for each compression algorithm to implement. +class Compressor : public Customizable { + public: + virtual ~Compressor() = default; + + // Type required by Customizable + static const char* Type() { return "Compressor"; } + + // Creates and configures a Compressor from the input options. + // If an existing Compressor is found that matches the input, it + // is returned. Otherwise, a new one is created. 
+ static Status CreateFromString(const ConfigOptions& opts, + const std::string& value, + std::shared_ptr* compressor); + + // Returns the IDs of the Compressors supported by this installation. + // Only Compressors that are available in this binary are returned. + static std::vector GetSupported(); + + // Returns the IDs of the Compressors supported by this installation that + // support compression dictionaries. + // Only Compressors that are available in this binary are returned. + static std::vector GetDictSupported(); + + // Get the numeric type associated with this compressor + virtual CompressionType GetCompressionType() const = 0; + + // Whether the compressor is supported. + // For example, a compressor can implement this method to verify its + // dependencies or environment settings. + virtual bool Supported() const { return true; } + + // Whether the compressor supports dictionary compression. + virtual bool DictCompressionSupported() const { return false; } + + // Compress data. + // @param info Pointer to CompressionInfo object (containing dictionary, + // version, etc). + // @param input Buffer containing data to compress. + // @param output Compressed data. + // Returns OK if compression completed correctly. + // Returns other status in case of error (e.g., Corruption). + virtual Status Compress(const CompressionInfo& info, const Slice& input, + std::string* output) = 0; + + // Uncompress data. + // @param info Pointer to UnompressionInfo object (containing dictionary, + // version, etc). + // @param input Buffer containing data to uncompress. + // @param input_length Length of the input data. + // @param output Buffer containing uncompressed data. + // @param output_length Length of the output data. + // Returns OK if uncompression completed correctly. + // Returns other status in case of error (e.g., Corruption). 
+ virtual Status Uncompress(const UncompressionInfo& info, const char* input, + size_t input_length, char** output, + size_t* output_length) = 0; + + // Create a dictionary for compression using buffered data blocks. + // @param data_block_buffers Buffered data blocks + // @dict Pointer to the generated dictionary + // Returns OK if the dictionary was generated correctly. + // Returns other status in case of error. + virtual Status CreateDict(std::vector& data_block_buffers, + std::unique_ptr* dict); + + // Returns a new compression dictionary from the input dict. + // Classes which have a ProcessedDict should override this method. + virtual std::unique_ptr NewCompressionDict( + const std::string& dict) { + return std::make_unique(dict); + } + + // Returns a new uncompression dictionary from the input dict. + // Classes which have a ProcessedDict should override this method. + virtual UncompressionDict* NewUncompressionDict(const std::string& dict) { + return new UncompressionDict(dict); + } + + // Returns a new uncompression dictionary from the input. + // Classes which have a ProcessedDict should override this method. + virtual UncompressionDict* NewUncompressionDict( + const Slice& slice, CacheAllocationPtr&& allocation) { + return new UncompressionDict(slice, std::move(allocation)); + } + + // Whether dictionary compression is enabled for this compressor. + // If the compressor does not support dictionary compression + // (DictCompressionSupported returns false), then this method must always + // return false. + virtual bool IsDictEnabled() const; + + // Equivalent of max_dict_buffer_bytes in CompressionOptions. + // As options are set for each compressor, this function returns the value for + // that option. + virtual uint64_t GetMaxDictBufferBytes() const { return 0; } + + // Equivalent of parallel_threads in CompressionOptions. + // As options are set for each compressor, this function returns the value for + // that option. 
+ virtual uint32_t GetParallelThreads() const { return 1; } + + // Equivalent of max_compressed_bytes_per_kb in CompressionOptions. + // As options are set for each compressor, this function returns the value for + // that option. + virtual int GetMaxCompressedBytesPerKb() const { return 1024 * 7 / 8; } + + protected: + static std::mutex mutex_; + static std::unordered_map>> + compressors_; + + // Sample data blocks to create a dictionary. + // @param data_block_buffers Buffered data blocks to sample from. + // @param compression_dict_samples Pointer to string to which sampled blocks + // are appended. + // @param compression_dict_sample_lens Vector of sample lengths. For each + // sample added to compression_dict_samples, store the corresponding sample + // length in this vector. + virtual void SampleDict(std::vector& data_block_buffers, + std::string* compression_dict_samples, + std::vector* compression_dict_sample_lens); + + // Train a dictionary from data samples. + // @param compression_dict_samples String containing the sampled data (it + // should be populated by the SampleDict method). + // @param compression_dict_sample_lens Length of each sample included in + // compression_dict_samples (it should be populated by the SampleDict + // method). + virtual std::string TrainDict( + const std::string& compression_dict_samples, + const std::vector& compression_dict_sample_lens); + + private: + // Equivalent of max_dict_bytes in CompressionOptions. + // As options are set for each compressor, this function returns the value for + // that option. + virtual uint32_t GetMaxDictBytes() const { return 0; } + + // Equivalent of max_zstd_train_bytes in CompressionOptions. + // As options are set for each compressor, this function returns the value for + // that option. + virtual uint32_t GetMaxTrainBytes() const { return 0; } + + // Equivalent of use_zstd_dict_trainer in CompressionOptions. 
+ // As options are set for each compressor, this function returns the value for + // that option. + virtual bool UseDictTrainer() const { return true; } + + // Equivalent of level in CompressionOptions. + // As options are set for each compressor, this function returns the value for + // that option. + virtual int GetLevel() const { + return CompressionOptions::kDefaultCompressionLevel; + } +}; + +// A BuiltinCompressor. Instances of this class are based on the +// CompressionType enumerator. Builtins should extend this class and if they +// are supported, implement the Compress and Uncompress methods appropriately. +// BuiltinCompressors use CompressionOptions to store their configuration but +// only configure/compare the values that are used by the specific +// implementation +class BuiltinCompressor : public Compressor { + public: + BuiltinCompressor(); + // Get Compressor instance given its numeric type. Any instance of that type + // will match, regardless of the ComprssionOptions. + // + // Note that a Compressor can be created for CompressorTypes that are not + // supported (based on platform and libraries) + static std::shared_ptr GetCompressor(CompressionType type); + + // Gets a Compressor instance for the input type with the corresponding + // options. If one already exists that matches, it will be returned. + // Otherwise, a new one will be created. + // + // Note that a Compressor can be created for CompressorTypes that are not + // supported (based on platform and libraries) + static std::shared_ptr GetCompressor( + CompressionType type, const CompressionOptions& options); + + // Returns true if the input type is supported by this installation. + static bool TypeSupported(CompressionType type); + + // Returns true if the input type supports dictionary compression. + static bool TypeSupportsDict(CompressionType type); + + // Converts the input string to its corresponding CompressionType. + // Supports strings in both class (e.g. 
"Snappy") and enum (e.g + // "kSnappyCompression") formats Returns true if the conversion was successful + // and false otherwise. + static bool StringToType(const std::string& s, CompressionType* type); + + // Converts the input CompressionType to its "class" representation. + // For example, kSnappyCompression would convert to "Snappy". + static std::string TypeToString(CompressionType type); + + // Converts the input type into a string. + // If as_class is true, returns the class representation (e.g. "Snappy"). + // If as_class is false, returns the enum representation(e.g. + // "kSnappyCompression") Returns true if the CompressionType could be + // converted to a string and false otherwise. + static bool TypeToString(CompressionType type, bool as_class, + std::string* result); + + static const char* kClassName() { return "BuiltinCompressor"; } + bool IsInstanceOf(const std::string& id) const override; + const void* GetOptionsPtr(const std::string& name) const override; + + // Default implementation that returns NotSupported. + // Implementations should override this method when they are enabld. + Status Compress(const CompressionInfo& /*info*/, const Slice& /*input*/, + std::string* /*output*/) override { + return Status::NotSupported("Compaction library not available ", GetId()); + } + + // Default implementation that returns NotSupported. + // Implementations should override this method when they are enabld. + Status Uncompress(const UncompressionInfo& /*info*/, const char* /*input*/, + size_t /*input_length*/, char** /*output*/, + size_t* /*output_length*/) override { + return Status::NotSupported("Compaction library not available ", GetId()); + } + + uint32_t GetParallelThreads() const override { + return compression_opts_.parallel_threads; + } + + int GetMaxCompressedBytesPerKb() const override { + return compression_opts_.max_compressed_bytes_per_kb; + } + + protected: + // Method to match the input CompressionOptions to those for this Builtin. 
+ // Only compares the values from opts that are required by this + // implementation. + virtual bool MatchesOptions(const CompressionOptions& opts) const; + CompressionOptions compression_opts_; +}; + +// A BuiltinCompressor that supports a Compression Dictionary +class BuiltinDictCompressor : public BuiltinCompressor { + public: + BuiltinDictCompressor(); + static const char* kClassName() { return "BuiltinDictCompressor"; } + bool IsInstanceOf(const std::string& id) const override; + + protected: + uint64_t GetMaxDictBufferBytes() const override { + return compression_opts_.max_dict_buffer_bytes; + } + uint32_t GetMaxDictBytes() const override { + return compression_opts_.max_dict_bytes; + } + + uint32_t GetMaxTrainBytes() const override { + return compression_opts_.zstd_max_train_bytes; + } + + bool UseDictTrainer() const override { + return compression_opts_.use_zstd_dict_trainer; + } + + int GetLevel() const override { return compression_opts_.level; } + + bool MatchesOptions(const CompressionOptions& opts) const override; +}; + +// Class with Options to be passed to Compressor::Compress +class CompressionInfo { + const CompressionDict& dict_; + const uint32_t compress_format_version_ = 2; + const uint64_t sample_for_compression_ = 0; + + public: + CompressionInfo(uint64_t _sample_for_compression = 0) + : dict_(CompressionDict::GetEmptyDict()), + sample_for_compression_(_sample_for_compression) {} + + explicit CompressionInfo(const CompressionDict& _dict, + uint32_t _compress_format_version = 2, + uint64_t _sample_for_compression = 0) + : dict_(_dict), + compress_format_version_(_compress_format_version), + sample_for_compression_(_sample_for_compression) {} + + const CompressionDict& dict() const { return dict_; } + uint32_t CompressFormatVersion() const { return compress_format_version_; } + uint64_t SampleForCompression() const { return sample_for_compression_; } + + inline bool CompressData(Compressor* compressor, const Slice& raw, + std::string* 
compressed) const { + bool ret = false; + + // Will return compressed block contents if (1) the compression method is + // supported in this platform and (2) the compression rate is "good enough". + if (compressor == nullptr) { + ret = false; + } else { + Status s = compressor->Compress(*this, raw, compressed); + ret = s.ok(); + } + + TEST_SYNC_POINT_CALLBACK("CompressData:TamperWithReturnValue", + static_cast(&ret)); + + return ret; + } +}; + +// Class with Options to be passed to Compressor::Uncompress +class UncompressionInfo { + const UncompressionDict& dict_; + const uint32_t compress_format_version_ = 2; + MemoryAllocator* allocator_ = nullptr; + + public: + UncompressionInfo() : dict_(UncompressionDict::GetEmptyDict()) {} + explicit UncompressionInfo(const UncompressionDict& _dict, + uint32_t _compress_format_version = 2, + MemoryAllocator* _allocator = nullptr) + : dict_(_dict), + compress_format_version_(_compress_format_version), + allocator_(_allocator) {} + + const UncompressionDict& dict() const { return dict_; } + uint32_t CompressFormatVersion() const { return compress_format_version_; } + MemoryAllocator* GetMemoryAllocator() const { return allocator_; } + + inline CacheAllocationPtr UncompressData( + Compressor* compressor, const char* compressed, size_t compressed_size, + size_t* uncompressed_size, Status* uncompress_status = nullptr) const { + if (compressor == nullptr) { + return CacheAllocationPtr(); + } + + char* uncompressed_data; + Status s = compressor->Uncompress(*this, compressed, compressed_size, + &uncompressed_data, uncompressed_size); + if (uncompress_status != nullptr) { + *uncompress_status = s; + } + if (!s.ok()) { + return CacheAllocationPtr(); + } + CacheAllocationPtr ubuf(uncompressed_data, allocator_); + return ubuf; + } +}; + +} // namespace ROCKSDB_NAMESPACE diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 0347011368..d241cff872 100644 --- a/utilities/blob_db/blob_db_impl.cc +++
b/utilities/blob_db/blob_db_impl.cc @@ -43,10 +43,6 @@ #include "utilities/blob_db/blob_db_iterator.h" #include "utilities/blob_db/blob_db_listener.h" -namespace { -int kBlockBasedTableVersionFormat = 2; -} // end namespace - namespace ROCKSDB_NAMESPACE { namespace blob_db { @@ -1147,12 +1143,14 @@ Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, } StopWatch compression_sw(clock_, statistics_, BLOB_DB_COMPRESSION_MICROS); CompressionType type = bdb_options_.compression; - CompressionOptions opts; - CompressionContext context(type, opts); - CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(), type, - 0 /* sample_for_compression */); - CompressBlock(raw, info, &type, kBlockBasedTableVersionFormat, false, - compression_output, nullptr, nullptr); + auto compressor = + BuiltinCompressor::GetCompressor(type, CompressionOptions()); + if (!compressor) { + ROCKS_LOG_ERROR(db_options_.info_log, "Failed to create compressor."); + } + CompressionInfo info; + CompressBlock(compressor.get(), raw, info, &type, false, compression_output, + nullptr, nullptr); return *compression_output; } @@ -1167,12 +1165,11 @@ Status BlobDBImpl::DecompressSlice(const Slice& compressed_value, { StopWatch decompression_sw(clock_, statistics_, BLOB_DB_DECOMPRESSION_MICROS); - UncompressionContext context(compression_type); - UncompressionInfo info(context, UncompressionDict::GetEmptyDict(), - compression_type); + auto compressor = BuiltinCompressor::GetCompressor(compression_type); + UncompressionInfo info; Status s = UncompressBlockData( - info, compressed_value.data(), compressed_value.size(), &contents, - kBlockBasedTableVersionFormat, *(cfh->cfd()->ioptions())); + compressor.get(), info, compressed_value.data(), + compressed_value.size(), &contents, *(cfh->cfd()->ioptions())); if (!s.ok()) { return Status::Corruption("Unable to decompress blob."); } diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc index 0c2fef5e15..a3f804b656 100644 --- 
a/utilities/blob_db/blob_dump_tool.cc +++ b/utilities/blob_db/blob_dump_tool.cc @@ -56,8 +56,8 @@ Status BlobDumpTool::Run(const std::string& filename, DisplayType show_key, reader_.reset(new RandomAccessFileReader(std::move(file), filename)); uint64_t offset = 0; uint64_t footer_offset = 0; - CompressionType compression = kNoCompression; - s = DumpBlobLogHeader(&offset, &compression); + std::shared_ptr compressor; + s = DumpBlobLogHeader(&offset, &compressor); if (!s.ok()) { return s; } @@ -72,7 +72,7 @@ Status BlobDumpTool::Run(const std::string& filename, DisplayType show_key, if (show_key != DisplayType::kNone || show_summary) { while (offset < footer_offset) { s = DumpRecord(show_key, show_blob, show_uncompressed_blob, show_summary, - compression, &offset, &total_records, &total_key_size, + compressor.get(), &offset, &total_records, &total_key_size, &total_blob_size, &total_uncompressed_blob_size); if (!s.ok()) { break; @@ -84,7 +84,7 @@ Status BlobDumpTool::Run(const std::string& filename, DisplayType show_key, fprintf(stdout, " total records: %" PRIu64 "\n", total_records); fprintf(stdout, " total key size: %" PRIu64 "\n", total_key_size); fprintf(stdout, " total blob size: %" PRIu64 "\n", total_blob_size); - if (compression != kNoCompression) { + if (compressor->GetCompressionType() != kNoCompression) { fprintf(stdout, " total raw blob size: %" PRIu64 "\n", total_uncompressed_blob_size); } @@ -113,8 +113,8 @@ Status BlobDumpTool::Read(uint64_t offset, size_t size, Slice* result) { return s; } -Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset, - CompressionType* compression) { +Status BlobDumpTool::DumpBlobLogHeader( + uint64_t* offset, std::shared_ptr* compressor) { Slice slice; Status s = Read(0, BlobLogHeader::kSize, &slice); if (!s.ok()) { @@ -139,7 +139,7 @@ Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset, fprintf(stdout, " Expiration range : %s\n", GetString(header.expiration_range).c_str()); *offset = BlobLogHeader::kSize; - 
*compression = header.compression; + *compressor = BuiltinCompressor::GetCompressor(header.compression); return s; } @@ -173,7 +173,7 @@ Status BlobDumpTool::DumpBlobLogFooter(uint64_t file_size, Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, DisplayType show_uncompressed_blob, - bool show_summary, CompressionType compression, + bool show_summary, Compressor* compressor, uint64_t* offset, uint64_t* total_records, uint64_t* total_key_size, uint64_t* total_blob_size, @@ -206,15 +206,13 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, } // Decompress value std::string uncompressed_value; - if (compression != kNoCompression && + if (compressor->GetCompressionType() != kNoCompression && (show_uncompressed_blob != DisplayType::kNone || show_summary)) { BlockContents contents; - UncompressionContext context(compression); - UncompressionInfo info(context, UncompressionDict::GetEmptyDict(), - compression); - s = UncompressBlockData( - info, slice.data() + key_size, static_cast(value_size), - &contents, 2 /*compress_format_version*/, ImmutableOptions(Options())); + UncompressionInfo info; + s = UncompressBlockData(compressor, info, slice.data() + key_size, + static_cast(value_size), &contents, + ImmutableOptions(Options())); if (!s.ok()) { return s; } diff --git a/utilities/blob_db/blob_dump_tool.h b/utilities/blob_db/blob_dump_tool.h index 12cd1bf422..14fdbfec18 100644 --- a/utilities/blob_db/blob_dump_tool.h +++ b/utilities/blob_db/blob_dump_tool.h @@ -12,6 +12,7 @@ #include "file/random_access_file_reader.h" #include "rocksdb/slice.h" #include "rocksdb/status.h" +#include "util/compressor.h" namespace ROCKSDB_NAMESPACE { namespace blob_db { @@ -37,11 +38,12 @@ class BlobDumpTool { size_t buffer_size_; Status Read(uint64_t offset, size_t size, Slice* result); - Status DumpBlobLogHeader(uint64_t* offset, CompressionType* compression); + Status DumpBlobLogHeader(uint64_t* offset, + std::shared_ptr* compressor); 
Status DumpBlobLogFooter(uint64_t file_size, uint64_t* footer_offset); Status DumpRecord(DisplayType show_key, DisplayType show_blob, DisplayType show_uncompressed_blob, bool show_summary, - CompressionType compression, uint64_t* offset, + Compressor* compressor, uint64_t* offset, uint64_t* total_records, uint64_t* total_key_size, uint64_t* total_blob_size, uint64_t* total_uncompressed_blob_size);