Skip to content

Commit

Permalink
feat: enable zstd compression in merge tree data part to save memory
Browse files Browse the repository at this point in the history
  • Loading branch information
v0y4g3r committed Feb 25, 2024
1 parent e481f07 commit b060629
Showing 1 changed file with 16 additions and 9 deletions.
25 changes: 16 additions & 9 deletions src/mito2/src/memtable/merge_tree/data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,8 @@ use datatypes::vectors::{
};
use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder};
use parquet::arrow::ArrowWriter;
use parquet::file::properties::WriterProperties;
use parquet::basic::{Compression, ZstdLevel};
use parquet::file::properties::{EnabledStatistics, WriterProperties};
use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::consts::{OP_TYPE_COLUMN_NAME, SEQUENCE_COLUMN_NAME};
Expand Down Expand Up @@ -662,17 +663,23 @@ impl<'a> DataPartEncoder<'a> {
}
}

fn writer_props(&self) -> Option<WriterProperties> {
self.row_group_size.map(|size| {
WriterProperties::builder()
.set_max_row_group_size(size)
.build()
})
// todo(hl): more customized config according to region options.
fn writer_props(&self) -> WriterProperties {
    // Begin with parquet defaults, then layer on memtable-specific tuning.
    let base = WriterProperties::builder();
    // Honor the configured row-group size when one was provided.
    let sized = match self.row_group_size {
        Some(size) => base.set_max_row_group_size(size),
        None => base,
    };
    // Compress with zstd at its default level to shrink the in-memory part,
    // and drop per-column statistics entirely (they are not read back here).
    sized
        .set_compression(Compression::ZSTD(ZstdLevel::default()))
        .set_statistics_enabled(EnabledStatistics::None)
        .build()
}

pub fn write(&self, source: &mut DataBuffer) -> Result<DataPart> {
let mut bytes = Vec::with_capacity(1024);
let mut writer = ArrowWriter::try_new(&mut bytes, self.schema.clone(), self.writer_props())
.context(error::EncodeMemtableSnafu)?;
let mut writer =
ArrowWriter::try_new(&mut bytes, self.schema.clone(), Some(self.writer_props()))
.context(error::EncodeMemtableSnafu)?;
let rb = data_buffer_to_record_batches(
self.schema.clone(),
source,
Expand Down

0 comments on commit b060629

Please sign in to comment.