Allocate a cell from a freeblock
This corresponds to allocateSpace() in SQLite's btree.c.
kawasin73 committed Oct 23, 2023
1 parent 8993f0a commit a2d863b
Showing 2 changed files with 147 additions and 18 deletions.
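For context, a b-tree page tracks its free space in three forms: the unallocated gap between the cell pointer array and the cell content area, a singly linked chain of freeblocks threaded through the content area (each freeblock is at least 4 bytes: a 2-byte big-endian offset of the next freeblock, then a 2-byte size), and up to 60 fragmented bytes too small to become freeblocks, counted in header byte 7. Below is a self-contained sketch of that bookkeeping; the header offsets follow the SQLite file format, but every concrete value is fabricated for illustration:

```rust
fn main() {
    let mut page = vec![0u8; 512];
    // Page header: bytes 1-2 = first freeblock, 3-4 = cell count,
    // 5-6 = start of the cell content area, 7 = fragmented free bytes.
    page[1..3].copy_from_slice(&300u16.to_be_bytes());
    page[3..5].copy_from_slice(&2u16.to_be_bytes());
    page[5..7].copy_from_slice(&200u16.to_be_bytes());
    page[7] = 2;

    // A single freeblock at offset 300: next = 0 (end of chain), size = 16.
    page[300..302].copy_from_slice(&0u16.to_be_bytes());
    page[302..304].copy_from_slice(&16u16.to_be_bytes());

    // Total free space = unallocated gap + freeblock sizes + fragmented bytes.
    let header_size = 8; // leaf page header
    let n_cells = u16::from_be_bytes(page[3..5].try_into().unwrap()) as usize;
    let gap_start = header_size + 2 * n_cells; // end of the cell pointer array
    let content_start = u16::from_be_bytes(page[5..7].try_into().unwrap()) as usize;
    let free = (content_start - gap_start) + 16 + page[7] as usize;
    assert_eq!(free, 188 + 16 + 2);
}
```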
src/btree.rs: 47 additions & 0 deletions
@@ -152,6 +152,53 @@ impl<'a> BtreePageHeaderMut<'a> {
pub fn set_cell_content_area_offset(&mut self, offset: u16) {
self.0[5..7].copy_from_slice(offset.to_be_bytes().as_slice());
}

pub fn set_first_freeblock_offset(&mut self, offset: [u8; 2]) {
self.0[1..3].copy_from_slice(&offset);
}

pub fn add_fragmented_free_bytes(&mut self, size: u8) {
self.0[7] += size;
}
}
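The two setters added above write straight into the page header as laid out by the SQLite file format: bytes 1-2 hold the big-endian offset of the first freeblock (zero when the chain is empty) and byte 7 counts fragmented free bytes. A minimal stand-alone sketch of the same byte-level behavior; HeaderMut is a hypothetical stand-in for BtreePageHeaderMut:

```rust
struct HeaderMut<'a>(&'a mut [u8]);

impl<'a> HeaderMut<'a> {
    fn set_first_freeblock_offset(&mut self, offset: [u8; 2]) {
        self.0[1..3].copy_from_slice(&offset); // bytes 1-2: first freeblock
    }
    fn add_fragmented_free_bytes(&mut self, size: u8) {
        self.0[7] += size; // byte 7: fragmented bytes, capped at 60 by callers
    }
}

fn main() {
    let mut page = [0u8; 12];
    let mut header = HeaderMut(&mut page);
    header.set_first_freeblock_offset(100u16.to_be_bytes());
    header.add_fragmented_free_bytes(3);
    assert_eq!(u16::from_be_bytes(page[1..3].try_into().unwrap()), 100);
    assert_eq!(page[7], 3);
}
```

Taking the offset as [u8; 2] rather than u16 lets a caller forward the two raw bytes read out of a freeblock without decoding them first, which the allocation code in cursor.rs takes advantage of.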

pub struct FreeblockIterator<'a> {
offset: usize,
buffer: &'a [u8],
}

impl<'a> FreeblockIterator<'a> {
pub fn new(first_freeblock_offset: usize, buffer: &'a [u8]) -> Self {
Self {
offset: first_freeblock_offset,
buffer,
}
}
}

impl<'a> Iterator for FreeblockIterator<'a> {
type Item = (usize, u16);

fn next(&mut self) -> Option<Self::Item> {
if self.offset == 0 {
None
} else {
assert!(self.offset + 4 <= self.buffer.len());
// The first 2 bytes of a freeblock hold the offset of the next freeblock
// (zero terminates the chain); the following 2 bytes hold this freeblock's
// size. Yield the current freeblock, then advance to the next one.
let offset = self.offset;
self.offset = u16::from_be_bytes(
self.buffer[offset..offset + 2]
.try_into()
.unwrap(),
) as usize;
let size = u16::from_be_bytes(
self.buffer[offset + 2..offset + 4]
.try_into()
.unwrap(),
);

Some((offset, size))
}
}
}
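Each iteration yields the current freeblock's own offset together with its size and stops at the zero terminator, so callers can both sum free bytes and patch the chain in place. A test-style sketch that could sit next to this iterator; the page bytes are fabricated:

```rust
#[test]
fn freeblock_iterator_walks_the_chain() {
    let mut page = [0u8; 64];
    // Chain: freeblock at 20 (size 8) -> freeblock at 40 (size 12) -> end.
    page[20..22].copy_from_slice(&40u16.to_be_bytes());
    page[22..24].copy_from_slice(&8u16.to_be_bytes());
    page[40..42].copy_from_slice(&0u16.to_be_bytes());
    page[42..44].copy_from_slice(&12u16.to_be_bytes());

    let blocks: Vec<(usize, u16)> = FreeblockIterator::new(20, &page).collect();
    assert_eq!(blocks, vec![(20, 8), (40, 12)]);

    // Free bytes contributed by freeblocks, as computed in cursor.rs.
    let total: usize = blocks.iter().map(|(_, size)| *size as usize).sum();
    assert_eq!(total, 20);
}
```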

pub struct TableCellKeyParser<'a> {
src/cursor.rs: 100 additions & 18 deletions
@@ -23,6 +23,7 @@ use crate::btree::BtreeContext;
use crate::btree::BtreePageHeader;
use crate::btree::BtreePageHeaderMut;
use crate::btree::BtreePageType;
use crate::btree::FreeblockIterator;
use crate::btree::IndexCellKeyParser;
use crate::btree::PayloadInfo;
use crate::btree::TableCellKeyParser;
@@ -459,9 +460,6 @@ impl<'a> BtreeCursor<'a> {
let buffer = self.current_page.mem.buffer();
let page_header = BtreePageHeader::from_page(&self.current_page.mem, &buffer);

- // TODO: Support freeblock.
- assert_eq!(page_header.first_freeblock_offset(), 0);
- assert_eq!(page_header.fragmented_free_bytes(), 0);
let header_size = page_header.header_size();
let unallocated_space_offset = cell_pointer_offset(
&self.current_page.mem,
@@ -475,7 +473,17 @@
bail!("invalid cell content area offset");
}

- let free_size = cell_content_area_offset - unallocated_space_offset;
let unallocated_size = cell_content_area_offset - unallocated_space_offset;
let first_freeblock_offset = page_header.first_freeblock_offset() as usize;
let fragmented_free_bytes = page_header.fragmented_free_bytes() as usize;

// TODO: Cache free size.
let mut free_size = unallocated_size;
for (_, size) in FreeblockIterator::new(first_freeblock_offset, &buffer) {
free_size += size as usize;
}
free_size += fragmented_free_bytes;

if free_size < cell_size + 2 {
// TODO: balance the btree.
todo!("balance the btree");
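A new cell needs cell_size bytes of content plus a 2-byte slot in the cell pointer array, hence cell_size + 2 here and in the checks below. The overall strategy mirrors SQLite's allocateSpace() and can be summarized as a decision ladder; this sketch uses illustrative names, not this crate's API:

```rust
#[derive(Debug, PartialEq)]
enum Plan {
    Balance,       // not enough total free space: balance the b-tree
    FromFreeblock, // 1. reuse a freeblock that is big enough
    Defragment,    // 2. enough total space, but the gap alone is too small
    FromGap,       // 3. carve the cell from the unallocated gap
}

fn plan(free: usize, gap: usize, freeblock_fits: bool, cell: usize) -> Plan {
    if free < cell + 2 {
        Plan::Balance
    } else if freeblock_fits {
        Plan::FromFreeblock
    } else if gap < cell + 2 {
        Plan::Defragment
    } else {
        Plan::FromGap
    }
}

fn main() {
    assert_eq!(plan(5, 5, false, 10), Plan::Balance);
    assert_eq!(plan(40, 4, true, 10), Plan::FromFreeblock);
    assert_eq!(plan(40, 4, false, 10), Plan::Defragment);
    assert_eq!(plan(40, 30, false, 10), Plan::FromGap);
}
```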
@@ -492,24 +500,97 @@
// which is mutable method.
let mut buffer = self.pager.make_page_mut(&self.current_page.mem)?;

- // TODO: allocateSpace().
// Allocate space
//
// 1. Search freeblock first.
// 2. Defragmentation if needed
// 3. Allocate space from unallocated space.
let allocated_offset = {
let mut allocated_offset = None;
// 1. Search freeblock first.
if unallocated_size >= 2
&& first_freeblock_offset != 0
// Total fragmented bytes must not exceed 60. Allocating from a freeblock can
// add up to 3 new fragmented bytes, so give up seeking freeblocks and try
// defragmentation when more than 57 bytes are already fragmented.
&& fragmented_free_bytes <= 57
{
let mut previous_freeblock_offset = 0;
// Collect the freeblocks up front so that the iterator's borrow of
// `buffer` ends before the loop body mutates `buffer`.
let freeblocks: Vec<(usize, u16)> =
FreeblockIterator::new(first_freeblock_offset, &buffer).collect();
for (freeblock_offset, size) in freeblocks {
let size = size as usize;
if size >= cell_size {
let new_freeblock_offset = if size < cell_size + 4 {
// The remainder is at most 3 bytes: too small for a freeblock.
let fragment_size = (size - cell_size) as u8;
assert!(fragment_size < 4);
BtreePageHeaderMut::from_page(
&self.current_page.mem,
&mut buffer,
)
.add_fragmented_free_bytes(fragment_size);

buffer[freeblock_offset..freeblock_offset + 2]
.try_into()
.unwrap()
} else {
// Split the freeblock.
let new_freeblock_offset = freeblock_offset + cell_size;
buffer.copy_within(
freeblock_offset..freeblock_offset + 2,
new_freeblock_offset,
);
let new_size = (size - cell_size) as u16;
buffer[new_freeblock_offset + 2..new_freeblock_offset + 4]
.copy_from_slice(&new_size.to_be_bytes());

(new_freeblock_offset as u16).to_be_bytes()
};
if previous_freeblock_offset == 0 {
BtreePageHeaderMut::from_page(
&self.current_page.mem,
&mut buffer,
)
.set_first_freeblock_offset(new_freeblock_offset);
} else {
buffer
[previous_freeblock_offset..previous_freeblock_offset + 2]
.copy_from_slice(&new_freeblock_offset);
}
allocated_offset = Some(freeblock_offset);
break;
}
previous_freeblock_offset = freeblock_offset;
}
}

- let offset = cell_content_area_offset - cell_size;
- // cell_content_area_offset is less than or equal to 65536. data is not empty.
- // The offset must be less than 65536 and safe to cast into u16.
- assert!(offset <= u16::MAX as usize);
if let Some(offset) = allocated_offset {
offset
} else {
// 2. Defragmentation if needed
if unallocated_size < cell_size + 2 {
todo!("defragmentation");
}

- // Update the page header.
// 3. Allocate space from unallocated space.
let allocated_offset = cell_content_area_offset - cell_size;
// cell_content_area_offset is less than or equal to 65536. data is not
// empty. The offset must be less than 65536 and
// safe to cast into u16.
assert!(allocated_offset <= u16::MAX as usize);

// New cell content area offset must not be less than the tail of cell
// pointers.
assert!(allocated_offset >= unallocated_space_offset + 2);
let mut page_header =
BtreePageHeaderMut::from_page(&self.current_page.mem, &mut buffer);
page_header.set_cell_content_area_offset(allocated_offset as u16);
allocated_offset
}
};
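Tracing the freeblock path with made-up numbers: a freeblock needs at least 4 bytes (next pointer plus size), so a 12-byte freeblock serving a 10-byte cell leaves a 2-byte remainder that cannot stay on the chain and is recorded as fragmented bytes instead, while a 20-byte freeblock is split and its trailing 10 bytes remain a freeblock. A toy demonstration of the split case:

```rust
fn main() {
    let mut page = [0u8; 64];
    // One freeblock at offset 30: next = 0 (end of chain), size = 20.
    page[30..32].copy_from_slice(&0u16.to_be_bytes());
    page[32..34].copy_from_slice(&20u16.to_be_bytes());

    let (freeblock_offset, size, cell_size) = (30usize, 20usize, 10usize);
    assert!(size >= cell_size + 4); // the remainder can stay a freeblock

    // Move the freeblock header past the newly allocated cell and shrink it.
    let new_freeblock_offset = freeblock_offset + cell_size;
    page.copy_within(freeblock_offset..freeblock_offset + 2, new_freeblock_offset);
    let new_size = (size - cell_size) as u16;
    page[new_freeblock_offset + 2..new_freeblock_offset + 4]
        .copy_from_slice(&new_size.to_be_bytes());

    // The cell now owns bytes 30..40; a 10-byte freeblock remains at 40.
    assert_eq!(u16::from_be_bytes(page[40..42].try_into().unwrap()), 0);
    assert_eq!(u16::from_be_bytes(page[42..44].try_into().unwrap()), 10);
}
```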

- self.current_page.n_cells += 1;
let mut page_header =
BtreePageHeaderMut::from_page(&self.current_page.mem, &mut buffer);
- // New cell content area offset must not be less than the tail of cell pointers.
- assert!(offset >= unallocated_space_offset + 2);
- // offset <= u16::MAX is asserted above.
- page_header.set_cell_content_area_offset(offset as u16);
self.current_page.n_cells += 1;
page_header.set_n_cells(self.current_page.n_cells);

// Update cell pointer.
@@ -530,11 +611,12 @@ impl<'a> BtreeCursor<'a> {
unallocated_space_offset
};
buffer[cell_pointer_offset..cell_pointer_offset + 2]
- .copy_from_slice(&(offset as u16).to_be_bytes());
.copy_from_slice(&(allocated_offset as u16).to_be_bytes());

// Copy payload to the btree page.
- let payload_offset = offset + cell_header_size;
- buffer[offset..payload_offset].copy_from_slice(&cell_header[..cell_header_size]);
let payload_offset = allocated_offset + cell_header_size;
buffer[allocated_offset..payload_offset]
.copy_from_slice(&cell_header[..cell_header_size]);
let payload_tail_offset = payload_offset + n_local;
buffer[payload_offset..payload_tail_offset].copy_from_slice(&payload[..n_local]);
if let Some(overflow_page_id) = overflow_page_id {
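The tail of the insertion writes the cell in two parts: a big-endian u16 entry in the cell pointer array pointing at the allocated offset, then the cell itself, i.e. the cell header varints followed by the local part of the payload (plus a 4-byte overflow page number when the payload spills). A fabricated, self-contained sketch of those writes for a table leaf cell:

```rust
fn main() {
    let mut buffer = vec![0u8; 128];
    let allocated_offset = 100usize; // where the cell content was allocated
    let cell_pointer_offset = 8usize; // next free slot in the pointer array

    // Cell pointers are big-endian u16 offsets into the page.
    buffer[cell_pointer_offset..cell_pointer_offset + 2]
        .copy_from_slice(&(allocated_offset as u16).to_be_bytes());

    // Table leaf cell header: payload-size varint (5), then rowid varint (7).
    let cell_header = [0x05u8, 0x07];
    let payload = b"hello";
    let payload_offset = allocated_offset + cell_header.len();
    buffer[allocated_offset..payload_offset].copy_from_slice(&cell_header);
    buffer[payload_offset..payload_offset + payload.len()].copy_from_slice(payload);

    assert_eq!(buffer[100..107], *b"\x05\x07hello");
}
```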
