Extended lfs_fs_gc to compact metadata, compact_thresh
This extends lfs_fs_gc to handle three things:

1. Calls mkconsistent if not already consistent
2. Compacts metadata > compact_thresh
3. Populates the block allocator

This should be all of the janitorial work that can be done without
additional on-disk data structures.
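
As a rough sketch, offloading this work to an idle path might look like
the following (the idle hook name is a placeholder, and the lfs_t is
assumed to be mounted elsewhere):

    // sketch: run littlefs's janitorial work while the system is idle,
    // so time-critical writes don't have to pay for it later
    void app_on_idle(lfs_t *lfs) {
        // one call covers mkconsistent, metadata compaction, and the
        // block allocator scan
        int err = lfs_fs_gc(lfs);
        if (err) {
            // accomplishing nothing is not an error, so any error here
            // indicates a real filesystem problem
        }
    }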

Normally, metadata compaction occurs when an mdir is full, and results in
mdirs that are at most block_size/2.

Now, if you call lfs_fs_gc, littlefs will eagerly compact any mdirs that
exceed the compact_thresh configuration option. Because the resulting
mdirs are at most block_size/2, it only makes sense for compact_thresh to
be >= block_size/2 and <= block_size.

Additionally, there are some special values:

- compact_thresh=0  => defaults to ~88% of block_size; this default may change
- compact_thresh=-1 => disables metadata compaction during lfs_fs_gc

Note that compact_thresh only affects lfs_fs_gc. Normal compactions
still only occur when full.
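
As a rough sketch, opting in might look like this (geometry values are
placeholders, not recommendations; block device hooks and static buffers
are elided):

    const struct lfs_config cfg = {
        // .read = ..., .prog = ..., .erase = ..., .sync = ...,
        .read_size      = 16,
        .prog_size      = 16,
        .block_size     = 4096,
        .block_count    = 256,
        .block_cycles   = 500,
        .cache_size     = 64,
        .lookahead_size = 16,

        // eagerly compact any mdir more than half full during lfs_fs_gc;
        // must be 0, -1, or in [block_size/2, block_size]
        .compact_thresh = 4096/2,
    };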
geky committed Jan 19, 2024
1 parent 6056767 commit b5cd957
Showing 7 changed files with 139 additions and 48 deletions.
106 changes: 83 additions & 23 deletions lfs.c
@@ -593,19 +593,6 @@ static int lfs_rawunmount(lfs_t *lfs);


/// Block allocator ///
#ifndef LFS_READONLY
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
lfs_t *lfs = (lfs_t*)p;
lfs_block_t off = ((block - lfs->lookahead.start)
+ lfs->block_count) % lfs->block_count;

if (off < lfs->lookahead.size) {
lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
}

return 0;
}
#endif

// allocations should call this when all allocated blocks are committed to
// the filesystem
@@ -624,7 +611,21 @@ static void lfs_alloc_drop(lfs_t *lfs) {
}

#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
lfs_t *lfs = (lfs_t*)p;
lfs_block_t off = ((block - lfs->lookahead.start)
+ lfs->block_count) % lfs->block_count;

if (off < lfs->lookahead.size) {
lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
}

return 0;
}
#endif

#ifndef LFS_READONLY
static int lfs_alloc_scan(lfs_t *lfs) {
// move lookahead buffer to the first unused block
//
// note we limit the lookahead buffer to at most the amount of blocks
@@ -693,7 +694,7 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {

// No blocks in our lookahead buffer, we need to scan the filesystem for
// unused blocks in the next lookahead window.
int err = lfs_fs_rawgc(lfs);
int err = lfs_alloc_scan(lfs);
if (err) {
return err;
}
@@ -4172,6 +4173,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
// wear-leveling.
LFS_ASSERT(lfs->cfg->block_cycles != 0);

// check that compact_thresh makes sense
//
// metadata can't be compacted below block_size/2, and metadata can't
// exceed a block_size
LFS_ASSERT(lfs->cfg->compact_thresh == 0
|| lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
|| lfs->cfg->compact_thresh <= lfs->cfg->block_size);

// setup read cache
if (lfs->cfg->read_buffer) {
@@ -5063,6 +5072,57 @@ static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
return size;
}

// explicit garbage collection
#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
// force consistency, even if we're not necessarily going to write,
// because this function is supposed to take care of janitorial work
// isn't it?
int err = lfs_fs_forceconsistency(lfs);
if (err) {
return err;
}

// try to compact metadata pairs, note we can't really accomplish
// anything if compact_thresh doesn't at least leave a prog_size
// available
if (lfs->cfg->compact_thresh
< lfs->cfg->block_size - lfs->cfg->prog_size) {
// iterate over all mdirs
lfs_mdir_t mdir = {.tail = {0, 1}};
while (!lfs_pair_isnull(mdir.tail)) {
err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
if (err) {
return err;
}

// not erased? exceeds our compaction threshold?
if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
: mdir.off > lfs->cfg->compact_thresh)) {
// the easiest way to trigger a compaction is to mark
// the mdir as unerased and add an empty commit
mdir.erased = false;
err = lfs_dir_commit(lfs, &mdir, NULL, 0);
if (err) {
return err;
}
}
}
}

// try to populate the lookahead buffer, unless it's already full
if (lfs->lookahead.size < 8*lfs->cfg->lookahead_size) {
err = lfs_alloc_scan(lfs);
if (err) {
return err;
}
}

return 0;
}
#endif

#ifndef LFS_READONLY
static int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
// shrinking is not supported
@@ -6269,32 +6329,32 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
}

#ifndef LFS_READONLY
int lfs_fs_gc(lfs_t *lfs) {
int lfs_fs_mkconsistent(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);

err = lfs_fs_rawgc(lfs);
err = lfs_fs_rawmkconsistent(lfs);

LFS_TRACE("lfs_fs_gc -> %d", err);
LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
#endif

#ifndef LFS_READONLY
int lfs_fs_mkconsistent(lfs_t *lfs) {
int lfs_fs_gc(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);

err = lfs_fs_rawmkconsistent(lfs);
err = lfs_fs_rawgc(lfs);

LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
LFS_TRACE("lfs_fs_gc -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
41 changes: 29 additions & 12 deletions lfs.h
@@ -229,6 +229,17 @@ struct lfs_config {
// can track 8 blocks.
lfs_size_t lookahead_size;

// Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
// pairs that exceed this threshold will be compacted during lfs_fs_gc.
// Defaults to ~88% block_size when zero, though the default may change
// in the future.
//
// Note this only affects lfs_fs_gc. Normal compactions still only occur
// when full.
//
// Set to -1 to disable metadata compaction during lfs_fs_gc.
lfs_size_t compact_thresh;

// Optional statically allocated read buffer. Must be cache_size.
// By default lfs_malloc is used to allocate this buffer.
void *read_buffer;
@@ -711,18 +722,6 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
// Returns a negative error code on failure.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);

// Attempt to proactively find free blocks
//
// Calling this function is not required, but may allowing the offloading of
// the expensive block allocation scan to a less time-critical code path.
//
// Note: littlefs currently does not persist any found free blocks to disk.
// This may change in the future.
//
// Returns a negative error code on failure. Finding no free blocks is
// not an error.
int lfs_fs_gc(lfs_t *lfs);

#ifndef LFS_READONLY
// Attempt to make the filesystem consistent and ready for writing
//
@@ -735,6 +734,24 @@ int lfs_fs_gc(lfs_t *lfs);
int lfs_fs_mkconsistent(lfs_t *lfs);
#endif

#ifndef LFS_READONLY
// Attempt any janitorial work
//
// This currently:
// 1. Calls mkconsistent if not already consistent
// 2. Compacts metadata > compact_thresh
// 3. Populates the block allocator
//
// Though additional janitorial work may be added in the future.
//
// Calling this function is not required, but may allow the offloading of
// expensive janitorial work to a less time-critical code path.
//
// Returns a negative error code on failure. Accomplishing nothing is not
// an error.
int lfs_fs_gc(lfs_t *lfs);
#endif

#ifndef LFS_READONLY
// Grows the filesystem to a new size, updating the superblock with the new
// block count.
1 change: 1 addition & 0 deletions runners/bench_runner.c
@@ -1321,6 +1321,7 @@ void perm_run(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
};

struct lfs_emubd_config bdcfg = {
15 changes: 9 additions & 6 deletions runners/bench_runner.h
@@ -95,11 +95,12 @@ intmax_t bench_define(size_t define);
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define COMPACT_THRESH_i 8
#define BLOCK_CYCLES_i 9
#define ERASE_VALUE_i 10
#define ERASE_CYCLES_i 11
#define BADBLOCK_BEHAVIOR_i 12
#define POWERLOSS_BEHAVIOR_i 13

#define READ_SIZE bench_define(READ_SIZE_i)
#define PROG_SIZE bench_define(PROG_SIZE_i)
@@ -109,6 +110,7 @@ intmax_t bench_define(size_t define);
#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
#define CACHE_SIZE bench_define(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
#define COMPACT_THRESH bench_define(COMPACT_THRESH_i)
#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
#define ERASE_VALUE bench_define(ERASE_VALUE_i)
#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
@@ -124,14 +126,15 @@ intmax_t bench_define(size_t define);
BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\
BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
BENCH_DEF(LOOKAHEAD_SIZE, 16) \
BENCH_DEF(COMPACT_THRESH, 0) \
BENCH_DEF(BLOCK_CYCLES, -1) \
BENCH_DEF(ERASE_VALUE, 0xff) \
BENCH_DEF(ERASE_CYCLES, 0) \
BENCH_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)

#define BENCH_GEOMETRY_DEFINE_COUNT 4
#define BENCH_IMPLICIT_DEFINE_COUNT 13
#define BENCH_IMPLICIT_DEFINE_COUNT 14


#endif
5 changes: 5 additions & 0 deletions runners/test_runner.c
@@ -1346,6 +1346,7 @@ static void run_powerloss_none(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1422,6 +1423,7 @@ static void run_powerloss_linear(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1515,6 +1517,7 @@ static void run_powerloss_log(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1606,6 +1609,7 @@ static void run_powerloss_cycles(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
@@ -1795,6 +1799,7 @@ static void run_powerloss_exhaustive(
.block_cycles = BLOCK_CYCLES,
.cache_size = CACHE_SIZE,
.lookahead_size = LOOKAHEAD_SIZE,
.compact_thresh = COMPACT_THRESH,
#ifdef LFS_MULTIVERSION
.disk_version = DISK_VERSION,
#endif
17 changes: 10 additions & 7 deletions runners/test_runner.h
@@ -88,12 +88,13 @@ intmax_t test_define(size_t define);
#define BLOCK_COUNT_i 5
#define CACHE_SIZE_i 6
#define LOOKAHEAD_SIZE_i 7
#define BLOCK_CYCLES_i 8
#define ERASE_VALUE_i 9
#define ERASE_CYCLES_i 10
#define BADBLOCK_BEHAVIOR_i 11
#define POWERLOSS_BEHAVIOR_i 12
#define DISK_VERSION_i 13
#define COMPACT_THRESH_i 8
#define BLOCK_CYCLES_i 9
#define ERASE_VALUE_i 10
#define ERASE_CYCLES_i 11
#define BADBLOCK_BEHAVIOR_i 12
#define POWERLOSS_BEHAVIOR_i 13
#define DISK_VERSION_i 14

#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
@@ -103,6 +104,7 @@ intmax_t test_define(size_t define);
#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
#define COMPACT_THRESH TEST_DEFINE(COMPACT_THRESH_i)
#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
@@ -119,6 +121,7 @@ intmax_t test_define(size_t define);
TEST_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
TEST_DEF(LOOKAHEAD_SIZE, 16) \
TEST_DEF(COMPACT_THRESH, 0) \
TEST_DEF(BLOCK_CYCLES, -1) \
TEST_DEF(ERASE_VALUE, 0xff) \
TEST_DEF(ERASE_CYCLES, 0) \
Expand All @@ -127,7 +130,7 @@ intmax_t test_define(size_t define);
TEST_DEF(DISK_VERSION, 0)

#define TEST_GEOMETRY_DEFINE_COUNT 4
#define TEST_IMPLICIT_DEFINE_COUNT 14
#define TEST_IMPLICIT_DEFINE_COUNT 15


#endif
2 changes: 2 additions & 0 deletions tests/test_alloc.toml
@@ -7,6 +7,7 @@ if = 'BLOCK_CYCLES == -1'
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
code = '''
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
@@ -60,6 +61,7 @@ code = '''
defines.FILES = 3
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
defines.GC = [false, true]
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
code = '''
const char *names[] = {"bacon", "eggs", "pancakes"};

