Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Morgan/upstream fix #1561

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions service/src/daemon.rs
Original file line number Diff line number Diff line change
Expand Up @@ -422,13 +422,16 @@ impl DaemonController {
self.fs_service.lock().unwrap().clone()
}

/// Shutdown all services managed by the controller.
pub fn shutdown(&self) {
/// Notify the controller that a shutdown has been requested.
///
/// This only records the request and wakes the worker; it does not stop the
/// managed services itself (see `shutdown()` for that). Safe to call from a
/// signal-handler context, which is why it does minimal work.
pub fn notify_shutdown(&self) {
// Mark the controller as exiting; `Release` publishes the store to the
// thread that loads `active` with `Acquire` in its loop condition.
self.active.store(false, Ordering::Release);
// Signal the `run_loop()` working thread to exit. The wake result is
// deliberately ignored: there is nothing useful to do on failure here.
let _ = self.waker.wake();
}

/// Shutdown all services managed by the controller.
pub fn shutdown(&self) {
let daemon = self.daemon.lock().unwrap().take();
if let Some(d) = daemon {
if let Err(e) = d.trigger_stop() {
Expand Down
2 changes: 1 addition & 1 deletion smoke/tests/blobcache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) {
ctx.Binary.Builder, ctx.Env.BootstrapPath, blobcacheDir,
filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex())))

a.compareTwoFiles(t, filepath.Join(blobcacheDir, fmt.Sprintf("%s.blob.data", ociBlobDigest.Hex())), filepath.Join(ctx.Env.CacheDir, fmt.Sprintf("%s.blob.data", ociBlobDigest.Hex())))
a.compareTwoFiles(t, filepath.Join(blobcacheDir, fmt.Sprintf("%s.blob.data", ociBlobDigest.Hex())), filepath.Join(ctx.Env.CacheDir, ociBlobDigest.Hex()))
a.compareTwoFiles(t, filepath.Join(blobcacheDir, fmt.Sprintf("%s.blob.meta", ociBlobDigest.Hex())), filepath.Join(ctx.Env.CacheDir, fmt.Sprintf("%s.blob.meta", ociBlobDigest.Hex())))
}

Expand Down
2 changes: 1 addition & 1 deletion src/bin/nydusd/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -720,7 +720,7 @@ mod nbd {
}

extern "C" fn sig_exit(_sig: std::os::raw::c_int) {
DAEMON_CONTROLLER.shutdown();
DAEMON_CONTROLLER.notify_shutdown();
}

fn main() -> Result<()> {
Expand Down
20 changes: 17 additions & 3 deletions storage/src/cache/cachedfile.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1662,6 +1662,20 @@ impl FileIoMergeState {
tag: BlobIoTag,
chunk: Option<Arc<dyn BlobChunkInfo>>,
) -> Result<()> {
// Make sure user io of same region continuous
if !self.regions.is_empty() && self.joinable(region_type) {
let region = &self.regions[self.regions.len() - 1];
if !region.seg.is_empty() && tag.is_user_io() {
if let BlobIoTag::User(ref seg) = tag {
if seg.offset as u64 + start
!= region.blob_address + region.seg.offset as u64 + region.seg.len as u64
{
self.commit();
}
}
}
}

if self.regions.is_empty() || !self.joinable(region_type) {
self.regions.push(Region::new(region_type));
self.last_region_joinable = true;
Expand Down Expand Up @@ -1793,7 +1807,7 @@ mod tests {

let tag = BlobIoTag::User(BlobIoSegment {
offset: 0x1800,
len: 0x1800,
len: 0x800,
});
state
.push(RegionType::CacheFast, 0x1000, 0x2000, tag, None)
Expand All @@ -1810,8 +1824,8 @@ mod tests {
assert_eq!(state.regions.len(), 1);

let tag = BlobIoTag::User(BlobIoSegment {
offset: 0x0000,
len: 0x2000,
offset: 0x0001,
len: 0x1fff,
});
state
.push(RegionType::CacheSlow, 0x5000, 0x2000, tag, None)
Expand Down
4 changes: 2 additions & 2 deletions storage/src/cache/filecache/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ use crate::cache::{BlobCache, BlobCacheMgr};
use crate::device::{BlobFeatures, BlobInfo};

pub const BLOB_RAW_FILE_SUFFIX: &str = ".blob.raw";
pub const BLOB_DATA_FILE_SUFFIX: &str = ".blob.data";
pub const BLOB_DATA_FILE_SUFFIX: &str = "";

/// An implementation of [BlobCacheMgr](../trait.BlobCacheMgr.html) to improve performance by
/// caching uncompressed blob with local storage.
Expand Down Expand Up @@ -257,7 +257,7 @@ impl FileCacheEntry {
} else {
blob_info.uncompressed_size()
};
if file_size == 0 {
if file_size == 0 || file_size < cached_file_size {
file.set_len(cached_file_size)?;
} else if cached_file_size != 0 && file_size != cached_file_size {
let msg = format!(
Expand Down
51 changes: 24 additions & 27 deletions storage/src/cache/state/persist_map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,36 +111,33 @@ impl PersistMap {
}

let header = filemap.get_mut::<Header>(0)?;
let mut not_ready_count = chunk_count;
if header.version >= 1 {
if header.magic2 != MAGIC2 {
return Err(einval!(format!(
"invalid blob chunk_map file header: {:?}",
filename
)));
if header.version >= 1 && header.magic2 != MAGIC2 {
return Err(einval!(format!(
"invalid blob chunk_map file header: {:?}",
filename
)));
}
let not_ready_count = if new_content {
chunk_count
} else if header.version >= 1 && header.all_ready == MAGIC_ALL_READY {
0
} else {
let mut ready_count = 0;
for idx in HEADER_SIZE..expected_size as usize {
let current = filemap.get_ref::<AtomicU8>(idx)?;
let val = current.load(Ordering::Acquire);
ready_count += val.count_ones() as u32;
}
if header.all_ready == MAGIC_ALL_READY {
not_ready_count = 0;
} else if new_content {
not_ready_count = chunk_count;
} else {
let mut ready_count = 0;
for idx in HEADER_SIZE..expected_size as usize {
let current = filemap.get_ref::<AtomicU8>(idx)?;
let val = current.load(Ordering::Acquire);
ready_count += val.count_ones() as u32;
}

if ready_count >= chunk_count {
let header = filemap.get_mut::<Header>(0)?;
header.all_ready = MAGIC_ALL_READY;
let _ = file.sync_all();
not_ready_count = 0;
} else {
not_ready_count = chunk_count - ready_count;
}
if ready_count >= chunk_count {
let header = filemap.get_mut::<Header>(0)?;
header.all_ready = MAGIC_ALL_READY;
let _ = file.sync_all();
0
} else {
chunk_count - ready_count
}
}
};

readahead(file.as_raw_fd(), 0, expected_size);
if !persist {
Expand Down
Loading