fix: correct some typos and grammatical problem #1578

Merged 1 commit on Jun 6, 2024
4 changes: 2 additions & 2 deletions api/src/config.rs
@@ -2299,7 +2299,7 @@ mod tests {
}

#[test]
-fn test_get_confg() {
+fn test_get_config() {
get_config("localdisk");
get_config("localfs");
get_config("oss");
@@ -2579,7 +2579,7 @@ mod tests {
}

#[test]
-fn test_bckend_config_try_from() {
+fn test_backend_config_try_from() {
let config = BackendConfig {
backend_type: "localdisk".to_string(),
backend_config: serde_json::to_value(LocalDiskConfig::default()).unwrap(),
4 changes: 2 additions & 2 deletions builder/src/compact.rs
@@ -53,7 +53,7 @@ pub struct Config {
/// we compact blobs whose size are less than compact_blob_size
#[serde(default = "default_compact_blob_size")]
compact_blob_size: usize,
-/// size of compacted blobs should not be large than max_compact_size
+/// size of compacted blobs should not be larger than max_compact_size
#[serde(default = "default_max_compact_size")]
max_compact_size: usize,
/// if number of blobs >= layers_to_compact, do compact
@@ -642,7 +642,7 @@ impl BlobCompactor {
return Ok(None);
}

-info!("compatctor: successfully compacted blob");
+info!("compactor: successfully compacted blob");
// blobs have already been dumped, dump bootstrap only
let blob_table = compactor.new_blob_mgr.to_blob_table(&build_ctx)?;
bootstrap.build(&mut build_ctx, &mut bootstrap_ctx)?;
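The three thresholds in this hunk work together: only blobs smaller than `compact_blob_size` are candidates, a merged blob must stay under `max_compact_size`, and compaction only kicks in once at least `layers_to_compact` blobs exist. A minimal sketch of how such a serde config could be declared and deserialized — the struct name, the JSON input, and the default numbers below are illustrative assumptions, not the builder's real `default_*` helpers:

```rust
use serde::Deserialize;

// Illustrative stand-in for the builder's compaction Config; the default
// values below are assumptions, not the crate's actual defaults.
#[derive(Debug, Deserialize)]
struct CompactConfig {
    /// Only blobs smaller than this are considered for compaction.
    #[serde(default = "default_compact_blob_size")]
    compact_blob_size: usize,
    /// A compacted blob must not grow larger than this.
    #[serde(default = "default_max_compact_size")]
    max_compact_size: usize,
    /// Compact once the image has at least this many blobs.
    #[serde(default = "default_layers_to_compact")]
    layers_to_compact: usize,
}

fn default_compact_blob_size() -> usize { 10 * 1024 * 1024 }  // assumed value
fn default_max_compact_size() -> usize { 100 * 1024 * 1024 }  // assumed value
fn default_layers_to_compact() -> usize { 32 }                // assumed value

fn main() {
    // Fields omitted from the JSON fall back to the defaults above.
    let cfg: CompactConfig =
        serde_json::from_str(r#"{ "compact_blob_size": 1048576 }"#).unwrap();
    println!("{:?}", cfg);
}
```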
2 changes: 1 addition & 1 deletion builder/src/merge.rs
@@ -409,7 +409,7 @@ mod tests {
);
assert!(build_output.is_ok());
let build_output = build_output.unwrap();
-println!("BuildOutpu: {}", build_output);
+println!("BuildOutput: {}", build_output);
assert_eq!(build_output.blob_size, Some(16));
}
}
2 changes: 1 addition & 1 deletion rafs/src/blobfs/mod.rs
@@ -167,7 +167,7 @@ impl BlobFs {
fn load_bootstrap(cfg: &Config) -> io::Result<BlobfsState> {
let blob_ondemand_conf = BlobOndemandConfig::from_str(&cfg.blob_ondemand_cfg)?;
if !blob_ondemand_conf.rafs_conf.validate() {
-return Err(einval!("blobfs: invlidate configuration for blobfs"));
+return Err(einval!("blobfs: invalidate configuration for blobfs"));
}
let rafs_cfg = blob_ondemand_conf.rafs_conf.get_rafs_config()?;
if rafs_cfg.mode != "direct" {
2 changes: 1 addition & 1 deletion rafs/src/fs.rs
@@ -434,7 +434,7 @@ impl Rafs {
}

// Perform different policy for v5 format and v6 format as rafs v6's blobs are capable to
-// to download chunks and decompress them all by themselves. For rafs v6, directly perform
+// download chunks and decompress them all by themselves. For rafs v6, directly perform
// chunk based full prefetch
if !ignore_prefetch_all && (inlay_prefetch_all || prefetch_all || startup_prefetch_all) {
if sb.meta.is_v6() {
2 changes: 1 addition & 1 deletion rafs/src/metadata/mod.rs
@@ -792,7 +792,7 @@ impl RafsSuper {
// Backward compatibility: try to fix blob id for old converters.
// Old converters extracts bootstraps from data blobs with inlined bootstrap
// use blob digest as the bootstrap file name. The last blob in the blob table from
-// the bootstrap has wrong blod id, so we need to fix it.
+// the bootstrap has wrong blob id, so we need to fix it.
let blobs = rs.superblock.get_blob_infos();
for blob in blobs.iter() {
// Fix blob id for new images with old converters.
2 changes: 1 addition & 1 deletion service/src/block_nbd.rs
@@ -105,7 +105,7 @@ impl NbdService {
})
}

-/// Create a [NbdWoker] to run the event loop to handle NBD requests from kernel.
+/// Create a [NbdWorker] to run the event loop to handle NBD requests from kernel.
pub fn create_worker(&self) -> Result<NbdWorker> {
// Let the NBD driver go.
let (sock1, sock2) = std::os::unix::net::UnixStream::pair()?;
2 changes: 1 addition & 1 deletion service/src/fs_service.rs
@@ -222,7 +222,7 @@ pub trait FsService: Send + Sync {
/// Validate prefetch file list from user input.
///
/// Validation rules:
-/// - an item may be file or directroy.
+/// - an item may be file or directory.
/// - items must be separated by space, such as "<path1> <path2> <path3>".
/// - each item must be absolute path, such as "/foo1/bar1 /foo2/bar2".
fn validate_prefetch_file_list(input: &Option<Vec<String>>) -> Result<Option<Vec<PathBuf>>> {
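The validation rules quoted in this hunk are easy to get wrong when assembling prefetch lists by hand. Below is a self-contained sketch of a checker that enforces them; it is not the real `validate_prefetch_file_list` (which returns `Result<Option<Vec<PathBuf>>>`), just an illustration of the documented contract, with a hypothetical function name and error type:

```rust
use std::path::PathBuf;

// Illustrative validator for the documented rules; the real trait method
// may differ in details and error handling.
fn validate_prefetch_list(input: &Option<Vec<String>>) -> Result<Option<Vec<PathBuf>>, String> {
    let Some(entries) = input else {
        return Ok(None);
    };

    let mut paths = Vec::new();
    for entry in entries {
        // Items may be packed into one string separated by spaces,
        // e.g. "/foo1/bar1 /foo2/bar2".
        for item in entry.split_whitespace() {
            let path = PathBuf::from(item);
            if !path.is_absolute() {
                return Err(format!("prefetch item `{}` is not an absolute path", item));
            }
            paths.push(path);
        }
    }
    Ok(Some(paths))
}

fn main() {
    let input = Some(vec!["/foo1/bar1 /foo2/bar2".to_string()]);
    assert_eq!(validate_prefetch_list(&input).unwrap().unwrap().len(), 2);
}
```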
2 changes: 1 addition & 1 deletion src/bin/nydus-image/inspect.rs
@@ -204,7 +204,7 @@ impl RafsInspector {
}

// Walk through children inodes to find the file
-// Print its basic information and all chunk infomation
+// Print its basic information and all chunk information
let dir_inode = self.rafs_meta.get_extended_inode(self.cur_dir_ino, false)?;
dir_inode.walk_children_inodes(0, &mut |_inode, child_name, child_ino, _offset| {
if child_name == file_name {
4 changes: 2 additions & 2 deletions src/bin/nydus-image/main.rs
@@ -832,9 +832,9 @@ impl Command {
let chunk_size = Self::get_chunk_size(matches, conversion_type)?;
let batch_size = Self::get_batch_size(matches, version, conversion_type, chunk_size)?;
let blob_cache_storage = Self::get_blob_cache_storage(matches, conversion_type)?;
-// blob-cacher-dir and blob-dir/blob are a set of mutually exclusive functions,
+// blob-cache-dir and blob-dir/blob are a set of mutually exclusive functions,
// the former is used to generate blob cache, nydusd is directly started through blob cache,
-// the latter is to generate nydus blob, as nydud backend to start
+// the latter is to generate nydus blob, as nydusd backend to start
let blob_storage = if blob_cache_storage.is_some() {
None
} else {
2 changes: 1 addition & 1 deletion src/bin/nydusd/api_server_glue.rs
@@ -118,7 +118,7 @@ impl ApiServer {
}

/// External supervisor wants this instance to fetch `/dev/fuse` fd. Before
-/// invoking this method, supervisor should already listens on a Unix socket and
+/// invoking this method, supervisor should already listen on a Unix socket and
/// waits for connection from this instance. Then supervisor should send the *fd*
/// back. Note, the http response does not mean this process already finishes Takeover
/// procedure. Supervisor has to continuously query the state of Nydusd until it gets
2 changes: 1 addition & 1 deletion src/logger.rs
@@ -117,7 +117,7 @@ pub fn setup_logging(
})?;
spec = spec.basename(basename);

-// `flexi_logger` automatically add `.log` suffix if the file name has not extension.
+// `flexi_logger` automatically add `.log` suffix if the file name has no extension.
if let Some(suffix) = path.extension() {
let suffix = suffix.to_str().ok_or_else(|| {
eprintln!("invalid file extension {:?}", suffix);
2 changes: 1 addition & 1 deletion storage/src/backend/mod.rs
@@ -213,7 +213,7 @@ pub trait BlobBackend: Send + Sync {
/// Get metrics object.
fn metrics(&self) -> &BackendMetrics;

-/// Get a blob reader object to access blod `blob_id`.
+/// Get a blob reader object to access blob `blob_id`.
fn get_reader(&self, blob_id: &str) -> BackendResult<Arc<dyn BlobReader>>;
}

6 changes: 3 additions & 3 deletions storage/src/backend/registry.rs
@@ -299,7 +299,7 @@ impl RegistryState {
form.insert("scope".to_string(), auth.scope.clone());
form.insert("grant_type".to_string(), "password".to_string());
form.insert("username".to_string(), self.username.clone());
-form.insert("passward".to_string(), self.password.clone());
+form.insert("password".to_string(), self.password.clone());
form.insert("client_id".to_string(), REGISTRY_CLIENT_ID.to_string());

let token_resp = connection
@@ -1089,15 +1089,15 @@ mod tests {
assert_eq!(&auth.service, "my-registry.com");
assert_eq!(&auth.scope, "repository:test/repo:pull,push");
}
-_ => panic!("failed to pase `Bearer` authentication header"),
+_ => panic!("failed to parse `Bearer` authentication header"),
}

let str = "Basic realm=\"https://auth.my-registry.com/token\"";
let header = HeaderValue::from_str(str).unwrap();
let auth = RegistryState::parse_auth(&header).unwrap();
match auth {
Auth::Basic(auth) => assert_eq!(&auth.realm, "https://auth.my-registry.com/token"),
-_ => panic!("failed to pase `Bearer` authentication header"),
+_ => panic!("failed to parse `Bearer` authentication header"),
}

let str = "Base realm=\"https://auth.my-registry.com/token\"";
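For orientation, the fixed `password` field belongs to an OAuth password-grant form that the registry backend posts to the token endpoint advertised in the `Bearer realm=...` challenge parsed in the tests above. A rough sketch of that form, with placeholder credentials and a hypothetical client id (the real values come from `RegistryState` and `REGISTRY_CLIENT_ID`):

```rust
use std::collections::HashMap;

fn main() {
    // Hypothetical values; in nydus they come from RegistryState and the
    // WWW-Authenticate challenge parsed by parse_auth().
    let (username, password) = ("demo-user", "demo-pass");
    let scope = "repository:test/repo:pull,push";

    let mut form: HashMap<String, String> = HashMap::new();
    form.insert("service".to_string(), "my-registry.com".to_string());
    form.insert("scope".to_string(), scope.to_string());
    form.insert("grant_type".to_string(), "password".to_string());
    form.insert("username".to_string(), username.to_string());
    form.insert("password".to_string(), password.to_string()); // the field fixed by this PR
    form.insert("client_id".to_string(), "nydus-client".to_string()); // placeholder client id

    // The form would then be POSTed to the token endpoint from the
    // `Bearer realm="..."` challenge, e.g. https://auth.my-registry.com/token.
    for (k, v) in &form {
        println!("{k}={v}");
    }
}
```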
2 changes: 1 addition & 1 deletion storage/src/cache/dummycache.rs
@@ -180,7 +180,7 @@ pub struct DummyCacheMgr {
}

impl DummyCacheMgr {
-/// Create a new instance of `DummmyCacheMgr`.
+/// Create a new instance of `DummyCacheMgr`.
pub fn new(
config: &CacheConfigV2,
backend: Arc<dyn BlobBackend>,
2 changes: 1 addition & 1 deletion storage/src/cache/state/digested_chunk_map.rs
@@ -7,7 +7,7 @@
//!
//! This module provides a chunk state tracking driver for legacy Rafs images without chunk array,
//! which uses chunk digest as id to track chunk readiness state. The [DigestedChunkMap] is not
-//! optimal in case of performance and memory consumption. So it is only used only to keep backward
+//! optimal in case of performance and memory consumption. So it is only used to keep backward
/// compatibility with the old nydus image format.
use std::collections::HashSet;
use std::io::Result;
2 changes: 1 addition & 1 deletion storage/src/cache/state/mod.rs
@@ -74,7 +74,7 @@ pub trait ChunkMap: Any + Send + Sync {
///
/// The function returns:
/// - `Err(Timeout)` waiting for inflight backend IO timeouts.
-/// - `Ok(true)` if the the chunk is ready.
+/// - `Ok(true)` if the chunk is ready.
/// - `Ok(false)` marks the chunk as pending, either set_ready_and_clear_pending() or
/// clear_pending() must be called to clear the pending state.
fn check_ready_and_mark_pending(&self, _chunk: &dyn BlobChunkInfo) -> StorageResult<bool> {
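The return-value contract in this hunk implies a fixed caller pattern: `Ok(false)` hands ownership of the pending state to the caller, which must later clear it one way or the other. A simplified sketch of that flow — the trait below paraphrases the documented contract and is not the real `ChunkMap` API, whose methods take `&dyn BlobChunkInfo` and return `StorageResult`:

```rust
// Paraphrased contract; method names follow the doc comment above, but the
// signatures here are simplified assumptions.
trait ChunkState {
    /// Ok(true): chunk ready; Ok(false): chunk marked pending for this caller;
    /// Err(..): waiting on another in-flight download timed out.
    fn check_ready_and_mark_pending(&self, chunk: u32) -> Result<bool, String>;
    fn set_ready_and_clear_pending(&self, chunk: u32);
    fn clear_pending(&self, chunk: u32);
}

fn fetch_chunk(map: &dyn ChunkState, chunk: u32) -> Result<(), String> {
    match map.check_ready_and_mark_pending(chunk)? {
        // Already downloaded by someone else, nothing to do.
        true => Ok(()),
        // We own the pending state now: either finish the download and mark
        // the chunk ready, or roll back the pending mark on failure.
        false => match download_from_backend(chunk) {
            Ok(()) => {
                map.set_ready_and_clear_pending(chunk);
                Ok(())
            }
            Err(e) => {
                map.clear_pending(chunk);
                Err(e)
            }
        },
    }
}

fn download_from_backend(_chunk: u32) -> Result<(), String> {
    Ok(()) // placeholder for the actual backend read
}
```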
4 changes: 2 additions & 2 deletions storage/src/device.rs
@@ -787,7 +787,7 @@ pub struct BlobIoVec {
bi_blob: Arc<BlobInfo>,
/// Total size of blob IOs to be performed.
bi_size: u64,
-/// Array of blob IOs, these IOs should executed sequentially.
+/// Array of blob IOs, these IOs should be executed sequentially.
pub(crate) bi_vec: Vec<BlobIoDesc>,
}

@@ -829,7 +829,7 @@ impl BlobIoVec {
self.bi_vec.len()
}

-/// Check whether there's 'BlobIoDesc' in the'BlobIoVec'.
+/// Check whether there's 'BlobIoDesc' in the 'BlobIoVec'.
pub fn is_empty(&self) -> bool {
self.bi_vec.is_empty()
}
2 changes: 1 addition & 1 deletion storage/src/lib.rs
@@ -9,7 +9,7 @@
//! storage containing data chunks. Data chunks may be compressed, encrypted and deduplicated by
//! content digest value. When Rafs file is used for container images, Rafs metadata blob contains
//! all filesystem metadatas, such as directory, file name, permission etc. Actually file contents
-//! are split into chunks and stored into data blobs. Rafs may built one data blob for each
+//! are split into chunks and stored into data blobs. Rafs may build one data blob for each
//! container image layer or build a single data blob for the whole image, according to building
//! options.
//!
2 changes: 1 addition & 1 deletion storage/src/meta/batch.rs
@@ -75,7 +75,7 @@ impl BatchContextGenerator {
self.chunk_data_buf.is_empty()
}

-/// Get the lenth of chunk data buffer.
+/// Get the length of chunk data buffer.
pub fn chunk_data_buf_len(&self) -> usize {
self.chunk_data_buf.len()
}
8 changes: 4 additions & 4 deletions storage/src/meta/mod.rs
@@ -82,7 +82,7 @@ const BLOB_TOC_FILE_SUFFIX: &str = "blob.toc";
/// and can be used as marker to locate the compression context table. All fields of compression
/// context table header should be encoded in little-endian format.
///
-/// The compression context table and header are arranged in the data blob as follow:
+/// The compression context table and header are arranged in the data blob as follows:
///
/// `chunk data` | `compression context table` | `[ZRan context table | ZRan dictionary]` | `compression context table header`
#[repr(C)]
@@ -705,7 +705,7 @@ impl BlobCompressionContextInfo {
}

/// Get compressed size associated with the chunk at `chunk_index`.
-/// Capabale of handling both batch and non-batch chunks.
+/// Capable of handling both batch and non-batch chunks.
pub fn get_compressed_size(&self, chunk_index: u32) -> Result<u32> {
self.state.get_compressed_size(chunk_index as usize)
}
@@ -1012,7 +1012,7 @@ impl BlobCompressionContext {
}

/// Get compressed size associated with the chunk at `chunk_index`.
-/// Capabale of handling both batch and non-batch chunks.
+/// Capable of handling both batch and non-batch chunks.
pub fn get_compressed_size(&self, chunk_index: usize) -> Result<u32> {
if self.is_batch_chunk(chunk_index) {
let ctx = self
@@ -1379,7 +1379,7 @@ impl BlobMetaChunkArray {
// - `mid < size`: `mid` is limited by `[left; right)` bound.
let entry = &chunks[mid];
if compressed {
-// Capabale of handling both batch and non-batch chunks.
+// Capable of handling both batch and non-batch chunks.
let c_offset = entry.compressed_offset();
let c_size = state.get_compressed_size(mid)?;
(start, end) = (c_offset, c_offset + c_size as u64);
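The last hunk above sits inside a binary search that maps an address to the chunk covering it, where a batch chunk's compressed size has to be resolved through the shared state. As a standalone illustration of that kind of lookup (not the actual `BlobMetaChunkArray` code), a search over sorted `(compressed_offset, compressed_size)` pairs could look like this:

```rust
/// Illustrative lookup: find the index of the chunk whose compressed range
/// [offset, offset + size) contains `addr`. Assumes `chunks` is sorted by
/// compressed offset and that ranges do not overlap.
fn find_chunk_by_compressed_addr(chunks: &[(u64, u32)], addr: u64) -> Option<usize> {
    let (mut left, mut right) = (0usize, chunks.len());
    while left < right {
        let mid = left + (right - left) / 2;
        let (start, size) = chunks[mid];
        let end = start + size as u64;
        if addr < start {
            right = mid;
        } else if addr >= end {
            left = mid + 1;
        } else {
            return Some(mid);
        }
    }
    None
}

fn main() {
    // Three chunks at compressed offsets 0, 100 and 300.
    let chunks = [(0u64, 100u32), (100, 200), (300, 50)];
    assert_eq!(find_chunk_by_compressed_addr(&chunks, 150), Some(1));
    assert_eq!(find_chunk_by_compressed_addr(&chunks, 400), None);
}
```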
2 changes: 1 addition & 1 deletion storage/src/utils.rs
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: Apache-2.0

-//! Utility helpers to supprt the storage subsystem.
+//! Utility helpers to support the storage subsystem.
use std::alloc::{alloc, Layout};
use std::cmp::{self, min};
use std::io::{ErrorKind, IoSliceMut, Result};
4 changes: 2 additions & 2 deletions utils/src/compress/lz4_standard.rs
@@ -66,8 +66,8 @@ mod tests {
#[test]
fn test_error_input() {
let mut big_buf = vec![0x0u8; u32::MAX as usize];
-let mock_comperessed = vec![0x0u8; 32];
+let mock_compressed = vec![0x0u8; 32];
assert!(lz4_compress(&big_buf).is_err());
-assert!(lz4_decompress(&mock_comperessed, big_buf.as_mut_slice()).is_err());
+assert!(lz4_decompress(&mock_compressed, big_buf.as_mut_slice()).is_err());
}
}
6 changes: 3 additions & 3 deletions utils/src/crypt.rs
@@ -233,7 +233,7 @@ impl Cipher {
.map_err(|e| eother!(format!("failed to encrypt data, {}", e)))
}
Cipher::Aes256Gcm(_cipher) => {
-Err(einval!("Cipher::entrypt() doesn't support Aes256Gcm"))
+Err(einval!("Cipher::encrypt() doesn't support Aes256Gcm"))
}
}
}
@@ -247,7 +247,7 @@
Cipher::Aes256Xts(cipher) => Self::cipher(*cipher, symm::Mode::Decrypt, key, iv, data)
.map_err(|e| eother!(format!("failed to decrypt data, {}", e))),
Cipher::Aes256Gcm(_cipher) => {
-Err(einval!("Cipher::detrypt() doesn't support Aes256Gcm"))
+Err(einval!("Cipher::decrypt() doesn't support Aes256Gcm"))
}
}?;

@@ -751,7 +751,7 @@ mod tests {
CipherContext::new(error_key.to_vec(), iv.to_vec(), true, Algorithm::Aes128Xts)
.is_err()
);
-// create wtih symmetry key
+// create with symmetry key
assert!(CipherContext::new(
symmetry_key.to_vec(),
iv.to_vec(),
2 changes: 1 addition & 1 deletion utils/src/digest.rs
@@ -88,7 +88,7 @@ pub trait DigestHasher {
/// So we should avoid any unnecessary clone() operation. Add we prefer allocation on stack
/// instead of allocation on heap.
///
-/// If allocating memory for blake3::Hahser is preferred over using the stack, please try:
+/// If allocating memory for blake3::Hasher is preferred over using the stack, please try:
/// Blake3(Box<blake3::Hasher>). But be careful, this will cause one extra memory allocation/free
/// for each digest.
#[derive(Clone, Debug)]
2 changes: 1 addition & 1 deletion utils/src/lib.rs
@@ -166,7 +166,7 @@ mod tests {
}

#[test]
-fn test_round_up_uszie() {
+fn test_round_up_usize() {
assert_eq!(round_up_usize(10, 8), 16);
assert_eq!(round_up_usize(100, 8), 104);
assert_eq!(round_up_usize(1000, 8), 1000);
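The assertions in this renamed test pin down what `round_up_usize` does: round a value up to the next multiple of the alignment (16, 104 and 1000 for the inputs above). A tiny sketch of that formula, assuming a non-zero alignment — an illustrative re-implementation, not the crate's helper:

```rust
/// Illustrative re-implementation of rounding up to a multiple of `align`
/// (assumes `align` is non-zero; the real round_up_usize may differ).
fn round_up(v: usize, align: usize) -> usize {
    (v + align - 1) / align * align
}

fn main() {
    assert_eq!(round_up(10, 8), 16);
    assert_eq!(round_up(100, 8), 104);
    assert_eq!(round_up(1000, 8), 1000);
}
```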