diff --git a/db/src/lib.rs b/db/src/lib.rs
index 8ae74918fb..1b27e2ebb1 100644
--- a/db/src/lib.rs
+++ b/db/src/lib.rs
@@ -15,7 +15,7 @@ pub use crate::config::DBConfig;
 pub use crate::memorydb::MemoryKeyValueDB;
 pub use crate::rocksdb::RocksDB;
 
-pub type Col = Option<u32>;
+pub type Col = u32;
 pub type Error = ErrorKind;
 pub type Result<T> = result::Result<T, Error>;
 
diff --git a/db/src/memorydb.rs b/db/src/memorydb.rs
index 7ea96c7477..bdc50df646 100644
--- a/db/src/memorydb.rs
+++ b/db/src/memorydb.rs
@@ -17,9 +17,8 @@ pub struct MemoryKeyValueDB {
 impl MemoryKeyValueDB {
     pub fn open(cols: usize) -> MemoryKeyValueDB {
         let mut table = FnvHashMap::with_capacity_and_hasher(cols, Default::default());
-        table.insert(None, FnvHashMap::default());
         for idx in 0..cols {
-            table.insert(Some(idx as u32), FnvHashMap::default());
+            table.insert(idx as u32, FnvHashMap::default());
         }
         MemoryKeyValueDB {
             db: Arc::new(RwLock::new(table)),
@@ -120,38 +119,38 @@ mod tests {
     fn write_and_read() {
         let db = MemoryKeyValueDB::open(2);
         let mut batch = db.batch().unwrap();
-        batch.insert(None, &[0, 0], &[0, 0, 0]).unwrap();
-        batch.insert(Some(1), &[1, 1], &[1, 1, 1]).unwrap();
+        batch.insert(0, &[0, 0], &[0, 0, 0]).unwrap();
+        batch.insert(1, &[1, 1], &[1, 1, 1]).unwrap();
         batch.commit().unwrap();
 
-        assert_eq!(Some(vec![0, 0, 0]), db.read(None, &[0, 0]).unwrap());
-        assert_eq!(None, db.read(None, &[1, 1]).unwrap());
+        assert_eq!(Some(vec![0, 0, 0]), db.read(0, &[0, 0]).unwrap());
+        assert_eq!(None, db.read(0, &[1, 1]).unwrap());
 
-        assert_eq!(None, db.read(Some(1), &[0, 0]).unwrap());
-        assert_eq!(Some(vec![1, 1, 1]), db.read(Some(1), &[1, 1]).unwrap());
+        assert_eq!(None, db.read(1, &[0, 0]).unwrap());
+        assert_eq!(Some(vec![1, 1, 1]), db.read(1, &[1, 1]).unwrap());
     }
 
     #[test]
     fn write_and_partial_read() {
         let db = MemoryKeyValueDB::open(2);
         let mut batch = db.batch().unwrap();
-        batch.insert(None, &[0, 0], &[5, 4, 3, 2]).unwrap();
-        batch.insert(Some(1), &[1, 1], &[1, 2, 3, 4, 5]).unwrap();
+        batch.insert(0, &[0, 0], &[5, 4, 3, 2]).unwrap();
+        batch.insert(1, &[1, 1], &[1, 2, 3, 4, 5]).unwrap();
         batch.commit().unwrap();
 
         assert_eq!(
             Some(vec![2, 3, 4]),
-            db.partial_read(Some(1), &[1, 1], &(1..4)).unwrap()
+            db.partial_read(1, &[1, 1], &(1..4)).unwrap()
         );
-        assert_eq!(None, db.partial_read(Some(1), &[0, 0], &(1..4)).unwrap());
+        assert_eq!(None, db.partial_read(1, &[0, 0], &(1..4)).unwrap());
         // return None when invalid range is passed
-        assert_eq!(None, db.partial_read(Some(1), &[1, 1], &(2..8)).unwrap());
+        assert_eq!(None, db.partial_read(1, &[1, 1], &(2..8)).unwrap());
         // range must be increasing
-        assert_eq!(None, db.partial_read(Some(1), &[1, 1], &(3..0)).unwrap());
+        assert_eq!(None, db.partial_read(1, &[1, 1], &(3..0)).unwrap());
 
         assert_eq!(
             Some(vec![4, 3, 2]),
-            db.partial_read(None, &[0, 0], &(1..4)).unwrap()
+            db.partial_read(0, &[0, 0], &(1..4)).unwrap()
         );
     }
 }
diff --git a/db/src/rocksdb.rs b/db/src/rocksdb.rs
index 0f57131dcc..2cd8f40fcf 100644
--- a/db/src/rocksdb.rs
+++ b/db/src/rocksdb.rs
@@ -51,33 +51,25 @@ impl KeyValueDB for RocksDB {
     type Batch = RocksdbBatch;
 
     fn read(&self, col: Col, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        match col {
-            Some(col) => {
-                let cf = self
-                    .inner
-                    .cf_handle(&col.to_string())
-                    .expect("column not found");
-                self.inner.get_cf(cf, &key)
-            }
-            None => self.inner.get(&key),
-        }
-        .map(|v| v.map(|vi| vi.to_vec()))
-        .map_err(Into::into)
+        let cf = self
+            .inner
+            .cf_handle(&col.to_string())
+            .expect("column not found");
+        self.inner
+            .get_cf(cf, &key)
+            .map(|v| v.map(|vi| vi.to_vec()))
+            .map_err(Into::into)
     }
 
     fn partial_read(&self, col: Col, key: &[u8], range: &Range<usize>) -> Result<Option<Vec<u8>>> {
-        match col {
-            Some(col) => {
-                let cf = self
-                    .inner
-                    .cf_handle(&col.to_string())
-                    .expect("column not found");
-                self.inner.get_pinned_cf(cf, &key)
-            }
-            None => self.inner.get_pinned(&key),
-        }
-        .map(|v| v.and_then(|vi| vi.get(range.start..range.end).map(|slice| slice.to_vec())))
-        .map_err(Into::into)
+        let cf = self
+            .inner
+            .cf_handle(&col.to_string())
+            .expect("column not found");
+        self.inner
+            .get_pinned_cf(cf, &key)
+            .map(|v| v.and_then(|vi| vi.get(range.start..range.end).map(|slice| slice.to_vec())))
+            .map_err(Into::into)
     }
 
     fn batch(&self) -> Result<Self::Batch> {
@@ -95,30 +87,20 @@ pub struct RocksdbBatch {
 
 impl DbBatch for RocksdbBatch {
     fn insert(&mut self, col: Col, key: &[u8], value: &[u8]) -> Result<()> {
-        match col {
-            Some(col) => {
-                let cf = self
-                    .db
-                    .cf_handle(&col.to_string())
-                    .expect("column not found");
-                self.wb.put_cf(cf, key, value)?
-            }
-            None => self.wb.put(key, value)?,
-        }
+        let cf = self
+            .db
+            .cf_handle(&col.to_string())
+            .expect("column not found");
+        self.wb.put_cf(cf, key, value)?;
         Ok(())
     }
 
     fn delete(&mut self, col: Col, key: &[u8]) -> Result<()> {
-        match col {
-            Some(col) => {
-                let cf = self
-                    .db
-                    .cf_handle(&col.to_string())
-                    .expect("column not found");
-                self.wb.delete_cf(cf, &key)?
-            }
-            None => self.wb.delete(key)?,
-        }
+        let cf = self
+            .db
+            .cf_handle(&col.to_string())
+            .expect("column not found");
+        self.wb.delete_cf(cf, &key)?;
         Ok(())
     }
 
@@ -190,15 +172,15 @@ mod tests {
         let db = setup_db("write_and_read", 2);
         let mut batch = db.batch().unwrap();
 
-        batch.insert(None, &[0, 0], &[0, 0, 0]).unwrap();
-        batch.insert(Some(1), &[1, 1], &[1, 1, 1]).unwrap();
+        batch.insert(0, &[0, 0], &[0, 0, 0]).unwrap();
+        batch.insert(1, &[1, 1], &[1, 1, 1]).unwrap();
         batch.commit().unwrap();
 
-        assert_eq!(Some(vec![0, 0, 0]), db.read(None, &[0, 0]).unwrap());
-        assert_eq!(None, db.read(None, &[1, 1]).unwrap());
+        assert_eq!(Some(vec![0, 0, 0]), db.read(0, &[0, 0]).unwrap());
+        assert_eq!(None, db.read(0, &[1, 1]).unwrap());
 
-        assert_eq!(None, db.read(Some(1), &[0, 0]).unwrap());
-        assert_eq!(Some(vec![1, 1, 1]), db.read(Some(1), &[1, 1]).unwrap());
+        assert_eq!(None, db.read(1, &[0, 0]).unwrap());
+        assert_eq!(Some(vec![1, 1, 1]), db.read(1, &[1, 1]).unwrap());
     }
 
     #[test]
@@ -206,23 +188,23 @@
         let db = setup_db("write_and_partial_read", 2);
         let mut batch = db.batch().unwrap();
 
-        batch.insert(None, &[0, 0], &[5, 4, 3, 2]).unwrap();
-        batch.insert(Some(1), &[1, 1], &[1, 2, 3, 4, 5]).unwrap();
+        batch.insert(0, &[0, 0], &[5, 4, 3, 2]).unwrap();
+        batch.insert(1, &[1, 1], &[1, 2, 3, 4, 5]).unwrap();
         batch.commit().unwrap();
 
         assert_eq!(
             Some(vec![2, 3, 4]),
-            db.partial_read(Some(1), &[1, 1], &(1..4)).unwrap()
+            db.partial_read(1, &[1, 1], &(1..4)).unwrap()
         );
-        assert_eq!(None, db.partial_read(Some(1), &[0, 0], &(1..4)).unwrap());
+        assert_eq!(None, db.partial_read(1, &[0, 0], &(1..4)).unwrap());
         // return None when invalid range is passed
-        assert_eq!(None, db.partial_read(Some(1), &[1, 1], &(2..8)).unwrap());
+        assert_eq!(None, db.partial_read(1, &[1, 1], &(2..8)).unwrap());
         // range must be increasing
-        assert_eq!(None, db.partial_read(Some(1), &[1, 1], &(3..0)).unwrap());
+        assert_eq!(None, db.partial_read(1, &[1, 1], &(3..0)).unwrap());
 
         assert_eq!(
             Some(vec![4, 3, 2]),
-            db.partial_read(None, &[0, 0], &(1..4)).unwrap()
+            db.partial_read(0, &[0, 0], &(1..4)).unwrap()
         );
     }
 }
diff --git a/shared/src/cachedb.rs b/shared/src/cachedb.rs
index 2f3f31f57a..df08645149 100644
--- a/shared/src/cachedb.rs
+++ b/shared/src/cachedb.rs
@@ -22,7 +22,7 @@ where
     pub fn new(db: T, cols: &[CacheCols]) -> Self {
         let mut table = FnvHashMap::with_capacity_and_hasher(cols.len(), Default::default());
         for (idx, capacity) in cols {
-            table.insert(Some(*idx), LruCache::new(*capacity));
+            table.insert(*idx, LruCache::new(*capacity));
         }
         CacheDB {
             db,
diff --git a/shared/src/lib.rs b/shared/src/lib.rs
index 7ab423118e..96b377d1f4 100644
--- a/shared/src/lib.rs
+++ b/shared/src/lib.rs
@@ -24,12 +24,12 @@ mod tests;
 use ckb_db::Col;
 
 pub const COLUMNS: u32 = 9;
-pub const COLUMN_INDEX: Col = Some(0);
-pub const COLUMN_BLOCK_HEADER: Col = Some(1);
-pub const COLUMN_BLOCK_BODY: Col = Some(2);
-pub const COLUMN_BLOCK_UNCLE: Col = Some(3);
-pub const COLUMN_META: Col = Some(4);
-pub const COLUMN_TRANSACTION_ADDR: Col = Some(5);
-pub const COLUMN_EXT: Col = Some(6);
-pub const COLUMN_BLOCK_TRANSACTION_ADDRESSES: Col = Some(7);
-pub const COLUMN_BLOCK_PROPOSAL_IDS: Col = Some(8);
+pub const COLUMN_INDEX: Col = 0;
+pub const COLUMN_BLOCK_HEADER: Col = 1;
+pub const COLUMN_BLOCK_BODY: Col = 2;
+pub const COLUMN_BLOCK_UNCLE: Col = 3;
+pub const COLUMN_META: Col = 4;
+pub const COLUMN_TRANSACTION_ADDR: Col = 5;
+pub const COLUMN_EXT: Col = 6;
+pub const COLUMN_BLOCK_TRANSACTION_ADDRESSES: Col = 7;
+pub const COLUMN_BLOCK_PROPOSAL_IDS: Col = 8;
diff --git a/shared/src/shared.rs b/shared/src/shared.rs
index 12f93b4342..acaf0e20f2 100644
--- a/shared/src/shared.rs
+++ b/shared/src/shared.rs
@@ -359,7 +359,7 @@ impl SharedBuilder<CacheDB<RocksDB>> {
     pub fn db(mut self, config: &DBConfig) -> Self {
         self.db = Some(CacheDB::new(
             RocksDB::open(config, COLUMNS),
-            &[(COLUMN_BLOCK_HEADER.unwrap(), 4096)],
+            &[(COLUMN_BLOCK_HEADER, 4096)],
         ));
         self
     }
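
Note for reviewers: a minimal sketch of what a call site looks like after this change, using only the trait methods and constants visible in the diff (`DbBatch::insert`/`commit`, `KeyValueDB::read`, the `COLUMN_*` ids). The `use` paths and the `example` function are illustrative assumptions, not part of the patch.

```rust
// Sketch only: `ckb_db` re-exporting `KeyValueDB`, `DbBatch` and `Result`
// alongside `Col`/`MemoryKeyValueDB` is an assumption about the crate layout;
// the method signatures themselves come from the diff above.
use ckb_db::{Col, DbBatch, KeyValueDB, MemoryKeyValueDB, Result};

// Mirrors shared/src/lib.rs in this diff.
const COLUMN_BLOCK_HEADER: Col = 1;

fn example() -> Result<()> {
    // Columns are now plain u32 ids; the old `None` "default column" is gone,
    // so open(2) creates exactly columns 0 and 1.
    let db = MemoryKeyValueDB::open(2);

    let mut batch = db.batch()?;
    // was: batch.insert(Some(1), b"key", b"value")?
    batch.insert(COLUMN_BLOCK_HEADER, b"key", b"value")?;
    batch.commit()?;

    // was: db.read(Some(1), b"key")? / db.read(None, b"key")?
    assert_eq!(Some(b"value".to_vec()), db.read(COLUMN_BLOCK_HEADER, b"key")?);
    Ok(())
}
```

The change in shared/src/shared.rs is the same simplification seen from the caller's side: `COLUMN_BLOCK_HEADER` can be passed straight through to `CacheDB::new` without the `.unwrap()`.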