Skip to content

Commit

Permalink
chore: resolve some error handling issues
Browse files Browse the repository at this point in the history
  • Loading branch information
quake committed Apr 11, 2019
1 parent 4c06dca commit a2e8b45
Show file tree
Hide file tree
Showing 8 changed files with 114 additions and 119 deletions.
22 changes: 11 additions & 11 deletions chain/src/chain.rs
Expand Up @@ -172,7 +172,6 @@ impl<CI: ChainIndex + 'static> ChainService<CI> {
Ok(())
}

#[allow(clippy::op_ref)]
pub(crate) fn insert_block(&self, block: Arc<Block>) -> Result<(), FailureError> {
let mut new_best_block = false;
let mut total_difficulty = U256::zero();
Expand Down Expand Up @@ -208,8 +207,8 @@ impl<CI: ChainIndex + 'static> ChainService<CI> {
txs_verified: None,
};

let mut batch = self.shared.store().new_batch();
batch.insert_block(&block);
let mut batch = self.shared.store().new_batch()?;
batch.insert_block(&block)?;
if (cannon_total_difficulty > current_total_difficulty)
|| ((current_total_difficulty == cannon_total_difficulty)
&& (block.header().hash() < tip_hash))
Expand All @@ -223,16 +222,16 @@ impl<CI: ChainIndex + 'static> ChainService<CI> {

self.find_fork(&mut fork, tip_number, &block, ext);
cell_set_diff = self.reconcile_main_chain(&mut batch, &mut fork, &mut chain_state)?;
self.update_index(&mut batch, &fork.detached_blocks, &fork.attached_blocks);
self.update_index(&mut batch, &fork.detached_blocks, &fork.attached_blocks)?;
self.update_proposal_ids(&mut chain_state, &fork);
batch.insert_tip_header(&block.header());
batch.insert_tip_header(&block.header())?;
new_best_block = true;

total_difficulty = cannon_total_difficulty;
} else {
batch.insert_block_ext(&block.header().hash(), &ext);
batch.insert_block_ext(&block.header().hash(), &ext)?;
}
batch.commit();
batch.commit()?;

if new_best_block {
let tip_header = block.header().clone();
Expand Down Expand Up @@ -266,14 +265,15 @@ impl<CI: ChainIndex + 'static> ChainService<CI> {
batch: &mut StoreBatch,
detached_blocks: &[Block],
attached_blocks: &[Block],
) {
) -> Result<(), FailureError> {
for block in detached_blocks {
batch.detach_block(block);
batch.detach_block(block)?;
}

for block in attached_blocks {
batch.attach_block(block);
batch.attach_block(block)?;
}
Ok(())
}

fn alignment_fork(
Expand Down Expand Up @@ -465,7 +465,7 @@ impl<CI: ChainIndex + 'static> ChainService<CI> {
.zip(fork.attached_blocks().iter())
.rev()
{
batch.insert_block_ext(&b.header().hash(), ext);
batch.insert_block_ext(&b.header().hash(), ext)?;
}

if let Some(err) = found_error {
Expand Down
3 changes: 1 addition & 2 deletions db/src/lib.rs
Expand Up @@ -18,11 +18,10 @@ pub use crate::memorydb::MemoryKeyValueDB;
pub use crate::rocksdb::RocksDB;

pub type Col = u32;
pub type Error = ErrorKind;
pub type Result<T> = result::Result<T, Error>;

#[derive(Clone, Debug, PartialEq, Eq, Fail)]
pub enum ErrorKind {
pub enum Error {
#[fail(display = "DBError {}", _0)]
DBError(String),
}
Expand Down
6 changes: 3 additions & 3 deletions db/src/memorydb.rs
@@ -1,5 +1,5 @@
// for unit test
use crate::{Col, DbBatch, ErrorKind, KeyValueDB, Result};
use crate::{Col, DbBatch, Error, KeyValueDB, Result};
use ckb_util::RwLock;
use fnv::FnvHashMap;
use std::ops::Range;
Expand Down Expand Up @@ -33,7 +33,7 @@ impl KeyValueDB for MemoryKeyValueDB {
let db = self.db.read();

match db.get(&col) {
None => Err(ErrorKind::DBError(format!("column {:?} not found ", col))),
None => Err(Error::DBError(format!("column {} not found ", col))),
Some(map) => Ok(map.get(key).cloned()),
}
}
Expand All @@ -42,7 +42,7 @@ impl KeyValueDB for MemoryKeyValueDB {
let db = self.db.read();

match db.get(&col) {
None => Err(ErrorKind::DBError(format!("column {:?} not found ", col))),
None => Err(Error::DBError(format!("column {} not found ", col))),
Some(map) => Ok(map
.get(key)
.and_then(|data| data.get(range.start..range.end))
Expand Down
31 changes: 12 additions & 19 deletions db/src/rocksdb.rs
@@ -1,6 +1,6 @@
use crate::{Col, DBConfig, DbBatch, Error, ErrorKind, KeyValueDB, Result};
use crate::{Col, DBConfig, DbBatch, Error, KeyValueDB, Result};
use log::warn;
use rocksdb::{Error as RdbError, Options, WriteBatch, DB};
use rocksdb::{ColumnFamily, Error as RdbError, Options, WriteBatch, DB};
use std::ops::Range;
use std::sync::Arc;

Expand Down Expand Up @@ -47,25 +47,24 @@ impl RocksDB {
}
}

fn cf_handle(db: &DB, col: Col) -> Result<ColumnFamily> {
db.cf_handle(&col.to_string())
.ok_or_else(|| Error::DBError(format!("column {} not found", col)))
}

impl KeyValueDB for RocksDB {
type Batch = RocksdbBatch;

fn read(&self, col: Col, key: &[u8]) -> Result<Option<Vec<u8>>> {
let cf = self
.inner
.cf_handle(&col.to_string())
.expect("column not found");
let cf = cf_handle(&self.inner, col)?;
self.inner
.get_cf(cf, &key)
.map(|v| v.map(|vi| vi.to_vec()))
.map_err(Into::into)
}

fn partial_read(&self, col: Col, key: &[u8], range: &Range<usize>) -> Result<Option<Vec<u8>>> {
let cf = self
.inner
.cf_handle(&col.to_string())
.expect("column not found");
let cf = cf_handle(&self.inner, col)?;
self.inner
.get_pinned_cf(cf, &key)
.map(|v| v.and_then(|vi| vi.get(range.start..range.end).map(|slice| slice.to_vec())))
Expand All @@ -87,19 +86,13 @@ pub struct RocksdbBatch {

impl DbBatch for RocksdbBatch {
fn insert(&mut self, col: Col, key: &[u8], value: &[u8]) -> Result<()> {
let cf = self
.db
.cf_handle(&col.to_string())
.expect("column not found");
let cf = cf_handle(&self.db, col)?;
self.wb.put_cf(cf, key, value)?;
Ok(())
}

fn delete(&mut self, col: Col, key: &[u8]) -> Result<()> {
let cf = self
.db
.cf_handle(&col.to_string())
.expect("column not found");
let cf = cf_handle(&self.db, col)?;
self.wb.delete_cf(cf, &key)?;
Ok(())
}
Expand All @@ -112,7 +105,7 @@ impl DbBatch for RocksdbBatch {

impl From<RdbError> for Error {
fn from(err: RdbError) -> Error {
ErrorKind::DBError(err.into())
Error::DBError(err.into())
}
}

Expand Down
20 changes: 10 additions & 10 deletions shared/src/index.rs
Expand Up @@ -5,14 +5,14 @@ use ckb_core::block::Block;
use ckb_core::extras::{BlockExt, TransactionAddress};
use ckb_core::header::{BlockNumber, Header};
use ckb_core::transaction::{Transaction, TransactionBuilder};
use ckb_db::KeyValueDB;
use ckb_db::{Error, KeyValueDB};
use numext_fixed_hash::H256;

const META_TIP_HEADER_KEY: &[u8] = b"TIP_HEADER";

// maintain chain index, extend chainstore
pub trait ChainIndex: ChainStore {
fn init(&self, genesis: &Block);
fn init(&self, genesis: &Block) -> Result<(), Error>;
fn get_block_hash(&self, number: BlockNumber) -> Option<H256>;
fn get_block_number(&self, hash: &H256) -> Option<BlockNumber>;
fn get_tip_header(&self) -> Option<Header>;
Expand All @@ -21,8 +21,8 @@ pub trait ChainIndex: ChainStore {
}

impl<T: KeyValueDB> ChainIndex for ChainKVStore<T> {
fn init(&self, genesis: &Block) {
let mut batch = self.new_batch();
fn init(&self, genesis: &Block) -> Result<(), Error> {
let mut batch = self.new_batch()?;
let genesis_hash = genesis.header().hash();
let ext = BlockExt {
received_at: genesis.header().timestamp(),
Expand All @@ -44,11 +44,11 @@ impl<T: KeyValueDB> ChainIndex for ChainKVStore<T> {
cells.push((ins, outs));
}

batch.insert_block(genesis);
batch.insert_block_ext(&genesis_hash, &ext);
batch.insert_tip_header(&genesis.header());
batch.attach_block(genesis);
batch.commit();
batch.insert_block(genesis)?;
batch.insert_block_ext(&genesis_hash, &ext)?;
batch.insert_tip_header(&genesis.header())?;
batch.attach_block(genesis)?;
batch.commit()
}

fn get_block_hash(&self, number: BlockNumber) -> Option<H256> {
Expand Down Expand Up @@ -110,7 +110,7 @@ mod tests {
let consensus = Consensus::default();
let block = consensus.genesis_block();
let hash = block.header().hash();
store.init(&block);
store.init(&block).unwrap();
assert_eq!(&hash, &store.get_block_hash(0).unwrap());

assert_eq!(
Expand Down
4 changes: 3 additions & 1 deletion shared/src/shared.rs
Expand Up @@ -55,7 +55,9 @@ impl<CI: ChainIndex> Shared<CI> {
match store.get_tip_header() {
Some(h) => h,
None => {
store.init(&genesis);
store
.init(&genesis)
.expect("init genesis block should be ok");
genesis.header().clone()
}
}
Expand Down

0 comments on commit a2e8b45

Please sign in to comment.