backup previous implementation of sqlite storage
eugene-babichenko committed Dec 9, 2019
1 parent 0817a85 commit 7ee68d2
Showing 3 changed files with 295 additions and 0 deletions.
1 change: 1 addition & 0 deletions Cargo.toml
@@ -7,6 +7,7 @@ members = [
"chain-crypto",
"chain-storage",
"chain-storage-sqlite",
"chain-storage-sqlite-old",
"chain-impl-mockchain",
"cardano-legacy-address",
"network-core",
25 changes: 25 additions & 0 deletions chain-storage-sqlite-old/Cargo.toml
@@ -0,0 +1,25 @@
[package]
name = "chain-storage-sqlite"
version = "0.1.0"
authors = [ "Nicolas Di Prima <nicolas.diprima@iohk.io>"
, "Vincent Hanquez <vincent.hanquez@iohk.io>"
, "Eelco Dolstra <edolstra@gmail.com>"
, "Mikhail Zabaluev <mikhail.zabaluev@gmail.com>"
, "Alexander Vershilov <alexander.vershilov@gmail.com>"
]
edition = "2018"
license = "MIT OR Apache-2.0"

[dependencies]
chain-core = { path = "../chain-core" }
chain-storage = { path = "../chain-storage" }
r2d2 = { version = "0.8" }
r2d2_sqlite = { version = "0.8" }

[dependencies.rusqlite]
version = "0.16.0"
features = ["bundled"]

[dev-dependencies]
chain-storage = { path = "../chain-storage", features = ["test-api"] }
rand_os = "0.1"
269 changes: 269 additions & 0 deletions chain-storage-sqlite-old/src/lib.rs
@@ -0,0 +1,269 @@
use chain_core::property::{Block, BlockId, Serialize};
use chain_storage::{
error::Error,
store::{BackLink, BlockInfo, BlockStore},
};
use rusqlite::types::Value;
use std::path::Path;

pub struct SQLiteBlockStore<B>
where
B: Block,
{
pool: r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>,
dummy: std::marker::PhantomData<B>,
}

impl<B> SQLiteBlockStore<B>
where
B: Block,
{
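    /// Open (or create) the SQLite database at `path`, set up the schema if it
    /// does not exist yet and switch the journal to WAL mode. Panics if the
    /// connection pool cannot be created or the schema setup fails.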
pub fn new<P: AsRef<Path>>(path: P) -> Self {
let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(manager).unwrap();

let connection = pool.get().unwrap();

        // Set up the schema on first use: Blocks holds the serialized block
        // bodies, BlockInfo their chain metadata (depth, parent, optional fast
        // back link) and Tags maps human-readable names to block hashes.
        connection
.execute_batch(
r#"
begin;
create table if not exists BlockInfo (
hash blob primary key,
depth integer not null,
parent blob not null,
fast_distance blob,
fast_hash blob,
foreign key(hash) references Blocks(hash)
);
create table if not exists Blocks (
hash blob primary key,
block blob not null
);
create table if not exists Tags (
name text primary key,
hash blob not null,
foreign key(hash) references BlockInfo(hash)
);
commit;
"#,
)
.unwrap();

/*
connection
.execute("pragma synchronous = off", rusqlite::NO_PARAMS)
.unwrap();
*/

        // Keep the journal in write-ahead-log mode so readers do not block the
        // writer.
        connection
            .execute_batch("pragma journal_mode = WAL")
            .unwrap();

SQLiteBlockStore {
pool,
dummy: std::marker::PhantomData,
}
}
}

/// Deserialize a raw blob column back into a block id; panics on malformed data.
fn blob_to_hash<Id: BlockId>(blob: Vec<u8>) -> Id {
Id::deserialize(&blob[..]).unwrap()
}

impl<B> BlockStore for SQLiteBlockStore<B>
where
B: Block,
{
type Block = B;

fn put_block_internal(&mut self, block: &B, block_info: BlockInfo<B::Id>) -> Result<(), Error> {
let mut conn = self
.pool
.get()
.map_err(|err| Error::BackendError(Box::new(err)))?;

let tx = conn
.transaction()
.map_err(|err| Error::BackendError(Box::new(err)))?;

        // Insert the raw block first; a primary-key constraint violation means
        // the block is already stored.
        let worked = tx
.prepare_cached("insert into Blocks (hash, block) values(?, ?)")
.map_err(|err| Error::BackendError(Box::new(err)))?
.execute(&[
&block_info.block_hash.serialize_as_vec().unwrap()[..],
&block.serialize_as_vec().unwrap()[..],
])
.map(|_| true)
.or_else(|err| match err {
rusqlite::Error::SqliteFailure(error, _) => {
if error.code == rusqlite::ErrorCode::ConstraintViolation {
Ok(false)
} else {
Err(err)
}
}
_ => Err(err),
})
.map_err(|err| Error::BackendError(Box::new(err)))?;
if !worked {
return Err(Error::BlockAlreadyPresent);
}

let parent = block_info
.back_links
.iter()
.find(|x| x.distance == 1)
.unwrap();

let (fast_distance, fast_hash) =
match block_info.back_links.iter().find(|x| x.distance != 1) {
Some(fast_link) => (
Value::Integer(fast_link.distance as i64),
Value::Blob(fast_link.block_hash.serialize_as_vec().unwrap()),
),
None => (Value::Null, Value::Null),
};

tx
.prepare_cached("insert into BlockInfo (hash, depth, parent, fast_distance, fast_hash) values(?, ?, ?, ?, ?)")
.map_err(|err| Error::BackendError(Box::new(err)))?
.execute(&[
Value::Blob(block_info.block_hash.serialize_as_vec().unwrap()),
Value::Integer(block_info.depth as i64),
Value::Blob(parent.block_hash.serialize_as_vec().unwrap()),
fast_distance,
fast_hash,
])
.map_err(|err| Error::BackendError(Box::new(err)))?;

tx.commit()
.map_err(|err| Error::BackendError(Box::new(err)))?;

Ok(())
}

fn get_block(&self, block_hash: &B::Id) -> Result<(B, BlockInfo<B::Id>), Error> {
let blk = self
.pool
.get()
.map_err(|err| Error::BackendError(Box::new(err)))?
.prepare_cached("select block from Blocks where hash = ?")
.map_err(|err| Error::BackendError(Box::new(err)))?
.query_row(&[&block_hash.serialize_as_vec().unwrap()[..]], |row| {
let x: Vec<u8> = row.get(0);
B::deserialize(&x[..]).unwrap()
})
.map_err(|err| match err {
rusqlite::Error::QueryReturnedNoRows => Error::BlockNotFound,
err => Error::BackendError(Box::new(err)),
})?;

let info = self.get_block_info(block_hash)?;

Ok((blk, info))
}

fn get_block_info(&self, block_hash: &B::Id) -> Result<BlockInfo<B::Id>, Error> {
self.pool
.get()
.map_err(|err| Error::BackendError(Box::new(err)))?
.prepare_cached(
"select depth, parent, fast_distance, fast_hash from BlockInfo where hash = ?",
)
.map_err(|err| Error::BackendError(Box::new(err)))?
.query_row(&[&block_hash.serialize_as_vec().unwrap()[..]], |row| {
let mut back_links = vec![BackLink {
distance: 1,
block_hash: blob_to_hash(row.get(1)),
}];

let fast_distance: Option<i64> = row.get(2);
if let Some(fast_distance) = fast_distance {
back_links.push(BackLink {
distance: fast_distance as u64,
block_hash: blob_to_hash(row.get(3)),
});
}

let depth: i64 = row.get(0);

BlockInfo {
block_hash: block_hash.clone(),
depth: depth as u64,
back_links,
}
})
.map_err(|err| match err {
rusqlite::Error::QueryReturnedNoRows => Error::BlockNotFound,
err => Error::BackendError(Box::new(err)),
})
}

fn put_tag(&mut self, tag_name: &str, block_hash: &B::Id) -> Result<(), Error> {
match self
.pool
.get()
.map_err(|err| Error::BackendError(Box::new(err)))?
.prepare_cached("insert or replace into Tags (name, hash) values(?, ?)")
.map_err(|err| Error::BackendError(Box::new(err)))?
.execute(&[
Value::Text(tag_name.to_string()),
Value::Blob(block_hash.serialize_as_vec().unwrap()),
]) {
Ok(_) => Ok(()),
Err(rusqlite::Error::SqliteFailure(err, _))
if err.code == rusqlite::ErrorCode::ConstraintViolation =>
{
Err(Error::BlockNotFound)
}
Err(err) => Err(Error::BackendError(Box::new(err))),
}
}

fn get_tag(&self, tag_name: &str) -> Result<Option<B::Id>, Error> {
match self
.pool
.get()
.map_err(|err| Error::BackendError(Box::new(err)))?
.prepare_cached("select hash from Tags where name = ?")
.map_err(|err| Error::BackendError(Box::new(err)))?
.query_row(&[&tag_name], |row| blob_to_hash(row.get(0)))
{
Ok(s) => Ok(Some(s)),
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(err) => Err(Error::BackendError(Box::new(err))),
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use chain_storage::store::testing::Block;
use rand_os::OsRng;

#[test]
pub fn put_get() {
let mut store = SQLiteBlockStore::<Block>::new(":memory:");
chain_storage::store::testing::test_put_get(&mut store);
}

#[test]
pub fn nth_ancestor() {
let mut rng = OsRng::new().unwrap();
let mut store = SQLiteBlockStore::<Block>::new(":memory:");
chain_storage::store::testing::test_nth_ancestor(&mut rng, &mut store);
}

#[test]
pub fn iterate_range() {
let mut rng = OsRng::new().unwrap();
let mut store = SQLiteBlockStore::<Block>::new(":memory:");
chain_storage::store::testing::test_iterate_range(&mut rng, &mut store);
}
}
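
For orientation, a minimal usage sketch built only on the API shown above: it reuses the test `Block` type from `chain_storage::store::testing` (available behind the crate's `test-api` feature) and assumes `SQLiteBlockStore` is in scope; the database path, the "tip" tag name and the `read_tip` helper are arbitrary examples.

use chain_storage::store::testing::Block;
use chain_storage::{error::Error, store::BlockStore};

// Open (or create) a store at `path` and fetch the block behind the "tip"
// tag, if such a tag has been set.
fn read_tip(path: &str) -> Result<Option<Block>, Error> {
    let store = SQLiteBlockStore::<Block>::new(path);
    match store.get_tag("tip")? {
        Some(hash) => {
            let (block, _info) = store.get_block(&hash)?;
            Ok(Some(block))
        }
        None => Ok(None),
    }
}

Tags are the only mutable pointers in this schema: `put_tag` uses `insert or replace`, while blocks themselves are insert-only and a duplicate insert is reported as `BlockAlreadyPresent`.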
