Skip to content

Commit

Permalink
Sync block index with block storage
Browse files Browse the repository at this point in the history
This commit adds the functionality of checkpointing block index progress
and syncing (updating) the index during startup of the block storage system

Change-Id: Ib1a325add455bce47e510ccfc7af052db51117e6
Signed-off-by: manish <manish.sethi@gmail.com>
  • Loading branch information
manish-sethi committed Sep 28, 2016
1 parent a069514 commit 910e496
Show file tree
Hide file tree
Showing 5 changed files with 296 additions and 50 deletions.
59 changes: 41 additions & 18 deletions core/ledgernext/blkstorage/fsblkstorage/block_stream.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ var ErrUnexpectedEndOfBlockfile = errors.New("unexpected end of blockfile")
// blockfileStream reads blocks sequentially from a single file.
// It starts from the given offset and can traverse to the end of the file.
type blockfileStream struct {
fileNum int
file *os.File
reader *bufio.Reader
currentOffset int64
Expand All @@ -49,10 +50,19 @@ type blockStream struct {
currentFileStream *blockfileStream
}

// blockPlacementInfo captures the information related
// to block's placement in the file.
type blockPlacementInfo struct {
// fileNum is the suffix number of the block file that holds the block.
fileNum int
// blockStartOffset is the file offset of the length-prefix that precedes the block bytes.
blockStartOffset int64
// blockBytesOffset is the file offset where the block bytes themselves begin
// (i.e., blockStartOffset plus the size of the varint length prefix).
blockBytesOffset int64
}

///////////////////////////////////
// blockfileStream functions
////////////////////////////////////
func newBlockfileStream(filePath string, startOffset int64) (*blockfileStream, error) {
func newBlockfileStream(rootDir string, fileNum int, startOffset int64) (*blockfileStream, error) {
filePath := deriveBlockfilePath(rootDir, fileNum)
logger.Debugf("newBlockfileStream(): filePath=[%s], startOffset=[%d]", filePath, startOffset)
var file *os.File
var err error
Expand All @@ -68,41 +78,50 @@ func newBlockfileStream(filePath string, startOffset int64) (*blockfileStream, e
panic(fmt.Sprintf("Could not seek file [%s] to given startOffset [%d]. New position = [%d]",
filePath, startOffset, newPosition))
}
s := &blockfileStream{file, bufio.NewReader(file), startOffset}
s := &blockfileStream{fileNum, file, bufio.NewReader(file), startOffset}
return s, nil
}

// nextBlockBytes returns the bytes of the next block in the file,
// discarding the placement details that the underlying call also produces.
func (s *blockfileStream) nextBlockBytes() ([]byte, error) {
	bytes, _, err := s.nextBlockBytesAndPlacementInfo()
	if err != nil {
		return nil, err
	}
	return bytes, nil
}

func (s *blockfileStream) nextBlockBytesAndPlacementInfo() ([]byte, *blockPlacementInfo, error) {
var lenBytes []byte
var err error
if lenBytes, err = s.reader.Peek(8); err != nil {
// reader.Peek raises io.EOF error if enough bytes not available
if err == io.EOF {
if len(lenBytes) > 0 {
return nil, ErrUnexpectedEndOfBlockfile
return nil, nil, ErrUnexpectedEndOfBlockfile
}
return nil, nil
return nil, nil, nil
}
return nil, err
return nil, nil, err
}
len, n := proto.DecodeVarint(lenBytes)
if n == 0 {
panic(fmt.Errorf("Error in decoding varint bytes"))
}
if _, err = s.reader.Discard(n); err != nil {
return nil, err
return nil, nil, err
}
blockBytes := make([]byte, len)
if _, err = io.ReadAtLeast(s.reader, blockBytes, int(len)); err != nil {
// io.ReadAtLeast raises io.ErrUnexpectedEOF error if it is able to
// read a fewer (non-zero) bytes and io.EOF is encountered
if err == io.ErrUnexpectedEOF {
return nil, ErrUnexpectedEndOfBlockfile
return nil, nil, ErrUnexpectedEndOfBlockfile
}
return nil, err
return nil, nil, err
}
blockPlacementInfo := &blockPlacementInfo{
fileNum: s.fileNum,
blockStartOffset: s.currentOffset,
blockBytesOffset: s.currentOffset + int64(n)}
s.currentOffset += int64(n) + int64(len)
return blockBytes, nil
return blockBytes, blockPlacementInfo, nil
}

func (s *blockfileStream) close() error {
Expand All @@ -113,8 +132,7 @@ func (s *blockfileStream) close() error {
// blockStream functions
////////////////////////////////////
func newBlockStream(rootDir string, startFileNum int, startOffset int64, endFileNum int) (*blockStream, error) {
startFile := deriveBlockfilePath(rootDir, startFileNum)
startFileStream, err := newBlockfileStream(startFile, startOffset)
startFileStream, err := newBlockfileStream(rootDir, startFileNum, startOffset)
if err != nil {
return nil, err
}
Expand All @@ -127,30 +145,35 @@ func (s *blockStream) moveToNextBlockfileStream() error {
return err
}
s.currentFileNum++
nextFile := deriveBlockfilePath(s.rootDir, s.currentFileNum)
if s.currentFileStream, err = newBlockfileStream(nextFile, 0); err != nil {
if s.currentFileStream, err = newBlockfileStream(s.rootDir, s.currentFileNum, 0); err != nil {
return err
}
return nil
}

// nextBlockBytes returns the bytes of the next block across the stream of
// block files, ignoring the accompanying placement information.
func (s *blockStream) nextBlockBytes() ([]byte, error) {
	bytes, _, err := s.nextBlockBytesAndPlacementInfo()
	if err != nil {
		return nil, err
	}
	return bytes, nil
}

func (s *blockStream) nextBlockBytesAndPlacementInfo() ([]byte, *blockPlacementInfo, error) {
var blockBytes []byte
var blockPlacementInfo *blockPlacementInfo
var err error
if blockBytes, err = s.currentFileStream.nextBlockBytes(); err != nil {
if blockBytes, blockPlacementInfo, err = s.currentFileStream.nextBlockBytesAndPlacementInfo(); err != nil {
logger.Debugf("current file [%d]", s.currentFileNum)
logger.Debugf("blockbytes [%d]. Err:%s", len(blockBytes), err)
return nil, err
return nil, nil, err
}
logger.Debugf("blockbytes [%d] read from file [%d]", len(blockBytes), s.currentFileNum)
if blockBytes == nil && s.currentFileNum < s.endFileNum {
logger.Debugf("current file [%d] exhausted. Moving to next file", s.currentFileNum)
if err = s.moveToNextBlockfileStream(); err != nil {
return nil, err
return nil, nil, err
}
return s.nextBlockBytes()
return s.nextBlockBytesAndPlacementInfo()
}
return blockBytes, nil
return blockBytes, blockPlacementInfo, nil
}

func (s *blockStream) close() error {
Expand Down
4 changes: 2 additions & 2 deletions core/ledgernext/blkstorage/fsblkstorage/block_stream_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ func testBlockfileStream(t *testing.T, numBlocks int) {
w.addBlocks(blocks)
w.close()

s, err := newBlockfileStream(deriveBlockfilePath(blockfileMgr.rootDir, 0), 0)
s, err := newBlockfileStream(blockfileMgr.rootDir, 0, 0)
defer s.close()
testutil.AssertNoError(t, err, "Error in constructing blockfile stream")

Expand Down Expand Up @@ -80,7 +80,7 @@ func testBlockFileStreamUnexpectedEOF(t *testing.T, numBlocks int, partialBlockB
w.addBlocks(blocks)
blockfileMgr.currentFileWriter.append(partialBlockBytes, true)
w.close()
s, err := newBlockfileStream(deriveBlockfilePath(blockfileMgr.rootDir, 0), 0)
s, err := newBlockfileStream(blockfileMgr.rootDir, 0, 0)
defer s.close()
testutil.AssertNoError(t, err, "Error in constructing blockfile stream")

Expand Down
81 changes: 72 additions & 9 deletions core/ledgernext/blkstorage/fsblkstorage/blockfile_mgr.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ type blockfileMgr struct {
conf *Conf
db *db.DB
defaultCF *gorocksdb.ColumnFamilyHandle
index *blockIndex
index index
cpInfo *checkpointInfo
currentFileWriter *blockfileWriter
bcInfo atomic.Value
Expand Down Expand Up @@ -79,9 +79,11 @@ func newBlockfileMgr(conf *Conf) *blockfileMgr {
panic(fmt.Sprintf("Could not truncate current file to known size in db: %s", err))
}

mgr.index = newBlockIndex(db)
mgr.index = newBlockIndex(db, db.GetCFHandle(blockIndexCF))
mgr.cpInfo = cpInfo
mgr.currentFileWriter = currentFileWriter
mgr.syncIndex()

// init BlockchainInfo
bcInfo := &protos.BlockchainInfo{
Height: 0,
Expand Down Expand Up @@ -130,7 +132,8 @@ func updateCPInfo(conf *Conf, cpInfo *checkpointInfo) {
// check point info is in sync with the file on disk
return
}
endOffsetLastBlock, numBlocks, err := scanForLastCompleteBlock(filePath, int64(cpInfo.latestFileChunksize))
endOffsetLastBlock, numBlocks, err := scanForLastCompleteBlock(
rootDir, cpInfo.latestFileChunkSuffixNum, int64(cpInfo.latestFileChunksize))
if err != nil {
panic(fmt.Sprintf("Could not open current file for detecting last block in the file: %s", err))
}
Expand Down Expand Up @@ -217,11 +220,73 @@ func (mgr *blockfileMgr) addBlock(block *protos.Block2) error {
}
blockFLP := &fileLocPointer{fileSuffixNum: mgr.cpInfo.latestFileChunkSuffixNum}
blockFLP.offset = currentOffset
mgr.index.indexBlock(mgr.cpInfo.lastBlockNumber, blockHash, blockFLP, blockBytesLen, len(blockBytesEncodedLen), txOffsets)
// shift the txoffset because we prepend length of bytes before block bytes
for i := 0; i < len(txOffsets); i++ {
txOffsets[i] += len(blockBytesEncodedLen)
}
mgr.index.indexBlock(&blockIdxInfo{
blockNum: mgr.cpInfo.lastBlockNumber, blockHash: blockHash,
flp: blockFLP, txOffsets: txOffsets})
mgr.updateBlockchainInfo(blockHash, block)
return nil
}

// syncIndex brings the block index up to date with the block storage.
// It replays blocks from the last indexed position (or from the very first
// block file if nothing has been indexed yet) and re-indexes each block
// found, so that an index that lags behind the files on disk — e.g. after a
// crash between a file write and the index update — is repaired at startup.
func (mgr *blockfileMgr) syncIndex() error {
var lastBlockIndexed uint64
var err error
// Ask the index how far it has progressed; 0 means no block indexed yet.
if lastBlockIndexed, err = mgr.index.getLastBlockIndexed(); err != nil {
return err
}
// Defaults assume we must scan from the beginning of the first block file.
startFileNum := 0
startOffset := 0
blockNum := uint64(1)
endFileNum := mgr.cpInfo.latestFileChunkSuffixNum
if lastBlockIndexed != 0 {
// Resume from the location of the last indexed block.
// NOTE(review): the scan starts AT the last indexed block (blockNum is
// set to lastBlockIndexed, not lastBlockIndexed+1), so that block
// appears to be re-indexed once — presumably indexing is idempotent;
// confirm against the index implementation.
var flp *fileLocPointer
if flp, err = mgr.index.getBlockLocByBlockNum(lastBlockIndexed); err != nil {
return err
}
startFileNum = flp.fileSuffixNum
startOffset = flp.locPointer.offset
blockNum = lastBlockIndexed
}

// Stream blocks sequentially from the resume point to the latest file.
var stream *blockStream
if stream, err = newBlockStream(mgr.rootDir, startFileNum, int64(startOffset), endFileNum); err != nil {
return err
}
var blockBytes []byte
var blockPlacementInfo *blockPlacementInfo

for {
if blockBytes, blockPlacementInfo, err = stream.nextBlockBytesAndPlacementInfo(); err != nil {
return err
}
// nil bytes with nil error signals end of the stream.
if blockBytes == nil {
break
}
serBlock2 := protos.NewSerBlock2(blockBytes)
var txOffsets []int
if txOffsets, err = serBlock2.GetTxOffsets(); err != nil {
return err
}
// Tx offsets are relative to the block bytes; shift them so they are
// relative to the start of the file (past the varint length prefix).
for i := 0; i < len(txOffsets); i++ {
txOffsets[i] += int(blockPlacementInfo.blockBytesOffset)
}
// Build the index entry for this block and record it.
blockIdxInfo := &blockIdxInfo{}
blockIdxInfo.blockHash = serBlock2.ComputeHash()
blockIdxInfo.blockNum = blockNum
blockIdxInfo.flp = &fileLocPointer{fileSuffixNum: blockPlacementInfo.fileNum,
locPointer: locPointer{offset: int(blockPlacementInfo.blockStartOffset)}}
blockIdxInfo.txOffsets = txOffsets
if err = mgr.index.indexBlock(blockIdxInfo); err != nil {
return err
}
blockNum++
}
return nil
}

func (mgr *blockfileMgr) getBlockchainInfo() *protos.BlockchainInfo {
return mgr.bcInfo.Load().(*protos.BlockchainInfo)
}
Expand Down Expand Up @@ -320,8 +385,7 @@ func (mgr *blockfileMgr) fetchTransaction(lp *fileLocPointer) (*protos.Transacti
}

func (mgr *blockfileMgr) fetchBlockBytes(lp *fileLocPointer) ([]byte, error) {
filePath := deriveBlockfilePath(mgr.rootDir, lp.fileSuffixNum)
stream, err := newBlockfileStream(filePath, int64(lp.offset))
stream, err := newBlockfileStream(mgr.rootDir, lp.fileSuffixNum, int64(lp.offset))
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -378,10 +442,9 @@ func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, flush bool) error {
return nil
}

func scanForLastCompleteBlock(filePath string, startingOffset int64) (int64, int, error) {
logger.Debugf("scanForLastCompleteBlock(): filePath=[%s], startingOffset=[%d]", filePath, startingOffset)
func scanForLastCompleteBlock(rootDir string, fileNum int, startingOffset int64) (int64, int, error) {
numBlocks := 0
blockStream, err := newBlockfileStream(filePath, startingOffset)
blockStream, err := newBlockfileStream(rootDir, fileNum, startingOffset)
if err != nil {
return 0, 0, err
}
Expand Down

0 comments on commit 910e496

Please sign in to comment.