Progress on BlockRecordManager
jasperpotts committed May 17, 2023
1 parent 2ccd823 commit f6468a1
Showing 15 changed files with 1,436 additions and 159 deletions.
@@ -31,4 +31,26 @@ public interface NodeInfo {
* @return whether this node has zero stake.
*/
boolean isSelfZeroStake();


/**
* Convenience method to get this node's account number from the address book.
*
* @return this node's account number from the address book.
*/
long accountNum();

/**
* Convenience method to get the memo of this node's account, as recorded in the address book.
*
* @return this node's account memo
*/
String accountMemo();

/**
* True if the node was started in event stream recovery state.
*
* @return whether the node was started in event stream recovery state.
*/
boolean wasStartedInEventStreamRecovery();
}
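For orientation, here is a minimal sketch of how a consumer might use the new accessors; the method and variable names below are illustrative only, and the only behaviour grounded in this commit is that a node started in event stream recovery should not write new records until recovery completes (see BlockRecordManager further down).

// Illustrative consumer of the new NodeInfo accessors (names are hypothetical).
static String describeNode(final NodeInfo nodeInfo) {
    final long accountNum = nodeInfo.accountNum();     // node account number from the address book
    final String memo = nodeInfo.accountMemo();        // memo of that node's account
    final boolean recovering = nodeInfo.wasStartedInEventStreamRecovery();
    return "node account %d (%s), startedInRecovery=%b".formatted(accountNum, memo, recovering);
}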
@@ -19,6 +19,7 @@
import static java.util.Objects.requireNonNull;

import com.hedera.node.app.spi.info.NodeInfo;
import com.swirlds.common.system.InitTrigger;
import edu.umd.cs.findbugs.annotations.NonNull;

/**
@@ -27,19 +28,53 @@
public class MonoNodeInfo implements NodeInfo {

private final com.hedera.node.app.service.mono.context.NodeInfo delegate;
private final InitTrigger initTrigger;

/**
* Constructs a {@link MonoNodeInfo} with the given delegate.
*
* @param delegate the delegate
* @param initTrigger the init trigger for access to node starting state
* @throws NullPointerException if {@code delegate} or {@code initTrigger} is {@code null}
*/
public MonoNodeInfo(@NonNull com.hedera.node.app.service.mono.context.NodeInfo delegate) {
public MonoNodeInfo(@NonNull com.hedera.node.app.service.mono.context.NodeInfo delegate,
@NonNull final InitTrigger initTrigger) {
this.delegate = requireNonNull(delegate);
this.initTrigger = requireNonNull(initTrigger);
}

@Override
public boolean isSelfZeroStake() {
return delegate.isSelfZeroStake();
}

/**
* Convenience method to get this node's account number from the address book.
*
* @return this node's account number from the address book.
*/
@Override
public long accountNum() {
return delegate.selfAccount().getAccountNum();
}

/**
* Convenience method to get the memo of this node's account, as recorded in the address book.
*
* @return this node's account memo
*/
@Override
public String accountMemo() {
return "TEMP ACCOUNT MEMO"; // TODO where to get this from and what happens if it changes
}

/**
* True if the node was started in event stream recovery state.
*
* @return whether the node was started in event stream recovery state.
*/
@Override
public boolean wasStartedInEventStreamRecovery() {
return initTrigger == InitTrigger.EVENT_STREAM_RECOVERY;
}
}
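A minimal wiring sketch, assuming the platform's InitTrigger is available when the adapter is constructed; the factory method below is hypothetical, and only the two-argument constructor and the EVENT_STREAM_RECOVERY check come from this commit.

// Hypothetical wiring sketch, not module code from this commit.
static NodeInfo monoNodeInfoFor(
        final com.hedera.node.app.service.mono.context.NodeInfo monoDelegate,
        final InitTrigger initTrigger) {
    final NodeInfo nodeInfo = new MonoNodeInfo(monoDelegate, initTrigger);
    // wasStartedInEventStreamRecovery() is true exactly when the trigger is EVENT_STREAM_RECOVERY
    assert nodeInfo.wasStartedInEventStreamRecovery()
            == (initTrigger == InitTrigger.EVENT_STREAM_RECOVERY);
    return nodeInfo;
}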
@@ -0,0 +1,168 @@
package com.hedera.node.app.records;

import com.hedera.hapi.streams.RecordStreamItem;
import com.hedera.hapi.streams.TransactionSidecarRecord;
import com.hedera.node.app.records.store.BlockStateStore;
import com.hedera.node.app.spi.config.ConfigProvider;
import com.hedera.node.app.spi.info.NodeInfo;
import com.hedera.node.app.spi.records.SingleTransactionRecord;
import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.swirlds.common.crypto.DigestType;
import com.swirlds.common.crypto.Hash;
import com.swirlds.common.stream.Signer;
import edu.umd.cs.findbugs.annotations.NonNull;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import javax.inject.Inject;
import javax.inject.Singleton;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;

/**
* BlockRecordManager is a singleton facility that records transaction records into the record stream. It is responsible for:
*
* <ul>
* <li>Packaging transaction records into files and sending them for writing</li>
* <li>Managing the block number</li>
* <li>Managing running hashes</li>
* <li>Managing record state</li>
* </ul>
*/
@Singleton
public class BlockRecordManager {
private static final Logger log = LogManager.getLogger(BlockRecordManager.class);

public static final int UNKNOWN_BLOCK_NO = 0;

private final long blockPeriodInSeconds;
private final ConfigProvider configProvider;
private final StreamFileProducerBase streamFileProducer;
private final BlockStateStore blockStateStore;
private final ExecutorService executorService;
private boolean eventRecoveryCompleted = false;
private Instant provisionalCurrentBlockFirstTransactionTime = null;
// -------------------------------------------------------------------------------------------------------
// These fields are only accessed by the handle thread, so they do not need to be thread safe
private final List<SingleTransactionRecord> transactionRecords = new ArrayList<>();

/**
* Construct a BlockRecordManager using the default fork join pool.
*/
@Inject
public BlockRecordManager(ConfigProvider configProvider, NodeInfo nodeInfo, BlockStateStore blockStateStore, Signer signer) {
this(configProvider, nodeInfo, blockStateStore, ForkJoinPool.commonPool(), signer);
}

/**
* Construct a BlockRecordManager, supplying the executor service used for concurrent record stream file production.
*/
public BlockRecordManager(ConfigProvider configProvider, NodeInfo nodeInfo, BlockStateStore blockStateStore,
ExecutorService executorService, Signer signer) {
this.configProvider = configProvider;
this.streamFileProducer = new StreamFileProducerConcurrent(configProvider, nodeInfo, signer, executorService);
this.blockStateStore = blockStateStore;
this.executorService = executorService;
// check if we were started in event recovery mode and if event recovery needs to be completed before we write
// any new records to stream
this.eventRecoveryCompleted = !nodeInfo.wasStartedInEventStreamRecovery();
// get needed configuration; this configuration is assumed not to change while the node is running
RecordStreamConfig recordStreamConfig = configProvider.getConfiguration().getConfigData(RecordStreamConfig.class);
this.blockPeriodInSeconds = recordStreamConfig.logPeriod();
// start the StreamFileProducer off with current running hash
this.streamFileProducer.setRunningHash(blockStateStore.lastBlockHash());
}

// =======================================================================================================================
// Update methods

/**
* Inform BlockRecordManager of the new consensus time at the beginning of a new transaction. This should only be called before user
* transactions, when the workflow knows for certain that there will be no new transaction records for any consensus time prior to this one.
* <p>
* This allows BlockRecordManager to set up the correct block information for the user transaction that is about to be executed, so that
* block questions are answered correctly.
* <p>
* The BlockRecordManager may choose to close one or more files if the consensus time threshold has passed.
*
* @param consensusTime The consensus time of the user transaction we are about to start executing. It must be the adjusted consensus time,
*                      not the platform-assigned consensus time, assuming the two are different.
*/
public void startUserTransaction(Instant consensusTime) {
final long currentBlockPeriod = blockStateStore.firstConsTimeOfLastBlock().getEpochSecond() / blockPeriodInSeconds;
final long newBlockPeriod = consensusTime.getEpochSecond() / blockPeriodInSeconds;
if (newBlockPeriod > currentBlockPeriod) {
// we are in a new block, so close the previous one
// compute block hash of last block, this is the running hash after the last transaction record.
final Bytes lastBlockHashBytes = streamFileProducer.getCurrentRunningHash();
blockStateStore.addBlockHash(lastBlockHashBytes);
// update block number
final long lastBlockNo = blockStateStore.lastBlockNo() + 1;
blockStateStore.lastBlockNo(lastBlockNo);
// update the first transaction time of the last block
blockStateStore.firstConsTimeOfLastBlock(provisionalCurrentBlockFirstTransactionTime);
// close all stream files for end of block and create signature files, then open new block record file
streamFileProducer.switchBlocks(lastBlockNo, lastBlockNo + 1, consensusTime);
// log end of block if needed
RecordStreamConfig recordStreamConfig = configProvider.getConfiguration().getConfigData(RecordStreamConfig.class);
if (recordStreamConfig.logEveryTransaction()) {
log.info(
"""
--- BLOCK UPDATE ---
Finished: #{} @ {} with hash {}
Starting: #{} @ {}""",
lastBlockNo,
provisionalCurrentBlockFirstTransactionTime,
new Hash(PbjHelper.toByteArray(lastBlockHashBytes), DigestType.SHA_384),
lastBlockNo + 1,
consensusTime);
}
// clear provisionalCurrentBlockFirstTransactionTime so it will be recomputed for next block
provisionalCurrentBlockFirstTransactionTime = null;
}
// update current block first transaction time if new consensusTime is earlier
if (provisionalCurrentBlockFirstTransactionTime == null || provisionalCurrentBlockFirstTransactionTime.isAfter(consensusTime)) {
provisionalCurrentBlockFirstTransactionTime = consensusTime;
}
}
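// Worked example of the block-period arithmetic in startUserTransaction (the 2-second logPeriod is
// assumed purely for illustration and is not taken from this commit):
//   blockPeriodInSeconds = 2
//   firstConsTimeOfLastBlock at epoch second 1_000_000 -> currentBlockPeriod = 500_000
//   consensusTime at epoch second 1_000_001            -> newBlockPeriod     = 500_000 (same block, nothing closed)
//   consensusTime at epoch second 1_000_002            -> newBlockPeriod     = 500_001 (previous block is closed)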

/**
* Add a user transaction's records to the record stream. They must be in exact consensus time order! This must only be called
* after the user transaction has been committed to state and is 100% done.
*
* @param recordStreamItems Stream of records produced while handling the user transaction
* @param transactionSidecarRecords Stream of sidecar records produced while handling the user transaction
*/
public void endUserTransaction(@NonNull final Stream<RecordStreamItem> recordStreamItems,
@NonNull final Stream<TransactionSidecarRecord> transactionSidecarRecords) {
// check if we need to run event recovery before we can write any new records to stream
if (!this.eventRecoveryCompleted) {
// TODO create event recovery class and call it here
// TODO should this be in startUserTransaction()?
this.eventRecoveryCompleted = true;
}
// pass to record stream writer to handle
streamFileProducer.writeRecordStreamItems(blockStateStore.firstConsTimeOfLastBlock(), recordStreamItems, transactionSidecarRecords);
}


// =======================================================================================================================
// Private methods
//
// private long finishBlock(final Bytes ethHash, final Instant firstConsTimeOfNewBlock) {
//
//
// if (blockHashes.size() == NUM_BLOCK_HASHES_TO_KEEP) {
// blockHashes.poll();
// }
// blockHashes.add(new BytesElement(ethHash.toArrayUnsafe()));
// blockNo++;
// firstConsTimeOfCurrentBlock = firstConsTimeOfNewBlock;
// return blockNo;
// }
}
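To make the intended call order concrete, here is a hedged sketch of a caller driving BlockRecordManager from the handle workflow; the workflow class is hypothetical, and only the two public methods and their documented ordering come from this commit.

// Hypothetical caller, sketched from the javadoc contracts above.
public final class HandleWorkflowSketch {
    private final BlockRecordManager blockRecordManager;

    public HandleWorkflowSketch(final BlockRecordManager blockRecordManager) {
        this.blockRecordManager = blockRecordManager;
    }

    public void handle(final Instant adjustedConsensusTime,
                       final Stream<RecordStreamItem> records,
                       final Stream<TransactionSidecarRecord> sidecars) {
        // 1. Announce the (adjusted) consensus time before executing the user transaction,
        //    so the manager can roll blocks and answer block questions correctly.
        blockRecordManager.startUserTransaction(adjustedConsensusTime);
        // 2. Execute the user transaction and commit its effects to state here.
        // 3. Only after the transaction is fully committed, hand its records and sidecars to the stream.
        blockRecordManager.endUserTransaction(records, sidecars);
    }
}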
