Skip to content

Commit

Permalink
Merge branch 'develop' into ethan/mnt-37
Browse files Browse the repository at this point in the history
# Conflicts:
#	mt-batcher/services/sequencer/driver.go
  • Loading branch information
Ethanncnm committed Jul 2, 2023
2 parents ce4e99e + cff85de commit e1710a4
Show file tree
Hide file tree
Showing 21 changed files with 144 additions and 124 deletions.
13 changes: 0 additions & 13 deletions mt-batcher/common/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,7 @@ package common

import (
"fmt"
"github.com/Layr-Labs/datalayr/common/graphView"
"github.com/Layr-Labs/datalayr/common/header"
"github.com/ethereum/go-ethereum/crypto"
"math/big"
"os"
)
Expand Down Expand Up @@ -33,17 +31,6 @@ func CreateUploadHeader(params StoreParams) ([]byte, error) {
return uploadHeader, nil
}

// GetMessageHash derives the Keccak-256 digest identifying a DataStoreInit
// event. The preimage concatenates, in order: the store number (4 bytes),
// the data commitment, the duration as a single byte, the init time packed
// via packTo to 32 bytes, and the index (4 bytes).
func GetMessageHash(event graphView.DataStoreInit) []byte {
	var preimage []byte
	preimage = append(preimage, uint32ToByteSlice(event.StoreNumber)...)
	preimage = append(preimage, event.DataCommitment[:]...)
	preimage = append(preimage, byte(event.Duration))
	preimage = append(preimage, packTo(uint32ToByteSlice(event.InitTime), 32)...)
	preimage = append(preimage, uint32ToByteSlice(event.Index)...)
	return crypto.Keccak256(preimage)
}

func uint32ToByteSlice(x uint32) []byte {
res := make([]byte, 4)
res[0] = byte(x >> 24)
Expand Down
6 changes: 3 additions & 3 deletions mt-batcher/services/sequencer/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ func (d *Driver) StoreData(ctx context.Context, uploadHeader []byte, duration ui
return tx, nil

case d.IsMaxPriorityFeePerGasNotFoundError(err):
log.Warn("MtBather eth_maxPriorityFeePerGas is unsupported by current backend, using fallback gasTipCap")
log.Warn("MtBatcher eth_maxPriorityFeePerGas is unsupported by current backend, using fallback gasTipCap")
opts.GasTipCap = common4.FallbackGasTipCap
return d.Cfg.EigenDaContract.StoreData(opts, uploadHeader, duration, blockNumber, startL2BlockNumber, endL2BlockNumber, totalOperatorsIndex, isReRollup)

Expand Down Expand Up @@ -407,7 +407,7 @@ func (d *Driver) ConfirmData(ctx context.Context, callData []byte, searchData rc
return tx, nil

case d.IsMaxPriorityFeePerGasNotFoundError(err):
log.Warn("MtBather eth_maxPriorityFeePerGas is unsupported by current backend, using fallback gasTipCap")
log.Warn("MtBatcher eth_maxPriorityFeePerGas is unsupported by current backend, using fallback gasTipCap")
opts.GasTipCap = common4.FallbackGasTipCap
return d.Cfg.EigenDaContract.ConfirmData(opts, callData, searchData, startL2BlockNumber, endL2BlockNumber, originDataStoreId, reConfirmedBatchIndex, isReRollup)

Expand Down Expand Up @@ -658,7 +658,7 @@ func (d *Driver) UpdateFee(ctx context.Context, l2Block, daFee *big.Int) (*types
case err == nil:
return tx, nil
case d.IsMaxPriorityFeePerGasNotFoundError(err):
log.Warn("MtBather eth_maxPriorityFeePerGas is unsupported by current backend, using fallback gasTipCap")
log.Warn("MtBatcher eth_maxPriorityFeePerGas is unsupported by current backend, using fallback gasTipCap")
opts.GasTipCap = common4.FallbackGasTipCap
return d.Cfg.EigenFeeContract.SetRollupFee(opts, l2Block, daFee)
default:
Expand Down
23 changes: 10 additions & 13 deletions packages/contracts/contracts/da/BVM_EigenDataLayrChain.sol
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ contract BVM_EigenDataLayrChain is Initializable, OwnableUpgradeable, Reentrancy
event RollupBatchIndexUpdated(uint256 oldRollupBatchIndex, uint256 newRollupBatchIndex);
event L2ConfirmedBlockNumberUpdated(uint256 oldL2ConfirmedBlockNumber, uint256 newL2ConfirmedBlockNumber);
event DataLayrManagerAddressUpdated(address oldDataLayrManagerAddress, address newDataLayrManagerAddress);
event ResetRollupBatchData(uint256 rollupBatchIndex, uint256 l2StoredBlockNumber, uint256 l2ConfirmedBlockNumber);

constructor() {
_disableInitializers();
Expand Down Expand Up @@ -136,16 +137,7 @@ contract BVM_EigenDataLayrChain is Initializable, OwnableUpgradeable, Reentrancy
require(_address != address(0), "setFraudProofAddress: address is the zero address");
fraudProofWhitelist[_address] = true;
}

/**
* @notice unavailable fraud proof address
* @param _address for fraud proof
*/
function unavailableFraudProofAddress(address _address) external onlySequencer {
require(_address != address(0), "unavailableFraudProofAddress: unavailableFraudProofAddress: address is the zero address");
fraudProofWhitelist[_address] = false;
}


/**
* @notice remove fraud proof address
* @param _address for fraud proof
Expand All @@ -160,6 +152,10 @@ contract BVM_EigenDataLayrChain is Initializable, OwnableUpgradeable, Reentrancy
* @param _fraudProofPeriod fraud proof period
*/
function updateFraudProofPeriod(uint256 _fraudProofPeriod) external onlySequencer {
// MantleDa data validity period is at least one hour
require(_fraudProofPeriod >= 3600, "updateFraudProofPeriod: _fraudProofPeriod need more than one hour");
// MantleDa data validity max period seven hour
require(_fraudProofPeriod <= 25200, "updateFraudProofPeriod: _fraudProofPeriod need less than seven hour");
uint256 oldFraudProofPeriod = fraudProofPeriod;
fraudProofPeriod = _fraudProofPeriod;
emit FraudProofPeriodUpdated(oldFraudProofPeriod, fraudProofPeriod);
Expand Down Expand Up @@ -226,13 +222,14 @@ contract BVM_EigenDataLayrChain is Initializable, OwnableUpgradeable, Reentrancy
* @notice reset batch rollup batch data
* @param _rollupBatchIndex update rollup index
*/
/**
 * @notice Rewinds the rollup state to an earlier batch: clears every stored
 *         batch at or above the target index and resets the L2 block markers.
 * @param _rollupBatchIndex       batch index to rewind the chain to
 * @param _l2StoredBlockNumber    value to reset l2StoredBlockNumber to
 * @param _l2ConfirmedBlockNumber value to reset l2ConfirmedBlockNumber to
 */
function resetRollupBatchData(uint256 _rollupBatchIndex, uint256 _l2StoredBlockNumber, uint256 _l2ConfirmedBlockNumber) external onlySequencer {
    // Delete all batches from the rewind target up to the current head.
    for (uint256 i = _rollupBatchIndex; i < rollupBatchIndex; i++) {
        delete rollupBatchIndexRollupStores[i];
    }
    rollupBatchIndex = _rollupBatchIndex;
    l2StoredBlockNumber = _l2StoredBlockNumber;
    l2ConfirmedBlockNumber = _l2ConfirmedBlockNumber;
    emit ResetRollupBatchData(_rollupBatchIndex, _l2StoredBlockNumber, _l2ConfirmedBlockNumber);
}

/**
Expand Down
1 change: 1 addition & 0 deletions packages/contracts/contracts/da/BVM_EigenDataLayrFee.sol
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ contract BVM_EigenDataLayrFee is Initializable, OwnableUpgradeable, ReentrancyGu
}

// Updates the per-user rollup DA fee and records the change on-chain.
// Restricted by the onlyGasFee modifier; a zero fee is rejected.
// _l2Block: L2 block number emitted alongside the fee for history tracking.
// _userRollupFee: new fee value; must be strictly greater than zero.
function setRollupFee(uint256 _l2Block, uint256 _userRollupFee) public onlyGasFee {
require(_userRollupFee > 0, "BVM_EigenDataLayrFee:setRollupFee _userRollupFee is zero");
userRollupFee = _userRollupFee;
// Emit the (block, fee) pair so off-chain services can reconstruct fee history.
emit RollupFeeHistory(_l2Block, _userRollupFee);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,10 @@ contract L1StandardBridgeUpgrade is IL1StandardBridge, CrossDomainEnabled {
// withdrawals. The use of safeTransferFrom enables support of "broken tokens" which do not
// return a boolean value.
// slither-disable-next-line reentrancy-events, reentrancy-benign
uint256 expectedTransferBalance = IERC20(_l1Token).balanceOf(address(this)) + _amount;
IERC20(_l1Token).safeTransferFrom(_from, address(this), _amount);
uint256 postTransferBalance = IERC20(_l1Token).balanceOf(address(this));
require(expectedTransferBalance == postTransferBalance,"Fee on transfer tokens not supported");

// Construct calldata for _l2Token.finalizeDeposit(_to, _amount)
bytes memory message;
Expand Down
7 changes: 4 additions & 3 deletions tss/manager/agreement.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,18 @@ import (
"encoding/json"
"errors"
"fmt"
"sync"

"github.com/influxdata/influxdb/pkg/slices"
"github.com/mantlenetworkio/mantle/l2geth/log"
tss "github.com/mantlenetworkio/mantle/tss/common"
"github.com/mantlenetworkio/mantle/tss/manager/types"
"github.com/mantlenetworkio/mantle/tss/ws/server"
tmjson "github.com/tendermint/tendermint/libs/json"
tmtypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"sync"
)

func (m Manager) agreement(ctx types.Context, request interface{}, method tss.Method) (types.Context, error) {
func (m *Manager) agreement(ctx types.Context, request interface{}, method tss.Method) (types.Context, error) {
respChan := make(chan server.ResponseMsg)
stopChan := make(chan struct{})
if err := m.wsServer.RegisterResChannel("ASK_"+ctx.RequestId(), respChan, stopChan); err != nil {
Expand Down Expand Up @@ -120,7 +121,7 @@ func (m Manager) agreement(ctx types.Context, request interface{}, method tss.Me
return ctx, nil
}

func (m Manager) askNodes(ctx types.Context, request []byte, method tss.Method, stopChan chan struct{}, errSendChan chan struct{}) {
func (m *Manager) askNodes(ctx types.Context, request []byte, method tss.Method, stopChan chan struct{}, errSendChan chan struct{}) {
log.Info("start to sendTonNodes", "number", len(ctx.AvailableNodes()))
nodes := ctx.AvailableNodes()
for i := range nodes {
Expand Down
6 changes: 3 additions & 3 deletions tss/manager/keygen.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ import (
tmtypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

func (m Manager) observeElection() {
func (m *Manager) observeElection() {

queryTicker := time.NewTicker(m.taskInterval + 30*time.Second)
for {
Expand Down Expand Up @@ -79,7 +79,7 @@ func (m Manager) observeElection() {
}
}

func (m Manager) generateKey(tssMembers []string, threshold int) (string, error) {
func (m *Manager) generateKey(tssMembers []string, threshold int) (string, error) {
availableNodes := m.availableNodes(tssMembers)
if len(availableNodes) < len(tssMembers) {
return "", errors.New("not enough available nodes to generate CPK")
Expand Down Expand Up @@ -162,7 +162,7 @@ func (m Manager) generateKey(tssMembers []string, threshold int) (string, error)
return base, nil
}

func (m Manager) callKeygen(availableNodes []string, threshold int, requestId string, sendError chan struct{}) {
func (m *Manager) callKeygen(availableNodes []string, threshold int, requestId string, sendError chan struct{}) {
for _, node := range availableNodes {
nodeRequest := tss.KeygenRequest{
Nodes: availableNodes,
Expand Down
48 changes: 24 additions & 24 deletions tss/manager/manage.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,55 +59,55 @@ type Manager struct {
func NewManager(wsServer server.IWebsocketManager,
tssQueryService types.TssQueryService,
store types.ManagerStore,
config tss.Configuration) (Manager, error) {
config tss.Configuration) (*Manager, error) {
taskIntervalDur, err := time.ParseDuration(config.TimedTaskInterval)
if err != nil {
return Manager{}, err
return nil, err
}
receiptConfirmTimeoutDur, err := time.ParseDuration(config.L1ReceiptConfirmTimeout)
if err != nil {
return Manager{}, err
return nil, err
}
keygenTimeoutDur, err := time.ParseDuration(config.Manager.KeygenTimeout)
if err != nil {
return Manager{}, err
return nil, err
}
cpkConfirmTimeoutDur, err := time.ParseDuration(config.Manager.CPKConfirmTimeout)
if err != nil {
return Manager{}, err
return nil, err
}
askTimeoutDur, err := time.ParseDuration(config.Manager.AskTimeout)
if err != nil {
return Manager{}, err
return nil, err
}
signTimeoutDur, err := time.ParseDuration(config.Manager.SignTimeout)
if err != nil {
return Manager{}, err
return nil, err
}

l1Cli, err := ethclient.Dial(config.L1Url)
if err != nil {
return Manager{}, err
return nil, err
}
tssStakingSlashingCaller, err := tsh.NewTssStakingSlashingCaller(common.HexToAddress(config.TssStakingSlashContractAddress), l1Cli)
if err != nil {
return Manager{}, err
return nil, err
}
tssGroupManagerCaller, err := tgm.NewTssGroupManagerCaller(common.HexToAddress(config.TssGroupContractAddress), l1Cli)
if err != nil {
return Manager{}, err
return nil, err
}
privKey, err := crypto.HexToECDSA(config.Manager.PrivateKey)
if err != nil {
return Manager{}, err
return nil, err
}

chainId, err := l1Cli.ChainID(context.Background())
if err != nil {
return Manager{}, err
return nil, err
}

return Manager{
return &Manager{
wsServer: wsServer,
tssQueryService: tssQueryService,
store: store,
Expand All @@ -134,25 +134,25 @@ func NewManager(wsServer server.IWebsocketManager,
}

// Start launch a manager
func (m Manager) Start() {
func (m *Manager) Start() {
log.Info("manager is starting......")
go m.observeElection()
go m.slashing()
}

func (m Manager) Stop() {
func (m *Manager) Stop() {
close(m.stopChan)
}

func (m Manager) stopGenerateKey() {
func (m *Manager) stopGenerateKey() {
m.stopGenKey = true
}

func (m Manager) recoverGenerateKey() {
func (m *Manager) recoverGenerateKey() {
m.stopGenKey = false
}

func (m Manager) SignStateBatch(request tss.SignStateRequest) ([]byte, error) {
func (m *Manager) SignStateBatch(request tss.SignStateRequest) ([]byte, error) {
log.Info("received sign state request", "start block", request.StartBlock, "len", len(request.StateRoots), "index", request.OffsetStartsAtIndex)
offsetStartsAtIndex, _ := new(big.Int).SetString(request.OffsetStartsAtIndex, 10)
digestBz, err := tss.StateBatchHash(request.StateRoots, offsetStartsAtIndex)
Expand Down Expand Up @@ -284,7 +284,7 @@ func (m Manager) SignStateBatch(request tss.SignStateRequest) ([]byte, error) {
return responseBytes, nil
}

func (m Manager) SignRollBack(request tss.SignStateRequest) ([]byte, error) {
func (m *Manager) SignRollBack(request tss.SignStateRequest) ([]byte, error) {
log.Info("received roll back request", "request", request.String())

tssInfo, err := m.tssQueryService.QueryActiveInfo()
Expand Down Expand Up @@ -338,11 +338,11 @@ func (m Manager) SignRollBack(request tss.SignStateRequest) ([]byte, error) {
return responseBytes, nil
}

func (m Manager) SignTxBatch() error {
func (m *Manager) SignTxBatch() error {
return errors.New("not support for now")
}

func (m Manager) availableNodes(tssMembers []string) []string {
func (m *Manager) availableNodes(tssMembers []string) []string {
aliveNodes := m.wsServer.AliveNodes()
m.metics.OnlineNodesCount.Set(float64(len(aliveNodes)))
log.Info("check available nodes", "expected", fmt.Sprintf("%v", tssMembers), "alive nodes", fmt.Sprintf("%v", aliveNodes))
Expand All @@ -362,7 +362,7 @@ func randomRequestId() string {
return time.Now().Format("20060102150405") + code
}

func (m Manager) afterSignStateBatch(ctx types.Context, stateBatch [][32]byte, absentNodes []string) error {
func (m *Manager) afterSignStateBatch(ctx types.Context, stateBatch [][32]byte, absentNodes []string) error {
batchRoot, err := tss.GetMerkleRoot(stateBatch)
if err != nil {
return err
Expand All @@ -380,15 +380,15 @@ func (m Manager) afterSignStateBatch(ctx types.Context, stateBatch [][32]byte, a
return nil
}

func (m Manager) getStateSignature(digestBz []byte) []byte {
func (m *Manager) getStateSignature(digestBz []byte) []byte {
m.sigCacheLock.RLock()
defer m.sigCacheLock.RUnlock()
var key [32]byte
copy(key[:], digestBz)
return m.stateSignatureCache[key]
}

func (m Manager) setStateSignature(digestBz []byte, sig []byte) {
func (m *Manager) setStateSignature(digestBz []byte, sig []byte) {
m.sigCacheLock.Lock()
defer m.sigCacheLock.Unlock()
var key [32]byte
Expand Down
4 changes: 2 additions & 2 deletions tss/manager/setup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ func (mock *mockWsManager) SendMsg(request server.RequestMsg) error {
return mock.afterMsgSent(request, mock.responseCh)
}

func setup(afterMsgSent afterMsgSendFunc, queryAliveNodes queryAliveNodesFunc) (Manager, tss.SignStateRequest) {
func setup(afterMsgSent afterMsgSendFunc, queryAliveNodes queryAliveNodesFunc) (*Manager, tss.SignStateRequest) {
mock := mockWsManager{
afterMsgSent: afterMsgSent,
queryAliveNodes: queryAliveNodes,
Expand All @@ -42,7 +42,7 @@ func setup(afterMsgSent afterMsgSendFunc, queryAliveNodes queryAliveNodesFunc) (
if err != nil {
panic(err)
}
manager := Manager{
manager := &Manager{
wsServer: &mock,
store: storage,

Expand Down
4 changes: 2 additions & 2 deletions tss/manager/sign.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func (c *Counter) satisfied(minNumber int) []string {
return ret
}

func (m Manager) sign(ctx types.Context, request interface{}, digestBz []byte, method tss.Method) (tss.SignResponse, []string, error) {
func (m *Manager) sign(ctx types.Context, request interface{}, digestBz []byte, method tss.Method) (tss.SignResponse, []string, error) {
respChan := make(chan server.ResponseMsg)
stopChan := make(chan struct{})

Expand Down Expand Up @@ -138,7 +138,7 @@ func (m Manager) sign(ctx types.Context, request interface{}, digestBz []byte, m
return *validSignResponse, culprits, nil
}

func (m Manager) sendToNodes(ctx types.Context, request interface{}, method tss.Method, errSendChan chan struct{}) {
func (m *Manager) sendToNodes(ctx types.Context, request interface{}, method tss.Method, errSendChan chan struct{}) {
nodes := ctx.Approvers()
nodeRequest := tss.NodeSignRequest{
ClusterPublicKey: ctx.TssInfos().ClusterPubKey,
Expand Down
Loading

0 comments on commit e1710a4

Please sign in to comment.