diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..364162adc7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +vendor +data \ No newline at end of file diff --git a/blockchain/beaconpdeprocess.go b/blockchain/beaconpdeprocess.go index 46c35e42be..43707e4cd4 100644 --- a/blockchain/beaconpdeprocess.go +++ b/blockchain/beaconpdeprocess.go @@ -103,9 +103,11 @@ func (blockchain *BlockChain) processPDEContributionV2(pdexStateDB *statedb.Stat Amount: waitingContribution.ContributedAmount, TxReqID: waitingContribution.TxReqID, } + contribStatus := metadata.PDEContributionStatus{ Status: byte(common.PDEContributionWaitingStatus), } + contribStatusBytes, _ := json.Marshal(contribStatus) err = statedb.TrackPDEContributionStatus( pdexStateDB, diff --git a/blockchain/beaconpdeprocess_test.go b/blockchain/beaconpdeprocess_test.go index bf3a40cc8e..f43c7ab048 100644 --- a/blockchain/beaconpdeprocess_test.go +++ b/blockchain/beaconpdeprocess_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "github.com/incognitochain/incognito-chain/dataaccessobject/rawdbv2" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" "strconv" "testing" @@ -108,7 +109,7 @@ func (suite *PDEProcessSuite) TestPDEContributionOnUnexistedWaitingUniqID() { ) beaconHeight := uint64(1001) bc := &BlockChain{} - err := bc.processPDEContributionV2(beaconHeight-1, contribInsts[0], suite.currentPDEState) + err := bc.processPDEContributionV2(&statedb.StateDB{}, beaconHeight-1, contribInsts[0], suite.currentPDEState) suite.Equal(err, nil) waitingContribKey := string(rawdbv2.BuildWaitingPDEContributionKey( beaconHeight-1, @@ -150,7 +151,7 @@ func (suite *PDEProcessSuite) TestPDEContributionOnUnexistedPairForExistedWaitin contribToken2IDStr, ) bc := &BlockChain{} - err := bc.processPDEContributionV2(beaconHeight-1, contribInsts[0], suite.currentPDEState) + err := bc.processPDEContributionV2(&statedb.StateDB{}, beaconHeight-1, contribInsts[0], suite.currentPDEState) 
suite.Equal(err, nil) _, found := currentPDEState.WaitingPDEContributions[existedWaitingContribKey] suite.Equal(found, false) @@ -261,7 +262,7 @@ func (suite *PDEProcessSuite) TestPDEContributionOnExistedPairForExistedWaitingU contribToken2IDStr, ) bc := &BlockChain{} - err := bc.processPDEContributionV2(beaconHeight-1, contribInsts[0], suite.currentPDEState) + err := bc.processPDEContributionV2(&statedb.StateDB{}, beaconHeight-1, contribInsts[0], suite.currentPDEState) suite.Equal(err, nil) newWaitingPDEContributions := suite.currentPDEState.WaitingPDEContributions suite.Equal(len(newWaitingPDEContributions), 1) diff --git a/blockchain/beaconportalliquidationprocess.go b/blockchain/beaconportalliquidationprocess.go new file mode 100644 index 0000000000..d16dffe14c --- /dev/null +++ b/blockchain/beaconportalliquidationprocess.go @@ -0,0 +1,582 @@ +package blockchain + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" +) + +func (blockchain *BlockChain) processPortalLiquidateCustodian( + stateDB *statedb.StateDB, + beaconHeight uint64, instructions []string, + currentPortalState *CurrentPortalState) error { + + // unmarshal instructions content + var actionData metadata.PortalLiquidateCustodianContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err) + return nil + } + + // get pTokenID from actionData + pTokenID := actionData.TokenID + + reqStatus := instructions[2] + if reqStatus == common.PortalLiquidateCustodianSuccessChainStatus { + // update custodian state (total collateral, holding public tokens, locked amount, free collateral) + cusStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.CustodianIncAddressStr) + cusStateKeyStr := 
cusStateKey.String() + custodianState := currentPortalState.CustodianPoolState[cusStateKeyStr] + + if custodianState.GetTotalCollateral() < actionData.MintedCollateralAmount || + custodianState.GetLockedAmountCollateral()[pTokenID] < actionData.MintedCollateralAmount { + Logger.log.Errorf("[checkAndBuildInstForCustodianLiquidation] Total collateral %v, locked amount %v "+ + "should be greater than minted amount %v\n: ", + custodianState.GetTotalCollateral(), custodianState.GetLockedAmountCollateral()[pTokenID], actionData.MintedCollateralAmount) + return fmt.Errorf("[checkAndBuildInstForCustodianLiquidation] Total collateral %v, locked amount %v "+ + "should be greater than minted amount %v\n: ", + custodianState.GetTotalCollateral(), custodianState.GetLockedAmountCollateral()[pTokenID], actionData.MintedCollateralAmount) + } + + updateCustodianStateAfterLiquidateCustodian(custodianState, actionData.MintedCollateralAmount, pTokenID) + + // remove matching custodian from matching custodians list in waiting redeem request + waitingRedeemReqKey := statedb.GenerateWaitingRedeemRequestObjectKey(beaconHeight, actionData.UniqueRedeemID) + waitingRedeemReqKeyStr := waitingRedeemReqKey.String() + + updatedCustodians, _ := removeCustodianFromMatchingRedeemCustodians( + currentPortalState.WaitingRedeemRequests[waitingRedeemReqKeyStr].GetCustodians(), actionData.CustodianIncAddressStr) + currentPortalState.WaitingRedeemRequests[waitingRedeemReqKeyStr].SetCustodians(updatedCustodians) + + // remove redeem request from waiting redeem requests list + if len(currentPortalState.WaitingRedeemRequests[waitingRedeemReqKeyStr].GetCustodians()) == 0 { + deleteWaitingRedeemRequest(currentPortalState, waitingRedeemReqKeyStr) + + // update status of redeem request with redeemID to liquidated status + err = updateRedeemRequestStatusByRedeemId(actionData.UniqueRedeemID, common.PortalRedeemReqLiquidatedStatus, stateDB) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while 
updating redeem request status by redeemID: %+v", err) + return nil + } + } + + // track liquidation custodian status by redeemID and custodian address into DB + custodianLiquidationTrackData := metadata.PortalLiquidateCustodianStatus{ + Status: common.PortalLiquidateCustodianSuccessStatus, + UniqueRedeemID: actionData.UniqueRedeemID, + TokenID: actionData.TokenID, + RedeemPubTokenAmount: actionData.RedeemPubTokenAmount, + MintedCollateralAmount: actionData.MintedCollateralAmount, + RedeemerIncAddressStr: actionData.RedeemerIncAddressStr, + CustodianIncAddressStr: actionData.CustodianIncAddressStr, + LiquidatedByExchangeRate: actionData.LiquidatedByExchangeRate, + ShardID: actionData.ShardID, + LiquidatedBeaconHeight: beaconHeight + 1, + } + custodianLiquidationTrackDataBytes, _ := json.Marshal(custodianLiquidationTrackData) + err = statedb.StorePortalLiquidationCustodianRunAwayStatus( + stateDB, + actionData.UniqueRedeemID, + actionData.CustodianIncAddressStr, + custodianLiquidationTrackDataBytes, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while tracking liquidation custodian: %+v", err) + return nil + } + + } else if reqStatus == common.PortalLiquidateCustodianFailedChainStatus { + // track liquidation custodian status by redeemID and custodian address into DB + custodianLiquidationTrackData := metadata.PortalLiquidateCustodianStatus{ + Status: common.PortalLiquidateCustodianFailedStatus, + UniqueRedeemID: actionData.UniqueRedeemID, + TokenID: actionData.TokenID, + RedeemPubTokenAmount: actionData.RedeemPubTokenAmount, + MintedCollateralAmount: actionData.MintedCollateralAmount, + RedeemerIncAddressStr: actionData.RedeemerIncAddressStr, + CustodianIncAddressStr: actionData.CustodianIncAddressStr, + LiquidatedByExchangeRate: actionData.LiquidatedByExchangeRate, + ShardID: actionData.ShardID, + LiquidatedBeaconHeight: beaconHeight + 1, + } + custodianLiquidationTrackDataBytes, _ := json.Marshal(custodianLiquidationTrackData) + err = 
statedb.StorePortalLiquidationCustodianRunAwayStatus( + stateDB, + actionData.UniqueRedeemID, + actionData.CustodianIncAddressStr, + custodianLiquidationTrackDataBytes, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while tracking liquidation custodian: %+v", err) + return nil + } + } + + return nil +} + +func (blockchain *BlockChain) processLiquidationTopPercentileExchangeRates(portalStateDB *statedb.StateDB, beaconHeight uint64, instructions []string, + currentPortalState *CurrentPortalState) error { + + // unmarshal instructions content + var actionData metadata.PortalLiquidateTopPercentileExchangeRatesContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err) + return nil + } + + Logger.log.Infof("start processLiquidationTopPercentileExchangeRates with data %#v", actionData) + + keyExchangeRate := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRate, ok := currentPortalState.FinalExchangeRatesState[keyExchangeRate.String()] + if !ok { + Logger.log.Errorf("Exchange rate not found", err) + return nil + } + + cusStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.CustodianAddress) + cusStateKeyStr := cusStateKey.String() + custodianState, ok := currentPortalState.CustodianPoolState[cusStateKeyStr] + + //todo: check custodian exist on db + if !ok { + Logger.log.Errorf("Custodian not found") + return nil + } + + reqStatus := instructions[2] + if reqStatus == common.PortalLiquidateTPExchangeRatesSuccessChainStatus { + //validation + Logger.log.Infof("custodian address %v, hold ptoken %+v, lock amount %+v", custodianState.GetIncognitoAddress(), custodianState.GetHoldingPublicTokens(), custodianState.GetLockedAmountCollateral()) + detectTPExchangeRates, err := calculateTPRatio(custodianState.GetHoldingPublicTokens(), custodianState.GetLockedAmountCollateral(), 
exchangeRate) + if err != nil { + Logger.log.Errorf("Detect tp ratio error %v", err) + return nil + } + + detectTp, err := detectTopPercentileLiquidation(custodianState, detectTPExchangeRates) + + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while Get liquidate exchange rates change error %v", err) + return nil + } + + if len(detectTp) > 0 { + //update current portal state + Logger.log.Infof("start update liquidation %#v", currentPortalState) + updateCurrentPortalStateOfLiquidationExchangeRates(beaconHeight, currentPortalState, cusStateKeyStr, custodianState, detectTp) + Logger.log.Infof("end update liquidation %#v", currentPortalState) + + //save db + beaconHeightBytes := []byte(fmt.Sprintf("%d-", beaconHeight)) + newTPKey := beaconHeightBytes + newTPKey = append(newTPKey, []byte(custodianState.GetIncognitoAddress())...) + + newTPExchangeRates := metadata.NewLiquidateTopPercentileExchangeRatesStatus( + custodianState.GetIncognitoAddress(), + common.PortalLiquidationTPExchangeRatesSuccessStatus, + detectTp, + ) + + contentStatusBytes, _ := json.Marshal(newTPExchangeRates) + err = statedb.TrackPortalStateStatusMultiple( + portalStateDB, + statedb.PortalLiquidationTpExchangeRatesStatusPrefix(), + newTPKey, + contentStatusBytes, + beaconHeight, + ) + + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while store liquidation TP exchange rates %v", err) + return nil + } + } + } else if reqStatus == common.PortalLiquidateTPExchangeRatesFailedChainStatus { + beaconHeightBytes := []byte(fmt.Sprintf("%d-", beaconHeight)) + newTPKey := beaconHeightBytes + newTPKey = append(newTPKey, []byte(custodianState.GetIncognitoAddress())...) 
+ + newTPExchangeRates := metadata.NewLiquidateTopPercentileExchangeRatesStatus( + custodianState.GetIncognitoAddress(), + common.PortalLiquidationTPExchangeRatesFailedStatus, + nil, + ) + + contentStatusBytes, _ := json.Marshal(newTPExchangeRates) + err = statedb.TrackPortalStateStatusMultiple( + portalStateDB, + statedb.PortalLiquidationTpExchangeRatesStatusPrefix(), + newTPKey, + contentStatusBytes, + beaconHeight, + ) + + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while store liquidation TP exchange rates %v", err) + return nil + } + } + + return nil +} + +func (blockchain *BlockChain) processPortalRedeemLiquidateExchangeRates(portalStateDB *statedb.StateDB, beaconHeight uint64, instructions []string, currentPortalState *CurrentPortalState, updatingInfoByTokenID map[common.Hash]UpdatingInfo) error { + if currentPortalState == nil { + Logger.log.Errorf("current portal state is nil") + return nil + } + + if len(instructions) != 4 { + return nil // skip the instruction + } + + // unmarshal instructions content + var actionData metadata.PortalRedeemLiquidateExchangeRatesContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err) + return nil + } + + reqStatus := instructions[2] + if reqStatus == common.PortalRedeemLiquidateExchangeRatesSuccessChainStatus { + keyExchangeRate := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + _, ok := currentPortalState.FinalExchangeRatesState[keyExchangeRate.String()] + if !ok { + Logger.log.Errorf("Exchange rate not found", err) + return nil + } + + liquidateExchangeRatesKey := statedb.GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight) + liquidateExchangeRates, ok := currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] + + if !ok { + Logger.log.Errorf("Liquidate exchange rates not found") + return nil + } + + 
liquidateByTokenID, ok := liquidateExchangeRates.Rates()[actionData.TokenID] + if !ok { + Logger.log.Errorf("Liquidate exchange rates not found") + return nil + } + + totalPrv := actionData.TotalPTokenReceived + + liquidateExchangeRates.Rates()[actionData.TokenID] = statedb.LiquidateExchangeRatesDetail{ + HoldAmountFreeCollateral: liquidateByTokenID.HoldAmountFreeCollateral - totalPrv, + HoldAmountPubToken: liquidateByTokenID.HoldAmountPubToken - actionData.RedeemAmount, + } + + currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] = liquidateExchangeRates + + Logger.log.Infof("Redeem Liquidation: Amount refund to user amount ptoken %v, amount prv %v", actionData.RedeemAmount, totalPrv) + + redeem := metadata.NewRedeemLiquidateExchangeRatesStatus( + actionData.TxReqID, + actionData.TokenID, + actionData.RedeemerIncAddressStr, + actionData.RemoteAddress, + actionData.RedeemAmount, + actionData.RedeemFee, + common.PortalRedeemLiquidateExchangeRatesSuccessStatus, + totalPrv, + ) + + contentStatusBytes, _ := json.Marshal(redeem) + err = statedb.TrackPortalStateStatusMultiple( + portalStateDB, + statedb.PortalLiquidationRedeemRequestStatusPrefix(), + []byte(actionData.TxReqID.String()), + contentStatusBytes, + beaconHeight, + ) + + if err != nil { + Logger.log.Errorf("Store redeem liquidate exchange rates error %v\n", err) + return nil + } + + // update bridge/portal token info + incTokenID, err := common.Hash{}.NewHashFromStr(actionData.TokenID) + if err != nil { + Logger.log.Errorf("ERROR: Can not new hash from porting incTokenID: %+v", err) + return nil + } + updatingInfo, found := updatingInfoByTokenID[*incTokenID] + if found { + updatingInfo.deductAmt += actionData.RedeemAmount + } else { + updatingInfo = UpdatingInfo{ + countUpAmt: 0, + deductAmt: actionData.RedeemAmount, + tokenID: *incTokenID, + externalTokenID: nil, + isCentralized: false, + } + } + updatingInfoByTokenID[*incTokenID] = updatingInfo + } else if reqStatus == 
common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus { + redeem := metadata.NewRedeemLiquidateExchangeRatesStatus( + actionData.TxReqID, + actionData.TokenID, + actionData.RedeemerIncAddressStr, + actionData.RemoteAddress, + actionData.RedeemAmount, + actionData.RedeemFee, + common.PortalRedeemLiquidateExchangeRatesRejectedStatus, + 0, + ) + + contentStatusBytes, _ := json.Marshal(redeem) + err = statedb.TrackPortalStateStatusMultiple( + portalStateDB, + statedb.PortalLiquidationRedeemRequestStatusPrefix(), + []byte(actionData.TxReqID.String()), + contentStatusBytes, + beaconHeight, + ) + if err != nil { + Logger.log.Errorf("Store redeem liquidate exchange rates error %v\n", err) + return nil + } + } + + return nil +} + +func (blockchain *BlockChain) processPortalLiquidationCustodianDeposit(portalStateDB *statedb.StateDB, beaconHeight uint64, instructions []string, currentPortalState *CurrentPortalState) error { + if currentPortalState == nil { + Logger.log.Errorf("current portal state is nil") + return nil + } + if len(instructions) != 4 { + return nil // skip the instruction + } + + // unmarshal instructions content + var actionData metadata.PortalLiquidationCustodianDepositContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + return err + } + + depositStatus := instructions[2] + + if depositStatus == common.PortalLiquidationCustodianDepositSuccessChainStatus { + keyExchangeRate := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRate := currentPortalState.FinalExchangeRatesState[keyExchangeRate.String()] + + keyCustodianState := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.IncogAddressStr) + keyCustodianStateStr := keyCustodianState.String() + + custodian, ok := currentPortalState.CustodianPoolState[keyCustodianStateStr] + if !ok { + Logger.log.Errorf("Custodian not found") + return nil + } + + amountNeeded, totalFreeCollateralNeeded, remainFreeCollateral, err := 
CalAmountNeededDepositLiquidate(custodian, exchangeRate, actionData.PTokenId, actionData.FreeCollateralSelected) + + if err != nil { + Logger.log.Errorf("Calculate amount needed deposit err %v", err) + return nil + } + + if actionData.DepositedAmount < amountNeeded { + Logger.log.Errorf("Deposited amount is not enough, expect %v, data sent %v", amountNeeded, actionData.DepositedAmount) + return nil + } + + Logger.log.Infof("Deposited amount: expect %v, data sent %v", amountNeeded, actionData.DepositedAmount) + + remainDepositAmount := actionData.DepositedAmount - amountNeeded + custodian.SetTotalCollateral(custodian.GetTotalCollateral() + actionData.DepositedAmount) + + if actionData.FreeCollateralSelected == false { + lockedAmountTmp := custodian.GetLockedAmountCollateral() + lockedAmountTmp[actionData.PTokenId] += amountNeeded + custodian.SetLockedAmountCollateral(lockedAmountTmp) + + //update remain + custodian.SetFreeCollateral(custodian.GetFreeCollateral() + remainDepositAmount) + } else { + //deposit from free collateral DepositedAmount + lockedAmountTmp := custodian.GetLockedAmountCollateral() + lockedAmountTmp[actionData.PTokenId] = lockedAmountTmp[actionData.PTokenId] + amountNeeded + totalFreeCollateralNeeded + custodian.SetLockedAmountCollateral(lockedAmountTmp) + + custodian.SetFreeCollateral(remainFreeCollateral + remainDepositAmount) + } + + currentPortalState.CustodianPoolState[keyCustodianStateStr] = custodian + + newLiquidationCustodianDeposit := metadata.NewLiquidationCustodianDepositStatus( + actionData.TxReqID, + actionData.IncogAddressStr, + actionData.PTokenId, + actionData.DepositedAmount, + actionData.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositSuccessStatus, + ) + + contentStatusBytes, _ := json.Marshal(newLiquidationCustodianDeposit) + err = statedb.TrackPortalStateStatusMultiple( + portalStateDB, + statedb.PortalLiquidationCustodianDepositStatusPrefix(), + []byte(actionData.TxReqID.String()), + contentStatusBytes, + 
beaconHeight, + ) + + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while store liquidation custodian deposit error %v", err) + return nil + } + } else if depositStatus == common.PortalLiquidationCustodianDepositRejectedChainStatus { + newLiquidationCustodianDeposit := metadata.NewLiquidationCustodianDepositStatus( + actionData.TxReqID, + actionData.IncogAddressStr, + actionData.PTokenId, + actionData.DepositedAmount, + actionData.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedStatus, + ) + + contentStatusBytes, _ := json.Marshal(newLiquidationCustodianDeposit) + err = statedb.TrackPortalStateStatusMultiple( + portalStateDB, + statedb.PortalLiquidationCustodianDepositStatusPrefix(), + []byte(actionData.TxReqID.String()), + contentStatusBytes, + beaconHeight, + ) + + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while store liquidation custodian deposit error %v", err) + return nil + } + } + + return nil +} + +func (blockchain *BlockChain) processPortalExpiredPortingRequest( + stateDB *statedb.StateDB, beaconHeight uint64, instructions []string, currentPortalState *CurrentPortalState) error { + if currentPortalState == nil { + Logger.log.Errorf("current portal state is nil") + return nil + } + if len(instructions) != 4 { + return nil // skip the instruction + } + + // unmarshal instructions content + var actionData metadata.PortalExpiredWaitingPortingReqContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + return err + } + + status := instructions[2] + waitingPortingID := actionData.UniquePortingID + + if status == common.PortalExpiredWaitingPortingReqSuccessChainStatus { + waitingPortingKey := statedb.GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, waitingPortingID) + waitingPortingKeyStr := waitingPortingKey.String() + waitingPortingReq := currentPortalState.WaitingPortingRequests[waitingPortingKeyStr] + if waitingPortingReq == nil { + 
Logger.log.Errorf("[processPortalExpiredPortingRequest] waiting porting req nil with key : %v", waitingPortingKey) + return nil + } + + // get tokenID from redeemTokenID + tokenID := waitingPortingReq.TokenID() + + // update custodian state in matching custodians list (holding public tokens, locked amount) + for _, matchCusDetail := range waitingPortingReq.Custodians() { + cusStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, matchCusDetail.IncAddress) + cusStateKeyStr := cusStateKey.String() + custodianState := currentPortalState.CustodianPoolState[cusStateKeyStr] + if custodianState == nil { + Logger.log.Errorf("[checkAndBuildInstForExpiredWaitingPortingRequest] Error when get custodian state with key %v\n: ", cusStateKey) + continue + } + updateCustodianStateAfterExpiredPortingReq(custodianState, matchCusDetail.LockedAmountCollateral, matchCusDetail.Amount, tokenID) + } + + // remove waiting porting request from waiting list + // TODO: + delete(currentPortalState.WaitingPortingRequests, waitingPortingKeyStr) + + // update status of porting ID => expired/liquidated + portingReqStatus := common.PortalPortingReqExpiredStatus + if actionData.ExpiredByLiquidation { + portingReqStatus = common.PortalPortingReqLiquidatedStatus + } + + newPortingRequestStatus := statedb.NewWaitingPortingRequestWithValue( + waitingPortingReq.UniquePortingID(), + waitingPortingReq.TxReqID(), + tokenID, + waitingPortingReq.PorterAddress(), + waitingPortingReq.Amount(), + waitingPortingReq.Custodians(), + waitingPortingReq.PortingFee(), + portingReqStatus, + waitingPortingReq.BeaconHeight(), + ) + + err = statedb.StoreWaitingPortingRequests(stateDB, beaconHeight, waitingPortingReq.UniquePortingID(), newPortingRequestStatus) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while store porting request item: %+v", err) + return nil + } + + // track expired waiting porting request status by portingID into DB + expiredPortingTrackData := 
metadata.PortalExpiredWaitingPortingReqStatus{ + Status: common.PortalExpiredPortingReqSuccessStatus, + UniquePortingID: waitingPortingID, + ShardID: actionData.ShardID, + ExpiredByLiquidation: actionData.ExpiredByLiquidation, + ExpiredBeaconHeight: beaconHeight + 1, + } + expiredPortingTrackDataBytes, _ := json.Marshal(expiredPortingTrackData) + err = statedb.StorePortalExpiredPortingRequestStatus( + stateDB, + waitingPortingID, + expiredPortingTrackDataBytes, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while tracking expired porting request: %+v", err) + return nil + } + + } else if status == common.PortalLiquidationCustodianDepositRejectedChainStatus { + // track expired waiting porting request status by portingID into DB + expiredPortingTrackData := metadata.PortalExpiredWaitingPortingReqStatus{ + Status: common.PortalExpiredPortingReqFailedStatus, + UniquePortingID: waitingPortingID, + ShardID: actionData.ShardID, + ExpiredByLiquidation: actionData.ExpiredByLiquidation, + ExpiredBeaconHeight: beaconHeight + 1, + } + expiredPortingTrackDataBytes, _ := json.Marshal(expiredPortingTrackData) + err = statedb.StorePortalExpiredPortingRequestStatus( + stateDB, + waitingPortingID, + expiredPortingTrackDataBytes, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while tracking expired porting request: %+v", err) + return nil + } + } + + return nil +} diff --git a/blockchain/beaconportalliquidationproducer.go b/blockchain/beaconportalliquidationproducer.go new file mode 100644 index 0000000000..66c4b406f4 --- /dev/null +++ b/blockchain/beaconportalliquidationproducer.go @@ -0,0 +1,981 @@ +package blockchain + +import ( + "encoding/base64" + "encoding/json" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" + "github.com/incognitochain/incognito-chain/wallet" + "math/big" + "strconv" + "time" +) + +// 
beacon build instruction for portal liquidation when custodians run away - don't send public tokens back to users. +func buildCustodianRunAwayLiquidationInst( + redeemID string, + tokenID string, + redeemPubTokenAmount uint64, + mintedCollateralAmount uint64, + redeemerIncAddrStr string, + custodianIncAddrStr string, + liquidatedByExchangeRate bool, + metaType int, + shardID byte, + status string, +) []string { + liqCustodianContent := metadata.PortalLiquidateCustodianContent{ + UniqueRedeemID: redeemID, + TokenID: tokenID, + RedeemPubTokenAmount: redeemPubTokenAmount, + MintedCollateralAmount: mintedCollateralAmount, + RedeemerIncAddressStr: redeemerIncAddrStr, + CustodianIncAddressStr: custodianIncAddrStr, + LiquidatedByExchangeRate: liquidatedByExchangeRate, + ShardID: shardID, + } + liqCustodianContentBytes, _ := json.Marshal(liqCustodianContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(liqCustodianContentBytes), + } +} + +func buildTopPercentileExchangeRatesLiquidationInst( + custodianAddress string, + metaType int, + status string, + topPercentile map[string]metadata.LiquidateTopPercentileExchangeRatesDetail, +) []string { + tpContent := metadata.PortalLiquidateTopPercentileExchangeRatesContent{ + CustodianAddress: custodianAddress, + MetaType: metaType, + Status: status, + TP: topPercentile, + } + tpContentBytes, _ := json.Marshal(tpContent) + return []string{ + strconv.Itoa(metaType), + "-1", + status, + string(tpContentBytes), + } +} + +func buildRedeemLiquidateExchangeRatesInst( + tokenID string, + redeemAmount uint64, + incAddressStr string, + remoteAddress string, + redeemFee uint64, + totalPTokenReceived uint64, + metaType int, + shardID byte, + txReqID common.Hash, + status string, +) []string { + redeemRequestContent := metadata.PortalRedeemLiquidateExchangeRatesContent{ + TokenID: tokenID, + RedeemAmount: redeemAmount, + RedeemerIncAddressStr: incAddressStr, + RemoteAddress: remoteAddress, + 
RedeemFee: redeemFee, + TxReqID: txReqID, + ShardID: shardID, + TotalPTokenReceived: totalPTokenReceived, + } + redeemRequestContentBytes, _ := json.Marshal(redeemRequestContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(redeemRequestContentBytes), + } +} + +func buildLiquidationCustodianDepositInst( + pTokenId string, + incogAddress string, + depositedAmount uint64, + freeCollateralSelected bool, + status string, + metaType int, + shardID byte, + txReqID common.Hash, +) []string { + redeemRequestContent := metadata.PortalLiquidationCustodianDepositContent{ + PTokenId: pTokenId, + IncogAddressStr: incogAddress, + DepositedAmount: depositedAmount, + FreeCollateralSelected: freeCollateralSelected, + TxReqID: txReqID, + ShardID: shardID, + } + redeemRequestContentBytes, _ := json.Marshal(redeemRequestContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(redeemRequestContentBytes), + } +} + +func (blockchain *BlockChain) checkAndBuildInstForCustodianLiquidation( + beaconHeight uint64, + currentPortalState *CurrentPortalState, +) ([][]string, error) { + + insts := [][]string{} + + // get exchange rate + exchangeRateKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRate := currentPortalState.FinalExchangeRatesState[exchangeRateKey.String()] + if exchangeRate == nil { + //Logger.log.Errorf("[checkAndBuildInstForCustodianLiquidation] Error when get exchange rate") + } + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + liquidatedByExchangeRate := false + + for redeemReqKey, redeemReq := range currentPortalState.WaitingRedeemRequests { + if (beaconHeight+1) - redeemReq.GetBeaconHeight() >= blockchain.convertPortalTimeOutToBeaconBlocks(common.PortalTimeOutCustodianSendPubTokenBack) { + // get shardId of redeemer + redeemerKey, err := wallet.Base58CheckDeserialize(redeemReq.GetRedeemerAddress()) + if err != nil { + 
Logger.log.Errorf("[checkAndBuildInstForCustodianLiquidation] Error when deserializing redeemer address string in redeemID %v - %v\n: ", + redeemReq.GetUniqueRedeemID(), err) + continue + } + shardID := common.GetShardIDFromLastByte(redeemerKey.KeySet.PaymentAddress.Pk[len(redeemerKey.KeySet.PaymentAddress.Pk)-1]) + + // get tokenID from redeemTokenID + tokenID := redeemReq.GetTokenID() + + for _, matchCusDetail := range redeemReq.GetCustodians() { + // calculate minted collateral amount + mintedAmountInPToken := matchCusDetail.GetAmount() * common.PercentReceivedCollateralAmount / 100 + mintedAmountInPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(tokenID, mintedAmountInPToken) + if err != nil { + Logger.log.Errorf("[checkAndBuildInstForCustodianLiquidation] Error when exchanging ptoken to prv amount %v\n: ", err) + inst := buildCustodianRunAwayLiquidationInst( + redeemReq.GetUniqueRedeemID(), + redeemReq.GetTokenID(), + matchCusDetail.GetAmount(), + 0, + redeemReq.GetRedeemerAddress(), + matchCusDetail.GetIncognitoAddress(), + liquidatedByExchangeRate, + metadata.PortalLiquidateCustodianMeta, + shardID, + common.PortalLiquidateCustodianFailedChainStatus, + ) + insts = append(insts, inst) + continue + } + + // update custodian state (total collateral, holding public tokens, locked amount, free collateral) + cusStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, matchCusDetail.GetIncognitoAddress()) + cusStateKeyStr := cusStateKey.String() + custodianState := currentPortalState.CustodianPoolState[cusStateKeyStr] + if custodianState == nil { + Logger.log.Errorf("[checkAndBuildInstForCustodianLiquidation] Error when get custodian state with key %v\n: ", cusStateKey) + inst := buildCustodianRunAwayLiquidationInst( + redeemReq.GetUniqueRedeemID(), + redeemReq.GetTokenID(), + matchCusDetail.GetAmount(), + 0, + redeemReq.GetRedeemerAddress(), + matchCusDetail.GetIncognitoAddress(), + liquidatedByExchangeRate, + 
metadata.PortalLiquidateCustodianMeta, + shardID, + common.PortalLiquidateCustodianFailedChainStatus, + ) + insts = append(insts, inst) + continue + } + + if custodianState.GetTotalCollateral() < mintedAmountInPRV || + custodianState.GetLockedAmountCollateral()[tokenID] < mintedAmountInPRV { + Logger.log.Errorf("[checkAndBuildInstForCustodianLiquidation] Total collateral %v, locked amount %v "+ + "should be greater than minted amount %v\n: ", + custodianState.GetTotalCollateral(), custodianState.GetLockedAmountCollateral()[tokenID], mintedAmountInPRV) + inst := buildCustodianRunAwayLiquidationInst( + redeemReq.GetUniqueRedeemID(), + redeemReq.GetTokenID(), + matchCusDetail.GetAmount(), + mintedAmountInPRV, + redeemReq.GetRedeemerAddress(), + matchCusDetail.GetIncognitoAddress(), + liquidatedByExchangeRate, + metadata.PortalLiquidateCustodianMeta, + shardID, + common.PortalLiquidateCustodianFailedChainStatus, + ) + insts = append(insts, inst) + continue + } + + updateCustodianStateAfterLiquidateCustodian(custodianState, mintedAmountInPRV, tokenID) + + // remove matching custodian from matching custodians list in waiting redeem request + updatedCustodians, _ := removeCustodianFromMatchingRedeemCustodians( + currentPortalState.WaitingRedeemRequests[redeemReqKey].GetCustodians(), matchCusDetail.GetIncognitoAddress()) + currentPortalState.WaitingRedeemRequests[redeemReqKey].SetCustodians(updatedCustodians) + + // build instruction + inst := buildCustodianRunAwayLiquidationInst( + redeemReq.GetUniqueRedeemID(), + redeemReq.GetTokenID(), + matchCusDetail.GetAmount(), + mintedAmountInPRV, + redeemReq.GetRedeemerAddress(), + matchCusDetail.GetIncognitoAddress(), + liquidatedByExchangeRate, + metadata.PortalLiquidateCustodianMeta, + shardID, + common.PortalLiquidateCustodianSuccessChainStatus, + ) + insts = append(insts, inst) + } + + // remove redeem request from waiting redeem requests list + if len(currentPortalState.WaitingRedeemRequests[redeemReqKey].GetCustodians()) == 
0 { + deleteWaitingRedeemRequest(currentPortalState, redeemReqKey) + } + } + } + + return insts, nil +} + +// beacon build instruction for expired waiting porting request - user doesn't send public token to custodian after requesting +func buildExpiredWaitingPortingReqInst( + portingID string, + expiredByLiquidation bool, + metaType int, + shardID byte, + status string, +) []string { + liqCustodianContent := metadata.PortalExpiredWaitingPortingReqContent{ + UniquePortingID: portingID, + ExpiredByLiquidation: expiredByLiquidation, + ShardID: shardID, + } + liqCustodianContentBytes, _ := json.Marshal(liqCustodianContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(liqCustodianContentBytes), + } +} + +func buildInstForExpiredPortingReqByPortingID( + beaconHeight uint64, + currentPortalState *CurrentPortalState, + portingReqKey string, + portingReq *statedb.WaitingPortingRequest, + expiredByLiquidation bool) ([][]string, error) { + insts := [][]string{} + + //get shardId of redeemer + redeemerKey, err := wallet.Base58CheckDeserialize(portingReq.PorterAddress()) + if err != nil { + Logger.log.Errorf("[buildInstForExpiredPortingReqByPortingID] Error when deserializing redeemer address string in redeemID %v - %v\n: ", + portingReq.UniquePortingID, err) + return insts, err + } + shardID := common.GetShardIDFromLastByte(redeemerKey.KeySet.PaymentAddress.Pk[len(redeemerKey.KeySet.PaymentAddress.Pk)-1]) + + // get tokenID from redeemTokenID + tokenID := portingReq.TokenID() + + // update custodian state in matching custodians list (holding public tokens, locked amount) + for _, matchCusDetail := range portingReq.Custodians() { + cusStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, matchCusDetail.IncAddress) + cusStateKeyStr := cusStateKey.String() + custodianState := currentPortalState.CustodianPoolState[cusStateKeyStr] + if custodianState == nil { + 
Logger.log.Errorf("[checkAndBuildInstForExpiredWaitingPortingRequest] Error when get custodian state with key %v\n: ", cusStateKey) + continue + } + updateCustodianStateAfterExpiredPortingReq(custodianState, matchCusDetail.LockedAmountCollateral, matchCusDetail.Amount, tokenID) + } + + // remove waiting porting request from waiting list + delete(currentPortalState.WaitingPortingRequests, portingReqKey) + + // build instruction + inst := buildExpiredWaitingPortingReqInst( + portingReq.UniquePortingID(), + expiredByLiquidation, + metadata.PortalExpiredWaitingPortingReqMeta, + shardID, + common.PortalExpiredWaitingPortingReqSuccessChainStatus, + ) + insts = append(insts, inst) + + return insts, nil +} + +// convertPortalTimeOutToBeaconBlocks returns number of beacon blocks corresponding to duration time +func (blockchain *BlockChain) convertPortalTimeOutToBeaconBlocks(duration time.Duration) uint64 { + return uint64(duration.Seconds() / blockchain.config.ChainParams.MinBeaconBlockInterval.Seconds()) +} + +func (blockchain *BlockChain) checkAndBuildInstForExpiredWaitingPortingRequest( + beaconHeight uint64, + currentPortalState *CurrentPortalState, +) ([][]string, error) { + insts := [][]string{} + for portingReqKey, portingReq := range currentPortalState.WaitingPortingRequests { + if (beaconHeight+1) - portingReq.BeaconHeight() >= blockchain.convertPortalTimeOutToBeaconBlocks(common.PortalTimeOutWaitingPortingRequest) { + inst, err := buildInstForExpiredPortingReqByPortingID( + beaconHeight, currentPortalState, portingReqKey, portingReq, false) + if err != nil { + Logger.log.Errorf("[checkAndBuildInstForExpiredWaitingPortingRequest] Error when build instruction for expired porting request %v\n", err) + continue + } + insts = append(insts, inst...) 
+ } + } + + return insts, nil +} + +func checkAndBuildInstForTPExchangeRateRedeemRequest( + beaconHeight uint64, + currentPortalState *CurrentPortalState, + exchangeRate *statedb.FinalExchangeRatesState, + liquidatedCustodianState *statedb.CustodianState, + tokenID string, +) ([][]string, error) { + insts := [][]string{} + + // calculate total amount of matching redeem amount with the liquidated custodian + totalMatchingRedeemAmountPubToken := uint64(0) + for _, redeemReq := range currentPortalState.WaitingRedeemRequests { + if redeemReq.GetTokenID() == tokenID { + for _, cus := range redeemReq.GetCustodians() { + if cus.GetIncognitoAddress() == liquidatedCustodianState.GetIncognitoAddress() { + totalMatchingRedeemAmountPubToken += cus.GetAmount() + } + } + } + } + + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + + // calculate total minted amount prv for liquidate (maximum 120% amount) + totalMatchingRedeemAmountPubTokenInPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(tokenID, totalMatchingRedeemAmountPubToken) + if err != nil { + Logger.log.Errorf("[checkAndBuildInstForTPExchangeRateRedeemRequest] Error when convert total amount public token to prv %v", err) + return insts, err + } + + totalMintedTmp := new(big.Int).Mul(new(big.Int).SetUint64(totalMatchingRedeemAmountPubTokenInPRV), new(big.Int).SetUint64(common.PercentReceivedCollateralAmount)) + totalMintedAmountPRV := new(big.Int).Div(totalMintedTmp, new(big.Int).SetUint64(100)).Uint64() + + if totalMintedAmountPRV > liquidatedCustodianState.GetLockedAmountCollateral()[tokenID] { + totalMintedAmountPRV = liquidatedCustodianState.GetLockedAmountCollateral()[tokenID] + } + + // calculate minted amount prv for each matching redeem requests + // rely on percent matching redeem amount and total matching redeem amount + liquidatedByExchangeRate := true + for redeemReqKey, redeemReq := range currentPortalState.WaitingRedeemRequests { + if redeemReq.GetTokenID() == tokenID { 
+ for _, matchCustodian := range redeemReq.GetCustodians() { + if matchCustodian.GetIncognitoAddress() == liquidatedCustodianState.GetIncognitoAddress() { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(matchCustodian.GetAmount()), new(big.Int).SetUint64(totalMintedAmountPRV)) + mintedAmountPRV := new(big.Int).Div(tmp, new(big.Int).SetUint64(totalMatchingRedeemAmountPubToken)).Uint64() + + // get shardId of redeemer + redeemerKey, err := wallet.Base58CheckDeserialize(redeemReq.GetRedeemerAddress()) + if err != nil { + Logger.log.Errorf("[checkAndBuildInstForTPExchangeRateRedeemRequest] Error when deserializing redeemer address string in redeemID %v - %v\n: ", + redeemReq.GetUniqueRedeemID(), err) + continue + } + shardID := common.GetShardIDFromLastByte(redeemerKey.KeySet.PaymentAddress.Pk[len(redeemerKey.KeySet.PaymentAddress.Pk)-1]) + + // remove matching custodian from matching custodians list in waiting redeem request + updatedCustodians, _ := removeCustodianFromMatchingRedeemCustodians( + currentPortalState.WaitingRedeemRequests[redeemReqKey].GetCustodians(), matchCustodian.GetIncognitoAddress()) + currentPortalState.WaitingRedeemRequests[redeemReqKey].SetCustodians(updatedCustodians) + + // build instruction + inst := buildCustodianRunAwayLiquidationInst( + redeemReq.GetUniqueRedeemID(), + redeemReq.GetTokenID(), + matchCustodian.GetAmount(), + mintedAmountPRV, + redeemReq.GetRedeemerAddress(), + matchCustodian.GetIncognitoAddress(), + liquidatedByExchangeRate, + metadata.PortalLiquidateCustodianMeta, + shardID, + common.PortalLiquidateCustodianSuccessChainStatus, + ) + insts = append(insts, inst) + + } + } + // remove redeem request from waiting redeem requests list + if len(currentPortalState.WaitingRedeemRequests[redeemReqKey].GetCustodians()) == 0 { + deleteWaitingRedeemRequest(currentPortalState, redeemReqKey) + } + } + } + // update custodian state (update locked amount, holding public token amount) + custodianStateKey := 
statedb.GenerateCustodianStateObjectKey(beaconHeight, liquidatedCustodianState.GetIncognitoAddress()) + custodianStateKeyStr := custodianStateKey.String() + holdingPubTokenTmp := currentPortalState.CustodianPoolState[custodianStateKeyStr].GetHoldingPublicTokens() + holdingPubTokenTmp[tokenID] -= totalMatchingRedeemAmountPubToken + currentPortalState.CustodianPoolState[custodianStateKeyStr].SetHoldingPublicTokens(holdingPubTokenTmp) + + lockedAmountTmp := currentPortalState.CustodianPoolState[custodianStateKeyStr].GetLockedAmountCollateral() + lockedAmountTmp[tokenID] -= totalMintedAmountPRV + currentPortalState.CustodianPoolState[custodianStateKeyStr].SetLockedAmountCollateral(lockedAmountTmp) + + return insts, nil +} + +func checkAndBuildInstForTPExchangeRatePortingRequest( + beaconHeight uint64, + currentPortalState *CurrentPortalState, + exchangeRate *statedb.FinalExchangeRatesState, + liquidatedCustodianState *statedb.CustodianState, + tokenID string, +) ([][]string, error) { + insts := [][]string{} + // filter waiting porting request that has liquidated matching custodian by exchange rate drops down + for portingReqKey, portingReq := range currentPortalState.WaitingPortingRequests { + if portingReq.TokenID() == tokenID { + for _, cus := range portingReq.Custodians() { + if cus.IncAddress == liquidatedCustodianState.GetIncognitoAddress() { + inst, err := buildInstForExpiredPortingReqByPortingID( + beaconHeight, currentPortalState, portingReqKey, portingReq, true) + if err != nil { + Logger.log.Errorf("[checkAndBuildInstForTPExchangeRatePortingRequest] Error when build instruction %v\n", err) + continue + } + insts = append(insts, inst...) 
+ } + } + } + } + + return insts, nil +} + +/* +Top percentile (TP): 150 (TP150), 130 (TP130), 120 (TP120) +if TP down, we are need liquidation custodian and notify to custodians (or users) +*/ +func buildInstForLiquidationTopPercentileExchangeRates(beaconHeight uint64, currentPortalState *CurrentPortalState) ([][]string, error) { + if len(currentPortalState.CustodianPoolState) <= 0 { + return [][]string{}, nil + } + + insts := [][]string{} + keyExchangeRate := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + keyExchangeRateStr := keyExchangeRate.String() + exchangeRate, ok := currentPortalState.FinalExchangeRatesState[keyExchangeRateStr] + if !ok { + Logger.log.Errorf("Exchange key %+v rate not found", keyExchangeRateStr) + return [][]string{}, nil + } + + custodianPoolState := currentPortalState.CustodianPoolState + + for custodianKey, custodianState := range custodianPoolState { + Logger.log.Infof("Start detect tp custodian address: custodian key %v, custodian address %v, total pubtokens %v, total amount collateral %v",custodianKey, custodianState.GetIncognitoAddress(), custodianState.GetHoldingPublicTokens(), custodianState.GetLockedAmountCollateral()) + + calTPRatio, err := calculateTPRatio(custodianState.GetHoldingPublicTokens(), custodianState.GetLockedAmountCollateral(), exchangeRate) + if err != nil { + Logger.log.Errorf("Auto liquidation: cal tp ratio error %v", err) + continue + } + + //filter TP by TP 120 or TP130 + detectTp, err := detectTopPercentileLiquidation(custodianState, calTPRatio) + if err != nil { + Logger.log.Errorf("Auto liquidation: detect cal tp ratio error %v", err) + continue + } + + Logger.log.Infof("liquidate exchange rates: detect TP result %v", detectTp) + if len(detectTp) > 0 { + for pTokenID, v := range detectTp { + if v.HoldAmountFreeCollateral > 0 { + // check and build instruction for waiting redeem request + instsFromRedeemRequest, err := checkAndBuildInstForTPExchangeRateRedeemRequest( + beaconHeight, + 
currentPortalState, + exchangeRate, + custodianState, + pTokenID, + ) + if err != nil { + Logger.log.Errorf("Error when check and build instruction from redeem request %v\n", err) + continue + } + if len(instsFromRedeemRequest) > 0 { + Logger.log.Infof("There is %v instructions for tp exchange rate for redeem request", len(instsFromRedeemRequest)) + insts = append(insts, instsFromRedeemRequest...) + } + + // check and build instruction for waiting porting request + instsFromWaitingPortingReq, err := checkAndBuildInstForTPExchangeRatePortingRequest( + beaconHeight, + currentPortalState, + exchangeRate, + custodianState, + pTokenID, + ) + if err != nil { + Logger.log.Errorf("Error when check and build instruction from redeem request %v\n", err) + continue + } + if len(instsFromWaitingPortingReq) > 0 { + Logger.log.Infof("There is %v instructions for tp exchange rate for waiting porting request", len(instsFromWaitingPortingReq)) + insts = append(insts, instsFromWaitingPortingReq...) + } + } + } + + inst := buildTopPercentileExchangeRatesLiquidationInst( + custodianState.GetIncognitoAddress(), + metadata.PortalLiquidateTPExchangeRatesMeta, + common.PortalLiquidateTPExchangeRatesSuccessChainStatus, + detectTp, + ) + + + + //update current portal state + updateCurrentPortalStateOfLiquidationExchangeRates(beaconHeight, currentPortalState, custodianKey, custodianState, detectTp) + + insts = append(insts, inst) + } + } + + return insts, nil +} + +func (blockchain *BlockChain) buildInstructionsForLiquidationRedeemPTokenExchangeRates( + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + // parse instruction + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while decoding content string of portal redeem liquidate exchange rate action: %+v", err) + return [][]string{}, nil + } + var actionData 
metadata.PortalRedeemLiquidateExchangeRatesAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while unmarshal portal redeem liquidate exchange rate action: %+v", err) + return [][]string{}, nil + } + + meta := actionData.Meta + if currentPortalState == nil { + Logger.log.Warn("Current Portal state is null.") + // need to mint ptoken to user + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + //get exchange rates + exchangeRatesKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRatesState, ok := currentPortalState.FinalExchangeRatesState[exchangeRatesKey.String()] + if !ok { + Logger.log.Errorf("exchange rates not found") + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + minRedeemFee, err := CalMinRedeemFee(meta.RedeemAmount, meta.TokenID, exchangeRatesState) + if err != nil { + Logger.log.Errorf("Error when calculating minimum redeem fee %v", err) + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if meta.RedeemFee < minRedeemFee { + Logger.log.Errorf("Redeem fee is invalid, minRedeemFee %v, but get %v\n", minRedeemFee, meta.RedeemFee) + inst := 
buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + //check redeem amount + liquidateExchangeRatesKey := statedb.GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight) + liquidateExchangeRates, ok := currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] + + if !ok { + Logger.log.Errorf("Liquidate exchange rates not found") + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + liquidateByTokenID, ok := liquidateExchangeRates.Rates()[meta.TokenID] + + if !ok { + Logger.log.Errorf("Liquidate exchange rates not found") + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + totalPrv, err := calTotalLiquidationByExchangeRates(meta.RedeemAmount, liquidateByTokenID) + + if err != nil { + Logger.log.Errorf("Calculate total liquidation error %v", err) + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + //todo: review + if totalPrv > liquidateByTokenID.HoldAmountFreeCollateral || 
liquidateByTokenID.HoldAmountFreeCollateral <= 0 { + Logger.log.Errorf("amout free collateral not enough, need prv %v != hold amount free collateral %v", totalPrv, liquidateByTokenID.HoldAmountFreeCollateral) + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + 0, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + Logger.log.Infof("Redeem Liquidation: Amount refund to user amount ptoken %v, amount prv %v", meta.RedeemAmount, totalPrv) + liquidateExchangeRates.Rates()[meta.TokenID] = statedb.LiquidateExchangeRatesDetail{ + HoldAmountFreeCollateral: liquidateByTokenID.HoldAmountFreeCollateral - totalPrv, + HoldAmountPubToken: liquidateByTokenID.HoldAmountPubToken - meta.RedeemAmount, + } + + currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] = liquidateExchangeRates + + inst := buildRedeemLiquidateExchangeRatesInst( + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + totalPrv, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemLiquidateExchangeRatesSuccessChainStatus, + ) + return [][]string{inst}, nil +} + +func (blockchain *BlockChain) buildInstructionsForLiquidationCustodianDeposit( + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + // parse instruction + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while decoding content string of portal liquidation custodian deposit action: %+v", err) + return [][]string{}, nil + } + var actionData metadata.PortalLiquidationCustodianDepositAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + 
Logger.log.Errorf("ERROR: an error occurred while unmarshal portal liquidation custodian deposit action: %+v", err) + return [][]string{}, nil + } + + if currentPortalState == nil { + Logger.log.Warn("Current Portal state is null.") + // need to refund collateral to custodian + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + meta := actionData.Meta + + keyCustodianState := statedb.GenerateCustodianStateObjectKey(beaconHeight, meta.IncogAddressStr) + custodian, ok := currentPortalState.CustodianPoolState[keyCustodianState.String()] + + if !ok { + Logger.log.Errorf("Custodian not found") + // need to refund collateral to custodian + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + //check exit ptoken + if _, ok := custodian.GetLockedAmountCollateral()[actionData.Meta.PTokenId]; !ok { + Logger.log.Errorf("PToken not found") + // need to refund collateral to custodian + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + keyExchangeRate := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRate, ok := 
currentPortalState.FinalExchangeRatesState[keyExchangeRate.String()] + if !ok { + Logger.log.Errorf("Exchange rate not found", err) + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + calTPRatio, err := calculateTPRatio(custodian.GetHoldingPublicTokens(), custodian.GetLockedAmountCollateral(), exchangeRate) + if err != nil { + Logger.log.Errorf("Custodian deposit: cal tp ratio error %v", err) + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + //filter TP by TP 120 & TP130 + detectTp, err := detectTopPercentileLiquidation(custodian, calTPRatio) + + if err != nil { + Logger.log.Errorf("Detect TP liquidation error %v", err) + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + tpItem, ok := detectTp[actionData.Meta.PTokenId] + + if tpItem.TPKey != common.TP130 || !ok { + Logger.log.Errorf("TP value is must TP130") + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, 
+ shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + amountNeeded, totalFreeCollateralNeeded, remainFreeCollateral, err := CalAmountNeededDepositLiquidate(custodian, exchangeRate, actionData.Meta.PTokenId, actionData.Meta.FreeCollateralSelected) + + if err != nil { + Logger.log.Errorf("Calculate amount needed deposit err %v", err) + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + if actionData.Meta.DepositedAmount < amountNeeded { + Logger.log.Errorf("Deposited amount is not enough, expect %v, data sent %v", amountNeeded, actionData.Meta.DepositedAmount) + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositRejectedChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + Logger.log.Infof("Deposited amount: expect %v, data sent %v", amountNeeded, actionData.Meta.DepositedAmount) + + remainDepositAmount := actionData.Meta.DepositedAmount - amountNeeded + custodian.SetTotalCollateral(custodian.GetTotalCollateral() + actionData.Meta.DepositedAmount) + + if actionData.Meta.FreeCollateralSelected == false { + lockedAmountCollateral := custodian.GetLockedAmountCollateral() + lockedAmountCollateral[actionData.Meta.PTokenId] = lockedAmountCollateral[actionData.Meta.PTokenId] + amountNeeded + + custodian.SetLockedAmountCollateral(lockedAmountCollateral) + //update remain + custodian.SetFreeCollateral(custodian.GetFreeCollateral() + remainDepositAmount) + } else { + lockedAmountCollateral := 
custodian.GetLockedAmountCollateral() + lockedAmountCollateral[actionData.Meta.PTokenId] = lockedAmountCollateral[actionData.Meta.PTokenId] + amountNeeded + totalFreeCollateralNeeded + //deposit from free collateral DepositedAmount + custodian.SetLockedAmountCollateral(lockedAmountCollateral) + custodian.SetFreeCollateral(remainFreeCollateral + remainDepositAmount) + } + + currentPortalState.CustodianPoolState[keyCustodianState.String()] = custodian + + inst := buildLiquidationCustodianDepositInst( + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.FreeCollateralSelected, + common.PortalLiquidationCustodianDepositSuccessChainStatus, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + ) + + return [][]string{inst}, nil +} diff --git a/blockchain/beaconportalprocess.go b/blockchain/beaconportalprocess.go new file mode 100644 index 0000000000..52e5fd49f5 --- /dev/null +++ b/blockchain/beaconportalprocess.go @@ -0,0 +1,1036 @@ +package blockchain + +import ( + "encoding/json" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" + "sort" + "strconv" +) + +func (blockchain *BlockChain) processPortalInstructions(portalStateDB *statedb.StateDB, block *BeaconBlock) error { + beaconHeight := block.Header.Height - 1 + currentPortalState, err := InitCurrentPortalStateFromDB(portalStateDB, beaconHeight) + if err != nil { + Logger.log.Error(err) + return nil + } + + // re-use update info of bridge + updatingInfoByTokenID := map[common.Hash]UpdatingInfo{} + + for _, inst := range block.Body.Instructions { + if len(inst) < 4 { + continue // Not error, just not Portal instruction + } + + var err error + + switch inst[0] { + //porting request + case strconv.Itoa(metadata.PortalUserRegisterMeta): + err = blockchain.processPortalUserRegister(portalStateDB, beaconHeight, inst, 
currentPortalState) + //exchange rates + case strconv.Itoa(metadata.PortalExchangeRatesMeta): + err = blockchain.processPortalExchangeRates(portalStateDB, beaconHeight, inst, currentPortalState) + //custodian withdraw + case strconv.Itoa(metadata.PortalCustodianWithdrawRequestMeta): + err = blockchain.processPortalCustodianWithdrawRequest(portalStateDB, beaconHeight, inst, currentPortalState) + //liquidation exchange rates + case strconv.Itoa(metadata.PortalLiquidateTPExchangeRatesMeta): + err = blockchain.processLiquidationTopPercentileExchangeRates(portalStateDB, beaconHeight, inst, currentPortalState) + //liquidation custodian deposit + case strconv.Itoa(metadata.PortalLiquidationCustodianDepositMeta): + err = blockchain.processPortalLiquidationCustodianDeposit(portalStateDB, beaconHeight, inst, currentPortalState) + //liquidation user redeem + case strconv.Itoa(metadata.PortalRedeemLiquidateExchangeRatesMeta): + err = blockchain.processPortalRedeemLiquidateExchangeRates(portalStateDB, beaconHeight, inst, currentPortalState, updatingInfoByTokenID) + //custodian deposit + case strconv.Itoa(metadata.PortalCustodianDepositMeta): + err = blockchain.processPortalCustodianDeposit(portalStateDB, beaconHeight, inst, currentPortalState) + // request ptoken + case strconv.Itoa(metadata.PortalUserRequestPTokenMeta): + err = blockchain.processPortalUserReqPToken(portalStateDB, beaconHeight, inst, currentPortalState, updatingInfoByTokenID) + // redeem request + case strconv.Itoa(metadata.PortalRedeemRequestMeta): + err = blockchain.processPortalRedeemRequest(portalStateDB, beaconHeight, inst, currentPortalState, updatingInfoByTokenID) + // request unlock collateral + case strconv.Itoa(metadata.PortalRequestUnlockCollateralMeta): + err = blockchain.processPortalUnlockCollateral(portalStateDB, beaconHeight, inst, currentPortalState) + // liquidation custodian run away + case strconv.Itoa(metadata.PortalLiquidateCustodianMeta): + err = 
blockchain.processPortalLiquidateCustodian(portalStateDB, beaconHeight, inst, currentPortalState) + // portal reward + case strconv.Itoa(metadata.PortalRewardMeta): + err = blockchain.processPortalReward(portalStateDB, beaconHeight, inst, currentPortalState) + // request withdraw reward + case strconv.Itoa(metadata.PortalRequestWithdrawRewardMeta): + err = blockchain.processPortalWithdrawReward(portalStateDB, beaconHeight, inst, currentPortalState) + // expired waiting porting request + case strconv.Itoa(metadata.PortalExpiredWaitingPortingReqMeta): + err = blockchain.processPortalExpiredPortingRequest(portalStateDB, beaconHeight, inst, currentPortalState) + // total custodian reward instruction + case strconv.Itoa(metadata.PortalTotalRewardCustodianMeta): + err = blockchain.processPortalTotalCustodianReward(portalStateDB, beaconHeight, inst, currentPortalState) + } + + if err != nil { + Logger.log.Error(err) + return nil + } + } + + //save final exchangeRates + blockchain.pickExchangesRatesFinal(beaconHeight, currentPortalState) + + // update info of bridge portal token + for _, updatingInfo := range updatingInfoByTokenID { + var updatingAmt uint64 + var updatingType string + if updatingInfo.countUpAmt > updatingInfo.deductAmt { + updatingAmt = updatingInfo.countUpAmt - updatingInfo.deductAmt + updatingType = "+" + } + if updatingInfo.countUpAmt < updatingInfo.deductAmt { + updatingAmt = updatingInfo.deductAmt - updatingInfo.countUpAmt + updatingType = "-" + } + err := statedb.UpdateBridgeTokenInfo( + portalStateDB, + updatingInfo.tokenID, + updatingInfo.externalTokenID, + updatingInfo.isCentralized, + updatingAmt, + updatingType, + ) + if err != nil { + return err + } + } + + // store updated currentPortalState to leveldb with new beacon height + err = storePortalStateToDB(portalStateDB, beaconHeight+1, currentPortalState) + if err != nil { + Logger.log.Error(err) + } + + return nil +} + +func (blockchain *BlockChain) processPortalCustodianDeposit( + stateDB 
	*statedb.StateDB, beaconHeight uint64, instructions []string, currentPortalState *CurrentPortalState) error {
	if currentPortalState == nil {
		Logger.log.Errorf("current portal state is nil")
		return nil
	}
	// instructions layout: [metaType, shardID, status, contentJSON]
	if len(instructions) != 4 {
		return nil // skip the instruction
	}

	// unmarshal instructions content
	var actionData metadata.PortalCustodianDepositContent
	err := json.Unmarshal([]byte(instructions[3]), &actionData)
	if err != nil {
		return err
	}

	depositStatus := instructions[2]
	if depositStatus == common.PortalCustodianDepositAcceptedChainStatus {
		keyCustodianState := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.IncogAddressStr)
		keyCustodianStateStr := keyCustodianState.String()

		if currentPortalState.CustodianPoolState[keyCustodianStateStr] == nil {
			// new custodian
			newCustodian := statedb.NewCustodianStateWithValue(
				actionData.IncogAddressStr, actionData.DepositedAmount, actionData.DepositedAmount,
				nil, nil,
				actionData.RemoteAddresses, nil)
			currentPortalState.CustodianPoolState[keyCustodianStateStr] = newCustodian
		} else {
			// custodian deposited before
			// update state of the custodian: add the deposit to both total and
			// free collateral, and merge any remote addresses not seen before
			custodian := currentPortalState.CustodianPoolState[keyCustodianStateStr]
			totalCollateral := custodian.GetTotalCollateral() + actionData.DepositedAmount
			freeCollateral := custodian.GetFreeCollateral() + actionData.DepositedAmount
			holdingPubTokens := custodian.GetHoldingPublicTokens()
			lockedAmountCollateral := custodian.GetLockedAmountCollateral()
			rewardAmount := custodian.GetRewardAmount()
			remoteAddresses := custodian.GetRemoteAddresses()
			for _, address := range actionData.RemoteAddresses {
				// only keep the first remote address registered per pToken
				if existedAddr, _ := statedb.GetRemoteAddressByTokenID(remoteAddresses, address.GetPTokenID()); existedAddr == "" {
					remoteAddresses = append(remoteAddresses, address)
				}
			}

			newCustodian := statedb.NewCustodianStateWithValue(actionData.IncogAddressStr, totalCollateral, freeCollateral,
				holdingPubTokens, lockedAmountCollateral, remoteAddresses, rewardAmount)
			currentPortalState.CustodianPoolState[keyCustodianStateStr] = newCustodian
		}

		// store custodian deposit status into DB
		custodianDepositTrackData := metadata.PortalCustodianDepositStatus{
			Status:          common.PortalCustodianDepositAcceptedStatus,
			IncogAddressStr: actionData.IncogAddressStr,
			DepositedAmount: actionData.DepositedAmount,
			RemoteAddresses: actionData.RemoteAddresses,
		}
		custodianDepositDataBytes, _ := json.Marshal(custodianDepositTrackData)

		err = statedb.StoreCustodianDepositStatus(
			stateDB,
			actionData.TxReqID.String(),
			custodianDepositDataBytes,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occured while tracking custodian deposit collateral: %+v", err)
			return nil
		}
	} else if depositStatus == common.PortalCustodianDepositRefundChainStatus {
		// store custodian deposit status into DB
		// NOTE(review): this branch duplicates the tracking code above except for
		// the Status value — a candidate for extraction into a helper.
		custodianDepositTrackData := metadata.PortalCustodianDepositStatus{
			Status:          common.PortalCustodianDepositRefundStatus,
			IncogAddressStr: actionData.IncogAddressStr,
			DepositedAmount: actionData.DepositedAmount,
			RemoteAddresses: actionData.RemoteAddresses,
		}
		custodianDepositDataBytes, _ := json.Marshal(custodianDepositTrackData)

		err = statedb.StoreCustodianDepositStatus(
			stateDB,
			actionData.TxReqID.String(),
			custodianDepositDataBytes,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occured while tracking custodian deposit collateral: %+v", err)
			return nil
		}
	}

	return nil
}

// processPortalUserRegister applies a porting-request (user register)
// instruction: it validates the matching custodians, persists the request
// status records, and adds the request to the waiting porting list.
func (blockchain *BlockChain) processPortalUserRegister(
	portalStateDB *statedb.StateDB,
	beaconHeight uint64, instructions []string, currentPortalState *CurrentPortalState) error {

	if currentPortalState == nil {
		Logger.log.Errorf("current portal state is nil")
		return nil
	}

	if len(instructions) != 4 {
		return nil // skip the instruction
	}

	// parse instruction
	var portingRequestContent metadata.PortalPortingRequestContent
	err := json.Unmarshal([]byte(instructions[3]), &portingRequestContent)
	if err != nil {
		Logger.log.Errorf("ERROR: an error occurred while unmarshaling content string of porting request contribution instruction: %+v", err)
		return nil
	}

	reqStatus := instructions[2]

	uniquePortingID := portingRequestContent.UniqueRegisterId
	txReqID := portingRequestContent.TxReqID
	tokenID := portingRequestContent.PTokenId

	porterAddress := portingRequestContent.IncogAddressStr
	amount := portingRequestContent.RegisterAmount

	custodiansDetail := portingRequestContent.Custodian
	portingFee := portingRequestContent.PortingFee

	switch reqStatus {
	case common.PortalPortingRequestAcceptedChainStatus:
		//verify custodian: every matched custodian must exist and have enough
		//free collateral to cover its locked amount
		isCustodianAccepted := true
		for _, itemCustodian := range custodiansDetail {
			keyPortingRequestNewState := statedb.GenerateCustodianStateObjectKey(beaconHeight, itemCustodian.IncAddress)
			keyPortingRequestNewStateStr := keyPortingRequestNewState.String()
			custodian, ok := currentPortalState.CustodianPoolState[keyPortingRequestNewStateStr]
			if !ok {
				Logger.log.Errorf("ERROR: Custodian not found")
				isCustodianAccepted = false
				break
			}

			if custodian.GetFreeCollateral() < itemCustodian.LockedAmountCollateral {
				Logger.log.Errorf("ERROR: Custodian is not enough PRV, free collateral %v < lock amount %v", custodian.GetFreeCollateral(), itemCustodian.LockedAmountCollateral)
				isCustodianAccepted = false
				break
			}

			// NOTE(review): this trailing continue is a no-op.
			continue
		}

		// NOTE(review): `== false` is non-idiomatic (`!isCustodianAccepted`), and
		// the message repeats "Custodian not found" even when the actual failure
		// was insufficient collateral.
		if isCustodianAccepted == false {
			Logger.log.Errorf("ERROR: Custodian not found")
			return nil
		}

		// new request
		newWaitingPortingRequestState := statedb.NewWaitingPortingRequestWithValue(
			uniquePortingID,
			txReqID,
			tokenID,
			porterAddress,
			amount,
			custodiansDetail,
			portingFee,
			common.PortalPortingReqWaitingStatus,
			beaconHeight+1,
		)

		newPortingRequestState := metadata.NewPortingRequestStatus(
			uniquePortingID,
			txReqID,
			tokenID,
			porterAddress,
			amount,
			custodiansDetail,
			portingFee,
			common.PortalPortingReqWaitingStatus,
			beaconHeight+1,
		)

		newPortingTxRequestState := metadata.NewPortingRequestStatus(
			uniquePortingID,
			txReqID,
			tokenID,
			porterAddress,
			amount,
			custodiansDetail,
			portingFee,
			common.PortalPortingTxRequestAcceptedStatus,
			beaconHeight+1,
		)

		//save transaction
		newPortingTxRequestStatusBytes, _ := json.Marshal(newPortingTxRequestState)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalPortingRequestTxStatusPrefix(),
			[]byte(txReqID.String()),
			newPortingTxRequestStatusBytes,
			beaconHeight,
		)

		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while store porting tx request item: %+v", err)
			return nil
		}

		//save success porting request
		newPortingRequestStatusBytes, _ := json.Marshal(newPortingRequestState)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalPortingRequestStatusPrefix(),
			[]byte(uniquePortingID),
			newPortingRequestStatusBytes,
			beaconHeight,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while store porting request item: %+v", err)
			return nil
		}

		//save custodian state
		for _, itemCustodian := range custodiansDetail {
			//update custodian state
			// NOTE(review): the error from UpdateCustodianWithNewAmount is
			// deliberately discarded — confirm a partial update here is acceptable.
			custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, itemCustodian.IncAddress)
			custodianKeyStr := custodianKey.String()
			_ = UpdateCustodianWithNewAmount(currentPortalState, custodianKeyStr, tokenID, itemCustodian.Amount, itemCustodian.LockedAmountCollateral)
		}

		//save waiting request porting state
		keyWaitingPortingRequest := statedb.GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, portingRequestContent.UniqueRegisterId)
		Logger.log.Infof("Porting request, save waiting porting request with key %v", keyWaitingPortingRequest)
		currentPortalState.WaitingPortingRequests[keyWaitingPortingRequest.String()] = newWaitingPortingRequestState

		// NOTE(review): break at the end of a Go switch case is redundant.
		break
	case
	common.PortalPortingRequestRejectedChainStatus:
		// rejected request: only record the rejected tx status; no state change
		txReqID := portingRequestContent.TxReqID

		newPortingRequest := metadata.NewPortingRequestStatus(
			uniquePortingID,
			txReqID,
			tokenID,
			porterAddress,
			amount,
			custodiansDetail,
			portingFee,
			common.PortalPortingTxRequestRejectedStatus,
			beaconHeight+1,
		)

		//save transaction
		newPortingTxRequestStatusBytes, _ := json.Marshal(newPortingRequest)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalPortingRequestTxStatusPrefix(),
			[]byte(txReqID.String()),
			newPortingTxRequestStatusBytes,
			beaconHeight,
		)

		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while store porting request item: %+v", err)
			return nil
		}
		// NOTE(review): break at the end of a Go switch case is redundant.
		break
	}

	return nil
}

// processPortalUserReqPToken applies a request-pToken instruction: on
// acceptance it removes the porting request from the waiting list, marks it
// successful, records the request status, and counts the minted amount into
// updatingInfoByTokenID for the bridge token update.
func (blockchain *BlockChain) processPortalUserReqPToken(
	stateDB *statedb.StateDB,
	beaconHeight uint64, instructions []string,
	currentPortalState *CurrentPortalState,
	updatingInfoByTokenID map[common.Hash]UpdatingInfo) error {
	if currentPortalState == nil {
		Logger.log.Errorf("current portal state is nil")
		return nil
	}

	if len(instructions) != 4 {
		return nil // skip the instruction
	}

	// unmarshal instructions content
	var actionData metadata.PortalRequestPTokensContent
	err := json.Unmarshal([]byte(instructions[3]), &actionData)
	if err != nil {
		Logger.log.Errorf("Can not unmarshal instruction content %v - Error: %v\n", instructions[3], err)
		return nil
	}

	reqStatus := instructions[2]
	if reqStatus == common.PortalReqPTokensAcceptedChainStatus {
		// remove portingRequest from waitingPortingRequests
		waitingPortingReqKey := statedb.GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, actionData.UniquePortingID)
		waitingPortingReqKeyStr := waitingPortingReqKey.String()
		deleteWaitingPortingRequest(currentPortalState, waitingPortingReqKeyStr)
		// make sure user can not re-use proof for other portingID
		// update status of porting request with portingID

		//update new status of porting request
		portingRequestState, err := statedb.GetPortalStateStatusMultiple(stateDB, statedb.PortalPortingRequestStatusPrefix(), []byte(actionData.UniquePortingID))
		if err != nil {
			Logger.log.Errorf("Has an error occurred while get porting request status: %+v", err)
			return nil
		}

		var portingRequestStatus metadata.PortingRequestStatus
		err = json.Unmarshal(portingRequestState, &portingRequestStatus)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while unmarshal PortingRequestStatus: %+v", err)
			return nil
		}

		portingRequestStatus.Status = common.PortalPortingReqSuccessStatus
		newPortingRequestStatusBytes, _ := json.Marshal(portingRequestStatus)
		err = statedb.TrackPortalStateStatusMultiple(
			stateDB,
			statedb.PortalPortingRequestStatusPrefix(),
			[]byte(actionData.UniquePortingID),
			newPortingRequestStatusBytes,
			beaconHeight,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while store porting request item status: %+v", err)
			return nil
		}
		//end

		// track reqPToken status by txID into DB
		reqPTokenTrackData := metadata.PortalRequestPTokensStatus{
			Status:          common.PortalReqPTokenAcceptedStatus,
			UniquePortingID: actionData.UniquePortingID,
			TokenID:         actionData.TokenID,
			IncogAddressStr: actionData.IncogAddressStr,
			PortingAmount:   actionData.PortingAmount,
			PortingProof:    actionData.PortingProof,
		}
		reqPTokenTrackDataBytes, _ := json.Marshal(reqPTokenTrackData)
		err = statedb.StoreRequestPTokenStatus(
			stateDB,
			actionData.TxReqID.String(),
			reqPTokenTrackDataBytes,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occured while tracking request ptoken tx: %+v", err)
			return nil
		}

		// update bridge/portal token info
		incTokenID, err := common.Hash{}.NewHashFromStr(actionData.TokenID)
		if err != nil {
			Logger.log.Errorf("ERROR: Can not new hash from porting incTokenID: %+v", err)
			return nil
		}
		updatingInfo, found :=
		updatingInfoByTokenID[*incTokenID]
		if found {
			updatingInfo.countUpAmt += actionData.PortingAmount
		} else {
			updatingInfo = UpdatingInfo{
				countUpAmt:      actionData.PortingAmount,
				deductAmt:       0,
				tokenID:         *incTokenID,
				externalTokenID: nil,
				isCentralized:   false,
			}
		}
		updatingInfoByTokenID[*incTokenID] = updatingInfo

	} else if reqStatus == common.PortalReqPTokensRejectedChainStatus {
		// rejected: only record the rejected request status keyed by txReqID
		reqPTokenTrackData := metadata.PortalRequestPTokensStatus{
			Status:          common.PortalReqPTokenRejectedStatus,
			UniquePortingID: actionData.UniquePortingID,
			TokenID:         actionData.TokenID,
			IncogAddressStr: actionData.IncogAddressStr,
			PortingAmount:   actionData.PortingAmount,
			PortingProof:    actionData.PortingProof,
		}
		reqPTokenTrackDataBytes, _ := json.Marshal(reqPTokenTrackData)
		err = statedb.StoreRequestPTokenStatus(
			stateDB,
			actionData.TxReqID.String(),
			reqPTokenTrackDataBytes,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occured while tracking request ptoken tx: %+v", err)
			return nil
		}
	}

	return nil
}

// processPortalExchangeRates applies an exchange-rates instruction: it records
// the request status and, on acceptance, queues the rates for the median pick
// in pickExchangesRatesFinal.
// NOTE(review): unlike the sibling handlers, this one does not check
// len(instructions) != 4 before reading instructions[3]/[2] — a short
// instruction would panic here; confirm callers guarantee the length.
func (blockchain *BlockChain) processPortalExchangeRates(portalStateDB *statedb.StateDB, beaconHeight uint64, instructions []string, currentPortalState *CurrentPortalState) error {
	if currentPortalState == nil {
		Logger.log.Errorf("current portal state is nil")
		return nil
	}

	// parse instruction
	var portingExchangeRatesContent metadata.PortalExchangeRatesContent
	err := json.Unmarshal([]byte(instructions[3]), &portingExchangeRatesContent)
	if err != nil {
		Logger.log.Errorf("ERROR: an error occurred while unmarshaling content string of portal exchange rates instruction: %+v", err)
		return nil
	}

	reqStatus := instructions[2]
	Logger.log.Infof("Portal exchange rates, data input: %+v, status: %+v", portingExchangeRatesContent, reqStatus)

	switch reqStatus {
	case common.PortalExchangeRatesAcceptedChainStatus:
		//save db
		newExchangeRates := metadata.NewExchangeRatesRequestStatus(
			common.PortalExchangeRatesAcceptedStatus,
			portingExchangeRatesContent.SenderAddress,
			portingExchangeRatesContent.Rates,
		)

		newExchangeRatesStatusBytes, _ := json.Marshal(newExchangeRates)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalExchangeRatesRequestStatusPrefix(),
			[]byte(portingExchangeRatesContent.TxReqID.String()),
			newExchangeRatesStatusBytes,
			beaconHeight,
		)

		if err != nil {
			Logger.log.Errorf("ERROR: Save exchange rates error: %+v", err)
			return err
		}

		currentPortalState.ExchangeRatesRequests[portingExchangeRatesContent.TxReqID.String()] = newExchangeRates

		Logger.log.Infof("Portal exchange rates, exchange rates request: total final exchange rate %v , total exchange rate request %v", len(currentPortalState.FinalExchangeRatesState), len(currentPortalState.ExchangeRatesRequests))

	case common.PortalExchangeRatesRejectedChainStatus:
		//save db
		newExchangeRates := metadata.NewExchangeRatesRequestStatus(
			common.PortalExchangeRatesRejectedStatus,
			portingExchangeRatesContent.SenderAddress,
			nil,
		)

		newExchangeRatesStatusBytes, _ := json.Marshal(newExchangeRates)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalExchangeRatesRequestStatusPrefix(),
			[]byte(portingExchangeRatesContent.TxReqID.String()),
			newExchangeRatesStatusBytes,
			beaconHeight,
		)

		if err != nil {
			Logger.log.Errorf("ERROR: Save exchange rates error: %+v", err)
			return err
		}
	}

	return nil
}

// pickExchangesRatesFinal folds all pending exchange-rate requests into the
// final exchange-rate state: per pToken it takes the median of the submitted
// rates, falling back to the previous final value when no new rate arrived.
func (blockchain *BlockChain) pickExchangesRatesFinal(beaconHeight uint64, currentPortalState *CurrentPortalState) {
	exchangeRatesKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight)

	//convert to slice
	var btcExchangeRatesSlice []uint64
	var bnbExchangeRatesSlice []uint64
	var prvExchangeRatesSlice []uint64
	for _, v := range currentPortalState.ExchangeRatesRequests {
		for _, rate := range v.Rates {
			switch rate.PTokenID {
			case
			common.PortalBTCIDStr:
				btcExchangeRatesSlice = append(btcExchangeRatesSlice, rate.Rate)
				// NOTE(review): break at the end of a Go switch case is redundant.
				break
			case common.PortalBNBIDStr:
				bnbExchangeRatesSlice = append(bnbExchangeRatesSlice, rate.Rate)
				break
			case common.PRVIDStr:
				prvExchangeRatesSlice = append(prvExchangeRatesSlice, rate.Rate)
				break
			}
		}
	}

	//sort ascending so calcMedian can index the middle elements
	sort.SliceStable(btcExchangeRatesSlice, func(i, j int) bool {
		return btcExchangeRatesSlice[i] < btcExchangeRatesSlice[j]
	})

	sort.SliceStable(bnbExchangeRatesSlice, func(i, j int) bool {
		return bnbExchangeRatesSlice[i] < bnbExchangeRatesSlice[j]
	})

	sort.SliceStable(prvExchangeRatesSlice, func(i, j int) bool {
		return prvExchangeRatesSlice[i] < prvExchangeRatesSlice[j]
	})

	exchangeRatesList := make(map[string]statedb.FinalExchangeRatesDetail)

	var btcAmount uint64
	var bnbAmount uint64
	var prvAmount uint64

	//get current value
	if len(btcExchangeRatesSlice) > 0 {
		btcAmount = calcMedian(btcExchangeRatesSlice)
	}

	if len(bnbExchangeRatesSlice) > 0 {
		bnbAmount = calcMedian(bnbExchangeRatesSlice)

	}

	if len(prvExchangeRatesSlice) > 0 {
		prvAmount = calcMedian(prvExchangeRatesSlice)
	}

	//todo: need refactor code, not need write this code
	//update value when has exchange
	if exchangeRatesState, ok := currentPortalState.FinalExchangeRatesState[exchangeRatesKey.String()]; ok {
		var btcAmountPreState uint64
		var bnbAmountPreState uint64
		var prvAmountPreState uint64
		if value, ok := exchangeRatesState.Rates()[common.PortalBTCIDStr]; ok {
			btcAmountPreState = value.Amount
		}

		if value, ok := exchangeRatesState.Rates()[common.PortalBNBIDStr]; ok {
			bnbAmountPreState = value.Amount
		}

		if value, ok := exchangeRatesState.Rates()[common.PRVIDStr]; ok {
			prvAmountPreState = value.Amount
		}

		//pick current value and pre value state
		btcAmount = choicePrice(btcAmount, btcAmountPreState)
		bnbAmount = choicePrice(bnbAmount, bnbAmountPreState)
		prvAmount = choicePrice(prvAmount,
			prvAmountPreState)
	}

	//select: only tokens with a non-zero rate make it into the final state
	if btcAmount > 0 {
		exchangeRatesList[common.PortalBTCIDStr] = statedb.FinalExchangeRatesDetail{
			Amount: btcAmount,
		}
	}

	if bnbAmount > 0 {
		exchangeRatesList[common.PortalBNBIDStr] = statedb.FinalExchangeRatesDetail{
			Amount: bnbAmount,
		}
	}

	if prvAmount > 0 {
		exchangeRatesList[common.PRVIDStr] = statedb.FinalExchangeRatesDetail{
			Amount: prvAmount,
		}
	}

	if len(exchangeRatesList) > 0 {
		currentPortalState.FinalExchangeRatesState[exchangeRatesKey.String()] = statedb.NewFinalExchangeRatesStateWithValue(exchangeRatesList)
	}
}

// calcMedian returns the median of ratesList. The caller must pass a sorted,
// non-empty slice (pickExchangesRatesFinal sorts and checks len > 0 first);
// an empty slice would panic on the index access. For even lengths the two
// middle values are averaged with integer division.
func calcMedian(ratesList []uint64) uint64 {
	mNumber := len(ratesList) / 2

	if len(ratesList)%2 == 0 {
		return (ratesList[mNumber-1] + ratesList[mNumber]) / 2
	}

	return ratesList[mNumber]
}

// choicePrice prefers the freshly computed price; if there is none (zero), it
// falls back to the previous state's price, and returns 0 when both are zero.
func choicePrice(currentPrice uint64, prePrice uint64) uint64 {
	if currentPrice > 0 {
		return currentPrice
	} else {
		if prePrice > 0 {
			return prePrice
		}
	}

	return 0
}

// processPortalRedeemRequest applies a redeem-request instruction: on
// acceptance it queues the request in WaitingRedeemRequests, moves the
// redeemed amount out of each matching custodian's holdings, records status
// by redeemID and by txReqID, and counts the burn into updatingInfoByTokenID.
func (blockchain *BlockChain) processPortalRedeemRequest(
	stateDB *statedb.StateDB,
	beaconHeight uint64, instructions []string,
	currentPortalState *CurrentPortalState,
	updatingInfoByTokenID map[common.Hash]UpdatingInfo) error {
	if currentPortalState == nil {
		Logger.log.Errorf("current portal state is nil")
		return nil
	}

	if len(instructions) != 4 {
		return nil // skip the instruction
	}

	// unmarshal instructions content
	var actionData metadata.PortalRedeemRequestContent
	err := json.Unmarshal([]byte(instructions[3]), &actionData)
	if err != nil {
		Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err)
		return nil
	}

	// get tokenID from redeemTokenID
	tokenID := actionData.TokenID

	reqStatus := instructions[2]

	if reqStatus == common.PortalRedeemRequestAcceptedChainStatus {
		// add waiting redeem request into waiting redeems list
		keyWaitingRedeemRequest :=
		statedb.GenerateWaitingRedeemRequestObjectKey(beaconHeight, actionData.UniqueRedeemID)
		keyWaitingRedeemRequestStr := keyWaitingRedeemRequest.String()
		redeemRequest := statedb.NewWaitingRedeemRequestWithValue(
			actionData.UniqueRedeemID,
			actionData.TokenID,
			actionData.RedeemerIncAddressStr,
			actionData.RemoteAddress,
			actionData.RedeemAmount,
			actionData.MatchingCustodianDetail,
			actionData.RedeemFee,
			beaconHeight+1,
			actionData.TxReqID,
		)
		currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr] = redeemRequest

		// update custodian state: deduct the matched amount from each
		// custodian's holding public tokens
		for _, cus := range actionData.MatchingCustodianDetail {
			custodianStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, cus.GetIncognitoAddress())
			custodianStateKeyStr := custodianStateKey.String()
			holdingPubTokenTmp := currentPortalState.CustodianPoolState[custodianStateKeyStr].GetHoldingPublicTokens()
			if holdingPubTokenTmp[tokenID] < cus.GetAmount() {
				// NOTE(review): bailing out mid-loop leaves earlier custodians in
				// this request already deducted — confirm this partial update is
				// acceptable.
				Logger.log.Errorf("[processPortalRedeemRequest] Amount holding public tokens is less than matching redeem amount")
				return nil
			}
			holdingPubTokenTmp[tokenID] -= cus.GetAmount()
			currentPortalState.CustodianPoolState[custodianStateKeyStr].SetHoldingPublicTokens(holdingPubTokenTmp)
		}

		// track status of redeem request by redeemID
		redeemRequestStatus := metadata.PortalRedeemRequestStatus{
			Status:                  common.PortalRedeemReqWaitingStatus,
			UniqueRedeemID:          actionData.UniqueRedeemID,
			TokenID:                 actionData.TokenID,
			RedeemAmount:            actionData.RedeemAmount,
			RedeemerIncAddressStr:   actionData.RedeemerIncAddressStr,
			RemoteAddress:           actionData.RemoteAddress,
			RedeemFee:               actionData.RedeemFee,
			MatchingCustodianDetail: actionData.MatchingCustodianDetail,
			TxReqID:                 actionData.TxReqID,
		}
		redeemRequestStatusBytes, _ := json.Marshal(redeemRequestStatus)
		err := statedb.StorePortalRedeemRequestStatus(
			stateDB,
			actionData.UniqueRedeemID,
			redeemRequestStatusBytes)
		if err != nil {
			Logger.log.Errorf("[processPortalRedeemRequest] Error when storing status of redeem request by redeemID: %v\n", err)
			return nil
		}

		// track status of redeem request by txReqID
		redeemRequestByTxIDStatus := metadata.PortalRedeemRequestStatus{
			Status:         common.PortalRedeemReqWaitingStatus,
			UniqueRedeemID: actionData.UniqueRedeemID,
		}
		redeemRequestByTxIDStatusBytes, _ := json.Marshal(redeemRequestByTxIDStatus)
		err = statedb.StorePortalRedeemRequestByTxIDStatus(
			stateDB, actionData.TxReqID.String(), redeemRequestByTxIDStatusBytes)
		if err != nil {
			Logger.log.Errorf("[processPortalRedeemRequest] Error when tracking status of redeem request by txReqID: %v\n", err)
			return nil
		}

		// update bridge/portal token info
		incTokenID, err := common.Hash{}.NewHashFromStr(actionData.TokenID)
		if err != nil {
			Logger.log.Errorf("ERROR: Can not new hash from porting incTokenID: %+v", err)
			return nil
		}
		updatingInfo, found := updatingInfoByTokenID[*incTokenID]
		if found {
			updatingInfo.deductAmt += actionData.RedeemAmount
		} else {
			updatingInfo = UpdatingInfo{
				countUpAmt:      0,
				deductAmt:       actionData.RedeemAmount,
				tokenID:         *incTokenID,
				externalTokenID: nil,
				isCentralized:   false,
			}
		}
		updatingInfoByTokenID[*incTokenID] = updatingInfo

	} else if reqStatus == common.PortalRedeemRequestRejectedChainStatus {
		// track status of redeem request by txReqID
		// NOTE(review): this REJECTED branch stores PortalRedeemReqWaitingStatus,
		// identical to the accepted branch — looks like a copy-paste bug; a
		// rejected status constant was probably intended. Confirm before fixing,
		// since stored statuses are consensus-visible.
		redeemRequestByTxIDStatus := metadata.PortalRedeemRequestStatus{
			Status:         common.PortalRedeemReqWaitingStatus,
			UniqueRedeemID: actionData.UniqueRedeemID,
		}
		redeemRequestByTxIDStatusBytes, _ := json.Marshal(redeemRequestByTxIDStatus)
		err = statedb.StorePortalRedeemRequestByTxIDStatus(
			stateDB, actionData.TxReqID.String(), redeemRequestByTxIDStatusBytes)
		if err != nil {
			Logger.log.Errorf("[processPortalRedeemRequest] Error when tracking status of redeem request by txReqID: %v\n", err)
			return nil
		}
	}

	return nil
}

// processPortalCustodianWithdrawRequest applies a custodian-withdraw
// instruction (continued below).
func (blockchain *BlockChain)
	processPortalCustodianWithdrawRequest(
	portalStateDB *statedb.StateDB,
	beaconHeight uint64,
	instructions []string,
	currentPortalState *CurrentPortalState) error {
	if currentPortalState == nil {
		Logger.log.Errorf("current portal state is nil")
		return nil
	}

	if len(instructions) != 4 {
		return nil // skip the instruction
	}
	// parse instruction
	var custodianWithdrawRequestContent = metadata.PortalCustodianWithdrawRequestContent{}
	err := json.Unmarshal([]byte(instructions[3]), &custodianWithdrawRequestContent)
	if err != nil {
		Logger.log.Errorf("ERROR: an error occurred while unmarshaling content string of custodian withdraw request instruction: %+v", err)
		return nil
	}

	reqStatus := instructions[2]
	paymentAddress := custodianWithdrawRequestContent.PaymentAddress
	amount := custodianWithdrawRequestContent.Amount
	freeCollateral := custodianWithdrawRequestContent.RemainFreeCollateral
	txHash := custodianWithdrawRequestContent.TxReqID.String()

	switch reqStatus {
	case common.PortalCustodianWithdrawRequestAcceptedStatus:
		//save transaction
		newCustodianWithdrawRequest := metadata.NewCustodianWithdrawRequestStatus(
			paymentAddress,
			amount,
			common.PortalCustodianWithdrawReqAcceptedStatus,
			freeCollateral,
		)

		custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, paymentAddress)
		custodianKeyStr := custodianKey.String()
		custodian, ok := currentPortalState.CustodianPoolState[custodianKeyStr]

		if !ok {
			Logger.log.Errorf("ERROR: Custodian not found ")
			return nil
		}

		//check free collateral: re-validated here even though the request was
		//already accepted upstream
		if amount > custodian.GetFreeCollateral() {
			Logger.log.Errorf("ERROR: Free collateral is not enough to refund")
			return nil
		}

		contentStatusBytes, _ := json.Marshal(newCustodianWithdrawRequest)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalCustodianWithdrawStatusPrefix(),
			[]byte(txHash),
			contentStatusBytes,
			beaconHeight,
		)

		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while store custodian withdraw item: %+v", err)
			return nil
		}

		//update custodian: the withdrawn amount leaves both free and total collateral
		custodian.SetFreeCollateral(custodian.GetFreeCollateral() - amount)
		custodian.SetTotalCollateral(custodian.GetTotalCollateral() - amount)

		currentPortalState.CustodianPoolState[custodianKeyStr] = custodian

	case common.PortalCustodianWithdrawRequestRejectedStatus:
		newCustodianWithdrawRequest := metadata.NewCustodianWithdrawRequestStatus(
			paymentAddress,
			amount,
			common.PortalCustodianWithdrawReqRejectStatus,
			freeCollateral,
		)

		contentStatusBytes, _ := json.Marshal(newCustodianWithdrawRequest)
		err = statedb.TrackPortalStateStatusMultiple(
			portalStateDB,
			statedb.PortalCustodianWithdrawStatusPrefix(),
			[]byte(txHash),
			contentStatusBytes,
			beaconHeight,
		)

		if err != nil {
			Logger.log.Errorf("ERROR: an error occurred while store custodian withdraw item: %+v", err)
			return nil
		}
	}

	return nil
}

// processPortalUnlockCollateral applies a request-unlock-collateral
// instruction: on acceptance it releases the custodian's locked collateral,
// removes the custodian from the waiting redeem request (deleting the request
// once all custodians have responded), and records the request status.
// NOTE(review): unlike sibling handlers this one has no currentPortalState nil
// check and no len(instructions) != 4 guard before instructions[3]/[2] —
// confirm callers guarantee both.
func (blockchain *BlockChain) processPortalUnlockCollateral(
	stateDB *statedb.StateDB,
	beaconHeight uint64, instructions []string,
	currentPortalState *CurrentPortalState) error {

	// unmarshal instructions content
	var actionData metadata.PortalRequestUnlockCollateralContent
	err := json.Unmarshal([]byte(instructions[3]), &actionData)
	if err != nil {
		Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err)
		return nil
	}

	// get tokenID from redeemTokenID
	tokenID := actionData.TokenID
	reqStatus := instructions[2]
	if reqStatus == common.PortalReqUnlockCollateralAcceptedChainStatus {
		// update custodian state (FreeCollateral, LockedAmountCollateral)
		custodianStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.CustodianAddressStr)
		custodianStateKeyStr := custodianStateKey.String()
		finalExchangeRateKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight)
		finalExchangeRateKeyStr :=
		finalExchangeRateKey.String()
		// NOTE(review): Errorf below is missing a format verb for err2, so the
		// error value is appended by the fmt machinery rather than formatted.
		_, err2 := updateFreeCollateralCustodian(
			currentPortalState.CustodianPoolState[custodianStateKeyStr],
			actionData.RedeemAmount, tokenID,
			currentPortalState.FinalExchangeRatesState[finalExchangeRateKeyStr])
		if err2 != nil {
			Logger.log.Errorf("Error when update free collateral amount for custodian", err2)

			return nil
		}

		redeemID := actionData.UniqueRedeemID
		keyWaitingRedeemRequest := statedb.GenerateWaitingRedeemRequestObjectKey(beaconHeight, redeemID)
		keyWaitingRedeemRequestStr := keyWaitingRedeemRequest.String()

		// update redeem request state in WaitingRedeemRequest (remove custodian from matchingCustodianDetail)
		newCustodians, _ := removeCustodianFromMatchingRedeemCustodians(
			currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].GetCustodians(), actionData.CustodianAddressStr)
		currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].SetCustodians(newCustodians)

		// remove redeem request from WaitingRedeemRequest list when all matching custodians return public token to user
		// when list matchingCustodianDetail is empty
		if len(currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].GetCustodians()) == 0 {
			deleteWaitingRedeemRequest(currentPortalState, keyWaitingRedeemRequestStr)

			// update status of redeem request with redeemID
			err = updateRedeemRequestStatusByRedeemId(redeemID, common.PortalRedeemReqSuccessStatus, stateDB)
			if err != nil {
				Logger.log.Errorf("ERROR: an error occurred while updating redeem request status by redeemID: %+v", err)
				return nil
			}
		}

		// track reqUnlockCollateral status by txID into DB
		reqUnlockCollateralTrackData := metadata.PortalRequestUnlockCollateralStatus{
			Status:              common.PortalReqUnlockCollateralAcceptedStatus,
			UniqueRedeemID:      actionData.UniqueRedeemID,
			TokenID:             actionData.TokenID,
			CustodianAddressStr: actionData.CustodianAddressStr,
			RedeemAmount:        actionData.RedeemAmount,
			UnlockAmount:        actionData.UnlockAmount,
			RedeemProof:         actionData.RedeemProof,
		}
		reqUnlockCollateralTrackDataBytes, _ := json.Marshal(reqUnlockCollateralTrackData)
		err = statedb.StorePortalRequestUnlockCollateralStatus(
			stateDB,
			actionData.TxReqID.String(),
			reqUnlockCollateralTrackDataBytes,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occured while tracking request unlock collateral tx: %+v", err)
			return nil
		}

	} else if reqStatus == common.PortalReqUnlockCollateralRejectedChainStatus {
		// track reqUnlockCollateral status by txID into DB
		reqUnlockCollateralTrackData := metadata.PortalRequestUnlockCollateralStatus{
			Status:              common.PortalReqUnlockCollateralRejectedStatus,
			UniqueRedeemID:      actionData.UniqueRedeemID,
			TokenID:             actionData.TokenID,
			CustodianAddressStr: actionData.CustodianAddressStr,
			RedeemAmount:        actionData.RedeemAmount,
			UnlockAmount:        actionData.UnlockAmount,
			RedeemProof:         actionData.RedeemProof,
		}
		reqUnlockCollateralTrackDataBytes, _ := json.Marshal(reqUnlockCollateralTrackData)
		err = statedb.StorePortalRequestUnlockCollateralStatus(
			stateDB,
			actionData.TxReqID.String(),
			reqUnlockCollateralTrackDataBytes,
		)
		if err != nil {
			Logger.log.Errorf("ERROR: an error occured while tracking request unlock collateral tx: %+v", err)
			return nil
		}
	}

	return nil
}
diff --git a/blockchain/beaconportalproducer.go b/blockchain/beaconportalproducer.go
new file mode 100644
index 0000000000..de15cd138f
--- /dev/null
+++ b/blockchain/beaconportalproducer.go
@@ -0,0 +1,2155 @@
package blockchain

import (
	"fmt"
	"bytes"
	"encoding/base64"
	"encoding/json"
	"github.com/binance-chain/go-sdk/types/msg"
	"github.com/incognitochain/incognito-chain/common"
	"github.com/incognitochain/incognito-chain/dataaccessobject/statedb"
	"github.com/incognitochain/incognito-chain/metadata"
	"github.com/incognitochain/incognito-chain/relaying/bnb"
	btcrelaying "github.com/incognitochain/incognito-chain/relaying/btc"
	"strconv"
)

// beacon build new instruction from instruction received from ShardToBeaconBlock
// The returned instruction is the common 4-element layout:
// [metaType, shardID, status, contentJSON].
func buildCustodianDepositInst(
	custodianAddressStr string,
	depositedAmount uint64,
	remoteAddresses []statedb.RemoteAddress,
	metaType int,
	shardID byte,
	txReqID common.Hash,
	status string,
) []string {
	custodianDepositContent := metadata.PortalCustodianDepositContent{
		IncogAddressStr: custodianAddressStr,
		RemoteAddresses: remoteAddresses,
		DepositedAmount: depositedAmount,
		TxReqID:         txReqID,
		ShardID:         shardID,
	}
	custodianDepositContentBytes, _ := json.Marshal(custodianDepositContent)
	return []string{
		strconv.Itoa(metaType),
		strconv.Itoa(int(shardID)),
		status,
		string(custodianDepositContentBytes),
	}
}

// buildRequestPortingInst builds a porting-request instruction in the common
// [metaType, shardID, status, contentJSON] layout.
func buildRequestPortingInst(
	metaType int,
	shardID byte,
	reqStatus string,
	uniqueRegisterId string,
	incogAddressStr string,
	pTokenId string,
	registerAmount uint64,
	portingFee uint64,
	custodian []*statedb.MatchingPortingCustodianDetail,
	txReqID common.Hash,
) []string {
	portingRequestContent := metadata.PortalPortingRequestContent{
		UniqueRegisterId: uniqueRegisterId,
		IncogAddressStr:  incogAddressStr,
		PTokenId:         pTokenId,
		RegisterAmount:   registerAmount,
		PortingFee:       portingFee,
		Custodian:        custodian,
		TxReqID:          txReqID,
	}

	portingRequestContentBytes, _ := json.Marshal(portingRequestContent)
	return []string{
		strconv.Itoa(metaType),
		strconv.Itoa(int(shardID)),
		reqStatus,
		string(portingRequestContentBytes),
	}
}

// beacon build new instruction from instruction received from ShardToBeaconBlock
func buildReqPTokensInst(
	uniquePortingID string,
	tokenID string,
	incogAddressStr string,
	portingAmount uint64,
	portingProof string,
	metaType int,
	shardID byte,
	txReqID common.Hash,
	status string,
) []string {
	reqPTokenContent := metadata.PortalRequestPTokensContent{
		UniquePortingID: uniquePortingID,
		TokenID:         tokenID,
		IncogAddressStr: incogAddressStr,
		PortingAmount:   portingAmount,
		PortingProof:    portingProof,
		TxReqID:         txReqID,
		ShardID:         shardID,
	}
	reqPTokenContentBytes, _ := json.Marshal(reqPTokenContent)
	return []string{
		strconv.Itoa(metaType),
		strconv.Itoa(int(shardID)),
		status,
		string(reqPTokenContentBytes),
	}
}

// buildCustodianWithdrawInst builds a custodian-withdraw instruction in the
// common [metaType, shardID, status, contentJSON] layout.
func buildCustodianWithdrawInst(
	metaType int,
	shardID byte,
	reqStatus string,
	paymentAddress string,
	amount uint64,
	remainFreeCollateral uint64,
	txReqID common.Hash,
) []string {
	content := metadata.PortalCustodianWithdrawRequestContent{
		PaymentAddress:       paymentAddress,
		Amount:               amount,
		RemainFreeCollateral: remainFreeCollateral,
		TxReqID:              txReqID,
		ShardID:              shardID,
	}

	contentBytes, _ := json.Marshal(content)
	return []string{
		strconv.Itoa(metaType),
		strconv.Itoa(int(shardID)),
		reqStatus,
		string(contentBytes),
	}
}

// buildInstructionsForCustodianDeposit builds instruction for custodian deposit action
func (blockchain *BlockChain) buildInstructionsForCustodianDeposit(
	contentStr string,
	shardID byte,
	metaType int,
	currentPortalState *CurrentPortalState,
	beaconHeight uint64,
) ([][]string, error) {
	// parse instruction: contentStr is base64(JSON(PortalCustodianDepositAction))
	actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr)
	if err != nil {
		Logger.log.Errorf("ERROR: an error occured while decoding content string of portal custodian deposit action: %+v", err)
		return [][]string{}, nil
	}
	var actionData metadata.PortalCustodianDepositAction
	err = json.Unmarshal(actionContentBytes, &actionData)
	if err != nil {
		Logger.log.Errorf("ERROR: an error occured while unmarshal portal custodian deposit action: %+v", err)
		return [][]string{}, nil
	}

	if currentPortalState == nil {
		Logger.log.Errorf("WARN - [buildInstructionsForCustodianDeposit]: Current Portal state is null.")
		// need to refund collateral to custodian
		inst := buildCustodianDepositInst(
			actionData.Meta.IncogAddressStr,
			actionData.Meta.DepositedAmount,
actionData.Meta.RemoteAddresses, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalCustodianDepositRefundChainStatus, + ) + return [][]string{inst}, nil + } + meta := actionData.Meta + + keyCustodianState := statedb.GenerateCustodianStateObjectKey(beaconHeight, meta.IncogAddressStr) + keyCustodianStateStr := keyCustodianState.String() + + if currentPortalState.CustodianPoolState[keyCustodianStateStr] == nil { + // new custodian + newCustodian := statedb.NewCustodianStateWithValue( + meta.IncogAddressStr, meta.DepositedAmount, meta.DepositedAmount, + nil, nil, + meta.RemoteAddresses, nil) + currentPortalState.CustodianPoolState[keyCustodianStateStr] = newCustodian + } else { + // custodian deposited before + // update state of the custodian + custodian := currentPortalState.CustodianPoolState[keyCustodianStateStr] + totalCollateral := custodian.GetTotalCollateral() + meta.DepositedAmount + freeCollateral := custodian.GetFreeCollateral() + meta.DepositedAmount + holdingPubTokens := custodian.GetHoldingPublicTokens() + lockedAmountCollateral := custodian.GetLockedAmountCollateral() + rewardAmount := custodian.GetRewardAmount() + remoteAddresses := custodian.GetRemoteAddresses() + for _, address := range meta.RemoteAddresses { + if existedAddr, _ := statedb.GetRemoteAddressByTokenID(remoteAddresses, address.GetPTokenID()); existedAddr == "" { + remoteAddresses = append(remoteAddresses, address) + } + } + + newCustodian := statedb.NewCustodianStateWithValue(meta.IncogAddressStr, totalCollateral, freeCollateral, + holdingPubTokens, lockedAmountCollateral, remoteAddresses, rewardAmount) + currentPortalState.CustodianPoolState[keyCustodianStateStr] = newCustodian + } + + inst := buildCustodianDepositInst( + actionData.Meta.IncogAddressStr, + actionData.Meta.DepositedAmount, + actionData.Meta.RemoteAddresses, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalCustodianDepositAcceptedChainStatus, + ) + return [][]string{inst}, nil 
+} + +func (blockchain *BlockChain) buildInstructionsForPortingRequest( + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("Porting request: an error occurred while decoding content string of portal porting request action: %+v", err) + return [][]string{}, nil + } + + var actionData metadata.PortalUserRegisterAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("Porting request: an error occurred while unmarshal portal porting request action: %+v", err) + return [][]string{}, nil + } + + if currentPortalState == nil { + Logger.log.Warn("Porting request: Current Portal state is null") + return [][]string{}, nil + } + + stateDB := blockchain.BestState.Beacon.GetCopiedFeatureStateDB() + //check unique id from record from db + portingRequestKeyExist, err := statedb.IsPortingRequestIdExist(stateDB, []byte(actionData.Meta.UniqueRegisterId)) + + if err != nil { + Logger.log.Errorf("Porting request: Get item portal by prefix error: %+v", err) + + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + if portingRequestKeyExist { + Logger.log.Errorf("Porting request: Porting request id exist, key %v", actionData.Meta.UniqueRegisterId) + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + 
actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + waitingPortingRequestKey := statedb.GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, actionData.Meta.UniqueRegisterId) + if _, ok := currentPortalState.WaitingPortingRequests[waitingPortingRequestKey.String()]; ok { + Logger.log.Errorf("Porting request: Waiting porting request exist, key %v", waitingPortingRequestKey) + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + //get exchange rates + exchangeRatesKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRatesState, ok := currentPortalState.FinalExchangeRatesState[exchangeRatesKey.String()] + + if !ok { + Logger.log.Errorf("Porting request, exchange rates not found") + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + if len(currentPortalState.CustodianPoolState) <= 0 { + Logger.log.Errorf("Porting request: Custodian not found") + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + var sortCustodianStateByFreeCollateral []CustodianStateSlice + sortCustodianByAmountAscent(actionData.Meta, 
currentPortalState.CustodianPoolState, &sortCustodianStateByFreeCollateral) + + if len(sortCustodianStateByFreeCollateral) <= 0 { + Logger.log.Errorf("Porting request, custodian not found") + + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + //validation porting fees + exchangePortingFees, err := CalMinPortingFee(actionData.Meta.RegisterAmount, actionData.Meta.PTokenId, exchangeRatesState) + if err != nil { + Logger.log.Errorf("Calculate Porting fee is error %v", err) + return [][]string{}, nil + } + + Logger.log.Infof("Porting request, porting fees need %v", exchangePortingFees) + + if actionData.Meta.PortingFee < exchangePortingFees { + Logger.log.Errorf("Porting request, Porting fees is wrong") + + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + //pick one + pickCustodianResult, _ := pickSingleCustodian(actionData.Meta, exchangeRatesState, sortCustodianStateByFreeCollateral, currentPortalState) + Logger.log.Infof("Porting request, pick single custodian result %v", len(pickCustodianResult)) + + //pick multiple + if len(pickCustodianResult) == 0 { + pickCustodianResult, _ = pickMultipleCustodian(actionData.Meta, exchangeRatesState, sortCustodianStateByFreeCollateral, currentPortalState) + Logger.log.Infof("Porting request, pick multiple custodian result %v", len(pickCustodianResult)) + } + //end + + if len(pickCustodianResult) == 0 { + Logger.log.Errorf("Porting 
request, custodian not found") + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + pickCustodianResult, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + //verify total amount + var totalPToken uint64 = 0 + for _, eachCustodian := range pickCustodianResult { + totalPToken = totalPToken + eachCustodian.Amount + } + + if totalPToken != actionData.Meta.RegisterAmount { + Logger.log.Errorf("Porting request, total custodian picked difference with total input PToken %v != %v", actionData.Meta.RegisterAmount, totalPToken) + + Logger.log.Errorf("Porting request, custodian not found") + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestRejectedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + nil, + actionData.TxReqID, + ) + + return [][]string{inst}, nil + } + + inst := buildRequestPortingInst( + actionData.Meta.Type, + shardID, + common.PortalPortingRequestAcceptedChainStatus, + actionData.Meta.UniqueRegisterId, + actionData.Meta.IncogAddressStr, + actionData.Meta.PTokenId, + actionData.Meta.RegisterAmount, + actionData.Meta.PortingFee, + pickCustodianResult, + actionData.TxReqID, + ) + + newPortingRequestStateWaiting := statedb.NewWaitingPortingRequestWithValue( + actionData.Meta.UniqueRegisterId, + actionData.TxReqID, + actionData.Meta.PTokenId, + actionData.Meta.IncogAddressStr, + actionData.Meta.RegisterAmount, + pickCustodianResult, + actionData.Meta.PortingFee, + common.PortalPortingReqWaitingStatus, + beaconHeight+1, + ) + + keyWaitingPortingRequest := statedb.GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, 
actionData.Meta.UniqueRegisterId) + currentPortalState.WaitingPortingRequests[keyWaitingPortingRequest.String()] = newPortingRequestStateWaiting + + return [][]string{inst}, nil +} + +// buildInstructionsForCustodianDeposit builds instruction for custodian deposit action +func (blockchain *BlockChain) buildInstructionsForReqPTokens( + stateDB *statedb.StateDB, + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + + // parse instruction + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while decoding content string of portal custodian deposit action: %+v", err) + return [][]string{}, nil + } + var actionData metadata.PortalRequestPTokensAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshal portal custodian deposit action: %+v", err) + return [][]string{}, nil + } + meta := actionData.Meta + + if currentPortalState == nil { + Logger.log.Warn("WARN - [buildInstructionsForCustodianDeposit]: Current Portal state is null.") + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check meta.UniquePortingID is in waiting PortingRequests list in portal state or not + portingID := meta.UniquePortingID + keyWaitingPortingRequest := statedb.GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, portingID) + keyWaitingPortingRequestStr := keyWaitingPortingRequest.String() + waitingPortingRequest := currentPortalState.WaitingPortingRequests[keyWaitingPortingRequestStr] + if waitingPortingRequest == nil { + Logger.log.Errorf("PortingID is not existed in waiting porting requests list") + inst := 
buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + db := blockchain.GetDatabase() + + //check unique id from record from db + portingRequest, err := statedb.GetPortalStateStatusMultiple(stateDB, statedb.PortalPortingRequestStatusPrefix(), []byte(meta.UniquePortingID)) + + if err != nil { + Logger.log.Errorf("Can not get porting req status for portingID %v, %v\n", meta.UniquePortingID, err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + var portingRequestStatus metadata.PortingRequestStatus + err = json.Unmarshal(portingRequest, &portingRequestStatus) + if err != nil { + Logger.log.Errorf("Has an error occurred while unmarshal PortingRequestStatus: %+v", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if portingRequestStatus.Status != common.PortalPortingReqWaitingStatus { + Logger.log.Errorf("PortingID status invalid, expected %v , but got %v\n", common.PortalPortingReqWaitingStatus, portingRequestStatus.Status) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check tokenID + if meta.TokenID != waitingPortingRequest.TokenID() { + Logger.log.Errorf("TokenID is not correct in portingID req") 
+ inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check porting amount + if meta.PortingAmount != waitingPortingRequest.Amount() { + Logger.log.Errorf("PortingAmount is not correct in portingID req") + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if meta.TokenID == common.PortalBTCIDStr { + btcChain := blockchain.config.BTCChain + if btcChain == nil { + Logger.log.Error("BTC relaying chain should not be null") + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + // parse PortingProof in meta + btcTxProof, err := btcrelaying.ParseBTCProofFromB64EncodeStr(meta.PortingProof) + if err != nil { + Logger.log.Errorf("PortingProof is invalid %v\n", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + isValid, err := btcChain.VerifyTxWithMerkleProofs(btcTxProof) + if !isValid || err != nil { + Logger.log.Errorf("Verify btcTxProof failed %v", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return 
[][]string{inst}, nil + } + + // extract attached message from txOut's OP_RETURN + btcAttachedMsg, err := btcrelaying.ExtractAttachedMsgFromTx(btcTxProof.BTCTx) + if err != nil { + Logger.log.Errorf("Could not extract attached message from BTC tx proof with err: %v", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + encodedMsg := btcrelaying.HashAndEncodeBase58(meta.UniquePortingID) + if btcAttachedMsg != encodedMsg { + Logger.log.Errorf("PortingId in the btc attached message is not matched with portingID in metadata") + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check whether amount transfer in txBNB is equal porting amount or not + // check receiver and amount in tx + // get list matching custodians in waitingPortingRequest + custodians := waitingPortingRequest.Custodians() + outputs := btcTxProof.BTCTx.TxOut + for _, cusDetail := range custodians { + remoteAddressNeedToBeTransfer := cusDetail.RemoteAddress + amountNeedToBeTransfer := cusDetail.Amount + amountNeedToBeTransferInBTC := btcrelaying.ConvertIncPBTCAmountToExternalBTCAmount(int64(amountNeedToBeTransfer)) + + isChecked := false + for _, out := range outputs { + addrStr, err := btcChain.ExtractPaymentAddrStrFromPkScript(out.PkScript) + if err != nil { + Logger.log.Errorf("[portal] ExtractPaymentAddrStrFromPkScript: could not extract payment address string from pkscript with err: %v\n", err) + continue + } + if addrStr != remoteAddressNeedToBeTransfer { + continue + } + if out.Value < amountNeedToBeTransferInBTC { + Logger.log.Errorf("BTC-TxProof is 
invalid - the transferred amount to %s must be equal to or greater than %d, but got %d", addrStr, amountNeedToBeTransferInBTC, out.Value) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } else { + isChecked = true + break + } + } + if !isChecked { + Logger.log.Error("BTC-TxProof is invalid") + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + } + + inst := buildReqPTokensInst( + actionData.Meta.UniquePortingID, + actionData.Meta.TokenID, + actionData.Meta.IncogAddressStr, + actionData.Meta.PortingAmount, + actionData.Meta.PortingProof, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensAcceptedChainStatus, + ) + + // remove waiting porting request from currentPortalState + deleteWaitingPortingRequest(currentPortalState, keyWaitingPortingRequestStr) + return [][]string{inst}, nil + + } else if meta.TokenID == common.PortalBNBIDStr { + // parse PortingProof in meta + txProofBNB, err := bnb.ParseBNBProofFromB64EncodeStr(meta.PortingProof) + if err != nil { + Logger.log.Errorf("PortingProof is invalid %v\n", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check minimum confirmations block of bnb proof + latestBNBBlockHeight, err2 := getLatestRelayingBNBBlockHeight(db, beaconHeight) + if err2 != nil { + Logger.log.Errorf("Can not get latest relaying bnb block height %v\n", err) + 
inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if latestBNBBlockHeight < txProofBNB.BlockHeight + bnb.MinConfirmationsBlock { + Logger.log.Errorf("Not enough min bnb confirmations block %v, latestBNBBlockHeight %v - txProofBNB.BlockHeight %v\n", + bnb.MinConfirmationsBlock, latestBNBBlockHeight, txProofBNB.BlockHeight) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + isValid, err := txProofBNB.Verify(db) + if !isValid || err != nil { + Logger.log.Errorf("Verify txProofBNB failed %v", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // parse Tx from Data in txProofBNB + txBNB, err := bnb.ParseTxFromData(txProofBNB.Proof.Data) + if err != nil { + Logger.log.Errorf("Data in PortingProof is invalid %v", err) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check memo attach portingID req: + memo := txBNB.Memo + memoBytes, err2 := base64.StdEncoding.DecodeString(memo) + if err2 != nil { + Logger.log.Errorf("Can not decode memo in tx bnb proof", err2) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + 
meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + var portingMemo PortingMemoBNB + err2 = json.Unmarshal(memoBytes, &portingMemo) + if err2 != nil { + Logger.log.Errorf("Can not unmarshal memo in tx bnb proof", err2) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if portingMemo.PortingID != meta.UniquePortingID { + Logger.log.Errorf("PortingId in memoTx is not matched with portingID in metadata", err2) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check whether amount transfer in txBNB is equal porting amount or not + // check receiver and amount in tx + // get list matching custodians in waitingPortingRequest + custodians := waitingPortingRequest.Custodians() + outputs := txBNB.Msgs[0].(msg.SendMsg).Outputs + for _, cusDetail := range custodians { + remoteAddressNeedToBeTransfer := cusDetail.RemoteAddress + amountNeedToBeTransfer := cusDetail.Amount + amountNeedToBeTransferInBNB := convertIncPBNBAmountToExternalBNBAmount(int64(amountNeedToBeTransfer)) + + isChecked := false + for _, out := range outputs { + addr, _ := bnb.GetAccAddressString(&out.Address, blockchain.config.ChainParams.BNBRelayingHeaderChainID) + if addr != remoteAddressNeedToBeTransfer { + Logger.log.Errorf("[portal] remoteAddressNeedToBeTransfer: %v - addr: %v\n", remoteAddressNeedToBeTransfer, addr) + continue + } + + // calculate amount that was transferred to custodian's remote address + amountTransfer := int64(0) + for _, coin := range 
out.Coins { + if coin.Denom == bnb.DenomBNB { + amountTransfer += coin.Amount + } + } + if amountTransfer < amountNeedToBeTransferInBNB { + Logger.log.Errorf("TxProof-BNB is invalid - Amount transfer to %s must be equal to or greater than %d, but got %d", + addr, amountNeedToBeTransferInBNB, amountTransfer) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } else { + isChecked = true + break + } + } + if !isChecked { + Logger.log.Errorf("TxProof-BNB is invalid - Receiver address is invalid, expected %v", + remoteAddressNeedToBeTransfer) + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + } + + inst := buildReqPTokensInst( + actionData.Meta.UniquePortingID, + actionData.Meta.TokenID, + actionData.Meta.IncogAddressStr, + actionData.Meta.PortingAmount, + actionData.Meta.PortingProof, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensAcceptedChainStatus, + ) + + // remove waiting porting request from currentPortalState + deleteWaitingPortingRequest(currentPortalState, keyWaitingPortingRequestStr) + return [][]string{inst}, nil + } else { + Logger.log.Errorf("TokenID is not supported currently on Portal") + inst := buildReqPTokensInst( + meta.UniquePortingID, + meta.TokenID, + meta.IncogAddressStr, + meta.PortingAmount, + meta.PortingProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqPTokensRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + return [][]string{}, nil +} + +func (blockchain *BlockChain) buildInstructionsForExchangeRates( + contentStr string, + shardID byte, + 
metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while decoding content string of portal exchange rates action: %+v", err) + return [][]string{}, nil + } + + var actionData metadata.PortalExchangeRatesAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while unmarshal portal exchange rates action: %+v", err) + return [][]string{}, nil + } + + //check key from db + if currentPortalState.ExchangeRatesRequests != nil { + _, ok := currentPortalState.ExchangeRatesRequests[actionData.TxReqID.String()] + if ok { + Logger.log.Errorf("ERROR: exchange rates key is duplicated") + + portalExchangeRatesContent := metadata.PortalExchangeRatesContent{ + SenderAddress: actionData.Meta.SenderAddress, + Rates: actionData.Meta.Rates, + TxReqID: actionData.TxReqID, + LockTime: actionData.LockTime, + } + + portalExchangeRatesContentBytes, _ := json.Marshal(portalExchangeRatesContent) + + inst := []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + common.PortalExchangeRatesRejectedChainStatus, + string(portalExchangeRatesContentBytes), + } + + return [][]string{inst}, nil + } + } + + //success + portalExchangeRatesContent := metadata.PortalExchangeRatesContent{ + SenderAddress: actionData.Meta.SenderAddress, + Rates: actionData.Meta.Rates, + TxReqID: actionData.TxReqID, + LockTime: actionData.LockTime, + } + + portalExchangeRatesContentBytes, _ := json.Marshal(portalExchangeRatesContent) + + inst := []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + common.PortalExchangeRatesAcceptedChainStatus, + string(portalExchangeRatesContentBytes), + } + + //update E-R request + if currentPortalState.ExchangeRatesRequests != nil { + currentPortalState.ExchangeRatesRequests[actionData.TxReqID.String()] = 
metadata.NewExchangeRatesRequestStatus( + common.PortalExchangeRatesAcceptedStatus, + actionData.Meta.SenderAddress, + actionData.Meta.Rates, + ) + } else { + //new object + newExchangeRatesRequest := make(map[string]*metadata.ExchangeRatesRequestStatus) + newExchangeRatesRequest[actionData.TxReqID.String()] = metadata.NewExchangeRatesRequestStatus( + common.PortalExchangeRatesAcceptedStatus, + actionData.Meta.SenderAddress, + actionData.Meta.Rates, + ) + + currentPortalState.ExchangeRatesRequests = newExchangeRatesRequest + } + + return [][]string{inst}, nil +} + +// beacon build new instruction from instruction received from ShardToBeaconBlock +func buildRedeemRequestInst( + uniqueRedeemID string, + tokenID string, + redeemAmount uint64, + incAddressStr string, + remoteAddress string, + redeemFee uint64, + matchingCustodianDetail []*statedb.MatchingRedeemCustodianDetail, + metaType int, + shardID byte, + txReqID common.Hash, + status string, +) []string { + redeemRequestContent := metadata.PortalRedeemRequestContent{ + UniqueRedeemID: uniqueRedeemID, + TokenID: tokenID, + RedeemAmount: redeemAmount, + RedeemerIncAddressStr: incAddressStr, + RemoteAddress: remoteAddress, + MatchingCustodianDetail: matchingCustodianDetail, + RedeemFee: redeemFee, + TxReqID: txReqID, + ShardID: shardID, + } + redeemRequestContentBytes, _ := json.Marshal(redeemRequestContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(redeemRequestContentBytes), + } +} + +// buildInstructionsForRedeemRequest builds instruction for redeem request action +func (blockchain *BlockChain) buildInstructionsForRedeemRequest( + stateDB *statedb.StateDB, + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + // parse instruction + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while 
decoding content string of portal redeem request action: %+v", err) + return [][]string{}, nil + } + var actionData metadata.PortalRedeemRequestAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshal portal redeem request action: %+v", err) + return [][]string{}, nil + } + + meta := actionData.Meta + if currentPortalState == nil { + Logger.log.Warn("WARN - [buildInstructionsForRedeemRequest]: Current Portal state is null.") + // need to mint ptoken to user + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + redeemID := meta.UniqueRedeemID + + // check uniqueRedeemID is existed waitingRedeem list or not + keyWaitingRedeemRequest := statedb.GenerateWaitingRedeemRequestObjectKey(beaconHeight, redeemID) + keyWaitingRedeemRequestStr := keyWaitingRedeemRequest.String() + waitingRedeemRequest := currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr] + if waitingRedeemRequest != nil { + Logger.log.Errorf("RedeemID is existed in waiting redeem requests list %v\n", redeemID) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check uniqueRedeemID is existed in db or not + redeemRequestBytes, err := statedb.GetPortalRedeemRequestStatus(stateDB, meta.UniqueRedeemID) + if err != nil { + Logger.log.Errorf("Can not get redeem req status for redeemID %v, %v\n", meta.UniqueRedeemID, err) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + 
meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } else if len(redeemRequestBytes) > 0 { + Logger.log.Errorf("RedeemID is existed in redeem requests list in db %v\n", redeemID) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // get tokenID from redeemTokenID + tokenID := meta.TokenID + + // check redeem fee + exchangeRateKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRateKeyStr := exchangeRateKey.String() + if currentPortalState.FinalExchangeRatesState[exchangeRateKeyStr] == nil { + Logger.log.Errorf("Can not get exchange rate at beaconHeight %v\n", beaconHeight) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + minRedeemFee, err := CalMinRedeemFee(meta.RedeemAmount, tokenID, currentPortalState.FinalExchangeRatesState[exchangeRateKeyStr]) + if err != nil { + Logger.log.Errorf("Error when calculating minimum redeem fee %v\n", err) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if meta.RedeemFee < minRedeemFee { + 
Logger.log.Errorf("Redeem fee is invalid, minRedeemFee %v, but get %v\n", minRedeemFee, meta.RedeemFee) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // pick custodian(s) who holding public token to return user + matchingCustodiansDetail, err := pickupCustodianForRedeem(meta.RedeemAmount, tokenID, currentPortalState) + if err != nil { + Logger.log.Errorf("Error when pick up custodian for redeem %v\n", err) + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // update custodian state (holding public tokens) + for _, cus := range matchingCustodiansDetail { + custodianStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, cus.GetIncognitoAddress()) + custodianStateKeyStr := custodianStateKey.String() + if currentPortalState.CustodianPoolState[custodianStateKeyStr].GetHoldingPublicTokens()[tokenID] < cus.GetAmount() { + Logger.log.Errorf("Amount holding public tokens is less than matching redeem amount") + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + nil, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + holdingPubTokenTmp := currentPortalState.CustodianPoolState[custodianStateKeyStr].GetHoldingPublicTokens() + holdingPubTokenTmp[tokenID] -= cus.GetAmount() + 
currentPortalState.CustodianPoolState[custodianStateKeyStr].SetHoldingPublicTokens(holdingPubTokenTmp) + } + + // add to waiting Redeem list + redeemRequest := statedb.NewWaitingRedeemRequestWithValue( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemAmount, + matchingCustodiansDetail, + meta.RedeemFee, + beaconHeight+1, + actionData.TxReqID, + ) + currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr] = redeemRequest + + Logger.log.Infof("[Portal] Build accepted instruction for redeem request") + inst := buildRedeemRequestInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.RedeemAmount, + meta.RedeemerIncAddressStr, + meta.RemoteAddress, + meta.RedeemFee, + matchingCustodiansDetail, + meta.Type, + actionData.ShardID, + actionData.TxReqID, + common.PortalRedeemRequestAcceptedChainStatus, + ) + return [][]string{inst}, nil +} + +/** +Validation: + - verify each instruct belong shard + - check amount < fee collateral + - build PortalCustodianWithdrawRequestContent to send beacon +*/ +func (blockchain *BlockChain) buildInstructionsForCustodianWithdraw( + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("Have an error occurred while decoding content string of custodian withdraw request action: %+v", err) + return [][]string{}, nil + } + + var actionData metadata.PortalCustodianWithdrawRequestAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("Have an error occurred while unmarshal custodian withdraw request action: %+v", err) + return [][]string{}, nil + } + + if currentPortalState == nil { + Logger.log.Warn("Current Portal state is null") + return [][]string{}, nil + } + + if len(currentPortalState.CustodianPoolState) <= 0 { + 
Logger.log.Errorf("Custodian state is empty") + + inst := buildCustodianWithdrawInst( + actionData.Meta.Type, + shardID, + common.PortalCustodianWithdrawRequestRejectedStatus, + actionData.Meta.PaymentAddress, + actionData.Meta.Amount, + 0, + actionData.TxReqID, + ) + return [][]string{inst}, nil + } + + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.Meta.PaymentAddress) + custodianKeyStr := custodianKey.String() + custodian, ok := currentPortalState.CustodianPoolState[custodianKeyStr] + + if !ok { + Logger.log.Errorf("Custodian not found") + + inst := buildCustodianWithdrawInst( + actionData.Meta.Type, + shardID, + common.PortalCustodianWithdrawRequestRejectedStatus, + actionData.Meta.PaymentAddress, + actionData.Meta.Amount, + 0, + actionData.TxReqID, + ) + return [][]string{inst}, nil + } + + if actionData.Meta.Amount > custodian.GetFreeCollateral() { + Logger.log.Errorf("Free Collateral is not enough PRV") + + inst := buildCustodianWithdrawInst( + actionData.Meta.Type, + shardID, + common.PortalCustodianWithdrawRequestRejectedStatus, + actionData.Meta.PaymentAddress, + actionData.Meta.Amount, + 0, + actionData.TxReqID, + ) + return [][]string{inst}, nil + } + //withdraw + remainFreeCollateral := custodian.GetFreeCollateral() - actionData.Meta.Amount + totalFreeCollateral := custodian.GetTotalCollateral() - actionData.Meta.Amount + + inst := buildCustodianWithdrawInst( + actionData.Meta.Type, + shardID, + common.PortalCustodianWithdrawRequestAcceptedStatus, + actionData.Meta.PaymentAddress, + actionData.Meta.Amount, + remainFreeCollateral, + actionData.TxReqID, + ) + + //update free collateral custodian + custodian.SetFreeCollateral(remainFreeCollateral) + custodian.SetTotalCollateral(totalFreeCollateral) + currentPortalState.CustodianPoolState[custodianKeyStr] = custodian + return [][]string{inst}, nil +} + +// beacon build new instruction from instruction received from ShardToBeaconBlock +func buildReqUnlockCollateralInst( + 
uniqueRedeemID string, + tokenID string, + custodianAddressStr string, + redeemAmount uint64, + unlockAmount uint64, + redeemProof string, + metaType int, + shardID byte, + txReqID common.Hash, + status string, +) []string { + reqUnlockCollateralContent := metadata.PortalRequestUnlockCollateralContent{ + UniqueRedeemID: uniqueRedeemID, + TokenID: tokenID, + CustodianAddressStr: custodianAddressStr, + RedeemAmount: redeemAmount, + UnlockAmount: unlockAmount, + RedeemProof: redeemProof, + TxReqID: txReqID, + ShardID: shardID, + } + reqUnlockCollateralContentBytes, _ := json.Marshal(reqUnlockCollateralContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(reqUnlockCollateralContentBytes), + } +} + +// buildInstructionsForReqUnlockCollateral builds instruction for custodian deposit action +func (blockchain *BlockChain) buildInstructionsForReqUnlockCollateral( + stateDB *statedb.StateDB, + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + + // parse instruction + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while decoding content string of portal request unlock collateral action: %+v", err) + return [][]string{}, nil + } + var actionData metadata.PortalRequestUnlockCollateralAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshal portal request unlock collateral action: %+v", err) + return [][]string{}, nil + } + meta := actionData.Meta + + if currentPortalState == nil { + Logger.log.Warn("WARN - [buildInstructionsForReqUnlockCollateral]: Current Portal state is null.") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + 
actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check meta.UniqueRedeemID is in waiting RedeemRequests list in portal state or not + redeemID := meta.UniqueRedeemID + keyWaitingRedeemRequest := statedb.GenerateWaitingRedeemRequestObjectKey(beaconHeight, redeemID) + keyWaitingRedeemRequestStr := keyWaitingRedeemRequest.String() + waitingRedeemRequest := currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr] + if waitingRedeemRequest == nil { + Logger.log.Errorf("redeemID is not existed in waiting redeem requests list") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + db := blockchain.GetDatabase() + + // check status of request unlock collateral by redeemID + redeemReqStatusBytes, err := statedb.GetPortalRedeemRequestStatus(stateDB, redeemID) + if err != nil { + Logger.log.Errorf("Can not get redeem request by redeemID from db %v\n", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + var redeemRequest metadata.PortalRedeemRequestStatus + err = json.Unmarshal(redeemReqStatusBytes, &redeemRequest) + if err != nil { + Logger.log.Errorf("Can not unmarshal redeem request %v\n", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if redeemRequest.Status != 
common.PortalRedeemReqWaitingStatus { + Logger.log.Errorf("Redeem request %v has invalid status %v\n", redeemID, redeemRequest.Status) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check tokenID + if meta.TokenID != waitingRedeemRequest.GetTokenID() { + Logger.log.Errorf("TokenID is not correct in redeemID req") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check redeem amount of matching custodian + amountMatchingCustodian := uint64(0) + for _, cus := range waitingRedeemRequest.GetCustodians() { + if cus.GetIncognitoAddress() == meta.CustodianAddressStr { + amountMatchingCustodian = cus.GetAmount() + break + } + } + + if meta.RedeemAmount != amountMatchingCustodian { + Logger.log.Errorf("RedeemAmount is not correct in redeemID req") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + + ) + return [][]string{inst}, nil + } + + // validate proof and memo in tx + if meta.TokenID == common.PortalBTCIDStr { + btcChain := blockchain.config.BTCChain + if btcChain == nil { + Logger.log.Error("BTC relaying chain should not be null") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + 
common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + // parse PortingProof in meta + btcTxProof, err := btcrelaying.ParseBTCProofFromB64EncodeStr(meta.RedeemProof) + if err != nil { + Logger.log.Errorf("PortingProof is invalid %v\n", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + isValid, err := btcChain.VerifyTxWithMerkleProofs(btcTxProof) + if !isValid || err != nil { + Logger.log.Errorf("Verify btcTxProof failed %v", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // extract attached message from txOut's OP_RETURN + btcAttachedMsg, err := btcrelaying.ExtractAttachedMsgFromTx(btcTxProof.BTCTx) + if err != nil { + Logger.log.Errorf("Could not extract message from btc proof with error: ", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + rawMsg := fmt.Sprintf("%s%s", meta.UniqueRedeemID, meta.CustodianAddressStr) + encodedMsg := btcrelaying.HashAndEncodeBase58(rawMsg) + if btcAttachedMsg != encodedMsg { + Logger.log.Errorf("The hash of combination of UniqueRedeemID(%s) and CustodianAddressStr(%s) is not matched to tx's attached message", meta.UniqueRedeemID, meta.CustodianAddressStr) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + 
meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check whether amount transfer in txBNB is equal redeem amount or not + // check receiver and amount in tx + // get list matching custodians in waitingRedeemRequest + + outputs := btcTxProof.BTCTx.TxOut + remoteAddressNeedToBeTransfer := waitingRedeemRequest.GetRedeemerRemoteAddress() + amountNeedToBeTransfer := meta.RedeemAmount + amountNeedToBeTransferInBTC := btcrelaying.ConvertIncPBTCAmountToExternalBTCAmount(int64(amountNeedToBeTransfer)) + + isChecked := false + for _, out := range outputs { + addrStr, err := btcChain.ExtractPaymentAddrStrFromPkScript(out.PkScript) + if err != nil { + Logger.log.Warnf("[portal] ExtractPaymentAddrStrFromPkScript: could not extract payment address string from pkscript with err: %v\n", err) + continue + } + if addrStr != remoteAddressNeedToBeTransfer { + continue + } + if out.Value < amountNeedToBeTransferInBTC { + Logger.log.Errorf("BTC-TxProof is invalid - the transferred amount to %s must be equal to or greater than %d, but got %d", addrStr, amountNeedToBeTransferInBTC, out.Value) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } else { + isChecked = true + break + } + } + + if !isChecked{ + Logger.log.Error("BTC-TxProof is invalid") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // get tokenID from redeemTokenID + tokenID := 
meta.TokenID + + // update custodian state (FreeCollateral, LockedAmountCollateral) + custodianStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, meta.CustodianAddressStr) + custodianStateKeyStr := custodianStateKey.String() + finalExchangeRateKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + finalExchangeRateKeyStr := finalExchangeRateKey.String() + unlockAmount, err2 := updateFreeCollateralCustodian( + currentPortalState.CustodianPoolState[custodianStateKeyStr], + meta.RedeemAmount, tokenID, + currentPortalState.FinalExchangeRatesState[finalExchangeRateKeyStr]) + if err2 != nil { + Logger.log.Errorf("Error when update free collateral amount for custodian", err2) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // update redeem request state in WaitingRedeemRequest (remove custodian from matchingCustodianDetail) + updatedCustodians, _ := removeCustodianFromMatchingRedeemCustodians( + currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].GetCustodians(), meta.CustodianAddressStr) + currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].SetCustodians(updatedCustodians) + + // remove redeem request from WaitingRedeemRequest list when all matching custodians return public token to user + // when list matchingCustodianDetail is empty + if len(currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].GetCustodians()) == 0 { + deleteWaitingRedeemRequest(currentPortalState, keyWaitingRedeemRequestStr) + } + + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + unlockAmount, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + 
common.PortalReqUnlockCollateralAcceptedChainStatus, + ) + + return [][]string{inst}, nil + + } else if meta.TokenID == common.PortalBNBIDStr { + // parse PortingProof in meta + txProofBNB, err := bnb.ParseBNBProofFromB64EncodeStr(meta.RedeemProof) + if err != nil { + Logger.log.Errorf("RedeemProof is invalid %v\n", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check minimum confirmations block of bnb proof + latestBNBBlockHeight, err2 := getLatestRelayingBNBBlockHeight(db, beaconHeight) + if err2 != nil { + Logger.log.Errorf("Can not get latest relaying bnb block height %v\n", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + if latestBNBBlockHeight < txProofBNB.BlockHeight + bnb.MinConfirmationsBlock { + Logger.log.Errorf("Not enough min bnb confirmations block %v, latestBNBBlockHeight %v - txProofBNB.BlockHeight %v\n", + bnb.MinConfirmationsBlock, latestBNBBlockHeight, txProofBNB.BlockHeight) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + isValid, err := txProofBNB.Verify(db) + if !isValid || err != nil { + Logger.log.Errorf("Verify txProofBNB failed %v", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + 
meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // parse Tx from Data in txProofBNB + txBNB, err := bnb.ParseTxFromData(txProofBNB.Proof.Data) + if err != nil { + Logger.log.Errorf("Data in RedeemProof is invalid %v", err) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check memo attach redeemID req (compare hash memo) + memo := txBNB.Memo + memoHashBytes, err2 := base64.StdEncoding.DecodeString(memo) + if err2 != nil { + Logger.log.Errorf("Can not decode memo in tx bnb proof", err2) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + expectedRedeemMemo := RedeemMemoBNB { + RedeemID: redeemID, + CustodianIncognitoAddress: meta.CustodianAddressStr} + expectedRedeemMemoBytes, _ := json.Marshal(expectedRedeemMemo) + expectedRedeemMemoHashBytes := common.HashB(expectedRedeemMemoBytes) + + if !bytes.Equal(memoHashBytes, expectedRedeemMemoHashBytes) { + Logger.log.Errorf("Memo redeem is invalid") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // check whether amount transfer in txBNB is equal redeem amount or not + // check receiver and amount in tx + // get list matching custodians in waitingRedeemRequest + + outputs := 
txBNB.Msgs[0].(msg.SendMsg).Outputs + + remoteAddressNeedToBeTransfer := waitingRedeemRequest.GetRedeemerRemoteAddress() + amountNeedToBeTransfer := meta.RedeemAmount + amountNeedToBeTransferInBNB := convertIncPBNBAmountToExternalBNBAmount(int64(amountNeedToBeTransfer)) + + isChecked := false + for _, out := range outputs { + addr, _ := bnb.GetAccAddressString(&out.Address, blockchain.config.ChainParams.BNBRelayingHeaderChainID) + if addr != remoteAddressNeedToBeTransfer { + continue + } + + // calculate amount that was transferred to custodian's remote address + amountTransfer := int64(0) + for _, coin := range out.Coins { + if coin.Denom == bnb.DenomBNB { + amountTransfer += coin.Amount + // note: log error for debug + Logger.log.Errorf("TxProof-BNB coin.Amount %d", + coin.Amount) + } + } + if amountTransfer < amountNeedToBeTransferInBNB { + Logger.log.Errorf("TxProof-BNB is invalid - Amount transfer to %s must be equal to or greater than %d, but got %d", + addr, amountNeedToBeTransferInBNB, amountTransfer) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } else { + isChecked = true + break + } + } + + if !isChecked { + Logger.log.Errorf("TxProof-BNB is invalid - Receiver address is invalid, expected %v", + remoteAddressNeedToBeTransfer) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // get tokenID from redeemTokenID + tokenID := meta.TokenID + + // update custodian state (FreeCollateral, LockedAmountCollateral) + custodianStateKey := 
statedb.GenerateCustodianStateObjectKey(beaconHeight, meta.CustodianAddressStr) + custodianStateKeyStr := custodianStateKey.String() + finalExchangeRateKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + finalExchangeRateKeyStr := finalExchangeRateKey.String() + unlockAmount, err2 := updateFreeCollateralCustodian( + currentPortalState.CustodianPoolState[custodianStateKeyStr], + meta.RedeemAmount, tokenID, + currentPortalState.FinalExchangeRatesState[finalExchangeRateKeyStr]) + if err2 != nil { + Logger.log.Errorf("Error when update free collateral amount for custodian", err2) + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + // update redeem request state in WaitingRedeemRequest (remove custodian from matchingCustodianDetail) + updatedCustodians, _ := removeCustodianFromMatchingRedeemCustodians( + currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].GetCustodians(), meta.CustodianAddressStr) + currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].SetCustodians(updatedCustodians) + + // remove redeem request from WaitingRedeemRequest list when all matching custodians return public token to user + // when list matchingCustodianDetail is empty + if len(currentPortalState.WaitingRedeemRequests[keyWaitingRedeemRequestStr].GetCustodians()) == 0 { + deleteWaitingRedeemRequest(currentPortalState, keyWaitingRedeemRequestStr) + } + + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + unlockAmount, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralAcceptedChainStatus, + ) + + return [][]string{inst}, nil + } else { + Logger.log.Errorf("TokenID is not 
supported currently on Portal") + inst := buildReqUnlockCollateralInst( + meta.UniqueRedeemID, + meta.TokenID, + meta.CustodianAddressStr, + meta.RedeemAmount, + 0, + meta.RedeemProof, + meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqUnlockCollateralRejectedChainStatus, + + ) + return [][]string{inst}, nil + } + + return [][]string{}, nil +} diff --git a/blockchain/beaconportalproducer_test.go b/blockchain/beaconportalproducer_test.go new file mode 100644 index 0000000000..46f33b5164 --- /dev/null +++ b/blockchain/beaconportalproducer_test.go @@ -0,0 +1,1219 @@ +package blockchain + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" + mocks "github.com/incognitochain/incognito-chain/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "strconv" + "testing" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including a T() method which +// returns the current testing context +type PortalProducerSuite struct { + suite.Suite + currentPortalState *CurrentPortalState +} + +func (suite *PortalProducerSuite) SetupTest() { + suite.currentPortalState = &CurrentPortalState{ + CustodianPoolState: map[string]*statedb.CustodianState{}, + ExchangeRatesRequests: map[string]*metadata.ExchangeRatesRequestStatus{}, + FinalExchangeRatesState: map[string]*statedb.FinalExchangeRatesState{}, + WaitingPortingRequests: map[string]*statedb.WaitingPortingRequest{}, + WaitingRedeemRequests: map[string]*statedb.WaitingRedeemRequest{}, + LiquidateExchangeRatesPool: map[string]*statedb.LiquidateExchangeRatesPool{}, + } +} + +/************************ Porting request test ************************/ +type PortingRequestExcepted struct { + Metadata string + ChainStatus string + Custodian1 []string + Custodian2 []string +} + 
+type LiquidationExchangeRatesExcepted struct { + TpValue int + Custodian1 []string + Custodian2 []string + LiquidationPool []uint64 +} + +type PortingRequestTestCase struct { + TestCaseName string + Input func() metadata.PortalUserRegisterAction + Output func() PortingRequestExcepted +} + +type AutoLiquidationExchangeRatesTestCase struct { + TestCaseName string + Input map[string]uint64 + Output func() LiquidationExchangeRatesExcepted +} + +func (suite *PortalProducerSuite) SetupExchangeRates(beaconHeight uint64) { + rates := make(map[string]statedb.FinalExchangeRatesDetail) + rates["b832e5d3b1f01a4f0623f7fe91d6673461e1f5d37d91fe78c5c2e6183ff39696"] = statedb.FinalExchangeRatesDetail{ + Amount: 8000000000, + } + rates["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] = statedb.FinalExchangeRatesDetail{ + Amount: 20000000, + } + rates["0000000000000000000000000000000000000000000000000000000000000004"] = statedb.FinalExchangeRatesDetail{ + Amount: 500000, + } + + exchangeRates := make(map[string]*statedb.FinalExchangeRatesState) + exchangeRatesKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRates[exchangeRatesKey.String()] = statedb.NewFinalExchangeRatesStateWithValue(rates) + + suite.currentPortalState.FinalExchangeRatesState = exchangeRates +} + +func (suite *PortalProducerSuite) SetupExchangeRatesWithValue(beaconHeight uint64, btc uint64, bnb uint64, prv uint64) { + rates := make(map[string]statedb.FinalExchangeRatesDetail) + rates["b832e5d3b1f01a4f0623f7fe91d6673461e1f5d37d91fe78c5c2e6183ff39696"] = statedb.FinalExchangeRatesDetail{ + Amount: btc, + } + rates["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] = statedb.FinalExchangeRatesDetail{ + Amount: bnb, + } + rates["0000000000000000000000000000000000000000000000000000000000000004"] = statedb.FinalExchangeRatesDetail{ + Amount: prv, + } + + exchangeRates := make(map[string]*statedb.FinalExchangeRatesState) + exchangeRatesKey := 
statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRates[exchangeRatesKey.String()] = statedb.NewFinalExchangeRatesStateWithValue(rates) + + suite.currentPortalState.FinalExchangeRatesState = exchangeRates +} + +func (suite *PortalProducerSuite) SetupOneCustodian(beaconHeight uint64) { + remoteAddresses := make([]statedb.RemoteAddress, 0) + remoteAddresses = append( + remoteAddresses, + *statedb.NewRemoteAddressWithValue("b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", "bnb136ns6lfw4zs5hg4n85vdthaad7hq5m4gtkgf234"), + ) + + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ") + newCustodian := statedb.NewCustodianStateWithValue( + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", + 100000, + 100000, + nil, + nil, + remoteAddresses, + 0, + ) + + custodian := make(map[string]*statedb.CustodianState) + custodian[custodianKey.String()] = newCustodian + suite.currentPortalState.CustodianPoolState = custodian +} + +func (suite *PortalProducerSuite) SetupMultipleCustodian(beaconHeight uint64) { + remoteAddresses := make([]statedb.RemoteAddress, 0) + remoteAddresses = append( + remoteAddresses, + *statedb.NewRemoteAddressWithValue("b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", "bnb136ns6lfw4zs5hg4n85vdthaad7hq5m4gtkgf234"), + ) + + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ") + newCustodian := statedb.NewCustodianStateWithValue( + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", + 100000, + 100000, + nil, + nil, + remoteAddresses, + 0, + ) + + custodianKey2 := statedb.GenerateCustodianStateObjectKey(beaconHeight, 
"12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy") + newCustodian2 := statedb.NewCustodianStateWithValue( + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", + 90000, + 90000, + nil, + nil, + remoteAddresses, + 0, + ) + + custodian := make(map[string]*statedb.CustodianState) + custodian[custodianKey.String()] = newCustodian + custodian[custodianKey2.String()] = newCustodian2 + suite.currentPortalState.CustodianPoolState = custodian +} + +func (suite *PortalProducerSuite) SetupMultipleCustodianContainPToken(beaconHeight uint64) { + remoteAddresses := make([]statedb.RemoteAddress, 0) + remoteAddresses = append( + remoteAddresses, + *statedb.NewRemoteAddressWithValue("b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", "bnb136ns6lfw4zs5hg4n85vdthaad7hq5m4gtkgf234"), + ) + + exchangeRatesKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + + convertExchangeRatesObj := NewConvertExchangeRatesObject(suite.currentPortalState.FinalExchangeRatesState[exchangeRatesKey.String()]) + totalPTokenAfterUp150PercentUnit64 := up150Percent(1000) //return nano pBTC, pBNB + totalPTokenAfterUp150PercentUnit64_2 := up150Percent(2000) //return nano pBTC, pBNB + + totalPRV, _ := convertExchangeRatesObj.ExchangePToken2PRVByTokenId("b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", totalPTokenAfterUp150PercentUnit64) + totalPRV_2, _ := convertExchangeRatesObj.ExchangePToken2PRVByTokenId("b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", totalPTokenAfterUp150PercentUnit64_2) + + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ") + newCustodian := statedb.NewCustodianStateWithValue( + 
"12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", + 100000, + 100000, + map[string]uint64{ + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b": 1000, + }, + map[string]uint64{ + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b": totalPRV, + }, + remoteAddresses, + 0, + ) + + custodianKey2 := statedb.GenerateCustodianStateObjectKey(beaconHeight, "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy") + newCustodian2 := statedb.NewCustodianStateWithValue( + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", + 90000, + 90000, + map[string]uint64{ + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b": 2000, + }, + map[string]uint64{ + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b": totalPRV_2, + }, + remoteAddresses, + 0, + ) + + custodian := make(map[string]*statedb.CustodianState) + custodian[custodianKey.String()] = newCustodian + custodian[custodianKey2.String()] = newCustodian2 + suite.currentPortalState.CustodianPoolState = custodian +} + +func (suite *PortalProducerSuite) SetupMockBlockChain(trieMock *mocks.Trie) *BlockChain { + root := common.Hash{} + wrapperDBMock := new(mocks.DatabaseAccessWarper) + wrapperDBMock.On("OpenPrefixTrie", root).Return( + trieMock, + nil, + ) + + wrapperDBMock.On("CopyTrie", trieMock).Return( + trieMock, + nil, + ) + + root1 := common.Hash{} + stateDb, _ := statedb.NewWithPrefixTrie(root1, wrapperDBMock) + + beaconBestState := &BeaconBestState{ + featureStateDB: stateDb, + } + + bestState := &BestState{ + Beacon: beaconBestState, + } + + blockChain := &BlockChain{ + BestState: bestState, + } + + return blockChain +} + +func (suite *PortalProducerSuite) TestBuildInstructionsForPortingRequest() { + happyCases := []PortingRequestTestCase{ + { + "happy_case_1", + func() metadata.PortalUserRegisterAction { + 
meta, _ := metadata.NewPortalUserRegister( + "1", + "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", //100.000 prv + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", + 1000, + 4, + metadata.PortalUserRegisterMeta, + ) + + actionContent := metadata.PortalUserRegisterAction{ + Meta: *meta, + TxReqID: *meta.Hash(), + ShardID: 1, + } + return actionContent + }, + func() PortingRequestExcepted { + return PortingRequestExcepted{ + Metadata: strconv.Itoa(metadata.PortalUserRegisterMeta), + ChainStatus: common.PortalPortingRequestAcceptedChainStatus, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "40000", //free collateral + "1000", //hold pToken + "60000", //lock prv amount + }, + } + }, + }, + { + "happy_case_2", + func() metadata.PortalUserRegisterAction { + meta, _ := metadata.NewPortalUserRegister( + "2", + "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", //100.000 prv + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", + 100, + 4, + metadata.PortalUserRegisterMeta, + ) + + actionContent := metadata.PortalUserRegisterAction{ + Meta: *meta, + TxReqID: *meta.Hash(), + ShardID: 1, + } + return actionContent + }, + func() PortingRequestExcepted { + return PortingRequestExcepted{ + Metadata: strconv.Itoa(metadata.PortalUserRegisterMeta), + ChainStatus: common.PortalPortingRequestAcceptedChainStatus, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "34000", //free collateral + "1100", //hold pToken + "66000", //lock prv amount + }, + } + }, + }, + } + + //reset + suite.SetupTest() + suite.SetupExchangeRates(1) + suite.SetupOneCustodian(1) + suite.verifyPortingRequest(happyCases) + + pickMultipleCustodianCases := 
[]PortingRequestTestCase{ + { + "pick_multiple_custodian_case_1", + func() metadata.PortalUserRegisterAction { + meta, _ := metadata.NewPortalUserRegister( + "1", + "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", //100.000 prv + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", + 2000, + 8, + metadata.PortalUserRegisterMeta, + ) + + actionContent := metadata.PortalUserRegisterAction{ + Meta: *meta, + TxReqID: *meta.Hash(), + ShardID: 1, + } + return actionContent + }, + func() PortingRequestExcepted { + return PortingRequestExcepted{ + Metadata: strconv.Itoa(metadata.PortalUserRegisterMeta), + ChainStatus: common.PortalPortingRequestAcceptedChainStatus, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "0", //free collateral + "1667", //hold pToken + "100000", //lock prv amount + }, + Custodian2: []string{ + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", //address + "70000", //free collateral + "333", //hold pToken + "20000", //lock prv amount + }, + } + }, + }, + { + "pick_a_custodian_case_2", + func() metadata.PortalUserRegisterAction { + meta, _ := metadata.NewPortalUserRegister( + "2", + "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", //100.000 prv + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", + 1000, + 4, + metadata.PortalUserRegisterMeta, + ) + + actionContent := metadata.PortalUserRegisterAction{ + Meta: *meta, + TxReqID: *meta.Hash(), + ShardID: 1, + } + return actionContent + }, + func() PortingRequestExcepted { + return PortingRequestExcepted{ + Metadata: strconv.Itoa(metadata.PortalUserRegisterMeta), + ChainStatus: common.PortalPortingRequestAcceptedChainStatus, + Custodian1: []string{ + 
"12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "0", //free collateral + "1667", //hold pToken + "100000", //lock prv amount + }, + Custodian2: []string{ + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", //address + "10000", //free collateral + "1333", //hold pToken + "80000", //lock prv amount + }, + } + }, + }, + } + + //reset + suite.SetupTest() + suite.SetupExchangeRates(1) + suite.SetupMultipleCustodian(1) + suite.verifyPortingRequest(pickMultipleCustodianCases) + + + waitingPortingRequest := []PortingRequestTestCase{ + { + "waiting_porting_request_case_1", + func() metadata.PortalUserRegisterAction { + meta, _ := metadata.NewPortalUserRegister( + "1", + "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", //100.000 prv + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", + 2000, + 8, + metadata.PortalUserRegisterMeta, + ) + + actionContent := metadata.PortalUserRegisterAction{ + Meta: *meta, + TxReqID: *meta.Hash(), + ShardID: 1, + } + return actionContent + }, + func() PortingRequestExcepted { + return PortingRequestExcepted{ + Metadata: strconv.Itoa(metadata.PortalUserRegisterMeta), + ChainStatus: common.PortalPortingRequestAcceptedChainStatus, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "0", //free collateral + "1667", //hold pToken + "100000", //lock prv amount + }, + Custodian2: []string{ + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", //address + "70000", //free collateral + "333", //hold pToken + "20000", //lock prv amount + }, + } + }, + }, + { + "waiting_porting_request_exist_case_2", + func() metadata.PortalUserRegisterAction { + meta, _ := metadata.NewPortalUserRegister( + "1", + 
"12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", //100.000 prv + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", + 1000, + 4, + metadata.PortalUserRegisterMeta, + ) + + actionContent := metadata.PortalUserRegisterAction{ + Meta: *meta, + TxReqID: *meta.Hash(), + ShardID: 1, + } + return actionContent + }, + func() PortingRequestExcepted { + return PortingRequestExcepted{ + Metadata: strconv.Itoa(metadata.PortalUserRegisterMeta), + ChainStatus: common.PortalPortingRequestRejectedChainStatus, + } + }, + }, + } + + //reset + suite.SetupTest() + suite.SetupExchangeRates(1) + suite.SetupMultipleCustodian(1) + suite.verifyPortingRequest(waitingPortingRequest) +} + +func (suite *PortalProducerSuite) TestBuildInstructionsForLiquidationTPExchangeRates() { + //check tp 150 + //check tp 130 + //check tp 120 + + //check custodian + //check liquidation pool + + exchangeRatesChange := []AutoLiquidationExchangeRatesTestCase{ + { + "liquidation_exchange_rates_none_tp150_1", + map[string]uint64 { + "btc": 8000000000, + "bnb": 20000000, + "prv": 500000, + }, + func() LiquidationExchangeRatesExcepted { + return LiquidationExchangeRatesExcepted{ + TpValue: 0, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "100000", //free collateral + "1000", //hold pToken + "60000", //lock prv amount + }, + Custodian2: nil, + LiquidationPool: []uint64{ + 0, + 0, + }, + } + }, + }, + { + "liquidation_exchange_rates_tp130_2", + map[string]uint64 { + "btc": 8000000000, + "bnb": 23000000, + "prv": 500000, + }, + func() LiquidationExchangeRatesExcepted { + return LiquidationExchangeRatesExcepted{ + TpValue: 130, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "100000", //free collateral + "1000", //hold pToken + "60000", //lock prv 
amount + }, + Custodian2: nil, + LiquidationPool: []uint64{ + 0, //lock ptoken + 0, //lock amount collateral + }, + } + }, + }, + { + "liquidation_exchange_rates_tp120_3", + map[string]uint64 { + "btc": 8000000000, + "bnb": 25000000, + "prv": 500000, + }, + func() LiquidationExchangeRatesExcepted { + return LiquidationExchangeRatesExcepted{ + TpValue: 120, + Custodian1: []string{ + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", //address + "100000", //free collateral + "0", //hold pToken + "0", //lock prv amount + }, + Custodian2: []string{ + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", //address + "90000", //free collateral + "0", //hold pToken + "0", //lock prv amount + }, + LiquidationPool: []uint64{ + 3000, //lock ptoken + 180000, //lock amount collateral + }, + } + }, + }, + } + + suite.SetupTest() + suite.SetupExchangeRates(1) + suite.SetupMultipleCustodianContainPToken(1) + suite.verifyAutoLiquidationExchangeRates(exchangeRatesChange) +} + +func (suite *PortalProducerSuite) verifyAutoLiquidationExchangeRates(testCases []AutoLiquidationExchangeRatesTestCase) { + beaconHeight := uint64(1) + + for _, testCase := range testCases { + suite.SetupExchangeRatesWithValue(beaconHeight, testCase.Input["btc"], testCase.Input["bnb"], testCase.Input["prv"],) + + value, _ := buildInstForLiquidationTopPercentileExchangeRates( + beaconHeight, + suite.currentPortalState, + ) + + fmt.Printf("Testcase %v: instruction %#v", testCase.TestCaseName, value) + fmt.Println() + + if testCase.Output().TpValue > 0 { + var actionData metadata.PortalLiquidateTopPercentileExchangeRatesContent + json.Unmarshal([]byte(value[0][3]), &actionData) + + if actionData.TP["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"].TPKey != testCase.Output().TpValue { //free collateral + suite.T().Errorf("tp is not equal, %v != %v", 
actionData.TP["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"].TPKey, testCase.Output().TpValue) + } + } + + //custodian 1 + if testCase.Output().Custodian1 != nil { + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, testCase.Output().Custodian1[0]) + custodian, ok := suite.currentPortalState.CustodianPoolState[custodianKey.String()] + if !ok { + suite.T().Errorf("custodian %v not found", custodianKey.String()) + } + + holdPublicToken := custodian.GetHoldingPublicTokens() + lockedAmountCollateral := custodian.GetLockedAmountCollateral() + freeCollateral := custodian.GetFreeCollateral() + + fmt.Println("custodian 1") + fmt.Println(testCase.Output().Custodian1) + i1, _ := strconv.ParseUint(testCase.Output().Custodian1[1], 10, 64) + i2, _ := strconv.ParseUint(testCase.Output().Custodian1[2], 10, 64) + i3, _ := strconv.ParseUint(testCase.Output().Custodian1[3], 10, 64) + + if i1 != freeCollateral { //free collateral + suite.T().Errorf("free collateral is not equal, %v != %v", i1, freeCollateral) + } + + if i2 != holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("hold public token is not equal, %v != %v", i2, holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + + if i3 != lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("lock amount collateral is not equal, %v != %v", i3, lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + } + + if testCase.Output().Custodian2 != nil { + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, testCase.Output().Custodian2[0]) + custodian, ok := suite.currentPortalState.CustodianPoolState[custodianKey.String()] + if !ok { + suite.T().Errorf("custodian %v not found", custodianKey.String()) + } + + holdPublicToken := custodian.GetHoldingPublicTokens() + lockedAmountCollateral := 
custodian.GetLockedAmountCollateral() + freeCollateral := custodian.GetFreeCollateral() + + fmt.Println("custodian 1") + fmt.Println(testCase.Output().Custodian1) + i1, _ := strconv.ParseUint(testCase.Output().Custodian2[1], 10, 64) + i2, _ := strconv.ParseUint(testCase.Output().Custodian2[2], 10, 64) + i3, _ := strconv.ParseUint(testCase.Output().Custodian2[3], 10, 64) + + if i1 != freeCollateral { //free collateral + suite.T().Errorf("free collateral is not equal, %v != %v", i1, freeCollateral) + } + + if i2 != holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("hold public token is not equal, %v != %v", i2, holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + + if i3 != lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("lock amount collateral is not equal, %v != %v", i3, lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + } + + //liquidation pool + if testCase.Output().LiquidationPool != nil { + liquidationPoolKey := statedb.GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight) + liquidationPool, ok := suite.currentPortalState.LiquidateExchangeRatesPool[liquidationPoolKey.String()] + + if ok && testCase.Output().LiquidationPool[0] != liquidationPool.Rates()["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"].HoldAmountPubToken { + suite.T().Errorf("hold public token is not equal, %v != %v", testCase.Output().LiquidationPool[0], liquidationPool.Rates()["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"].HoldAmountPubToken) + } + + if ok && testCase.Output().LiquidationPool[1] != liquidationPool.Rates()["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"].HoldAmountFreeCollateral { + suite.T().Errorf("hold amount collateral is not equal, %v != %v", testCase.Output().LiquidationPool[1], 
liquidationPool.Rates()["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"].HoldAmountFreeCollateral) + } + } + } +} + +func (suite *PortalProducerSuite) verifyPortingRequest(testCases []PortingRequestTestCase) { + trieMock := new(mocks.Trie) + beaconHeight := uint64(1) + + for _, testCase := range testCases { + actionContentBytes, _ := json.Marshal(testCase.Input()) + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + + key := statedb.GeneratePortalStatusObjectKey(statedb.PortalPortingRequestStatusPrefix(), []byte(testCase.Input().Meta.UniqueRegisterId)) + trieMock.On("TryGet", key[:]).Return(nil, nil) + + blockChain := suite.SetupMockBlockChain(trieMock) + + value, err := blockChain.buildInstructionsForPortingRequest( + actionContentBase64Str, + testCase.Input().ShardID, + testCase.Input().Meta.Type, + suite.currentPortalState, + beaconHeight, + ) + + fmt.Printf("Testcase %v: instruction %#v", testCase.TestCaseName, value) + fmt.Println() + + assert.Equal(suite.T(), err, nil) + + if len(testCase.Output().Metadata) > 0 { + assert.Equal(suite.T(), testCase.Output().Metadata, value[0][0]) + } + + assert.Equal(suite.T(), strconv.Itoa(1), value[0][1]) + + if len(testCase.Output().ChainStatus) > 0 { + assert.Equal(suite.T(), testCase.Output().ChainStatus, value[0][2]) + } + + assert.NotNil(suite.T(), value[0][3]) + + //test current portal state + var portingRequestContent metadata.PortalPortingRequestContent + json.Unmarshal([]byte(value[0][3]), &portingRequestContent) + + prettyJSON, _ := json.MarshalIndent(portingRequestContent, "", " ") + fmt.Printf("Porting request result: %s\n", string(prettyJSON)) + + for _, itemCustodian := range portingRequestContent.Custodian { + //update custodian state + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, itemCustodian.IncAddress) + custodian := suite.currentPortalState.CustodianPoolState[custodianKey.String()] + + if testCase.Output().Custodian1 != nil && 
itemCustodian.IncAddress == testCase.Output().Custodian1[0] { + holdPublicToken := custodian.GetHoldingPublicTokens() + lockedAmountCollateral := custodian.GetLockedAmountCollateral() + freeCollateral := custodian.GetFreeCollateral() + + fmt.Println("custodian 1") + fmt.Println(testCase.Output().Custodian1) + i1, _ := strconv.ParseUint(testCase.Output().Custodian1[1], 10, 64) + i2, _ := strconv.ParseUint(testCase.Output().Custodian1[2], 10, 64) + i3, _ := strconv.ParseUint(testCase.Output().Custodian1[3], 10, 64) + + if i1 != freeCollateral { //free collateral + suite.T().Errorf("free collateral is not equal, %v != %v", i1, freeCollateral) + } + + if i2 != holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("hold public token is not equal, %v != %v", i2, holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + + if i3 != lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("lock amount collateral is not equal, %v != %v", i3, lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + } + + if testCase.Output().Custodian2 != nil && itemCustodian.IncAddress == testCase.Output().Custodian2[0] { + holdPublicToken := custodian.GetHoldingPublicTokens() + lockedAmountCollateral := custodian.GetLockedAmountCollateral() + freeCollateral := custodian.GetFreeCollateral() + + fmt.Println("custodian 2") + fmt.Println(testCase.Output().Custodian2) + i1, _ := strconv.ParseUint(testCase.Output().Custodian2[1], 10, 64) + i2, _ := strconv.ParseUint(testCase.Output().Custodian2[2], 10, 64) + i3, _ := strconv.ParseUint(testCase.Output().Custodian2[3], 10, 64) + + if i1 != freeCollateral { //free collateral + suite.T().Errorf("free collateral is not equal, %v != %v", i1, freeCollateral) + } + + if i2 != holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + 
suite.T().Errorf("hold public token is not equal, %v != %v", i2, holdPublicToken["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + + if i3 != lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] { + suite.T().Errorf("lock amount collateral is not equal, %v != %v", i3, lockedAmountCollateral["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"]) + } + } + } + } +} + +/************************ Custodian deposit test ************************/ +const ShardIDHardCode = 0 +const BeaconHeight = 1 +const BNBTokenID = "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b" +const BNBRemoteAddress = "tbnb1fau9kq605jwkyfea2knw495we8cpa47r9r6uxv" + +type CustodianDepositOutput struct { + MetadataType string + ChainStatus string + CustodianDepositContent string + CustodianPool map[string]*statedb.CustodianState +} + +type CustodianDepositInput struct { + IncognitoAddress string + RemoteAddresses []statedb.RemoteAddress + DepositedAmount uint64 +} + +type CustodianDepositTestCase struct { + TestCaseName string + Input CustodianDepositInput + Output CustodianDepositOutput +} + +func buildPortalCustodianDepositAction( + incogAddressStr string, + remoteAddresses []statedb.RemoteAddress, + depositedAmount uint64, +) []string { + custodianDepositMeta, _ := metadata.NewPortalCustodianDeposit( + metadata.PortalCustodianDepositMeta, + incogAddressStr, + remoteAddresses, + depositedAmount, + ) + + actionContent := metadata.PortalCustodianDepositAction{ + Meta: *custodianDepositMeta, + TxReqID: common.Hash{}, + ShardID: byte(ShardIDHardCode), + } + actionContentBytes, _ := json.Marshal(actionContent) + + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(metadata.PortalCustodianDepositMeta), actionContentBase64Str} + return action +} + +func buildPortalCustodianDepositContent( + custodianAddressStr string, + remoteAddresses 
[]statedb.RemoteAddress, + depositedAmount uint64, +) string { + custodianDepositContent := metadata.PortalCustodianDepositContent{ + IncogAddressStr: custodianAddressStr, + RemoteAddresses: remoteAddresses, + DepositedAmount: depositedAmount, + TxReqID: common.Hash{}, + ShardID: byte(ShardIDHardCode), + } + custodianDepositContentBytes, _ := json.Marshal(custodianDepositContent) + return string(custodianDepositContentBytes) +} + +func getTestCasesForCustodianDeposit() []*CustodianDepositTestCase { + testcases := []*CustodianDepositTestCase{ + { + TestCaseName: "Custodian deposit when custodian pool is empty", + Input: CustodianDepositInput{ + IncognitoAddress: "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", + RemoteAddresses: []statedb.RemoteAddress{ + *statedb.NewRemoteAddressWithValue( + BNBTokenID, + BNBRemoteAddress), + }, + DepositedAmount: 1000 * 1e9, + }, + Output: CustodianDepositOutput{ + MetadataType: strconv.Itoa(metadata.PortalCustodianDepositMeta), + ChainStatus: common.PortalCustodianDepositAcceptedChainStatus, + CustodianDepositContent: "", + }, + }, + { + TestCaseName: "Custodian deposit when custodian pool has one custodian before", + Input: CustodianDepositInput{ + IncognitoAddress: "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", + RemoteAddresses: []statedb.RemoteAddress{ + *statedb.NewRemoteAddressWithValue( + BNBTokenID, + BNBRemoteAddress), + }, + DepositedAmount: 2000 * 1e9, + }, + Output: CustodianDepositOutput{ + MetadataType: strconv.Itoa(metadata.PortalCustodianDepositMeta), + ChainStatus: common.PortalCustodianDepositAcceptedChainStatus, + CustodianDepositContent: "", + }, + }, { + TestCaseName: "Custodian deposit more", + Input: CustodianDepositInput{ + IncognitoAddress: "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", + RemoteAddresses: []statedb.RemoteAddress{ 
+ *statedb.NewRemoteAddressWithValue( + BNBTokenID, + BNBRemoteAddress), + }, + DepositedAmount: 3000 * 1e9, + }, + Output: CustodianDepositOutput{ + MetadataType: strconv.Itoa(metadata.PortalCustodianDepositMeta), + ChainStatus: common.PortalCustodianDepositAcceptedChainStatus, + CustodianDepositContent: "", + }, + }, + } + + custodianPool := make(map[string]*statedb.CustodianState, 0) + for i := 0; i < len(testcases); i++ { + testcases[i].Output.CustodianDepositContent = buildPortalCustodianDepositContent( + testcases[i].Input.IncognitoAddress, + testcases[i].Input.RemoteAddresses, + testcases[i].Input.DepositedAmount) + + custodianKey := statedb.GenerateCustodianStateObjectKey(uint64(BeaconHeight), testcases[i].Input.IncognitoAddress) + if custodianPool[custodianKey.String()] == nil { + custodianState := statedb.NewCustodianStateWithValue( + testcases[i].Input.IncognitoAddress, + testcases[i].Input.DepositedAmount, + testcases[i].Input.DepositedAmount, + nil, nil, + testcases[i].Input.RemoteAddresses, + 0, + ) + custodianPool[custodianKey.String()] = custodianState + } else { + custodianPool[custodianKey.String()].SetFreeCollateral(custodianPool[custodianKey.String()].GetFreeCollateral() + testcases[i].Input.DepositedAmount) + custodianPool[custodianKey.String()].SetTotalCollateral(custodianPool[custodianKey.String()].GetTotalCollateral() + testcases[i].Input.DepositedAmount) + } + custodianPoolTmp := map[string]*statedb.CustodianState{} + for key, cus := range custodianPool { + custodianPoolTmp[key] = statedb.NewCustodianStateWithValue( + cus.GetIncognitoAddress(), + cus.GetTotalCollateral(), + cus.GetFreeCollateral(), + cus.GetHoldingPublicTokens(), + cus.GetLockedAmountCollateral(), + cus.GetRemoteAddresses(), + cus.GetRewardAmount(), + ) + } + testcases[i].Output.CustodianPool = custodianPoolTmp + } + return testcases +} + +func (suite *PortalProducerSuite) TestCustodianDeposit() { + testcases := getTestCasesForCustodianDeposit() + + for _, tc := range 
testcases { + fmt.Printf("[Custodian deposit] Running test case: %v\n", tc.TestCaseName) + // build custodian deposit action + action := buildPortalCustodianDepositAction(tc.Input.IncognitoAddress, tc.Input.RemoteAddresses, tc.Input.DepositedAmount) + + // beacon build new instruction for the action + bc := BlockChain{} + shardID := byte(ShardIDHardCode) + metaType, _ := strconv.Atoi(action[0]) + contentStr := action[1] + newInsts, err := bc.buildInstructionsForCustodianDeposit(contentStr, shardID, metaType, suite.currentPortalState, uint64(BeaconHeight)) + + // compare results to Outputs of test case + suite.Nil(err) + suite.Equal(1, len(newInsts)) + newInst := newInsts[0] + suite.Equal(tc.Output.MetadataType, newInst[0]) + suite.Equal(tc.Output.ChainStatus, newInst[2]) + suite.Equal(tc.Output.CustodianDepositContent, newInst[3]) + suite.EqualValues(tc.Output.CustodianPool, suite.currentPortalState.CustodianPoolState) + } +} + +/************************ Redeem request test ************************/ +type RedeemRequestOutput struct { + MetadataType string + ChainStatus string + RedeemRequestContent string + CustodianPool map[string]*statedb.CustodianState + WaitingRedeemRequest map[string]*statedb.WaitingRedeemRequest +} + +type RedeemRequestInput struct { + UniqueRedeemID string + TokenID string + RedeemAmount uint64 + RedeemerIncAddress string + RedeemerRemoteAddr string + RedeemFee uint64 +} + +type RedeemRequestTestCase struct { + TestCaseName string + Input RedeemRequestInput + Output RedeemRequestOutput +} + +func buildPortalRedeemRequestAction( + uniqueRedeemID string, + tokenID string, + redeemAmount uint64, + incAddressStr string, + remoteAddr string, + redeemFee uint64, +) []string { + redeemRequestMeta, _ := metadata.NewPortalRedeemRequest( + metadata.PortalRedeemRequestMeta, + uniqueRedeemID, + tokenID, + redeemAmount, + incAddressStr, + remoteAddr, + redeemFee, + ) + + actionContent := metadata.PortalRedeemRequestAction{ + Meta: *redeemRequestMeta, + 
TxReqID: common.Hash{}, + ShardID: byte(ShardIDHardCode), + } + actionContentBytes, _ := json.Marshal(actionContent) + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(metadata.PortalRedeemRequestMeta), actionContentBase64Str} + return action +} + +func buildPortalRedeemRequestContent( + uniqueRedeemID string, + tokenID string, + redeemAmount uint64, + incAddressStr string, + remoteAddr string, + redeemFee uint64, + matchingCustodianDetail []*statedb.MatchingRedeemCustodianDetail, +) string { + redeemRequestContent := metadata.PortalRedeemRequestContent{ + UniqueRedeemID: uniqueRedeemID, + TokenID: tokenID, + RedeemAmount: redeemAmount, + RedeemerIncAddressStr: incAddressStr, + RemoteAddress: remoteAddr, + RedeemFee: redeemFee, + MatchingCustodianDetail: matchingCustodianDetail, + TxReqID: common.Hash{}, + ShardID: byte(ShardIDHardCode), + } + redeemRequestContentBytes, _ := json.Marshal(redeemRequestContent) + return string(redeemRequestContentBytes) +} + +func (suite *PortalProducerSuite) SetupRedeemRequest(beaconHeight uint64) { + // set up exchange rates + rates := make(map[string]statedb.FinalExchangeRatesDetail) + rates["b832e5d3b1f01a4f0623f7fe91d6673461e1f5d37d91fe78c5c2e6183ff39696"] = statedb.FinalExchangeRatesDetail{ + Amount: 8000000000, + } + rates["b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b"] = statedb.FinalExchangeRatesDetail{ + Amount: 20000000, + } + rates["0000000000000000000000000000000000000000000000000000000000000004"] = statedb.FinalExchangeRatesDetail{ + Amount: 500000, + } + + exchangeRates := make(map[string]*statedb.FinalExchangeRatesState) + exchangeRatesKey := statedb.GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + exchangeRates[exchangeRatesKey.String()] = statedb.NewFinalExchangeRatesStateWithValue(rates) + suite.currentPortalState.FinalExchangeRatesState = exchangeRates + + // set up custodian pool + remoteAddresses := 
make([]statedb.RemoteAddress, 0) + remoteAddresses = append( + remoteAddresses, + *statedb.NewRemoteAddressWithValue(BNBTokenID, BNBRemoteAddress), + ) + + custodianStates := []*statedb.CustodianState{ + statedb.NewCustodianStateWithValue( + "12RuEdPjq4yxivzm8xPxRVHmkL74t4eAdUKPdKKhMEnpxPH3k8GEyULbwq4hjwHWmHQr7MmGBJsMpdCHsYAqNE18jipWQwciBf9yqvQ", + 1000 * 1e9, + 400 * 1e9, + map[string]uint64{ + BNBTokenID: 10 * 1e9, // hold 10 BNB + }, + map[string]uint64{ + BNBTokenID: 600 * 1e9, // lock 600 PRV + }, + remoteAddresses, + 0, + ), + statedb.NewCustodianStateWithValue( + "12Rwz4HXkVABgRnSb5Gfu1FaJ7auo3fLNXVGFhxx1dSytxHpWhbkimT1Mv5Z2oCMsssSXTVsapY8QGBZd2J4mPiCTzJAtMyCzb4dDcy", + 5000 * 1e9, + 2000 * 1e9, + map[string]uint64{ + BNBTokenID: 50 * 1e9, // hold 50 BNB + }, + map[string]uint64{ + BNBTokenID: 3000 * 1e9, // lock 3000 PRV + }, + remoteAddresses, + 0, + ), + } + + custodian := make(map[string]*statedb.CustodianState) + for _, cus := range custodianStates { + custodianKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, cus.GetIncognitoAddress()) + custodian[custodianKey.String()] = cus + } + + suite.currentPortalState.CustodianPoolState = custodian +} + +func getTestCasesForRedeemRequest() []*RedeemRequestTestCase { + testcases := []*RedeemRequestTestCase{ + { + TestCaseName: "Redeem request matches to one custodian", + Input: RedeemRequestInput{ + UniqueRedeemID: "1", + TokenID: BNBTokenID, + RedeemAmount: 1 * 1e9, + RedeemerIncAddress: "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", + RedeemerRemoteAddr: BNBRemoteAddress, + RedeemFee: 0.004 * 1e9, + }, + Output: RedeemRequestOutput{ + MetadataType: strconv.Itoa(metadata.PortalRedeemRequestMeta), + ChainStatus: common.PortalRedeemRequestAcceptedChainStatus, + RedeemRequestContent: "", + }, + }, + { + TestCaseName: "Redeem request matches to two custodians", + Input: RedeemRequestInput{ + UniqueRedeemID: "1", + TokenID: BNBTokenID, + 
RedeemAmount: 51 * 1e9, + RedeemerIncAddress: "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", + RedeemerRemoteAddr: BNBRemoteAddress, + RedeemFee: 0.204 * 1e9, + }, + Output: RedeemRequestOutput{ + MetadataType: strconv.Itoa(metadata.PortalRedeemRequestMeta), + ChainStatus: common.PortalRedeemRequestAcceptedChainStatus, + RedeemRequestContent: "", + }, + }, + { + TestCaseName: "Redeem request with fee less than min redeem fee", + Input: RedeemRequestInput{ + UniqueRedeemID: "1", + TokenID: BNBTokenID, + RedeemAmount: 1 * 1e9, + RedeemerIncAddress: "12S5pBBRDf1GqfRHouvCV86sWaHzNfvakAWpVMvNnWu2k299xWCgQzLLc9wqPYUHfMYGDprPvQ794dbi6UU1hfRN4tPiU61txWWenhC", + RedeemerRemoteAddr: BNBRemoteAddress, + RedeemFee: 0.003 * 1e9, + }, + Output: RedeemRequestOutput{ + MetadataType: strconv.Itoa(metadata.PortalRedeemRequestMeta), + ChainStatus: common.PortalRedeemRequestRejectedChainStatus, + RedeemRequestContent: "", + }, + }, + } + + //custodianPool := make(map[string]*statedb.CustodianState, 0) + //for i := 0; i < len(testcases); i++ { + // testcases[i].Output.CustodianDepositContent = buildPortalCustodianDepositContent( + // testcases[i].Input.IncognitoAddress, + // testcases[i].Input.RemoteAddresses, + // testcases[i].Input.DepositedAmount) + // + // custodianKey := statedb.GenerateCustodianStateObjectKey(uint64(BeaconHeight), testcases[i].Input.IncognitoAddress) + // if custodianPool[custodianKey.String()] == nil { + // custodianState := statedb.NewCustodianStateWithValue( + // testcases[i].Input.IncognitoAddress, + // testcases[i].Input.DepositedAmount, + // testcases[i].Input.DepositedAmount, + // nil, nil, + // testcases[i].Input.RemoteAddresses, + // 0, + // ) + // custodianPool[custodianKey.String()] = custodianState + // } else { + // custodianPool[custodianKey.String()].SetFreeCollateral(custodianPool[custodianKey.String()].GetFreeCollateral() + testcases[i].Input.DepositedAmount) + // 
custodianPool[custodianKey.String()].SetTotalCollateral(custodianPool[custodianKey.String()].GetTotalCollateral() + testcases[i].Input.DepositedAmount) + // } + // custodianPoolTmp := map[string]*statedb.CustodianState{} + // for key, cus := range custodianPool { + // custodianPoolTmp[key] = statedb.NewCustodianStateWithValue( + // cus.GetIncognitoAddress(), + // cus.GetTotalCollateral(), + // cus.GetFreeCollateral(), + // cus.GetHoldingPublicTokens(), + // cus.GetLockedAmountCollateral(), + // cus.GetRemoteAddresses(), + // cus.GetRewardAmount(), + // ) + // } + // testcases[i].Output.CustodianPool = custodianPoolTmp + //} + return testcases +} + +func (suite *PortalProducerSuite) TestRedeemRequest() { + //testcases := getTestCasesForRedeemRequest() + // + //for _, tc := range testcases { + // fmt.Printf("[Redeem Request] Running test case: %v\n", tc.TestCaseName) + // // build redeem request action + // action := buildPortalRedeemRequestAction( + // tc.Input.UniqueRedeemID, tc.Input.TokenID, tc.Input.RedeemAmount, + // tc.Input.RedeemerIncAddress, tc.Input.RedeemerRemoteAddr, tc.Input.RedeemFee) + // + // // beacon build new instruction for the action + // bc := BlockChain{} + // shardID := byte(ShardIDHardCode) + // metaType, _ := strconv.Atoi(action[0]) + // contentStr := action[1] + // newInsts, err := bc.buildInstructionsForRedeemRequest(statedb, contentStr, shardID, metaType, suite.currentPortalState, uint64(BeaconHeight)) + // + // // compare results to Outputs of test case + // suite.Nil(err) + // suite.Equal(1, len(newInsts)) + // newInst := newInsts[0] + // suite.Equal(tc.Output.MetadataType, newInst[0]) + // suite.Equal(tc.Output.ChainStatus, newInst[2]) + // suite.Equal(tc.Output.CustodianDepositContent, newInst[3]) + // suite.EqualValues(tc.Output.CustodianPool, suite.currentPortalState.CustodianPoolState) + //} +} + +/************************ Run suite test ************************/ +// In order for 'go test' to run this suite, we need to create +// 
a normal test function and pass our suite to suite.Run +func TestPortalProducerSuite(t *testing.T) { + suite.Run(t, new(PortalProducerSuite)) +} diff --git a/blockchain/beaconportalrewardprocess.go b/blockchain/beaconportalrewardprocess.go new file mode 100644 index 0000000000..3460bc6e69 --- /dev/null +++ b/blockchain/beaconportalrewardprocess.go @@ -0,0 +1,195 @@ +package blockchain + +import ( + "encoding/json" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" +) + +func (blockchain *BlockChain) processPortalReward( + stateDB *statedb.StateDB, + beaconHeight uint64, instructions []string, + currentPortalState *CurrentPortalState) error { + + // unmarshal instructions content + var actionData metadata.PortalRewardContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err) + return nil + } + + reqStatus := instructions[2] + if reqStatus == "portalRewardInst" { + // update reward amount for custodian + for custodianKey, custodianState := range currentPortalState.CustodianPoolState { + custodianAddr := custodianState.GetIncognitoAddress() + custodianReward := custodianState.GetRewardAmount() + if custodianReward == nil { + custodianReward = map[string]uint64{} + } + for _, rewardInfo := range actionData.Rewards { + if rewardInfo.GetCustodianIncAddr() == custodianAddr { + for _, rewardDetail := range rewardInfo.GetRewards() { + custodianReward[rewardDetail.GetTokenID()] += rewardDetail.GetAmount() + } + break + } + } + currentPortalState.CustodianPoolState[custodianKey].SetRewardAmount(custodianReward) + } + + // at the end of epoch + if (beaconHeight+1) % blockchain.config.ChainParams.Epoch == 1 { + currentPortalState.LockedCollateralState.Reset() + } + + totalLockedCollateralAmount := uint64(0) + 
lockedCollateralDetails := currentPortalState.LockedCollateralState.GetLockedCollateralDetail() + for _, custodianState := range currentPortalState.CustodianPoolState { + for _, lockedAmount := range custodianState.GetLockedAmountCollateral() { + totalLockedCollateralAmount += lockedAmount + lockedCollateralDetails[custodianState.GetIncognitoAddress()] += lockedAmount + } + } + + currentPortalState.LockedCollateralState.SetTotalLockedCollateralInEpoch( + currentPortalState.LockedCollateralState.GetTotalLockedCollateralInEpoch() + totalLockedCollateralAmount) + currentPortalState.LockedCollateralState.SetLockedCollateralDetail( + lockedCollateralDetails) + + // store reward at beacon height into db + err = statedb.StorePortalRewards( + stateDB, + beaconHeight+1, + actionData.Rewards, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while storing portal rewards: %+v", err) + return nil + } + } else { + Logger.log.Errorf("ERROR: Invalid status of instruction: %+v", reqStatus) + return nil + } + + return nil +} + +func (blockchain *BlockChain) processPortalWithdrawReward( + stateDB *statedb.StateDB, + beaconHeight uint64, instructions []string, + currentPortalState *CurrentPortalState) error { + + // unmarshal instructions content + var actionData metadata.PortalRequestWithdrawRewardContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err) + return nil + } + + reqStatus := instructions[2] + if reqStatus == common.PortalReqWithdrawRewardAcceptedChainStatus { + // update reward amount of custodian + cusStateKey := statedb.GenerateCustodianStateObjectKey(beaconHeight, actionData.CustodianAddressStr) + cusStateKeyStr := cusStateKey.String() + custodianState := currentPortalState.CustodianPoolState[cusStateKeyStr] + if custodianState == nil { + Logger.log.Errorf("[processPortalWithdrawReward] Can not get custodian state 
with key %v", cusStateKey) + return nil + } + updatedRewardAmount := custodianState.GetRewardAmount() + updatedRewardAmount[actionData.TokenID.String()] = 0 + currentPortalState.CustodianPoolState[cusStateKeyStr].SetRewardAmount(updatedRewardAmount) + + // track request withdraw portal reward + portalReqRewardStatus := metadata.PortalRequestWithdrawRewardStatus{ + Status: common.PortalReqWithdrawRewardAcceptedStatus, + CustodianAddressStr: actionData.CustodianAddressStr, + TokenID: actionData.TokenID, + RewardAmount: actionData.RewardAmount, + TxReqID: actionData.TxReqID, + } + portalReqRewardStatusBytes, _ := json.Marshal(portalReqRewardStatus) + err = statedb.StorePortalRequestWithdrawRewardStatus( + stateDB, + actionData.TxReqID.String(), + portalReqRewardStatusBytes, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while storing portal request withdraw reward status: %+v", err) + return nil + } + + } else if reqStatus == common.PortalReqWithdrawRewardRejectedChainStatus { + // track request withdraw portal reward + portalReqRewardStatus := metadata.PortalRequestWithdrawRewardStatus{ + Status: common.PortalReqWithdrawRewardRejectedStatus, + CustodianAddressStr: actionData.CustodianAddressStr, + TokenID: actionData.TokenID, + RewardAmount: actionData.RewardAmount, + TxReqID: actionData.TxReqID, + } + portalReqRewardStatusBytes, _ := json.Marshal(portalReqRewardStatus) + err = statedb.StorePortalRequestWithdrawRewardStatus( + stateDB, + actionData.TxReqID.String(), + portalReqRewardStatusBytes, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while storing portal request withdraw reward status: %+v", err) + return nil + } + } + + return nil +} + +func (blockchain *BlockChain) processPortalTotalCustodianReward( + stateDB *statedb.StateDB, + beaconHeight uint64, instructions []string, + currentPortalState *CurrentPortalState) error { + + // unmarshal instructions content + var actionData metadata.PortalTotalCustodianReward + err := 
json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + Logger.log.Errorf("Can not unmarshal instruction content %v - Error %v\n", instructions[3], err) + return nil + } + + reqStatus := instructions[2] + if reqStatus == "portalTotalRewardInst" { + epoch := beaconHeight / blockchain.config.ChainParams.Epoch + // get old total custodian reward + oldCustodianRewards, err := statedb.GetRewardFeatureStateByFeatureName(stateDB, statedb.PortalRewardName, epoch - 1) + if err != nil { + Logger.log.Errorf("ERROR: Can not get reward for custodian: %+v", err) + return nil + } + + // update total custodian reward + for _, r := range actionData.Rewards { + oldCustodianRewards.AddTotalRewards(r.GetTokenID(), r.GetAmount()) + } + + // store total custodian reward into db + err = statedb.StoreRewardFeatureState( + stateDB, + statedb.PortalRewardName, + oldCustodianRewards.GetTotalRewards(), + epoch, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while storing total custodian reward: %+v", err) + return nil + } + } else { + Logger.log.Errorf("ERROR: Invalid status of instruction: %+v", reqStatus) + return nil + } + + return nil +} diff --git a/blockchain/beaconportalrewardproducer.go b/blockchain/beaconportalrewardproducer.go new file mode 100644 index 0000000000..37dd612a31 --- /dev/null +++ b/blockchain/beaconportalrewardproducer.go @@ -0,0 +1,293 @@ +package blockchain + +import ( + "encoding/base64" + "encoding/json" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" + "math/big" + "strconv" +) + +func (blockchain *BlockChain) buildInstForPortalReward(beaconHeight uint64, rewardInfos []*statedb.PortalRewardInfo) []string { + portalRewardContent := metadata.NewPortalReward(beaconHeight, rewardInfos) + contentStr, _ := json.Marshal(portalRewardContent) + + inst := []string{ + strconv.Itoa(metadata.PortalRewardMeta), + 
strconv.Itoa(-1), // no need shardID + "portalRewardInst", + string(contentStr), + } + + return inst +} + +func (blockchain *BlockChain) buildInstForPortalTotalReward(rewardInfos []*statedb.RewardInfoDetail) []string { + portalRewardContent := metadata.NewPortalTotalCustodianReward(rewardInfos) + contentStr, _ := json.Marshal(portalRewardContent) + + inst := []string{ + strconv.Itoa(metadata.PortalTotalRewardCustodianMeta), + strconv.Itoa(-1), // no need shardID + "portalTotalRewardInst", + string(contentStr), + } + + return inst +} + +func updatePortalRewardInfos( + rewardInfos []*statedb.PortalRewardInfo, + custodianAddress string, + tokenID string, amount uint64) []*statedb.PortalRewardInfo { + for i := 0; i < len(rewardInfos); i++ { + if rewardInfos[i].GetCustodianIncAddr() == custodianAddress { + rewardInfos[i].AddPortalRewardInfo(tokenID, amount) + return rewardInfos + } + } + rewardInfos = append(rewardInfos, + statedb.NewPortalRewardInfoWithValue(custodianAddress, + []*statedb.RewardInfoDetail{statedb.NewRewardInfoDetailWithValue(tokenID, amount)})) + + return rewardInfos +} + +func splitPortingFeeForMatchingCustodians( + feeAmount uint64, + portingAmount uint64, + matchingCustodianAddresses []*statedb.MatchingPortingCustodianDetail, + rewardInfos []*statedb.PortalRewardInfo) []*statedb.PortalRewardInfo { + for _, matchCustodianDetail := range matchingCustodianAddresses { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(matchCustodianDetail.Amount), new(big.Int).SetUint64(feeAmount)) + splitedFee := new(big.Int).Div(tmp, new(big.Int).SetUint64(portingAmount)) + rewardInfos = updatePortalRewardInfos(rewardInfos, matchCustodianDetail.IncAddress, common.PRVIDStr, splitedFee.Uint64()) + } + return rewardInfos +} + +func splitRedeemFeeForMatchingCustodians( + feeAmount uint64, + redeemAmount uint64, + matchingCustodianAddresses []*statedb.MatchingRedeemCustodianDetail, + rewardInfos []*statedb.PortalRewardInfo) []*statedb.PortalRewardInfo { + for _, 
matchCustodianDetail := range matchingCustodianAddresses { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(matchCustodianDetail.GetAmount()), new(big.Int).SetUint64(feeAmount)) + splitedFee := new(big.Int).Div(tmp, new(big.Int).SetUint64(redeemAmount)) + rewardInfos = updatePortalRewardInfos(rewardInfos, matchCustodianDetail.GetIncognitoAddress(), common.PRVIDStr, splitedFee.Uint64()) + } + + return rewardInfos +} + +func splitRewardForCustodians( + totalCustodianReward map[common.Hash]uint64, + lockedCollateralState *statedb.LockedCollateralState, + custodianState map[string]*statedb.CustodianState, + rewardInfos []*statedb.PortalRewardInfo) []*statedb.PortalRewardInfo { + totalLockedCollateral := lockedCollateralState.GetTotalLockedCollateralInEpoch() + for _, custodian := range custodianState { + lockedCollateralCustodian := lockedCollateralState.GetLockedCollateralDetail()[custodian.GetIncognitoAddress()] + for tokenID, amount := range totalCustodianReward { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(lockedCollateralCustodian), new(big.Int).SetUint64(amount)) + splitedReward := new(big.Int).Div(tmp, new(big.Int).SetUint64(totalLockedCollateral)) + rewardInfos = updatePortalRewardInfos(rewardInfos, custodian.GetIncognitoAddress(), tokenID.String(), splitedReward.Uint64()) + } + } + return rewardInfos +} + +func (blockchain *BlockChain) buildPortalRewardsInsts( + beaconHeight uint64, currentPortalState *CurrentPortalState, rewardForCustodianByEpoch map[common.Hash]uint64) ([][]string, error) { + + // rewardInfos are map custodians' addresses and reward amount + rewardInfos := make([]*statedb.PortalRewardInfo, 0) + + // get porting fee from waiting porting request at beaconHeight + 1 (new waiting porting requests) + // and split fees for matching custodians + for _, waitingPortingReq := range currentPortalState.WaitingPortingRequests { + if waitingPortingReq.BeaconHeight() == beaconHeight+1 { + rewardInfos = splitPortingFeeForMatchingCustodians( + 
waitingPortingReq.PortingFee(), + waitingPortingReq.Amount(), + waitingPortingReq.Custodians(), + rewardInfos, + ) + } + } + + // get redeem fee from waiting redeem request at beaconHeight + 1 (new waiting redeem requests) + // and split fees for matching custodians + for _, waitingRedeemReq := range currentPortalState.WaitingRedeemRequests { + if waitingRedeemReq.GetBeaconHeight() == beaconHeight+1 { + rewardInfos = splitRedeemFeeForMatchingCustodians( + waitingRedeemReq.GetRedeemFee(), + waitingRedeemReq.GetRedeemAmount(), + waitingRedeemReq.GetCustodians(), + rewardInfos, + ) + } + } + + // if there are reward by epoch instructions (at the end of the epoch) + // split reward for custodians + rewardInsts := [][]string{} + if rewardForCustodianByEpoch != nil && len(rewardForCustodianByEpoch) > 0 { + if currentPortalState.LockedCollateralState.GetTotalLockedCollateralInEpoch() > 0 { + // split reward for custodians + rewardInfos = splitRewardForCustodians( + rewardForCustodianByEpoch, + currentPortalState.LockedCollateralState, + currentPortalState.CustodianPoolState, + rewardInfos) + + // create instruction for total custodian rewards + totalCustodianRewardSlice := make([]*statedb.RewardInfoDetail, 0) + for tokenID, amount := range rewardForCustodianByEpoch { + totalCustodianRewardSlice = append(totalCustodianRewardSlice, + statedb.NewRewardInfoDetailWithValue(tokenID.String(), amount)) + } + instTotalReward := blockchain.buildInstForPortalTotalReward(totalCustodianRewardSlice) + rewardInsts = append(rewardInsts, instTotalReward) + } + } + + // update reward amount for custodian + for custodianKey, custodianState := range currentPortalState.CustodianPoolState { + custodianAddr := custodianState.GetIncognitoAddress() + custodianReward := custodianState.GetRewardAmount() + if custodianReward == nil { + custodianReward = map[string]uint64{} + } + for _, rewardInfo := range rewardInfos { + if rewardInfo.GetCustodianIncAddr() == custodianAddr { + for _, rewardDetail := 
range rewardInfo.GetRewards() { + custodianReward[rewardDetail.GetTokenID()] += rewardDetail.GetAmount() + } + break + } + } + currentPortalState.CustodianPoolState[custodianKey].SetRewardAmount(custodianReward) + } + + inst := blockchain.buildInstForPortalReward(beaconHeight+1, rewardInfos) + rewardInsts = append(rewardInsts, inst) + + return rewardInsts, nil +} + +// beacon build new instruction from instruction received from ShardToBeaconBlock +func buildWithdrawPortalRewardInst( + custodianAddressStr string, + tokenID common.Hash, + rewardAmount uint64, + metaType int, + shardID byte, + txReqID common.Hash, + status string, +) []string { + withdrawRewardContent := metadata.PortalRequestWithdrawRewardContent{ + CustodianAddressStr: custodianAddressStr, + TokenID: tokenID, + RewardAmount: rewardAmount, + TxReqID: txReqID, + ShardID: shardID, + } + withdrawRewardContentBytes, _ := json.Marshal(withdrawRewardContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(withdrawRewardContentBytes), + } +} + +// buildInstructionsForCustodianDeposit builds instruction for custodian deposit action +func (blockchain *BlockChain) buildInstructionsForReqWithdrawPortalReward( + contentStr string, + shardID byte, + metaType int, + currentPortalState *CurrentPortalState, + beaconHeight uint64, +) ([][]string, error) { + // parse instruction + actionContentBytes, err := base64.StdEncoding.DecodeString(contentStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while decoding content string of portal custodian deposit action: %+v", err) + return [][]string{}, nil + } + var actionData metadata.PortalRequestWithdrawRewardAction + err = json.Unmarshal(actionContentBytes, &actionData) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshal portal custodian deposit action: %+v", err) + return [][]string{}, nil + } + + if currentPortalState == nil { + Logger.log.Warn("WARN - 
[buildInstructionsForReqWithdrawPortalReward]: Current Portal state is null.") + // need to refund collateral to custodian + inst := buildWithdrawPortalRewardInst( + actionData.Meta.CustodianAddressStr, + actionData.Meta.TokenID, + 0, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqWithdrawRewardRejectedChainStatus, + ) + return [][]string{inst}, nil + } + meta := actionData.Meta + + keyCustodianState := statedb.GenerateCustodianStateObjectKey(beaconHeight, meta.CustodianAddressStr) + keyCustodianStateStr := keyCustodianState.String() + custodian := currentPortalState.CustodianPoolState[keyCustodianStateStr] + if custodian == nil { + Logger.log.Warn("WARN - [buildInstructionsForReqWithdrawPortalReward]: Not found custodian address in custodian pool.") + inst := buildWithdrawPortalRewardInst( + actionData.Meta.CustodianAddressStr, + actionData.Meta.TokenID, + 0, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqWithdrawRewardRejectedChainStatus, + ) + return [][]string{inst}, nil + } else { + rewardAmounts := custodian.GetRewardAmount() + rewardAmount := rewardAmounts[actionData.Meta.TokenID.String()] + + if rewardAmount == 0 { + Logger.log.Warnf("WARN - [buildInstructionsForReqWithdrawPortalReward]: Reward amount of custodian %v is zero.", meta.CustodianAddressStr) + inst := buildWithdrawPortalRewardInst( + actionData.Meta.CustodianAddressStr, + actionData.Meta.TokenID, + 0, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqWithdrawRewardRejectedChainStatus, + ) + return [][]string{inst}, nil + } + + inst := buildWithdrawPortalRewardInst( + actionData.Meta.CustodianAddressStr, + actionData.Meta.TokenID, + rewardAmount, + actionData.Meta.Type, + shardID, + actionData.TxReqID, + common.PortalReqWithdrawRewardAcceptedChainStatus, + ) + + // update reward amount of custodian + updatedRewardAmount := custodian.GetRewardAmount() + updatedRewardAmount[actionData.Meta.TokenID.String()] = 0 + 
currentPortalState.CustodianPoolState[keyCustodianStateStr].SetRewardAmount(updatedRewardAmount) + return [][]string{inst}, nil + } +} diff --git a/blockchain/beaconprocess.go b/blockchain/beaconprocess.go index dee0248bfc..d57afd93b4 100644 --- a/blockchain/beaconprocess.go +++ b/blockchain/beaconprocess.go @@ -395,9 +395,16 @@ func (blockchain *BlockChain) verifyPreProcessingBeaconBlockForSigning(beaconBlo acceptedBlockRewardInstructions := [][]string{} stopAutoStakingInstructions := [][]string{} statefulActionsByShardID := map[byte][][]string{} + rewardForCustodianByEpoch := map[common.Hash]uint64{} // Get Reward Instruction By Epoch if beaconBlock.Header.Height%blockchain.config.ChainParams.Epoch == 1 { - rewardByEpochInstruction, err = blockchain.buildRewardInstructionByEpoch(beaconBlock.Header.Height, beaconBlock.Header.Epoch-1, blockchain.BestState.Beacon.GetCopiedRewardStateDB()) + featureStateDB := beaconBestState.GetCopiedFeatureStateDB() + totalLockedCollateral, err := getTotalLockedCollateralInEpoch(featureStateDB, beaconBestState.BeaconHeight) + if err != nil { + return NewBlockChainError(GetTotalLockedCollateralError, err) + } + isSplitRewardForCustodian := totalLockedCollateral > 0 + rewardByEpochInstruction, rewardForCustodianByEpoch, err = blockchain.buildRewardInstructionByEpoch(beaconBlock.Header.Height, beaconBlock.Header.Epoch-1, blockchain.BestState.Beacon.GetCopiedRewardStateDB(), isSplitRewardForCustodian) if err != nil { return NewBlockChainError(BuildRewardInstructionError, err) } @@ -462,7 +469,7 @@ func (blockchain *BlockChain) verifyPreProcessingBeaconBlockForSigning(beaconBlo } } // build stateful instructions - statefulInsts := blockchain.buildStatefulInstructions(blockchain.BestState.Beacon.featureStateDB, statefulActionsByShardID, beaconBlock.Header.Height) + statefulInsts := blockchain.buildStatefulInstructions(blockchain.BestState.Beacon.featureStateDB, statefulActionsByShardID, beaconBlock.Header.Height, 
rewardForCustodianByEpoch) bridgeInstructions = append(bridgeInstructions, statefulInsts...) tempInstruction, err := blockchain.BestState.Beacon.GenerateInstruction(beaconBlock.Header.Height, stakeInstructions, swapInstructions, stopAutoStakingInstructions, @@ -493,7 +500,7 @@ // Get beacon state of this block // For example, new blockHeight is 91 then beacon state of this block must have height 90 // OR new block has previous has is beacon best block hash -// - Get producer via index and compare with producer address in beacon block header +// - Get producer via index and compare with producer address in beacon block header // - Validate public key and signature sanity // - Validate Agg Signature // - Beacon Best State has best block is previous block of new beacon block @@ -1303,6 +1310,17 @@ if err != nil { return NewBlockChainError(ProcessPDEInstructionError, err) } + // execute, store + err = blockchain.processPortalInstructions(tempBeaconBestState.featureStateDB, beaconBlock) + if err != nil { + return NewBlockChainError(ProcessPortalInstructionError, err) + } + // execute, store + err = blockchain.processRelayingInstructions(beaconBlock) + if err != nil { + return NewBlockChainError(ProcessPortalRelayingError, err) + } + consensusRootHash, err := tempBeaconBestState.consensusStateDB.Commit(true) if err != nil { return err diff --git a/blockchain/beaconproducer.go b/blockchain/beaconproducer.go index b20d1c858f..44a44abee5 100644 --- a/blockchain/beaconproducer.go +++ b/blockchain/beaconproducer.go @@ -92,13 +92,22 @@ func (blockGenerator *BlockGenerator) NewBlockBeacon(round int, shardsToBeaconLi BLogger.log.Infof("Producing block: %d (epoch %d)", beaconBlock.Header.Height, beaconBlock.Header.Epoch) //=====END Build Header Essential Data===== //============Build body=================== + 
rewardForCustodianByEpoch := map[common.Hash]uint64{} if (beaconBestState.BeaconHeight+1)%blockGenerator.chain.config.ChainParams.Epoch == 1 { - rewardByEpochInstruction, err = blockGenerator.chain.buildRewardInstructionByEpoch(beaconBlock.Header.Height, beaconBestState.Epoch, blockGenerator.chain.BestState.Beacon.GetCopiedRewardStateDB()) + featureStateDB := beaconBestState.GetCopiedFeatureStateDB() + totalLockedCollateral, err := getTotalLockedCollateralInEpoch(featureStateDB, beaconBestState.BeaconHeight) + if err != nil { + return nil, NewBlockChainError(GetTotalLockedCollateralError, err) + } + isSplitRewardForCustodian := totalLockedCollateral > 0 + rewardByEpochInstruction, rewardForCustodianByEpoch, err = blockGenerator.chain.buildRewardInstructionByEpoch(beaconBlock.Header.Height, beaconBestState.Epoch, blockGenerator.chain.BestState.Beacon.GetCopiedRewardStateDB(), isSplitRewardForCustodian) if err != nil { return nil, NewBlockChainError(BuildRewardInstructionError, err) } } - tempShardState, stakeInstructions, swapInstructions, bridgeInstructions, acceptedRewardInstructions, stopAutoStakingInstructions := blockGenerator.GetShardState(beaconBestState, shardsToBeaconLimit) + + tempShardState, stakeInstructions, swapInstructions, bridgeInstructions, acceptedRewardInstructions, stopAutoStakingInstructions := blockGenerator.GetShardState(beaconBestState, shardsToBeaconLimit, rewardForCustodianByEpoch) + Logger.log.Infof("In NewBlockBeacon tempShardState: %+v", tempShardState) tempInstruction, err := beaconBestState.GenerateInstruction( beaconBlock.Header.Height, stakeInstructions, swapInstructions, stopAutoStakingInstructions, @@ -244,7 +253,7 @@ func (blockGenerator *BlockGenerator) NewBlockBeacon(round int, shardsToBeaconLi // 4. bridge instructions // 5. accepted reward instructions // 6. 
stop auto staking instructions -func (blockGenerator *BlockGenerator) GetShardState(beaconBestState *BeaconBestState, shardsToBeacon map[byte]uint64) (map[byte][]ShardState, [][]string, map[byte][][]string, [][]string, [][]string, [][]string) { +func (blockGenerator *BlockGenerator) GetShardState(beaconBestState *BeaconBestState, shardsToBeacon map[byte]uint64, rewardForCustodianByEpoch map[common.Hash]uint64) (map[byte][]ShardState, [][]string, map[byte][][]string, [][]string, [][]string, [][]string) { shardStates := make(map[byte][]ShardState) validStakeInstructions := [][]string{} validStakePublicKeys := []string{} @@ -292,7 +301,12 @@ func (blockGenerator *BlockGenerator) GetShardState(beaconBestState *BeaconBestS } Logger.log.Infof("Beacon Producer/ AFTER FILTER, Shard %+v ONLY GET %+v block", shardID, totalBlock+1) for _, shardBlock := range shardBlocks[:totalBlock+1] { - shardState, validStakeInstruction, tempValidStakePublicKeys, validSwapInstruction, bridgeInstruction, acceptedRewardInstruction, stopAutoStakingInstruction, statefulActions := blockGenerator.chain.GetShardStateFromBlock(beaconBestState.BeaconHeight+1, shardBlock, shardID, true, validStakePublicKeys) + shardState, + validStakeInstruction, + tempValidStakePublicKeys, validSwapInstruction, bridgeInstruction, + acceptedRewardInstruction, stopAutoStakingInstruction, + + statefulActions := blockGenerator.chain.GetShardStateFromBlock(beaconBestState.BeaconHeight+1, shardBlock, shardID, true, validStakePublicKeys) shardStates[shardID] = append(shardStates[shardID], shardState[shardID]) validStakeInstructions = append(validStakeInstructions, validStakeInstruction...) validSwapInstructions[shardID] = append(validSwapInstructions[shardID], validSwapInstruction[shardID]...) 
@@ -310,8 +324,9 @@ func (blockGenerator *BlockGenerator) GetShardState(beaconBestState *BeaconBestS } } } + // build stateful instructions - statefulInsts := blockGenerator.chain.buildStatefulInstructions(beaconBestState.featureStateDB, statefulActionsByShardID, beaconBestState.BeaconHeight+1) + statefulInsts := blockGenerator.chain.buildStatefulInstructions(beaconBestState.featureStateDB, statefulActionsByShardID, beaconBestState.BeaconHeight+1, rewardForCustodianByEpoch) bridgeInstructions = append(bridgeInstructions, statefulInsts...) return shardStates, validStakeInstructions, validSwapInstructions, bridgeInstructions, acceptedRewardInstructions, validStopAutoStakingInstructions } @@ -553,7 +568,6 @@ func (blockchain *BlockChain) GetShardStateFromBlock(newBeaconHeight uint64, sha // Collect stateful actions statefulActions := blockchain.collectStatefulActions(shardBlock.Instructions) - Logger.log.Infof("Becon Produce: Got Shard Block %+v Shard %+v \n", shardBlock.Header.Height, shardID) return shardStates, stakeInstructions, tempValidStakePublicKeys, swapInstructions, bridgeInstructions, acceptedRewardInstructions, stopAutoStakingInstructions, statefulActions } diff --git a/blockchain/beaconrelayingprocess.go b/blockchain/beaconrelayingprocess.go new file mode 100644 index 0000000000..2f58932037 --- /dev/null +++ b/blockchain/beaconrelayingprocess.go @@ -0,0 +1,176 @@ +package blockchain + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcd/wire" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/rawdbv2" + "github.com/incognitochain/incognito-chain/metadata" + "github.com/incognitochain/incognito-chain/relaying/bnb" + btcrelaying "github.com/incognitochain/incognito-chain/relaying/btc" + "github.com/tendermint/tendermint/types" + "strconv" +) + +func (blockchain *BlockChain) processRelayingInstructions(block 
*BeaconBlock) error { + beaconHeight := block.Header.Height - 1 + db := blockchain.GetDatabase() + + relayingState, err := blockchain.InitRelayingHeaderChainStateFromDB(db, beaconHeight) + if err != nil { + Logger.log.Error(err) + return nil + } + + // because relaying instructions in received beacon block were sorted already as desired so dont need to do sorting again over here + for _, inst := range block.Body.Instructions { + if len(inst) < 4 { + continue // Not error, just not relaying instruction + } + var err error + switch inst[0] { + case strconv.Itoa(metadata.RelayingBNBHeaderMeta): + err = blockchain.processRelayingBNBHeaderInst(inst, relayingState) + case strconv.Itoa(metadata.RelayingBTCHeaderMeta): + err = blockchain.processRelayingBTCHeaderInst(inst, relayingState) + } + if err != nil { + Logger.log.Error(err) + } + } + + // store updated relayingState to leveldb with new beacon height + err = storeRelayingHeaderStateToDB(db, beaconHeight+1, relayingState) + if err != nil { + Logger.log.Error(err) + } + return nil +} + +func (blockchain *BlockChain) processRelayingBTCHeaderInst( + instruction []string, + relayingState *RelayingHeaderChainState, +) error { + Logger.log.Info("[BTC Relaying] - Processing processRelayingBTCHeaderInst...") + btcHeaderChain := relayingState.BTCHeaderChain + if btcHeaderChain == nil { + return errors.New("[processRelayingBTCHeaderInst] BTC Header chain instance should not be nil") + } + + if len(instruction) != 4 { + return nil // skip the instruction + } + + var relayingHeaderContent metadata.RelayingHeaderContent + err := json.Unmarshal([]byte(instruction[3]), &relayingHeaderContent) + if err != nil { + return err + } + + headerBytes, err := base64.StdEncoding.DecodeString(relayingHeaderContent.Header) + if err != nil { + return err + } + var msgBlk *wire.MsgBlock + err = json.Unmarshal(headerBytes, &msgBlk) + if err != nil { + return err + } + block := btcutil.NewBlock(msgBlk) + isMainChain, isOrphan, err := 
btcHeaderChain.ProcessBlockV2(block, btcrelaying.BFNone) + if err != nil { + Logger.log.Errorf("ProcessBlock fail with error: %v", err) + return err + } + Logger.log.Infof("ProcessBlock (%s) success with result: isMainChain: %v, isOrphan: %v", block.Hash(), isMainChain, isOrphan) + return nil +} + +func (blockchain *BlockChain) processRelayingBNBHeaderInst( + instructions []string, + relayingState *RelayingHeaderChainState, +) error { + if relayingState == nil { + Logger.log.Errorf("relaying header state is nil") + return errors.New("relaying header state is nil") + } + if len(instructions) != 4 { + return nil // skip the instruction + } + db := blockchain.GetDatabase() + + // unmarshal instructions content + var actionData metadata.RelayingHeaderContent + err := json.Unmarshal([]byte(instructions[3]), &actionData) + if err != nil { + return err + } + + var header types.Block + headerBytes, err := base64.StdEncoding.DecodeString(actionData.Header) + if err != nil { + return err + } + err = json.Unmarshal(headerBytes, &header) + if err != nil { + return err + } + + reqStatus := instructions[2] + if reqStatus == common.RelayingHeaderUnconfirmedAcceptedChainStatus { + //update relaying state + relayingState.BNBHeaderChain.UnconfirmedBlocks = append(relayingState.BNBHeaderChain.UnconfirmedBlocks, &header) + + } else if reqStatus == common.RelayingHeaderConfirmedAcceptedChainStatus { + // check newLatestBNBHeader is genesis header or not + genesisHeaderHeight, _ := bnb.GetGenesisBNBHeaderBlockHeight(blockchain.config.ChainParams.BNBRelayingHeaderChainID) + + if header.Header.Height == genesisHeaderHeight { + relayingState.BNBHeaderChain.LatestBlock = &header + + // store new confirmed header into db + newConfirmedheader := relayingState.BNBHeaderChain.LatestBlock + // don't need to store Data and Evidence into db + newConfirmedheader.Data = types.Data{} + newConfirmedheader.Evidence = types.EvidenceData{} + newConfirmedheaderBytes, _ := json.Marshal(newConfirmedheader) 
+ + err := rawdbv2.StoreRelayingBNBHeaderChain(db, uint64(newConfirmedheader.Height), newConfirmedheaderBytes) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while storing new confirmed header: %+v", err) + return err + } + return nil + } + + // get new latest header + blockIDNewLatestHeader := header.Header.LastBlockID + for _, header := range relayingState.BNBHeaderChain.UnconfirmedBlocks { + if bytes.Equal(header.Hash().Bytes(), blockIDNewLatestHeader.Hash) { + relayingState.BNBHeaderChain.LatestBlock = header + break + } + } + + //update relaying state + relayingState.BNBHeaderChain.UnconfirmedBlocks = []*types.Block{&header} + + // store new confirmed header into db + newConfirmedheader := relayingState.BNBHeaderChain.LatestBlock + newConfirmedheader.Data = types.Data{} + newConfirmedheader.Evidence = types.EvidenceData{} + newConfirmedheaderBytes, _ := json.Marshal(newConfirmedheader) + + err := rawdbv2.StoreRelayingBNBHeaderChain(db, uint64(newConfirmedheader.Height), newConfirmedheaderBytes) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while storing new confirmed header: %+v", err) + return err + } + } + + return nil +} diff --git a/blockchain/beaconrelayingproducer.go b/blockchain/beaconrelayingproducer.go new file mode 100644 index 0000000000..fa90752822 --- /dev/null +++ b/blockchain/beaconrelayingproducer.go @@ -0,0 +1,87 @@ +package blockchain + +import ( + "encoding/base64" + "encoding/json" + "sort" + + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/metadata" +) + +func buildRelayingInstsFromActions( + rc relayingProcessor, + relayingState *RelayingHeaderChainState, + blockchain *BlockChain, +) [][]string { + actions := rc.getActions() + Logger.log.Infof("[Blocks Relaying] - Processing buildRelayingInstsFromActions for %d actions", len(actions)) + // sort push header relaying inst + actionsGroupByBlockHeight := make(map[uint64][]metadata.RelayingHeaderAction) + + var
blockHeightArr []uint64 + + for _, action := range actions { + // parse inst + var relayingHeaderAction metadata.RelayingHeaderAction + relayingHeaderActionBytes, err := base64.StdEncoding.DecodeString(action[1]) + if err != nil { + continue + } + err = json.Unmarshal(relayingHeaderActionBytes, &relayingHeaderAction) + if err != nil { + continue + } + + // get blockHeight in action + blockHeight := relayingHeaderAction.Meta.BlockHeight + + // add to blockHeightArr + if isExist, _ := common.SliceExists(blockHeightArr, blockHeight); !isExist { + blockHeightArr = append(blockHeightArr, blockHeight) + } + + // add to actionsGroupByBlockHeight + if actionsGroupByBlockHeight[blockHeight] != nil { + actionsGroupByBlockHeight[blockHeight] = append(actionsGroupByBlockHeight[blockHeight], relayingHeaderAction) + } else { + actionsGroupByBlockHeight[blockHeight] = []metadata.RelayingHeaderAction{relayingHeaderAction} + } + } + + // sort blockHeightArr + sort.Slice(blockHeightArr, func(i, j int) bool { + return blockHeightArr[i] < blockHeightArr[j] + }) + + relayingInsts := [][]string{} + for _, value := range blockHeightArr { + blockHeight := uint64(value) + actions := actionsGroupByBlockHeight[blockHeight] + for _, action := range actions { + inst := rc.buildRelayingInst(blockchain, action, relayingState) + relayingInsts = append(relayingInsts, inst...) 
+ } + } + return relayingInsts +} + +func (blockchain *BlockChain) handleRelayingInsts( + relayingState *RelayingHeaderChainState, + pm *portalManager, +) [][]string { + Logger.log.Info("[Blocks Relaying] - Processing handleRelayingInsts...") + newInsts := [][]string{} + // sort relayingChains map to make it consistent for every run + var metaTypes []int + for metaType := range pm.relayingChains { + metaTypes = append(metaTypes, metaType) + } + sort.Ints(metaTypes) + for _, metaType := range metaTypes { + rc := pm.relayingChains[metaType] + insts := buildRelayingInstsFromActions(rc, relayingState, blockchain) + newInsts = append(newInsts, insts...) + } + return newInsts +} diff --git a/blockchain/beaconstatefulinsts.go b/blockchain/beaconstatefulinsts.go index f029ab6381..dad970bb48 100644 --- a/blockchain/beaconstatefulinsts.go +++ b/blockchain/beaconstatefulinsts.go @@ -33,10 +33,26 @@ func (blockchain *BlockChain) collectStatefulActions( continue } switch metaType { - case metadata.IssuingRequestMeta, metadata.IssuingETHRequestMeta, - metadata.PDEContributionMeta, metadata.PDETradeRequestMeta, - metadata.PDEWithdrawalRequestMeta: - statefulInsts = append(statefulInsts, inst) + case metadata.IssuingRequestMeta, + metadata.IssuingETHRequestMeta, + metadata.PDEContributionMeta, + metadata.PDETradeRequestMeta, + metadata.PDEWithdrawalRequestMeta, + metadata.PortalCustodianDepositMeta, + metadata.PortalUserRegisterMeta, + metadata.PortalUserRequestPTokenMeta, + metadata.PortalExchangeRatesMeta, + metadata.RelayingBNBHeaderMeta, + metadata.RelayingBTCHeaderMeta, + metadata.PortalCustodianWithdrawRequestMeta, + metadata.PortalRedeemRequestMeta, + metadata.PortalRequestUnlockCollateralMeta, + metadata.PortalLiquidateCustodianMeta, + metadata.PortalRequestWithdrawRewardMeta, + metadata.PortalRedeemLiquidateExchangeRatesMeta, + metadata.PortalLiquidationCustodianDepositMeta, + metadata.PortalLiquidationCustodianDepositResponseMeta: + statefulInsts = append(statefulInsts, 
inst) default: continue @@ -59,21 +75,52 @@ func groupPDEActionsByShardID( return pdeActionsByShardID } -func (blockchain *BlockChain) buildStatefulInstructions(stateDB *statedb.StateDB, statefulActionsByShardID map[byte][][]string, beaconHeight uint64) [][]string { +func (blockchain *BlockChain) buildStatefulInstructions( + stateDB *statedb.StateDB, + statefulActionsByShardID map[byte][][]string, + beaconHeight uint64, + rewardForCustodianByEpoch map[common.Hash]uint64) [][]string { currentPDEState, err := InitCurrentPDEStateFromDB(stateDB, beaconHeight-1) if err != nil { Logger.log.Error(err) } + + currentPortalState, err := InitCurrentPortalStateFromDB(stateDB, beaconHeight-1) + if err != nil { + Logger.log.Error(err) + } + + pm := NewPortalManager() + db := blockchain.GetDatabase() + relayingHeaderState, err := blockchain.InitRelayingHeaderChainStateFromDB(db, beaconHeight-1) + if err != nil { + Logger.log.Error(err) + } + accumulatedValues := &metadata.AccumulatedValues{ UniqETHTxsUsed: [][]byte{}, DBridgeTokenPair: map[string][]byte{}, CBridgeTokens: []*common.Hash{}, } instructions := [][]string{} + + // pde instructions pdeContributionActionsByShardID := map[byte][][]string{} pdeTradeActionsByShardID := map[byte][][]string{} pdeWithdrawalActionsByShardID := map[byte][][]string{} + // portal instructions + portalCustodianDepositActionsByShardID := map[byte][][]string{} + portalUserReqPortingActionsByShardID := map[byte][][]string{} + portalUserReqPTokenActionsByShardID := map[byte][][]string{} + portalExchangeRatesActionsByShardID := map[byte][][]string{} + portalRedeemReqActionsByShardID := map[byte][][]string{} + portalCustodianWithdrawActionsByShardID := map[byte][][]string{} + portalReqUnlockCollateralActionsByShardID := map[byte][][]string{} + portalReqWithdrawRewardActionsByShardID := map[byte][][]string{} + portalRedeemLiquidateExchangeRatesActionByShardID := map[byte][][]string{} + portalLiquidationCustodianDepositActionByShardID := 
map[byte][][]string{} + var keys []int for k := range statefulActionsByShardID { keys = append(keys, int(k)) @@ -114,6 +161,74 @@ func (blockchain *BlockChain) buildStatefulInstructions(stateDB *statedb.StateDB action, shardID, ) + case metadata.PortalCustodianDepositMeta: + { + portalCustodianDepositActionsByShardID = groupPortalActionsByShardID( + portalCustodianDepositActionsByShardID, + action, + shardID, + ) + } + + case metadata.PortalUserRegisterMeta: + portalUserReqPortingActionsByShardID = groupPortalActionsByShardID( + portalUserReqPortingActionsByShardID, + action, + shardID, + ) + case metadata.PortalUserRequestPTokenMeta: + portalUserReqPTokenActionsByShardID = groupPortalActionsByShardID( + portalUserReqPTokenActionsByShardID, + action, + shardID, + ) + case metadata.PortalExchangeRatesMeta: + portalExchangeRatesActionsByShardID = groupPortalActionsByShardID( + portalExchangeRatesActionsByShardID, + action, + shardID, + ) + case metadata.PortalCustodianWithdrawRequestMeta: + portalCustodianWithdrawActionsByShardID = groupPortalActionsByShardID( + portalCustodianWithdrawActionsByShardID, + action, + shardID, + ) + case metadata.PortalRedeemRequestMeta: + portalRedeemReqActionsByShardID = groupPortalActionsByShardID( + portalRedeemReqActionsByShardID, + action, + shardID, + ) + case metadata.PortalRequestUnlockCollateralMeta: + portalReqUnlockCollateralActionsByShardID = groupPortalActionsByShardID( + portalReqUnlockCollateralActionsByShardID, + action, + shardID, + ) + case metadata.PortalRequestWithdrawRewardMeta: + portalReqWithdrawRewardActionsByShardID = groupPortalActionsByShardID( + portalReqWithdrawRewardActionsByShardID, + action, + shardID, + ) + + case metadata.PortalRedeemLiquidateExchangeRatesMeta: + portalRedeemLiquidateExchangeRatesActionByShardID = groupPortalActionsByShardID( + portalRedeemLiquidateExchangeRatesActionByShardID, + action, + shardID, + ) + case metadata.PortalLiquidationCustodianDepositMeta: + 
portalLiquidationCustodianDepositActionByShardID = groupPortalActionsByShardID( + portalLiquidationCustodianDepositActionByShardID, + action, + shardID, + ) + case metadata.RelayingBNBHeaderMeta: + pm.relayingChains[metadata.RelayingBNBHeaderMeta].putAction(action) + case metadata.RelayingBTCHeaderMeta: + pm.relayingChains[metadata.RelayingBTCHeaderMeta].putAction(action) default: continue } @@ -126,12 +241,14 @@ func (blockchain *BlockChain) buildStatefulInstructions(stateDB *statedb.StateDB } } } + pdeInsts, err := blockchain.handlePDEInsts( beaconHeight-1, currentPDEState, pdeContributionActionsByShardID, pdeTradeActionsByShardID, pdeWithdrawalActionsByShardID, ) + if err != nil { Logger.log.Error(err) return instructions @@ -139,6 +256,67 @@ func (blockchain *BlockChain) buildStatefulInstructions(stateDB *statedb.StateDB if len(pdeInsts) > 0 { instructions = append(instructions, pdeInsts...) } + + // handle portal instructions + portalInsts, err := blockchain.handlePortalInsts( + stateDB, + beaconHeight-1, + currentPortalState, + portalCustodianDepositActionsByShardID, + portalUserReqPortingActionsByShardID, + portalUserReqPTokenActionsByShardID, + portalExchangeRatesActionsByShardID, + portalRedeemReqActionsByShardID, + portalCustodianWithdrawActionsByShardID, + portalReqUnlockCollateralActionsByShardID, + portalRedeemLiquidateExchangeRatesActionByShardID, + portalLiquidationCustodianDepositActionByShardID, + ) + + if err != nil { + Logger.log.Error(err) + return instructions + } + if len(portalInsts) > 0 { + instructions = append(instructions, portalInsts...) + } + + // handle relaying instructions + relayingInsts := blockchain.handleRelayingInsts(relayingHeaderState, pm) + if len(relayingInsts) > 0 { + instructions = append(instructions, relayingInsts...) 
+ } + + // auto-liquidation portal instructions + portalLiquidationInsts, err := blockchain.autoCheckAndCreatePortalLiquidationInsts( + beaconHeight-1, + currentPortalState, + ) + + if err != nil { + Logger.log.Error(err) + return instructions + } + if len(portalLiquidationInsts) > 0 { + instructions = append(instructions, portalLiquidationInsts...) + } + + // calculate rewards (include porting fee and redeem fee) for custodians and build instructions at beaconHeight + portalRewardsInsts, err := blockchain.handlePortalRewardInsts( + beaconHeight-1, + currentPortalState, + portalReqWithdrawRewardActionsByShardID, + rewardForCustodianByEpoch, + ) + + if err != nil { + Logger.log.Error(err) + return instructions + } + if len(portalRewardsInsts) > 0 { + instructions = append(instructions, portalRewardsInsts...) + } + return instructions } @@ -292,3 +470,422 @@ func (blockchain *BlockChain) handlePDEInsts( } return instructions, nil } + +// Portal +func groupPortalActionsByShardID( + portalActionsByShardID map[byte][][]string, + action []string, + shardID byte, +) map[byte][][]string { + _, found := portalActionsByShardID[shardID] + if !found { + portalActionsByShardID[shardID] = [][]string{action} + } else { + portalActionsByShardID[shardID] = append(portalActionsByShardID[shardID], action) + } + return portalActionsByShardID +} + +func (blockchain *BlockChain) handlePortalInsts( + stateDB *statedb.StateDB, + beaconHeight uint64, + currentPortalState *CurrentPortalState, + portalCustodianDepositActionsByShardID map[byte][][]string, + portalUserRequestPortingActionsByShardID map[byte][][]string, + portalUserRequestPTokenActionsByShardID map[byte][][]string, + portalExchangeRatesActionsByShardID map[byte][][]string, + portalRedeemReqActionsByShardID map[byte][][]string, + portalCustodianWithdrawActionByShardID map[byte][][]string, + portalReqUnlockCollateralActionsByShardID map[byte][][]string, + portalRedeemLiquidateExchangeRatesActionByShardID map[byte][][]string, + 
portalLiquidationCustodianDepositActionByShardID map[byte][][]string, +) ([][]string, error) { + instructions := [][]string{} + + // handle portal custodian deposit inst + var custodianShardIDKeys []int + for k := range portalCustodianDepositActionsByShardID { + custodianShardIDKeys = append(custodianShardIDKeys, int(k)) + } + + sort.Ints(custodianShardIDKeys) + for _, value := range custodianShardIDKeys { + shardID := byte(value) + actions := portalCustodianDepositActionsByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForCustodianDeposit( + contentStr, + shardID, + metadata.PortalCustodianDepositMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) + } + } + } + + // handle portal user request porting inst + var requestPortingShardIDKeys []int + for k := range portalUserRequestPortingActionsByShardID { + requestPortingShardIDKeys = append(requestPortingShardIDKeys, int(k)) + } + + sort.Ints(requestPortingShardIDKeys) + for _, value := range requestPortingShardIDKeys { + shardID := byte(value) + actions := portalUserRequestPortingActionsByShardID[shardID] + + //check identity of porting request id + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForPortingRequest( + contentStr, + shardID, + metadata.PortalUserRegisterMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) 
+ } + } + } + // handle portal user request ptoken inst + var reqPTokenShardIDKeys []int + for k := range portalUserRequestPTokenActionsByShardID { + reqPTokenShardIDKeys = append(reqPTokenShardIDKeys, int(k)) + } + + sort.Ints(reqPTokenShardIDKeys) + for _, value := range reqPTokenShardIDKeys { + shardID := byte(value) + actions := portalUserRequestPTokenActionsByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForReqPTokens( + stateDB, + contentStr, + shardID, + metadata.PortalUserRequestPTokenMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) + } + } + } + + // handle portal redeem req inst + var redeemReqShardIDKeys []int + for k := range portalRedeemReqActionsByShardID { + redeemReqShardIDKeys = append(redeemReqShardIDKeys, int(k)) + } + + sort.Ints(redeemReqShardIDKeys) + for _, value := range redeemReqShardIDKeys { + shardID := byte(value) + actions := portalRedeemReqActionsByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForRedeemRequest( + stateDB, + contentStr, + shardID, + metadata.PortalRedeemRequestMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) 
+ } + } + } + + //handle portal exchange rates + var exchangeRatesShardIDKeys []int + for k := range portalExchangeRatesActionsByShardID { + exchangeRatesShardIDKeys = append(exchangeRatesShardIDKeys, int(k)) + } + + sort.Ints(exchangeRatesShardIDKeys) + for _, value := range exchangeRatesShardIDKeys { + shardID := byte(value) + actions := portalExchangeRatesActionsByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForExchangeRates( + contentStr, + shardID, + metadata.PortalExchangeRatesMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) + } + } + } + + //handle portal custodian withdraw + var portalCustodianWithdrawShardIDKeys []int + for k := range portalCustodianWithdrawActionByShardID { + portalCustodianWithdrawShardIDKeys = append(portalCustodianWithdrawShardIDKeys, int(k)) + } + + sort.Ints(portalCustodianWithdrawShardIDKeys) + for _, value := range portalCustodianWithdrawShardIDKeys { + shardID := byte(value) + actions := portalCustodianWithdrawActionByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForCustodianWithdraw( + contentStr, + shardID, + metadata.PortalCustodianWithdrawRequestMeta, + + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) 
+ } + } + } + + // handle portal req unlock collateral inst + var reqUnlockCollateralShardIDKeys []int + for k := range portalReqUnlockCollateralActionsByShardID { + reqUnlockCollateralShardIDKeys = append(reqUnlockCollateralShardIDKeys, int(k)) + } + + sort.Ints(reqUnlockCollateralShardIDKeys) + for _, value := range reqUnlockCollateralShardIDKeys { + shardID := byte(value) + actions := portalReqUnlockCollateralActionsByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForReqUnlockCollateral( + stateDB, + contentStr, + shardID, + metadata.PortalRequestUnlockCollateralMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) + } + } + } + + // handle liquidation user redeem ptoken exchange rates + var redeemLiquidateExchangeRatesActionByShardIDKeys []int + for k := range portalRedeemLiquidateExchangeRatesActionByShardID { + redeemLiquidateExchangeRatesActionByShardIDKeys = append(redeemLiquidateExchangeRatesActionByShardIDKeys, int(k)) + } + + sort.Ints(redeemLiquidateExchangeRatesActionByShardIDKeys) + for _, value := range redeemLiquidateExchangeRatesActionByShardIDKeys { + shardID := byte(value) + actions := portalRedeemLiquidateExchangeRatesActionByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForLiquidationRedeemPTokenExchangeRates( + contentStr, + shardID, + metadata.PortalRedeemLiquidateExchangeRatesMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) 
+ } + } + } + + // handle portal liquidation custodian deposit inst + var portalLiquidationCustodianDepositActionByShardIDKeys []int + for k := range portalLiquidationCustodianDepositActionByShardID { + portalLiquidationCustodianDepositActionByShardIDKeys = append(portalLiquidationCustodianDepositActionByShardIDKeys, int(k)) + } + + sort.Ints(portalLiquidationCustodianDepositActionByShardIDKeys) + for _, value := range portalLiquidationCustodianDepositActionByShardIDKeys { + shardID := byte(value) + actions := portalLiquidationCustodianDepositActionByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForLiquidationCustodianDeposit( + contentStr, + shardID, + metadata.PortalLiquidationCustodianDepositMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) + } + } + } + + return instructions, nil +} + +// Header relaying +func groupRelayingActionsByShardID( + relayingActionsByShardID map[byte][][]string, + action []string, + shardID byte, +) map[byte][][]string { + _, found := relayingActionsByShardID[shardID] + if !found { + relayingActionsByShardID[shardID] = [][]string{action} + } else { + relayingActionsByShardID[shardID] = append(relayingActionsByShardID[shardID], action) + } + return relayingActionsByShardID +} + +func (blockchain *BlockChain) autoCheckAndCreatePortalLiquidationInsts( + beaconHeight uint64, currentPortalState *CurrentPortalState) ([][]string, error) { + //Logger.log.Errorf("autoCheckAndCreatePortalLiquidationInsts starting.......") + + insts := [][]string{} + + // check there is any waiting porting request timeout + expiredWaitingPortingInsts, err := blockchain.checkAndBuildInstForExpiredWaitingPortingRequest(beaconHeight, currentPortalState) + if err != nil { + Logger.log.Errorf("Error when check and build custodian liquidation %v\n", err) + } + if 
len(expiredWaitingPortingInsts) > 0 { + insts = append(insts, expiredWaitingPortingInsts...) + } + Logger.log.Infof("There are %v instruction for expired waiting porting in portal\n", len(expiredWaitingPortingInsts)) + + // case 1: check there is any custodian doesn't send public tokens back to user after PortalTimeOutCustodianSendPubTokenBack + // get custodian's collateral to return user + custodianLiqInsts, err := blockchain.checkAndBuildInstForCustodianLiquidation(beaconHeight, currentPortalState) + if err != nil { + Logger.log.Errorf("Error when check and build custodian liquidation %v\n", err) + } + if len(custodianLiqInsts) > 0 { + insts = append(insts, custodianLiqInsts...) + } + Logger.log.Infof("There are %v instruction for custodian liquidation in portal\n", len(custodianLiqInsts)) + + // case 2: check collateral's value (locked collateral amount) drops below MinRatio + + exchangeRatesLiqInsts, err := buildInstForLiquidationTopPercentileExchangeRates(beaconHeight, currentPortalState) + if err != nil { + Logger.log.Errorf("Error when check and build exchange rates liquidation %v\n", err) + } + if len(exchangeRatesLiqInsts) > 0 { + insts = append(insts, exchangeRatesLiqInsts...) + } + + Logger.log.Infof("There are %v instruction for exchange rates liquidation in portal\n", len(exchangeRatesLiqInsts)) + + return insts, nil +} + +// handlePortalRewardInsts +// 1. Build instructions for request withdraw portal reward +// 2. 
Build instructions portal reward for each beacon block +func (blockchain *BlockChain) handlePortalRewardInsts( + beaconHeight uint64, + currentPortalState *CurrentPortalState, + portalReqWithdrawRewardActionsByShardID map[byte][][]string, + rewardForCustodianByEpoch map[common.Hash]uint64, +) ([][]string, error) { + instructions := [][]string{} + + // Build instructions portal reward for each beacon block + portalRewardInsts, err := blockchain.buildPortalRewardsInsts(beaconHeight, currentPortalState, rewardForCustodianByEpoch) + if err != nil { + Logger.log.Error(err) + } + if len(portalRewardInsts) > 0 { + instructions = append(instructions, portalRewardInsts...) + } + + // handle portal request withdraw reward inst + var shardIDKeys []int + for k := range portalReqWithdrawRewardActionsByShardID { + shardIDKeys = append(shardIDKeys, int(k)) + } + + sort.Ints(shardIDKeys) + for _, value := range shardIDKeys { + shardID := byte(value) + actions := portalReqWithdrawRewardActionsByShardID[shardID] + for _, action := range actions { + contentStr := action[1] + newInst, err := blockchain.buildInstructionsForReqWithdrawPortalReward( + contentStr, + shardID, + metadata.PortalRequestWithdrawRewardMeta, + currentPortalState, + beaconHeight, + ) + + if err != nil { + Logger.log.Error(err) + continue + } + if len(newInst) > 0 { + instructions = append(instructions, newInst...) 
+ } + } + } + + return instructions, nil +} diff --git a/blockchain/beaconutils.go b/blockchain/beaconutils.go index bb49c957f9..dcaf4433b8 100644 --- a/blockchain/beaconutils.go +++ b/blockchain/beaconutils.go @@ -409,7 +409,7 @@ func snapshotRewardReceiver(rewardReceiver map[string]string) (map[string]string snapshotRewardReceiver[k] = v } if !reflect.DeepEqual(snapshotRewardReceiver, rewardReceiver) { - return snapshotRewardReceiver, fmt.Errorf("Failed to Clone Reward Receivers, expect %+v but get %+v", rewardReceiver, snapshotRewardReceiver) + return snapshotRewardReceiver, fmt.Errorf("Failed to Clone Reward Receivers, expect %+v but get %+v", rewardReceiver, snapshotRewardReceiver) } return snapshotRewardReceiver, nil } diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index ec558376c9..9502241d35 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -16,6 +16,7 @@ import ( "github.com/incognitochain/incognito-chain/privacy" "github.com/incognitochain/incognito-chain/pubsub" "github.com/incognitochain/incognito-chain/transaction" + btcrelaying "github.com/incognitochain/incognito-chain/relaying/btc" libp2p "github.com/libp2p/go-libp2p-peer" "github.com/pkg/errors" "io" @@ -41,8 +42,9 @@ type BestState struct { Shard map[byte]*ShardBestState } -// config is a descriptor which specifies the blockchain instance configuration.
type Config struct { + BTCChain *btcrelaying.BlockChain DataBase incdb.Database MemCache *memcache.MemoryCache Interrupt <-chan struct{} @@ -522,3 +524,8 @@ func (blockchain *BlockChain) BackupBeaconChain(writer io.Writer) error { } // -------------- End of Blockchain BackUp And Restore -------------- + +// GetConfig returns blockchain's config +func (blockchain *BlockChain) GetConfig() *Config { + return &blockchain.config +} diff --git a/blockchain/burn_test.go b/blockchain/burn_test.go1 similarity index 100% rename from blockchain/burn_test.go rename to blockchain/burn_test.go1 diff --git a/blockchain/constants.go b/blockchain/constants.go index 2ad160f269..dc6eb921b0 100644 --- a/blockchain/constants.go +++ b/blockchain/constants.go @@ -71,6 +71,10 @@ const ( MainETHContractAddressStr = "0x0261DB5AfF8E5eC99fBc8FBBA5D4B9f8EcD44ec7" // v2-main - mainnet, branch master-temp-B-deploy, support erc20 with decimals > 18 MainnetIncognitoDAOAddress = "12S32fSyF4h8VxFHt4HfHvU1m9KHvBQsab5zp4TpQctmMdWuveXFH9KYWNemo7DRKvaBEvMgqm4XAuq1a1R4cNk2kfUfvXR3DdxCho3" // community fund MainnetCentralizedWebsitePaymentAddress = "12Rvjw6J3FWY3YZ1eDZ5uTy6DTPjFeLhCK7SXgppjivg9ShX2RRq3s8pdoapnH8AMoqvUSqZm1Gqzw7rrKsNzRJwSK2kWbWf1ogy885" + + // relaying header chain + MainnetBNBChainID = "Binance-Chain-Tigris" + MainnetBTCChainID = "Bitcoin-Mainnet" // ------------- end Mainnet -------------------------------------- ) @@ -113,6 +117,11 @@ const ( TestnetETHContractAddressStr = "0xe77aBF10cC0c30Ab3Ac2d877add39553cA7a8654" TestnetIncognitoDAOAddress = "12S5Lrs1XeQLbqN4ySyKtjAjd2d7sBP2tjFijzmp6avrrkQCNFMpkXm3FPzj2Wcu2ZNqJEmh9JriVuRErVwhuQnLmWSaggobEWsBEci" // community fund TestnetCentralizedWebsitePaymentAddress = "12S5Lrs1XeQLbqN4ySyKtjAjd2d7sBP2tjFijzmp6avrrkQCNFMpkXm3FPzj2Wcu2ZNqJEmh9JriVuRErVwhuQnLmWSaggobEWsBEci" + + // relaying header chain + TestnetBNBChainID = "Binance-Chain-Nile" + //TestnetBNBChainID = "Binance-Dev" + TestnetBTCChainID = "Bitcoin-Testnet" ) // VARIABLE for testnet 
diff --git a/blockchain/error.go b/blockchain/error.go index 3d0f561456..8a6cb97e0c 100644 --- a/blockchain/error.go +++ b/blockchain/error.go @@ -164,6 +164,7 @@ const ( NotEnoughRewardError InitPDETradeResponseTransactionError ProcessPDEInstructionError + ProcessPortalInstructionError InitBeaconStateError GetListOutputCoinsByKeysetError ProcessSalaryInstructionsError @@ -174,6 +175,8 @@ const ( BackUpShardStateError BackupCurrentBeaconStateError ProcessAutoStakingError + ProcessPortalRelayingError + GetTotalLockedCollateralError ) var ErrCodeMessage = map[int]struct { @@ -323,16 +326,19 @@ var ErrCodeMessage = map[int]struct { NotEnoughRewardError: {-1140, "Not enough reward Error"}, InitPDETradeResponseTransactionError: {-1141, "Init PDE trade response tx Error"}, ProcessPDEInstructionError: {-1142, "Process PDE instruction Error"}, - InitBeaconStateError: {-1143, "Init Beacon State Error"}, - ProcessSalaryInstructionsError: {-1144, "Proccess Salary Instruction Error"}, - GetShardIDFromTxError: {-1145, "Get ShardID From Tx Error"}, - GetValueFromTxError: {-1146, "Get Value From Tx Error"}, - ValidateBlockWithPreviousShardBestStateError: {-1147, "Validate Block With Previous Shard Best State Error"}, - BackUpShardStateError: {-1148, "Back Up Shard State Error"}, - ValidateBlockWithPreviousBeaconBestStateError: {-1149, "Validate Block With Previous Beacon Best State Error"}, - BackupCurrentBeaconStateError: {-1150, "Backup Current Beacon State Error"}, - ProcessAutoStakingError: {-1151, "Process Auto Staking Error"}, + ProcessPortalInstructionError: {-1143, "Process Portal instruction Error"}, + InitBeaconStateError: {-1144, "Init Beacon State Error"}, + ProcessSalaryInstructionsError: {-1145, "Proccess Salary Instruction Error"}, + GetShardIDFromTxError: {-1146, "Get ShardID From Tx Error"}, + GetValueFromTxError: {-1147, "Get Value From Tx Error"}, + ValidateBlockWithPreviousShardBestStateError: {-1148, "Validate Block With Previous Shard Best State Error"}, + 
BackUpShardStateError: {-1149, "Back Up Shard State Error"}, + ValidateBlockWithPreviousBeaconBestStateError: {-1150, "Validate Block With Previous Beacon Best State Error"}, + BackupCurrentBeaconStateError: {-1151, "Backup Current Beacon State Error"}, + ProcessAutoStakingError: {-1152, "Process Auto Staking Error"}, + ProcessPortalRelayingError: {-1153, "Process Portal Relaying Error"}, GetListOutputCoinsByKeysetError: {-2000, "Get List Output Coins By Keyset Error"}, + GetTotalLockedCollateralError: {-3000, "Get Total Locked Collateral Error"}, } type BlockChainError struct { diff --git a/blockchain/params.go b/blockchain/params.go index 672106662d..5e8543b9d4 100644 --- a/blockchain/params.go +++ b/blockchain/params.go @@ -45,6 +45,8 @@ type Params struct { ChainVersion string AssignOffset int BeaconHeightBreakPointBurnAddr uint64 + BNBRelayingHeaderChainID string + BTCRelayingHeaderChainID string } type GenesisParams struct { @@ -108,6 +110,8 @@ func init() { CheckForce: false, ChainVersion: "version-chain-test.json", BeaconHeightBreakPointBurnAddr: 250000, + BNBRelayingHeaderChainID: TestnetBNBChainID, + BTCRelayingHeaderChainID: TestnetBTCChainID, } // END TESTNET // FOR MAINNET @@ -153,5 +157,7 @@ func init() { CheckForce: false, ChainVersion: "version-chain-main.json", BeaconHeightBreakPointBurnAddr: 150500, + BNBRelayingHeaderChainID: MainnetBNBChainID, + BTCRelayingHeaderChainID: MainnetBTCChainID, } } diff --git a/blockchain/pdeflows_test.go b/blockchain/pdeflows_test.go1 similarity index 100% rename from blockchain/pdeflows_test.go rename to blockchain/pdeflows_test.go1 diff --git a/blockchain/portaltxsbuilder.go b/blockchain/portaltxsbuilder.go new file mode 100644 index 0000000000..5519dd334d --- /dev/null +++ b/blockchain/portaltxsbuilder.go @@ -0,0 +1,527 @@ +package blockchain + +import ( + "encoding/json" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/metadata" + 
"github.com/incognitochain/incognito-chain/privacy" + "github.com/incognitochain/incognito-chain/transaction" + "github.com/incognitochain/incognito-chain/wallet" +) + +// buildPortalRefundCustodianDepositTx builds refund tx for custodian deposit tx with status "refund" +// mints PRV to return to custodian +func (blockGenerator *BlockGenerator) buildPortalRefundCustodianDepositTx( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Info("[Portal refund custodian deposit] Starting...") + contentBytes := []byte(contentStr) + var refundDeposit metadata.PortalCustodianDepositContent + err := json.Unmarshal(contentBytes, &refundDeposit) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshaling portal custodian deposit content: %+v", err) + return nil, nil + } + if refundDeposit.ShardID != shardID { + return nil, nil + } + + meta := metadata.NewPortalCustodianDepositResponse( + "refund", + refundDeposit.TxReqID, + refundDeposit.IncogAddressStr, + metadata.PortalCustodianDepositResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(refundDeposit.IncogAddressStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while deserializing custodian address string: %+v", err) + return nil, nil + } + receiverAddr := keyWallet.KeySet.PaymentAddress + + // the returned currency is PRV + resTx := new(transaction.Tx) + err = resTx.InitTxSalary( + refundDeposit.DepositedAmount, + &receiverAddr, + producerPrivateKey, + blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB(), + meta, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while initializing refund contribution (normal) tx: %+v", err) + return nil, nil + } + //modify the type of the salary transaction + // resTx.Type = common.TxBlockProducerCreatedType + return resTx, nil +} + +func (blockGenerator *BlockGenerator) buildPortalLiquidationCustodianDepositReject( + 
contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Info("[buildPortalLiquidationCustodianDepositReject] Starting...") + contentBytes := []byte(contentStr) + var refundDeposit metadata.PortalLiquidationCustodianDepositContent + err := json.Unmarshal(contentBytes, &refundDeposit) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while unmarshaling portal liquidation custodian deposit content: %+v", err) + return nil, nil + } + if refundDeposit.ShardID != shardID { + return nil, nil + } + + meta := metadata.NewPortalLiquidationCustodianDepositResponse( + common.PortalLiquidationCustodianDepositRejectedChainStatus, + refundDeposit.TxReqID, + refundDeposit.IncogAddressStr, + refundDeposit.DepositedAmount, + metadata.PortalLiquidationCustodianDepositResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(refundDeposit.IncogAddressStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while deserializing custodian liquidation address string: %+v", err) + return nil, nil + } + receiverAddr := keyWallet.KeySet.PaymentAddress + + // the returned currency is PRV + resTx := new(transaction.Tx) + err = resTx.InitTxSalary( + refundDeposit.DepositedAmount, + &receiverAddr, + producerPrivateKey, + blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB(), + meta, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while initializing refund contribution (normal) tx: %+v", err) + return nil, nil + } + //modify the type of the salary transaction + // resTx.Type = common.TxBlockProducerCreatedType + return resTx, nil +} + +// buildPortalAcceptedRequestPTokensTx builds response tx for user request ptoken tx with status "accepted" +// mints ptoken to return to user +func (blockGenerator *BlockGenerator) buildPortalAcceptedRequestPTokensTx( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) 
(metadata.Transaction, error) { + Logger.log.Errorf("[Shard buildPortalAcceptedRequestPTokensTx] Starting...") + contentBytes := []byte(contentStr) + var acceptedReqPToken metadata.PortalRequestPTokensContent + err := json.Unmarshal(contentBytes, &acceptedReqPToken) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshaling portal custodian deposit content: %+v", err) + return nil, nil + } + if acceptedReqPToken.ShardID != shardID { + Logger.log.Errorf("ERROR: ShardID unexpected expect %v, but got %+v", shardID, acceptedReqPToken.ShardID) + return nil, nil + } + + meta := metadata.NewPortalRequestPTokensResponse( + "accepted", + acceptedReqPToken.TxReqID, + acceptedReqPToken.IncogAddressStr, + acceptedReqPToken.PortingAmount, + acceptedReqPToken.TokenID, + metadata.PortalUserRequestPTokenResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(acceptedReqPToken.IncogAddressStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while deserializing custodian address string: %+v", err) + return nil, nil + } + receiverAddr := keyWallet.KeySet.PaymentAddress + receiveAmt := acceptedReqPToken.PortingAmount + tokenID, _ := new(common.Hash).NewHashFromStr(acceptedReqPToken.TokenID) + + // in case the returned currency is privacy custom token + receiver := &privacy.PaymentInfo{ + Amount: receiveAmt, + PaymentAddress: receiverAddr, + } + var propertyID [common.HashSize]byte + copy(propertyID[:], tokenID[:]) + propID := common.Hash(propertyID) + tokenParams := &transaction.CustomTokenPrivacyParamTx{ + PropertyID: propID.String(), + // PropertyName: issuingAcceptedInst.IncTokenName, + // PropertySymbol: issuingAcceptedInst.IncTokenName, + Amount: receiveAmt, + TokenTxType: transaction.CustomTokenInit, + Receiver: []*privacy.PaymentInfo{receiver}, + TokenInput: []*privacy.InputCoin{}, + Mintable: true, + } + resTx := &transaction.TxCustomTokenPrivacy{} + txStateDB := 
blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB() + featureStateDB := blockGenerator.chain.BestState.Beacon.GetCopiedFeatureStateDB() + initErr := resTx.Init( + transaction.NewTxPrivacyTokenInitParams( + producerPrivateKey, + []*privacy.PaymentInfo{}, + nil, + 0, + tokenParams, + txStateDB, + meta, + false, + false, + shardID, + nil, + featureStateDB, + ), + ) + if initErr != nil { + Logger.log.Errorf("ERROR: an error occured while initializing request ptoken response tx: %+v", initErr) + return nil, initErr + } + return resTx, nil +} + +func (blockGenerator *BlockGenerator) buildPortalCustodianWithdrawRequest( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Infof("[Shard buildPortalCustodianWithdrawRequest] Starting...") + contentBytes := []byte(contentStr) + var custodianWithdrawRequest metadata.PortalCustodianWithdrawRequestContent + err := json.Unmarshal(contentBytes, &custodianWithdrawRequest) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while unmarshaling portal custodian withdraw request content: %+v", err) + return nil, nil + } + if custodianWithdrawRequest.ShardID != shardID { + Logger.log.Errorf("ERROR: ShardID unexpected expect %v, but got %+v", shardID, custodianWithdrawRequest.ShardID) + return nil, nil + } + + meta := metadata.NewPortalCustodianWithdrawResponse( + common.PortalCustodianWithdrawRequestAcceptedStatus, + custodianWithdrawRequest.TxReqID, + custodianWithdrawRequest.PaymentAddress, + custodianWithdrawRequest.Amount, + metadata.PortalCustodianWithdrawResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(custodianWithdrawRequest.PaymentAddress) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while deserializing custodian address string: %+v", err) + return nil, nil + } + + receiverAddr := keyWallet.KeySet.PaymentAddress + receiveAmt := custodianWithdrawRequest.Amount + + // the returned 
currency is PRV + resTx := new(transaction.Tx) + err = resTx.InitTxSalary( + receiveAmt, + &receiverAddr, + producerPrivateKey, + blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB(), + meta, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while initializing custodian withdraw (normal) tx: %+v", err) + return nil, nil + } + + return resTx, nil +} + +func (blockGenerator *BlockGenerator) buildPortalRedeemLiquidateExchangeRatesRequestTx( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Errorf("[Shard buildPortalRedeemLiquidateExchangeRatesRequestTx] Starting...") + contentBytes := []byte(contentStr) + var redeemReqContent metadata.PortalRedeemLiquidateExchangeRatesContent + err := json.Unmarshal(contentBytes, &redeemReqContent) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while unmarshaling portal redeem liquidate exchange rates content: %+v", err) + return nil, nil + } + if redeemReqContent.ShardID != shardID { + Logger.log.Errorf("ERROR: ShardID unexpected expect %v, but got %+v", shardID, redeemReqContent.ShardID) + return nil, nil + } + + meta := metadata.NewPortalRedeemLiquidateExchangeRatesResponse( + common.PortalRedeemLiquidateExchangeRatesSuccessChainStatus, + redeemReqContent.TxReqID, + redeemReqContent.RedeemerIncAddressStr, + redeemReqContent.RedeemAmount, + redeemReqContent.TotalPTokenReceived, + redeemReqContent.TokenID, + metadata.PortalRedeemLiquidateExchangeRatesResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(redeemReqContent.RedeemerIncAddressStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occurred while deserializing custodian address string: %+v", err) + return nil, nil + } + + receiverAddr := keyWallet.KeySet.PaymentAddress + receiveAmt := redeemReqContent.TotalPTokenReceived + + // the returned currency is PRV + resTx := new(transaction.Tx) + err = resTx.InitTxSalary( + 
receiveAmt, + &receiverAddr, + producerPrivateKey, + blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB(), + meta, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while initializing custodian withdraw (normal) tx: %+v", err) + return nil, nil + } + + return resTx, nil +} + + +// buildPortalRejectedRedeemRequestTx builds response tx for user request redeem tx with status "rejected" +// mints ptoken to return to user (ptoken that user burned) +func (blockGenerator *BlockGenerator) buildPortalRejectedRedeemRequestTx( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Errorf("[Shard buildPortalRejectedRedeemRequestTx] Starting...") + contentBytes := []byte(contentStr) + var redeemReqContent metadata.PortalRedeemRequestContent + err := json.Unmarshal(contentBytes, &redeemReqContent) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshaling portal redeem request content: %+v", err) + return nil, nil + } + if redeemReqContent.ShardID != shardID { + Logger.log.Errorf("ERROR: ShardID unexpected expect %v, but got %+v", shardID, redeemReqContent.ShardID) + return nil, nil + } + + meta := metadata.NewPortalRedeemRequestResponse( + "rejected", + redeemReqContent.TxReqID, + redeemReqContent.RedeemerIncAddressStr, + redeemReqContent.RedeemAmount, + redeemReqContent.TokenID, + metadata.PortalRedeemRequestResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(redeemReqContent.RedeemerIncAddressStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while deserializing requester address string: %+v", err) + return nil, nil + } + receiverAddr := keyWallet.KeySet.PaymentAddress + receiveAmt := redeemReqContent.RedeemAmount + tokenID, _ := new(common.Hash).NewHashFromStr(redeemReqContent.TokenID) + + // in case the returned currency is privacy custom token + receiver := &privacy.PaymentInfo{ + Amount: receiveAmt, + 
PaymentAddress: receiverAddr, + } + var propertyID [common.HashSize]byte + copy(propertyID[:], tokenID[:]) + propID := common.Hash(propertyID) + tokenParams := &transaction.CustomTokenPrivacyParamTx{ + PropertyID: propID.String(), + Amount: receiveAmt, + TokenTxType: transaction.CustomTokenInit, + Receiver: []*privacy.PaymentInfo{receiver}, + TokenInput: []*privacy.InputCoin{}, + Mintable: true, + } + resTx := &transaction.TxCustomTokenPrivacy{} + txStateDB := blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB() + featureStateDB := blockGenerator.chain.BestState.Beacon.GetCopiedFeatureStateDB() + initErr := resTx.Init( + transaction.NewTxPrivacyTokenInitParams( + producerPrivateKey, + []*privacy.PaymentInfo{}, + nil, + 0, + tokenParams, + txStateDB, + meta, + false, + false, + shardID, + nil, + featureStateDB, + ), + ) + if initErr != nil { + Logger.log.Errorf("ERROR: an error occured while initializing redeem request response tx: %+v", initErr) + return nil, initErr + } + + Logger.log.Errorf("Suucc: %+v", err) + return resTx, nil +} + +// buildPortalRefundCustodianDepositTx builds refund tx for custodian deposit tx with status "refund" +// mints PRV to return to custodian +func (blockGenerator *BlockGenerator) buildPortalLiquidateCustodianResponseTx( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Info("[Portal liquidate custodian response] Starting...") + contentBytes := []byte(contentStr) + var liqCustodian metadata.PortalLiquidateCustodianContent + err := json.Unmarshal(contentBytes, &liqCustodian) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshaling portal liquidation custodian content: %+v", err) + return nil, nil + } + if liqCustodian.ShardID != shardID { + return nil, nil + } + + meta := metadata.NewPortalLiquidateCustodianResponse( + liqCustodian.UniqueRedeemID, + liqCustodian.MintedCollateralAmount, + 
liqCustodian.RedeemerIncAddressStr, + liqCustodian.CustodianIncAddressStr, + metadata.PortalLiquidateCustodianResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(liqCustodian.RedeemerIncAddressStr) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while deserializing redeemer address string: %+v", err) + return nil, nil + } + receiverAddr := keyWallet.KeySet.PaymentAddress + + // the returned currency is PRV + resTx := new(transaction.Tx) + err = resTx.InitTxSalary( + liqCustodian.MintedCollateralAmount, + &receiverAddr, + producerPrivateKey, + blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB(), + meta, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while initializing refund contribution (normal) tx: %+v", err) + return nil, nil + } + return resTx, nil +} + +// buildPortalAcceptedWithdrawRewardTx builds withdraw portal rewards response tx +// mints rewards in PRV for sending to custodian +func (blockGenerator *BlockGenerator) buildPortalAcceptedWithdrawRewardTx( + contentStr string, + producerPrivateKey *privacy.PrivateKey, + shardID byte, +) (metadata.Transaction, error) { + Logger.log.Info("[buildPortalAcceptedWithdrawRewardTx] Starting...") + contentBytes := []byte(contentStr) + var withdrawRewardContent metadata.PortalRequestWithdrawRewardContent + err := json.Unmarshal(contentBytes, &withdrawRewardContent) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while unmarshaling portal withdraw reward content: %+v", err) + return nil, nil + } + if withdrawRewardContent.ShardID != shardID { + return nil, nil + } + + meta := metadata.NewPortalWithdrawRewardResponse( + withdrawRewardContent.TxReqID, + withdrawRewardContent.CustodianAddressStr, + withdrawRewardContent.TokenID, + withdrawRewardContent.RewardAmount, + metadata.PortalRequestWithdrawRewardResponseMeta, + ) + + keyWallet, err := wallet.Base58CheckDeserialize(withdrawRewardContent.CustodianAddressStr) + if err != nil { + 
Logger.log.Errorf("ERROR: an error occured while deserializing custodian address string: %+v", err) + return nil, nil + } + receiverAddr := keyWallet.KeySet.PaymentAddress + + // the returned currency is PRV + if withdrawRewardContent.TokenID.String() == common.PRVIDStr { + resTx := new(transaction.Tx) + err = resTx.InitTxSalary( + withdrawRewardContent.RewardAmount, + &receiverAddr, + producerPrivateKey, + blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB(), + meta, + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while initializing withdraw portal reward tx: %+v", err) + return nil, nil + } + return resTx, nil + } else { + // in case the returned currency is privacy custom token + receiver := &privacy.PaymentInfo{ + Amount: withdrawRewardContent.RewardAmount, + PaymentAddress: receiverAddr, + } + var propertyID [common.HashSize]byte + copy(propertyID[:], withdrawRewardContent.TokenID[:]) + propID := common.Hash(propertyID) + tokenParams := &transaction.CustomTokenPrivacyParamTx{ + PropertyID: propID.String(), + // PropertyName: issuingAcceptedInst.IncTokenName, + // PropertySymbol: issuingAcceptedInst.IncTokenName, + Amount: withdrawRewardContent.RewardAmount, + TokenTxType: transaction.CustomTokenInit, + Receiver: []*privacy.PaymentInfo{receiver}, + TokenInput: []*privacy.InputCoin{}, + Mintable: true, + } + resTx := &transaction.TxCustomTokenPrivacy{} + txStateDB := blockGenerator.chain.BestState.Shard[shardID].GetCopiedTransactionStateDB() + featureStateDB := blockGenerator.chain.BestState.Beacon.GetCopiedFeatureStateDB() + err = resTx.Init( + transaction.NewTxPrivacyTokenInitParams( + producerPrivateKey, + []*privacy.PaymentInfo{}, + nil, + 0, + tokenParams, + txStateDB, + meta, + false, + false, + shardID, + nil, + featureStateDB, + ), + ) + if err != nil { + Logger.log.Errorf("ERROR: an error occured while initializing withdraw portal reward tx: %+v", err) + return nil, nil + } + return resTx, nil + } +} \ No 
newline at end of file diff --git a/blockchain/portalutils.go b/blockchain/portalutils.go new file mode 100644 index 0000000000..2b78439eb9 --- /dev/null +++ b/blockchain/portalutils.go @@ -0,0 +1,942 @@ +package blockchain + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/metadata" + "github.com/pkg/errors" + "math" + "math/big" + "math/rand" + "sort" + "time" +) + +type CurrentPortalState struct { + CustodianPoolState map[string]*statedb.CustodianState // key : hash(beaconHeight || custodian_address) + WaitingPortingRequests map[string]*statedb.WaitingPortingRequest // key : hash(beaconHeight || UniquePortingID) + WaitingRedeemRequests map[string]*statedb.WaitingRedeemRequest // key : hash(beaconHeight || UniqueRedeemID) + FinalExchangeRatesState map[string]*statedb.FinalExchangeRatesState // key : hash(beaconHeight || TxID) + LiquidateExchangeRatesPool map[string]*statedb.LiquidateExchangeRatesPool // key : hash(beaconHeight || TxID) + // it used for calculate reward for custodian at the end epoch + LockedCollateralState *statedb.LockedCollateralState + //Store temporary exchange rates requests + ExchangeRatesRequests map[string]*metadata.ExchangeRatesRequestStatus // key : hash(beaconHeight | TxID) +} + +type CustodianStateSlice struct { + Key string + Value *statedb.CustodianState +} + +type RedeemMemoBNB struct { + RedeemID string `json:"RedeemID"` + CustodianIncognitoAddress string `json:"CustodianIncognitoAddress"` +} + +type PortingMemoBNB struct { + PortingID string `json:"PortingID"` +} + +func InitCurrentPortalStateFromDB( + stateDB *statedb.StateDB, + beaconHeight uint64, +) (*CurrentPortalState, error) { + custodianPoolState, err := statedb.GetCustodianPoolState(stateDB, beaconHeight) + if err != nil { + return nil, err + } + waitingPortingReqs, err := statedb.GetWaitingPortingRequests(stateDB, 
beaconHeight) + if err != nil { + return nil, err + } + waitingRedeemReqs, err := statedb.GetWaitingRedeemRequests(stateDB, beaconHeight) + if err != nil { + return nil, err + } + finalExchangeRates, err := statedb.GetFinalExchangeRatesState(stateDB, beaconHeight) + if err != nil { + return nil, err + } + liquidateExchangeRatesPool, err := statedb.GetLiquidateExchangeRatesPool(stateDB, beaconHeight) + if err != nil { + return nil, err + } + lockedCollateralState, err := statedb.GetLockedCollateralStateByBeaconHeight(stateDB, beaconHeight) + if err != nil { + return nil, err + } + + return &CurrentPortalState{ + CustodianPoolState: custodianPoolState, + WaitingPortingRequests: waitingPortingReqs, + WaitingRedeemRequests: waitingRedeemReqs, + FinalExchangeRatesState: finalExchangeRates, + ExchangeRatesRequests: make(map[string]*metadata.ExchangeRatesRequestStatus), + LiquidateExchangeRatesPool: liquidateExchangeRatesPool, + LockedCollateralState: lockedCollateralState, + }, nil +} + +func storePortalStateToDB( + stateDB *statedb.StateDB, + beaconHeight uint64, + currentPortalState *CurrentPortalState, +) error { + err := statedb.StoreCustodianState(stateDB, beaconHeight, currentPortalState.CustodianPoolState) + if err != nil { + return err + } + err = statedb.StoreBulkWaitingPortingRequests(stateDB, beaconHeight, currentPortalState.WaitingPortingRequests) + if err != nil { + return err + } + err = statedb.StoreWaitingRedeemRequests(stateDB, beaconHeight, currentPortalState.WaitingRedeemRequests) + if err != nil { + return err + } + err = statedb.StoreBulkFinalExchangeRatesState(stateDB, beaconHeight, currentPortalState.FinalExchangeRatesState) + if err != nil { + return err + } + err = statedb.StoreBulkLiquidateExchangeRatesPool(stateDB, beaconHeight, currentPortalState.LiquidateExchangeRatesPool) + if err != nil { + return err + } + err = statedb.StoreLockedCollateralState(stateDB, beaconHeight, currentPortalState.LockedCollateralState) + if err != nil { + return 
err + } + + return nil +} + +func sortCustodianByAmountAscent( + metadata metadata.PortalUserRegister, + custodianState map[string]*statedb.CustodianState, + custodianStateSlice *[]CustodianStateSlice) { + //convert to slice + + var result []CustodianStateSlice + for k, v := range custodianState { + //check pTokenId, select only ptokenid + tokenIdExist := false + for _, remoteAddr := range v.GetRemoteAddresses() { + if remoteAddr.GetPTokenID() == metadata.PTokenId { + tokenIdExist = true + break + } + } + if !tokenIdExist { + continue + } + + item := CustodianStateSlice{ + Key: k, + Value: v, + } + result = append(result, item) + } + + sort.Slice(result, func(i, j int) bool { + return result[i].Value.GetFreeCollateral() <= result[j].Value.GetFreeCollateral() + }) + + *custodianStateSlice = result +} + +func pickSingleCustodian( + metadata metadata.PortalUserRegister, + exchangeRate *statedb.FinalExchangeRatesState, + custodianStateSlice []CustodianStateSlice, + currentPortalState *CurrentPortalState) ([]*statedb.MatchingPortingCustodianDetail, error) { + //sort random slice + var cloneCustodianList []CustodianStateSlice + copy(cloneCustodianList, custodianStateSlice) + + rand.Seed(time.Now().UnixNano()) + rand.Shuffle(len(cloneCustodianList), func(i, j int) { + cloneCustodianList[i], + cloneCustodianList[j] = cloneCustodianList[j], + cloneCustodianList[i] + }) + + //pToken to PRV + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + totalPTokenAfterUp150PercentUnit64 := up150Percent(metadata.RegisterAmount) //return nano pBTC, pBNB + + totalPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(metadata.PTokenId, totalPTokenAfterUp150PercentUnit64) + + if err != nil { + Logger.log.Errorf("Convert PToken is error %v", err) + return nil, err + } + + Logger.log.Infof("Porting request, pick single custodian ptoken: %v, need prv %v for %v ptoken", metadata.PTokenId, totalPRV, metadata.RegisterAmount) + + for _, kv := range 
cloneCustodianList { + Logger.log.Infof("Porting request, pick single custodian address %v, key %v, free collateral: %v", kv.Value.GetIncognitoAddress(), kv.Key, kv.Value.GetFreeCollateral()) + if kv.Value.GetFreeCollateral() > 0 && kv.Value.GetFreeCollateral() >= totalPRV { + result := make([]*statedb.MatchingPortingCustodianDetail, 1) + + remoteAddr, err := statedb.GetRemoteAddressByTokenID(kv.Value.GetRemoteAddresses(), metadata.PTokenId) + if err != nil { + Logger.log.Errorf("Error when get remote address by tokenID %v", err) + return nil, err + } + result[0] = &statedb.MatchingPortingCustodianDetail{ + IncAddress: kv.Value.GetIncognitoAddress(), + RemoteAddress: remoteAddr, + Amount: metadata.RegisterAmount, + LockedAmountCollateral: totalPRV, + RemainCollateral: kv.Value.GetFreeCollateral() - totalPRV, + } + + //update custodian state + err = UpdateCustodianWithNewAmount(currentPortalState, kv.Key, metadata.PTokenId, metadata.RegisterAmount, totalPRV) + + if err != nil { + return nil, err + } + + return result, nil + } + } + + return nil, nil +} + +func pickMultipleCustodian( + metadata metadata.PortalUserRegister, + exchangeRate *statedb.FinalExchangeRatesState, + custodianStateSlice []CustodianStateSlice, + currentPortalState *CurrentPortalState, +) ([]*statedb.MatchingPortingCustodianDetail, error) { + //get multiple custodian + var holdPToken uint64 = 0 + multipleCustodian := make([]*statedb.MatchingPortingCustodianDetail, 0) + + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + + for i := len(custodianStateSlice) - 1; i >= 0; i-- { + custodianItem := custodianStateSlice[i] + if holdPToken >= metadata.RegisterAmount { + break + } + Logger.log.Infof("Porting request, pick multiple custodian key: %v, has collateral %v", custodianItem.Key, custodianItem.Value.GetFreeCollateral()) + + //base on current FreeCollateral find PToken can use + totalPToken, err := convertExchangeRatesObj.ExchangePRV2PTokenByTokenId(metadata.PTokenId, 
custodianItem.Value.GetFreeCollateral()) + if err != nil { + Logger.log.Errorf("Convert PToken is error %v", err) + return nil, err + } + + pTokenHolded := down150Percent(totalPToken) + + remainPToken := metadata.RegisterAmount - holdPToken // 1000 - 833 = 167 + if pTokenHolded > remainPToken { + pTokenHolded = remainPToken + Logger.log.Infof("Porting request, custodian key: %v, ptoken amount is more larger than remain so custodian can keep ptoken %v", custodianItem.Key, pTokenHolded) + } else { + Logger.log.Infof("Porting request, pick multiple custodian key: %v, can keep ptoken %v", custodianItem.Key, pTokenHolded) + } + + totalPTokenAfterUp150PercentUnit64 := up150Percent(pTokenHolded) + totalPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(metadata.PTokenId, totalPTokenAfterUp150PercentUnit64) + + if err != nil { + Logger.log.Errorf("Convert PToken is error %v", err) + return nil, err + } + + Logger.log.Infof("Porting request, custodian key: %v, to keep ptoken %v need prv %v", custodianItem.Key, pTokenHolded, totalPRV) + + if custodianItem.Value.GetFreeCollateral() > 0 && custodianItem.Value.GetFreeCollateral() >= totalPRV { + + remoteAddr, err := statedb.GetRemoteAddressByTokenID(custodianItem.Value.GetRemoteAddresses(), metadata.PTokenId) + if err != nil { + Logger.log.Errorf("Error when get remote address by tokenID %v", err) + return nil, err + } + multipleCustodian = append( + multipleCustodian, + &statedb.MatchingPortingCustodianDetail{ + IncAddress: custodianItem.Value.GetIncognitoAddress(), + RemoteAddress: remoteAddr, + Amount: pTokenHolded, + LockedAmountCollateral: totalPRV, + RemainCollateral: custodianItem.Value.GetFreeCollateral() - totalPRV, + }, + ) + + holdPToken = holdPToken + pTokenHolded + + //update custodian state + err = UpdateCustodianWithNewAmount(currentPortalState, custodianItem.Key, metadata.PTokenId, pTokenHolded, totalPRV) + if err != nil { + return nil, err + } + } + } + + return multipleCustodian, nil +} + +func 
UpdateCustodianWithNewAmount(currentPortalState *CurrentPortalState, custodianKey string, PTokenId string, amountPToken uint64, lockedAmountCollateral uint64) error { + custodian, ok := currentPortalState.CustodianPoolState[custodianKey] + if !ok { + return errors.New("Custodian not found") + } + + freeCollateral := custodian.GetFreeCollateral() - lockedAmountCollateral + custodian.SetFreeCollateral(freeCollateral) + + //update ptoken holded + holdingPubTokensMapping := make(map[string]uint64) + if custodian.GetHoldingPublicTokens() == nil { + holdingPubTokensMapping[PTokenId] = amountPToken + } else { + for ptokenId, value := range custodian.GetHoldingPublicTokens() { + holdingPubTokensMapping[ptokenId] = value + } + holdingPubTokensMapping[PTokenId] += amountPToken + } + holdingPubTokens := holdingPubTokensMapping + custodian.SetHoldingPublicTokens(holdingPubTokens) + + //update collateral holded + if custodian.GetLockedAmountCollateral() == nil { + totalLockedAmountCollateral := make(map[string]uint64) + totalLockedAmountCollateral[PTokenId] = lockedAmountCollateral + custodian.SetLockedAmountCollateral(totalLockedAmountCollateral) + } else { + lockedAmount := custodian.GetLockedAmountCollateral() + lockedAmount[PTokenId] = lockedAmount[PTokenId] + lockedAmountCollateral + custodian.SetLockedAmountCollateral(lockedAmount) + } + + currentPortalState.CustodianPoolState[custodianKey] = custodian + + return nil +} + +func CalculatePortingFees(totalPToken uint64) uint64 { + result := common.PercentPortingFeeAmount * float64(totalPToken) / 100 + roundNumber := math.Round(result) + return uint64(roundNumber) +} + +func CalMinPortingFee(portingAmountInPToken uint64, tokenSymbol string, exchangeRate *statedb.FinalExchangeRatesState) (uint64, error) { + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + portingAmountInPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(tokenSymbol, portingAmountInPToken) + if err != nil { + Logger.log.Errorf("Error when calculating minimum 
porting fee %v", err) + return 0, err + } + + // can't use big int to calculate porting fee because of common.PercentPortingFeeAmount < 1 + portingFee := uint64(math.Round(float64(portingAmountInPRV) * common.PercentPortingFeeAmount / 100)) + + return portingFee, nil +} + +func CalMinRedeemFee(redeemAmountInPToken uint64, tokenSymbol string, exchangeRate *statedb.FinalExchangeRatesState) (uint64, error) { + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + redeemAmountInPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(tokenSymbol, redeemAmountInPToken) + if err != nil { + Logger.log.Errorf("Error when calculating minimum redeem fee %v", err) + return 0, err + } + + // can't use big int to calculate porting fee because of common.PercentRedeemFeeAmount < 1 + redeemFee := uint64(math.Round(float64(redeemAmountInPRV) * common.PercentRedeemFeeAmount / 100)) + + return redeemFee, nil +} + +/* + up 150% +*/ +func up150Percent(amount uint64) uint64 { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(amount), new(big.Int).SetUint64(150)) + result := new(big.Int).Div(tmp, new(big.Int).SetUint64(100)).Uint64() + return result //return nano pBTC, pBNB +} + +func down150Percent(amount uint64) uint64 { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(amount), new(big.Int).SetUint64(100)) + result := new(big.Int).Div(tmp, new(big.Int).SetUint64(150)).Uint64() + return result +} + +func calTotalLiquidationByExchangeRates(RedeemAmount uint64, liquidateExchangeRates statedb.LiquidateExchangeRatesDetail) (uint64, error) { + //todo: need review divide operator + // prv ------ total token + // ? 
amount token + + if liquidateExchangeRates.HoldAmountPubToken <= 0 { + return 0, errors.New("Can not divide 0") + } + + tmp := new(big.Int).Mul(big.NewInt(int64(liquidateExchangeRates.HoldAmountFreeCollateral)), big.NewInt(int64(RedeemAmount))) + totalPrv := new(big.Int).Div(tmp, big.NewInt(int64(liquidateExchangeRates.HoldAmountPubToken))) + return totalPrv.Uint64(), nil +} + +//check value is tp120 or tp130 +func IsTP120(tpValue uint64) (bool, bool) { + if tpValue > common.TP120 && tpValue <= common.TP130 { + return false, true + } + + if tpValue <= common.TP120 { + return true, true + } + + //not found + return false, false +} + +//filter TP for ptoken each custodian +func detectTopPercentileLiquidation(custodian *statedb.CustodianState, tpList map[string]uint64) (map[string]metadata.LiquidateTopPercentileExchangeRatesDetail, error) { + if custodian == nil { + return nil, errors.New("Custodian not found") + } + + liquidateExchangeRatesList := make(map[string]metadata.LiquidateTopPercentileExchangeRatesDetail) + for ptoken, tpValue := range tpList { + if tp20, ok := IsTP120(tpValue); ok { + if tp20 { + liquidateExchangeRatesList[ptoken] = metadata.LiquidateTopPercentileExchangeRatesDetail{ + TPKey: common.TP120, + TPValue: tpValue, + HoldAmountFreeCollateral: custodian.GetLockedAmountCollateral()[ptoken], + HoldAmountPubToken: custodian.GetHoldingPublicTokens()[ptoken], + } + } else { + liquidateExchangeRatesList[ptoken] = metadata.LiquidateTopPercentileExchangeRatesDetail{ + TPKey: common.TP130, + TPValue: tpValue, + HoldAmountFreeCollateral: 0, + HoldAmountPubToken: 0, + } + } + } + } + + return liquidateExchangeRatesList, nil +} + +//detect tp by hold ptoken and hold prv each custodian +func calculateTPRatio(holdPToken map[string]uint64, holdPRV map[string]uint64, finalExchange *statedb.FinalExchangeRatesState) (map[string]uint64, error) { + result := make(map[string]uint64) + convertExchangeRatesObj := NewConvertExchangeRatesObject(finalExchange) + for key, 
amountPToken := range holdPToken { + amountPRV, ok := holdPRV[key] + if !ok { + return nil, errors.New("Ptoken not found") + } + + if amountPRV <= 0 || amountPToken <= 0 { + return nil, errors.New("total PToken of custodian is zero") + } + + //(1): convert amount PToken to PRV + amountPTokenConverted, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(key, amountPToken) + + if err != nil { + return nil, errors.New("Exchange rates error") + } + + //(2): calculate % up-down from amount PRV and (1) + // total1: total ptoken was converted ex: 1BNB = 1000 PRV + // total2: total prv (was up 150%) + // 1500 ------ ? + //1000 ------ 100% + // => 1500 * 100 / 1000 = 150% + if amountPTokenConverted <= 0 { + return nil, errors.New("Can not divide zero") + } + //todo: calculate + percentUp := new(big.Int).Mul(big.NewInt(int64(amountPRV)), big.NewInt(100)) //amountPRV * 100 / amountPTokenConverted + roundNumber := new(big.Int).Div(percentUp, big.NewInt(int64(amountPTokenConverted))) // math.Ceil(float64(percentUp)) + result[key] = roundNumber.Uint64() + } + + return result, nil +} + +func CalAmountNeededDepositLiquidate(custodian *statedb.CustodianState, exchangeRates *statedb.FinalExchangeRatesState, pTokenId string, isFreeCollateralSelected bool) (uint64, uint64, uint64, error) { + totalPToken := up150Percent(custodian.GetHoldingPublicTokens()[pTokenId]) + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRates) + totalPRV, err := convertExchangeRatesObj.ExchangePToken2PRVByTokenId(pTokenId, totalPToken) + + if err != nil { + return 0, 0, 0, err + } + + totalAmountNeeded := totalPRV - custodian.GetLockedAmountCollateral()[pTokenId] + var remainAmountFreeCollateral uint64 + var totalFreeCollateralNeeded uint64 + + if isFreeCollateralSelected { + if custodian.GetFreeCollateral() >= totalAmountNeeded { + remainAmountFreeCollateral = custodian.GetFreeCollateral() - totalAmountNeeded + totalFreeCollateralNeeded = totalAmountNeeded + totalAmountNeeded = 0 + } 
else { + remainAmountFreeCollateral = 0 + totalFreeCollateralNeeded = custodian.GetFreeCollateral() + totalAmountNeeded = totalAmountNeeded - custodian.GetFreeCollateral() + } + + return totalAmountNeeded, totalFreeCollateralNeeded, remainAmountFreeCollateral, nil + } + + return totalAmountNeeded, 0, 0, nil +} + +func ValidationExchangeRates(exchangeRates *statedb.FinalExchangeRatesState) error { + if exchangeRates == nil || exchangeRates.Rates() == nil { + return errors.New("Exchange rates not found") + } + + if _, ok := exchangeRates.Rates()[common.PortalBTCIDStr]; !ok { + return errors.New("BTC rates is not exist") + } + + if _, ok := exchangeRates.Rates()[common.PortalBNBIDStr]; !ok { + return errors.New("BNB rates is not exist") + } + + if _, ok := exchangeRates.Rates()[common.PRVIDStr]; !ok { + return errors.New("PRV rates is not exist") + } + + return nil +} + +func sortCustodiansByAmountHoldingPubTokenAscent(tokenSymbol string, custodians map[string]*statedb.CustodianState) []*CustodianStateSlice { + sortedCustodians := make([]*CustodianStateSlice, 0) + for key, value := range custodians { + if value.GetHoldingPublicTokens()[tokenSymbol] > 0 { + item := CustodianStateSlice{ + Key: key, + Value: value, + } + sortedCustodians = append(sortedCustodians, &item) + } + } + + sort.Slice(sortedCustodians, func(i, j int) bool { + return sortedCustodians[i].Value.GetHoldingPublicTokens()[tokenSymbol] <= sortedCustodians[j].Value.GetHoldingPublicTokens()[tokenSymbol] + }) + + return sortedCustodians +} + +func pickupCustodianForRedeem(redeemAmount uint64, tokenID string, portalState *CurrentPortalState) ([]*statedb.MatchingRedeemCustodianDetail, error) { + custodianPoolState := portalState.CustodianPoolState + + // case 1: pick one custodian + // filter custodians + // bigCustodians who holding amount public token greater than or equal to redeem amount + // smallCustodians who holding amount public token less than redeem amount + bigCustodians := 
make(map[string]*statedb.CustodianState, 0) + bigCustodianKeys := make([]string, 0) + smallCustodians := make(map[string]*statedb.CustodianState, 0) + matchedCustodians := make([]*statedb.MatchingRedeemCustodianDetail, 0) + + for key, cus := range custodianPoolState { + holdingPubTokenAmount := cus.GetHoldingPublicTokens()[tokenID] + if holdingPubTokenAmount >= redeemAmount { + bigCustodians[key] = new(statedb.CustodianState) + bigCustodians[key] = cus + bigCustodianKeys = append(bigCustodianKeys, key) + } else if holdingPubTokenAmount > 0 { + smallCustodians[key] = new(statedb.CustodianState) + smallCustodians[key] = cus + } + } + + // random to pick-up one custodian in bigCustodians + if len(bigCustodians) > 0 { + randomIndexCus := rand.Intn(len(bigCustodians)) + custodianKey := bigCustodianKeys[randomIndexCus] + matchingCustodian := bigCustodians[custodianKey] + + remoteAddr, err := statedb.GetRemoteAddressByTokenID(matchingCustodian.GetRemoteAddresses(), tokenID) + if err != nil { + Logger.log.Errorf("Error when get remote address of custodian: %v", err) + return nil, err + } + matchedCustodians = append( + matchedCustodians, + statedb.NewMatchingRedeemCustodianDetailWithValue( + custodianPoolState[custodianKey].GetIncognitoAddress(), remoteAddr, redeemAmount)) + + return matchedCustodians, nil + } + + // case 2: pick-up multiple custodians in smallCustodians + if len(smallCustodians) == 0 { + Logger.log.Errorf("there is no custodian in custodian pool") + return nil, errors.New("there is no custodian in custodian pool") + } + // sort smallCustodians by amount holding public token + sortedCustodianSlice := sortCustodiansByAmountHoldingPubTokenAscent(tokenID, smallCustodians) + + // get custodians util matching full redeemAmount + totalMatchedAmount := uint64(0) + for i := len(sortedCustodianSlice) - 1; i >= 0; i-- { + custodianKey := sortedCustodianSlice[i].Key + custodianValue := sortedCustodianSlice[i].Value + + matchedAmount := 
custodianValue.GetHoldingPublicTokens()[tokenID] + amountNeedToBeMatched := redeemAmount - totalMatchedAmount + if matchedAmount > amountNeedToBeMatched { + matchedAmount = amountNeedToBeMatched + } + + remoteAddr, err := statedb.GetRemoteAddressByTokenID(custodianValue.GetRemoteAddresses(), tokenID) + if err != nil { + Logger.log.Errorf("Error when get remote address of custodian: %v", err) + return nil, err + } + + matchedCustodians = append( + matchedCustodians, + statedb.NewMatchingRedeemCustodianDetailWithValue( + custodianPoolState[custodianKey].GetIncognitoAddress(), remoteAddr, matchedAmount)) + + totalMatchedAmount += matchedAmount + if totalMatchedAmount >= redeemAmount { + return matchedCustodians, nil + } + } + + Logger.log.Errorf("Not enough amount public token to return user") + return nil, errors.New("Not enough amount public token to return user") +} + +// convertIncPBNBAmountToExternalBNBAmount converts amount in inc chain (decimal 9) to amount in bnb chain (decimal 8) +func convertIncPBNBAmountToExternalBNBAmount(incPBNBAmount int64) int64 { + return incPBNBAmount / 10 // incPBNBAmount / 1^9 * 1^8 +} + +// updateFreeCollateralCustodian updates custodian state (amount collaterals) when custodian returns redeemAmount public token to user +func updateFreeCollateralCustodian(custodianState *statedb.CustodianState, redeemAmount uint64, tokenID string, exchangeRate *statedb.FinalExchangeRatesState) (uint64, error) { + // calculate unlock amount for custodian + // if custodian returns redeem amount that is all amount holding of token => unlock full amount + // else => return 120% redeem amount + + convertExchangeRatesObj := NewConvertExchangeRatesObject(exchangeRate) + + unlockedAmount := uint64(0) + var err error + if custodianState.GetHoldingPublicTokens()[tokenID] == 0 { + unlockedAmount = custodianState.GetLockedAmountCollateral()[tokenID] + lockedAmountTmp := custodianState.GetLockedAmountCollateral() + lockedAmountTmp[tokenID] = 0 + 
custodianState.SetLockedAmountCollateral(lockedAmountTmp) + custodianState.SetFreeCollateral(custodianState.GetFreeCollateral() + unlockedAmount) + } else { + tmp := new(big.Int).Mul(new(big.Int).SetUint64(redeemAmount), new(big.Int).SetUint64(common.MinPercentUnlockedCollateralAmount)) + unlockedAmountInPToken := new(big.Int).Div(tmp, new(big.Int).SetUint64(100)).Uint64() + unlockedAmount, err = convertExchangeRatesObj.ExchangePToken2PRVByTokenId(tokenID, unlockedAmountInPToken) + + if err != nil { + Logger.log.Errorf("Convert PToken is error %v", err) + return 0, errors.New("[portal-updateFreeCollateralCustodian] error convert amount ptoken to amount in prv ") + } + + if unlockedAmount == 0 { + return 0, errors.New("[portal-updateFreeCollateralCustodian] error convert amount ptoken to amount in prv ") + } + if custodianState.GetLockedAmountCollateral()[tokenID] <= unlockedAmount { + return 0, errors.New("[portal-updateFreeCollateralCustodian] Locked amount must be greater than amount need to unlocked") + } + lockedAmountTmp := custodianState.GetLockedAmountCollateral() + lockedAmountTmp[tokenID] -= unlockedAmount + custodianState.SetLockedAmountCollateral(lockedAmountTmp) + custodianState.SetFreeCollateral(custodianState.GetFreeCollateral() + unlockedAmount) + } + return unlockedAmount, nil +} + +// updateRedeemRequestStatusByRedeemId updates status of redeem request into db +func updateRedeemRequestStatusByRedeemId(redeemID string, newStatus int, db *statedb.StateDB) error { + redeemRequestBytes, err := statedb.GetPortalRedeemRequestStatus(db, redeemID) + if err != nil { + return err + } + if len(redeemRequestBytes) == 0 { + return fmt.Errorf("Not found redeem request from db with redeemId %v\n", redeemID) + } + + var redeemRequest metadata.PortalRedeemRequestStatus + err = json.Unmarshal(redeemRequestBytes, &redeemRequest) + if err != nil { + return err + } + + redeemRequest.Status = byte(newStatus) + newRedeemRequest, err := json.Marshal(redeemRequest) + if 
err != nil { + return err + } + err = statedb.StorePortalRedeemRequestStatus(db, redeemID, newRedeemRequest) + if err != nil { + return err + } + return nil +} + +func updateCustodianStateAfterLiquidateCustodian(custodianState *statedb.CustodianState, mintedAmountInPRV uint64, tokenID string) { + custodianState.SetTotalCollateral(custodianState.GetTotalCollateral() - mintedAmountInPRV) + + if custodianState.GetHoldingPublicTokens()[tokenID] > 0 { + lockedAmountTmp := custodianState.GetLockedAmountCollateral() + lockedAmountTmp[tokenID] -= mintedAmountInPRV + custodianState.SetLockedAmountCollateral(lockedAmountTmp) + } else { + unlockedCollateralAmount := custodianState.GetLockedAmountCollateral()[tokenID] - mintedAmountInPRV + custodianState.SetFreeCollateral(custodianState.GetFreeCollateral() + unlockedCollateralAmount) + lockedAmountTmp := custodianState.GetLockedAmountCollateral() + lockedAmountTmp[tokenID] = 0 + custodianState.SetLockedAmountCollateral(lockedAmountTmp) + } +} + +func updateCustodianStateAfterExpiredPortingReq( + custodianState *statedb.CustodianState, unlockedAmount uint64, unholdingPublicToken uint64, tokenID string) { + + holdingPubTokenTmp := custodianState.GetHoldingPublicTokens() + holdingPubTokenTmp[tokenID] -= unholdingPublicToken + custodianState.SetHoldingPublicTokens(holdingPubTokenTmp) + + custodianState.SetFreeCollateral(custodianState.GetFreeCollateral() + unlockedAmount) + + lockedAmountTmp := custodianState.GetLockedAmountCollateral() + lockedAmountTmp[tokenID] -= unlockedAmount + custodianState.SetLockedAmountCollateral(lockedAmountTmp) +} + +func removeCustodianFromMatchingPortingCustodians(matchingCustodians []*statedb.MatchingPortingCustodianDetail, custodianIncAddr string) bool { + for i, cus := range matchingCustodians { + if cus.IncAddress == custodianIncAddr { + if i == len(matchingCustodians)-1 { + matchingCustodians = matchingCustodians[:i] + } else { + matchingCustodians = append(matchingCustodians[:i], 
matchingCustodians[i+1:]...) + } + return true + } + } + + return false +} + +func removeCustodianFromMatchingRedeemCustodians( + matchingCustodians []*statedb.MatchingRedeemCustodianDetail, + custodianIncAddr string) ([]*statedb.MatchingRedeemCustodianDetail, bool) { + for i, cus := range matchingCustodians { + if cus.GetIncognitoAddress() == custodianIncAddr { + if i == len(matchingCustodians)-1 { + matchingCustodians = matchingCustodians[:i] + } else { + matchingCustodians = append(matchingCustodians[:i], matchingCustodians[i+1:]...) + } + return matchingCustodians, true + } + } + + return matchingCustodians, false +} + +func deleteWaitingRedeemRequest(state *CurrentPortalState, waitingRedeemRequestKey string) { + delete(state.WaitingRedeemRequests, waitingRedeemRequestKey) +} + +func deleteWaitingPortingRequest(state *CurrentPortalState, waitingPortingRequestKey string) { + delete(state.WaitingPortingRequests, waitingPortingRequestKey) +} + +type ConvertExchangeRatesObject struct { + finalExchangeRates *statedb.FinalExchangeRatesState +} + +func NewConvertExchangeRatesObject(finalExchangeRates *statedb.FinalExchangeRatesState) *ConvertExchangeRatesObject { + return &ConvertExchangeRatesObject{finalExchangeRates: finalExchangeRates} +} + +func (c ConvertExchangeRatesObject) ExchangePToken2PRVByTokenId(pTokenId string, value uint64) (uint64, error) { + switch pTokenId { + case common.PortalBTCIDStr: + result, err := c.ExchangeBTC2PRV(value) + if err != nil { + return 0, err + } + + return result, nil + case common.PortalBNBIDStr: + result, err := c.ExchangeBNB2PRV(value) + if err != nil { + return 0, err + } + + return result, nil + } + + return 0, errors.New("Ptoken is not support") +} + +func (c *ConvertExchangeRatesObject) ExchangePRV2PTokenByTokenId(pTokenId string, value uint64) (uint64, error) { + switch pTokenId { + case common.PortalBTCIDStr: + return c.ExchangePRV2BTC(value) + case common.PortalBNBIDStr: + return c.ExchangePRV2BNB(value) + } + + return 
0, errors.New("Ptoken is not support") +} + +func (c *ConvertExchangeRatesObject) convert(value uint64, ratesFrom uint64, RatesTo uint64) (uint64, error) { + //convert to pusdt + total := new(big.Int).Mul(big.NewInt(int64(value)), big.NewInt(int64(ratesFrom))) + pUstd := new(big.Int).Div(total, big.NewInt(int64(math.Pow10(9)))) //value of nanno + + if RatesTo <= 0 { + return 0, errors.New("Can not divide zero") + } + + //pusdt -> new coin + result := new(big.Int).Mul(pUstd, big.NewInt(int64(math.Pow10(9)))) // (total * uint64(math.Pow10(9))) / RatesTo + roundNumber := new(big.Int).Div(result, big.NewInt(int64(RatesTo))) //round up + return roundNumber.Uint64(), nil + +} + +func (c *ConvertExchangeRatesObject) ExchangeBTC2PRV(value uint64) (uint64, error) { + //input : nano + //todo: check rates exist + BTCRates := c.finalExchangeRates.Rates()[common.PortalBTCIDStr].Amount //return nano pUSDT + PRVRates := c.finalExchangeRates.Rates()[common.PRVIDStr].Amount //return nano pUSDT + valueExchange, err := c.convert(value, BTCRates, PRVRates) + + if err != nil { + return 0, err + } + + Logger.log.Infof("================ Convert, BTC %d 2 PRV with BTCRates %d PRVRates %d , result %d", value, BTCRates, PRVRates, valueExchange) + + //nano + return valueExchange, nil +} + +func (c *ConvertExchangeRatesObject) ExchangeBNB2PRV(value uint64) (uint64, error) { + BNBRates := c.finalExchangeRates.Rates()[common.PortalBNBIDStr].Amount + PRVRates := c.finalExchangeRates.Rates()[common.PRVIDStr].Amount + + valueExchange, err := c.convert(value, BNBRates, PRVRates) + + if err != nil { + return 0, err + } + + Logger.log.Infof("================ Convert, BNB %v 2 PRV with BNBRates %v PRVRates %v, result %v", value, BNBRates, PRVRates, valueExchange) + + return valueExchange, nil +} + +func (c *ConvertExchangeRatesObject) ExchangePRV2BTC(value uint64) (uint64, error) { + //input nano + BTCRates := c.finalExchangeRates.Rates()[common.PortalBTCIDStr].Amount //return nano pUSDT + PRVRates := 
c.finalExchangeRates.Rates()[common.PRVIDStr].Amount //return nano pUSDT + + valueExchange, err := c.convert(value, PRVRates, BTCRates) + + if err != nil { + return 0, err + } + + Logger.log.Infof("================ Convert, PRV %v 2 BTC with BTCRates %v PRVRates %v, result %v", value, BTCRates, PRVRates, valueExchange) + + return valueExchange, nil +} + +func (c *ConvertExchangeRatesObject) ExchangePRV2BNB(value uint64) (uint64, error) { + BNBRates := c.finalExchangeRates.Rates()[common.PortalBNBIDStr].Amount + PRVRates := c.finalExchangeRates.Rates()[common.PRVIDStr].Amount + + valueExchange, err := c.convert(value, PRVRates, BNBRates) + if err != nil { + return 0, err + } + Logger.log.Infof("================ Convert, PRV %v 2 BNB with BNBRates %v PRVRates %v, result %v", value, BNBRates, PRVRates, valueExchange) + return valueExchange, nil +} + +func updateCurrentPortalStateOfLiquidationExchangeRates(beaconHeight uint64, currentPortalState *CurrentPortalState, custodianKey string, custodianState *statedb.CustodianState, detectTp map[string]metadata.LiquidateTopPercentileExchangeRatesDetail) { + //update custodian + for pTokenId, liquidateTopPercentileExchangeRatesDetail := range detectTp { + holdingPubTokenTmp := custodianState.GetHoldingPublicTokens() + holdingPubTokenTmp[pTokenId] -= liquidateTopPercentileExchangeRatesDetail.HoldAmountPubToken + custodianState.SetHoldingPublicTokens(holdingPubTokenTmp) + + lockedAmountTmp := custodianState.GetLockedAmountCollateral() + lockedAmountTmp[pTokenId] -= liquidateTopPercentileExchangeRatesDetail.HoldAmountFreeCollateral + custodianState.SetLockedAmountCollateral(lockedAmountTmp) + + custodianState.SetTotalCollateral(custodianState.GetTotalCollateral() - liquidateTopPercentileExchangeRatesDetail.HoldAmountFreeCollateral) + } + + currentPortalState.CustodianPoolState[custodianKey] = custodianState + //end + + //update LiquidateExchangeRates + liquidateExchangeRatesKey := 
statedb.GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight) + liquidateExchangeRates, ok := currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] + + Logger.log.Infof("update LiquidateExchangeRatesPool with liquidateExchangeRatesKey %v value %#v", liquidateExchangeRatesKey, detectTp) + if !ok { + item := make(map[string]statedb.LiquidateExchangeRatesDetail) + + for ptoken, liquidateTopPercentileExchangeRatesDetail := range detectTp { + item[ptoken] = statedb.LiquidateExchangeRatesDetail{ + HoldAmountFreeCollateral: liquidateTopPercentileExchangeRatesDetail.HoldAmountFreeCollateral, + HoldAmountPubToken: liquidateTopPercentileExchangeRatesDetail.HoldAmountPubToken, + } + } + currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] = statedb.NewLiquidateExchangeRatesPoolWithValue(item) + } else { + for ptoken, liquidateTopPercentileExchangeRatesDetail := range detectTp { + if _, ok := liquidateExchangeRates.Rates()[ptoken]; !ok { + liquidateExchangeRates.Rates()[ptoken] = statedb.LiquidateExchangeRatesDetail{ + HoldAmountFreeCollateral: liquidateTopPercentileExchangeRatesDetail.HoldAmountFreeCollateral, + HoldAmountPubToken: liquidateTopPercentileExchangeRatesDetail.HoldAmountPubToken, + } + } else { + liquidateExchangeRates.Rates()[ptoken] = statedb.LiquidateExchangeRatesDetail{ + HoldAmountFreeCollateral: liquidateExchangeRates.Rates()[ptoken].HoldAmountFreeCollateral + liquidateTopPercentileExchangeRatesDetail.HoldAmountFreeCollateral, + HoldAmountPubToken: liquidateExchangeRates.Rates()[ptoken].HoldAmountPubToken + liquidateTopPercentileExchangeRatesDetail.HoldAmountPubToken, + } + } + } + + currentPortalState.LiquidateExchangeRatesPool[liquidateExchangeRatesKey.String()] = liquidateExchangeRates + } + //end +} + +func getTotalLockedCollateralInEpoch(featureStateDB *statedb.StateDB, beaconHeight uint64) (uint64, error){ + currentPortalState, err := InitCurrentPortalStateFromDB(featureStateDB, 
beaconHeight) + if err != nil { + return 0, err + } + + return currentPortalState.LockedCollateralState.GetTotalLockedCollateralInEpoch(), nil +} diff --git a/blockchain/portalutils_test.go b/blockchain/portalutils_test.go new file mode 100644 index 0000000000..708fb43732 --- /dev/null +++ b/blockchain/portalutils_test.go @@ -0,0 +1,33 @@ +package blockchain + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCalculatePortingFees(t *testing.T) { + result := CalculatePortingFees(3106511852580) + assert.Equal(t, result, uint64(310651185)) +} + +func TestCurrentPortalStateStruct(t *testing.T) { + currentPortalState := &CurrentPortalState{} + + assert.NotEqual(t, currentPortalState, nil) + assert.Equal(t, len(currentPortalState.CustodianPoolState), 0) + assert.Equal(t, len(currentPortalState.ExchangeRatesRequests), 0) + assert.Equal(t, len(currentPortalState.WaitingPortingRequests), 0) + assert.Equal(t, len(currentPortalState.WaitingRedeemRequests), 0) + assert.Equal(t, len(currentPortalState.FinalExchangeRatesState), 0) + + finalExchangeRates := currentPortalState.FinalExchangeRatesState["abc"] + assert.Equal(t, finalExchangeRates.Rates, nil) + + _, ok := currentPortalState.CustodianPoolState["abc"] + assert.Equal(t, ok, false) + + for _, v := range currentPortalState.CustodianPoolState { + assert.Equal(t, 1, 0) + assert.NotNil(t, v) + } +} \ No newline at end of file diff --git a/blockchain/relayingutils.go b/blockchain/relayingutils.go new file mode 100644 index 0000000000..b2c1a9a064 --- /dev/null +++ b/blockchain/relayingutils.go @@ -0,0 +1,349 @@ +package blockchain + +import ( + "encoding/base64" + "encoding/json" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/rawdbv2" + "github.com/incognitochain/incognito-chain/incdb" + "github.com/incognitochain/incognito-chain/metadata" + bnbrelaying "github.com/incognitochain/incognito-chain/relaying/bnb" + btcrelaying 
"github.com/incognitochain/incognito-chain/relaying/btc" + "github.com/pkg/errors" + lvdbErrors "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/tendermint/tendermint/types" + "strconv" +) + +type relayingChain struct { + actions [][]string +} +type relayingBNBChain struct { + *relayingChain +} +type relayingBTCChain struct { + *relayingChain +} +type relayingProcessor interface { + getActions() [][]string + putAction(action []string) + buildRelayingInst( + blockchain *BlockChain, + relayingHeaderAction metadata.RelayingHeaderAction, + relayingState *RelayingHeaderChainState, + ) [][]string + buildHeaderRelayingInst( + senderAddressStr string, + header string, + blockHeight uint64, + metaType int, + shardID byte, + txReqID common.Hash, + status string, + ) []string +} +type portalManager struct { + relayingChains map[int]relayingProcessor +} + +func (rChain *relayingChain) getActions() [][]string { + return rChain.actions +} +func (rChain *relayingChain) putAction(action []string) { + rChain.actions = append(rChain.actions, action) +} +// buildHeaderRelayingInst builds a new instruction from action received from ShardToBeaconBlock +func (rChain *relayingChain) buildHeaderRelayingInst( + senderAddressStr string, + header string, + blockHeight uint64, + metaType int, + shardID byte, + txReqID common.Hash, + status string, +) []string { + headerRelayingContent := metadata.RelayingHeaderContent{ + IncogAddressStr: senderAddressStr, + Header: header, + TxReqID: txReqID, + BlockHeight: blockHeight, + } + headerRelayingContentBytes, _ := json.Marshal(headerRelayingContent) + return []string{ + strconv.Itoa(metaType), + strconv.Itoa(int(shardID)), + status, + string(headerRelayingContentBytes), + } +} + +func (rbnbChain *relayingBNBChain) buildRelayingInst( + blockchain *BlockChain, + relayingHeaderAction metadata.RelayingHeaderAction, + relayingHeaderChain *RelayingHeaderChainState, +) [][]string { + if relayingHeaderChain == nil { + Logger.log.Warn("WARN - 
[buildInstructionsForBNBHeaderRelaying]: relayingHeaderChain is null.") + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderRejectedChainStatus, + ) + return [][]string{inst} + } + meta := relayingHeaderAction.Meta + // parse and verify header chain + headerBytes, err := base64.StdEncoding.DecodeString(meta.Header) + if err != nil { + Logger.log.Errorf("Error - [buildInstructionsForBNBHeaderRelaying]: Cannot decode header string.%v\n", err) + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderRejectedChainStatus, + ) + return [][]string{inst} + } + + var newBlock types.Block + err = json.Unmarshal(headerBytes, &newBlock) + if err != nil { + Logger.log.Errorf("Error - [buildInstructionsForBNBHeaderRelaying]: Cannot unmarshal header.%v\n", err) + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderRejectedChainStatus, + ) + return [][]string{inst} + } + + if newBlock.Header.Height != int64(relayingHeaderAction.Meta.BlockHeight) { + Logger.log.Errorf("Error - [buildInstructionsForBNBHeaderRelaying]: Block height in metadata is unmatched with block height in new header.") + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + 
relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderRejectedChainStatus, + ) + return [][]string{inst} + } + + // if valid, create instruction with status accepted + // if not, create instruction with status rejected + latestBNBBlockHeader := relayingHeaderChain.BNBHeaderChain.LatestBlock + var isValid bool + var err2 error + relayingHeaderChain.BNBHeaderChain, isValid, err2 = relayingHeaderChain.BNBHeaderChain.AppendBlock( + &newBlock, blockchain.config.ChainParams.BNBRelayingHeaderChainID) + if bnbErr, ok := err2.(*bnbrelaying.BNBRelayingError); (ok && bnbErr != nil) || !isValid { + Logger.log.Errorf("Error - [buildInstructionsForBNBHeaderRelaying]: Verify new header failed. %v\n", err2) + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderRejectedChainStatus, + ) + return [][]string{inst} + } + + // check newBlock is a header contain last commit for one of the header in unconfirmed header list or not\ + // check newLatestBNBHeader is genesis header or not + genesisHeaderHeight, _ := bnbrelaying.GetGenesisBNBHeaderBlockHeight(blockchain.config.ChainParams.BNBRelayingHeaderChainID) + newLatestBNBHeader := relayingHeaderChain.BNBHeaderChain.LatestBlock + if newLatestBNBHeader != nil && newLatestBNBHeader.Height == genesisHeaderHeight && latestBNBBlockHeader == nil { + genesisBlockBytes, _ := json.Marshal(newLatestBNBHeader) + genesisBlockStr := base64.StdEncoding.EncodeToString(genesisBlockBytes) + //genesisBlockStr, _ := bnbrelaying.GetGenesisBNBHeaderStr(blockchain.config.ChainParams.BNBRelayingHeaderChainID) + inst1 := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + genesisBlockStr, + uint64(genesisHeaderHeight), + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + 
relayingHeaderAction.TxReqID, + common.RelayingHeaderConfirmedAcceptedChainStatus, + ) + + inst2 := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderUnconfirmedAcceptedChainStatus, + ) + return [][]string{inst1, inst2} + } + + if newLatestBNBHeader != nil && latestBNBBlockHeader != nil { + if newLatestBNBHeader.Height == latestBNBBlockHeader.Height + 1 { + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderConfirmedAcceptedChainStatus, + ) + return [][]string{inst} + } + } + + inst := rbnbChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderUnconfirmedAcceptedChainStatus, + ) + return [][]string{inst} +} + +func (rbtcChain *relayingBTCChain) buildRelayingInst( + blockchain *BlockChain, + relayingHeaderAction metadata.RelayingHeaderAction, + relayingState *RelayingHeaderChainState, +) [][]string { + Logger.log.Info("[BTC Relaying] - Processing buildRelayingInst...") + inst := rbtcChain.buildHeaderRelayingInst( + relayingHeaderAction.Meta.IncogAddressStr, + relayingHeaderAction.Meta.Header, + relayingHeaderAction.Meta.BlockHeight, + relayingHeaderAction.Meta.Type, + relayingHeaderAction.ShardID, + relayingHeaderAction.TxReqID, + common.RelayingHeaderConsideringChainStatus, + ) + return [][]string{inst} +} + +func NewPortalManager() *portalManager { + rbnbChain := &relayingBNBChain{ + 
relayingChain: &relayingChain{ + actions: [][]string{}, + }, + } + rbtcChain := &relayingBTCChain{ + relayingChain: &relayingChain{ + actions: [][]string{}, + }, + } + return &portalManager{ + relayingChains: map[int]relayingProcessor{ + metadata.RelayingBNBHeaderMeta: rbnbChain, + metadata.RelayingBTCHeaderMeta: rbtcChain, + }, + } +} + + +type RelayingHeaderChainState struct{ + BNBHeaderChain *bnbrelaying.LatestHeaderChain + BTCHeaderChain *btcrelaying.BlockChain +} + +func (bc *BlockChain) InitRelayingHeaderChainStateFromDB( + db incdb.Database, + beaconHeight uint64, +) (*RelayingHeaderChainState, error) { + bnbHeaderChainState, err := getBNBHeaderChainState(db, beaconHeight) + if err != nil { + return nil, err + } + + btcChain := bc.config.BTCChain + return &RelayingHeaderChainState{ + BNBHeaderChain: bnbHeaderChainState, + BTCHeaderChain: btcChain, + }, nil +} + + +// getBNBHeaderChainState gets bnb header chain state at beaconHeight +func getBNBHeaderChainState( + db incdb.Database, + beaconHeight uint64, +) (*bnbrelaying.LatestHeaderChain, error) { + relayingStateKey := rawdbv2.NewBNBHeaderRelayingStateKey(beaconHeight) + + relayingStateValueBytes, err := db.Get([]byte(relayingStateKey)) + if err != nil && err != lvdbErrors.ErrNotFound { + Logger.log.Errorf("getBNBHeaderChainState - Can not get relaying bnb header state from db %v\n", err) + return nil, err + } + + var hc bnbrelaying.LatestHeaderChain + if len(relayingStateValueBytes) > 0 { + err = json.Unmarshal(relayingStateValueBytes, &hc) + if err != nil { + Logger.log.Errorf("getBNBHeaderChainState - Can not unmarshal relaying bnb header state %v\n", err) + return nil, err + } + } + return &hc, nil +} + +// storeBNBHeaderChainState stores bnb header chain state at beaconHeight +func storeBNBHeaderChainState(db incdb.Database, + beaconHeight uint64, + bnbHeaderRelaying *bnbrelaying.LatestHeaderChain) error { + key := rawdbv2.NewBNBHeaderRelayingStateKey(beaconHeight) + value, err := 
json.Marshal(bnbHeaderRelaying) + if err != nil { + return err + } + err = db.Put([]byte(key), value) + if err != nil { + return rawdbv2.NewRawdbError(rawdbv2.StoreRelayingBNBHeaderError, errors.Wrap(err, "db.lvdb.put")) + } + return nil +} + +func storeRelayingHeaderStateToDB( + db incdb.Database, + beaconHeight uint64, + relayingHeaderState *RelayingHeaderChainState, +) error { + err := storeBNBHeaderChainState(db, beaconHeight, relayingHeaderState.BNBHeaderChain) + if err != nil { + return err + } + return nil +} + +// getBNBHeaderChainState gets bnb header chain state at beaconHeight +func getLatestRelayingBNBBlockHeight( + db incdb.Database, + beaconHeight uint64, +) (int64, error) { + bnbChainState, err := getBNBHeaderChainState(db, beaconHeight) + if err != nil { + return int64(0), err + } + + if bnbChainState.LatestBlock == nil { + return int64(0), errors.New("Latest bnb block is nil") + } + return bnbChainState.LatestBlock.Height, nil +} diff --git a/blockchain/retrieverformetadata.go b/blockchain/retrieverformetadata.go index 3b2a2cf1b6..e5128f0a9c 100644 --- a/blockchain/retrieverformetadata.go +++ b/blockchain/retrieverformetadata.go @@ -196,4 +196,4 @@ func (blockchain *BlockChain) GetBurningAddress(beaconHeight uint64) string { } return burningAddress2 -} +} \ No newline at end of file diff --git a/blockchain/salary.go b/blockchain/salary.go index 30b74a9cb2..ffd87e4a89 100644 --- a/blockchain/salary.go +++ b/blockchain/salary.go @@ -206,8 +206,8 @@ func (blockchain *BlockChain) processSalaryInstructions(rewardStateDB *statedb.S } } - } - return nil +} +return nil } func (blockchain *BlockChain) addShardCommitteeReward(rewardStateDB *statedb.StateDB, shardID byte, rewardInfoShardToProcess *metadata.ShardBlockRewardInfo, committeeOfShardToProcess []incognitokey.CommitteePublicKey, rewardReceiver map[string]string) (err error) { @@ -231,7 +231,10 @@ func (blockchain *BlockChain) addShardCommitteeReward(rewardStateDB *statedb.Sta return nil } -func 
(blockchain *BlockChain) buildRewardInstructionByEpoch(blkHeight, epoch uint64, rewardStateDB *statedb.StateDB) ([][]string, error) { +func (blockchain *BlockChain) buildRewardInstructionByEpoch( + blkHeight, epoch uint64, + rewardStateDB *statedb.StateDB, + isSplitRewardForCustodian bool) ([][]string, map[common.Hash]uint64, error) { var resInst [][]string var err error var instRewardForBeacons [][]string @@ -244,6 +247,7 @@ func (blockchain *BlockChain) buildRewardInstructionByEpoch(blkHeight, epoch uin totalRewards := make([]map[common.Hash]uint64, numberOfActiveShards) totalRewardForBeacon := map[common.Hash]uint64{} totalRewardForIncDAO := map[common.Hash]uint64{} + totalRewardForCustodian := map[common.Hash]uint64{} for ID := 0; ID < numberOfActiveShards; ID++ { if totalRewards[ID] == nil { totalRewards[ID] = map[common.Hash]uint64{} @@ -251,37 +255,38 @@ func (blockchain *BlockChain) buildRewardInstructionByEpoch(blkHeight, epoch uin for _, coinID := range allCoinID { totalRewards[ID][coinID], err = statedb.GetRewardOfShardByEpoch(rewardStateDB, epoch, byte(ID), coinID) if err != nil { - return nil, err + return nil, nil, err } if totalRewards[ID][coinID] == 0 { delete(totalRewards[ID], coinID) } } - rewardForBeacon, rewardForIncDAO, err := splitReward(&totalRewards[ID], numberOfActiveShards, percentForIncognitoDAO) + rewardForBeacon, rewardForIncDAO, rewardForCustodian, err := splitReward(&totalRewards[ID], numberOfActiveShards, percentForIncognitoDAO, isSplitRewardForCustodian) if err != nil { Logger.log.Infof("\n------------------------------------\nNot enough reward in epoch %v\n------------------------------------\n", err) } mapPlusMap(rewardForBeacon, &totalRewardForBeacon) mapPlusMap(rewardForIncDAO, &totalRewardForIncDAO) + mapPlusMap(rewardForCustodian, &totalRewardForCustodian) } if len(totalRewardForBeacon) > 0 { instRewardForBeacons, err = blockchain.buildInstRewardForBeacons(epoch, totalRewardForBeacon) if err != nil { - return nil, err + return 
nil, nil, err } } instRewardForShards, err = blockchain.buildInstRewardForShards(epoch, totalRewards) if err != nil { - return nil, err + return nil, nil, err } if len(totalRewardForIncDAO) > 0 { instRewardForIncDAO, err = blockchain.buildInstRewardForIncDAO(epoch, totalRewardForIncDAO) if err != nil { - return nil, err + return nil, nil, err } } resInst = common.AppendSliceString(instRewardForBeacons, instRewardForIncDAO, instRewardForShards) - return resInst, nil + return resInst, totalRewardForCustodian, nil } //buildInstRewardForBeacons create reward instruction for beacons @@ -368,7 +373,9 @@ func splitReward( totalReward *map[common.Hash]uint64, numberOfActiveShards int, devPercent int, + isSplitRewardForCustodian bool, ) ( + *map[common.Hash]uint64, *map[common.Hash]uint64, *map[common.Hash]uint64, error, @@ -376,19 +383,26 @@ func splitReward( hasValue := false rewardForBeacon := map[common.Hash]uint64{} rewardForIncDAO := map[common.Hash]uint64{} + rewardForCustodian := map[common.Hash]uint64{} for key, value := range *totalReward { rewardForBeacon[key] = 2 * (uint64(100-devPercent) * value) / ((uint64(numberOfActiveShards) + 2) * 100) - rewardForIncDAO[key] = uint64(devPercent) * value / uint64(100) - (*totalReward)[key] = value - (rewardForBeacon[key] + rewardForIncDAO[key]) + totalRewardForDAOAndCustodians := uint64(devPercent) * value / uint64(100) + if isSplitRewardForCustodian { + rewardForCustodian[key] = uint64(common.PercentCustodianRewards) * totalRewardForDAOAndCustodians / uint64(100) + rewardForIncDAO[key] = totalRewardForDAOAndCustodians - rewardForCustodian[key] + } else { + rewardForIncDAO[key] = totalRewardForDAOAndCustodians + } + (*totalReward)[key] = value - (rewardForBeacon[key] + totalRewardForDAOAndCustodians) if !hasValue { hasValue = true } } if !hasValue { //fmt.Printf("[ndh] not enough reward\n") - return nil, nil, NewBlockChainError(NotEnoughRewardError, errors.New("Not enough reward")) + return nil, nil, nil, 
NewBlockChainError(NotEnoughRewardError, errors.New("Not enough reward")) } - return &rewardForBeacon, &rewardForIncDAO, nil + return &rewardForBeacon, &rewardForIncDAO, &rewardForCustodian, nil } func getNoBlkPerYear(blockCreationTimeSeconds uint64) uint64 { diff --git a/blockchain/shardprocess.go b/blockchain/shardprocess.go index b23041ef7c..96c0391856 100644 --- a/blockchain/shardprocess.go +++ b/blockchain/shardprocess.go @@ -947,7 +947,7 @@ func (blockchain *BlockChain) processStoreShardBlock(shardBlock *ShardBlock, com metaType := tx.GetMetadataType() if metaType == metadata.WithDrawRewardResponseMeta { _, publicKey, amountRes, coinID := tx.GetTransferData() - err := statedb.RemoveCommitteeReward(tempShardBestState.rewardStateDB, publicKey, amountRes, *coinID) + err = statedb.RemoveCommitteeReward(tempShardBestState.rewardStateDB, publicKey, amountRes, *coinID) if err != nil { return NewBlockChainError(RemoveCommitteeRewardError, err) } @@ -1121,4 +1121,4 @@ func (blockchain *BlockChain) removeOldDataAfterProcessingShardBlock(shardBlock //Remove tx out of pool go blockchain.config.TxPool.RemoveTx(shardBlock.Body.Transactions, true) }() -} +} \ No newline at end of file diff --git a/blockchain/shardproducer.go b/blockchain/shardproducer.go index dbcc1c32ee..41673dea11 100644 --- a/blockchain/shardproducer.go +++ b/blockchain/shardproducer.go @@ -375,7 +375,41 @@ func (blockGenerator *BlockGenerator) buildResponseTxsFromBeaconInstructions(bea newTx, err = blockGenerator.buildPDEMatchedNReturnedContributionTx(l[3], producerPrivateKey, shardID) } } - + case metadata.PortalCustodianDepositMeta: + if len(l) >= 4 && l[2] == common.PortalCustodianDepositRefundChainStatus { + newTx, err = blockGenerator.buildPortalRefundCustodianDepositTx(l[3], producerPrivateKey, shardID) + } + case metadata.PortalUserRequestPTokenMeta: + if len(l) >= 4 && l[2] == common.PortalReqPTokensAcceptedChainStatus { + newTx, err = blockGenerator.buildPortalAcceptedRequestPTokensTx(l[3], 
producerPrivateKey, shardID) + } + //custodian withdraw + case metadata.PortalCustodianWithdrawRequestMeta: + if len(l) >= 4 && l[2] == common.PortalCustodianWithdrawRequestAcceptedStatus { + newTx, err = blockGenerator.buildPortalCustodianWithdrawRequest(l[3], producerPrivateKey, shardID) + } + case metadata.PortalRedeemRequestMeta: + if len(l) >= 4 && l[2] == common.PortalRedeemRequestRejectedChainStatus { + newTx, err = blockGenerator.buildPortalRejectedRedeemRequestTx(l[3], producerPrivateKey, shardID) + } + //liquidation: redeem ptoken + case metadata.PortalRedeemLiquidateExchangeRatesMeta: + if len(l) >= 4 && l[2] == common.PortalRedeemLiquidateExchangeRatesSuccessChainStatus { + newTx, err = blockGenerator.buildPortalRedeemLiquidateExchangeRatesRequestTx(l[3], producerPrivateKey, shardID) + } + case metadata.PortalLiquidateCustodianMeta: + if len(l) >= 4 && l[2] == common.PortalLiquidateCustodianSuccessChainStatus { + newTx, err = blockGenerator.buildPortalLiquidateCustodianResponseTx(l[3], producerPrivateKey, shardID) + } + case metadata.PortalRequestWithdrawRewardMeta: + if len(l) >= 4 && l[2] == common.PortalReqWithdrawRewardAcceptedChainStatus { + newTx, err = blockGenerator.buildPortalAcceptedWithdrawRewardTx(l[3], producerPrivateKey, shardID) + } + //liquidation: custodian deposit + case metadata.PortalLiquidationCustodianDepositMeta: + if len(l) >= 4 && l[2] == common.PortalLiquidationCustodianDepositRejectedChainStatus { + newTx, err = blockGenerator.buildPortalLiquidationCustodianDepositReject(l[3], producerPrivateKey, shardID) + } default: continue } @@ -393,9 +427,9 @@ func (blockGenerator *BlockGenerator) buildResponseTxsFromBeaconInstructions(bea responsedTxs = append(responsedTxs, newTx) responsedHashTxs = append(responsedHashTxs, newTxHash) } - } - } - return responsedTxs, errorInstructions, nil +} +} +return responsedTxs, errorInstructions, nil } // Process Instruction From Beacon Blocks: diff --git a/blockchain/syncblockutils_test.go 
b/blockchain/syncblockutils_test.go index afc1037231..9ea23e8134 100644 --- a/blockchain/syncblockutils_test.go +++ b/blockchain/syncblockutils_test.go @@ -1,8 +1,6 @@ package blockchain import ( - "github.com/btcsuite/btcd/database" - "github.com/incognitochain/incognito-chain/dataaccessobject/rawdb" "io/ioutil" "os" "reflect" diff --git a/common/common.go b/common/common.go index 04b72b187a..7f2be29bc4 100644 --- a/common/common.go +++ b/common/common.go @@ -460,6 +460,15 @@ func GetShardChainKey(shardID byte) string { return ShardChainKey + "-" + strconv.Itoa(int(shardID)) } +func IsPortalToken(tokenIDStr string) bool { + isExisted, _ := SliceExists(PortalSupportedIncTokenIDs, tokenIDStr) + return isExisted +} + +func IsPortalExchangeRateToken (tokenIDStr string) bool { + return IsPortalToken(tokenIDStr) || tokenIDStr == PRVIDStr +} + // CopyBytes returns an exact copy of the provided bytes. func CopyBytes(b []byte) (copiedBytes []byte) { if b == nil { @@ -496,4 +505,4 @@ func FromHex(s string) []byte { // HexToHash sets byte representation of s to hash. // If b is larger than len(h), b will be cropped from the left. 
-func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } +func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } \ No newline at end of file diff --git a/common/constants.go b/common/constants.go index cd52362280..76fa514358 100644 --- a/common/constants.go +++ b/common/constants.go @@ -1,5 +1,7 @@ package common +import "time" + // for common const ( EmptyString = "" @@ -96,7 +98,7 @@ const ( EthAddrStr = "0x0000000000000000000000000000000000000000" ) -// Bridge & PDE statuses for RPCs +// Bridge, PDE & Portal statuses for RPCs const ( BridgeRequestNotFoundStatus = 0 BridgeRequestProcessingStatus = 1 @@ -118,6 +120,52 @@ const ( MinTxFeesOnTokenRequirement = 10000000000000 // 10000 prv, this requirement is applied from beacon height 87301 mainnet BeaconBlockHeighMilestoneForMinTxFeesOnTokenRequirement = 87301 // milestone of beacon height, when apply min fee on token requirement + + //portal + PortalCustodianDepositAcceptedStatus = 1 + PortalCustodianDepositRefundStatus = 2 + + PortalReqPTokenAcceptedStatus = 1 + PortalReqPTokenRejectedStatus = 2 + + PortalPortingTxRequestAcceptedStatus = 1 + PortalPortingTxRequestRejectedStatus = 2 + + PortalPortingReqSuccessStatus = 1 + PortalPortingReqWaitingStatus = 2 + PortalPortingReqExpiredStatus = 3 + PortalPortingReqLiquidatedStatus = 4 + + PortalRedeemReqSuccessStatus = 1 + PortalRedeemReqWaitingStatus = 2 + PortalRedeemReqLiquidatedStatus = 3 + + PortalCustodianWithdrawReqAcceptedStatus = 1 + PortalCustodianWithdrawReqRejectStatus = 2 + + PortalReqUnlockCollateralAcceptedStatus = 1 + PortalReqUnlockCollateralRejectedStatus = 2 + + PortalLiquidateCustodianSuccessStatus = 1 + PortalLiquidateCustodianFailedStatus = 2 + + PortalLiquidationTPExchangeRatesSuccessStatus = 1 + PortalLiquidationTPExchangeRatesFailedStatus = 2 + + PortalReqWithdrawRewardAcceptedStatus = 1 + PortalReqWithdrawRewardRejectedStatus = 2 + + PortalRedeemLiquidateExchangeRatesSuccessStatus = 1 + 
PortalRedeemLiquidateExchangeRatesRejectedStatus = 2 + + PortalLiquidationCustodianDepositSuccessStatus = 1 + PortalLiquidationCustodianDepositRejectedStatus = 2 + + PortalExpiredPortingReqSuccessStatus = 1 + PortalExpiredPortingReqFailedStatus = 2 + + PortalExchangeRatesAcceptedStatus = 1 + PortalExchangeRatesRejectedStatus = 2 ) // PDE statuses for chain @@ -134,6 +182,80 @@ const ( PDEWithdrawalRejectedChainStatus = "rejected" ) +// Portal status for chain +const ( + PortalCustodianDepositAcceptedChainStatus = "accepted" + PortalCustodianDepositRefundChainStatus = "refund" + + PortalReqPTokensAcceptedChainStatus = "accepted" + PortalReqPTokensRejectedChainStatus = "rejected" + + PortalPortingRequestAcceptedChainStatus = "accepted" + PortalPortingRequestRejectedChainStatus = "rejected" + + PortalExchangeRatesAcceptedChainStatus = "accepted" + PortalExchangeRatesRejectedChainStatus = "rejected" + + PortalRedeemRequestAcceptedChainStatus = "accepted" + PortalRedeemRequestRejectedChainStatus = "rejected" + + PortalCustodianWithdrawRequestAcceptedStatus = "accepted" + PortalCustodianWithdrawRequestRejectedStatus = "rejected" + + PortalReqUnlockCollateralAcceptedChainStatus = "accepted" + PortalReqUnlockCollateralRejectedChainStatus = "rejected" + + PortalLiquidateCustodianSuccessChainStatus = "success" + PortalLiquidateCustodianFailedChainStatus = "failed" + + PortalLiquidateTPExchangeRatesSuccessChainStatus = "success" + PortalLiquidateTPExchangeRatesFailedChainStatus = "rejected" + + PortalReqWithdrawRewardAcceptedChainStatus = "accepted" + PortalReqWithdrawRewardRejectedChainStatus = "rejected" + + PortalRedeemLiquidateExchangeRatesSuccessChainStatus = "success" + PortalRedeemLiquidateExchangeRatesRejectedChainStatus = "rejected" + + PortalLiquidationCustodianDepositSuccessChainStatus = "success" + PortalLiquidationCustodianDepositRejectedChainStatus = "rejected" + + PortalExpiredWaitingPortingReqSuccessChainStatus = "success" + 
PortalExpiredWaitingPortingReqFailedChainStatus = "failed" +) + +// Relaying header +const ( + RelayingHeaderConfirmedAcceptedChainStatus = "confirmedAccepted" + RelayingHeaderUnconfirmedAcceptedChainStatus = "unconfirmedAccepted" + RelayingHeaderRejectedChainStatus = "rejected" + RelayingHeaderConsideringChainStatus = "considering" +) + +const ( + PortalTimeOutCustodianSendPubTokenBack = 24 * time.Hour // 24 hours + PortalTimeOutWaitingPortingRequest = 24 * time.Hour // 24 hours + PercentReceivedCollateralAmount = 120 // users will be received 120% of redeem amount in PRV (if there is custodian liquidation for redeem request) + MinPercentUnlockedCollateralAmount = 120 // minimum percent collateral amount will be unlocked after custodian return pubTokens for users + // todo: need to be updated before deploying + PercentCustodianRewards = 10 // 10% of DAO funds per epoch + + TP120 = 120 // 120% - minimum ratio between collateral's value and holding public tokens' value + TP130 = 130 + + PercentPortingFeeAmount = 0.01 // % + PercentRedeemFeeAmount = 0.01 // % +) + +const PortalBTCIDStr = "b832e5d3b1f01a4f0623f7fe91d6673461e1f5d37d91fe78c5c2e6183ff39696" +const PortalBNBIDStr = "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b" +const PRVIDStr = "0000000000000000000000000000000000000000000000000000000000000004" + +var PortalSupportedIncTokenIDs = []string{ + "b832e5d3b1f01a4f0623f7fe91d6673461e1f5d37d91fe78c5c2e6183ff39696", // pBTC + "b2655152784e8639fa19521a7035f331eea1f1e911b2f3200a507ebb4554387b", // pBNB +} + const ( HexEmptyRoot = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" ) diff --git a/common/log.go b/common/log.go index b4593e4e05..0e8778a152 100644 --- a/common/log.go +++ b/common/log.go @@ -330,6 +330,7 @@ func (b *Backend) print(lvl, tag string, args ...interface{}) { HandleCaptureMessage(string(mes[1]), lvl) } } + recycleBuffer(bytebuf) } @@ -361,6 +362,7 @@ func (b *Backend) printf(lvl, tag string, format string, 
args ...interface{}) { HandleCaptureMessage(string(mes[1]), lvl) } } + recycleBuffer(bytebuf) } diff --git a/dataaccessobject/rawdbv2/accessor_relaying.go b/dataaccessobject/rawdbv2/accessor_relaying.go new file mode 100644 index 0000000000..f769c3c491 --- /dev/null +++ b/dataaccessobject/rawdbv2/accessor_relaying.go @@ -0,0 +1,65 @@ +package rawdbv2 + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/incdb" + "github.com/tendermint/tendermint/types" +) + +// key prefix +var ( + RelayingBNBHeaderStatePrefix = []byte("relayingbnbheaderstate-") + RelayingBNBHeaderChainPrefix = []byte("relayingbnbheaderchain-") +) + +func NewBNBHeaderRelayingStateKey(beaconHeight uint64) string { + beaconHeightBytes := []byte(fmt.Sprintf("%d", beaconHeight)) + key := append(RelayingBNBHeaderStatePrefix, beaconHeightBytes...) + return string(key) //prefix + beaconHeight +} + +func NewRelayingBNBHeaderChainKey(blockHeight uint64) string { + blockHeightBytes := []byte(fmt.Sprintf("%d", blockHeight)) + key := append(RelayingBNBHeaderChainPrefix, blockHeightBytes...) 
+ return string(key) //prefix + blockHeight +} + +func StoreRelayingBNBHeaderChain(db incdb.Database, blockHeight uint64, header []byte) error { + key := NewRelayingBNBHeaderChainKey(blockHeight) + + err := db.Put([]byte(key), header) + if err != nil { + return NewRawdbError(StoreRelayingBNBHeaderError, err) + } + + return nil +} + +func GetRelayingBNBHeaderChain(db incdb.Database, blockHeight uint64) ([]byte, error) { + key := NewRelayingBNBHeaderChainKey(blockHeight) + + data, err := db.Get([]byte(key)) + if err != nil { + return nil, NewRawdbError(GetRelayingBNBHeaderError, err) + } + + return data, nil +} + +func GetBNBDataHashByBlockHeight(db incdb.Database, blockHeight uint64) ([]byte, error) { + key := NewRelayingBNBHeaderChainKey(blockHeight) + + data, err := db.Get([]byte(key)) + if err != nil { + return nil, NewRawdbError(GetRelayingBNBHeaderError, err) + } + + var bnbBlock types.Block + err = json.Unmarshal(data, &bnbBlock) + if err != nil { + return nil, NewRawdbError(GetRelayingBNBHeaderError, err) + } + + return bnbBlock.DataHash, nil +} \ No newline at end of file diff --git a/dataaccessobject/rawdbv2/error.go b/dataaccessobject/rawdbv2/error.go index 9faaf66b32..987144c545 100755 --- a/dataaccessobject/rawdbv2/error.go +++ b/dataaccessobject/rawdbv2/error.go @@ -66,6 +66,11 @@ const ( DeleteTransactionByHashError StoreTxByPublicKeyError GetTxByPublicKeyError + + // relaying - portal + StoreRelayingBNBHeaderError + GetRelayingBNBHeaderError + GetBNBDataHashError ) var ErrCodeMessage = map[int]struct { @@ -130,6 +135,11 @@ var ErrCodeMessage = map[int]struct { DeleteShardFeatureRootHashError: {-4027, "Delete Shard Feature Root Hash Error"}, DeleteShardSlashRootHashError: {-4028, "Delete Shard Slash Root Hash Error"}, RestoreCrossShardNextHeightsError: {-4029, "Restore Cross Shard Next Heights Error"}, + + // relaying + StoreRelayingBNBHeaderError: {-5001, "Store relaying header bnb error"}, + GetRelayingBNBHeaderError: {-5002, "Get relaying header bnb 
error"}, + GetBNBDataHashError: {-5003, "Get bnb data hash by block height error"}, } type RawdbError struct { diff --git a/dataaccessobject/statedb/accessor_portal.go b/dataaccessobject/statedb/accessor_portal.go new file mode 100644 index 0000000000..6b47576aa4 --- /dev/null +++ b/dataaccessobject/statedb/accessor_portal.go @@ -0,0 +1,615 @@ +package statedb + +import ( + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject" +) + +//====================== Redeem ====================== +func GetWaitingRedeemRequests(stateDB *StateDB, beaconHeight uint64) (map[string]*WaitingRedeemRequest, error) { + waitingRedeemRequests := stateDB.getAllWaitingRedeemRequest(beaconHeight) + return waitingRedeemRequests, nil +} + +// StoreWaitingRedeemRequests stores waiting redeem requests at beaconHeight +func StoreWaitingRedeemRequests( + stateDB *StateDB, + beaconHeight uint64, + waitingRedeemReqs map[string]*WaitingRedeemRequest) error { + for _, waitingReq := range waitingRedeemReqs { + key := GenerateWaitingRedeemRequestObjectKey(beaconHeight, waitingReq.uniqueRedeemID) + value := NewWaitingRedeemRequestWithValue( + waitingReq.uniqueRedeemID, + waitingReq.tokenID, + waitingReq.redeemerAddress, + waitingReq.redeemerRemoteAddress, + waitingReq.redeemAmount, + waitingReq.custodians, + waitingReq.redeemFee, + waitingReq.beaconHeight, + waitingReq.txReqID, + ) + err := stateDB.SetStateObject(WaitingRedeemRequestObjectType, key, value) + if err != nil { + return NewStatedbError(StoreWaitingRedeemRequestError, err) + } + } + + return nil +} + +func DeleteWaitingRedeemRequest(stateDB *StateDB, deletedWaitingRedeemRequests map[string]*WaitingRedeemRequest) { + for key, _ := range deletedWaitingRedeemRequests { + keyHash := common.Hash{} + copy(keyHash[:], key) + stateDB.MarkDeleteStateObject(WaitingRedeemRequestObjectType, keyHash) + } +} + +func StorePortalRedeemRequestStatus(stateDB *StateDB, redeemID string, 
statusContent []byte) error { + statusType := PortalRedeemRequestStatusPrefix() + statusSuffix := []byte(redeemID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalRedeemRequestStatusError, err) + } + + return nil +} + +func GetPortalRedeemRequestStatus(stateDB *StateDB, redeemID string) ([]byte, error) { + statusType := PortalRedeemRequestStatusPrefix() + statusSuffix := []byte(redeemID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil && err.(*StatedbError).GetErrorCode() != ErrCodeMessage[GetPortalStatusNotFoundError].Code { + return []byte{}, NewStatedbError(GetPortalRedeemRequestStatusError, err) + } + + return data, nil +} + +func StorePortalRedeemRequestByTxIDStatus(stateDB *StateDB, txID string, statusContent []byte) error { + statusType := PortalRedeemRequestStatusByTxReqIDPrefix() + statusSuffix := []byte(txID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalRedeemRequestByTxIDStatusError, err) + } + + return nil +} + +func GetPortalRedeemRequestByTxIDStatus(stateDB *StateDB, txID string) ([]byte, error) { + statusType := PortalRedeemRequestStatusByTxReqIDPrefix() + statusSuffix := []byte(txID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalRedeemRequestByTxIDStatusError, err) + } + + return data, nil +} + +//====================== Custodian pool ====================== +// getCustodianPoolState gets custodian pool state at beaconHeight +func GetCustodianPoolState( + stateDB *StateDB, + beaconHeight uint64, +) (map[string]*CustodianState, error) { + waitingRedeemRequests := stateDB.getAllCustodianStatePool(beaconHeight) + return waitingRedeemRequests, nil +} + +// StoreWaitingRedeemRequests stores waiting redeem requests at beaconHeight +func StoreCustodianState( + stateDB *StateDB, 
+ beaconHeight uint64, + custodians map[string]*CustodianState) error { + for _, cus := range custodians { + key := GenerateCustodianStateObjectKey(beaconHeight, cus.incognitoAddress) + value := NewCustodianStateWithValue( + cus.incognitoAddress, + cus.totalCollateral, + cus.freeCollateral, + cus.holdingPubTokens, + cus.lockedAmountCollateral, + cus.remoteAddresses, + cus.rewardAmount, + ) + err := stateDB.SetStateObject(CustodianStateObjectType, key, value) + if err != nil { + return NewStatedbError(StoreCustodianStateError, err) + } + } + + return nil +} + +func DeleteCustodianState(stateDB *StateDB, deletedCustodianStates map[string]*CustodianState) { + for key, _ := range deletedCustodianStates { + keyHash := common.Hash{} + copy(keyHash[:], key) + stateDB.MarkDeleteStateObject(CustodianStateObjectType, keyHash) + } +} + +func StoreCustodianDepositStatus(stateDB *StateDB, txID string, statusContent []byte) error { + statusType := PortalCustodianDepositStatusPrefix() + statusSuffix := []byte(txID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalCustodianDepositStatusError, err) + } + + return nil +} + +func GetCustodianDepositStatus(stateDB *StateDB, txID string) ([]byte, error) { + statusType := PortalCustodianDepositStatusPrefix() + statusSuffix := []byte(txID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalCustodianDepositStatusError, err) + } + + return data, nil +} + +func GetOneCustodian(stateDB *StateDB, beaconHeight uint64, custodianAddress string) (*CustodianState, error) { + key := GenerateCustodianStateObjectKey(beaconHeight, custodianAddress) + custodianState, has, err := stateDB.getCustodianByKey(key) + if err != nil { + return nil, NewStatedbError(GetPortalStatusError, err) + } + if !has { + return nil, NewStatedbError(GetPortalStatusError, fmt.Errorf("key with beacon height %+v, 
custodian address %+v not found", beaconHeight, custodianAddress)) + } + + return custodianState, nil +} + +//====================== Exchange rate ====================== +func GetFinalExchangeRatesState( + stateDB *StateDB, + beaconHeight uint64, +) (map[string]*FinalExchangeRatesState, error) { + finalExchangeRates := stateDB.getFinalExchangeRatesState(beaconHeight) + return finalExchangeRates, nil +} + +func GetFinalExchangeRatesByKey(stateDB *StateDB, beaconHeight uint64) (*FinalExchangeRatesState, error) { + key := GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + finalExchangeRates, has, err := stateDB.getFinalExchangeRatesByKey(key) + if err != nil { + return nil, NewStatedbError(GetPortalFinalExchangeRatesStateError, err) + } + if !has { + return nil, NewStatedbError(GetPortalFinalExchangeRatesStateError, fmt.Errorf("key with beacon height %+v not found", beaconHeight)) + } + + return finalExchangeRates, nil +} + +func StoreBulkFinalExchangeRatesState( + stateDB *StateDB, + beaconHeight uint64, + finalExchangeRatesState map[string]*FinalExchangeRatesState) error { + for _, exchangeRates := range finalExchangeRatesState { + key := GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight) + err := stateDB.SetStateObject(PortalFinalExchangeRatesStateObjectType, key, exchangeRates) + if err != nil { + return NewStatedbError(StoreFinalExchangeRatesStateError, err) + } + } + return nil +} + +//====================== Liquidation ====================== +func StorePortalLiquidationCustodianRunAwayStatus(stateDB *StateDB, redeemID string, custodianIncognitoAddress string, statusContent []byte) error { + statusType := PortalLiquidateCustodianRunAwayPrefix() + statusSuffix := append([]byte(redeemID), []byte(custodianIncognitoAddress)...) 
+ err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalLiquidationCustodianRunAwayStatusError, err) + } + + return nil +} + +func GetPortalLiquidationCustodianRunAwayStatus(stateDB *StateDB, redeemID string, custodianIncognitoAddress string, ) ([]byte, error) { + statusType := PortalLiquidateCustodianRunAwayPrefix() + statusSuffix := append([]byte(redeemID), []byte(custodianIncognitoAddress)...) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalLiquidationCustodianRunAwayStatusError, err) + } + + return data, nil +} + +func StorePortalExpiredPortingRequestStatus(stateDB *StateDB, waitingPortingID string, statusContent []byte) error { + statusType := PortalExpiredPortingReqPrefix() + statusSuffix := []byte(waitingPortingID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalLiquidationCustodianRunAwayStatusError, err) + } + + return nil +} + +func GetPortalExpiredPortingRequestStatus(stateDB *StateDB, waitingPortingID string) ([]byte, error) { + statusType := PortalExpiredPortingReqPrefix() + statusSuffix := []byte(waitingPortingID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalLiquidationCustodianRunAwayStatusError, err) + } + + return data, nil +} + +func GetLiquidateExchangeRatesPool( + stateDB *StateDB, + beaconHeight uint64, +) (map[string]*LiquidateExchangeRatesPool, error) { + liquidateExchangeRates := stateDB.getLiquidateExchangeRatesPool(beaconHeight) + return liquidateExchangeRates, nil +} + +func StoreBulkLiquidateExchangeRatesPool( + stateDB *StateDB, + beaconHeight uint64, + liquidateExchangeRates map[string]*LiquidateExchangeRatesPool, +) error { + for _, value := range liquidateExchangeRates { + key := 
GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight) + err := stateDB.SetStateObject(PortalLiquidationExchangeRatesPoolObjectType, key, value) + if err != nil { + return NewStatedbError(StoreLiquidateExchangeRatesPoolError, err) + } + } + return nil +} + +func GetLiquidateExchangeRatesPoolByKey(stateDB *StateDB, beaconHeight uint64) (*LiquidateExchangeRatesPool, error) { + key := GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight) + liquidateExchangeRates, has, err := stateDB.getLiquidateExchangeRatesPoolByKey(key) + if err != nil { + return nil, NewStatedbError(GetPortalLiquidationExchangeRatesPoolError, err) + } + if !has { + return nil, NewStatedbError(GetPortalLiquidationExchangeRatesPoolError, fmt.Errorf("key with beacon height %+v not found", beaconHeight)) + } + + return liquidateExchangeRates, nil +} + +//====================== Porting ====================== +func TrackPortalStateStatusMultiple(stateDB *StateDB, statusType []byte, statusSuffix []byte, statusContent []byte, beaconHeight uint64) error { + key := GeneratePortalStatusObjectKey(statusType, statusSuffix) + value := NewPortalStatusStateWithValue(statusType, statusSuffix, statusContent) + + dataaccessobject.Logger.Log.Infof("TrackPortalStateStatusMultiple [beaconHeight: %v] statusType: %+v, statusSuffix: %+v, value: %+v",beaconHeight, string(statusType), string(statusSuffix), value.ToString()) + + err := stateDB.SetStateObject(PortalStatusObjectType, key, value) + + var errType int + switch string(statusType) { + case string(PortalLiquidationTpExchangeRatesStatusPrefix()): + errType = StoreLiquidateTopPercentileExchangeRatesError + case string(PortalLiquidationRedeemRequestStatusPrefix()): + errType = StoreRedeemLiquidationExchangeRatesError + case string(PortalLiquidationCustodianDepositStatusPrefix()): + errType = StoreLiquidationCustodianDepositError + case string(PortalPortingRequestStatusPrefix()): + errType = StorePortalStatusError + case 
string(PortalPortingRequestTxStatusPrefix()): + errType = StorePortalTxStatusError + case string(PortalExchangeRatesRequestStatusPrefix()): + errType = StorePortalExchangeRatesStatusError + case string(PortalCustodianWithdrawStatusPrefix()): + errType = StorePortalCustodianWithdrawRequestStatusError + default: + errType = StorePortalStatusError + } + + if err != nil { + return NewStatedbError(errType, err) + } + + return nil +} + +func GetPortalStateStatusMultiple(stateDB *StateDB, statusType []byte, statusSuffix []byte) ([]byte, error) { + key := GeneratePortalStatusObjectKey(statusType, statusSuffix) + s, has, err := stateDB.getPortalStatusByKey(key) + + var errType int + switch string(statusType) { + case string(PortalPortingRequestStatusPrefix()): + errType = GetPortingRequestStatusError + case string(PortalPortingRequestTxStatusPrefix()): + errType = GetPortingRequestTxStatusError + case string(PortalLiquidationTpExchangeRatesStatusPrefix()): + errType = GetLiquidationTopPercentileExchangeRatesStatusError + case string(PortalCustodianWithdrawStatusPrefix()): + errType = GetPortalCustodianWithdrawStatusError + default: + errType = StorePortalStatusError + } + + if err != nil { + return []byte{}, NewStatedbError(errType, err) + } + + if !has { + return []byte{}, NewStatedbError(errType, fmt.Errorf("status %+v with prefix %+v not found", string(statusType), string(statusSuffix))) + } + + return s.statusContent, nil +} + +func IsPortingRequestIdExist(stateDB *StateDB, statusSuffix []byte) (bool, error) { + key := GeneratePortalStatusObjectKey(PortalPortingRequestStatusPrefix(), statusSuffix) + _, has, err := stateDB.getPortalStatusByKey(key) + + if err != nil { + return false, NewStatedbError(GetPortingRequestStatusError, err) + } + + if !has { + return false, nil + } + + return true, nil +} + +//====================== Waiting Porting ====================== +// getCustodianPoolState gets custodian pool state at beaconHeight +func GetWaitingPortingRequests( + 
stateDB *StateDB, + beaconHeight uint64, +) (map[string]*WaitingPortingRequest, error) { + waitingPortingRequestList := stateDB.getWaitingPortingRequests(beaconHeight) + return waitingPortingRequestList, nil +} + +// StoreWaitingRedeemRequests stores waiting redeem requests at beaconHeight +func StoreBulkWaitingPortingRequests( + stateDB *StateDB, + beaconHeight uint64, + waitingPortingRequest map[string]*WaitingPortingRequest) error { + for _, items := range waitingPortingRequest { + key := GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, items.UniquePortingID()) + err := stateDB.SetStateObject(PortalWaitingPortingRequestObjectType, key, items) + if err != nil { + return NewStatedbError(StoreWaitingPortingRequestError, err) + } + } + return nil +} + +func StoreWaitingPortingRequests(stateDB *StateDB, beaconHeight uint64, portingRequestId string, statusContent *WaitingPortingRequest) error { + key := GeneratePortalWaitingPortingRequestObjectKey(beaconHeight, portingRequestId) + err := stateDB.SetStateObject(PortalWaitingPortingRequestObjectType, key, statusContent) + if err != nil { + return NewStatedbError(StoreWaitingPortingRequestError, err) + } + + return nil +} + +func DeleteWaitingPortingRequest(stateDB *StateDB, deletedWaitingPortingRequests map[string]*WaitingPortingRequest) { + for key, _ := range deletedWaitingPortingRequests { + keyHash := common.Hash{} + copy(keyHash[:], key) + stateDB.MarkDeleteStateObject(PortalWaitingPortingRequestObjectType, keyHash) + } +} + +//====================== Portal status ====================== +func StorePortalStatus(stateDB *StateDB, statusType []byte, statusSuffix []byte, statusContent []byte) error { + key := GeneratePortalStatusObjectKey(statusType, statusSuffix) + value := NewPortalStatusStateWithValue(statusType, statusSuffix, statusContent) + err := stateDB.SetStateObject(PortalStatusObjectType, key, value) + if err != nil { + return NewStatedbError(StorePortalStatusError, err) + } + return nil +} + +func 
GetPortalStatus(stateDB *StateDB, statusType []byte, statusSuffix []byte) ([]byte, error) { + key := GeneratePortalStatusObjectKey(statusType, statusSuffix) + s, has, err := stateDB.getPortalStatusByKey(key) + if err != nil { + return []byte{}, NewStatedbError(GetPortalStatusError, err) + } + if !has { + return []byte{}, NewStatedbError(GetPortalStatusNotFoundError, fmt.Errorf("status %+v with prefix %+v not found", string(statusType), string(statusSuffix))) + } + return s.statusContent, nil +} + +func StoreRequestPTokenStatus(stateDB *StateDB, txID string, statusContent []byte) error { + statusType := PortalRequestPTokenStatusPrefix() + statusSuffix := []byte(txID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalRequestPTokenStatusError, err) + } + + return nil +} + +func GetRequestPTokenStatus(stateDB *StateDB, txID string) ([]byte, error) { + statusType := PortalRequestPTokenStatusPrefix() + statusSuffix := []byte(txID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalRequestPTokenStatusError, err) + } + + return data, nil +} + +func StorePortalRequestUnlockCollateralStatus(stateDB *StateDB, txID string, statusContent []byte) error { + statusType := PortalRequestUnlockCollateralStatusPrefix() + statusSuffix := []byte(txID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalRequestUnlockCollateralStatusError, err) + } + + return nil +} + +func GetPortalRequestUnlockCollateralStatus(stateDB *StateDB, txID string) ([]byte, error) { + statusType := PortalRequestUnlockCollateralStatusPrefix() + statusSuffix := []byte(txID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalRequestUnlockCollateralStatusError, err) + } + + return data, nil +} + 
+//====================== Portal reward ====================== +// GetPortalRewardsByBeaconHeight gets portal reward state at beaconHeight +func GetPortalRewardsByBeaconHeight( + stateDB *StateDB, + beaconHeight uint64, +) ([]*PortalRewardInfo, error) { + portalRewards := stateDB.getPortalRewards(beaconHeight) + return portalRewards, nil +} + +// StoreWaitingRedeemRequests stores waiting redeem requests at beaconHeight +func StorePortalRewards( + stateDB *StateDB, + beaconHeight uint64, + portalRewardInfos []*PortalRewardInfo) error { + for _, info := range portalRewardInfos { + key := GeneratePortalRewardInfoObjectKey(beaconHeight, info.custodianIncAddr) + value := NewPortalRewardInfoWithValue( + info.custodianIncAddr, + info.rewards, + ) + err := stateDB.SetStateObject(PortalRewardInfoObjectType, key, value) + if err != nil { + return NewStatedbError(StorePortalRewardError, err) + } + } + + return nil +} + +func StorePortalRequestWithdrawRewardStatus(stateDB *StateDB, txID string, statusContent []byte) error { + statusType := PortalRequestWithdrawRewardStatusPrefix() + statusSuffix := []byte(txID) + err := StorePortalStatus(stateDB, statusType, statusSuffix, statusContent) + if err != nil { + return NewStatedbError(StorePortalRequestWithdrawRewardStatusError, err) + } + + return nil +} + +func GetPortalRequestWithdrawRewardStatus(stateDB *StateDB, txID string) ([]byte, error) { + statusType := PortalRequestWithdrawRewardStatusPrefix() + statusSuffix := []byte(txID) + data, err := GetPortalStatus(stateDB, statusType, statusSuffix) + if err != nil { + return []byte{}, NewStatedbError(GetPortalRequestWithdrawRewardStatusError, err) + } + + return data, nil +} + +func GetLockedCollateralStateByBeaconHeight( + stateDB *StateDB, + beaconHeight uint64, +) (*LockedCollateralState, error) { + lockedCollateralState, _, err := stateDB.getLockedCollateralState(beaconHeight) + if err != nil { + return nil, NewStatedbError(GetLockedCollateralStateError, err) + } + return 
lockedCollateralState, nil +} + +// StoreLockedCollateralState stores locked collateral state at beaconHeight +func StoreLockedCollateralState( + stateDB *StateDB, + beaconHeight uint64, + lockedCollateralState *LockedCollateralState) error { + key := GenerateLockedCollateralStateObjectKey(beaconHeight) + err := stateDB.SetStateObject(LockedCollateralStateObjectType, key, lockedCollateralState) + if err != nil { + return NewStatedbError(StoreLockedCollateralStateError, err) + } + + return nil +} + +//====================== Feature reward ====================== +func StoreRewardFeatureState( + stateDB *StateDB, + featureName string, + rewardInfo []*RewardInfoDetail, + epoch uint64) error { + key := GenerateRewardFeatureStateObjectKey(featureName, epoch) + value := NewRewardFeatureStateWithValue(rewardInfo) + + err := stateDB.SetStateObject(RewardFeatureStateObjectType, key, value) + if err != nil { + return NewStatedbError(StoreRewardFeatureError, err) + } + + return nil +} + +func GetRewardFeatureAmountByTokenID( + stateDB *StateDB, + tokenID string, + epoch uint64) (uint64, error) { + + totalAmount := uint64(0) + // reset for portal reward + allRewardFeature, err := GetAllRewardFeatureState(stateDB, epoch) + if err != nil { + return uint64(0), NewStatedbError(GetRewardFeatureAmountByTokenIDError, err) + } + totalRewards := allRewardFeature.GetTotalRewards() + for i := 0; i < len(totalRewards); i++ { + if totalRewards[i].GetTokenID() == tokenID { + totalAmount = totalRewards[i].GetAmount() + break + } + } + + return totalAmount, nil +} + +func GetRewardFeatureStateByFeatureName( + stateDB *StateDB, + featureName string, + epoch uint64) (*RewardFeatureState, error) { + result, _, err := stateDB.getFeatureRewardByFeatureName(featureName, epoch) + if err != nil { + return nil, NewStatedbError(GetRewardFeatureError, err) + } + + return result, nil +} + +func GetAllRewardFeatureState( + stateDB *StateDB, epoch uint64) (*RewardFeatureState, error) { + result, _, err := 
stateDB.getAllFeatureRewards(epoch) + if err != nil { + return nil, NewStatedbError(GetAllRewardFeatureError, err) + } + + return result, nil +} diff --git a/dataaccessobject/statedb/constant.go b/dataaccessobject/statedb/constant.go index c9d0544a22..d546416cc2 100644 --- a/dataaccessobject/statedb/constant.go +++ b/dataaccessobject/statedb/constant.go @@ -28,6 +28,22 @@ const ( BridgeStatusObjectType BurningConfirmObjectType TokenTransactionObjectType + + // portal + //final exchange rates + PortalFinalExchangeRatesStateObjectType + //waiting porting request + PortalWaitingPortingRequestObjectType + //liquidation + PortalLiquidationExchangeRatesPoolObjectType + + //B + PortalStatusObjectType + CustodianStateObjectType + WaitingRedeemRequestObjectType + PortalRewardInfoObjectType + LockedCollateralStateObjectType + RewardFeatureStateObjectType ) // Prefix length diff --git a/dataaccessobject/statedb/error.go b/dataaccessobject/statedb/error.go index 41b906a4a8..1bd49523eb 100644 --- a/dataaccessobject/statedb/error.go +++ b/dataaccessobject/statedb/error.go @@ -30,6 +30,17 @@ const ( ErrInvalidBridgeStatusStateType = "invalid bridge status state type" ErrInvalidBurningConfirmStateType = "invalid burning confirm state type" ErrInvalidTokenTransactionStateType = "invalid token transaction state type" + //A + ErrInvalidFinalExchangeRatesStateType = "invalid final exchange rates state type" + ErrInvalidLiquidationExchangeRatesType = "invalid liquidation exchange rates type" + ErrInvalidWaitingPortingRequestType = "invalid waiting porting request type" + //B + ErrInvalidPortalStatusStateType = "invalid portal status state type" + ErrInvalidPortalCustodianStateType = "invalid portal custodian state type" + ErrInvalidPortalWaitingRedeemRequestType = "invalid portal waiting redeem request type" + ErrInvalidPortalRewardInfoStateType = "invalid portal reward info state type" + ErrInvalidPortalLockedCollateralStateType = "invalid portal locked collateral state type" + 
ErrInvalidRewardFeatureStateType = "invalid feature reward state type" ) const ( InvalidByteArrayTypeError = iota @@ -112,6 +123,78 @@ const ( // burning confirm StoreBurningConfirmError GetBurningConfirmError + + //portal + StoreCustodianDepositStateError + StoreCustodianStateError + StoreWaitingRedeemRequestError + StoreRedeemRequestStateError + TrackCustodianDepositError + TrackReqPTokenError + GetItemPortalByKeyError + GetItemPortalByKeyNotFound + GetCustodianDepositStatusError + GetReqPTokenStatusError + StoreRedeemRequestError + TrackRedeemReqByTxReqIDError + TrackReqUnlockCollateralByTxReqIDError + GetReqUnlockCollateralStatusError + TrackLiquidateCustodianError + StorePortalRewardError + StorePortalStatusError + StorePortalTxStatusError + GetPortalStatusError + GetPortalStatusNotFoundError + GetPortalRedeemRequestStatusError + StorePortalRedeemRequestStatusError + StorePortalCustodianDepositStatusError + GetPortalCustodianDepositStatusError + StorePortalRequestPTokenStatusError + GetPortalRequestPTokenStatusError + GetPortalRedeemRequestByTxIDStatusError + StorePortalRedeemRequestByTxIDStatusError + GetPortalRequestUnlockCollateralStatusError + StorePortalRequestUnlockCollateralStatusError + GetPortalLiquidationCustodianRunAwayStatusError + StorePortalLiquidationCustodianRunAwayStatusError + GetPortalExpiredPortingReqStatusError + StorePortalExpiredPortingReqStatusError + GetPortalRequestWithdrawRewardStatusError + StorePortalRequestWithdrawRewardStatusError + StoreLockedCollateralStateError + GetLockedCollateralStateError + + //porting request + GetPortingRequestTxStatusError + GetPortingRequestStatusError + StorePortingRequestStateError + StoreWaitingPortingRequestError + //exchange rates + GetPortalFinalExchangeRatesStateError + StorePortalExchangeRatesStatusError + StoreExchangeRatesRequestStateError + StoreFinalExchangeRatesStateError + + //liquidation exchange rates + GetPortalLiquidationExchangeRatesPoolError + 
GetLiquidationTopPercentileExchangeRatesStatusError + StoreLiquidateTopPercentileExchangeRatesError + StoreLiquidateExchangeRatesPoolError + //liquidation custodian deposit + StoreLiquidationCustodianDepositError + //liquidation user redeem + StoreRedeemLiquidationExchangeRatesError + + //custodian withdraw + StorePortalCustodianWithdrawRequestStatusError + GetPortalCustodianWithdrawStatusError + + // feature rewards + StoreRewardFeatureError + GetRewardFeatureError + GetAllRewardFeatureError + ResetAllFeatureRewardByTokenIDError + GetRewardFeatureAmountByTokenIDError ) var ErrCodeMessage = map[int]struct { @@ -188,6 +271,70 @@ var ErrCodeMessage = map[int]struct { // -6xxx: burning confirm StoreBurningConfirmError: {-6000, "Store Burning Confirm Error"}, GetBurningConfirmError: {-6001, "Get Burning Confirm Error"}, + + //portal + StoreCustodianDepositStateError: {-14001, "Store custodian deposit error"}, + StoreWaitingPortingRequestError: {-14002, "Store waiting porting requests error"}, + StoreWaitingRedeemRequestError: {-14003, "Store waiting redeem requests error"}, + StorePortingRequestStateError: {-14004, "Store porting request error"}, + StoreRedeemRequestStateError: {-14005, "Store redeem request error"}, + TrackCustodianDepositError: {-14006, "Track custodian deposit error"}, + TrackReqPTokenError: {-14007, "Track requesting ptokens error"}, + StoreExchangeRatesRequestStateError: {-14008, "Store exchange rates request error"}, + StoreFinalExchangeRatesStateError: {-14009, "Store final exchange rates request error"}, + GetItemPortalByKeyError: {-14010, "Get item portal by key error"}, + GetItemPortalByKeyNotFound: {-14011, "Get item portal by key not found"}, + GetCustodianDepositStatusError: {-14012, "Get all custodian deposit status error"}, + GetReqPTokenStatusError: {-14013, "Get request ptoken status error"}, + StoreRedeemRequestError: {-14014, "Store redeem request error"}, + TrackRedeemReqByTxReqIDError: {-14015, "Track redeem request by txReqID 
error"}, + TrackReqUnlockCollateralByTxReqIDError: {-14016, "Track request unlock collateral by txReqID error"}, + GetReqUnlockCollateralStatusError: {-14017, "Get status of request unlock collateral by txReqID error"}, + StorePortalCustodianWithdrawRequestStatusError: {-14018, "Store portal custodian withdraw request status error"}, + TrackLiquidateCustodianError: {-14019, "Track liquidation custodian error"}, + StorePortalRewardError: {-14020, "Store portal reward error"}, + StoreLiquidateTopPercentileExchangeRatesError: {-14021, "Store liquidate top percentile exchange rates error"}, + StoreLiquidateExchangeRatesPoolError: {-14022, "Store liquidate exchange rates pool error"}, + StoreRedeemLiquidationExchangeRatesError: {-14023, "Store redeem liquidation exchange rates error"}, + StoreLiquidationCustodianDepositError: {-14024, "Store liquidation custodian deposit error"}, + StoreCustodianStateError: {-14025, "Store custodian state error"}, + + //B + StorePortalStatusError: {-14026, "Store portal status error"}, + GetPortalStatusError: {-14027, "Get portal status error"}, + GetPortalRedeemRequestStatusError: {-14028, "Get portal redeem request status error"}, + StorePortalRedeemRequestStatusError: {-14029, "Store portal redeem request status error"}, + GetPortalCustodianDepositStatusError: {-14030, "Get portal custodian deposit status error"}, + StorePortalCustodianDepositStatusError: {-14031, "Store portal custodian deposit status error"}, + StorePortalRequestPTokenStatusError: {-14032, "Store portal request ptoken status error"}, + GetPortalRequestPTokenStatusError: {-14033, "Get portal request ptoken status error"}, + GetPortalRedeemRequestByTxIDStatusError: {-14034, "Get portal redeem request by txid status error"}, + StorePortalRedeemRequestByTxIDStatusError: {-14035, "Store portal redeem request by txid status error"}, + GetPortalRequestUnlockCollateralStatusError: {-14036, "Get portal request unlock collateral status error"}, + 
StorePortalRequestUnlockCollateralStatusError: {-14037, "Store portal request unlock collateral status error"}, + GetPortalLiquidationCustodianRunAwayStatusError: {-14048, "Get portal liquidation custodian run away status error"}, + StorePortalLiquidationCustodianRunAwayStatusError: {-14049, "Store portal liquidation custodian run away status error"}, + GetPortalExpiredPortingReqStatusError: {-14050, "Get portal expired porting request status error"}, + StorePortalExpiredPortingReqStatusError: {-14051, "Store portal expired porting request status error"}, + GetPortalRequestWithdrawRewardStatusError: {-14052, "Get portal request withdraw reward status error"}, + StorePortalRequestWithdrawRewardStatusError: {-14053, "Store portal request withdraw reward status error"}, + GetPortalFinalExchangeRatesStateError: {-14054, "Get portal final exchange rates state error"}, + StorePortalTxStatusError: {-14038, "Store portal Tx status error"}, + StorePortalExchangeRatesStatusError: {-14039, "Store portal exchange rates status error"}, + GetPortalLiquidationExchangeRatesPoolError: {-14040, "Get portal liquidation exchange rates pool error"}, + GetPortingRequestStatusError: {-14041, "Get portal porting request status error"}, + GetPortingRequestTxStatusError: {-14042, "Get portal porting request tx status error"}, + GetLiquidationTopPercentileExchangeRatesStatusError: {-14043, "Get liquidation tp ex change rates status error"}, + GetPortalStatusNotFoundError: {-14044, "Get portal status not found error"}, + GetPortalCustodianWithdrawStatusError: {-14045, "Get portal custodian withdraw status error"}, + StoreLockedCollateralStateError: {-14046, "Store locked collateral state error"}, + GetLockedCollateralStateError: {-14047, "Get locked collateral state error"}, + + StoreRewardFeatureError: {-15000, "Store reward feature state error"}, + GetRewardFeatureError: {-15001, "Get reward feature state error"}, + GetAllRewardFeatureError: {-15002, "Get all reward feature state error"}, + 
ResetAllFeatureRewardByTokenIDError: {-15003, "Reset all reward feature state by tokenID error"}, + GetRewardFeatureAmountByTokenIDError: {-15004, "Get reward feature amount by tokenID error"}, } type StatedbError struct { diff --git a/dataaccessobject/statedb/schema.go b/dataaccessobject/statedb/schema.go index 6904e25a40..3ba8fd9584 100644 --- a/dataaccessobject/statedb/schema.go +++ b/dataaccessobject/statedb/schema.go @@ -39,6 +39,44 @@ var ( bridgeDecentralizedTokenInfoPrefix = []byte("bri-de-token-info-") bridgeStatusPrefix = []byte("bri-status-") burnPrefix = []byte("burn-") + + // portal + //A + portalFinaExchangeRatesStatePrefix = []byte("portalfinalexchangeratesstate-") + portalExchangeRatesRequestStatusPrefix = []byte("portalexchangeratesrequeststatus-") + portalPortingRequestStatusPrefix = []byte("portalportingrequeststatus-") + portalPortingRequestTxStatusPrefix = []byte("portalportingrequesttxstatus-") + portalCustodianWithdrawStatusPrefix = []byte("portalcustodianwithdrawstatus-") + portalLiquidationTpExchangeRatesStatusPrefix = []byte("portalliquidationtpexchangeratesstatus-") + portalLiquidationExchangeRatesPoolPrefix = []byte("portalliquidationexchangeratespool-") + portalLiquidationCustodianDepositStatusPrefix = []byte("portalliquidationcustodiandepositstatus-") + portalLiquidationRedeemRequestStatusPrefix = []byte("portalliquidationredeemrequeststatus-") + portalWaitingPortingRequestPrefix = []byte("portalwaitingportingrequest-") + + //B + portalCustodianStatePrefix = []byte("portalcustodian-") + portalWaitingRedeemRequestsPrefix = []byte("portalwaitingredeemrequest-") + + portalStatusPrefix = []byte("portalstatus-") + portalCustodianDepositStatusPrefix = []byte("custodiandeposit-") + portalRequestPTokenStatusPrefix = []byte("requestptoken-") + portalRedeemRequestStatusPrefix = []byte("redeemrequest-") + portalRedeemRequestStatusByTxReqIDPrefix = []byte("redeemrequestbytxid-") + portalRequestUnlockCollateralStatusPrefix = 
[]byte("requestunlockcollateral-") + portalRequestWithdrawRewardStatusPrefix = []byte("requestwithdrawportalreward-") + + // liquidation for portal + portalLiquidateCustodianRunAwayPrefix = []byte("portalliquidaterunaway-") + portalExpiredPortingReqPrefix = []byte("portalexpiredportingreq-") + + // reward for portal + portalRewardInfoStatePrefix = []byte("portalreward-") + portalLockedCollateralStatePrefix = []byte("portallockedcollateral-") + + // reward for features in network (such as portal, pdex, etc) + rewardFeatureStatePrefix = []byte("rewardfeaturestate-") + // feature names + PortalRewardName = "portal" ) func GetCommitteePrefixWithRole(role int, shardID int) []byte { @@ -220,6 +258,114 @@ func GetPDEStatusKey(prefix []byte, suffix []byte) []byte { return append(prefix, suffix...) } +// Portal +//A +func GetFinalExchangeRatesStatePrefix(beaconHeight uint64) []byte { + h := common.HashH(append(portalFinaExchangeRatesStatePrefix, []byte(fmt.Sprintf("%d", beaconHeight))...)) + return h[:][:prefixHashKeyLength] +} + +func PortalPortingRequestStatusPrefix() []byte { + return portalPortingRequestStatusPrefix +} + +func PortalPortingRequestTxStatusPrefix() []byte { + return portalPortingRequestTxStatusPrefix +} + +func PortalExchangeRatesRequestStatusPrefix() []byte { + return portalExchangeRatesRequestStatusPrefix +} + +func PortalCustodianWithdrawStatusPrefix() []byte { + return portalCustodianWithdrawStatusPrefix +} + +func PortalLiquidationTpExchangeRatesStatusPrefix() []byte { + return portalLiquidationTpExchangeRatesStatusPrefix +} + +func PortalLiquidationCustodianDepositStatusPrefix() []byte { + return portalLiquidationCustodianDepositStatusPrefix +} + +func PortalLiquidationRedeemRequestStatusPrefix() []byte { + return portalLiquidationRedeemRequestStatusPrefix +} + +func GetPortalWaitingPortingRequestPrefix(beaconHeight uint64) []byte { + h := common.HashH(append(portalWaitingPortingRequestPrefix, []byte(fmt.Sprintf("%d", beaconHeight))...)) + return 
h[:][:prefixHashKeyLength] +} + +func GetPortalLiquidationExchangeRatesPoolPrefix(beaconHeight uint64) []byte { + h := common.HashH(append(portalLiquidationExchangeRatesPoolPrefix, []byte(fmt.Sprintf("%d", beaconHeight))...)) + return h[:][:prefixHashKeyLength] +} + +//B +func GetPortalCustodianStatePrefix(beaconHeight uint64) []byte { + h := common.HashH(append(portalCustodianStatePrefix, []byte(fmt.Sprintf("%d", beaconHeight))...)) + return h[:][:prefixHashKeyLength] +} + +func GetWaitingRedeemRequestPrefix(beaconHeight uint64) []byte { + h := common.HashH(append(portalWaitingRedeemRequestsPrefix, []byte(fmt.Sprintf("%d", beaconHeight))...)) + return h[:][:prefixHashKeyLength] +} + +func GetPortalRewardInfoStatePrefix(beaconHeight uint64) []byte { + h := common.HashH(append(portalRewardInfoStatePrefix, []byte(fmt.Sprintf("%d-", beaconHeight))...)) + return h[:][:prefixHashKeyLength] +} + +func GetPortalStatusPrefix() []byte { + h := common.HashH(portalStatusPrefix) + return h[:][:prefixHashKeyLength] +} + +func GetLockedCollateralStatePrefix() []byte { + h := common.HashH(portalLockedCollateralStatePrefix) + return h[:][:prefixHashKeyLength] +} + +func GetRewardFeatureStatePrefix(beaconHeight uint64) []byte { + h := common.HashH(append(rewardFeatureStatePrefix, []byte(fmt.Sprintf("%d-", beaconHeight))...)) + return h[:][:prefixHashKeyLength] +} + +func PortalCustodianDepositStatusPrefix() []byte { + return portalCustodianDepositStatusPrefix +} + +func PortalRequestPTokenStatusPrefix() []byte { + return portalRequestPTokenStatusPrefix +} + +func PortalRedeemRequestStatusPrefix() []byte { + return portalRedeemRequestStatusPrefix +} + +func PortalRedeemRequestStatusByTxReqIDPrefix() []byte { + return portalRedeemRequestStatusByTxReqIDPrefix +} + +func PortalRequestUnlockCollateralStatusPrefix() []byte { + return portalRequestUnlockCollateralStatusPrefix +} + +func PortalRequestWithdrawRewardStatusPrefix() []byte { + return portalRequestWithdrawRewardStatusPrefix +} 
+ +func PortalLiquidateCustodianRunAwayPrefix() []byte { + return portalLiquidateCustodianRunAwayPrefix +} + +func PortalExpiredPortingReqPrefix() []byte { + return portalExpiredPortingReqPrefix +} + var _ = func() (_ struct{}) { m := make(map[string]string) prefixs := [][]byte{} diff --git a/dataaccessobject/statedb/statedb.go b/dataaccessobject/statedb/statedb.go index b8e6221823..617e0f36ba 100644 --- a/dataaccessobject/statedb/statedb.go +++ b/dataaccessobject/statedb/statedb.go @@ -1208,3 +1208,219 @@ func (stateDB *StateDB) getBurningConfirmState(key common.Hash) (*BurningConfirm } return NewBurningConfirmState(), false, nil } + +// ================================= Portal OBJECT ======================================= +func (stateDB *StateDB) getWaitingPortingRequests(beaconHeight uint64) map[string]*WaitingPortingRequest { + waitingPortingRequest := make(map[string]*WaitingPortingRequest) + temp := stateDB.trie.NodeIterator(GetPortalWaitingPortingRequestPrefix(beaconHeight)) + it := trie.NewIterator(temp) + for it.Next() { + key := it.Key + keyHash, _ := common.Hash{}.NewHash(key) + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + object := NewWaitingPortingRequest() + err := json.Unmarshal(newValue, object) + if err != nil { + panic("wrong expect type") + } + waitingPortingRequest[keyHash.String()] = object + } + + return waitingPortingRequest +} + +func (stateDB *StateDB) getCustodianByKey(key common.Hash) (*CustodianState, bool, error) { + custodianState, err := stateDB.getStateObject(CustodianStateObjectType, key) + if err != nil { + return nil, false, err + } + if custodianState != nil { + return custodianState.GetValue().(*CustodianState), true, nil + } + return NewCustodianState(), false, nil +} + +func (stateDB *StateDB) getFinalExchangeRatesByKey(key common.Hash) (*FinalExchangeRatesState, bool, error) { + finalExchangeRates, err := stateDB.getStateObject(PortalFinalExchangeRatesStateObjectType, key) + if err != 
nil { + return nil, false, err + } + if finalExchangeRates != nil { + return finalExchangeRates.GetValue().(*FinalExchangeRatesState), true, nil + } + return NewFinalExchangeRatesState(), false, nil +} + +func (stateDB *StateDB) getLiquidateExchangeRatesPoolByKey(key common.Hash) (*LiquidateExchangeRatesPool, bool, error) { + liquidateExchangeRates, err := stateDB.getStateObject(PortalLiquidationExchangeRatesPoolObjectType, key) + if err != nil { + return nil, false, err + } + if liquidateExchangeRates != nil { + return liquidateExchangeRates.GetValue().(*LiquidateExchangeRatesPool), true, nil + } + return NewLiquidateExchangeRatesPool(), false, nil +} + +func (stateDB *StateDB) getLiquidateExchangeRatesPool(beaconHeight uint64) map[string]*LiquidateExchangeRatesPool { + liquidateExchangeRatesPoolList := make(map[string]*LiquidateExchangeRatesPool) + temp := stateDB.trie.NodeIterator(GetPortalLiquidationExchangeRatesPoolPrefix(beaconHeight)) + it := trie.NewIterator(temp) + for it.Next() { + key := it.Key + keyHash, _ := common.Hash{}.NewHash(key) + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + object := NewLiquidateExchangeRatesPool() + err := json.Unmarshal(newValue, object) + if err != nil { + panic("wrong expect type") + } + liquidateExchangeRatesPoolList[keyHash.String()] = object + } + + return liquidateExchangeRatesPoolList +} + +func (stateDB *StateDB) getFinalExchangeRatesState(beaconHeight uint64) map[string]*FinalExchangeRatesState { + finalExchangeRatesState := make(map[string]*FinalExchangeRatesState) + temp := stateDB.trie.NodeIterator(GetFinalExchangeRatesStatePrefix(beaconHeight)) + it := trie.NewIterator(temp) + for it.Next() { + key := it.Key + keyHash, _ := common.Hash{}.NewHash(key) + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + object := NewFinalExchangeRatesState() + err := json.Unmarshal(newValue, object) + if err != nil { + panic("wrong expect type") + } + 
finalExchangeRatesState[keyHash.String()] = object + } + + return finalExchangeRatesState +} + +//B +func (stateDB *StateDB) getAllWaitingRedeemRequest(beaconHeight uint64) map[string]*WaitingRedeemRequest { + waitingRedeemRequests := make(map[string]*WaitingRedeemRequest) + temp := stateDB.trie.NodeIterator(GetWaitingRedeemRequestPrefix(beaconHeight)) + it := trie.NewIterator(temp) + for it.Next() { + key := it.Key + keyHash, _ := common.Hash{}.NewHash(key) + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + wr := NewWaitingRedeemRequest() + err := json.Unmarshal(newValue, wr) + if err != nil { + panic("wrong expect type") + } + waitingRedeemRequests[keyHash.String()] = wr + } + return waitingRedeemRequests +} + +func (stateDB *StateDB) getAllCustodianStatePool(beaconHeight uint64) map[string]*CustodianState { + custodians := make(map[string]*CustodianState) + temp := stateDB.trie.NodeIterator(GetPortalCustodianStatePrefix(beaconHeight)) + it := trie.NewIterator(temp) + for it.Next() { + key := it.Key + keyHash, _ := common.Hash{}.NewHash(key) + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + cus := NewCustodianState() + err := json.Unmarshal(newValue, cus) + if err != nil { + panic("wrong expect type") + } + custodians[keyHash.String()] = cus + } + return custodians +} + +func (stateDB *StateDB) getPortalRewards(beaconHeight uint64) []*PortalRewardInfo { + portalRewards := make([]*PortalRewardInfo, 0) + temp := stateDB.trie.NodeIterator(GetPortalRewardInfoStatePrefix(beaconHeight)) + it := trie.NewIterator(temp) + for it.Next() { + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + rewardInfo := NewPortalRewardInfo() + err := json.Unmarshal(newValue, rewardInfo) + if err != nil { + panic("wrong expect type") + } + portalRewards = append(portalRewards, rewardInfo) + } + return portalRewards +} + +func (stateDB *StateDB) getPortalStatusByKey(key common.Hash) 
(*PortalStatusState, bool, error) { + portalStatusState, err := stateDB.getStateObject(PortalStatusObjectType, key) + if err != nil { + return nil, false, err + } + if portalStatusState != nil { + return portalStatusState.GetValue().(*PortalStatusState), true, nil + } + return NewPortalStatusState(), false, nil +} + +func (stateDB *StateDB) getLockedCollateralState(beaconHeight uint64) (*LockedCollateralState, bool, error) { + key := GenerateLockedCollateralStateObjectKey(beaconHeight) + lockedCollateralState, err := stateDB.getStateObject(LockedCollateralStateObjectType, key) + if err != nil { + return nil, false, err + } + + if lockedCollateralState != nil { + return lockedCollateralState.GetValue().(*LockedCollateralState), true, nil + } + return NewLockedCollateralState(), false, nil +} + +// ================================= Feature reward OBJECT ======================================= +func (stateDB *StateDB) getFeatureRewardByFeatureName(featureName string, epoch uint64) (*RewardFeatureState, bool, error) { + key := GenerateRewardFeatureStateObjectKey(featureName, epoch) + rewardFeatureState, err := stateDB.getStateObject(RewardFeatureStateObjectType, key) + if err != nil { + return nil, false, err + } + + if rewardFeatureState != nil { + return rewardFeatureState.GetValue().(*RewardFeatureState), true, nil + } + return NewRewardFeatureState(), false, nil +} + +func (stateDB *StateDB) getAllFeatureRewards(epoch uint64) (*RewardFeatureState, bool, error) { + result := NewRewardFeatureState() + + temp := stateDB.trie.NodeIterator(GetRewardFeatureStatePrefix(epoch)) + it := trie.NewIterator(temp) + for it.Next() { + value := it.Value + newValue := make([]byte, len(value)) + copy(newValue, value) + rewardFeature := NewRewardFeatureState() + err := json.Unmarshal(newValue, rewardFeature) + if err != nil { + panic("wrong expect type") + } + + for _, r := range rewardFeature.totalRewards { + result.AddTotalRewards(r.tokenID, r.amount) + } + } + return result, true, 
nil +} diff --git a/dataaccessobject/statedb/stateobject.go b/dataaccessobject/statedb/stateobject.go index 739bd0dbe3..e7dfd187fa 100644 --- a/dataaccessobject/statedb/stateobject.go +++ b/dataaccessobject/statedb/stateobject.go @@ -63,6 +63,24 @@ func newStateObjectWithValue(db *StateDB, objectType int, hash common.Hash, valu return newBurningConfirmObjectWithValue(db, hash, value) case TokenTransactionObjectType: return newTokenTransactionObjectWithValue(db, hash, value) + case PortalFinalExchangeRatesStateObjectType: + return newFinalExchangeRatesStateObjectWithValue(db, hash, value) + case PortalLiquidationExchangeRatesPoolObjectType: + return newLiquidateExchangeRatesPoolObjectWithValue(db, hash, value) + case PortalWaitingPortingRequestObjectType: + return newWaitingPortingRequestObjectWithValue(db, hash, value) + case PortalStatusObjectType: + return newPortalStatusObjectWithValue(db, hash, value) + case PortalRewardInfoObjectType: + return newPortalRewardInfoObjectWithValue(db, hash, value) + case WaitingRedeemRequestObjectType: + return newWaitingRedeemRequestObjectWithValue(db, hash, value) + case CustodianStateObjectType: + return newCustodianStateObjectWithValue(db, hash, value) + case LockedCollateralStateObjectType: + return newLockedCollateralStateObjectWithValue(db, hash, value) + case RewardFeatureStateObjectType: + return newRewardFeatureStateObjectWithValue(db, hash, value) default: panic("state object type not exist") } @@ -106,6 +124,24 @@ func newStateObject(db *StateDB, objectType int, hash common.Hash) StateObject { return newBridgeStatusObject(db, hash) case BurningConfirmObjectType: return newBurningConfirmObject(db, hash) + case PortalFinalExchangeRatesStateObjectType: + return newFinalExchangeRatesStateObject(db, hash) + case PortalLiquidationExchangeRatesPoolObjectType: + return newLiquidateExchangeRatesPoolObject(db, hash) + case PortalWaitingPortingRequestObjectType: + return newWaitingPortingRequestObject(db, hash) + case 
PortalStatusObjectType: + return newPortalStatusObject(db, hash) + case PortalRewardInfoObjectType: + return newPortalRewardInfoObject(db, hash) + case WaitingRedeemRequestObjectType: + return newWaitingRedeemRequestObject(db, hash) + case CustodianStateObjectType: + return newCustodianStateObject(db, hash) + case LockedCollateralStateObjectType: + return newLockedCollateralStateObject(db, hash) + case RewardFeatureStateObjectType: + return newRewardFeatureStateObject(db, hash) default: panic("state object type not exist") } diff --git a/dataaccessobject/statedb/stateobject_portal_custodian.go b/dataaccessobject/statedb/stateobject_portal_custodian.go new file mode 100644 index 0000000000..747efd5fec --- /dev/null +++ b/dataaccessobject/statedb/stateobject_portal_custodian.go @@ -0,0 +1,345 @@ +package statedb + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type CustodianState struct { + incognitoAddress string + totalCollateral uint64 // prv + freeCollateral uint64 // prv + holdingPubTokens map[string]uint64 // tokenID : amount + lockedAmountCollateral map[string]uint64 // tokenID : amount + remoteAddresses []RemoteAddress + rewardAmount map[string]uint64 // tokenID : amount +} + +type RemoteAddress struct { + pTokenID string + address string +} + +func (cs CustodianState) GetIncognitoAddress() string { + return cs.incognitoAddress +} + +func (cs *CustodianState) SetIncognitoAddress(incognitoAddress string) { + cs.incognitoAddress = incognitoAddress +} + +func (cs CustodianState) GetTotalCollateral() uint64 { + return cs.totalCollateral +} + +func (cs *CustodianState) SetTotalCollateral(amount uint64) { + cs.totalCollateral = amount +} + +func (cs CustodianState) GetHoldingPublicTokens() map[string]uint64 { + return cs.holdingPubTokens +} + +func (cs *CustodianState) SetHoldingPublicTokens(holdingPublicTokens map[string]uint64) { + cs.holdingPubTokens = holdingPublicTokens +} + +func (cs 
CustodianState) GetLockedAmountCollateral() map[string]uint64 { + return cs.lockedAmountCollateral +} + +func (cs *CustodianState) SetLockedAmountCollateral(lockedAmountCollateral map[string]uint64) { + cs.lockedAmountCollateral = lockedAmountCollateral +} + +func (cs CustodianState) GetRemoteAddresses() []RemoteAddress { + return cs.remoteAddresses +} + +func (cs *CustodianState) SetRemoteAddresses(remoteAddresses []RemoteAddress) { + cs.remoteAddresses = remoteAddresses +} + +func (cs CustodianState) GetFreeCollateral() uint64 { + return cs.freeCollateral +} + +func (cs *CustodianState) SetFreeCollateral(amount uint64) { + cs.freeCollateral = amount +} + +func (cs CustodianState) GetRewardAmount() map[string]uint64 { + return cs.rewardAmount +} + +func (cs *CustodianState) SetRewardAmount(amount map[string]uint64) { + cs.rewardAmount = amount +} + +func (cs CustodianState) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + IncognitoAddress string + TotalCollateral uint64 + FreeCollateral uint64 + HoldingPubTokens map[string]uint64 + LockedAmountCollateral map[string]uint64 + RemoteAddresses []RemoteAddress + RewardAmount map[string]uint64 + }{ + IncognitoAddress: cs.incognitoAddress, + TotalCollateral: cs.totalCollateral, + FreeCollateral: cs.freeCollateral, + HoldingPubTokens: cs.holdingPubTokens, + LockedAmountCollateral: cs.lockedAmountCollateral, + RemoteAddresses: cs.remoteAddresses, + RewardAmount: cs.rewardAmount, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (cs *CustodianState) UnmarshalJSON(data []byte) error { + temp := struct { + IncognitoAddress string + TotalCollateral uint64 + FreeCollateral uint64 + HoldingPubTokens map[string]uint64 + LockedAmountCollateral map[string]uint64 + RemoteAddresses []RemoteAddress + RewardAmount map[string]uint64 + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + cs.incognitoAddress = temp.IncognitoAddress + cs.totalCollateral = 
temp.TotalCollateral + cs.freeCollateral = temp.FreeCollateral + cs.holdingPubTokens = temp.HoldingPubTokens + cs.lockedAmountCollateral = temp.LockedAmountCollateral + cs.remoteAddresses = temp.RemoteAddresses + cs.rewardAmount = temp.RewardAmount + return nil +} + +func (r RemoteAddress) GetPTokenID() string { + return r.pTokenID +} + +func (r *RemoteAddress) SetPTokenID(pTokenID string) { + r.pTokenID = pTokenID +} + +func (r RemoteAddress) GetAddress() string { + return r.address +} + +func (r *RemoteAddress) SetAddress(address string) { + r.address = address +} + +func (r RemoteAddress) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + PTokenID string + Address string + }{ + PTokenID: r.pTokenID, + Address: r.address, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (r *RemoteAddress) UnmarshalJSON(data []byte) error { + temp := struct { + PTokenID string + Address string + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + r.pTokenID = temp.PTokenID + r.address = temp.Address + return nil +} + +// GetRemoteAddressByTokenID returns remote address for tokenID +func GetRemoteAddressByTokenID(addresses []RemoteAddress, tokenID string) (string, error) { + for _, addr := range addresses { + if addr.GetPTokenID() == tokenID { + return addr.GetAddress(), nil + } + } + + return "", errors.New("Can not found address with tokenID") +} + +func NewCustodianState() *CustodianState { + return &CustodianState{ + rewardAmount: map[string]uint64{}, + holdingPubTokens: map[string]uint64{}, + lockedAmountCollateral: map[string]uint64{}, + } +} + +func NewCustodianStateWithValue( + incognitoAddress string, + totalCollateral uint64, + freeCollateral uint64, + holdingPubTokens map[string]uint64, + lockedAmountCollateral map[string]uint64, + remoteAddresses []RemoteAddress, + rewardAmount map[string]uint64) *CustodianState { + + return &CustodianState{ + incognitoAddress: incognitoAddress, + 
totalCollateral: totalCollateral, + freeCollateral: freeCollateral, + holdingPubTokens: holdingPubTokens, + lockedAmountCollateral: lockedAmountCollateral, + remoteAddresses: remoteAddresses, + rewardAmount: rewardAmount, + } +} + +func NewRemoteAddressWithValue(pToken string, address string) *RemoteAddress { + return &RemoteAddress{pTokenID: pToken, address: address} +} + +type CustodianStateObject struct { + db *StateDB + // Write caches. + trie Trie // storage trie, which becomes non-nil on first access + + version int + custodianStateHash common.Hash + custodianState *CustodianState + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newCustodianStateObject(db *StateDB, hash common.Hash) *CustodianStateObject { + return &CustodianStateObject{ + version: defaultVersion, + db: db, + custodianStateHash: hash, + custodianState: NewCustodianState(), + objectType: CustodianStateObjectType, + deleted: false, + } +} + +func newCustodianStateObjectWithValue(db *StateDB, key common.Hash, data interface{}) (*CustodianStateObject, error) { + var custodianState = NewCustodianState() + var ok bool + var dataBytes []byte + if dataBytes, ok = data.([]byte); ok { + err := json.Unmarshal(dataBytes, custodianState) + if err != nil { + return nil, err + } + } else { + custodianState, ok = data.(*CustodianState) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidPortalCustodianStateType, reflect.TypeOf(data)) + } + } + return &CustodianStateObject{ + version: defaultVersion, + custodianStateHash: key, + custodianState: custodianState, + db: db, + objectType: CustodianStateObjectType, + deleted: false, + }, nil +} + +func GenerateCustodianStateObjectKey(beaconHeight uint64, custodianIncognitoAddress string) 
common.Hash { + prefixHash := GetPortalCustodianStatePrefix(beaconHeight) + valueHash := common.HashH([]byte(custodianIncognitoAddress)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (t CustodianStateObject) GetVersion() int { + return t.version +} + +// setError remembers the first non-nil error it is called with. +func (t *CustodianStateObject) SetError(err error) { + if t.dbErr == nil { + t.dbErr = err + } +} + +func (t CustodianStateObject) GetTrie(db DatabaseAccessWarper) Trie { + return t.trie +} + +func (t *CustodianStateObject) SetValue(data interface{}) error { + newCustodianState, ok := data.(*CustodianState) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidPortalCustodianStateType, reflect.TypeOf(data)) + } + t.custodianState = newCustodianState + return nil +} + +func (t CustodianStateObject) GetValue() interface{} { + return t.custodianState +} + +func (t CustodianStateObject) GetValueBytes() []byte { + custodianState, ok := t.GetValue().(*CustodianState) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(custodianState) + if err != nil { + panic("failed to marshal custodian state") + } + return value +} + +func (t CustodianStateObject) GetHash() common.Hash { + return t.custodianStateHash +} + +func (t CustodianStateObject) GetType() int { + return t.objectType +} + +// MarkDelete will delete an object in trie +func (t *CustodianStateObject) MarkDelete() { + t.deleted = true +} + +// reset all shard committee value into default value +func (t *CustodianStateObject) Reset() bool { + t.custodianState = NewCustodianState() + return true +} + +func (t CustodianStateObject) IsDeleted() bool { + return t.deleted +} + +// value is either default or nil +func (t CustodianStateObject) IsEmpty() bool { + temp := NewCustodianState() + return reflect.DeepEqual(temp, t.custodianState) || t.custodianState == nil +} diff --git 
a/dataaccessobject/statedb/stateobject_portal_final_exchange_rates.go b/dataaccessobject/statedb/stateobject_portal_final_exchange_rates.go new file mode 100644 index 0000000000..558094b351 --- /dev/null +++ b/dataaccessobject/statedb/stateobject_portal_final_exchange_rates.go @@ -0,0 +1,193 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type FinalExchangeRatesDetail struct { + Amount uint64 +} + +type FinalExchangeRatesState struct { + rates map[string]FinalExchangeRatesDetail +} + +func (f *FinalExchangeRatesState) Rates() map[string]FinalExchangeRatesDetail { + return f.rates +} + +func (f *FinalExchangeRatesState) SetRates(rates map[string]FinalExchangeRatesDetail) { + f.rates = rates +} + +func NewFinalExchangeRatesState() *FinalExchangeRatesState { + return &FinalExchangeRatesState{} +} + +func NewFinalExchangeRatesStateWithValue(rates map[string]FinalExchangeRatesDetail) *FinalExchangeRatesState { + return &FinalExchangeRatesState{rates: rates} +} + +func GeneratePortalFinalExchangeRatesStateObjectKey(beaconHeight uint64) common.Hash { + suffix := "exchangerates" + prefixHash := GetFinalExchangeRatesStatePrefix(beaconHeight) + valueHash := common.HashH([]byte(suffix)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (f *FinalExchangeRatesState) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + Rates map[string]FinalExchangeRatesDetail + }{ + Rates: f.rates, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (f *FinalExchangeRatesState) UnmarshalJSON(data []byte) error { + temp := struct { + Rates map[string]FinalExchangeRatesDetail + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + f.rates = temp.Rates + return nil +} + + +type FinalExchangeRatesStateObject struct { + db *StateDB + // Write caches. 
+ trie Trie // storage trie, which becomes non-nil on first access + + version int + finalExchangeRatesStateHash common.Hash + finalExchangeRatesState *FinalExchangeRatesState + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newFinalExchangeRatesStateObjectWithValue(db *StateDB, finalExchangeRatesStateHash common.Hash, data interface{}) (*FinalExchangeRatesStateObject, error) { + var newFinalExchangeRatesState = NewFinalExchangeRatesState() + var ok bool + var dataBytes []byte + if dataBytes, ok = data.([]byte); ok { + err := json.Unmarshal(dataBytes, newFinalExchangeRatesState) + if err != nil { + return nil, err + } + } else { + newFinalExchangeRatesState, ok = data.(*FinalExchangeRatesState) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidFinalExchangeRatesStateType, reflect.TypeOf(data)) + } + } + return &FinalExchangeRatesStateObject{ + db: db, + version: defaultVersion, + finalExchangeRatesStateHash: finalExchangeRatesStateHash, + finalExchangeRatesState: newFinalExchangeRatesState, + objectType: PortalFinalExchangeRatesStateObjectType, + deleted: false, + }, nil +} + +func newFinalExchangeRatesStateObject(db *StateDB, finalExchangeRatesStateHash common.Hash) *FinalExchangeRatesStateObject { + return &FinalExchangeRatesStateObject{ + db: db, + version: defaultVersion, + finalExchangeRatesStateHash: finalExchangeRatesStateHash, + finalExchangeRatesState: NewFinalExchangeRatesState(), + objectType: PortalFinalExchangeRatesStateObjectType, + deleted: false, + } +} + +func (f FinalExchangeRatesStateObject) GetVersion() int { + return f.version +} + +// setError remembers the first non-nil error it is called with. 
+func (f *FinalExchangeRatesStateObject) SetError(err error) { + if f.dbErr == nil { + f.dbErr = err + } +} + +func (f FinalExchangeRatesStateObject) GetTrie(db DatabaseAccessWarper) Trie { + return f.trie +} + +func (f *FinalExchangeRatesStateObject) SetValue(data interface{}) error { + finalExchangeRatesState, ok := data.(*FinalExchangeRatesState) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidFinalExchangeRatesStateType, reflect.TypeOf(data)) + } + f.finalExchangeRatesState = finalExchangeRatesState + return nil +} + +func (f FinalExchangeRatesStateObject) GetValue() interface{} { + return f.finalExchangeRatesState +} + +func (f FinalExchangeRatesStateObject) GetValueBytes() []byte { + finalExchangeRatesState, ok := f.GetValue().(*FinalExchangeRatesState) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(finalExchangeRatesState) + if err != nil { + panic("failed to marshal FinalExchangeRatesState") + } + return value +} + +func (f FinalExchangeRatesStateObject) GetHash() common.Hash { + return f.finalExchangeRatesStateHash +} + +func (f FinalExchangeRatesStateObject) GetType() int { + return f.objectType +} + +// MarkDelete will delete an object in trie +func (f *FinalExchangeRatesStateObject) MarkDelete() { + f.deleted = true +} + +// reset all shard committee value into default value +func (f *FinalExchangeRatesStateObject) Reset() bool { + f.finalExchangeRatesState = NewFinalExchangeRatesState() + return true +} + +func (f FinalExchangeRatesStateObject) IsDeleted() bool { + return f.deleted +} + +// value is either default or nil +func (f FinalExchangeRatesStateObject) IsEmpty() bool { + temp := NewFinalExchangeRatesState() + return reflect.DeepEqual(temp, f.finalExchangeRatesState) || f.finalExchangeRatesState == nil +} + + + + + diff --git a/dataaccessobject/statedb/stateobject_portal_liquidation_exchangerates_pool.go b/dataaccessobject/statedb/stateobject_portal_liquidation_exchangerates_pool.go new file mode 100644 
index 0000000000..89245bd3fc --- /dev/null +++ b/dataaccessobject/statedb/stateobject_portal_liquidation_exchangerates_pool.go @@ -0,0 +1,188 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type LiquidateExchangeRatesDetail struct { + HoldAmountFreeCollateral uint64 + HoldAmountPubToken uint64 +} + +type LiquidateExchangeRatesPool struct { + rates map[string]LiquidateExchangeRatesDetail //ptoken | detail +} + +func (l *LiquidateExchangeRatesPool) Rates() map[string]LiquidateExchangeRatesDetail { + return l.rates +} + +func (l *LiquidateExchangeRatesPool) SetRates(rates map[string]LiquidateExchangeRatesDetail) { + l.rates = rates +} + +func NewLiquidateExchangeRatesPool() *LiquidateExchangeRatesPool { + return &LiquidateExchangeRatesPool{} +} + +func NewLiquidateExchangeRatesPoolWithValue(rates map[string]LiquidateExchangeRatesDetail) *LiquidateExchangeRatesPool { + return &LiquidateExchangeRatesPool{rates: rates} +} + +func GeneratePortalLiquidateExchangeRatesPoolObjectKey(beaconHeight uint64) common.Hash { + suffix := "liquidation" + prefixHash := GetPortalLiquidationExchangeRatesPoolPrefix(beaconHeight) + valueHash := common.HashH([]byte(suffix)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (l *LiquidateExchangeRatesPool) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + Rates map[string]LiquidateExchangeRatesDetail + }{ + Rates: l.rates, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (l *LiquidateExchangeRatesPool) UnmarshalJSON(data []byte) error { + temp := struct { + Rates map[string]LiquidateExchangeRatesDetail + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + l.rates = temp.Rates + return nil +} + +type LiquidateExchangeRatesPoolObject struct { + db *StateDB + // Write caches. 
+ trie Trie // storage trie, which becomes non-nil on first access + + version int + keyObject common.Hash + valueObject *LiquidateExchangeRatesPool + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newLiquidateExchangeRatesPoolObjectWithValue(db *StateDB, keyObject common.Hash, valueObject interface{}) (*LiquidateExchangeRatesPoolObject, error) { + var content = NewLiquidateExchangeRatesPool() + var ok bool + var dataBytes []byte + if dataBytes, ok = valueObject.([]byte); ok { + err := json.Unmarshal(dataBytes, content) + if err != nil { + return nil, err + } + } else { + content, ok = valueObject.(*LiquidateExchangeRatesPool) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidLiquidationExchangeRatesType, reflect.TypeOf(valueObject)) + } + } + return &LiquidateExchangeRatesPoolObject{ + db: db, + version: defaultVersion, + keyObject: keyObject, + valueObject: content, + objectType: PortalLiquidationExchangeRatesPoolObjectType, + deleted: false, + }, nil +} + +func newLiquidateExchangeRatesPoolObject(db *StateDB, keyObject common.Hash) *LiquidateExchangeRatesPoolObject { + return &LiquidateExchangeRatesPoolObject{ + db: db, + version: defaultVersion, + keyObject: keyObject, + valueObject: NewLiquidateExchangeRatesPool(), + objectType: PortalLiquidationExchangeRatesPoolObjectType, + deleted: false, + } +} + +func (l LiquidateExchangeRatesPoolObject) GetVersion() int { + return l.version +} + +// setError remembers the first non-nil error it is called with. 
+func (l *LiquidateExchangeRatesPoolObject) SetError(err error) { + if l.dbErr == nil { + l.dbErr = err + } +} + +func (l LiquidateExchangeRatesPoolObject) GetTrie(db DatabaseAccessWarper) Trie { + return l.trie +} + +func (l *LiquidateExchangeRatesPoolObject) SetValue(data interface{}) error { + valueObject, ok := data.(*LiquidateExchangeRatesPool) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidLiquidationExchangeRatesType, reflect.TypeOf(data)) + } + l.valueObject = valueObject + return nil +} + +func (l LiquidateExchangeRatesPoolObject) GetValue() interface{} { + return l.valueObject +} + +func (l LiquidateExchangeRatesPoolObject) GetValueBytes() []byte { + valueObject, ok := l.GetValue().(*LiquidateExchangeRatesPool) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(valueObject) + if err != nil { + panic("failed to marshal LiquidateExchangeRatesPool") + } + return value +} + +func (l LiquidateExchangeRatesPoolObject) GetHash() common.Hash { + return l.keyObject +} + +func (l LiquidateExchangeRatesPoolObject) GetType() int { + return l.objectType +} + +// MarkDelete will delete an object in trie +func (l *LiquidateExchangeRatesPoolObject) MarkDelete() { + l.deleted = true +} + +// reset all shard committee value into default value +func (l *LiquidateExchangeRatesPoolObject) Reset() bool { + l.valueObject = NewLiquidateExchangeRatesPool() + return true +} + +func (l LiquidateExchangeRatesPoolObject) IsDeleted() bool { + return l.deleted +} + +// value is either default or nil +func (l LiquidateExchangeRatesPoolObject) IsEmpty() bool { + temp := NewLiquidateExchangeRatesPool() + return reflect.DeepEqual(temp, l.valueObject) || l.valueObject == nil +} diff --git a/dataaccessobject/statedb/stateobject_portal_lockedcollateral.go b/dataaccessobject/statedb/stateobject_portal_lockedcollateral.go new file mode 100644 index 0000000000..46039c7966 --- /dev/null +++ 
b/dataaccessobject/statedb/stateobject_portal_lockedcollateral.go @@ -0,0 +1,211 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type LockedCollateralState struct { + totalLockedCollateralInEpoch uint64 + lockedCollateralDetail map[string]uint64 // custodianAddress : amount +} + +func (lcs LockedCollateralState) GetTotalLockedCollateralInEpoch() uint64 { + return lcs.totalLockedCollateralInEpoch +} + +func (lcs *LockedCollateralState) SetTotalLockedCollateralInEpoch(amount uint64) { + lcs.totalLockedCollateralInEpoch = amount +} + +func (lcs LockedCollateralState) GetLockedCollateralDetail() map[string]uint64 { + if lcs.lockedCollateralDetail == nil { + return map[string]uint64{} + } + return lcs.lockedCollateralDetail +} + +func (lcs *LockedCollateralState) SetLockedCollateralDetail(lockedCollateralDetail map[string]uint64) { + lcs.lockedCollateralDetail = lockedCollateralDetail +} + +func (lcs *LockedCollateralState) Reset() { + lcs.lockedCollateralDetail = nil + lcs.totalLockedCollateralInEpoch = 0 +} + +func (lcs LockedCollateralState) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + TotalLockedCollateralInEpoch uint64 + LockedCollateralDetail map[string]uint64 + }{ + TotalLockedCollateralInEpoch: lcs.totalLockedCollateralInEpoch, + LockedCollateralDetail: lcs.lockedCollateralDetail, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (lcs *LockedCollateralState) UnmarshalJSON(data []byte) error { + temp := struct { + TotalLockedCollateralInEpoch uint64 + LockedCollateralDetail map[string]uint64 + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + lcs.totalLockedCollateralInEpoch = temp.TotalLockedCollateralInEpoch + lcs.lockedCollateralDetail = temp.LockedCollateralDetail + return nil +} + +func NewLockedCollateralState() *LockedCollateralState { + return &LockedCollateralState{ + lockedCollateralDetail: 
map[string]uint64{}, + } +} + +func NewLockedCollateralStateWithValue( + totalLockedCollateralInEpoch uint64, + lockedCollateralDetail map[string]uint64, +) *LockedCollateralState { + return &LockedCollateralState{ + totalLockedCollateralInEpoch: totalLockedCollateralInEpoch, + lockedCollateralDetail: lockedCollateralDetail, + } +} + +type LockedCollateralStateObject struct { + db *StateDB + // Write caches. + trie Trie // storage trie, which becomes non-nil on first access + + version int + lockedCollateralStateHash common.Hash + lockedCollateralState *LockedCollateralState + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newLockedCollateralStateObject(db *StateDB, hash common.Hash) *LockedCollateralStateObject { + return &LockedCollateralStateObject{ + version: defaultVersion, + db: db, + lockedCollateralStateHash: hash, + lockedCollateralState: NewLockedCollateralState(), + objectType: LockedCollateralStateObjectType, + deleted: false, + } +} + +func newLockedCollateralStateObjectWithValue(db *StateDB, key common.Hash, data interface{}) (*LockedCollateralStateObject, error) { + var lockedCollateralState = NewLockedCollateralState() + var ok bool + var dataBytes []byte + if dataBytes, ok = data.([]byte); ok { + err := json.Unmarshal(dataBytes, lockedCollateralState) + if err != nil { + return nil, err + } + } else { + lockedCollateralState, ok = data.(*LockedCollateralState) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidPortalLockedCollateralStateType, reflect.TypeOf(data)) + } + } + return &LockedCollateralStateObject{ + version: defaultVersion, + lockedCollateralStateHash: key, + lockedCollateralState: lockedCollateralState, + db: db, + objectType: LockedCollateralStateObjectType, + 
deleted: false, + }, nil +} + +func GenerateLockedCollateralStateObjectKey(beaconHeight uint64) common.Hash { + prefixHash := GetLockedCollateralStatePrefix() + valueHash := common.HashH([]byte(fmt.Sprintf("%d", beaconHeight))) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (t LockedCollateralStateObject) GetVersion() int { + return t.version +} + +// setError remembers the first non-nil error it is called with. +func (t *LockedCollateralStateObject) SetError(err error) { + if t.dbErr == nil { + t.dbErr = err + } +} + +func (t LockedCollateralStateObject) GetTrie(db DatabaseAccessWarper) Trie { + return t.trie +} + +func (t *LockedCollateralStateObject) SetValue(data interface{}) error { + lockedCollateralState, ok := data.(*LockedCollateralState) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidPortalLockedCollateralStateType, reflect.TypeOf(data)) + } + t.lockedCollateralState = lockedCollateralState + return nil +} + +func (t LockedCollateralStateObject) GetValue() interface{} { + return t.lockedCollateralState +} + +func (t LockedCollateralStateObject) GetValueBytes() []byte { + lockedCollateralState, ok := t.GetValue().(*LockedCollateralState) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(lockedCollateralState) + if err != nil { + panic("failed to marshal locked collateral state") + } + return value +} + +func (t LockedCollateralStateObject) GetHash() common.Hash { + return t.lockedCollateralStateHash +} + +func (t LockedCollateralStateObject) GetType() int { + return t.objectType +} + +// MarkDelete will delete an object in trie +func (t *LockedCollateralStateObject) MarkDelete() { + t.deleted = true +} + +// reset all shard committee value into default value +func (t *LockedCollateralStateObject) Reset() bool { + t.lockedCollateralState = NewLockedCollateralState() + return true +} + +func (t LockedCollateralStateObject) IsDeleted() bool { + return t.deleted +} + 
+// value is either default or nil +func (t LockedCollateralStateObject) IsEmpty() bool { + temp := NewLockedCollateralState() + return reflect.DeepEqual(temp, t.lockedCollateralState) || t.lockedCollateralState == nil +} diff --git a/dataaccessobject/statedb/stateobject_portal_reward.go b/dataaccessobject/statedb/stateobject_portal_reward.go new file mode 100644 index 0000000000..1120d0c554 --- /dev/null +++ b/dataaccessobject/statedb/stateobject_portal_reward.go @@ -0,0 +1,283 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type PortalRewardInfo struct { + custodianIncAddr string + rewards []*RewardInfoDetail +} + +type RewardInfoDetail struct { + tokenID string + amount uint64 +} + +func (p PortalRewardInfo) GetCustodianIncAddr() string { + return p.custodianIncAddr +} + +func (p *PortalRewardInfo) SetCustodianIncAddr(custodianIncAddr string) { + p.custodianIncAddr = custodianIncAddr +} + +func (p PortalRewardInfo) GetRewards() []*RewardInfoDetail { + return p.rewards +} + +func (p *PortalRewardInfo) SetRewards(rewards []*RewardInfoDetail) { + p.rewards = rewards +} + +func (p *PortalRewardInfo) AddPortalRewardInfo(tokenID string, amount uint64) { + for i := 0; i < len(p.rewards); i++ { + if p.rewards[i].GetTokenID() == tokenID { + p.rewards[i].SetAmount(p.rewards[i].GetAmount() + amount) + return + } + } + p.rewards = append(p.rewards, NewPortalRewardInfoDetailWithValue(tokenID, amount)) +} + +func (p PortalRewardInfo) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + CustodianIncAddr string + Rewards []*RewardInfoDetail + }{ + CustodianIncAddr: p.custodianIncAddr, + Rewards: p.rewards, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (p *PortalRewardInfo) UnmarshalJSON(data []byte) error { + temp := struct { + CustodianIncAddr string + Rewards []*RewardInfoDetail + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + 
return err + } + p.custodianIncAddr = temp.CustodianIncAddr + p.rewards = temp.Rewards + return nil +} + +func NewPortalRewardInfo() *PortalRewardInfo { + return &PortalRewardInfo{} +} + +func NewPortalRewardInfoWithValue( + custodianIncAddr string, + rewards []*RewardInfoDetail) *PortalRewardInfo { + + return &PortalRewardInfo{ + custodianIncAddr: custodianIncAddr, + rewards: rewards, + } +} + +func (p RewardInfoDetail) GetAmount() uint64 { + return p.amount +} + +func (p *RewardInfoDetail) SetAmount(amount uint64) { + p.amount = amount +} + +func (p RewardInfoDetail) GetTokenID() string { + return p.tokenID +} + +func (p *RewardInfoDetail) SetTokenID(tokenId string) { + p.tokenID = tokenId +} + +func NewPortalRewardInfoDetailWithValue( + tokenID string, + amount uint64) *RewardInfoDetail { + + return &RewardInfoDetail{ + tokenID: tokenID, + amount: amount, + } +} + +func (p RewardInfoDetail) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + TokenID string + Amount uint64 + }{ + TokenID: p.tokenID, + Amount: p.amount, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (p *RewardInfoDetail) UnmarshalJSON(data []byte) error { + temp := struct { + TokenID string + Amount uint64 + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + p.tokenID = temp.TokenID + p.amount = temp.Amount + return nil +} + +func NewRewardInfoDetail() *RewardInfoDetail { + return &RewardInfoDetail{} +} + +func NewRewardInfoDetailWithValue( + tokenID string, amount uint64) *RewardInfoDetail { + + return &RewardInfoDetail{ + tokenID: tokenID, + amount: amount, + } +} + +type PortalRewardInfoObject struct { + db *StateDB + // Write caches. + trie Trie // storage trie, which becomes non-nil on first access + + version int + portalRewardInfoHash common.Hash + portalRewardInfo *PortalRewardInfo + objectType int + deleted bool + + // DB error. 
+ // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newPortalRewardInfoObject(db *StateDB, hash common.Hash) *PortalRewardInfoObject { + return &PortalRewardInfoObject{ + version: defaultVersion, + db: db, + portalRewardInfoHash: hash, + portalRewardInfo: NewPortalRewardInfo(), + objectType: PortalRewardInfoObjectType, + deleted: false, + } +} + +func newPortalRewardInfoObjectWithValue(db *StateDB, key common.Hash, data interface{}) (*PortalRewardInfoObject, error) { + var portalRewardInfo = NewPortalRewardInfo() + var ok bool + var dataBytes []byte + if dataBytes, ok = data.([]byte); ok { + err := json.Unmarshal(dataBytes, portalRewardInfo) + if err != nil { + return nil, err + } + } else { + portalRewardInfo, ok = data.(*PortalRewardInfo) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidPortalRewardInfoStateType, reflect.TypeOf(data)) + } + } + return &PortalRewardInfoObject{ + version: defaultVersion, + portalRewardInfoHash: key, + portalRewardInfo: portalRewardInfo, + db: db, + objectType: PortalRewardInfoObjectType, + deleted: false, + }, nil +} + +func GeneratePortalRewardInfoObjectKey(beaconHeight uint64, custodianIncognitoAddress string) common.Hash { + prefixHash := GetPortalRewardInfoStatePrefix(beaconHeight) + valueHash := common.HashH([]byte(custodianIncognitoAddress)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (t PortalRewardInfoObject) GetVersion() int { + return t.version +} + +// setError remembers the first non-nil error it is called with. 
+func (t *PortalRewardInfoObject) SetError(err error) {
+	if t.dbErr == nil {
+		t.dbErr = err
+	}
+}
+
+func (t PortalRewardInfoObject) GetTrie(db DatabaseAccessWarper) Trie {
+	return t.trie
+}
+
+func (t *PortalRewardInfoObject) SetValue(data interface{}) error {
+	portalRewardInfo, ok := data.(*PortalRewardInfo)
+	if !ok {
+		return fmt.Errorf("%+v, got type %+v", ErrInvalidPortalRewardInfoStateType, reflect.TypeOf(data))
+	}
+	t.portalRewardInfo = portalRewardInfo
+	return nil
+}
+
+func (t PortalRewardInfoObject) GetValue() interface{} {
+	return t.portalRewardInfo
+}
+
+func (t PortalRewardInfoObject) GetValueBytes() []byte {
+	portalRewardInfo, ok := t.GetValue().(*PortalRewardInfo)
+	if !ok {
+		panic("wrong expected value type")
+	}
+	value, err := json.Marshal(portalRewardInfo)
+	if err != nil {
+		panic("failed to marshal portal reward info")
+	}
+	return value
+}
+
+func (t PortalRewardInfoObject) GetHash() common.Hash {
+	return t.portalRewardInfoHash
+}
+
+func (t PortalRewardInfoObject) GetType() int {
+	return t.objectType
+}
+
+// MarkDelete will delete an object in trie
+func (t *PortalRewardInfoObject) MarkDelete() {
+	t.deleted = true
+}
+
+// reset all shard committee value into default value
+func (t *PortalRewardInfoObject) Reset() bool {
+	t.portalRewardInfo = NewPortalRewardInfo()
+	return true
+}
+
+func (t PortalRewardInfoObject) IsDeleted() bool {
+	return t.deleted
+}
+
+// value is either default or nil (fixed: compare against *PortalRewardInfo, not *CustodianState)
+func (t PortalRewardInfoObject) IsEmpty() bool {
+	temp := NewPortalRewardInfo()
+	return reflect.DeepEqual(temp, t.portalRewardInfo) || t.portalRewardInfo == nil
+}
diff --git a/dataaccessobject/statedb/stateobject_portal_status.go b/dataaccessobject/statedb/stateobject_portal_status.go
new file mode 100644
index 0000000000..3e9b5bf574
--- /dev/null
+++ b/dataaccessobject/statedb/stateobject_portal_status.go
@@ -0,0 +1,216 @@
+package statedb
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/incognitochain/incognito-chain/common"
+	"reflect"
+)
+
+type PortalStatusState struct {
+	statusType    []byte
+	statusSuffix  []byte
+	statusContent []byte
+}
+
+func (s PortalStatusState) StatusType() []byte {
+	return s.statusType
+}
+
+func (s *PortalStatusState) SetStatusType(statusType []byte) {
+	s.statusType = statusType
+}
+
+func (s PortalStatusState) StatusSuffix() []byte {
+	return s.statusSuffix
+}
+
+func (s *PortalStatusState) SetStatusSuffix(statusSuffix []byte) {
+	s.statusSuffix = statusSuffix
+}
+
+func (s PortalStatusState) StatusContent() []byte {
+	return s.statusContent
+}
+
+func (s *PortalStatusState) SetStatusContent(statusContent []byte) {
+	s.statusContent = statusContent
+}
+
+func (s PortalStatusState) MarshalJSON() ([]byte, error) {
+	data, err := json.Marshal(struct {
+		StatusType    []byte
+		StatusSuffix  []byte
+		StatusContent []byte
+	}{
+		StatusType:    s.statusType,
+		StatusSuffix:  s.statusSuffix,
+		StatusContent: s.statusContent,
+	})
+	if err != nil {
+		return []byte{}, err
+	}
+	return data, nil
+}
+
+func (s *PortalStatusState) UnmarshalJSON(data []byte) error {
+	temp := struct {
+		StatusType    []byte
+		StatusSuffix  []byte
+		StatusContent []byte
+	}{}
+	err := json.Unmarshal(data, &temp)
+	if err != nil {
+		return err
+	}
+	s.statusType = temp.StatusType
+	s.statusSuffix = temp.StatusSuffix
+	s.statusContent = temp.StatusContent
+	return nil
+}
+
+func (s *PortalStatusState) ToString() string {
+	return "{" +
+		" \"StatusType\": " + string(s.statusType) +
+		" \"StatusSuffix\": " + string(s.statusSuffix) +
+		" \"StatusContent\": " + string(s.statusContent) +
+		"}"
+}
+
+func NewPortalStatusState() *PortalStatusState {
+	return &PortalStatusState{}
+}
+
+func NewPortalStatusStateWithValue(statusType []byte, statusSuffix []byte, statusContent []byte) *PortalStatusState {
+	return &PortalStatusState{statusType: statusType, statusSuffix: statusSuffix, statusContent: statusContent}
+}
+
+type PortalStatusObject struct {
+	db *StateDB
+	// Write caches.
+ trie Trie // storage trie, which becomes non-nil on first access + + version int + portalStatusHash common.Hash + portalStatus *PortalStatusState + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newPortalStatusObject(db *StateDB, hash common.Hash) *PortalStatusObject { + return &PortalStatusObject{ + version: defaultVersion, + db: db, + portalStatusHash: hash, + portalStatus: NewPortalStatusState(), + objectType: PortalStatusObjectType, + deleted: false, + } +} + +func newPortalStatusObjectWithValue(db *StateDB, key common.Hash, data interface{}) (*PortalStatusObject, error) { + var newPortalStatus = NewPortalStatusState() + var ok bool + var dataBytes []byte + if dataBytes, ok = data.([]byte); ok { + err := json.Unmarshal(dataBytes, newPortalStatus) + if err != nil { + return nil, err + } + } else { + newPortalStatus, ok = data.(*PortalStatusState) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidPortalStatusStateType, reflect.TypeOf(data)) + } + } + return &PortalStatusObject{ + version: defaultVersion, + portalStatusHash: key, + portalStatus: newPortalStatus, + db: db, + objectType: PortalStatusObjectType, + deleted: false, + }, nil +} + +func GeneratePortalStatusObjectKey(statusType []byte, statusSuffix []byte) common.Hash { + prefixHash := GetPortalStatusPrefix() + valueHash := common.HashH(append(statusType, statusSuffix...)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (t PortalStatusObject) GetVersion() int { + return t.version +} + +// setError remembers the first non-nil error it is called with. 
+func (t *PortalStatusObject) SetError(err error) { + if t.dbErr == nil { + t.dbErr = err + } +} + +func (t PortalStatusObject) GetTrie(db DatabaseAccessWarper) Trie { + return t.trie +} + +func (t *PortalStatusObject) SetValue(data interface{}) error { + newPortalStatus, ok := data.(*PortalStatusState) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidPortalStatusStateType, reflect.TypeOf(data)) + } + t.portalStatus = newPortalStatus + return nil +} + +func (t PortalStatusObject) GetValue() interface{} { + return t.portalStatus +} + +func (t PortalStatusObject) GetValueBytes() []byte { + portalStatusState, ok := t.GetValue().(*PortalStatusState) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(portalStatusState) + if err != nil { + panic("failed to marshal portalStatusState") + } + return value +} + +func (t PortalStatusObject) GetHash() common.Hash { + return t.portalStatusHash +} + +func (t PortalStatusObject) GetType() int { + return t.objectType +} + +// MarkDelete will delete an object in trie +func (t *PortalStatusObject) MarkDelete() { + t.deleted = true +} + +// reset all shard committee value into default value +func (t *PortalStatusObject) Reset() bool { + t.portalStatus = NewPortalStatusState() + return true +} + +func (t PortalStatusObject) IsDeleted() bool { + return t.deleted +} + +// value is either default or nil +func (t PortalStatusObject) IsEmpty() bool { + temp := NewPortalStatusState() + return reflect.DeepEqual(temp, t.portalStatus) || t.portalStatus == nil +} diff --git a/dataaccessobject/statedb/stateobject_portal_waiting_porting_request.go b/dataaccessobject/statedb/stateobject_portal_waiting_porting_request.go new file mode 100644 index 0000000000..2d7576b2aa --- /dev/null +++ b/dataaccessobject/statedb/stateobject_portal_waiting_porting_request.go @@ -0,0 +1,297 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type 
MatchingPortingCustodianDetail struct { + IncAddress string + RemoteAddress string + Amount uint64 + LockedAmountCollateral uint64 + RemainCollateral uint64 +} + +type WaitingPortingRequest struct { + uniquePortingID string + txReqID common.Hash + tokenID string + porterAddress string + amount uint64 + custodians []*MatchingPortingCustodianDetail + portingFee uint64 + status int + beaconHeight uint64 +} + +func (w *WaitingPortingRequest) BeaconHeight() uint64 { + return w.beaconHeight +} + +func (w *WaitingPortingRequest) SetBeaconHeight(beaconHeight uint64) { + w.beaconHeight = beaconHeight +} + +func (w *WaitingPortingRequest) Status() int { + return w.status +} + +func (w *WaitingPortingRequest) SetStatus(status int) { + w.status = status +} + +func (w *WaitingPortingRequest) PortingFee() uint64 { + return w.portingFee +} + +func (w *WaitingPortingRequest) SetPortingFee(portingFee uint64) { + w.portingFee = portingFee +} + +func (w *WaitingPortingRequest) Custodians() []*MatchingPortingCustodianDetail { + return w.custodians +} + +func (w *WaitingPortingRequest) SetCustodians(custodians []*MatchingPortingCustodianDetail) { + w.custodians = custodians +} + +func (w *WaitingPortingRequest) Amount() uint64 { + return w.amount +} + +func (w *WaitingPortingRequest) SetAmount(amount uint64) { + w.amount = amount +} + +func (w *WaitingPortingRequest) PorterAddress() string { + return w.porterAddress +} + +func (w *WaitingPortingRequest) SetPorterAddress(porterAddress string) { + w.porterAddress = porterAddress +} + +func (w *WaitingPortingRequest) TokenID() string { + return w.tokenID +} + +func (w *WaitingPortingRequest) SetTokenID(tokenID string) { + w.tokenID = tokenID +} + +func (w *WaitingPortingRequest) TxReqID() common.Hash { + return w.txReqID +} + +func (w *WaitingPortingRequest) SetTxReqID(txReqID common.Hash) { + w.txReqID = txReqID +} + +func (w *WaitingPortingRequest) UniquePortingID() string { + return w.uniquePortingID +} + +func (w 
*WaitingPortingRequest) SetUniquePortingID(uniquePortingID string) { + w.uniquePortingID = uniquePortingID +} + +func NewWaitingPortingRequest() *WaitingPortingRequest { + return &WaitingPortingRequest{} +} + +func NewWaitingPortingRequestWithValue(uniquePortingID string, txReqID common.Hash, tokenID string, porterAddress string, amount uint64, custodians []*MatchingPortingCustodianDetail, portingFee uint64, status int, beaconHeight uint64) *WaitingPortingRequest { + return &WaitingPortingRequest{uniquePortingID: uniquePortingID, txReqID: txReqID, tokenID: tokenID, porterAddress: porterAddress, amount: amount, custodians: custodians, portingFee: portingFee, status: status, beaconHeight: beaconHeight} +} + +func GeneratePortalWaitingPortingRequestObjectKey(beaconHeight uint64, portingRequestId string) common.Hash { + prefixHash := GetPortalWaitingPortingRequestPrefix(beaconHeight) + valueHash := common.HashH([]byte(portingRequestId)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (w *WaitingPortingRequest) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + UniquePortingID string + TxReqID common.Hash + TokenID string + PorterAddress string + Amount uint64 + Custodians []*MatchingPortingCustodianDetail + PortingFee uint64 + Status int + BeaconHeight uint64 + }{ + UniquePortingID: w.uniquePortingID, + TxReqID: w.txReqID, + TokenID: w.tokenID, + PorterAddress: w.porterAddress, + Amount: w.amount, + Custodians: w.custodians, + PortingFee: w.portingFee, + Status: w.status, + BeaconHeight: w.beaconHeight, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (w *WaitingPortingRequest) UnmarshalJSON(data []byte) error { + temp := struct { + UniquePortingID string + TxReqID common.Hash + TokenID string + PorterAddress string + Amount uint64 + Custodians []*MatchingPortingCustodianDetail + PortingFee uint64 + Status int + BeaconHeight uint64 + }{} + err := json.Unmarshal(data, &temp) 
+ if err != nil { + return err + } + + w.uniquePortingID = temp.UniquePortingID + w.txReqID = temp.TxReqID + w.tokenID = temp.TokenID + w.porterAddress = temp.PorterAddress + w.amount = temp.Amount + w.custodians = temp.Custodians + w.portingFee = temp.PortingFee + w.status = temp.Status + w.beaconHeight = temp.BeaconHeight + + return nil +} + + +type WaitingPortingRequestObject struct { + db *StateDB + // Write caches. + trie Trie // storage trie, which becomes non-nil on first access + + version int + keyObject common.Hash + valueObject *WaitingPortingRequest + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newWaitingPortingRequestObjectWithValue(db *StateDB, keyObject common.Hash, valueObject interface{}) (*WaitingPortingRequestObject, error) { + var content = NewWaitingPortingRequest() + var ok bool + var dataBytes []byte + if dataBytes, ok = valueObject.([]byte); ok { + err := json.Unmarshal(dataBytes, content) + if err != nil { + return nil, err + } + } else { + content, ok = valueObject.(*WaitingPortingRequest) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidWaitingPortingRequestType, reflect.TypeOf(valueObject)) + } + } + return &WaitingPortingRequestObject{ + db: db, + version: defaultVersion, + keyObject: keyObject, + valueObject: content, + objectType: PortalWaitingPortingRequestObjectType, + deleted: false, + }, nil +} + +func newWaitingPortingRequestObject(db *StateDB, keyObject common.Hash) *WaitingPortingRequestObject { + return &WaitingPortingRequestObject{ + db: db, + version: defaultVersion, + keyObject: keyObject, + valueObject: NewWaitingPortingRequest(), + objectType: PortalWaitingPortingRequestObjectType, + deleted: false, + } +} + +func (l 
WaitingPortingRequestObject) GetVersion() int { + return l.version +} + +// setError remembers the first non-nil error it is called with. +func (l *WaitingPortingRequestObject) SetError(err error) { + if l.dbErr == nil { + l.dbErr = err + } +} + +func (l WaitingPortingRequestObject) GetTrie(db DatabaseAccessWarper) Trie { + return l.trie +} + +func (l *WaitingPortingRequestObject) SetValue(data interface{}) error { + valueObject, ok := data.(*WaitingPortingRequest) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidWaitingPortingRequestType, reflect.TypeOf(data)) + } + l.valueObject = valueObject + return nil +} + +func (l WaitingPortingRequestObject) GetValue() interface{} { + return l.valueObject +} + +func (l WaitingPortingRequestObject) GetValueBytes() []byte { + valueObject, ok := l.GetValue().(*WaitingPortingRequest) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(valueObject) + if err != nil { + panic("failed to marshal WaitingPortingRequest") + } + return value +} + +func (l WaitingPortingRequestObject) GetHash() common.Hash { + return l.keyObject +} + +func (l WaitingPortingRequestObject) GetType() int { + return l.objectType +} + +// MarkDelete will delete an object in trie +func (l *WaitingPortingRequestObject) MarkDelete() { + l.deleted = true +} + +// reset all shard committee value into default value +func (l *WaitingPortingRequestObject) Reset() bool { + l.valueObject = NewWaitingPortingRequest() + return true +} + +func (l WaitingPortingRequestObject) IsDeleted() bool { + return l.deleted +} + +// value is either default or nil +func (l WaitingPortingRequestObject) IsEmpty() bool { + temp := NewWaitingPortingRequest() + return reflect.DeepEqual(temp, l.valueObject) || l.valueObject == nil +} diff --git a/dataaccessobject/statedb/stateobject_portal_waitingredeem.go b/dataaccessobject/statedb/stateobject_portal_waitingredeem.go new file mode 100644 index 0000000000..c03d3a3dac --- /dev/null +++ 
b/dataaccessobject/statedb/stateobject_portal_waitingredeem.go @@ -0,0 +1,380 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type WaitingRedeemRequest struct { + uniqueRedeemID string + tokenID string + redeemerAddress string + redeemerRemoteAddress string + redeemAmount uint64 + custodians []*MatchingRedeemCustodianDetail + redeemFee uint64 + beaconHeight uint64 + txReqID common.Hash +} + +type MatchingRedeemCustodianDetail struct { + incAddress string + remoteAddress string + amount uint64 +} + +func (wrq WaitingRedeemRequest) GetUniqueRedeemID() string { + return wrq.uniqueRedeemID +} + +func (wrq *WaitingRedeemRequest) SetUniqueRedeemID(uniqueRedeemID string) { + wrq.uniqueRedeemID = uniqueRedeemID +} + +func (wrq WaitingRedeemRequest) GetTokenID() string { + return wrq.tokenID +} + +func (wrq *WaitingRedeemRequest) SetTokenID(tokenID string) { + wrq.tokenID = tokenID +} + +func (wrq WaitingRedeemRequest) GetRedeemerAddress() string { + return wrq.redeemerAddress +} + +func (wrq *WaitingRedeemRequest) SetRedeemerAddress(redeemerAddress string) { + wrq.redeemerAddress = redeemerAddress +} + +func (wrq WaitingRedeemRequest) GetRedeemerRemoteAddress() string { + return wrq.redeemerRemoteAddress +} + +func (wrq *WaitingRedeemRequest) SetRedeemerRemoteAddress(redeemerRemoteAddress string) { + wrq.redeemerRemoteAddress = redeemerRemoteAddress +} + +func (wrq WaitingRedeemRequest) GetRedeemAmount() uint64 { + return wrq.redeemAmount +} + +func (wrq *WaitingRedeemRequest) SetRedeemAmount(redeemAmount uint64) { + wrq.redeemAmount = redeemAmount +} + +func (wrq WaitingRedeemRequest) GetCustodians() []*MatchingRedeemCustodianDetail { + return wrq.custodians +} + +func (wrq *WaitingRedeemRequest) SetCustodians(custodians []*MatchingRedeemCustodianDetail) { + wrq.custodians = custodians +} + +func (wrq WaitingRedeemRequest) GetRedeemFee() uint64 { + return wrq.redeemFee +} + +func (wrq 
*WaitingRedeemRequest) SetRedeemFee(redeemFee uint64) { + wrq.redeemFee = redeemFee +} + +func (wrq WaitingRedeemRequest) GetBeaconHeight() uint64 { + return wrq.beaconHeight +} + +func (wrq *WaitingRedeemRequest) SetBeaconHeight(beaconHeight uint64) { + wrq.beaconHeight = beaconHeight +} + +func (wrq WaitingRedeemRequest) GetTxReqID() common.Hash { + return wrq.txReqID +} + +func (wrq *WaitingRedeemRequest) SetTxReqID(txReqID common.Hash) { + wrq.txReqID = txReqID +} + +func (wrq WaitingRedeemRequest) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + UniqueRedeemID string + TokenID string + RedeemerAddress string + RedeemerRemoteAddress string + RedeemAmount uint64 + Custodians []*MatchingRedeemCustodianDetail + RedeemFee uint64 + BeaconHeight uint64 + TxReqID common.Hash + }{ + UniqueRedeemID: wrq.uniqueRedeemID, + TokenID: wrq.tokenID, + RedeemerAddress: wrq.redeemerAddress, + RedeemerRemoteAddress: wrq.redeemerRemoteAddress, + RedeemAmount: wrq.redeemAmount, + Custodians: wrq.custodians, + RedeemFee: wrq.redeemFee, + BeaconHeight: wrq.beaconHeight, + TxReqID: wrq.txReqID, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (wrq *WaitingRedeemRequest) UnmarshalJSON(data []byte) error { + temp := struct { + UniqueRedeemID string + TokenID string + RedeemerAddress string + RedeemerRemoteAddress string + RedeemAmount uint64 + Custodians []*MatchingRedeemCustodianDetail + RedeemFee uint64 + BeaconHeight uint64 + TxReqID common.Hash + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + wrq.uniqueRedeemID = temp.UniqueRedeemID + wrq.tokenID = temp.TokenID + wrq.redeemerAddress = temp.RedeemerAddress + wrq.redeemerRemoteAddress = temp.RedeemerRemoteAddress + wrq.redeemAmount = temp.RedeemAmount + wrq.custodians = temp.Custodians + wrq.redeemFee = temp.RedeemFee + wrq.beaconHeight = temp.BeaconHeight + wrq.txReqID = temp.TxReqID + return nil +} + +func (mc MatchingRedeemCustodianDetail) 
GetIncognitoAddress() string { + return mc.incAddress +} + +func (mc *MatchingRedeemCustodianDetail) SetIncognitoAddress(incognitoAddress string) { + mc.incAddress = incognitoAddress +} + +func (mc MatchingRedeemCustodianDetail) GetRemoteAddress() string { + return mc.remoteAddress +} + +func (mc *MatchingRedeemCustodianDetail) SetRemoteAddress(remoteAddress string) { + mc.remoteAddress = remoteAddress +} + +func (mc MatchingRedeemCustodianDetail) GetAmount() uint64 { + return mc.amount +} + +func (mc *MatchingRedeemCustodianDetail) SetAmount(amount uint64) { + mc.amount = amount +} + +func (mc MatchingRedeemCustodianDetail) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + IncAddress string + RemoteAddress string + Amount uint64 + }{ + IncAddress: mc.incAddress, + RemoteAddress: mc.remoteAddress, + Amount: mc.amount, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (mc *MatchingRedeemCustodianDetail) UnmarshalJSON(data []byte) error { + temp := struct { + IncAddress string + RemoteAddress string + Amount uint64 + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + mc.incAddress = temp.IncAddress + mc.remoteAddress = temp.RemoteAddress + mc.amount = temp.Amount + return nil +} + +func NewMatchingRedeemCustodianDetailWithValue( + incAddress string, + remoteAddress string, + amount uint64) *MatchingRedeemCustodianDetail { + + return &MatchingRedeemCustodianDetail{ + incAddress: incAddress, + remoteAddress: remoteAddress, + amount: amount, + } +} + +func NewWaitingRedeemRequest() *WaitingRedeemRequest { + return &WaitingRedeemRequest{} +} + +func NewWaitingRedeemRequestWithValue( + uniqueRedeemID string, + tokenID string, + redeemerAddress string, + redeemerRemoteAddress string, + redeemAmount uint64, + custodians []*MatchingRedeemCustodianDetail, + redeemFee uint64, + beaconHeight uint64, + txReqID common.Hash) *WaitingRedeemRequest { + + return &WaitingRedeemRequest{ + uniqueRedeemID: 
uniqueRedeemID,
+		tokenID:               tokenID,
+		redeemerAddress:       redeemerAddress,
+		redeemerRemoteAddress: redeemerRemoteAddress,
+		redeemAmount:          redeemAmount,
+		custodians:            custodians,
+		redeemFee:             redeemFee,
+		beaconHeight:          beaconHeight,
+		txReqID:               txReqID,
+	}
+}
+
+type WaitingRedeemRequestObject struct {
+	db *StateDB
+	// Write caches.
+	trie Trie // storage trie, which becomes non-nil on first access
+
+	version                  int
+	waitingRedeemRequestHash common.Hash
+	waitingRedeemRequest     *WaitingRedeemRequest
+	objectType               int
+	deleted                  bool
+
+	// DB error.
+	// State objects are used by the consensus core and VM which are
+	// unable to deal with database-level errors. Any error that occurs
+	// during a database read is memoized here and will eventually be returned
+	// by StateDB.Commit.
+	dbErr error
+}
+
+func newWaitingRedeemRequestObject(db *StateDB, hash common.Hash) *WaitingRedeemRequestObject {
+	return &WaitingRedeemRequestObject{
+		version:                  defaultVersion,
+		db:                       db,
+		waitingRedeemRequestHash: hash,
+		waitingRedeemRequest:     NewWaitingRedeemRequest(),
+		objectType:               WaitingRedeemRequestObjectType,
+		deleted:                  false,
+	}
+}
+
+func newWaitingRedeemRequestObjectWithValue(db *StateDB, key common.Hash, data interface{}) (*WaitingRedeemRequestObject, error) {
+	var redeemRequest = NewWaitingRedeemRequest()
+	var ok bool
+	var dataBytes []byte
+	if dataBytes, ok = data.([]byte); ok {
+		err := json.Unmarshal(dataBytes, redeemRequest)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		redeemRequest, ok = data.(*WaitingRedeemRequest)
+		if !ok {
+			return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidPortalWaitingRedeemRequestType, reflect.TypeOf(data))
+		}
+	}
+	return &WaitingRedeemRequestObject{
+		version:                  defaultVersion,
+		waitingRedeemRequestHash: key,
+		waitingRedeemRequest:     redeemRequest,
+		db:                       db,
+		objectType:               WaitingRedeemRequestObjectType,
+		deleted:                  false,
+	}, nil
+}
+
+func GenerateWaitingRedeemRequestObjectKey(beaconHeight uint64, redeemID string) common.Hash {
+	prefixHash := GetWaitingRedeemRequestPrefix(beaconHeight)
+	valueHash := common.HashH([]byte(redeemID))
+	return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...))
+}
+
+func (t WaitingRedeemRequestObject) GetVersion() int {
+	return t.version
+}
+
+// setError remembers the first non-nil error it is called with.
+func (t *WaitingRedeemRequestObject) SetError(err error) {
+	if t.dbErr == nil {
+		t.dbErr = err
+	}
+}
+
+func (t WaitingRedeemRequestObject) GetTrie(db DatabaseAccessWarper) Trie {
+	return t.trie
+}
+
+func (t *WaitingRedeemRequestObject) SetValue(data interface{}) error {
+	redeemRequest, ok := data.(*WaitingRedeemRequest)
+	if !ok {
+		return fmt.Errorf("%+v, got type %+v", ErrInvalidPortalWaitingRedeemRequestType, reflect.TypeOf(data))
+	}
+	t.waitingRedeemRequest = redeemRequest
+	return nil
+}
+
+func (t WaitingRedeemRequestObject) GetValue() interface{} {
+	return t.waitingRedeemRequest
+}
+
+func (t WaitingRedeemRequestObject) GetValueBytes() []byte {
+	redeemRequest, ok := t.GetValue().(*WaitingRedeemRequest)
+	if !ok {
+		panic("wrong expected value type")
+	}
+	value, err := json.Marshal(redeemRequest)
+	if err != nil {
+		panic("failed to marshal redeem request")
+	}
+	return value
+}
+
+func (t WaitingRedeemRequestObject) GetHash() common.Hash {
+	return t.waitingRedeemRequestHash
+}
+
+func (t WaitingRedeemRequestObject) GetType() int {
+	return t.objectType
+}
+
+// MarkDelete will delete an object in trie
+func (t *WaitingRedeemRequestObject) MarkDelete() {
+	t.deleted = true
+}
+
+// reset all shard committee value into default value
+func (t *WaitingRedeemRequestObject) Reset() bool {
+	t.waitingRedeemRequest = NewWaitingRedeemRequest()
+	return true
+}
+
+func (t WaitingRedeemRequestObject) IsDeleted() bool {
+	return t.deleted
+}
+
+// value is either default or nil (fixed: compare against *WaitingRedeemRequest, not *CustodianState)
+func (t WaitingRedeemRequestObject) IsEmpty() bool {
+	temp := NewWaitingRedeemRequest()
+	return reflect.DeepEqual(temp, t.waitingRedeemRequest) ||
t.waitingRedeemRequest == nil +} diff --git a/dataaccessobject/statedb/stateobject_reward_feature.go b/dataaccessobject/statedb/stateobject_reward_feature.go new file mode 100644 index 0000000000..4f541e7400 --- /dev/null +++ b/dataaccessobject/statedb/stateobject_reward_feature.go @@ -0,0 +1,205 @@ +package statedb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "reflect" +) + +type RewardFeatureState struct { + totalRewards []*RewardInfoDetail +} + +func (rfs RewardFeatureState) GetTotalRewards() []*RewardInfoDetail { + return rfs.totalRewards +} + +func (rfs *RewardFeatureState) SetTotalRewards(totalRewards []*RewardInfoDetail) { + rfs.totalRewards = totalRewards +} + +func (rfs *RewardFeatureState) AddTotalRewards(tokenID string, amount uint64) { + for i := 0; i < len(rfs.totalRewards); i++ { + if rfs.totalRewards[i].tokenID == tokenID { + rfs.totalRewards[i].amount += amount + return + } + } + rfs.totalRewards = append(rfs.totalRewards, NewRewardInfoDetailWithValue(tokenID, amount)) +} + +func (rfs *RewardFeatureState) ResetTotalRewardByTokenID(tokenID string) { + for i := 0; i < len(rfs.totalRewards); i++ { + if rfs.totalRewards[i].tokenID == tokenID { + rfs.totalRewards[i].amount = 0 + return + } + } +} + +func (rfs RewardFeatureState) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(struct { + TotalRewards []*RewardInfoDetail + }{ + TotalRewards: rfs.totalRewards, + }) + if err != nil { + return []byte{}, err + } + return data, nil +} + +func (rfs *RewardFeatureState) UnmarshalJSON(data []byte) error { + temp := struct { + TotalRewards []*RewardInfoDetail + }{} + err := json.Unmarshal(data, &temp) + if err != nil { + return err + } + rfs.totalRewards = temp.TotalRewards + return nil +} + +func NewRewardFeatureState() *RewardFeatureState { + return &RewardFeatureState{} +} + +func NewRewardFeatureStateWithValue( + totalRewards []*RewardInfoDetail, +) *RewardFeatureState { + return &RewardFeatureState{ + 
totalRewards: totalRewards, + } +} + +type RewardFeatureStateObject struct { + db *StateDB + // Write caches. + trie Trie // storage trie, which becomes non-nil on first access + + version int + rewardFeatureStateHash common.Hash + rewardFeatureState *RewardFeatureState + objectType int + deleted bool + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error +} + +func newRewardFeatureStateObject(db *StateDB, hash common.Hash) *RewardFeatureStateObject { + return &RewardFeatureStateObject{ + version: defaultVersion, + db: db, + rewardFeatureStateHash: hash, + rewardFeatureState: NewRewardFeatureState(), + objectType: RewardFeatureStateObjectType, + deleted: false, + } +} + +func newRewardFeatureStateObjectWithValue(db *StateDB, key common.Hash, data interface{}) (*RewardFeatureStateObject, error) { + var totalCustodianRewardState = NewRewardFeatureState() + var ok bool + var dataBytes []byte + if dataBytes, ok = data.([]byte); ok { + err := json.Unmarshal(dataBytes, totalCustodianRewardState) + if err != nil { + return nil, err + } + } else { + totalCustodianRewardState, ok = data.(*RewardFeatureState) + if !ok { + return nil, fmt.Errorf("%+v, got type %+v", ErrInvalidRewardFeatureStateType, reflect.TypeOf(data)) + } + } + return &RewardFeatureStateObject{ + version: defaultVersion, + rewardFeatureStateHash: key, + rewardFeatureState: totalCustodianRewardState, + db: db, + objectType: RewardFeatureStateObjectType, + deleted: false, + }, nil +} + +func GenerateRewardFeatureStateObjectKey(featureName string, epoch uint64) common.Hash { + prefixHash := GetRewardFeatureStatePrefix(epoch) + valueHash := common.HashH([]byte(featureName)) + return common.BytesToHash(append(prefixHash, valueHash[:][:prefixKeyLength]...)) +} + +func (t RewardFeatureStateObject) 
GetVersion() int { + return t.version +} + +// setError remembers the first non-nil error it is called with. +func (t *RewardFeatureStateObject) SetError(err error) { + if t.dbErr == nil { + t.dbErr = err + } +} + +func (t RewardFeatureStateObject) GetTrie(db DatabaseAccessWarper) Trie { + return t.trie +} + +func (t *RewardFeatureStateObject) SetValue(data interface{}) error { + rewardFeatureState, ok := data.(*RewardFeatureState) + if !ok { + return fmt.Errorf("%+v, got type %+v", ErrInvalidRewardFeatureStateType, reflect.TypeOf(data)) + } + t.rewardFeatureState = rewardFeatureState + return nil +} + +func (t RewardFeatureStateObject) GetValue() interface{} { + return t.rewardFeatureState +} + +func (t RewardFeatureStateObject) GetValueBytes() []byte { + rewardFeatureState, ok := t.GetValue().(*RewardFeatureState) + if !ok { + panic("wrong expected value type") + } + value, err := json.Marshal(rewardFeatureState) + if err != nil { + panic("failed to marshal reward feature state") + } + return value +} + +func (t RewardFeatureStateObject) GetHash() common.Hash { + return t.rewardFeatureStateHash +} + +func (t RewardFeatureStateObject) GetType() int { + return t.objectType +} + +// MarkDelete will delete an object in trie +func (t *RewardFeatureStateObject) MarkDelete() { + t.deleted = true +} + +// reset all shard committee value into default value +func (t *RewardFeatureStateObject) Reset() bool { + t.rewardFeatureState = NewRewardFeatureState() + return true +} + +func (t RewardFeatureStateObject) IsDeleted() bool { + return t.deleted +} + +// value is either default or nil +func (t RewardFeatureStateObject) IsEmpty() bool { + temp := NewRewardFeatureState() + return reflect.DeepEqual(temp, t.rewardFeatureState) || t.rewardFeatureState == nil +} diff --git a/go.mod b/go.mod index cbdf0275f0..ba483f3ab7 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,17 @@ module github.com/incognitochain/incognito-chain -go 1.12 +go 1.13 require ( cloud.google.com/go v0.38.0 
github.com/0xsirrush/color v1.7.0 github.com/allegro/bigcache v1.2.1 github.com/aristanetworks/goarista v0.0.0-20190704150520-f44d68189fd7 // indirect + github.com/binance-chain/go-sdk v1.1.3 + github.com/blockcypher/gobcy v1.3.1 + github.com/btcsuite/btcd v0.20.1-beta + github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f + github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d github.com/davecgh/go-spew v1.1.1 github.com/dchest/siphash v1.2.1 // indirect github.com/deckarep/golang-set v1.7.1 // indirect @@ -14,13 +19,17 @@ require ( github.com/ebfe/keccak v0.0.0-20150115210727-5cc570678d1b github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/elastic/gosigar v0.10.4 // indirect + github.com/etcd-io/bbolt v1.3.3 // indirect github.com/ethereum/go-ethereum v1.8.22-0.20190710074244-72029f0f88f6 github.com/fortytw2/leaktest v1.3.0 // indirect + github.com/gogo/protobuf v1.3.0 // indirect + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect github.com/golang/protobuf v1.3.2 github.com/google/uuid v1.1.1 github.com/gorilla/websocket v1.4.0 github.com/hashicorp/golang-lru v0.5.1 github.com/incognitochain/go-libp2p-grpc v0.0.0-20181024123959-d1f24bf49b50 + github.com/ipfs/go-cid v0.0.3 // indirect github.com/jbenet/goprocess v0.1.3 github.com/jessevdk/go-flags v1.4.0 github.com/jrick/logrotate v1.0.0 @@ -35,9 +44,12 @@ require ( github.com/libp2p/go-libp2p-protocol v0.1.0 // indirect github.com/libp2p/go-libp2p-pubsub v0.1.1 github.com/libp2p/go-libp2p-swarm v0.2.1 + github.com/libp2p/go-openssl v0.0.3 // indirect github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/minio/sha256-simd v0.1.1 // indirect github.com/multiformats/go-multiaddr v0.0.4 + github.com/multiformats/go-multihash v0.0.8 // indirect github.com/olekukonko/tablewriter v0.0.1 // indirect github.com/olivere/elastic v6.2.21+incompatible github.com/onsi/ginkgo v1.10.3 // indirect @@ 
-46,12 +58,13 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.8.1 github.com/prometheus/tsdb v0.9.1 // indirect - github.com/rs/cors v1.6.0 // indirect + github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa // indirect github.com/stathat/consistent v1.0.0 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect - github.com/stretchr/testify v1.5.0 + github.com/stretchr/testify v1.5.1 github.com/syndtr/goleveldb v1.0.0 + github.com/tendermint/tendermint v0.32.0 go.opencensus.io v0.22.0 // indirect golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect @@ -59,4 +72,7 @@ require ( google.golang.org/grpc v1.27.1 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect stathat.com/c/consistent v1.0.0 // indirect + ) + +replace github.com/tendermint/go-amino => github.com/binance-chain/bnc-go-amino v0.14.1-binance.1 diff --git a/go.sum b/go.sum index 418925783e..c1d7df1886 100644 --- a/go.sum +++ b/go.sum @@ -541,4 +541,4 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c= -stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= +stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= \ No newline at end of file diff --git a/incognito.go b/incognito.go index 0d48dc241c..d2a62bf18e 100644 --- a/incognito.go +++ b/incognito.go @@ -11,6 +11,7 @@ import ( "runtime/debug" "strconv" + "github.com/incognitochain/incognito-chain/blockchain" 
"github.com/incognitochain/incognito-chain/common" _ "github.com/incognitochain/incognito-chain/consensus/blsbft" "github.com/incognitochain/incognito-chain/databasemp" @@ -20,6 +21,8 @@ import ( "github.com/incognitochain/incognito-chain/limits" "github.com/incognitochain/incognito-chain/metrics" "github.com/incognitochain/incognito-chain/wallet" + btcrelaying "github.com/incognitochain/incognito-chain/relaying/btc" + "github.com/btcsuite/btcd/chaincfg" ) //go:generate mockery -dir=incdb/ -name=Database @@ -31,6 +34,15 @@ var ( // as a service and reacts accordingly. var winServiceMain func() (bool, error) +func getBTCRelayingChain(btcRelayingChainID string) (*btcrelaying.BlockChain, error) { + relayingChainParams := map[string]*chaincfg.Params{ + blockchain.TestnetBTCChainID: btcrelaying.GetTestNet3Params(), + blockchain.MainnetBTCChainID: btcrelaying.GetMainNetParams(), + } + return btcrelaying.GetChainV2(filepath.Join(cfg.DataDir, "btcrelaying"), relayingChainParams[btcRelayingChainID]) +} + + // mainMaster is the real main function for Incognito network. It is necessary to work around // the fact that deferred functions do not run when os.Exit() is called. The // optional serverChan parameter is mainly used by the service code to be @@ -99,10 +111,19 @@ func mainMaster(serverChan chan<- *Server) error { } } } + + // Create btcrelaying chain + btcChain, err := getBTCRelayingChain(activeNetParams.Params.BTCRelayingHeaderChainID) + if err != nil { + Logger.log.Error("could not get or create btc relaying chain") + Logger.log.Error(err) + panic(err) + } + // Create server and start it. 
server := Server{} server.wallet = walletObj - err = server.NewServer(cfg.Listener, db, dbmp, activeNetParams.Params, version, interrupt) + err = server.NewServer(cfg.Listener, db, dbmp, activeNetParams.Params, version, btcChain, interrupt) if err != nil { Logger.log.Errorf("Unable to start server on %+v", cfg.Listener) Logger.log.Error(err) diff --git a/log.go b/log.go index 278c90351f..fc162cb644 100644 --- a/log.go +++ b/log.go @@ -2,6 +2,7 @@ package main import ( "fmt" + relaying "github.com/incognitochain/incognito-chain/relaying/bnb" "github.com/incognitochain/incognito-chain/dataaccessobject" "os" "path/filepath" @@ -56,6 +57,7 @@ var ( metadataLogger = backendLog.Logger("Metadata log", false) trieLogger = backendLog.Logger("Trie log", false) peerv2Logger = backendLog.Logger("Peerv2 log", false) + relayingLogger = backendLog.Logger("Relaying log", false) wrapperLogger = backendLog.Logger("Wrapper log", false) daov2Logger = backendLog.Logger("DAO log", false) ) @@ -96,9 +98,9 @@ func init() { metadata.Logger.Init(metadataLogger) trie.Logger.Init(trieLogger) peerv2.Logger.Init(peerv2Logger) + relaying.Logger.Init(relayingLogger) wrapper.Logger.Init(wrapperLogger) dataaccessobject.Logger.Init(daov2Logger) - } // subsystemLoggers maps each subsystem identifier to its associated logger. 
diff --git a/metadata/beaconportalreward.go b/metadata/beaconportalreward.go new file mode 100644 index 0000000000..d7ebe62ad2 --- /dev/null +++ b/metadata/beaconportalreward.go @@ -0,0 +1,27 @@ +package metadata + +import ( + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" +) + +type PortalRewardContent struct { + BeaconHeight uint64 + Rewards []*statedb.PortalRewardInfo +} + +func NewPortalReward(beaconHeight uint64, receivers []*statedb.PortalRewardInfo) *PortalRewardContent { + return &PortalRewardContent{ + BeaconHeight: beaconHeight, + Rewards: receivers, + } +} + +type PortalTotalCustodianReward struct { + Rewards []*statedb.RewardInfoDetail +} + +func NewPortalTotalCustodianReward(rewards []*statedb.RewardInfoDetail) *PortalTotalCustodianReward { + return &PortalTotalCustodianReward{ + Rewards: rewards, + } +} diff --git a/metadata/common.go b/metadata/common.go index f94905a6c4..54fd859897 100755 --- a/metadata/common.go +++ b/metadata/common.go @@ -69,6 +69,48 @@ func ParseMetadata(meta interface{}) (Metadata, error) { md = &PDEWithdrawalResponse{} case PDEContributionResponseMeta: md = &PDEContributionResponse{} + case PortalCustodianDepositMeta: + md = &PortalCustodianDeposit{} + case PortalUserRegisterMeta: + md = &PortalUserRegister{} + case PortalUserRequestPTokenMeta: + md = &PortalRequestPTokens{} + case PortalCustodianDepositResponseMeta: + md = &PortalCustodianDepositResponse{} + case PortalUserRequestPTokenResponseMeta: + md = &PortalRequestPTokensResponse{} + case PortalRedeemRequestMeta: + md = &PortalRedeemRequest{} + case PortalRedeemRequestResponseMeta: + md = &PortalRedeemRequestResponse{} + case PortalRequestUnlockCollateralMeta: + md = &PortalRequestUnlockCollateral{} + case PortalExchangeRatesMeta: + md = &PortalExchangeRates{} + case RelayingBNBHeaderMeta: + md = &RelayingHeader{} + case RelayingBTCHeaderMeta: + md = &RelayingHeader{} + case PortalCustodianWithdrawRequestMeta: + md = 
&PortalCustodianWithdrawRequest{} + case PortalCustodianWithdrawResponseMeta: + md = &PortalCustodianWithdrawResponse{} + case PortalLiquidateCustodianMeta: + md = &PortalLiquidateCustodian{} + case PortalLiquidateCustodianResponseMeta: + md = &PortalLiquidateCustodianResponse{} + case PortalRequestWithdrawRewardMeta: + md = &PortalRequestWithdrawReward{} + case PortalRequestWithdrawRewardResponseMeta: + md = &PortalWithdrawRewardResponse{} + case PortalRedeemLiquidateExchangeRatesMeta: + md = &PortalRedeemLiquidateExchangeRates{} + case PortalRedeemLiquidateExchangeRatesResponseMeta: + md = &PortalRedeemLiquidateExchangeRatesResponse{} + case PortalLiquidationCustodianDepositMeta: + md = &PortalLiquidationCustodianDeposit{} + case PortalLiquidationCustodianDepositResponseMeta: + md = &PortalLiquidationCustodianDepositResponse{} case BurningForDepositToSCRequestMeta: md = &BurningRequest{} default: diff --git a/metadata/constants.go b/metadata/constants.go index aaef824c30..02e0620478 100755 --- a/metadata/constants.go +++ b/metadata/constants.go @@ -41,6 +41,38 @@ const ( PDEWithdrawalResponseMeta = 94 PDEContributionResponseMeta = 95 + // portal + PortalCustodianDepositMeta = 100 + PortalUserRegisterMeta = 101 + PortalUserRequestPTokenMeta = 102 + PortalCustodianDepositResponseMeta = 103 + PortalUserRequestPTokenResponseMeta = 104 + PortalExchangeRatesMeta = 105 + PortalRedeemRequestMeta = 106 + PortalRedeemRequestResponseMeta = 107 + PortalRequestUnlockCollateralMeta = 108 + PortalRequestUnlockCollateralResponseMeta = 109 + PortalCustodianWithdrawRequestMeta = 110 + PortalCustodianWithdrawResponseMeta = 111 + PortalLiquidateCustodianMeta = 112 + PortalLiquidateCustodianResponseMeta = 113 + PortalLiquidateTPExchangeRatesMeta = 114 + PortalLiquidateTPExchangeRatesResponseMeta = 115 + PortalExpiredWaitingPortingReqMeta = 116 + + PortalRewardMeta = 117 + PortalRequestWithdrawRewardMeta = 118 + PortalRequestWithdrawRewardResponseMeta = 119 + 
PortalRedeemLiquidateExchangeRatesMeta = 120 + PortalRedeemLiquidateExchangeRatesResponseMeta = 121 + PortalLiquidationCustodianDepositMeta = 122 + PortalLiquidationCustodianDepositResponseMeta = 123 + PortalTotalRewardCustodianMeta = 124 + + // relaying + RelayingBNBHeaderMeta = 200 + RelayingBTCHeaderMeta = 201 + // incognito mode for smart contract BurningForDepositToSCRequestMeta = 96 BurningConfirmForDepositToSCMeta = 97 @@ -56,6 +88,14 @@ var minerCreatedMetaTypes = []int{ PDETradeResponseMeta, PDEWithdrawalResponseMeta, PDEContributionResponseMeta, + PortalUserRequestPTokenResponseMeta, + PortalCustodianDepositResponseMeta, + PortalRedeemRequestResponseMeta, + PortalCustodianWithdrawResponseMeta, + PortalLiquidateCustodianResponseMeta, + PortalRequestWithdrawRewardResponseMeta, + PortalRedeemLiquidateExchangeRatesResponseMeta, + PortalLiquidationCustodianDepositResponseMeta, } // Special rules for shardID: stored as 2nd param of instruction of BeaconBlock @@ -82,4 +122,4 @@ const ( ETHConfirmationBlocks = 15 ) -var AcceptedWithdrawRewardRequestVersion = []int{0, 1} +var AcceptedWithdrawRewardRequestVersion = []int{0, 1} \ No newline at end of file diff --git a/metadata/error.go b/metadata/error.go index ec518e894d..dab00b7336 100644 --- a/metadata/error.go +++ b/metadata/error.go @@ -41,6 +41,11 @@ const ( PDEWithdrawalRequestFromMapError CouldNotGetExchangeRateError RejectInvalidFee + + // portal + PortalRequestPTokenParamError + PortalRedeemRequestParamError + PortalRedeemLiquidateExchangeRatesParamError ) var ErrCodeMessage = map[int]struct { @@ -80,12 +85,17 @@ var ErrCodeMessage = map[int]struct { StopAutoStakingRequestAlreadyStopError: {-4005, "Stop Auto Staking Request Already Stop Error"}, // -5xxx dev reward error - WrongIncognitoDAOPaymentAddressError: {-5001, "Invalid dev account"}, + WrongIncognitoDAOPaymentAddressError: {-5001, "Invalid dev account"}, // pde - PDEWithdrawalRequestFromMapError: {-6001, "PDE withdrawal request Error"}, - 
CouldNotGetExchangeRateError: {-6002, "Could not get the exchange rate error"}, - RejectInvalidFee: {-6003, "Reject invalid fee"}, + PDEWithdrawalRequestFromMapError: {-6001, "PDE withdrawal request Error"}, + CouldNotGetExchangeRateError: {-6002, "Could not get the exchange rate error"}, + RejectInvalidFee: {-6003, "Reject invalid fee"}, + + // portal + PortalRequestPTokenParamError: {-7001, "Portal request ptoken param error"}, + PortalRedeemRequestParamError: {-7002, "Portal redeem request param error"}, + PortalRedeemLiquidateExchangeRatesParamError: {-7003, "Portal redeem liquidate exchange rates param error"}, } type MetadataTxError struct { diff --git a/metadata/pdecontributionresponse.go b/metadata/pdecontributionresponse.go index 8a6262cb75..25f371e209 100644 --- a/metadata/pdecontributionresponse.go +++ b/metadata/pdecontributionresponse.go @@ -152,4 +152,4 @@ func (iRes PDEContributionResponse) VerifyMinerCreatedTxBeforeGettingInBlock( } instUsed[idx] = 1 return true, nil -} +} \ No newline at end of file diff --git a/metadata/portalcustodiandeposit.go b/metadata/portalcustodiandeposit.go new file mode 100644 index 0000000000..4b2375907e --- /dev/null +++ b/metadata/portalcustodiandeposit.go @@ -0,0 +1,156 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +// PortalCustodianDeposit - portal custodian deposit collateral (PRV) +// metadata - custodian deposit - create normal tx with this metadata +type PortalCustodianDeposit struct { + MetadataBase + IncogAddressStr string + RemoteAddresses []statedb.RemoteAddress // token symbol: remote address + DepositedAmount uint64 +} + +// PortalCustodianDepositAction - shard validator creates instruction that contain this action content +// it will be append to ShardToBeaconBlock 
+type PortalCustodianDepositAction struct { + Meta PortalCustodianDeposit + TxReqID common.Hash + ShardID byte +} + +// PortalCustodianDepositContent - Beacon builds a new instruction with this content after receiving a instruction from shard +// It will be appended to beaconBlock +// both accepted and refund status +type PortalCustodianDepositContent struct { + IncogAddressStr string + RemoteAddresses []statedb.RemoteAddress + DepositedAmount uint64 + TxReqID common.Hash + ShardID byte +} + +// PortalCustodianDepositStatus - Beacon tracks status of custodian deposit tx into db +type PortalCustodianDepositStatus struct { + Status byte + IncogAddressStr string + RemoteAddresses []statedb.RemoteAddress + DepositedAmount uint64 +} + +func NewPortalCustodianDeposit(metaType int, incognitoAddrStr string, remoteAddrs []statedb.RemoteAddress, amount uint64) (*PortalCustodianDeposit, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + custodianDepositMeta := &PortalCustodianDeposit{ + IncogAddressStr: incognitoAddrStr, + RemoteAddresses: remoteAddrs, + DepositedAmount: amount, + } + custodianDepositMeta.MetadataBase = metadataBase + return custodianDepositMeta, nil +} + +func (custodianDeposit PortalCustodianDeposit) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (custodianDeposit PortalCustodianDeposit) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // Note: the metadata was already verified with *transaction.TxCustomToken level so no need to verify with *transaction.Tx level again as *transaction.Tx is embedding property of *transaction.TxCustomToken + //if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + // return true, true, nil + //} + + // validate IncogAddressStr + keyWallet, err := 
wallet.Base58CheckDeserialize(custodianDeposit.IncogAddressStr) + if err != nil { + return false, false, errors.New("IncogAddressStr of custodian incorrect") + } + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, errors.New("wrong custodian incognito address") + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, errors.New("custodian incognito address is not signer tx") + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx custodian deposit must be TxNormalType") + } + + // check burning tx + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("must send coin to burning address") + } + + // validate amount deposit + if custodianDeposit.DepositedAmount == 0 { + return false, false, errors.New("deposit amount should be larger than 0") + } + if custodianDeposit.DepositedAmount != txr.CalculateTxValue() { + return false, false, errors.New("deposit amount should be equal to the tx value") + } + + // validate remote addresses + if len(custodianDeposit.RemoteAddresses) == 0 { + return false, false, errors.New("remote addresses should be at least one") + } + for _, remoteAddr := range custodianDeposit.RemoteAddresses { + if !common.IsPortalToken(remoteAddr.GetPTokenID()) { + return false, false, errors.New("TokenID in remote address is invalid") + } + } + + return true, true, nil +} + +func (custodianDeposit PortalCustodianDeposit) ValidateMetadataByItself() bool { + return custodianDeposit.Type == PortalCustodianDepositMeta +} + +func (custodianDeposit PortalCustodianDeposit) Hash() *common.Hash { + record := custodianDeposit.MetadataBase.Hash().String() + record += custodianDeposit.IncogAddressStr + for _, remoteAddr := range custodianDeposit.RemoteAddresses { + record += remoteAddr.GetPTokenID() + record += remoteAddr.GetAddress() + } + record += strconv.FormatUint(custodianDeposit.DepositedAmount, 10) + // 
final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (custodianDeposit *PortalCustodianDeposit) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalCustodianDepositAction{ + Meta: *custodianDeposit, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalCustodianDepositMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (custodianDeposit *PortalCustodianDeposit) CalculateSize() uint64 { + return calculateSize(custodianDeposit) +} diff --git a/metadata/portalcustodiandepositresponse.go b/metadata/portalcustodiandepositresponse.go new file mode 100644 index 0000000000..d34f9a8812 --- /dev/null +++ b/metadata/portalcustodiandepositresponse.go @@ -0,0 +1,140 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalCustodianDepositResponse struct { + MetadataBase + DepositStatus string + ReqTxID common.Hash + CustodianAddrStr string +} + +func NewPortalCustodianDepositResponse( + depositStatus string, + reqTxID common.Hash, + custodianAddressStr string, + metaType int, +) *PortalCustodianDepositResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + return &PortalCustodianDepositResponse{ + DepositStatus: depositStatus, + ReqTxID: reqTxID, + MetadataBase: metadataBase, + CustodianAddrStr: custodianAddressStr, + } +} + +func (iRes PortalCustodianDepositResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func 
(iRes PortalCustodianDepositResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalCustodianDepositResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalCustodianDepositResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return iRes.Type == PortalCustodianDepositResponseMeta +} + +func (iRes PortalCustodianDepositResponse) Hash() *common.Hash { + record := iRes.DepositStatus + record += iRes.ReqTxID.String() + record += iRes.MetadataBase.Hash().String() + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalCustodianDepositResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +//todo: +func (iRes PortalCustodianDepositResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalCustodianDeposit response instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalCustodianDepositMeta) { + continue + } + instDepositStatus := inst[2] + if instDepositStatus != iRes.DepositStatus || + (instDepositStatus != common.PortalCustodianDepositRefundChainStatus) { + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var custodianAddrStrFromInst string + var depositedAmountFromInst uint64 + + contentBytes := []byte(inst[3]) + var custodianDepositContent PortalCustodianDepositContent + err := 
json.Unmarshal(contentBytes, &custodianDepositContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occured while parsing portal custodian deposit content: ", err) + continue + } + shardIDFromInst = custodianDepositContent.ShardID + txReqIDFromInst = custodianDepositContent.TxReqID + custodianAddrStrFromInst = custodianDepositContent.IncogAddressStr + depositedAmountFromInst = custodianDepositContent.DepositedAmount + + if !bytes.Equal(iRes.ReqTxID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + key, err := wallet.Base58CheckDeserialize(custodianAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing custodian address string: ", err) + continue + } + + // collateral must be PRV + PRVIDStr := common.PRVCoinID.String() + _, pk, paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + depositedAmountFromInst != paidAmount || + PRVIDStr != assetID.String() { + continue + } + idx = i + break + } + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalCustodianDeposit instruction found for PortalCustodianDepositResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/portalcustodianwithdraw.go b/metadata/portalcustodianwithdraw.go new file mode 100644 index 0000000000..77c0afb5d8 --- /dev/null +++ b/metadata/portalcustodianwithdraw.go @@ -0,0 +1,137 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "reflect" + "strconv" +) + +type PortalCustodianWithdrawRequest struct { + MetadataBase + PaymentAddress string + Amount uint64 +} + +type PortalCustodianWithdrawRequestAction struct { + Meta 
PortalCustodianWithdrawRequest + TxReqID common.Hash + ShardID byte +} + +type PortalCustodianWithdrawRequestContent struct { + PaymentAddress string + Amount uint64 + RemainFreeCollateral uint64 + TxReqID common.Hash + ShardID byte +} + +type CustodianWithdrawRequestStatus struct { + PaymentAddress string + Amount uint64 + Status int + RemainCustodianFreeCollateral uint64 +} + +func NewCustodianWithdrawRequestStatus(paymentAddress string, amount uint64, status int, remainCustodianFreeCollateral uint64) *CustodianWithdrawRequestStatus { + return &CustodianWithdrawRequestStatus{PaymentAddress: paymentAddress, Amount: amount, Status: status, RemainCustodianFreeCollateral: remainCustodianFreeCollateral} +} + +func NewPortalCustodianWithdrawRequest(metaType int, paymentAddress string, amount uint64) (*PortalCustodianWithdrawRequest, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + + portalCustodianWithdrawReq := &PortalCustodianWithdrawRequest{ + PaymentAddress: paymentAddress, + Amount: amount, + } + + portalCustodianWithdrawReq.MetadataBase = metadataBase + + return portalCustodianWithdrawReq, nil +} + +func (Withdraw PortalCustodianWithdrawRequest) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + // NOTE: verify supported tokens pair as needed + return true, nil +} + +func (Withdraw PortalCustodianWithdrawRequest) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + return true, true, nil + } + + if len(Withdraw.PaymentAddress) <= 0 { + return false, false, errors.New("Payment address should be not empty") + } + + // validate Payment address + keyWallet, err := wallet.Base58CheckDeserialize(Withdraw.PaymentAddress) + if err != nil { + return false, false, 
NewMetadataTxError(IssuingRequestNewIssuingRequestFromMapEror, errors.New("ContributorAddressStr incorrect")) + } + + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, errors.New("wrong custodian incognito address") + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, errors.New("custodian incognito address is not signer tx") + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx custodian deposit must be TxNormalType") + } + + if Withdraw.Amount <= 0 { + return false, false, errors.New("Amount should be larger than 0") + } + + return true, true, nil +} + +func (Withdraw PortalCustodianWithdrawRequest) ValidateMetadataByItself() bool { + return Withdraw.Type == PortalCustodianWithdrawRequestMeta +} + +func (Withdraw PortalCustodianWithdrawRequest) Hash() *common.Hash { + record := Withdraw.MetadataBase.Hash().String() + record += Withdraw.PaymentAddress + record += strconv.FormatUint(Withdraw.Amount, 10) + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (Withdraw *PortalCustodianWithdrawRequest) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalCustodianWithdrawRequestAction{ + Meta: *Withdraw, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalCustodianWithdrawRequestMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (Withdraw *PortalCustodianWithdrawRequest) CalculateSize() uint64 { + return calculateSize(Withdraw) +} diff --git a/metadata/portalcustodianwithdrawresponse.go b/metadata/portalcustodianwithdrawresponse.go new file mode 100644 index 0000000000..497e4eb5c3 --- /dev/null +++ 
b/metadata/portalcustodianwithdrawresponse.go @@ -0,0 +1,145 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalCustodianWithdrawResponse struct { + MetadataBase + RequestStatus string + ReqTxID common.Hash + PaymentAddress string + Amount uint64 +} + +func NewPortalCustodianWithdrawResponse( + requestStatus string, + reqTxId common.Hash, + paymentAddress string, + amount uint64, + metaType int, +) *PortalCustodianWithdrawResponse { + metaDataBase := MetadataBase{Type: metaType} + + return &PortalCustodianWithdrawResponse{ + MetadataBase: metaDataBase, + RequestStatus: requestStatus, + ReqTxID: reqTxId, + PaymentAddress: paymentAddress, + Amount: amount, + } +} + +func (responseMeta PortalCustodianWithdrawResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (responseMeta PortalCustodianWithdrawResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (responseMeta PortalCustodianWithdrawResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (responseMeta PortalCustodianWithdrawResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return responseMeta.Type == PortalCustodianWithdrawResponseMeta +} + +func (responseMeta PortalCustodianWithdrawResponse) Hash() *common.Hash { + record := responseMeta.MetadataBase.Hash().String() + record += responseMeta.RequestStatus + 
record += responseMeta.ReqTxID.String() + record += responseMeta.PaymentAddress + record += strconv.FormatUint(responseMeta.Amount, 10) + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (responseMeta *PortalCustodianWithdrawResponse) CalculateSize() uint64 { + return calculateSize(responseMeta) +} + +func (responseMeta PortalCustodianWithdrawResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalRequestPTokens response instruction + continue + } + + instMetaType := inst[0] + if instUsed[i] > 0 || instMetaType != strconv.Itoa(PortalCustodianWithdrawRequestMeta) { + continue + } + + instDepositStatus := inst[2] + if instDepositStatus != responseMeta.RequestStatus || + (instDepositStatus != common.PortalCustodianWithdrawRequestAcceptedStatus) { + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var requesterAddrStrFromInst string + var portingAmountFromInst uint64 + + contentBytes := []byte(inst[3]) + var custodianWithdrawRequest PortalCustodianWithdrawRequestContent + err := json.Unmarshal(contentBytes, &custodianWithdrawRequest) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occured while parsing custodian withdraw request content: ", err) + continue + } + shardIDFromInst = custodianWithdrawRequest.ShardID + txReqIDFromInst = custodianWithdrawRequest.TxReqID + requesterAddrStrFromInst = custodianWithdrawRequest.PaymentAddress + portingAmountFromInst = custodianWithdrawRequest.Amount + receivingTokenIDStr := common.PRVCoinID.String() + + if !bytes.Equal(responseMeta.ReqTxID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + + key, err := wallet.Base58CheckDeserialize(requesterAddrStrFromInst) + if 
err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing receiver address string: ", err) + continue + } + + _, pk, amount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + portingAmountFromInst != amount || + receivingTokenIDStr != assetID.String() { + continue + } + + idx = i + break + } + + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalCustodianWithdrawRequest instruction found for PortalCustodianWithdrawResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/portalexchangerate.go b/metadata/portalexchangerate.go new file mode 100644 index 0000000000..88cbfca787 --- /dev/null +++ b/metadata/portalexchangerate.go @@ -0,0 +1,146 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "reflect" + "strconv" +) + +type PortalExchangeRates struct { + MetadataBase + SenderAddress string + Rates []*ExchangeRateInfo //amount * 10^6 (USDT) +} + +type PortalExchangeRatesAction struct { + Meta PortalExchangeRates + TxReqID common.Hash + LockTime int64 + ShardID byte +} + +type ExchangeRateInfo struct { + PTokenID string + Rate uint64 +} + +type ExchangeRatesRequestStatus struct { + Status byte + SenderAddress string + Rates []*ExchangeRateInfo +} + +func NewExchangeRatesRequestStatus(status byte, senderAddress string, rates []*ExchangeRateInfo) *ExchangeRatesRequestStatus { + return &ExchangeRatesRequestStatus{Status: status, SenderAddress: senderAddress, Rates: rates} +} + +func NewPortalExchangeRates(metaType int, senderAddress string, currency []*ExchangeRateInfo) (*PortalExchangeRates, error) { + metadataBase := MetadataBase{Type: metaType} + + 
portalExchangeRates := &PortalExchangeRates{ + SenderAddress: senderAddress, + Rates: currency, + } + + portalExchangeRates.MetadataBase = metadataBase + + return portalExchangeRates, nil +} + +type PortalExchangeRatesContent struct { + SenderAddress string + Rates []*ExchangeRateInfo + TxReqID common.Hash + LockTime int64 +} + +func (portalExchangeRates PortalExchangeRates) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + // NOTE: verify supported tokens pair as needed + return true, nil +} + +func (portalExchangeRates PortalExchangeRates) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + return true, true, nil + } + + keyWallet, err := wallet.Base58CheckDeserialize(portalExchangeRates.SenderAddress) + if err != nil { + return false, false, NewMetadataTxError(IssuingRequestNewIssuingRequestFromMapEror, errors.New("SenderAddress incorrect")) + } + + senderAddr := keyWallet.KeySet.PaymentAddress + if len(senderAddr.Pk) == 0 { + return false, false, errors.New("Sender address invalid, sender address must be incognito address") + } + + if !bytes.Equal(txr.GetSigPubKey()[:], senderAddr.Pk[:]) { + return false, false, errors.New("Sender address is not signer tx") + } + + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("Tx exchange rates must be TxNormalType") + } + + + for _, value := range portalExchangeRates.Rates { + if !common.IsPortalExchangeRateToken(value.PTokenID) { + return false, false, errors.New("Public token is not supported currently") + } + + if value.Rate <= 0 { + return false, false, errors.New("Exchange rates should be larger than 0") + } + } + + return true, true, nil +} + +func (portalExchangeRates PortalExchangeRates) ValidateMetadataByItself() bool { + return 
portalExchangeRates.Type == PortalExchangeRatesMeta +} + +func (portalExchangeRates PortalExchangeRates) Hash() *common.Hash { + record := portalExchangeRates.MetadataBase.Hash().String() + record += portalExchangeRates.SenderAddress + for _, rateInfo := range portalExchangeRates.Rates { + record += rateInfo.PTokenID + record += strconv.FormatUint(rateInfo.Rate, 10) + } + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (portalExchangeRates *PortalExchangeRates) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalExchangeRatesAction{ + Meta: *portalExchangeRates, + TxReqID: *tx.Hash(), + LockTime: tx.GetLockTime(), + ShardID: shardID, + } + + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalExchangeRatesMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (portalExchangeRates *PortalExchangeRates) CalculateSize() uint64 { + return calculateSize(portalExchangeRates) +} diff --git a/metadata/portalexpiredwaitingportingreq.go b/metadata/portalexpiredwaitingportingreq.go new file mode 100644 index 0000000000..792e30b11f --- /dev/null +++ b/metadata/portalexpiredwaitingportingreq.go @@ -0,0 +1,79 @@ +package metadata + +import ( + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "strconv" +) + +// PortalRedeemRequest - portal user redeem requests to get public token by burning ptoken +// metadata - redeem request - create normal tx with this metadata +type PortalExpiredWaitingPortingReq struct { + MetadataBase + UniquePortingID string + ExpiredByLiquidation bool +} + +// PortalExpiredWaitingPortingReqContent - Beacon builds a new instruction with this content after detecting custodians run away +// It 
will be appended to beaconBlock +type PortalExpiredWaitingPortingReqContent struct { + MetadataBase + UniquePortingID string + ExpiredByLiquidation bool + ShardID byte +} + +// PortalExpiredWaitingPortingReqStatus - Beacon tracks status of custodian liquidation into db +type PortalExpiredWaitingPortingReqStatus struct { + Status byte + UniquePortingID string + ShardID byte + ExpiredByLiquidation bool + ExpiredBeaconHeight uint64 +} + +func NewPortalExpiredWaitingPortingReq( + metaType int, + uniquePortingID string, + expiredByLiquidation bool, + ) (*PortalExpiredWaitingPortingReq, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + liquidCustodianMeta := &PortalExpiredWaitingPortingReq{ + UniquePortingID: uniquePortingID, + ExpiredByLiquidation: expiredByLiquidation, + } + liquidCustodianMeta.MetadataBase = metadataBase + return liquidCustodianMeta, nil +} + +func (expiredPortingReq PortalExpiredWaitingPortingReq) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (expiredPortingReq PortalExpiredWaitingPortingReq) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return true, true, nil +} + +func (expiredPortingReq PortalExpiredWaitingPortingReq) ValidateMetadataByItself() bool { + return expiredPortingReq.Type == PortalExpiredWaitingPortingReqMeta +} + +func (expiredPortingReq PortalExpiredWaitingPortingReq) Hash() *common.Hash { + record := expiredPortingReq.MetadataBase.Hash().String() + record += expiredPortingReq.UniquePortingID + record += strconv.FormatBool(expiredPortingReq.ExpiredByLiquidation) + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (expiredPortingReq *PortalExpiredWaitingPortingReq) CalculateSize() uint64 { + return calculateSize(expiredPortingReq) +} diff --git a/metadata/portalliquidatecustodian.go b/metadata/portalliquidatecustodian.go 
new file mode 100644 index 0000000000..44d2b3bec6 --- /dev/null +++ b/metadata/portalliquidatecustodian.go @@ -0,0 +1,108 @@ +package metadata + +import ( + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "strconv" +) + +// PortalRedeemRequest - portal user redeem requests to get public token by burning ptoken +// metadata - redeem request - create normal tx with this metadata +type PortalLiquidateCustodian struct { + MetadataBase + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + RedeemPubTokenAmount uint64 + MintedCollateralAmount uint64 // minted PRV amount for sending back to users + RedeemerIncAddressStr string + CustodianIncAddressStr string + LiquidatedByExchangeRate bool +} + +// PortalLiquidateCustodianContent - Beacon builds a new instruction with this content after detecting custodians run away +// It will be appended to beaconBlock +type PortalLiquidateCustodianContent struct { + MetadataBase + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + RedeemPubTokenAmount uint64 + MintedCollateralAmount uint64 // minted PRV amount for sending back to users + RedeemerIncAddressStr string + CustodianIncAddressStr string + LiquidatedByExchangeRate bool + ShardID byte +} + +// PortalLiquidateCustodianStatus - Beacon tracks status of custodian liquidation into db +type PortalLiquidateCustodianStatus struct { + Status byte + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + RedeemPubTokenAmount uint64 + MintedCollateralAmount uint64 // minted PRV amount for sending back to users + RedeemerIncAddressStr string + CustodianIncAddressStr string + LiquidatedByExchangeRate bool + ShardID byte + LiquidatedBeaconHeight uint64 +} + +func NewPortalLiquidateCustodian( + metaType int, + uniqueRedeemID string, + tokenID string, + redeemAmount uint64, + mintedCollateralAmount uint64, + redeemerIncAddressStr string, + custodianIncAddressStr 
string, + liquidatedByExchangeRate bool) (*PortalLiquidateCustodian, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + liquidCustodianMeta := &PortalLiquidateCustodian{ + UniqueRedeemID: uniqueRedeemID, + TokenID: tokenID, + RedeemPubTokenAmount: redeemAmount, + MintedCollateralAmount: mintedCollateralAmount, + RedeemerIncAddressStr: redeemerIncAddressStr, + CustodianIncAddressStr: custodianIncAddressStr, + LiquidatedByExchangeRate: liquidatedByExchangeRate, + } + liquidCustodianMeta.MetadataBase = metadataBase + return liquidCustodianMeta, nil +} + +func (liqCustodian PortalLiquidateCustodian) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (liqCustodian PortalLiquidateCustodian) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return true, true, nil +} + +func (liqCustodian PortalLiquidateCustodian) ValidateMetadataByItself() bool { + return liqCustodian.Type == PortalLiquidateCustodianMeta +} + +func (liqCustodian PortalLiquidateCustodian) Hash() *common.Hash { + record := liqCustodian.MetadataBase.Hash().String() + record += liqCustodian.UniqueRedeemID + record += liqCustodian.TokenID + record += strconv.FormatUint(liqCustodian.RedeemPubTokenAmount, 10) + record += strconv.FormatUint(liqCustodian.MintedCollateralAmount, 10) + record += liqCustodian.RedeemerIncAddressStr + record += liqCustodian.CustodianIncAddressStr + record += strconv.FormatBool(liqCustodian.LiquidatedByExchangeRate) + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (liqCustodian *PortalLiquidateCustodian) CalculateSize() uint64 { + return calculateSize(liqCustodian) +} diff --git a/metadata/portalliquidatecustodianresponse.go b/metadata/portalliquidatecustodianresponse.go new file mode 100644 index 0000000000..9f17a55375 --- /dev/null +++ 
b/metadata/portalliquidatecustodianresponse.go @@ -0,0 +1,152 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalLiquidateCustodianResponse struct { + MetadataBase + UniqueRedeemID string + MintedCollateralAmount uint64 // minted PRV amount for sending back to users + RedeemerIncAddressStr string + CustodianIncAddressStr string +} + +func NewPortalLiquidateCustodianResponse( + uniqueRedeemID string, + mintedAmount uint64, + redeemerIncAddressStr string, + custodianIncAddressStr string, + metaType int, +) *PortalLiquidateCustodianResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + return &PortalLiquidateCustodianResponse{ + MetadataBase: metadataBase, + UniqueRedeemID: uniqueRedeemID, + MintedCollateralAmount: mintedAmount, + RedeemerIncAddressStr: redeemerIncAddressStr, + CustodianIncAddressStr: custodianIncAddressStr, + } +} + +func (iRes PortalLiquidateCustodianResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (iRes PortalLiquidateCustodianResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalLiquidateCustodianResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalLiquidateCustodianResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return iRes.Type == PortalLiquidateCustodianResponseMeta +} + +func (iRes 
PortalLiquidateCustodianResponse) Hash() *common.Hash { + record := iRes.UniqueRedeemID + record += strconv.FormatUint(iRes.MintedCollateralAmount, 10) + record += iRes.RedeemerIncAddressStr + record += iRes.CustodianIncAddressStr + record += iRes.MetadataBase.Hash().String() + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalLiquidateCustodianResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +func (iRes PortalLiquidateCustodianResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalLiquidateCustodian response instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalLiquidateCustodianMeta) { + continue + } + + status := inst[2] + if status != common.PortalLiquidateCustodianSuccessChainStatus { + continue + } + + var shardIDFromInst byte + var custodianAddrStrFromInst string + var redeemerIncAddressStrFromInst string + var mintedCollateralAmountFromInst uint64 + + contentBytes := []byte(inst[3]) + var liqCustodianContent PortalLiquidateCustodianContent + err := json.Unmarshal(contentBytes, &liqCustodianContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occured while parsing portal liquidation custodian content: %v", err) + continue + } + + custodianAddrStrFromInst = liqCustodianContent.CustodianIncAddressStr + redeemerIncAddressStrFromInst = liqCustodianContent.RedeemerIncAddressStr + mintedCollateralAmountFromInst = liqCustodianContent.MintedCollateralAmount + shardIDFromInst = liqCustodianContent.ShardID + + if shardIDFromInst != shardID { + Logger.log.Error("WARNING - VALIDATION: shardID is incorrect: shardIDFromInst %v - shardID %v ", shardIDFromInst, 
shardID) + continue + } + + _, err = wallet.Base58CheckDeserialize(custodianAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing custodian address string: ", err) + continue + } + + redeemerKey, err := wallet.Base58CheckDeserialize(redeemerIncAddressStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing redeemer address string: ", err) + continue + } + + // collateral must be PRV + PRVIDStr := common.PRVCoinID.String() + _, pk, paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(redeemerKey.KeySet.PaymentAddress.Pk[:], pk[:]) || + mintedCollateralAmountFromInst != paidAmount || + PRVIDStr != assetID.String() { + continue + } + idx = i + break + } + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalLiquidateCustodian instruction found for PortalLiquidateCustodianResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/portalliquidatetpexchangerates.go b/metadata/portalliquidatetpexchangerates.go new file mode 100644 index 0000000000..c8f0453686 --- /dev/null +++ b/metadata/portalliquidatetpexchangerates.go @@ -0,0 +1,26 @@ +package metadata + +type PortalLiquidateTopPercentileExchangeRatesContent struct { + CustodianAddress string + Status string + MetaType int + TP map[string]LiquidateTopPercentileExchangeRatesDetail +} + +type LiquidateTopPercentileExchangeRatesDetail struct { + TPKey int + TPValue uint64 + HoldAmountFreeCollateral uint64 + HoldAmountPubToken uint64 +} + +type LiquidateTopPercentileExchangeRatesStatus struct { + CustodianAddress string + Status byte + Rates map[string]LiquidateTopPercentileExchangeRatesDetail //ptoken | detail +} + +func NewLiquidateTopPercentileExchangeRatesStatus(custodianAddress string, status byte, rates map[string]LiquidateTopPercentileExchangeRatesDetail) 
*LiquidateTopPercentileExchangeRatesStatus { + return &LiquidateTopPercentileExchangeRatesStatus{CustodianAddress: custodianAddress, Status: status, Rates: rates} +} + diff --git a/metadata/portalliquidationcustodiandeposit.go b/metadata/portalliquidationcustodiandeposit.go new file mode 100644 index 0000000000..7c700212fc --- /dev/null +++ b/metadata/portalliquidationcustodiandeposit.go @@ -0,0 +1,150 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "reflect" + "strconv" +) + +type PortalLiquidationCustodianDeposit struct { + MetadataBase + IncogAddressStr string + PTokenId string + DepositedAmount uint64 + FreeCollateralSelected bool +} + +type PortalLiquidationCustodianDepositAction struct { + Meta PortalLiquidationCustodianDeposit + TxReqID common.Hash + ShardID byte +} + +type PortalLiquidationCustodianDepositContent struct { + IncogAddressStr string + PTokenId string + DepositedAmount uint64 + FreeCollateralSelected bool + TxReqID common.Hash + ShardID byte +} + +type LiquidationCustodianDepositStatus struct { + TxReqID common.Hash + IncogAddressStr string + PTokenId string + DepositAmount uint64 + FreeCollateralSelected bool + Status byte +} + +func NewLiquidationCustodianDepositStatus(txReqID common.Hash, incogAddressStr string, PTokenId string, depositAmount uint64, freeCollateralSelected bool, status byte) *LiquidationCustodianDepositStatus { + return &LiquidationCustodianDepositStatus{TxReqID: txReqID, IncogAddressStr: incogAddressStr, PTokenId: PTokenId, DepositAmount: depositAmount, FreeCollateralSelected: freeCollateralSelected, Status: status} +} + +func NewPortalLiquidationCustodianDeposit(metaType int, incognitoAddrStr string, pToken string, amount uint64, freeCollateralSelected bool) (*PortalLiquidationCustodianDeposit , error) { 
+ metadataBase := MetadataBase{ + Type: metaType, + } + custodianDepositMeta := &PortalLiquidationCustodianDeposit { + IncogAddressStr: incognitoAddrStr, + PTokenId: pToken, + DepositedAmount: amount, + FreeCollateralSelected: freeCollateralSelected, + } + custodianDepositMeta.MetadataBase = metadataBase + return custodianDepositMeta, nil +} + +func (custodianDeposit PortalLiquidationCustodianDeposit) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (custodianDeposit PortalLiquidationCustodianDeposit) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // Note: the metadata was already verified with *transaction.TxCustomToken level so no need to verify with *transaction.Tx level again as *transaction.Tx is embedding property of *transaction.TxCustomToken + if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + return true, true, nil + } + + // validate IncogAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(custodianDeposit.IncogAddressStr) + if err != nil { + return false, false, errors.New("IncogAddressStr of custodian incorrect") + } + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, errors.New("wrong custodian incognito address") + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, errors.New("custodian incognito address is not signer tx") + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx custodian deposit must be TxNormalType") + } + + // check burning tx + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("must send coin to burning address") + } + + // validate amount deposit + if custodianDeposit.DepositedAmount == 0 { + return false, false, errors.New("deposit amount 
should be larger than 0") + } + if custodianDeposit.DepositedAmount != txr.CalculateTxValue() { + return false, false, errors.New("deposit amount should be equal to the tx value") + } + + if !common.IsPortalToken(custodianDeposit.PTokenId) { + return false, false, errors.New("TokenID in remote address is invalid") + } + + return true, true, nil +} + +func (custodianDeposit PortalLiquidationCustodianDeposit) ValidateMetadataByItself() bool { + return custodianDeposit.Type == PortalLiquidationCustodianDepositMeta +} + +func (custodianDeposit PortalLiquidationCustodianDeposit) Hash() *common.Hash { + record := custodianDeposit.MetadataBase.Hash().String() + record += custodianDeposit.IncogAddressStr + record += custodianDeposit.PTokenId + record += strconv.FormatUint(custodianDeposit.DepositedAmount, 10) + record += strconv.FormatBool(custodianDeposit.FreeCollateralSelected) + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (custodianDeposit *PortalLiquidationCustodianDeposit) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalLiquidationCustodianDepositAction{ + Meta: *custodianDeposit, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalLiquidationCustodianDepositMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (custodianDeposit *PortalLiquidationCustodianDeposit) CalculateSize() uint64 { + return calculateSize(custodianDeposit) +} diff --git a/metadata/portalliquidationcustodiandepositresponse.go b/metadata/portalliquidationcustodiandepositresponse.go new file mode 100644 index 0000000000..416605d33b --- /dev/null +++ b/metadata/portalliquidationcustodiandepositresponse.go @@ -0,0 +1,145 @@ +package metadata + +import ( + 
"bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalLiquidationCustodianDepositResponse struct { + MetadataBase + DepositStatus string + ReqTxID common.Hash + CustodianAddrStr string + DepositedAmount uint64 +} + +func NewPortalLiquidationCustodianDepositResponse( + depositStatus string, + reqTxID common.Hash, + custodianAddressStr string, + depositedAmount uint64, + metaType int, +) *PortalLiquidationCustodianDepositResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + + return &PortalLiquidationCustodianDepositResponse{ + DepositStatus: depositStatus, + ReqTxID: reqTxID, + MetadataBase: metadataBase, + CustodianAddrStr: custodianAddressStr, + DepositedAmount: depositedAmount, + } +} + +func (iRes PortalLiquidationCustodianDepositResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (iRes PortalLiquidationCustodianDepositResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalLiquidationCustodianDepositResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalLiquidationCustodianDepositResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return iRes.Type == PortalLiquidationCustodianDepositResponseMeta +} + +func (iRes PortalLiquidationCustodianDepositResponse) Hash() *common.Hash { + record := iRes.DepositStatus + record += strconv.FormatUint(iRes.DepositedAmount, 
10) + record += iRes.ReqTxID.String() + record += iRes.MetadataBase.Hash().String() + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalLiquidationCustodianDepositResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +func (iRes PortalLiquidationCustodianDepositResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalCustodianDeposit response instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalLiquidationCustodianDepositMeta) { + continue + } + instDepositStatus := inst[2] + if instDepositStatus != iRes.DepositStatus || + (instDepositStatus != common.PortalLiquidationCustodianDepositRejectedChainStatus) { + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var custodianAddrStrFromInst string + var depositedAmountFromInst uint64 + + contentBytes := []byte(inst[3]) + var custodianDepositContent PortalLiquidationCustodianDepositContent + err := json.Unmarshal(contentBytes, &custodianDepositContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occured while parsing portal liquidation custodian deposit content: ", err) + continue + } + shardIDFromInst = custodianDepositContent.ShardID + txReqIDFromInst = custodianDepositContent.TxReqID + custodianAddrStrFromInst = custodianDepositContent.IncogAddressStr + depositedAmountFromInst = custodianDepositContent.DepositedAmount + + if !bytes.Equal(iRes.ReqTxID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + key, err := wallet.Base58CheckDeserialize(custodianAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occurred while deserializing 
custodian address string: ", err) + continue + } + + // collateral must be PRV + PRVIDStr := common.PRVCoinID.String() + _, pk, paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + depositedAmountFromInst != paidAmount || + PRVIDStr != assetID.String() { + continue + } + idx = i + break + } + + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalLiquidationCustodianDeposit instruction found for PortalLiquidationCustodianDepositResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} \ No newline at end of file diff --git a/metadata/portalportingrequest.go b/metadata/portalportingrequest.go new file mode 100644 index 0000000000..7017833c6f --- /dev/null +++ b/metadata/portalportingrequest.go @@ -0,0 +1,173 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "reflect" + "strconv" +) + +// PortalUserRegister - User register porting public tokens +type PortalUserRegister struct { + MetadataBase + UniqueRegisterId string // + IncogAddressStr string + PTokenId string + RegisterAmount uint64 + PortingFee uint64 +} + +type PortalUserRegisterAction struct { + Meta PortalUserRegister + TxReqID common.Hash + ShardID byte +} + +type PortalPortingRequestContent struct { + UniqueRegisterId string + IncogAddressStr string + PTokenId string + RegisterAmount uint64 + PortingFee uint64 + Custodian []*statedb.MatchingPortingCustodianDetail + TxReqID common.Hash +} + +type PortingRequestStatus struct { + UniquePortingID string + TxReqID common.Hash + TokenID string + PorterAddress string + Amount uint64 + Custodians []*statedb.MatchingPortingCustodianDetail + PortingFee uint64 + Status int + BeaconHeight uint64 +} + +func 
NewPortingRequestStatus(uniquePortingID string, txReqID common.Hash, tokenID string, porterAddress string, amount uint64, custodians []*statedb.MatchingPortingCustodianDetail, portingFee uint64, status int, beaconHeight uint64) *PortingRequestStatus { + return &PortingRequestStatus{UniquePortingID: uniquePortingID, TxReqID: txReqID, TokenID: tokenID, PorterAddress: porterAddress, Amount: amount, Custodians: custodians, PortingFee: portingFee, Status: status, BeaconHeight: beaconHeight} +} + +func NewPortalUserRegister(uniqueRegisterId string, incogAddressStr string, pTokenId string, registerAmount uint64, portingFee uint64, metaType int) (*PortalUserRegister, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + + portalUserRegisterMeta := &PortalUserRegister{ + UniqueRegisterId: uniqueRegisterId, + IncogAddressStr: incogAddressStr, + PTokenId: pTokenId, + RegisterAmount: registerAmount, + PortingFee: portingFee, + } + + portalUserRegisterMeta.MetadataBase = metadataBase + + return portalUserRegisterMeta, nil +} + +func (portalUserRegister PortalUserRegister) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + // NOTE: verify supported tokens pair as needed + return true, nil +} + +func (portalUserRegister PortalUserRegister) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + return true, true, nil + } + + if len(portalUserRegister.IncogAddressStr) <= 0 { + return false, false, errors.New("IncogAddressStr should be not empty") + } + + // validate IncogAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(portalUserRegister.IncogAddressStr) + if err != nil { + return false, false, NewMetadataTxError(IssuingRequestNewIssuingRequestFromMapEror, errors.New("ContributorAddressStr incorrect")) + } + + incogAddr := 
keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, errors.New("wrong custodian incognito address") + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, errors.New("custodian incognito address is not signer tx") + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx custodian deposit must be TxNormalType") + } + + // check burning tx + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("must send coin to burning address") + } + + if len(portalUserRegister.UniqueRegisterId) <= 0 { + return false, false, errors.New("UniqueRegisterId should be not empty") + } + + // validate amount register + if portalUserRegister.RegisterAmount == 0 { + return false, false, errors.New("register amount should be larger than 0") + } + + //validation porting fee + if portalUserRegister.PortingFee == 0 { + return false, false, errors.New("porting fee should be larger than 0") + } + + if (portalUserRegister.PortingFee) != txr.CalculateTxValue() { + return false, false, errors.New("Total of register amount and porting fee should be equal to the tx value") + } + + return true, true, nil +} + +func (portalUserRegister PortalUserRegister) ValidateMetadataByItself() bool { + return portalUserRegister.Type == PortalUserRegisterMeta +} + +func (portalUserRegister PortalUserRegister) Hash() *common.Hash { + record := portalUserRegister.MetadataBase.Hash().String() + record += portalUserRegister.UniqueRegisterId + record += portalUserRegister.PTokenId + record += portalUserRegister.IncogAddressStr + record += strconv.FormatUint(portalUserRegister.RegisterAmount, 10) + record += strconv.FormatUint(portalUserRegister.PortingFee, 10) + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (portalUserRegister *PortalUserRegister) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + 
actionContent := PortalUserRegisterAction{ + Meta: *portalUserRegister, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalUserRegisterMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (portalUserRegister *PortalUserRegister) CalculateSize() uint64 { + return calculateSize(portalUserRegister) +} diff --git a/metadata/portalredeemliquidateexchangerates.go b/metadata/portalredeemliquidateexchangerates.go new file mode 100644 index 0000000000..923262b682 --- /dev/null +++ b/metadata/portalredeemliquidateexchangerates.go @@ -0,0 +1,191 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "reflect" + "strconv" +) + +type PortalRedeemLiquidateExchangeRates struct { + MetadataBase + TokenID string // pTokenID in incognito chain + RedeemAmount uint64 + RedeemerIncAddressStr string + RemoteAddress string // btc/bnb/etc address + RedeemFee uint64 // redeem fee in PRV, 0.01% redeemAmount in PRV +} + +type PortalRedeemLiquidateExchangeRatesAction struct { + Meta PortalRedeemLiquidateExchangeRates + TxReqID common.Hash + ShardID byte +} + +type PortalRedeemLiquidateExchangeRatesContent struct { + TokenID string // pTokenID in incognito chain + RedeemAmount uint64 + RedeemerIncAddressStr string + RemoteAddress string // btc/bnb/etc address + RedeemFee uint64 // redeem fee in PRV, 0.01% redeemAmount in PRV + TxReqID common.Hash + ShardID byte + TotalPTokenReceived uint64 +} + +type RedeemLiquidateExchangeRatesStatus struct { + TxReqID common.Hash + TokenID string + RedeemerAddress string + RedeemerRemoteAddress string + 
RedeemAmount uint64 + RedeemFee uint64 + Status byte + TotalPTokenReceived uint64 +} + +func NewRedeemLiquidateExchangeRatesStatus(txReqID common.Hash, tokenID string, redeemerAddress string, redeemerRemoteAddress string, redeemAmount uint64, redeemFee uint64, status byte, totalPTokenReceived uint64) *RedeemLiquidateExchangeRatesStatus { + return &RedeemLiquidateExchangeRatesStatus{TxReqID: txReqID, TokenID: tokenID, RedeemerAddress: redeemerAddress, RedeemerRemoteAddress: redeemerRemoteAddress, RedeemAmount: redeemAmount, RedeemFee: redeemFee, Status: status, TotalPTokenReceived: totalPTokenReceived} +} + +func NewPortalRedeemLiquidateExchangeRates( + metaType int, + tokenID string, + redeemAmount uint64, + incAddressStr string, + remoteAddr string, + redeemFee uint64, +) (*PortalRedeemLiquidateExchangeRates, error) { + metadataBase := MetadataBase{Type: metaType} + + portalRedeemLiquidateExchangeRates := &PortalRedeemLiquidateExchangeRates { + TokenID: tokenID, + RedeemAmount: redeemAmount, + RedeemerIncAddressStr: incAddressStr, + RemoteAddress: remoteAddr, + RedeemFee: redeemFee, + } + + portalRedeemLiquidateExchangeRates.MetadataBase = metadataBase + + return portalRedeemLiquidateExchangeRates, nil +} + +func (redeemReq PortalRedeemLiquidateExchangeRates) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (redeemReq PortalRedeemLiquidateExchangeRates) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // Note: the metadata was already verified with *transaction.TxCustomToken level so no need to verify with *transaction.Tx level again as *transaction.Tx is embedding property of *transaction.TxCustomToken + if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("txnormal 
in tx redeem request must be coin burning tx") + } + // validate value transfer of tx for redeem fee in prv + if redeemReq.RedeemFee != txr.CalculateTxValue() { + return false, false, errors.New("redeem fee amount should be equal to the tx value") + } + return true, true, nil + } + + // validate RedeemerIncAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(redeemReq.RedeemerIncAddressStr) + if err != nil { + return false, false, NewMetadataTxError(PortalRedeemLiquidateExchangeRatesParamError, errors.New("Address incognito redeem is invalid")) + } + + + incAddr := keyWallet.KeySet.PaymentAddress + if len(incAddr.Pk) == 0 { + return false, false, NewMetadataTxError(PortalRedeemLiquidateExchangeRatesParamError, errors.New("Payment incognito address is invalid")) + } + if !bytes.Equal(txr.GetSigPubKey()[:], incAddr.Pk[:]) { + return false, false, NewMetadataTxError(PortalRedeemLiquidateExchangeRatesParamError, errors.New("Address incognito redeem is not signer")) + } + + // check tx type + if txr.GetType() != common.TxCustomTokenPrivacyType { + return false, false, errors.New("tx redeem request must be TxCustomTokenPrivacyType") + } + + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("txprivacytoken in tx redeem request must be coin burning tx") + } + + // validate redeem amount + if redeemReq.RedeemAmount <= 0 { + return false, false, errors.New("redeem amount should be larger than 0") + } + + // validate redeem fee + if redeemReq.RedeemFee <= 0 { + return false, false, errors.New("redeem fee should be larger than 0") + } + + // validate value transfer of tx for redeem amount in ptoken + if redeemReq.RedeemAmount != txr.CalculateTxValue() { + return false, false, errors.New("redeem amount should be equal to the tx value") + } + + // validate tokenID + if redeemReq.TokenID != txr.GetTokenID().String() { + return false, false, NewMetadataTxError(PortalRedeemLiquidateExchangeRatesParamError, errors.New("TokenID in metadata is not 
matched to tokenID in tx")) + } + // check tokenId is portal token or not + if !common.IsPortalToken(redeemReq.TokenID) { + return false, false, NewMetadataTxError(PortalRedeemLiquidateExchangeRatesParamError, errors.New("TokenID is not in portal tokens list")) + } + + //validate RemoteAddress + // todo: + if len(redeemReq.RemoteAddress) == 0 { + return false, false, NewMetadataTxError(PortalRedeemLiquidateExchangeRatesParamError, errors.New("Remote address is invalid")) + } + + return true, true, nil +} + +func (redeemReq PortalRedeemLiquidateExchangeRates) ValidateMetadataByItself() bool { + return redeemReq.Type == PortalRedeemLiquidateExchangeRatesMeta +} + +func (redeemReq PortalRedeemLiquidateExchangeRates) Hash() *common.Hash { + record := redeemReq.MetadataBase.Hash().String() + record += redeemReq.TokenID + record += strconv.FormatUint(redeemReq.RedeemAmount, 10) + record += strconv.FormatUint(redeemReq.RedeemFee, 10) + record += redeemReq.RedeemerIncAddressStr + record += redeemReq.RemoteAddress + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (redeemReq *PortalRedeemLiquidateExchangeRates) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalRedeemLiquidateExchangeRatesAction{ + Meta: *redeemReq, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalRedeemLiquidateExchangeRatesMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (redeemReq *PortalRedeemLiquidateExchangeRates) CalculateSize() uint64 { + return calculateSize(redeemReq) +} diff --git a/metadata/portalredeemliquidateexchangeratesresponse.go b/metadata/portalredeemliquidateexchangeratesresponse.go new file mode 100644 index 0000000000..3e632fd2ce --- 
/dev/null +++ b/metadata/portalredeemliquidateexchangeratesresponse.go @@ -0,0 +1,174 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalRedeemLiquidateExchangeRatesResponse struct { + MetadataBase + RequestStatus string + ReqTxID common.Hash + RequesterAddrStr string + RedeemAmount uint64 + Amount uint64 + TokenID string +} + +func NewPortalRedeemLiquidateExchangeRatesResponse( + requestStatus string, + reqTxID common.Hash, + requesterAddressStr string, + redeemAmount uint64, + amount uint64, + tokenID string, + metaType int, +) *PortalRedeemLiquidateExchangeRatesResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + return &PortalRedeemLiquidateExchangeRatesResponse{ + RequestStatus: requestStatus, + ReqTxID: reqTxID, + MetadataBase: metadataBase, + RequesterAddrStr: requesterAddressStr, + RedeemAmount: redeemAmount, + Amount: amount, + TokenID: tokenID, + } +} + +func (iRes PortalRedeemLiquidateExchangeRatesResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (iRes PortalRedeemLiquidateExchangeRatesResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalRedeemLiquidateExchangeRatesResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalRedeemLiquidateExchangeRatesResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return 
iRes.Type == PortalRedeemLiquidateExchangeRatesResponseMeta +} + +func (iRes PortalRedeemLiquidateExchangeRatesResponse) Hash() *common.Hash { + record := iRes.MetadataBase.Hash().String() + record += iRes.RequestStatus + record += iRes.ReqTxID.String() + record += iRes.RequesterAddrStr + record += strconv.FormatUint(iRes.RedeemAmount, 10) + record += strconv.FormatUint(iRes.Amount, 10) + record += iRes.TokenID + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalRedeemLiquidateExchangeRatesResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +func (iRes PortalRedeemLiquidateExchangeRatesResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalRedeemRequest response instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalRedeemLiquidateExchangeRatesMeta) { + continue + } + instReqStatus := inst[2] + if instReqStatus != iRes.RequestStatus || + (instReqStatus != common.PortalRedeemLiquidateExchangeRatesSuccessChainStatus) { + Logger.log.Error("WARNING - VALIDATION: status is not exactly, status %v", instReqStatus) + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var requesterAddrStrFromInst string + var redeemAmountFromInst uint64 + var totalPTokenReceived uint64 + //var tokenIDStrFromInst string + + contentBytes := []byte(inst[3]) + var redeemReqContent PortalRedeemLiquidateExchangeRatesContent + err := json.Unmarshal(contentBytes, &redeemReqContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occurred while parsing portal redeem liquidate exchange rates content: ", err) + continue + } + + shardIDFromInst = redeemReqContent.ShardID + 
txReqIDFromInst = redeemReqContent.TxReqID + requesterAddrStrFromInst = redeemReqContent.RedeemerIncAddressStr + redeemAmountFromInst = redeemReqContent.RedeemAmount + totalPTokenReceived = redeemReqContent.TotalPTokenReceived + //tokenIDStrFromInst = redeemReqContent.TokenID + + if !bytes.Equal(iRes.ReqTxID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + + if requesterAddrStrFromInst != iRes.RequesterAddrStr { + Logger.log.Errorf("Error - VALIDATION: Requester address %v is not matching to Requester address in instruction %v", iRes.RequesterAddrStr, requesterAddrStrFromInst) + continue + } + + if totalPTokenReceived != iRes.Amount { + Logger.log.Errorf("Error - VALIDATION: totalPTokenReceived %v is not matching to TotalPTokenReceived in instruction %v", iRes.Amount, redeemAmountFromInst) + continue + } + + if redeemAmountFromInst != iRes.RedeemAmount { + Logger.log.Errorf("Error - VALIDATION: Redeem amount %v is not matching to redeem amount in instruction %v", iRes.RedeemAmount, redeemAmountFromInst) + continue + } + + key, err := wallet.Base58CheckDeserialize(requesterAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occurred while deserializing requester address string: ", err) + continue + } + + PRVIDStr := common.PRVCoinID.String() + _, pk, paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + totalPTokenReceived != paidAmount || + PRVIDStr != assetID.String() { + continue + } + idx = i + break + } + + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalRedeemLiquidateExchangeRates instruction found for PortalRedeemLiquidateExchangeRatesResponse tx %s", tx.Hash().String())) + } + + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/portalredeemrequest.go b/metadata/portalredeemrequest.go new file mode 100644 index 0000000000..fd342885da --- /dev/null +++ 
b/metadata/portalredeemrequest.go @@ -0,0 +1,197 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "reflect" + "strconv" +) + +// PortalRedeemRequest - portal user redeem requests to get public token by burning ptoken +// metadata - redeem request - create normal tx with this metadata +type PortalRedeemRequest struct { + MetadataBase + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + RedeemAmount uint64 + RedeemerIncAddressStr string + RemoteAddress string // btc/bnb/etc address + RedeemFee uint64 // redeem fee in PRV, 0.01% redeemAmount in PRV +} + +// PortalRedeemRequestAction - shard validator creates instruction that contain this action content +// it will be append to ShardToBeaconBlock +type PortalRedeemRequestAction struct { + Meta PortalRedeemRequest + TxReqID common.Hash + ShardID byte +} + +// PortalRedeemRequestContent - Beacon builds a new instruction with this content after receiving a instruction from shard +// It will be appended to beaconBlock +// both accepted and rejected status +type PortalRedeemRequestContent struct { + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + RedeemAmount uint64 + RedeemerIncAddressStr string + RemoteAddress string // btc/bnb/etc address + RedeemFee uint64 // redeem fee in PRV, 0.01% redeemAmount in PRV + MatchingCustodianDetail []*statedb.MatchingRedeemCustodianDetail // key: incAddressCustodian + TxReqID common.Hash + ShardID byte +} + +// PortalRedeemRequestStatus - Beacon tracks status of redeem request into db +type PortalRedeemRequestStatus struct { + Status byte + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + RedeemAmount uint64 + RedeemerIncAddressStr string + RemoteAddress string // btc/bnb/etc address + RedeemFee uint64 // 
redeem fee in PRV, 0.01% redeemAmount in PRV + MatchingCustodianDetail []*statedb.MatchingRedeemCustodianDetail // key: incAddressCustodian + TxReqID common.Hash +} + +func NewPortalRedeemRequest( + metaType int, + uniqueRedeemID string, + tokenID string, + redeemAmount uint64, + incAddressStr string, + remoteAddr string, + redeemFee uint64) (*PortalRedeemRequest, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + requestPTokenMeta := &PortalRedeemRequest{ + UniqueRedeemID: uniqueRedeemID, + TokenID: tokenID, + RedeemAmount: redeemAmount, + RedeemerIncAddressStr: incAddressStr, + RemoteAddress: remoteAddr, + RedeemFee: redeemFee, + } + requestPTokenMeta.MetadataBase = metadataBase + return requestPTokenMeta, nil +} + +func (redeemReq PortalRedeemRequest) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (redeemReq PortalRedeemRequest) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // Note: the metadata was already verified with *transaction.TxCustomToken level so no need to verify with *transaction.Tx level again as *transaction.Tx is embedding property of *transaction.TxCustomToken + if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("txnormal in tx redeem request must be coin burning tx") + } + // validate value transfer of tx for redeem fee in prv + if redeemReq.RedeemFee != txr.CalculateTxValue() { + return false, false, errors.New("redeem fee amount should be equal to the tx value") + } + return true, true, nil + } + + // validate RedeemerIncAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(redeemReq.RedeemerIncAddressStr) + if err != nil { + return false, false, NewMetadataTxError(PortalRedeemRequestParamError, errors.New("Requester 
incognito address is invalid")) + } + incAddr := keyWallet.KeySet.PaymentAddress + if len(incAddr.Pk) == 0 { + return false, false, NewMetadataTxError(PortalRedeemRequestParamError, errors.New("Requester incognito address is invalid")) + } + if !bytes.Equal(txr.GetSigPubKey()[:], incAddr.Pk[:]) { + return false, false, NewMetadataTxError(PortalRedeemRequestParamError, errors.New("Requester incognito address is not signer")) + } + + // check tx type + if txr.GetType() != common.TxCustomTokenPrivacyType { + return false, false, errors.New("tx redeem request must be TxCustomTokenPrivacyType") + } + + if !txr.IsCoinsBurning(bcr, beaconHeight) { + return false, false, errors.New("txprivacytoken in tx redeem request must be coin burning tx") + } + + // validate redeem amount + if redeemReq.RedeemAmount <= 0 { + return false, false, errors.New("redeem amount should be larger than 0") + } + + // validate redeem fee + if redeemReq.RedeemFee <= 0 { + return false, false, errors.New("redeem fee should be larger than 0") + } + + // validate value transfer of tx for redeem amount in ptoken + if redeemReq.RedeemAmount != txr.CalculateTxValue() { + return false, false, errors.New("redeem amount should be equal to the tx value") + } + + // validate tokenID + if redeemReq.TokenID != txr.GetTokenID().String() { + return false, false, NewMetadataTxError(PortalRedeemRequestParamError, errors.New("TokenID in metadata is not matched to tokenID in tx")) + } + // check tokenId is portal token or not + if !common.IsPortalToken(redeemReq.TokenID) { + return false, false, NewMetadataTxError(PortalRedeemRequestParamError, errors.New("TokenID is not in portal tokens list")) + } + + //validate RemoteAddress + // todo: + if len(redeemReq.RemoteAddress) == 0 { + return false, false, NewMetadataTxError(PortalRedeemRequestParamError, errors.New("Remote address is invalid")) + } + + return true, true, nil +} + +func (redeemReq PortalRedeemRequest) ValidateMetadataByItself() bool { + return 
redeemReq.Type == PortalRedeemRequestMeta +} + +func (redeemReq PortalRedeemRequest) Hash() *common.Hash { + record := redeemReq.MetadataBase.Hash().String() + record += redeemReq.UniqueRedeemID + record += redeemReq.TokenID + record += strconv.FormatUint(redeemReq.RedeemAmount, 10) + record += strconv.FormatUint(redeemReq.RedeemFee, 10) + record += redeemReq.RedeemerIncAddressStr + record += redeemReq.RemoteAddress + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (redeemReq *PortalRedeemRequest) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalRedeemRequestAction{ + Meta: *redeemReq, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalRedeemRequestMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (redeemReq *PortalRedeemRequest) CalculateSize() uint64 { + return calculateSize(redeemReq) +} diff --git a/metadata/portalredeemrequestresponse.go b/metadata/portalredeemrequestresponse.go new file mode 100644 index 0000000000..7a9558e6b1 --- /dev/null +++ b/metadata/portalredeemrequestresponse.go @@ -0,0 +1,157 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalRedeemRequestResponse struct { + MetadataBase + RequestStatus string + ReqTxID common.Hash + RequesterAddrStr string + Amount uint64 + IncTokenID string +} + +func NewPortalRedeemRequestResponse( + requestStatus string, + reqTxID common.Hash, + requesterAddressStr string, + amount uint64, + tokenID string, + metaType int, +) 
*PortalRedeemRequestResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + return &PortalRedeemRequestResponse{ + RequestStatus: requestStatus, + ReqTxID: reqTxID, + MetadataBase: metadataBase, + RequesterAddrStr: requesterAddressStr, + Amount: amount, + IncTokenID: tokenID, + } +} + +func (iRes PortalRedeemRequestResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (iRes PortalRedeemRequestResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalRedeemRequestResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalRedeemRequestResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return iRes.Type == PortalRedeemRequestResponseMeta +} + +func (iRes PortalRedeemRequestResponse) Hash() *common.Hash { + record := iRes.MetadataBase.Hash().String() + record += iRes.RequestStatus + record += iRes.ReqTxID.String() + record += iRes.RequesterAddrStr + record += strconv.FormatUint(iRes.Amount, 10) + record += iRes.IncTokenID + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalRedeemRequestResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +func (iRes PortalRedeemRequestResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalRedeemRequest 
response instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalRedeemRequestMeta) { + continue + } + instReqStatus := inst[2] + if instReqStatus != iRes.RequestStatus || + (instReqStatus != common.PortalRedeemRequestRejectedChainStatus) { + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var requesterAddrStrFromInst string + var redeemAmountFromInst uint64 + var tokenIDStrFromInst string + + contentBytes := []byte(inst[3]) + var redeemReqContent PortalRedeemRequestContent + err := json.Unmarshal(contentBytes, &redeemReqContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occured while parsing portal redeem request content: ", err) + continue + } + shardIDFromInst = redeemReqContent.ShardID + txReqIDFromInst = redeemReqContent.TxReqID + requesterAddrStrFromInst = redeemReqContent.RedeemerIncAddressStr + redeemAmountFromInst = redeemReqContent.RedeemAmount + tokenIDStrFromInst = redeemReqContent.TokenID + + if !bytes.Equal(iRes.ReqTxID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + if requesterAddrStrFromInst != iRes.RequesterAddrStr { + Logger.log.Errorf("Error - VALIDATION: Requester address %v is not matching to Requester address in instruction %v", iRes.RequesterAddrStr, requesterAddrStrFromInst) + continue + } + + if redeemAmountFromInst != iRes.Amount { + Logger.log.Errorf("Error - VALIDATION: Redeem amount %v is not matching to redeem amount in instruction %v", iRes.Amount, redeemAmountFromInst) + continue + } + + key, err := wallet.Base58CheckDeserialize(requesterAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing requester address string: ", err) + continue + } + + _, pk, paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + redeemAmountFromInst != paidAmount || + tokenIDStrFromInst != assetID.String() { + 
continue + } + idx = i + break + } + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalRedeemRequest instruction found for PortalRedeemRequestResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/portalrequestptokens.go b/metadata/portalrequestptokens.go new file mode 100644 index 0000000000..e5914f1e57 --- /dev/null +++ b/metadata/portalrequestptokens.go @@ -0,0 +1,157 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +// PortalRequestPTokens - portal user requests ptoken (after sending pubToken to custodians) +// metadata - user requests ptoken - create normal tx with this metadata +type PortalRequestPTokens struct { + MetadataBase + UniquePortingID string + TokenID string // pTokenID in incognito chain + IncogAddressStr string + PortingAmount uint64 + PortingProof string +} + +// PortalRequestPTokensAction - shard validator creates instruction that contain this action content +// it will be append to ShardToBeaconBlock +type PortalRequestPTokensAction struct { + Meta PortalRequestPTokens + TxReqID common.Hash + ShardID byte +} + +// PortalRequestPTokensContent - Beacon builds a new instruction with this content after receiving a instruction from shard +// It will be appended to beaconBlock +// both accepted and rejected status +type PortalRequestPTokensContent struct { + UniquePortingID string + TokenID string // pTokenID in incognito chain + IncogAddressStr string + PortingAmount uint64 + PortingProof string + TxReqID common.Hash + ShardID byte +} + +// PortalRequestPTokensStatus - Beacon tracks status of request ptokens into db +type PortalRequestPTokensStatus struct { + Status byte + UniquePortingID string + 
TokenID string // pTokenID in incognito chain + IncogAddressStr string + PortingAmount uint64 + PortingProof string + TxReqID common.Hash +} + +func NewPortalRequestPTokens( + metaType int, + uniquePortingID string, + tokenID string, + incogAddressStr string, + portingAmount uint64, + portingProof string) (*PortalRequestPTokens, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + requestPTokenMeta := &PortalRequestPTokens{ + UniquePortingID: uniquePortingID, + TokenID: tokenID, + IncogAddressStr: incogAddressStr, + PortingAmount: portingAmount, + PortingProof: portingProof, + } + requestPTokenMeta.MetadataBase = metadataBase + return requestPTokenMeta, nil +} + +func (reqPToken PortalRequestPTokens) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (reqPToken PortalRequestPTokens) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // Note: the metadata was already verified with *transaction.TxCustomToken level so no need to verify with *transaction.Tx level again as *transaction.Tx is embedding property of *transaction.TxCustomToken + //if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + // return true, true, nil + //} + + // validate IncogAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(reqPToken.IncogAddressStr) + if err != nil { + return false, false, NewMetadataTxError(PortalRequestPTokenParamError, errors.New("Requester incognito address is invalid")) + } + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, NewMetadataTxError(PortalRequestPTokenParamError, errors.New("Requester incognito address is invalid")) + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, NewMetadataTxError(PortalRequestPTokenParamError, errors.New("Requester 
incognito address is not signer")) + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx custodian deposit must be TxNormalType") + } + + // validate amount deposit + if reqPToken.PortingAmount == 0 { + return false, false, errors.New("porting amount should be larger than 0") + } + + // validate tokenID and porting proof + if !common.IsPortalToken(reqPToken.TokenID){ + return false, false, NewMetadataTxError(PortalRequestPTokenParamError, errors.New("TokenID is not supported currently on Portal")) + } + + return true, true, nil +} + +func (reqPToken PortalRequestPTokens) ValidateMetadataByItself() bool { + return reqPToken.Type == PortalUserRequestPTokenMeta +} + +func (reqPToken PortalRequestPTokens) Hash() *common.Hash { + record := reqPToken.MetadataBase.Hash().String() + record += reqPToken.UniquePortingID + record += reqPToken.TokenID + record += reqPToken.IncogAddressStr + record += strconv.FormatUint(reqPToken.PortingAmount, 10) + record += reqPToken.PortingProof + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (reqPToken *PortalRequestPTokens) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalRequestPTokensAction{ + Meta: *reqPToken, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalUserRequestPTokenMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (reqPToken *PortalRequestPTokens) CalculateSize() uint64 { + return calculateSize(reqPToken) +} diff --git a/metadata/portalrequestptokensresponse.go b/metadata/portalrequestptokensresponse.go new file mode 100644 index 0000000000..099ee785fa --- /dev/null +++ b/metadata/portalrequestptokensresponse.go @@ -0,0 +1,147 
@@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +type PortalRequestPTokensResponse struct { + MetadataBase + RequestStatus string + ReqTxID common.Hash + RequesterAddrStr string + Amount uint64 + IncTokenID string +} + +func NewPortalRequestPTokensResponse( + depositStatus string, + reqTxID common.Hash, + requesterAddressStr string, + amount uint64, + tokenID string, + metaType int, +) *PortalRequestPTokensResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + return &PortalRequestPTokensResponse{ + RequestStatus: depositStatus, + ReqTxID: reqTxID, + MetadataBase: metadataBase, + RequesterAddrStr: requesterAddressStr, + Amount: amount, + IncTokenID: tokenID, + } +} + +func (iRes PortalRequestPTokensResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (iRes PortalRequestPTokensResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalRequestPTokensResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalRequestPTokensResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return iRes.Type == PortalUserRequestPTokenResponseMeta +} + +func (iRes PortalRequestPTokensResponse) Hash() *common.Hash { + record := iRes.MetadataBase.Hash().String() + record += iRes.RequestStatus + record += iRes.ReqTxID.String() + record += iRes.RequesterAddrStr + record 
+= strconv.FormatUint(iRes.Amount, 10) + record += iRes.IncTokenID + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalRequestPTokensResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +func (iRes PortalRequestPTokensResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalRequestPTokens response instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalUserRequestPTokenMeta) { + continue + } + instDepositStatus := inst[2] + if instDepositStatus != iRes.RequestStatus || + (instDepositStatus != common.PortalReqPTokensAcceptedChainStatus) { + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var requesterAddrStrFromInst string + var portingAmountFromInst uint64 + var tokenIDStrFromInst string + + contentBytes := []byte(inst[3]) + var reqPTokensContent PortalRequestPTokensContent + err := json.Unmarshal(contentBytes, &reqPTokensContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error occured while parsing portal request ptokens content: ", err) + continue + } + shardIDFromInst = reqPTokensContent.ShardID + txReqIDFromInst = reqPTokensContent.TxReqID + requesterAddrStrFromInst = reqPTokensContent.IncogAddressStr + portingAmountFromInst = reqPTokensContent.PortingAmount + tokenIDStrFromInst = reqPTokensContent.TokenID + + if !bytes.Equal(iRes.ReqTxID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + key, err := wallet.Base58CheckDeserialize(requesterAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing receiver address string: ", err) + continue + } + + _, pk, 
paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + portingAmountFromInst != paidAmount || + tokenIDStrFromInst != assetID.String() { + continue + } + idx = i + break + } + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalReqPtokens instruction found for PortalReqPtokensResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/portalrequestunlockcollateral.go b/metadata/portalrequestunlockcollateral.go new file mode 100644 index 0000000000..055cd89c92 --- /dev/null +++ b/metadata/portalrequestunlockcollateral.go @@ -0,0 +1,159 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +// PortalRequestUnlockCollateral - portal custodian requests unlock collateral (after returning pubToken to user) +// metadata - custodian requests unlock collateral - create normal tx with this metadata +type PortalRequestUnlockCollateral struct { + MetadataBase + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + CustodianAddressStr string + RedeemAmount uint64 + RedeemProof string +} + +// PortalRequestUnlockCollateralAction - shard validator creates instruction that contain this action content +// it will be append to ShardToBeaconBlock +type PortalRequestUnlockCollateralAction struct { + Meta PortalRequestUnlockCollateral + TxReqID common.Hash + ShardID byte +} + +// PortalRequestUnlockCollateralContent - Beacon builds a new instruction with this content after receiving a instruction from shard +// It will be appended to beaconBlock +// both accepted and rejected status +type PortalRequestUnlockCollateralContent struct { + UniqueRedeemID string + TokenID string // 
pTokenID in incognito chain + CustodianAddressStr string + RedeemAmount uint64 + UnlockAmount uint64 // prv + RedeemProof string + TxReqID common.Hash + ShardID byte +} + +// PortalRequestUnlockCollateralStatus - Beacon tracks status of request unlock collateral amount into db +type PortalRequestUnlockCollateralStatus struct { + Status byte + UniqueRedeemID string + TokenID string // pTokenID in incognito chain + CustodianAddressStr string + RedeemAmount uint64 + UnlockAmount uint64 // prv + RedeemProof string + TxReqID common.Hash +} + +func NewPortalRequestUnlockCollateral( + metaType int, + uniqueRedeemID string, + tokenID string, + incogAddressStr string, + redeemAmount uint64, + redeemProof string) (*PortalRequestUnlockCollateral, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + requestPTokenMeta := &PortalRequestUnlockCollateral{ + UniqueRedeemID: uniqueRedeemID, + TokenID: tokenID, + CustodianAddressStr: incogAddressStr, + RedeemAmount: redeemAmount, + RedeemProof: redeemProof, + } + requestPTokenMeta.MetadataBase = metadataBase + return requestPTokenMeta, nil +} + +func (meta PortalRequestUnlockCollateral) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (meta PortalRequestUnlockCollateral) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // Note: the metadata was already verified with *transaction.TxCustomToken level so no need to verify with *transaction.Tx level again as *transaction.Tx is embedding property of *transaction.TxCustomToken + //if txr.GetType() == common.TxCustomTokenPrivacyType && reflect.TypeOf(txr).String() == "*transaction.Tx" { + // return true, true, nil + //} + + // validate CustodianAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(meta.CustodianAddressStr) + if err != nil { + return false, false, 
NewMetadataTxError(PortalRequestPTokenParamError, errors.New("Custodian incognito address is invalid")) + } + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, NewMetadataTxError(PortalRequestPTokenParamError, errors.New("Custodian incognito address is invalid")) + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, NewMetadataTxError(PortalRequestPTokenParamError, errors.New("Custodian incognito address is not signer")) + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx custodian deposit must be TxNormalType") + } + + // validate amount redeem + if meta.RedeemAmount == 0 { + return false, false, errors.New("redeem amount should be larger than 0") + } + + // validate tokenID + if !common.IsPortalToken(meta.TokenID) { + return false, false, errors.New("TokenID is not a portal token") + } + + return true, true, nil +} + +func (meta PortalRequestUnlockCollateral) ValidateMetadataByItself() bool { + return meta.Type == PortalRequestUnlockCollateralMeta +} + +func (meta PortalRequestUnlockCollateral) Hash() *common.Hash { + record := meta.MetadataBase.Hash().String() + record += meta.UniqueRedeemID + record += meta.TokenID + record += meta.CustodianAddressStr + record += strconv.FormatUint(meta.RedeemAmount, 10) + record += meta.RedeemProof + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (meta *PortalRequestUnlockCollateral) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalRequestUnlockCollateralAction{ + Meta: *meta, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalRequestUnlockCollateralMeta), 
actionContentBase64Str} + return [][]string{action}, nil +} + +func (meta *PortalRequestUnlockCollateral) CalculateSize() uint64 { + return calculateSize(meta) +} diff --git a/metadata/portalwithdrawrewardrequest.go b/metadata/portalwithdrawrewardrequest.go new file mode 100644 index 0000000000..e05837f8dc --- /dev/null +++ b/metadata/portalwithdrawrewardrequest.go @@ -0,0 +1,126 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +// PortalRequestWithdrawReward - custodians request withdraw reward +// metadata - custodians request withdraw reward - create normal tx with this metadata +type PortalRequestWithdrawReward struct { + MetadataBase + CustodianAddressStr string + TokenID common.Hash +} + +// PortalRequestWithdrawRewardAction - shard validator creates instruction that contain this action content +// it will be append to ShardToBeaconBlock +type PortalRequestWithdrawRewardAction struct { + Meta PortalRequestWithdrawReward + TxReqID common.Hash + ShardID byte +} + +// PortalRequestWithdrawRewardContent - Beacon builds a new instruction with this content after receiving a instruction from shard +// It will be appended to beaconBlock +// both accepted and rejected status +type PortalRequestWithdrawRewardContent struct { + CustodianAddressStr string + TokenID common.Hash + RewardAmount uint64 + TxReqID common.Hash + ShardID byte +} + +// PortalRequestWithdrawRewardStatus - Beacon tracks status of request unlock collateral amount into db +type PortalRequestWithdrawRewardStatus struct { + Status byte + CustodianAddressStr string + TokenID common.Hash + RewardAmount uint64 + TxReqID common.Hash +} + +func NewPortalRequestWithdrawReward( + metaType int, + incogAddressStr string, + tokenID common.Hash) (*PortalRequestWithdrawReward, 
error) { + metadataBase := MetadataBase{ + Type: metaType, + } + meta := &PortalRequestWithdrawReward{ + CustodianAddressStr: incogAddressStr, + TokenID: tokenID, + } + meta.MetadataBase = metadataBase + return meta, nil +} + +func (meta PortalRequestWithdrawReward) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (meta PortalRequestWithdrawReward) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // validate CustodianAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(meta.CustodianAddressStr) + if err != nil { + return false, false, errors.New("Custodian incognito address is invalid") + } + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, errors.New("Custodian incognito address is invalid") + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, errors.New("Custodian incognito address is not signer") + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx request withdraw reward must be TxNormalType") + } + + return true, true, nil +} + +func (meta PortalRequestWithdrawReward) ValidateMetadataByItself() bool { + return meta.Type == PortalRequestWithdrawRewardMeta +} + +func (meta PortalRequestWithdrawReward) Hash() *common.Hash { + record := meta.MetadataBase.Hash().String() + record += meta.CustodianAddressStr + record += meta.TokenID.String() + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (meta *PortalRequestWithdrawReward) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := PortalRequestWithdrawRewardAction{ + Meta: *meta, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, 
err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(PortalRequestWithdrawRewardMeta), actionContentBase64Str} + return [][]string{action}, nil +} + +func (meta *PortalRequestWithdrawReward) CalculateSize() uint64 { + return calculateSize(meta) +} diff --git a/metadata/portalwithdrawrewardresponse.go b/metadata/portalwithdrawrewardresponse.go new file mode 100644 index 0000000000..dd3f70a6c8 --- /dev/null +++ b/metadata/portalwithdrawrewardresponse.go @@ -0,0 +1,144 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" + "strconv" +) + +// PortalRequestUnlockCollateral - portal custodian requests unlock collateral (after returning pubToken to user) +// metadata - custodian requests unlock collateral - create normal tx with this metadata +type PortalWithdrawRewardResponse struct { + MetadataBase + CustodianAddressStr string + TokenID common.Hash + RewardAmount uint64 + TxReqID common.Hash +} + +func NewPortalWithdrawRewardResponse( + reqTxID common.Hash, + custodianAddressStr string, + tokenID common.Hash, + rewardAmount uint64, + metaType int, +) *PortalWithdrawRewardResponse { + metadataBase := MetadataBase{ + Type: metaType, + } + return &PortalWithdrawRewardResponse{ + MetadataBase: metadataBase, + CustodianAddressStr: custodianAddressStr, + TokenID: tokenID, + RewardAmount: rewardAmount, + TxReqID: reqTxID, + } +} + +func (iRes PortalWithdrawRewardResponse) CheckTransactionFee(tr Transaction, minFee uint64, beaconHeight int64, db *statedb.StateDB) bool { + // no need to have fee for this tx + return true +} + +func (iRes PortalWithdrawRewardResponse) ValidateTxWithBlockChain(txr Transaction, bcr BlockchainRetriever, shardID byte, db *statedb.StateDB) (bool, error) { + // no need to validate tx with 
blockchain, just need to validate with requested tx (via RequestedTxID) + return false, nil +} + +func (iRes PortalWithdrawRewardResponse) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + return false, true, nil +} + +func (iRes PortalWithdrawRewardResponse) ValidateMetadataByItself() bool { + // The validation just need to check at tx level, so returning true here + return iRes.Type == PortalRequestWithdrawRewardResponseMeta +} + +func (iRes PortalWithdrawRewardResponse) Hash() *common.Hash { + record := iRes.MetadataBase.Hash().String() + record += iRes.TxReqID.String() + record += iRes.CustodianAddressStr + record += iRes.TokenID.String() + record += strconv.FormatUint(iRes.RewardAmount, 10) + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (iRes *PortalWithdrawRewardResponse) CalculateSize() uint64 { + return calculateSize(iRes) +} + +func (iRes PortalWithdrawRewardResponse) VerifyMinerCreatedTxBeforeGettingInBlock( + txsInBlock []Transaction, + txsUsed []int, + insts [][]string, + instUsed []int, + shardID byte, + tx Transaction, + bcr BlockchainRetriever, + ac *AccumulatedValues, +) (bool, error) { + idx := -1 + for i, inst := range insts { + if len(inst) < 4 { // this is not PortalWithdrawReward instruction + continue + } + instMetaType := inst[0] + if instUsed[i] > 0 || + instMetaType != strconv.Itoa(PortalRequestWithdrawRewardMeta) { + continue + } + instDepositStatus := inst[2] + if instDepositStatus != common.PortalReqWithdrawRewardAcceptedChainStatus { + continue + } + + var shardIDFromInst byte + var txReqIDFromInst common.Hash + var custodianAddrStrFromInst string + var rewardAmountFromInst uint64 + var tokenIDFromInst common.Hash + + contentBytes := []byte(inst[3]) + var reqWithdrawRewardContent PortalRequestWithdrawRewardContent + err := json.Unmarshal(contentBytes, &reqWithdrawRewardContent) + if err != nil { + Logger.log.Error("WARNING - VALIDATION: an error 
occured while parsing portal request withdraw reward content: ", err) + continue + } + shardIDFromInst = reqWithdrawRewardContent.ShardID + txReqIDFromInst = reqWithdrawRewardContent.TxReqID + custodianAddrStrFromInst = reqWithdrawRewardContent.CustodianAddressStr + rewardAmountFromInst = reqWithdrawRewardContent.RewardAmount + tokenIDFromInst = reqWithdrawRewardContent.TokenID + + if !bytes.Equal(iRes.TxReqID[:], txReqIDFromInst[:]) || + shardID != shardIDFromInst { + continue + } + key, err := wallet.Base58CheckDeserialize(custodianAddrStrFromInst) + if err != nil { + Logger.log.Info("WARNING - VALIDATION: an error occured while deserializing custodian address string: ", err) + continue + } + + _, pk, paidAmount, assetID := tx.GetTransferData() + if !bytes.Equal(key.KeySet.PaymentAddress.Pk[:], pk[:]) || + rewardAmountFromInst != paidAmount || + tokenIDFromInst.String() != assetID.String() { + continue + } + idx = i + break + } + if idx == -1 { // not found the issuance request tx for this response + return false, fmt.Errorf(fmt.Sprintf("no PortalWithdrawReward instruction found for PortalWithdrawRewardResponse tx %s", tx.Hash().String())) + } + instUsed[idx] = 1 + return true, nil +} diff --git a/metadata/relayingheader.go b/metadata/relayingheader.go new file mode 100644 index 0000000000..4c67e2ce63 --- /dev/null +++ b/metadata/relayingheader.go @@ -0,0 +1,143 @@ +package metadata + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "strconv" + + "github.com/incognitochain/incognito-chain/common" + "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + "github.com/incognitochain/incognito-chain/wallet" +) + +// RelayingHeader - relaying header chain +// metadata - create normal tx with this metadata +type RelayingHeader struct { + MetadataBase + IncogAddressStr string + Header string + BlockHeight uint64 +} + +// RelayingHeaderAction - shard validator creates instruction that contain this action content +// it will be append 
to ShardToBeaconBlock +type RelayingHeaderAction struct { + Meta RelayingHeader + TxReqID common.Hash + ShardID byte +} + +// RelayingHeaderContent - Beacon builds a new instruction with this content after receiving a instruction from shard +// It will be appended to beaconBlock +// both accepted and refund status +type RelayingHeaderContent struct { + IncogAddressStr string + Header string + BlockHeight uint64 + TxReqID common.Hash +} + +// RelayingHeaderStatus - Beacon tracks status of custodian deposit tx into db +type RelayingHeaderStatus struct { + Status byte + IncogAddressStr string + Header string + BlockHeight uint64 +} + +func NewRelayingHeader( + metaType int, + incognitoAddrStr string, + header string, + blockHeight uint64, +) (*RelayingHeader, error) { + metadataBase := MetadataBase{ + Type: metaType, + } + relayingHeader := &RelayingHeader{ + IncogAddressStr: incognitoAddrStr, + Header: header, + BlockHeight: blockHeight, + } + relayingHeader.MetadataBase = metadataBase + return relayingHeader, nil +} + +//todo +func (headerRelaying RelayingHeader) ValidateTxWithBlockChain( + txr Transaction, + bcr BlockchainRetriever, + shardID byte, + db *statedb.StateDB, +) (bool, error) { + return true, nil +} + +func (rh RelayingHeader) ValidateSanityData(bcr BlockchainRetriever, txr Transaction, beaconHeight uint64) (bool, bool, error) { + // validate IncogAddressStr + keyWallet, err := wallet.Base58CheckDeserialize(rh.IncogAddressStr) + if err != nil { + return false, false, NewMetadataTxError(IssuingRequestNewIssuingRequestFromMapEror, errors.New("sender address is incorrect")) + } + incogAddr := keyWallet.KeySet.PaymentAddress + if len(incogAddr.Pk) == 0 { + return false, false, errors.New("wrong sender address") + } + if !bytes.Equal(txr.GetSigPubKey()[:], incogAddr.Pk[:]) { + return false, false, errors.New("sender address is not signer tx") + } + + // check tx type + if txr.GetType() != common.TxNormalType { + return false, false, errors.New("tx push 
header relaying must be TxNormalType") + } + + // check block height + if rh.BlockHeight < 1 { + return false, false, errors.New("BlockHeight must be greater than 0") + } + + // check header + headerBytes, err := base64.StdEncoding.DecodeString(rh.Header) + if err != nil || len(headerBytes) == 0 { + return false, false, errors.New("header is invalid") + } + + return true, true, nil +} + +func (rh RelayingHeader) ValidateMetadataByItself() bool { + return rh.Type == RelayingBNBHeaderMeta || rh.Type == RelayingBTCHeaderMeta +} + +func (rh RelayingHeader) Hash() *common.Hash { + record := rh.MetadataBase.Hash().String() + record += rh.IncogAddressStr + record += rh.Header + record += strconv.Itoa(int(rh.BlockHeight)) + + // final hash + hash := common.HashH([]byte(record)) + return &hash +} + +func (rh *RelayingHeader) BuildReqActions(tx Transaction, bcr BlockchainRetriever, shardID byte) ([][]string, error) { + actionContent := RelayingHeaderAction{ + Meta: *rh, + TxReqID: *tx.Hash(), + ShardID: shardID, + } + actionContentBytes, err := json.Marshal(actionContent) + if err != nil { + return [][]string{}, err + } + actionContentBase64Str := base64.StdEncoding.EncodeToString(actionContentBytes) + action := []string{strconv.Itoa(rh.Type), actionContentBase64Str} + return [][]string{action}, nil +} + +func (rh *RelayingHeader) CalculateSize() uint64 { + return calculateSize(rh) +} diff --git a/metadata/withdrawreward.go b/metadata/withdrawreward.go index 9304739c6b..1cd6f20b8b 100644 --- a/metadata/withdrawreward.go +++ b/metadata/withdrawreward.go @@ -194,4 +194,4 @@ func (withDrawRewardResponse WithDrawRewardResponse) ValidateSanityData(bcr Bloc func (withDrawRewardResponse WithDrawRewardResponse) ValidateMetadataByItself() bool { // The validation just need to check at tx level, so returning true here return true -} +} \ No newline at end of file diff --git a/mocks/BlockchainRetriever.go b/mocks/BlockchainRetriever.go new file mode 100644 index 0000000000..7b21e4b0fd 
--- /dev/null +++ b/mocks/BlockchainRetriever.go @@ -0,0 +1,465 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import common "github.com/incognitochain/incognito-chain/common" +import incognitokey "github.com/incognitochain/incognito-chain/incognitokey" +import metadata "github.com/incognitochain/incognito-chain/metadata" +import mock "github.com/stretchr/testify/mock" +import statedb "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" + +// BlockchainRetriever is an autogenerated mock type for the BlockchainRetriever type +type BlockchainRetriever struct { + mock.Mock +} + +// GetAllCommitteeValidatorCandidate provides a mock function with given fields: +func (_m *BlockchainRetriever) GetAllCommitteeValidatorCandidate() (map[byte][]incognitokey.CommitteePublicKey, map[byte][]incognitokey.CommitteePublicKey, []incognitokey.CommitteePublicKey, []incognitokey.CommitteePublicKey, []incognitokey.CommitteePublicKey, []incognitokey.CommitteePublicKey, []incognitokey.CommitteePublicKey, []incognitokey.CommitteePublicKey, error) { + ret := _m.Called() + + var r0 map[byte][]incognitokey.CommitteePublicKey + if rf, ok := ret.Get(0).(func() map[byte][]incognitokey.CommitteePublicKey); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[byte][]incognitokey.CommitteePublicKey) + } + } + + var r1 map[byte][]incognitokey.CommitteePublicKey + if rf, ok := ret.Get(1).(func() map[byte][]incognitokey.CommitteePublicKey); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[byte][]incognitokey.CommitteePublicKey) + } + } + + var r2 []incognitokey.CommitteePublicKey + if rf, ok := ret.Get(2).(func() []incognitokey.CommitteePublicKey); ok { + r2 = rf() + } else { + if ret.Get(2) != nil { + r2 = ret.Get(2).([]incognitokey.CommitteePublicKey) + } + } + + var r3 []incognitokey.CommitteePublicKey + if rf, ok := ret.Get(3).(func() []incognitokey.CommitteePublicKey); ok { + r3 = rf() + } else { + if 
ret.Get(3) != nil { + r3 = ret.Get(3).([]incognitokey.CommitteePublicKey) + } + } + + var r4 []incognitokey.CommitteePublicKey + if rf, ok := ret.Get(4).(func() []incognitokey.CommitteePublicKey); ok { + r4 = rf() + } else { + if ret.Get(4) != nil { + r4 = ret.Get(4).([]incognitokey.CommitteePublicKey) + } + } + + var r5 []incognitokey.CommitteePublicKey + if rf, ok := ret.Get(5).(func() []incognitokey.CommitteePublicKey); ok { + r5 = rf() + } else { + if ret.Get(5) != nil { + r5 = ret.Get(5).([]incognitokey.CommitteePublicKey) + } + } + + var r6 []incognitokey.CommitteePublicKey + if rf, ok := ret.Get(6).(func() []incognitokey.CommitteePublicKey); ok { + r6 = rf() + } else { + if ret.Get(6) != nil { + r6 = ret.Get(6).([]incognitokey.CommitteePublicKey) + } + } + + var r7 []incognitokey.CommitteePublicKey + if rf, ok := ret.Get(7).(func() []incognitokey.CommitteePublicKey); ok { + r7 = rf() + } else { + if ret.Get(7) != nil { + r7 = ret.Get(7).([]incognitokey.CommitteePublicKey) + } + } + + var r8 error + if rf, ok := ret.Get(8).(func() error); ok { + r8 = rf() + } else { + r8 = ret.Error(8) + } + + return r0, r1, r2, r3, r4, r5, r6, r7, r8 +} + +// GetAllCommitteeValidatorCandidateFlattenListFromDatabase provides a mock function with given fields: +func (_m *BlockchainRetriever) GetAllCommitteeValidatorCandidateFlattenListFromDatabase() ([]string, error) { + ret := _m.Called() + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAutoStakingList provides a mock function with given fields: +func (_m *BlockchainRetriever) GetAutoStakingList() map[string]bool { + ret := _m.Called() + + var r0 map[string]bool + if rf, ok := ret.Get(0).(func() map[string]bool); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(map[string]bool) + } + } + + return r0 +} + +// GetBeaconFeatureStateDB provides a mock function with given fields: +func (_m *BlockchainRetriever) GetBeaconFeatureStateDB() *statedb.StateDB { + ret := _m.Called() + + var r0 *statedb.StateDB + if rf, ok := ret.Get(0).(func() *statedb.StateDB); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*statedb.StateDB) + } + } + + return r0 +} + +// GetBeaconHeight provides a mock function with given fields: +func (_m *BlockchainRetriever) GetBeaconHeight() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetBeaconHeightBreakPointBurnAddr provides a mock function with given fields: +func (_m *BlockchainRetriever) GetBeaconHeightBreakPointBurnAddr() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetBeaconRewardStateDB provides a mock function with given fields: +func (_m *BlockchainRetriever) GetBeaconRewardStateDB() *statedb.StateDB { + ret := _m.Called() + + var r0 *statedb.StateDB + if rf, ok := ret.Get(0).(func() *statedb.StateDB); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*statedb.StateDB) + } + } + + return r0 +} + +// GetBeaconSlashStateDB provides a mock function with given fields: +func (_m *BlockchainRetriever) GetBeaconSlashStateDB() *statedb.StateDB { + ret := _m.Called() + + var r0 *statedb.StateDB + if rf, ok := ret.Get(0).(func() *statedb.StateDB); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*statedb.StateDB) + } + } + + return r0 +} + +// GetBurningAddress provides a mock function with given fields: blockHeight +func (_m *BlockchainRetriever) GetBurningAddress(blockHeight uint64) string { + ret := _m.Called(blockHeight) + + var r0 string + if rf, ok := 
ret.Get(0).(func(uint64) string); ok { + r0 = rf(blockHeight) + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// GetCentralizedWebsitePaymentAddress provides a mock function with given fields: +func (_m *BlockchainRetriever) GetCentralizedWebsitePaymentAddress() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// GetChainHeight provides a mock function with given fields: _a0 +func (_m *BlockchainRetriever) GetChainHeight(_a0 byte) uint64 { + ret := _m.Called(_a0) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(byte) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetCurrentBeaconBlockHeight provides a mock function with given fields: _a0 +func (_m *BlockchainRetriever) GetCurrentBeaconBlockHeight(_a0 byte) uint64 { + ret := _m.Called(_a0) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(byte) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetShardFeatureStateDB provides a mock function with given fields: shardID +func (_m *BlockchainRetriever) GetShardFeatureStateDB(shardID byte) *statedb.StateDB { + ret := _m.Called(shardID) + + var r0 *statedb.StateDB + if rf, ok := ret.Get(0).(func(byte) *statedb.StateDB); ok { + r0 = rf(shardID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*statedb.StateDB) + } + } + + return r0 +} + +// GetShardIDFromTx provides a mock function with given fields: txid +func (_m *BlockchainRetriever) GetShardIDFromTx(txid string) (byte, error) { + ret := _m.Called(txid) + + var r0 byte + if rf, ok := ret.Get(0).(func(string) byte); ok { + r0 = rf(txid) + } else { + r0 = ret.Get(0).(byte) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(txid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetShardRewardStateDB provides a mock function with given fields: 
shardID +func (_m *BlockchainRetriever) GetShardRewardStateDB(shardID byte) *statedb.StateDB { + ret := _m.Called(shardID) + + var r0 *statedb.StateDB + if rf, ok := ret.Get(0).(func(byte) *statedb.StateDB); ok { + r0 = rf(shardID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*statedb.StateDB) + } + } + + return r0 +} + +// GetStakingAmountShard provides a mock function with given fields: +func (_m *BlockchainRetriever) GetStakingAmountShard() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetStakingTx provides a mock function with given fields: _a0 +func (_m *BlockchainRetriever) GetStakingTx(_a0 byte) map[string]string { + ret := _m.Called(_a0) + + var r0 map[string]string + if rf, ok := ret.Get(0).(func(byte) map[string]string); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +// GetTransactionByHash provides a mock function with given fields: _a0 +func (_m *BlockchainRetriever) GetTransactionByHash(_a0 common.Hash) (byte, common.Hash, int, metadata.Transaction, error) { + ret := _m.Called(_a0) + + var r0 byte + if rf, ok := ret.Get(0).(func(common.Hash) byte); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(byte) + } + + var r1 common.Hash + if rf, ok := ret.Get(1).(func(common.Hash) common.Hash); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(common.Hash) + } + } + + var r2 int + if rf, ok := ret.Get(2).(func(common.Hash) int); ok { + r2 = rf(_a0) + } else { + r2 = ret.Get(2).(int) + } + + var r3 metadata.Transaction + if rf, ok := ret.Get(3).(func(common.Hash) metadata.Transaction); ok { + r3 = rf(_a0) + } else { + if ret.Get(3) != nil { + r3 = ret.Get(3).(metadata.Transaction) + } + } + + var r4 error + if rf, ok := ret.Get(4).(func(common.Hash) error); ok { + r4 = rf(_a0) + } else { + r4 = ret.Error(4) + } + + return r0, 
r1, r2, r3, r4 +} + +// GetTxChainHeight provides a mock function with given fields: tx +func (_m *BlockchainRetriever) GetTxChainHeight(tx metadata.Transaction) (uint64, error) { + ret := _m.Called(tx) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(metadata.Transaction) uint64); ok { + r0 = rf(tx) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(metadata.Transaction) error); ok { + r1 = rf(tx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTxValue provides a mock function with given fields: txid +func (_m *BlockchainRetriever) GetTxValue(txid string) (uint64, error) { + ret := _m.Called(txid) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(string) uint64); ok { + r0 = rf(txid) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(txid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListPrivacyTokenAndBridgeTokenAndPRVByShardID provides a mock function with given fields: _a0 +func (_m *BlockchainRetriever) ListPrivacyTokenAndBridgeTokenAndPRVByShardID(_a0 byte) ([]common.Hash, error) { + ret := _m.Called(_a0) + + var r0 []common.Hash + if rf, ok := ret.Get(0).(func(byte) []common.Hash); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]common.Hash) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/mocks/DatabaseAccessWarper.go b/mocks/DatabaseAccessWarper.go new file mode 100644 index 0000000000..293e6caa54 --- /dev/null +++ b/mocks/DatabaseAccessWarper.go @@ -0,0 +1,68 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import common "github.com/incognitochain/incognito-chain/common" +import mock "github.com/stretchr/testify/mock" +import statedb "github.com/incognitochain/incognito-chain/dataaccessobject/statedb" +import trie "github.com/incognitochain/incognito-chain/trie" + +// DatabaseAccessWarper is an autogenerated mock type for the DatabaseAccessWarper type +type DatabaseAccessWarper struct { + mock.Mock +} + +// CopyTrie provides a mock function with given fields: _a0 +func (_m *DatabaseAccessWarper) CopyTrie(_a0 statedb.Trie) statedb.Trie { + ret := _m.Called(_a0) + + var r0 statedb.Trie + if rf, ok := ret.Get(0).(func(statedb.Trie) statedb.Trie); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(statedb.Trie) + } + } + + return r0 +} + +// OpenPrefixTrie provides a mock function with given fields: root +func (_m *DatabaseAccessWarper) OpenPrefixTrie(root common.Hash) (statedb.Trie, error) { + ret := _m.Called(root) + + var r0 statedb.Trie + if rf, ok := ret.Get(0).(func(common.Hash) statedb.Trie); ok { + r0 = rf(root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(statedb.Trie) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TrieDB provides a mock function with given fields: +func (_m *DatabaseAccessWarper) TrieDB() *trie.IntermediateWriter { + ret := _m.Called() + + var r0 *trie.IntermediateWriter + if rf, ok := ret.Get(0).(func() *trie.IntermediateWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*trie.IntermediateWriter) + } + } + + return r0 +} diff --git a/mocks/DatabaseInterface.go b/mocks/DatabaseInterface.go deleted file mode 100644 index 47a7895651..0000000000 --- a/mocks/DatabaseInterface.go +++ /dev/null @@ -1,2379 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import big "math/big" -import common "github.com/incognitochain/incognito-chain/common" -import incdb "github.com/incognitochain/incognito-chain/incdb" -import mock "github.com/stretchr/testify/mock" - -// DatabaseInterface is an autogenerated mock type for the DatabaseInterface type -type Database struct { - mock.Mock -} - -// AddCommitteeReward provides a mock function with given fields: committeeAddress, amount, tokenID -func (_m *DatabaseInterface) AddCommitteeReward(committeeAddress []byte, amount uint64, tokenID common.Hash) error { - ret := _m.Called(committeeAddress, amount, tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, uint64, common.Hash) error); ok { - r0 = rf(committeeAddress, amount, tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AddShardRewardRequest provides a mock function with given fields: epoch, shardID, amount, tokenID, bd -func (_m *DatabaseInterface) AddShardRewardRequest(epoch uint64, shardID byte, amount uint64, tokenID common.Hash, bd *[]database.BatchData) error { - ret := _m.Called(epoch, shardID, amount, tokenID, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, byte, uint64, common.Hash, *[]database.BatchData) error); ok { - r0 = rf(epoch, shardID, amount, tokenID, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AddTradeFeeUp provides a mock function with given fields: beaconHeight, token1IDStr, token2IDStr, tokenIDToBuyStr, amt -func (_m *DatabaseInterface) AddTradeFeeUp(beaconHeight uint64, token1IDStr string, token2IDStr string, tokenIDToBuyStr string, amt uint64) error { - ret := _m.Called(beaconHeight, token1IDStr, token2IDStr, tokenIDToBuyStr, amt) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, string, string, string, uint64) error); ok { - r0 = rf(beaconHeight, token1IDStr, token2IDStr, tokenIDToBuyStr, amt) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BackupBridgedTokenByTokenID provides a mock function with given fields: 
tokenID -func (_m *DatabaseInterface) BackupBridgedTokenByTokenID(tokenID common.Hash) error { - ret := _m.Called(tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash) error); ok { - r0 = rf(tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BackupCommitmentsOfPubkey provides a mock function with given fields: tokenID, shardID, pubkey -func (_m *DatabaseInterface) BackupCommitmentsOfPubkey(tokenID common.Hash, shardID byte, pubkey []byte) error { - ret := _m.Called(tokenID, shardID, pubkey) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, byte, []byte) error); ok { - r0 = rf(tokenID, shardID, pubkey) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BackupCommitteeReward provides a mock function with given fields: committeeAddress, tokenID -func (_m *DatabaseInterface) BackupCommitteeReward(committeeAddress []byte, tokenID common.Hash) error { - ret := _m.Called(committeeAddress, tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, common.Hash) error); ok { - r0 = rf(committeeAddress, tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BackupSerialNumbersLen provides a mock function with given fields: tokenID, shardID -func (_m *DatabaseInterface) BackupSerialNumbersLen(tokenID common.Hash, shardID byte) error { - ret := _m.Called(tokenID, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, byte) error); ok { - r0 = rf(tokenID, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BackupShardRewardRequest provides a mock function with given fields: epoch, shardID, tokenID -func (_m *DatabaseInterface) BackupShardRewardRequest(epoch uint64, shardID byte, tokenID common.Hash) error { - ret := _m.Called(epoch, shardID, tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, byte, common.Hash) error); ok { - r0 = rf(epoch, shardID, tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CanProcessCIncToken provides a mock function with 
given fields: incTokenID -func (_m *DatabaseInterface) CanProcessCIncToken(incTokenID common.Hash) (bool, error) { - ret := _m.Called(incTokenID) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash) bool); ok { - r0 = rf(incTokenID) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(incTokenID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CanProcessTokenPair provides a mock function with given fields: externalTokenID, incTokenID -func (_m *DatabaseInterface) CanProcessTokenPair(externalTokenID []byte, incTokenID common.Hash) (bool, error) { - ret := _m.Called(externalTokenID, incTokenID) - - var r0 bool - if rf, ok := ret.Get(0).(func([]byte, common.Hash) bool); ok { - r0 = rf(externalTokenID, incTokenID) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte, common.Hash) error); ok { - r1 = rf(externalTokenID, incTokenID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CleanBackup provides a mock function with given fields: isBeacon, shardID -func (_m *DatabaseInterface) CleanBackup(isBeacon bool, shardID byte) error { - ret := _m.Called(isBeacon, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(bool, byte) error); ok { - r0 = rf(isBeacon, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CleanBeaconBestState provides a mock function with given fields: -func (_m *DatabaseInterface) CleanBeaconBestState() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CleanCommitments provides a mock function with given fields: -func (_m *DatabaseInterface) CleanCommitments() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CleanFeeEstimator provides a mock function with given 
fields: -func (_m *DatabaseInterface) CleanFeeEstimator() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CleanSNDerivator provides a mock function with given fields: -func (_m *DatabaseInterface) CleanSNDerivator() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CleanSerialNumbers provides a mock function with given fields: -func (_m *DatabaseInterface) CleanSerialNumbers() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CleanShardBestState provides a mock function with given fields: -func (_m *DatabaseInterface) CleanShardBestState() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Close provides a mock function with given fields: -func (_m *DatabaseInterface) Close() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ContributeToPDE provides a mock function with given fields: beaconHeight, pairID, contributorAddressStr, tokenIDStr, contributedAmount -func (_m *DatabaseInterface) ContributeToPDE(beaconHeight uint64, pairID string, contributorAddressStr string, tokenIDStr string, contributedAmount uint64) error { - ret := _m.Called(beaconHeight, pairID, contributorAddressStr, tokenIDStr, contributedAmount) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, string, string, string, uint64) error); ok { - r0 = rf(beaconHeight, pairID, contributorAddressStr, tokenIDStr, contributedAmount) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeductSharesForWithdrawal provides a mock function with 
given fields: beaconHeight, token1IDStr, token2IDStr, targetingTokenIDStr, withdrawerAddressStr, amt -func (_m *DatabaseInterface) DeductSharesForWithdrawal(beaconHeight uint64, token1IDStr string, token2IDStr string, targetingTokenIDStr string, withdrawerAddressStr string, amt uint64) error { - ret := _m.Called(beaconHeight, token1IDStr, token2IDStr, targetingTokenIDStr, withdrawerAddressStr, amt) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, string, string, string, string, uint64) error); ok { - r0 = rf(beaconHeight, token1IDStr, token2IDStr, targetingTokenIDStr, withdrawerAddressStr, amt) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeductTradeFee provides a mock function with given fields: beaconHeight, token1IDStr, token2IDStr, tokenIDToBuyStr, amt -func (_m *DatabaseInterface) DeductTradeFee(beaconHeight uint64, token1IDStr string, token2IDStr string, tokenIDToBuyStr string, amt uint64) error { - ret := _m.Called(beaconHeight, token1IDStr, token2IDStr, tokenIDToBuyStr, amt) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, string, string, string, uint64) error); ok { - r0 = rf(beaconHeight, token1IDStr, token2IDStr, tokenIDToBuyStr, amt) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Delete provides a mock function with given fields: key -func (_m *DatabaseInterface) Delete(key []byte) error { - ret := _m.Called(key) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte) error); ok { - r0 = rf(key) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteAcceptedShardToBeacon provides a mock function with given fields: shardID, shardBlkHash -func (_m *DatabaseInterface) DeleteAcceptedShardToBeacon(shardID byte, shardBlkHash common.Hash) error { - ret := _m.Called(shardID, shardBlkHash) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, common.Hash) error); ok { - r0 = rf(shardID, shardBlkHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteBeaconBlock provides a mock function with given 
fields: hash, idx -func (_m *DatabaseInterface) DeleteBeaconBlock(hash common.Hash, idx uint64) error { - ret := _m.Called(hash, idx) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, uint64) error); ok { - r0 = rf(hash, idx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteBlock provides a mock function with given fields: hash, idx, shardID -func (_m *DatabaseInterface) DeleteBlock(hash common.Hash, idx uint64, shardID byte) error { - ret := _m.Called(hash, idx, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, uint64, byte) error); ok { - r0 = rf(hash, idx, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteCommitteeByHeight provides a mock function with given fields: blkEpoch -func (_m *DatabaseInterface) DeleteCommitteeByHeight(blkEpoch uint64) error { - ret := _m.Called(blkEpoch) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { - r0 = rf(blkEpoch) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteIncomingCrossShard provides a mock function with given fields: shardID, crossShardID, crossBlkHash -func (_m *DatabaseInterface) DeleteIncomingCrossShard(shardID byte, crossShardID byte, crossBlkHash common.Hash) error { - ret := _m.Called(shardID, crossShardID, crossBlkHash) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, byte, common.Hash) error); ok { - r0 = rf(shardID, crossShardID, crossBlkHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteOutputCoin provides a mock function with given fields: tokenID, publicKey, outputCoinArr, shardID -func (_m *DatabaseInterface) DeleteOutputCoin(tokenID common.Hash, publicKey []byte, outputCoinArr [][]byte, shardID byte) error { - ret := _m.Called(tokenID, publicKey, outputCoinArr, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, []byte, [][]byte, byte) error); ok { - r0 = rf(tokenID, publicKey, outputCoinArr, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// 
DeletePrivacyToken provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) DeletePrivacyToken(tokenID common.Hash) error { - ret := _m.Called(tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash) error); ok { - r0 = rf(tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeletePrivacyTokenCrossShard provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) DeletePrivacyTokenCrossShard(tokenID common.Hash) error { - ret := _m.Called(tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash) error); ok { - r0 = rf(tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeletePrivacyTokenTx provides a mock function with given fields: tokenID, txIndex, shardID, blockHeight -func (_m *DatabaseInterface) DeletePrivacyTokenTx(tokenID common.Hash, txIndex int32, shardID byte, blockHeight uint64) error { - ret := _m.Called(tokenID, txIndex, shardID, blockHeight) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, int32, byte, uint64) error); ok { - r0 = rf(tokenID, txIndex, shardID, blockHeight) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteTransactionIndex provides a mock function with given fields: txId -func (_m *DatabaseInterface) DeleteTransactionIndex(txId common.Hash) error { - ret := _m.Called(txId) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash) error); ok { - r0 = rf(txId) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteWaitingPDEContributionByPairID provides a mock function with given fields: beaconHeight, pairID -func (_m *DatabaseInterface) DeleteWaitingPDEContributionByPairID(beaconHeight uint64, pairID string) error { - ret := _m.Called(beaconHeight, pairID) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, string) error); ok { - r0 = rf(beaconHeight, pairID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// FetchAutoStakingByHeight provides a mock function with given fields: height 
-func (_m *DatabaseInterface) FetchAutoStakingByHeight(height uint64) ([]byte, error) { - ret := _m.Called(height) - - var r0 []byte - if rf, ok := ret.Get(0).(func(uint64) []byte); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchBeaconBestState provides a mock function with given fields: -func (_m *DatabaseInterface) FetchBeaconBestState() ([]byte, error) { - ret := _m.Called() - - var r0 []byte - if rf, ok := ret.Get(0).(func() []byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchBeaconBlock provides a mock function with given fields: hash -func (_m *DatabaseInterface) FetchBeaconBlock(hash common.Hash) ([]byte, error) { - ret := _m.Called(hash) - - var r0 []byte - if rf, ok := ret.Get(0).(func(common.Hash) []byte); ok { - r0 = rf(hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchBeaconCommitteeByHeight provides a mock function with given fields: height -func (_m *DatabaseInterface) FetchBeaconCommitteeByHeight(height uint64) ([]byte, error) { - ret := _m.Called(height) - - var r0 []byte - if rf, ok := ret.Get(0).(func(uint64) []byte); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchBlock provides a mock function with given fields: hash -func (_m *DatabaseInterface) FetchBlock(hash common.Hash) ([]byte, 
error) { - ret := _m.Called(hash) - - var r0 []byte - if rf, ok := ret.Get(0).(func(common.Hash) []byte); ok { - r0 = rf(hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchCrossShardNextHeight provides a mock function with given fields: fromShard, toShard, curHeight -func (_m *DatabaseInterface) FetchCrossShardNextHeight(fromShard byte, toShard byte, curHeight uint64) (uint64, error) { - ret := _m.Called(fromShard, toShard, curHeight) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(byte, byte, uint64) uint64); ok { - r0 = rf(fromShard, toShard, curHeight) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(byte, byte, uint64) error); ok { - r1 = rf(fromShard, toShard, curHeight) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchPrevBestState provides a mock function with given fields: isBeacon, shardID -func (_m *DatabaseInterface) FetchPrevBestState(isBeacon bool, shardID byte) ([]byte, error) { - ret := _m.Called(isBeacon, shardID) - - var r0 []byte - if rf, ok := ret.Get(0).(func(bool, byte) []byte); ok { - r0 = rf(isBeacon, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(bool, byte) error); ok { - r1 = rf(isBeacon, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchRewardReceiverByHeight provides a mock function with given fields: height -func (_m *DatabaseInterface) FetchRewardReceiverByHeight(height uint64) ([]byte, error) { - ret := _m.Called(height) - - var r0 []byte - if rf, ok := ret.Get(0).(func(uint64) []byte); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = 
ret.Error(1) - } - - return r0, r1 -} - -// FetchShardBestState provides a mock function with given fields: shardID -func (_m *DatabaseInterface) FetchShardBestState(shardID byte) ([]byte, error) { - ret := _m.Called(shardID) - - var r0 []byte - if rf, ok := ret.Get(0).(func(byte) []byte); ok { - r0 = rf(shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(byte) error); ok { - r1 = rf(shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchShardCommitteeByHeight provides a mock function with given fields: height -func (_m *DatabaseInterface) FetchShardCommitteeByHeight(height uint64) ([]byte, error) { - ret := _m.Called(height) - - var r0 []byte - if rf, ok := ret.Get(0).(func(uint64) []byte); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Get provides a mock function with given fields: key -func (_m *DatabaseInterface) Get(key []byte) ([]byte, error) { - ret := _m.Called(key) - - var r0 []byte - if rf, ok := ret.Get(0).(func([]byte) []byte); ok { - r0 = rf(key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAcceptedShardToBeacon provides a mock function with given fields: shardID, shardBlkHash -func (_m *DatabaseInterface) GetAcceptedShardToBeacon(shardID byte, shardBlkHash common.Hash) (uint64, error) { - ret := _m.Called(shardID, shardBlkHash) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(byte, common.Hash) uint64); ok { - r0 = rf(shardID, shardBlkHash) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(byte, common.Hash) error); ok { - r1 = rf(shardID, 
shardBlkHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAllBridgeTokens provides a mock function with given fields: -func (_m *DatabaseInterface) GetAllBridgeTokens() ([]byte, error) { - ret := _m.Called() - - var r0 []byte - if rf, ok := ret.Get(0).(func() []byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAllRecordsByPrefix provides a mock function with given fields: beaconHeight, prefix -func (_m *DatabaseInterface) GetAllRecordsByPrefix(beaconHeight uint64, prefix []byte) ([][]byte, [][]byte, error) { - ret := _m.Called(beaconHeight, prefix) - - var r0 [][]byte - if rf, ok := ret.Get(0).(func(uint64, []byte) [][]byte); ok { - r0 = rf(beaconHeight, prefix) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 [][]byte - if rf, ok := ret.Get(1).(func(uint64, []byte) [][]byte); ok { - r1 = rf(beaconHeight, prefix) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([][]byte) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(uint64, []byte) error); ok { - r2 = rf(beaconHeight, prefix) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetAllTokenIDForReward provides a mock function with given fields: epoch -func (_m *DatabaseInterface) GetAllTokenIDForReward(epoch uint64) ([]common.Hash, error) { - ret := _m.Called(epoch) - - var r0 []common.Hash - if rf, ok := ret.Get(0).(func(uint64) []common.Hash); ok { - r0 = rf(epoch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]common.Hash) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(epoch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBeaconBlockHashByIndex provides a mock function with given fields: idx -func (_m *DatabaseInterface) GetBeaconBlockHashByIndex(idx uint64) (common.Hash, error) 
{ - ret := _m.Called(idx) - - var r0 common.Hash - if rf, ok := ret.Get(0).(func(uint64) common.Hash); ok { - r0 = rf(idx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(idx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBlockByIndex provides a mock function with given fields: idx, shardID -func (_m *DatabaseInterface) GetBlockByIndex(idx uint64, shardID byte) (common.Hash, error) { - ret := _m.Called(idx, shardID) - - var r0 common.Hash - if rf, ok := ret.Get(0).(func(uint64, byte) common.Hash); ok { - r0 = rf(idx, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64, byte) error); ok { - r1 = rf(idx, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBridgeReqWithStatus provides a mock function with given fields: txReqID -func (_m *DatabaseInterface) GetBridgeReqWithStatus(txReqID common.Hash) (byte, error) { - ret := _m.Called(txReqID) - - var r0 byte - if rf, ok := ret.Get(0).(func(common.Hash) byte); ok { - r0 = rf(txReqID) - } else { - r0 = ret.Get(0).(byte) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(txReqID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBurningConfirm provides a mock function with given fields: txID -func (_m *DatabaseInterface) GetBurningConfirm(txID common.Hash) (uint64, error) { - ret := _m.Called(txID) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(common.Hash) uint64); ok { - r0 = rf(txID) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(txID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCommitmentByIndex provides a mock function with given fields: tokenID, commitmentIndex, shardID -func (_m *DatabaseInterface) GetCommitmentByIndex(tokenID 
common.Hash, commitmentIndex uint64, shardID byte) ([]byte, error) { - ret := _m.Called(tokenID, commitmentIndex, shardID) - - var r0 []byte - if rf, ok := ret.Get(0).(func(common.Hash, uint64, byte) []byte); ok { - r0 = rf(tokenID, commitmentIndex, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, uint64, byte) error); ok { - r1 = rf(tokenID, commitmentIndex, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCommitmentIndex provides a mock function with given fields: tokenID, commitment, shardID -func (_m *DatabaseInterface) GetCommitmentIndex(tokenID common.Hash, commitment []byte, shardID byte) (*big.Int, error) { - ret := _m.Called(tokenID, commitment, shardID) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(common.Hash, []byte, byte) *big.Int); ok { - r0 = rf(tokenID, commitment, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, []byte, byte) error); ok { - r1 = rf(tokenID, commitment, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCommitmentLength provides a mock function with given fields: tokenID, shardID -func (_m *DatabaseInterface) GetCommitmentLength(tokenID common.Hash, shardID byte) (*big.Int, error) { - ret := _m.Called(tokenID, shardID) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(common.Hash, byte) *big.Int); ok { - r0 = rf(tokenID, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, byte) error); ok { - r1 = rf(tokenID, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCommitteeReward provides a mock function with given fields: committeeAddress, tokenID -func (_m *DatabaseInterface) GetCommitteeReward(committeeAddress []byte, tokenID common.Hash) (uint64, error) { - ret := 
_m.Called(committeeAddress, tokenID) - - var r0 uint64 - if rf, ok := ret.Get(0).(func([]byte, common.Hash) uint64); ok { - r0 = rf(committeeAddress, tokenID) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte, common.Hash) error); ok { - r1 = rf(committeeAddress, tokenID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetFeeEstimator provides a mock function with given fields: shardID -func (_m *DatabaseInterface) GetFeeEstimator(shardID byte) ([]byte, error) { - ret := _m.Called(shardID) - - var r0 []byte - if rf, ok := ret.Get(0).(func(byte) []byte); ok { - r0 = rf(shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(byte) error); ok { - r1 = rf(shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetIncomingCrossShard provides a mock function with given fields: shardID, crossShardID, crossBlkHash -func (_m *DatabaseInterface) GetIncomingCrossShard(shardID byte, crossShardID byte, crossBlkHash common.Hash) (uint64, error) { - ret := _m.Called(shardID, crossShardID, crossBlkHash) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(byte, byte, common.Hash) uint64); ok { - r0 = rf(shardID, crossShardID, crossBlkHash) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(byte, byte, common.Hash) error); ok { - r1 = rf(shardID, crossShardID, crossBlkHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetIndexOfBeaconBlock provides a mock function with given fields: hash -func (_m *DatabaseInterface) GetIndexOfBeaconBlock(hash common.Hash) (uint64, error) { - ret := _m.Called(hash) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(common.Hash) uint64); ok { - r0 = rf(hash) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
GetIndexOfBlock provides a mock function with given fields: hash -func (_m *DatabaseInterface) GetIndexOfBlock(hash common.Hash) (uint64, byte, error) { - ret := _m.Called(hash) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(common.Hash) uint64); ok { - r0 = rf(hash) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 byte - if rf, ok := ret.Get(1).(func(common.Hash) byte); ok { - r1 = rf(hash) - } else { - r1 = ret.Get(1).(byte) - } - - var r2 error - if rf, ok := ret.Get(2).(func(common.Hash) error); ok { - r2 = rf(hash) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetLatestPDEPoolForPair provides a mock function with given fields: tokenIDToBuyStr, tokenIDToSellStr -func (_m *DatabaseInterface) GetLatestPDEPoolForPair(tokenIDToBuyStr string, tokenIDToSellStr string) ([]byte, error) { - ret := _m.Called(tokenIDToBuyStr, tokenIDToSellStr) - - var r0 []byte - if rf, ok := ret.Get(0).(func(string, string) []byte); ok { - r0 = rf(tokenIDToBuyStr, tokenIDToSellStr) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(tokenIDToBuyStr, tokenIDToSellStr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetOutcoinsByPubkey provides a mock function with given fields: tokenID, pubkey, shardID -func (_m *DatabaseInterface) GetOutcoinsByPubkey(tokenID common.Hash, pubkey []byte, shardID byte) ([][]byte, error) { - ret := _m.Called(tokenID, pubkey, shardID) - - var r0 [][]byte - if rf, ok := ret.Get(0).(func(common.Hash, []byte, byte) [][]byte); ok { - r0 = rf(tokenID, pubkey, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, []byte, byte) error); ok { - r1 = rf(tokenID, pubkey, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetPDEContributionStatus provides a mock function with given fields: prefix, suffix -func (_m 
*DatabaseInterface) GetPDEContributionStatus(prefix []byte, suffix []byte) ([]byte, error) { - ret := _m.Called(prefix, suffix) - - var r0 []byte - if rf, ok := ret.Get(0).(func([]byte, []byte) []byte); ok { - r0 = rf(prefix, suffix) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte, []byte) error); ok { - r1 = rf(prefix, suffix) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetPDEPoolForPair provides a mock function with given fields: beaconHeight, tokenIDToBuyStr, tokenIDToSellStr -func (_m *DatabaseInterface) GetPDEPoolForPair(beaconHeight uint64, tokenIDToBuyStr string, tokenIDToSellStr string) ([]byte, error) { - ret := _m.Called(beaconHeight, tokenIDToBuyStr, tokenIDToSellStr) - - var r0 []byte - if rf, ok := ret.Get(0).(func(uint64, string, string) []byte); ok { - r0 = rf(beaconHeight, tokenIDToBuyStr, tokenIDToSellStr) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64, string, string) error); ok { - r1 = rf(beaconHeight, tokenIDToBuyStr, tokenIDToSellStr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetPDEStatus provides a mock function with given fields: prefix, suffix -func (_m *DatabaseInterface) GetPDEStatus(prefix []byte, suffix []byte) (byte, error) { - ret := _m.Called(prefix, suffix) - - var r0 byte - if rf, ok := ret.Get(0).(func([]byte, []byte) byte); ok { - r0 = rf(prefix, suffix) - } else { - r0 = ret.Get(0).(byte) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte, []byte) error); ok { - r1 = rf(prefix, suffix) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetProducersBlackList provides a mock function with given fields: beaconHeight -func (_m *DatabaseInterface) GetProducersBlackList(beaconHeight uint64) (map[string]uint8, error) { - ret := _m.Called(beaconHeight) - - var r0 map[string]uint8 - if rf, ok := ret.Get(0).(func(uint64) 
map[string]uint8); ok { - r0 = rf(beaconHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]uint8) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(beaconHeight) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetRewardOfShardByEpoch provides a mock function with given fields: epoch, shardID, tokenID -func (_m *DatabaseInterface) GetRewardOfShardByEpoch(epoch uint64, shardID byte, tokenID common.Hash) (uint64, error) { - ret := _m.Called(epoch, shardID, tokenID) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(uint64, byte, common.Hash) uint64); ok { - r0 = rf(epoch, shardID, tokenID) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64, byte, common.Hash) error); ok { - r1 = rf(epoch, shardID, tokenID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetSharesOfContributorForTokenIDOnAPair provides a mock function with given fields: token1IDStr, token2IDStr, contributedTokenIDStr, contributorAddrStr -func (_m *DatabaseInterface) GetSharesOfContributorForTokenIDOnAPair(token1IDStr string, token2IDStr string, contributedTokenIDStr string, contributorAddrStr string) (uint64, error) { - ret := _m.Called(token1IDStr, token2IDStr, contributedTokenIDStr, contributorAddrStr) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(string, string, string, string) uint64); ok { - r0 = rf(token1IDStr, token2IDStr, contributedTokenIDStr, contributorAddrStr) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, string, string, string) error); ok { - r1 = rf(token1IDStr, token2IDStr, contributedTokenIDStr, contributorAddrStr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetTotalSharesForTokenIDOnAPair provides a mock function with given fields: token1IDStr, token2IDStr, contributedTokenIDStr -func (_m *DatabaseInterface) GetTotalSharesForTokenIDOnAPair(token1IDStr string, token2IDStr string, 
contributedTokenIDStr string) (uint64, error) { - ret := _m.Called(token1IDStr, token2IDStr, contributedTokenIDStr) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(string, string, string) uint64); ok { - r0 = rf(token1IDStr, token2IDStr, contributedTokenIDStr) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, string, string) error); ok { - r1 = rf(token1IDStr, token2IDStr, contributedTokenIDStr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetTransactionIndexById provides a mock function with given fields: txId -func (_m *DatabaseInterface) GetTransactionIndexById(txId common.Hash) (common.Hash, int, error) { - ret := _m.Called(txId) - - var r0 common.Hash - if rf, ok := ret.Get(0).(func(common.Hash) common.Hash); ok { - r0 = rf(txId) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - var r1 int - if rf, ok := ret.Get(1).(func(common.Hash) int); ok { - r1 = rf(txId) - } else { - r1 = ret.Get(1).(int) - } - - var r2 error - if rf, ok := ret.Get(2).(func(common.Hash) error); ok { - r2 = rf(txId) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetTxByPublicKey provides a mock function with given fields: publicKey -func (_m *DatabaseInterface) GetTxByPublicKey(publicKey []byte) (map[byte][]common.Hash, error) { - ret := _m.Called(publicKey) - - var r0 map[byte][]common.Hash - if rf, ok := ret.Get(0).(func([]byte) map[byte][]common.Hash); ok { - r0 = rf(publicKey) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[byte][]common.Hash) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(publicKey) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasAcceptedShardToBeacon provides a mock function with given fields: shardID, shardBlkHash -func (_m *DatabaseInterface) HasAcceptedShardToBeacon(shardID byte, shardBlkHash common.Hash) error { - ret := _m.Called(shardID, shardBlkHash) - - var r0 error - if rf, 
ok := ret.Get(0).(func(byte, common.Hash) error); ok { - r0 = rf(shardID, shardBlkHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// HasBeaconBlock provides a mock function with given fields: hash -func (_m *DatabaseInterface) HasBeaconBlock(hash common.Hash) (bool, error) { - ret := _m.Called(hash) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash) bool); ok { - r0 = rf(hash) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasBlock provides a mock function with given fields: hash -func (_m *DatabaseInterface) HasBlock(hash common.Hash) (bool, error) { - ret := _m.Called(hash) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash) bool); ok { - r0 = rf(hash) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasCommitment provides a mock function with given fields: tokenID, commitment, shardID -func (_m *DatabaseInterface) HasCommitment(tokenID common.Hash, commitment []byte, shardID byte) (bool, error) { - ret := _m.Called(tokenID, commitment, shardID) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash, []byte, byte) bool); ok { - r0 = rf(tokenID, commitment, shardID) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, []byte, byte) error); ok { - r1 = rf(tokenID, commitment, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasCommitmentIndex provides a mock function with given fields: tokenID, commitmentIndex, shardID -func (_m *DatabaseInterface) HasCommitmentIndex(tokenID common.Hash, commitmentIndex uint64, shardID byte) (bool, error) { - ret := _m.Called(tokenID, commitmentIndex, shardID) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash, uint64, byte) bool); 
ok { - r0 = rf(tokenID, commitmentIndex, shardID) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, uint64, byte) error); ok { - r1 = rf(tokenID, commitmentIndex, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasIncomingCrossShard provides a mock function with given fields: shardID, crossShardID, crossBlkHash -func (_m *DatabaseInterface) HasIncomingCrossShard(shardID byte, crossShardID byte, crossBlkHash common.Hash) error { - ret := _m.Called(shardID, crossShardID, crossBlkHash) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, byte, common.Hash) error); ok { - r0 = rf(shardID, crossShardID, crossBlkHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// HasSNDerivator provides a mock function with given fields: tokenID, data -func (_m *DatabaseInterface) HasSNDerivator(tokenID common.Hash, data []byte) (bool, error) { - ret := _m.Called(tokenID, data) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash, []byte) bool); ok { - r0 = rf(tokenID, data) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, []byte) error); ok { - r1 = rf(tokenID, data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasSerialNumber provides a mock function with given fields: tokenID, data, shardID -func (_m *DatabaseInterface) HasSerialNumber(tokenID common.Hash, data []byte, shardID byte) (bool, error) { - ret := _m.Called(tokenID, data, shardID) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash, []byte, byte) bool); ok { - r0 = rf(tokenID, data, shardID) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, []byte, byte) error); ok { - r1 = rf(tokenID, data, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasShardCommitteeByHeight provides a mock function with given fields: height -func (_m *DatabaseInterface) HasShardCommitteeByHeight(height 
uint64) (bool, error) { - ret := _m.Called(height) - - var r0 bool - if rf, ok := ret.Get(0).(func(uint64) bool); ok { - r0 = rf(height) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasValue provides a mock function with given fields: key -func (_m *DatabaseInterface) HasValue(key []byte) (bool, error) { - ret := _m.Called(key) - - var r0 bool - if rf, ok := ret.Get(0).(func([]byte) bool); ok { - r0 = rf(key) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// InsertETHTxHashIssued provides a mock function with given fields: uniqETHTx -func (_m *DatabaseInterface) InsertETHTxHashIssued(uniqETHTx []byte) error { - ret := _m.Called(uniqETHTx) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte) error); ok { - r0 = rf(uniqETHTx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// IsBridgeTokenExistedByType provides a mock function with given fields: incTokenID, isCentralized -func (_m *DatabaseInterface) IsBridgeTokenExistedByType(incTokenID common.Hash, isCentralized bool) (bool, error) { - ret := _m.Called(incTokenID, isCentralized) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash, bool) bool); ok { - r0 = rf(incTokenID, isCentralized) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, bool) error); ok { - r1 = rf(incTokenID, isCentralized) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IsETHTxHashIssued provides a mock function with given fields: uniqETHTx -func (_m *DatabaseInterface) IsETHTxHashIssued(uniqETHTx []byte) (bool, error) { - ret := _m.Called(uniqETHTx) - - var r0 bool - if rf, ok := ret.Get(0).(func([]byte) bool); ok { - r0 = rf(uniqETHTx) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 
error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(uniqETHTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListCommitment provides a mock function with given fields: tokenID, shardID -func (_m *DatabaseInterface) ListCommitment(tokenID common.Hash, shardID byte) (map[string]uint64, error) { - ret := _m.Called(tokenID, shardID) - - var r0 map[string]uint64 - if rf, ok := ret.Get(0).(func(common.Hash, byte) map[string]uint64); ok { - r0 = rf(tokenID, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]uint64) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, byte) error); ok { - r1 = rf(tokenID, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListCommitmentIndices provides a mock function with given fields: tokenID, shardID -func (_m *DatabaseInterface) ListCommitmentIndices(tokenID common.Hash, shardID byte) (map[uint64]string, error) { - ret := _m.Called(tokenID, shardID) - - var r0 map[uint64]string - if rf, ok := ret.Get(0).(func(common.Hash, byte) map[uint64]string); ok { - r0 = rf(tokenID, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[uint64]string) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, byte) error); ok { - r1 = rf(tokenID, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListCommitteeReward provides a mock function with given fields: -func (_m *DatabaseInterface) ListCommitteeReward() map[string]map[common.Hash]uint64 { - ret := _m.Called() - - var r0 map[string]map[common.Hash]uint64 - if rf, ok := ret.Get(0).(func() map[string]map[common.Hash]uint64); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]map[common.Hash]uint64) - } - } - - return r0 -} - -// ListPrivacyToken provides a mock function with given fields: -func (_m *DatabaseInterface) ListPrivacyToken() ([][]byte, error) { - ret := _m.Called() - - var r0 [][]byte - if rf, ok := 
ret.Get(0).(func() [][]byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListPrivacyTokenCrossShard provides a mock function with given fields: -func (_m *DatabaseInterface) ListPrivacyTokenCrossShard() ([][]byte, error) { - ret := _m.Called() - - var r0 [][]byte - if rf, ok := ret.Get(0).(func() [][]byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListSNDerivator provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) ListSNDerivator(tokenID common.Hash) ([][]byte, error) { - ret := _m.Called(tokenID) - - var r0 [][]byte - if rf, ok := ret.Get(0).(func(common.Hash) [][]byte); ok { - r0 = rf(tokenID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(tokenID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListSerialNumber provides a mock function with given fields: tokenID, shardID -func (_m *DatabaseInterface) ListSerialNumber(tokenID common.Hash, shardID byte) (map[string]uint64, error) { - ret := _m.Called(tokenID, shardID) - - var r0 map[string]uint64 - if rf, ok := ret.Get(0).(func(common.Hash, byte) map[string]uint64); ok { - r0 = rf(tokenID, shardID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]uint64) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash, byte) error); ok { - r1 = rf(tokenID, shardID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PrivacyTokenIDCrossShardExisted provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) PrivacyTokenIDCrossShardExisted(tokenID 
common.Hash) bool { - ret := _m.Called(tokenID) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash) bool); ok { - r0 = rf(tokenID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// PrivacyTokenIDExisted provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) PrivacyTokenIDExisted(tokenID common.Hash) bool { - ret := _m.Called(tokenID) - - var r0 bool - if rf, ok := ret.Get(0).(func(common.Hash) bool); ok { - r0 = rf(tokenID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// PrivacyTokenTxs provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) PrivacyTokenTxs(tokenID common.Hash) ([]common.Hash, error) { - ret := _m.Called(tokenID) - - var r0 []common.Hash - if rf, ok := ret.Get(0).(func(common.Hash) []common.Hash); ok { - r0 = rf(tokenID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]common.Hash) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(tokenID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Put provides a mock function with given fields: key, value -func (_m *DatabaseInterface) Put(key []byte, value []byte) error { - ret := _m.Called(key, value) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { - r0 = rf(key, value) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PutBatch provides a mock function with given fields: data -func (_m *DatabaseInterface) PutBatch(data []incdb.BatchData) error { - ret := _m.Called(data) - - var r0 error - if rf, ok := ret.Get(0).(func([]incdb.BatchData) error); ok { - r0 = rf(data) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RemoveCommitteeReward provides a mock function with given fields: committeeAddress, amount, tokenID, bd -func (_m *DatabaseInterface) RemoveCommitteeReward(committeeAddress []byte, amount uint64, tokenID common.Hash, bd *[]database.BatchData) error { - ret := _m.Called(committeeAddress, amount, 
tokenID, bd) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, uint64, common.Hash, *[]database.BatchData) error); ok { - r0 = rf(committeeAddress, amount, tokenID, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RestoreBridgedTokenByTokenID provides a mock function with given fields: tokenID -func (_m *DatabaseInterface) RestoreBridgedTokenByTokenID(tokenID common.Hash) error { - ret := _m.Called(tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash) error); ok { - r0 = rf(tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RestoreCommitmentsOfPubkey provides a mock function with given fields: tokenID, shardID, pubkey, commitments -func (_m *DatabaseInterface) RestoreCommitmentsOfPubkey(tokenID common.Hash, shardID byte, pubkey []byte, commitments [][]byte) error { - ret := _m.Called(tokenID, shardID, pubkey, commitments) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, byte, []byte, [][]byte) error); ok { - r0 = rf(tokenID, shardID, pubkey, commitments) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RestoreCommitteeReward provides a mock function with given fields: committeeAddress, tokenID -func (_m *DatabaseInterface) RestoreCommitteeReward(committeeAddress []byte, tokenID common.Hash) error { - ret := _m.Called(committeeAddress, tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, common.Hash) error); ok { - r0 = rf(committeeAddress, tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RestoreCrossShardNextHeights provides a mock function with given fields: fromShard, toShard, curHeight -func (_m *DatabaseInterface) RestoreCrossShardNextHeights(fromShard byte, toShard byte, curHeight uint64) error { - ret := _m.Called(fromShard, toShard, curHeight) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, byte, uint64) error); ok { - r0 = rf(fromShard, toShard, curHeight) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RestoreSerialNumber provides a 
mock function with given fields: tokenID, shardID, serialNumbers -func (_m *DatabaseInterface) RestoreSerialNumber(tokenID common.Hash, shardID byte, serialNumbers [][]byte) error { - ret := _m.Called(tokenID, shardID, serialNumbers) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, byte, [][]byte) error); ok { - r0 = rf(tokenID, shardID, serialNumbers) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RestoreShardRewardRequest provides a mock function with given fields: epoch, shardID, tokenID -func (_m *DatabaseInterface) RestoreShardRewardRequest(epoch uint64, shardID byte, tokenID common.Hash) error { - ret := _m.Called(epoch, shardID, tokenID) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, byte, common.Hash) error); ok { - r0 = rf(epoch, shardID, tokenID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreAcceptedShardToBeacon provides a mock function with given fields: shardID, blkHeight, shardBlkHash -func (_m *DatabaseInterface) StoreAcceptedShardToBeacon(shardID byte, blkHeight uint64, shardBlkHash common.Hash) error { - ret := _m.Called(shardID, blkHeight, shardBlkHash) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, uint64, common.Hash) error); ok { - r0 = rf(shardID, blkHeight, shardBlkHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreAutoStakingByHeight provides a mock function with given fields: height, v -func (_m *DatabaseInterface) StoreAutoStakingByHeight(height uint64, v interface{}) error { - ret := _m.Called(height, v) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, interface{}) error); ok { - r0 = rf(height, v) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreBeaconBestState provides a mock function with given fields: v, bd -func (_m *DatabaseInterface) StoreBeaconBestState(v interface{}, bd *[]database.BatchData) error { - ret := _m.Called(v, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}, *[]database.BatchData) error); ok { - r0 = 
rf(v, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreBeaconBlock provides a mock function with given fields: v, hash, bd -func (_m *DatabaseInterface) StoreBeaconBlock(v interface{}, hash common.Hash, bd *[]database.BatchData) error { - ret := _m.Called(v, hash, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}, common.Hash, *[]database.BatchData) error); ok { - r0 = rf(v, hash, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreBeaconBlockIndex provides a mock function with given fields: hash, idx -func (_m *DatabaseInterface) StoreBeaconBlockIndex(hash common.Hash, idx uint64) error { - ret := _m.Called(hash, idx) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, uint64) error); ok { - r0 = rf(hash, idx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreBeaconCommitteeByHeight provides a mock function with given fields: height, v -func (_m *DatabaseInterface) StoreBeaconCommitteeByHeight(height uint64, v interface{}) error { - ret := _m.Called(height, v) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, interface{}) error); ok { - r0 = rf(height, v) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreBurningConfirm provides a mock function with given fields: txID, height, bd -func (_m *DatabaseInterface) StoreBurningConfirm(txID common.Hash, height uint64, bd *[]database.BatchData) error { - ret := _m.Called(txID, height, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, uint64, *[]database.BatchData) error); ok { - r0 = rf(txID, height, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreCommitments provides a mock function with given fields: tokenID, pubkey, commitment, shardID -func (_m *DatabaseInterface) StoreCommitments(tokenID common.Hash, pubkey []byte, commitment [][]byte, shardID byte) error { - ret := _m.Called(tokenID, pubkey, commitment, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, []byte, [][]byte, 
byte) error); ok { - r0 = rf(tokenID, pubkey, commitment, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreCrossShardNextHeight provides a mock function with given fields: fromShard, toShard, curHeight, nextHeight -func (_m *DatabaseInterface) StoreCrossShardNextHeight(fromShard byte, toShard byte, curHeight uint64, nextHeight uint64) error { - ret := _m.Called(fromShard, toShard, curHeight, nextHeight) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, byte, uint64, uint64) error); ok { - r0 = rf(fromShard, toShard, curHeight, nextHeight) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreFeeEstimator provides a mock function with given fields: val, shardID -func (_m *DatabaseInterface) StoreFeeEstimator(val []byte, shardID byte) error { - ret := _m.Called(val, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, byte) error); ok { - r0 = rf(val, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreIncomingCrossShard provides a mock function with given fields: shardID, crossShardID, blkHeight, crossBlkHash, bd -func (_m *DatabaseInterface) StoreIncomingCrossShard(shardID byte, crossShardID byte, blkHeight uint64, crossBlkHash common.Hash, bd *[]database.BatchData) error { - ret := _m.Called(shardID, crossShardID, blkHeight, crossBlkHash, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(byte, byte, uint64, common.Hash, *[]database.BatchData) error); ok { - r0 = rf(shardID, crossShardID, blkHeight, crossBlkHash, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreOutputCoins provides a mock function with given fields: tokenID, publicKey, outputCoinArr, shardID -func (_m *DatabaseInterface) StoreOutputCoins(tokenID common.Hash, publicKey []byte, outputCoinArr [][]byte, shardID byte) error { - ret := _m.Called(tokenID, publicKey, outputCoinArr, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, []byte, [][]byte, byte) error); ok { - r0 = rf(tokenID, publicKey, 
outputCoinArr, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StorePrevBestState provides a mock function with given fields: val, isBeacon, shardID -func (_m *DatabaseInterface) StorePrevBestState(val []byte, isBeacon bool, shardID byte) error { - ret := _m.Called(val, isBeacon, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, bool, byte) error); ok { - r0 = rf(val, isBeacon, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StorePrivacyToken provides a mock function with given fields: tokenID, data -func (_m *DatabaseInterface) StorePrivacyToken(tokenID common.Hash, data []byte) error { - ret := _m.Called(tokenID, data) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, []byte) error); ok { - r0 = rf(tokenID, data) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StorePrivacyTokenCrossShard provides a mock function with given fields: tokenID, tokenValue -func (_m *DatabaseInterface) StorePrivacyTokenCrossShard(tokenID common.Hash, tokenValue []byte) error { - ret := _m.Called(tokenID, tokenValue) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, []byte) error); ok { - r0 = rf(tokenID, tokenValue) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StorePrivacyTokenTx provides a mock function with given fields: tokenID, shardID, blockHeight, txIndex, txHash -func (_m *DatabaseInterface) StorePrivacyTokenTx(tokenID common.Hash, shardID byte, blockHeight uint64, txIndex int32, txHash []byte) error { - ret := _m.Called(tokenID, shardID, blockHeight, txIndex, txHash) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, byte, uint64, int32, []byte) error); ok { - r0 = rf(tokenID, shardID, blockHeight, txIndex, txHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreProducersBlackList provides a mock function with given fields: beaconHeight, producersBlackList -func (_m *DatabaseInterface) StoreProducersBlackList(beaconHeight uint64, producersBlackList 
map[string]uint8) error { - ret := _m.Called(beaconHeight, producersBlackList) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, map[string]uint8) error); ok { - r0 = rf(beaconHeight, producersBlackList) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreRewardReceiverByHeight provides a mock function with given fields: height, v -func (_m *DatabaseInterface) StoreRewardReceiverByHeight(height uint64, v interface{}) error { - ret := _m.Called(height, v) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, interface{}) error); ok { - r0 = rf(height, v) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreSNDerivators provides a mock function with given fields: tokenID, sndArray -func (_m *DatabaseInterface) StoreSNDerivators(tokenID common.Hash, sndArray [][]byte) error { - ret := _m.Called(tokenID, sndArray) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, [][]byte) error); ok { - r0 = rf(tokenID, sndArray) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreSerialNumbers provides a mock function with given fields: tokenID, serialNumber, shardID -func (_m *DatabaseInterface) StoreSerialNumbers(tokenID common.Hash, serialNumber [][]byte, shardID byte) error { - ret := _m.Called(tokenID, serialNumber, shardID) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, [][]byte, byte) error); ok { - r0 = rf(tokenID, serialNumber, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreShardBestState provides a mock function with given fields: v, shardID, bd -func (_m *DatabaseInterface) StoreShardBestState(v interface{}, shardID byte, bd *[]database.BatchData) error { - ret := _m.Called(v, shardID, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}, byte, *[]database.BatchData) error); ok { - r0 = rf(v, shardID, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreShardBlock provides a mock function with given fields: v, hash, shardID, bd -func (_m 
*DatabaseInterface) StoreShardBlock(v interface{}, hash common.Hash, shardID byte, bd *[]database.BatchData) error { - ret := _m.Called(v, hash, shardID, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}, common.Hash, byte, *[]database.BatchData) error); ok { - r0 = rf(v, hash, shardID, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreShardBlockIndex provides a mock function with given fields: hash, idx, shardID, bd -func (_m *DatabaseInterface) StoreShardBlockIndex(hash common.Hash, idx uint64, shardID byte, bd *[]database.BatchData) error { - ret := _m.Called(hash, idx, shardID, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, uint64, byte, *[]database.BatchData) error); ok { - r0 = rf(hash, idx, shardID, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreShardCommitteeByHeight provides a mock function with given fields: height, v -func (_m *DatabaseInterface) StoreShardCommitteeByHeight(height uint64, v interface{}) error { - ret := _m.Called(height, v) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, interface{}) error); ok { - r0 = rf(height, v) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreTransactionIndex provides a mock function with given fields: txId, blockHash, indexInBlock, bd -func (_m *DatabaseInterface) StoreTransactionIndex(txId common.Hash, blockHash common.Hash, indexInBlock int, bd *[]database.BatchData) error { - ret := _m.Called(txId, blockHash, indexInBlock, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Hash, int, *[]database.BatchData) error); ok { - r0 = rf(txId, blockHash, indexInBlock, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreTxByPublicKey provides a mock function with given fields: publicKey, txID, shardID -func (_m *DatabaseInterface) StoreTxByPublicKey(publicKey []byte, txID common.Hash, shardID byte) error { - ret := _m.Called(publicKey, txID, shardID) - - var r0 error - if rf, ok := 
ret.Get(0).(func([]byte, common.Hash, byte) error); ok { - r0 = rf(publicKey, txID, shardID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// TrackBridgeReqWithStatus provides a mock function with given fields: txReqID, status, bd -func (_m *DatabaseInterface) TrackBridgeReqWithStatus(txReqID common.Hash, status byte, bd *[]database.BatchData) error { - ret := _m.Called(txReqID, status, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, byte, *[]database.BatchData) error); ok { - r0 = rf(txReqID, status, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// TrackPDEContributionStatus provides a mock function with given fields: prefix, suffix, statusContent -func (_m *DatabaseInterface) TrackPDEContributionStatus(prefix []byte, suffix []byte, statusContent []byte) error { - ret := _m.Called(prefix, suffix, statusContent) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, []byte, []byte) error); ok { - r0 = rf(prefix, suffix, statusContent) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// TrackPDEStatus provides a mock function with given fields: prefix, suffix, status -func (_m *DatabaseInterface) TrackPDEStatus(prefix []byte, suffix []byte, status byte) error { - ret := _m.Called(prefix, suffix, status) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, []byte, byte) error); ok { - r0 = rf(prefix, suffix, status) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateBridgeTokenInfo provides a mock function with given fields: incTokenID, externalTokenID, isCentralized, updatingAmt, updateType, bd -func (_m *DatabaseInterface) UpdateBridgeTokenInfo(incTokenID common.Hash, externalTokenID []byte, isCentralized bool, updatingAmt uint64, updateType string, bd *[]database.BatchData) error { - ret := _m.Called(incTokenID, externalTokenID, isCentralized, updatingAmt, updateType, bd) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, []byte, bool, uint64, string, *[]database.BatchData) error); ok { - 
r0 = rf(incTokenID, externalTokenID, isCentralized, updatingAmt, updateType, bd) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdatePDEPoolForPair provides a mock function with given fields: beaconHeight, token1IDStr, token2IDStr, pdePoolForPairBytes -func (_m *DatabaseInterface) UpdatePDEPoolForPair(beaconHeight uint64, token1IDStr string, token2IDStr string, pdePoolForPairBytes []byte) error { - ret := _m.Called(beaconHeight, token1IDStr, token2IDStr, pdePoolForPairBytes) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, string, string, []byte) error); ok { - r0 = rf(beaconHeight, token1IDStr, token2IDStr, pdePoolForPairBytes) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/mocks/Trie.go b/mocks/Trie.go new file mode 100644 index 0000000000..e5f78825eb --- /dev/null +++ b/mocks/Trie.go @@ -0,0 +1,150 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import common "github.com/incognitochain/incognito-chain/common" +import incdb "github.com/incognitochain/incognito-chain/incdb" +import mock "github.com/stretchr/testify/mock" + +import trie "github.com/incognitochain/incognito-chain/trie" + +// Trie is an autogenerated mock type for the Trie type +type Trie struct { + mock.Mock +} + +// Commit provides a mock function with given fields: onleaf +func (_m *Trie) Commit(onleaf trie.LeafCallback) (common.Hash, error) { + ret := _m.Called(onleaf) + + var r0 common.Hash + if rf, ok := ret.Get(0).(func(trie.LeafCallback) common.Hash); ok { + r0 = rf(onleaf) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(trie.LeafCallback) error); ok { + r1 = rf(onleaf) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetKey provides a mock function with given fields: _a0 +func (_m *Trie) GetKey(_a0 []byte) []byte { + ret := _m.Called(_a0) + + var r0 []byte + if rf, ok := ret.Get(0).(func([]byte) []byte); ok { + r0 = rf(_a0) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// Hash provides a mock function with given fields: +func (_m *Trie) Hash() common.Hash { + ret := _m.Called() + + var r0 common.Hash + if rf, ok := ret.Get(0).(func() common.Hash); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + return r0 +} + +// NodeIterator provides a mock function with given fields: startKey +func (_m *Trie) NodeIterator(startKey []byte) trie.NodeIterator { + ret := _m.Called(startKey) + + var r0 trie.NodeIterator + if rf, ok := ret.Get(0).(func([]byte) trie.NodeIterator); ok { + r0 = rf(startKey) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(trie.NodeIterator) + } + } + + return r0 +} + +// Prove provides a mock function with given fields: key, fromLevel, proofDb +func (_m *Trie) Prove(key []byte, fromLevel uint, proofDb incdb.Database) error { + ret := _m.Called(key, fromLevel, proofDb) + + var r0 error + if rf, ok := ret.Get(0).(func([]byte, uint, incdb.Database) error); ok { + r0 = rf(key, fromLevel, proofDb) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TryDelete provides a mock function with given fields: key +func (_m *Trie) TryDelete(key []byte) error { + ret := _m.Called(key) + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TryGet provides a mock function with given fields: key +func (_m *Trie) TryGet(key []byte) ([]byte, error) { + ret := _m.Called(key) + + var r0 []byte + if rf, ok := ret.Get(0).(func([]byte) []byte); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TryUpdate provides a mock function with given fields: key, value +func (_m *Trie) TryUpdate(key []byte, value []byte) error { + ret := _m.Called(key, 
value) + + var r0 error + if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { + r0 = rf(key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/relaying/bnb/constants.go b/relaying/bnb/constants.go new file mode 100644 index 0000000000..d048b7fba6 --- /dev/null +++ b/relaying/bnb/constants.go @@ -0,0 +1,27 @@ +package bnb + +//todo: need to update param before deploying +const ( + // fixed params + DenomBNB = "BNB" + MinConfirmationsBlock = 3 + + // mainnet + MainnetBNBChainID = "Binance-Chain-Tigris" + MainnetTotalVotingPowers = 11000000000000 + MainnetURLRemote = "https://seed1.longevito.io:443" + MainnetGenesisBlockHeight = 79394120 + + // local + //TestnetBNBChainID = "Binance-Dev" + //TestnetTotalVotingPowers = 1000000000000 + //TestnetURLRemote = "http://localhost:26657" + //TestnetGenesisBlockHeight = 1000 + //TestnetGenesisHeaderStr = "eyJoZWFkZXIiOnsidmVyc2lvbiI6eyJibG9jayI6MTAsImFwcCI6MH0sImNoYWluX2lkIjoiQmluYW5jZS1EZXYiLCJoZWlnaHQiOjEwMDAsInRpbWUiOiIyMDIwLTAzLTI4VDEyOjUwOjI3LjEwMDU5M1oiLCJudW1fdHhzIjowLCJ0b3RhbF90eHMiOjEsImxhc3RfYmxvY2tfaWQiOnsiaGFzaCI6IjRBMzFFMDU3MUM5N0M1NkE2OTgwRDQ1OTlENEFCNjY4MDVCMjI0ODYwNjBDQkMyRTA0MkRFNjg5RkJBODRCMUMiLCJwYXJ0cyI6eyJ0b3RhbCI6MSwiaGFzaCI6IjJFQTlCMTdEMzI1MDVFQjU2QTEwQjcwOUFDNDVFRDQyQjk0QjAwM0QxRTRBMzFCOTAwMzE5OEVEMDM1MDM1MDIifX0sImxhc3RfY29tbWl0X2hhc2giOiJGNDVGMDkxNTE2NjM4NDlGMjlBMURFM0FCMkRGNjM2NkQzOTEzNjU0QjQxQjAxRDVDNTZGNTcwRDgzMEMyNkU0IiwiZGF0YV9oYXNoIjoiIiwidmFsaWRhdG9yc19oYXNoIjoiRTcxQzcxNEJGOEI4RTYyOUE0MjY2RTY0RTJCQUU1QURBMTUxODVCRUU1QTI2MTcxRENCQzc2NUFDRDQ0RDZGMyIsIm5leHRfdmFsaWRhdG9yc19oYXNoIjoiRTcxQzcxNEJGOEI4RTYyOUE0MjY2RTY0RTJCQUU1QURBMTUxODVCRUU1QTI2MTcxRENCQzc2NUFDRDQ0RDZGMyIsImNvbnNlbnN1c19oYXNoIjoiMjk0RDhGQkQwQjk0Qjc2N0E3RUJBOTg0MEYyOTlBMzU4NkRBN0ZFNkI1REVBRDNCN0VFQ0JBMTkzQzQwMEY5MyIsImFwcF9oYXNoIjoiNkYwNTZDOTA2RkFGRjE2NDAxNzQ3OUMyQTY3OEYyNkY0MzQxQkNEOTFCRDcxNEVEQThDNkZBNDJGMzhCNEM0NiIsImxhc3RfcmVzdWx0c19oYXNoIjoiIiwiZXZpZGVuY2VfaGFzaCI6IiIsInByb3Bvc2VyX2FkZHJlc3MiOiI4N0U3MzM0
MjI5NjY2ODVDMUIyNEY0MkEzMTg0QUM5NTlFQzQ5QTRDIn0sImRhdGEiOnsidHhzIjpudWxsfSwiZXZpZGVuY2UiOnsiZXZpZGVuY2UiOm51bGx9LCJsYXN0X2NvbW1pdCI6eyJibG9ja19pZCI6eyJoYXNoIjoiNEEzMUUwNTcxQzk3QzU2QTY5ODBENDU5OUQ0QUI2NjgwNUIyMjQ4NjA2MENCQzJFMDQyREU2ODlGQkE4NEIxQyIsInBhcnRzIjp7InRvdGFsIjoxLCJoYXNoIjoiMkVBOUIxN0QzMjUwNUVCNTZBMTBCNzA5QUM0NUVENDJCOTRCMDAzRDFFNEEzMUI5MDAzMTk4RUQwMzUwMzUwMiJ9fSwicHJlY29tbWl0cyI6W3sidHlwZSI6MiwiaGVpZ2h0Ijo5OTksInJvdW5kIjowLCJibG9ja19pZCI6eyJoYXNoIjoiNEEzMUUwNTcxQzk3QzU2QTY5ODBENDU5OUQ0QUI2NjgwNUIyMjQ4NjA2MENCQzJFMDQyREU2ODlGQkE4NEIxQyIsInBhcnRzIjp7InRvdGFsIjoxLCJoYXNoIjoiMkVBOUIxN0QzMjUwNUVCNTZBMTBCNzA5QUM0NUVENDJCOTRCMDAzRDFFNEEzMUI5MDAzMTk4RUQwMzUwMzUwMiJ9fSwidGltZXN0YW1wIjoiMjAyMC0wMy0yOFQxMjo1MDoyNy4xMDA1OTNaIiwidmFsaWRhdG9yX2FkZHJlc3MiOiI4N0U3MzM0MjI5NjY2ODVDMUIyNEY0MkEzMTg0QUM5NTlFQzQ5QTRDIiwidmFsaWRhdG9yX2luZGV4IjowLCJzaWduYXR1cmUiOiJkRERSUWlrcUdERHBkK3A4NDQwTFdDRUlpNVdqWHhwTmZ1WStaTVZ0d0NPeGlodHlGVEdlVjFIS3lSRCtsUUZCVlVBekkyU1NUKzNURXdDdHRwb0FDdz09In1dfX0=" + + // testnet + TestnetBNBChainID = "Binance-Chain-Nile" + TestnetTotalVotingPowers = 11000000000000 + TestnetURLRemote = "https://data-seed-pre-0-s1.binance.org:443" + TestnetGenesisBlockHeight = 75955500 +) diff --git a/relaying/bnb/error.go b/relaying/bnb/error.go new file mode 100644 index 0000000000..378058bbf8 --- /dev/null +++ b/relaying/bnb/error.go @@ -0,0 +1,56 @@ +package bnb + +import ( + "fmt" + "github.com/pkg/errors" +) + +const ( + UnexpectedErr = iota + InvalidBasicSignedHeaderErr + InvalidSignatureSignedHeaderErr + InvalidNewHeaderErr + InvalidBasicHeaderErr + InvalidTxProofErr + ParseProofErr + ExistedNewHeaderErr + GetBNBDataHashErr +) + +var ErrCodeMessage = map[int]struct { + Code int + Message string +}{ + UnexpectedErr: {-14000, "Unexpected error"}, + + InvalidBasicSignedHeaderErr: {-14001, "Invalid basic signed header error"}, + InvalidSignatureSignedHeaderErr: {-14002, "Invalid signature signed header error"}, + InvalidNewHeaderErr: {-14003, "Invalid new header"}, + 
InvalidBasicHeaderErr: {-14004, "Invalid basic header error"}, + InvalidTxProofErr: {-14005, "Invalid tx proof error"}, + ParseProofErr: {-14006, "Parse proof from json string error"}, + ExistedNewHeaderErr: {-14007, "New header is existed in list of unconfirmed headers error"}, + GetBNBDataHashErr: {-14008, "Can not get bnb data hash from db error"}, +} + +type BNBRelayingError struct { + Code int + Message string + err error +} + +func (e BNBRelayingError) Error() string { + return fmt.Sprintf("%+v: %+v %+v", e.Code, e.Message, e.err) +} + +func (e BNBRelayingError) GetCode() int { + return e.Code +} + +func NewBNBRelayingError(key int, err error) *BNBRelayingError { + return &BNBRelayingError{ + err: errors.Wrap(err, ErrCodeMessage[key].Message), + Code: ErrCodeMessage[key].Code, + Message: ErrCodeMessage[key].Message, + } +} diff --git a/relaying/bnb/headerchain.go b/relaying/bnb/headerchain.go new file mode 100644 index 0000000000..e4f674f804 --- /dev/null +++ b/relaying/bnb/headerchain.go @@ -0,0 +1,167 @@ +package bnb + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "github.com/tendermint/tendermint/types" + "strings" +) + +func NewSignedHeader (h *types.Header, lastCommit *types.Commit) *types.SignedHeader{ + return &types.SignedHeader{ + Header: h, + Commit: lastCommit, + } +} + +func VerifySignature(sh *types.SignedHeader, chainID string) *BNBRelayingError { + validatorMap := map[string]*types.Validator{} + totalVotingPowerParam := uint64(0) + if chainID == TestnetBNBChainID { + validatorMap = validatorsTestnet + totalVotingPowerParam = TestnetTotalVotingPowers + } else if chainID == MainnetBNBChainID { + validatorMap = validatorsMainnet + totalVotingPowerParam = MainnetTotalVotingPowers + } + + signedValidators := map[string]bool{} + sigs := sh.Commit.Precommits + totalVotingPower := int64(0) + // get vote from commit sig + for i, sig := range sigs { + if sig == nil { + continue + } + vote := sh.Commit.GetVote(i) + if vote != nil { + 
validateAddressStr := strings.ToUpper(hex.EncodeToString(vote.ValidatorAddress)) + // check duplicate vote + if !signedValidators[validateAddressStr] { + signedValidators[validateAddressStr] = true + err := vote.Verify(chainID, validatorMap[validateAddressStr].PubKey) + if err != nil { + Logger.log.Errorf("Invalid signature index %v - %v\n", i, err) + continue + } + totalVotingPower += validatorMap[validateAddressStr].VotingPower + } else { + Logger.log.Errorf("Duplicate signature from the same validator %v\n", validateAddressStr) + } + } + } + + // not greater than 2/3 voting power + if totalVotingPower <= int64(totalVotingPowerParam) * 2 / 3 { + return NewBNBRelayingError(InvalidSignatureSignedHeaderErr, errors.New("not greater than 2/3 voting power")) + } + + return nil +} + +func VerifySignedHeader(sh *types.SignedHeader, chainID string) (bool, *BNBRelayingError){ + err := sh.ValidateBasic(chainID) + if err != nil { + return false, NewBNBRelayingError(InvalidBasicSignedHeaderErr, err) + } + + err2 := VerifySignature(sh, chainID) + if err2 != nil { + return false, err2 + } + + return true, nil +} + +type LatestHeaderChain struct { + LatestBlock *types.Block + // UnconfirmedBlocks contains header blocks that aren't committed by validator set in the next header block + UnconfirmedBlocks []*types.Block +} + +func appendBlockToUnconfirmedBlocks(block *types.Block, unconfirmedBlocks []*types.Block) ([]*types.Block, error){ + hHash := block.Header.Hash().Bytes() + for _, unconfirmedHeader := range unconfirmedBlocks { + if bytes.Equal(unconfirmedHeader.Hash().Bytes(), hHash) { + Logger.log.Errorf("Block is existed %v\n", hHash) + return unconfirmedBlocks, fmt.Errorf("Header is existed %v\n", hHash) + } + } + unconfirmedBlocks = append(unconfirmedBlocks, block) + return unconfirmedBlocks, nil +} + +// AppendBlock receives new header and last commit for the previous header block +func (hc *LatestHeaderChain) AppendBlock(h *types.Block, chainID string) 
(*LatestHeaderChain, bool, *BNBRelayingError) { + // create genesis header before appending new header + if hc.LatestBlock == nil && len(hc.UnconfirmedBlocks) == 0 { + genesisBlock, _ := createGenesisHeaderChain(chainID) + hc.LatestBlock = genesisBlock + } + + var err2 error + if hc.LatestBlock != nil && h.LastCommit == nil { + Logger.log.Errorf("[AppendBlock] last commit is nil") + return hc, false, NewBNBRelayingError(InvalidNewHeaderErr, errors.New("last commit is nil")) + } + + // verify lastCommit + if !bytes.Equal(h.LastCommitHash , h.LastCommit.Hash()){ + Logger.log.Errorf("[AppendBlock] invalid last commit hash") + return hc, false, NewBNBRelayingError(InvalidBasicSignedHeaderErr, errors.New("invalid last commit hash")) + } + + // case 1: h is the next block header of the latest block header in HeaderChain + if hc.LatestBlock != nil { + // get the latest committed block header + latestHeader := hc.LatestBlock + latestHeaderBlockID := latestHeader.Hash() + + // check last blockID + if bytes.Equal(h.LastBlockID.Hash.Bytes(), latestHeaderBlockID) && h.Height == latestHeader.Height + 1{ + // create new signed header and verify + // add to UnconfirmedBlocks list + newSignedHeader := NewSignedHeader(&latestHeader.Header, h.LastCommit) + isValid, err := VerifySignedHeader(newSignedHeader, chainID) + if isValid && err == nil{ + Logger.log.Infof("[AppendBlock] Case 1: Receive new confirmed header %v\n", h.Height) + hc.UnconfirmedBlocks, err2 = appendBlockToUnconfirmedBlocks(h, hc.UnconfirmedBlocks) + if err2 != nil { + Logger.log.Errorf("[AppendBlock] Error when append header to unconfirmed headers %v\n", err2) + return hc, false, NewBNBRelayingError(InvalidNewHeaderErr, err2) + } + return hc, true, nil + } + + Logger.log.Errorf("[AppendBlock] invalid new signed header %v", err) + return hc, false, NewBNBRelayingError(InvalidNewHeaderErr, err) + } + } + + // case2 : h is the next block header of one of block headers in UnconfirmedBlocks + if 
len(hc.UnconfirmedBlocks) > 0 { + for _, uh := range hc.UnconfirmedBlocks { + if bytes.Equal(h.LastBlockID.Hash.Bytes(), uh.Hash()) && h.Height == uh.Height + 1 { + // create new signed header and verify + // append uh to hc.HeaderChain, + // clear all UnconfirmedBlocks => append h to UnconfirmedBlocks + newSignedHeader := NewSignedHeader(&uh.Header, h.LastCommit) + isValid, err := VerifySignedHeader(newSignedHeader, chainID) + if isValid && err == nil{ + hc.LatestBlock = uh + hc.UnconfirmedBlocks = []*types.Block{h} + Logger.log.Infof("[AppendBlock] Case 2 new unconfirmed block %v\n", h.Height) + return hc, true, nil + } + + Logger.log.Errorf("[AppendBlock] invalid new signed header %v", err) + return hc, false, NewBNBRelayingError(InvalidNewHeaderErr, err) + } + } + } + + Logger.log.Errorf("New header is invalid") + return hc, false, NewBNBRelayingError(InvalidNewHeaderErr, nil) +} diff --git a/relaying/bnb/headerchain_test.go b/relaying/bnb/headerchain_test.go new file mode 100644 index 0000000000..622da7be04 --- /dev/null +++ b/relaying/bnb/headerchain_test.go @@ -0,0 +1,467 @@ +package bnb + +import ( + "encoding/json" + "fmt" + "github.com/incognitochain/incognito-chain/common" + "github.com/stretchr/testify/assert" + "io/ioutil" + "log" + + //"github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/types" + "testing" +) + +var _ = func() (_ struct{}) { + Logger.Init(common.NewBackend(nil).Logger("test", true)) + Logger.log.Info("This runs before init()!") + return +}() + +func TestMain(m *testing.M) { + log.SetOutput(ioutil.Discard) + m.Run() +} + +func parseHeaderFromJson(object string) (*types.Header, *BNBRelayingError) { + header := types.Header{} + err := json.Unmarshal([]byte(object), &header) + if err != nil { + fmt.Printf("err unmarshal: %+v\n", err) + } + return &header, nil +} + +func parseCommitFromJson(object string) (*types.Commit, *BNBRelayingError) { + commit := types.Commit{} + err := json.Unmarshal([]byte(object), &commit) + 
if err != nil { + fmt.Printf("err unmarshal: %+v\n", err) + } + return &commit, nil +} + +func TestHeaderChain_ReceiveNewHeader(t *testing.T) { + //header1Json := ` + // { + // "version": { + // "block": 10, + // "app": 0 + // }, + // "chain_id": "Binance-Chain-Tigris", + // "height": 1, + // "time": "2019-04-18T05:59:26.228734998Z", + // "num_txs": 0, + // "total_txs": 0, + // "last_block_id": { + // "hash": "", + // "parts": { + // "total": 0, + // "hash": "" + // } + // }, + // "last_commit_hash": "", + // "data_hash": "", + // "validators_hash": "43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49", + // "next_validators_hash": "43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49", + // "consensus_hash": "294D8FBD0B94B767A7EBA9840F299A3586DA7FE6B5DEAD3B7EECBA193C400F93", + // "app_hash": "", + // "last_results_hash": "", + // "evidence_hash": "", + // "proposer_address": "14CFCE69B645F3F88BAF08EA5B77FA521E4480F9" + // } + //` + + header2Json := ` + { + "version": { + "block": 10, + "app": 0 + }, + "chain_id": "Binance-Chain-Tigris", + "height": 2, + "time": "2019-04-18T06:07:02.154340159Z", + "num_txs": 0, + "total_txs": 0, + "last_block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "last_commit_hash": "DBDCD4FB32B47D560A7FB045D254C997EDB456FB98F49495B9C1808981DB57AE", + "data_hash": "", + "validators_hash": "43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49", + "next_validators_hash": "43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49", + "consensus_hash": "294D8FBD0B94B767A7EBA9840F299A3586DA7FE6B5DEAD3B7EECBA193C400F93", + "app_hash": "B6E4D322EDA04F783EBDCD3D59AA908D211675F890EBC05A893C2093D101D009", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "14CFCE69B645F3F88BAF08EA5B77FA521E4480F9" + } + ` + lastCommitJson1 := ` + { + 
"block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "precommits": [ + null, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.233083179Z", + "validator_address": "14CFCE69B645F3F88BAF08EA5B77FA521E4480F9", + "validator_index": 1, + "signature": "jbuHaCfT/uZDdbLcEHGWZHuqc1pkV6hlgaqq7wfrPf5NNnBAGpqcD5R9V8dGq+avAcg7G/7fxwXK6Pt+Q2d/Cw==" + }, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.153180042Z", + "validator_address": "17B42E8F284D3CA0E420262F89CD76C749BB12C9", + "validator_index": 2, + "signature": "w4N2FT1fHEX8HrHY5jiq8ApNsuLBKK8r7KxwwLCcItydBME97IIK0SDcJOuNCuRd7zMfgp5JRGiJuSJSB8L6Aw==" + }, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.23279118Z", + "validator_address": "3CD4AABABDDEB7ABFEA9618732E331077A861D2B", + "validator_index": 3, + "signature": "mzZuMGM/q+92GiY4ZL2Qv4WvRcy7Vh91UwK5uhXbSA5j+R1RabmTjZpFBthQsOc2TGTyCvsE5en5F1pwfiWtBg==" + }, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + 
"timestamp": "2019-04-18T06:07:02.154340159Z", + "validator_address": "414FB3BBA216AF84C47E07D6EBAA2DCFC3563A2F", + "validator_index": 4, + "signature": "JETscHroKuMBkntQ7ALoWfTwptq5yyODmiWtA7Ac9v1//EsQJ/0jVE/YHmU1LYFXuJ68U3I7l6NH7jMEgxtnAA==" + }, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.23639005Z", + "validator_address": "71F253E6FEA9EDD4B4753F5483549FE4F0F3A21C", + "validator_index": 5, + "signature": "n8oPNTYLjPIGzrxaWkaAXZQa6iqcGXsVfXjp279slYgEqVAcGuHwMubzPPFQG1KWTuvpfwAhKBPNq4nqs0gLBg==" + }, + null, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.232155249Z", + "validator_address": "A71E5CD078B8C5C7B1AF88BCE84DD70B0557D93E", + "validator_index": 7, + "signature": "0nuhfUXcH6Xfcx3KIhCIpbBZfjEU3FQ3O7yGBHqnwwljCR/FG06+7wTIO2lfPkAnakXwspj+S36dBTkngQeADw==" + }, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": "AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.153428422Z", + "validator_address": "A9157B3FA6EB4C1E396B9B746E95327A07DC42E5", + "validator_index": 8, + "signature": "q/Yw0yasJttINNpUCQRf9YwhqLoVcdEihuRK8vfakhhJ6MFonX3Ynt5ZGoLlyIbVyaenVmP7lcefheFknc7NAQ==" + }, + { + "type": 2, + "height": 1, + "round": 1, + "block_id": { + "hash": "494A674FCDADB0F67BC99CBC33B6F8061E1B76A786485B662EE422C6918C86C8", + "parts": { + "total": 1, + "hash": 
"AF4CEDFAB7FB7E5263D9C5A72F5ECD3F3A4C186D8D3C10D80C95FCB2BCEE0E11" + } + }, + "timestamp": "2019-04-18T06:07:02.153867648Z", + "validator_address": "B0FBB52FF7EE93CC476DFE6B74FA1FC88584F30D", + "validator_index": 9, + "signature": "TV2VzCqxuAWQaDFHm4dXpXtEcYw/EdMxMtD5wSMoxLef2SPvnQlkG/sY2zOb3KNtgOmjelkdkzqeA0kAPIlLBw==" + }, + null + ] + } + ` + + headerJson3 := ` + { + "version": { + "block": 10, + "app": 0 + }, + "chain_id": "Binance-Chain-Tigris", + "height": 3, + "time": "2019-04-18T06:07:07.739441221Z", + "num_txs": 0, + "total_txs": 0, + "last_block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "last_commit_hash": "E7BDF3448318E45BA69BAE0BC3B94F5005279B4FFA1FEDF51AAF0F4CF1F6D529", + "data_hash": "", + "validators_hash": "43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49", + "next_validators_hash": "43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49", + "consensus_hash": "294D8FBD0B94B767A7EBA9840F299A3586DA7FE6B5DEAD3B7EECBA193C400F93", + "app_hash": "04C4F444F7101F26AAFC4F80507BEB61C066354BBB4B94502E6AF7E7DBE69CA1", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "14CFCE69B645F3F88BAF08EA5B77FA521E4480F9" + } + ` + lastCommitJson2 := ` + { + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "precommits": [ + null, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.81740164Z", + "validator_address": "14CFCE69B645F3F88BAF08EA5B77FA521E4480F9", + "validator_index": 1, + 
"signature": "VyHgok5hmgzjUJGPUPwAquXtmuLaJu4p2B5kzCvY/oI06LC2bbUPLUrkNFAtmz7XkRhVqUHMwgODSCsxpqpeAA==" + }, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.7393153Z", + "validator_address": "17B42E8F284D3CA0E420262F89CD76C749BB12C9", + "validator_index": 2, + "signature": "kv6bsey/YxsRHuxLo2juzDHPrD/ZkxY432ii1gHDnuFXhbqwZM0xrKb+BU7dz7DLhCYJ8wuw/yo43vYP9/WCAQ==" + }, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.817334959Z", + "validator_address": "3CD4AABABDDEB7ABFEA9618732E331077A861D2B", + "validator_index": 3, + "signature": "vS7ASlEZsg+5PFAJ8YYDozCBswsbG0VrdQKBh6R6JhhJLt84KSqSaVkAGQtzFH397kAxDJpDEtULNkzd6v6NAw==" + }, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.735960102Z", + "validator_address": "414FB3BBA216AF84C47E07D6EBAA2DCFC3563A2F", + "validator_index": 4, + "signature": "RUxRSxOA1pqqh8LvG2V7BRBanhcD51rC36gQNdztASlx4DVON7UhUPMaDYXKTjm/DQsMMPz8363HHflWz4RPAg==" + }, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.818207384Z", + "validator_address": "71F253E6FEA9EDD4B4753F5483549FE4F0F3A21C", + 
"validator_index": 5, + "signature": "zASakyX56qEkiAE+rXFXCjVch2Fr8CcOmUAkoQNvIu4efkK9wAkdTlfwSmDtxN+fa398MkadRGxkLWUHNS0BBg==" + }, + null, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.816888459Z", + "validator_address": "A71E5CD078B8C5C7B1AF88BCE84DD70B0557D93E", + "validator_index": 7, + "signature": "0tW24yXhTOo5eW8hAIwhFZUumdsHB9fdMtJgAKbKcGP79nOoCBYAoEObYITVL/XpfXjHglmDcTHUmTnpqQDCAw==" + }, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.739441221Z", + "validator_address": "A9157B3FA6EB4C1E396B9B746E95327A07DC42E5", + "validator_index": 8, + "signature": "V+dwIBkgrvZrDP659JP6sW3YejrStULTf2MROly+M8EZdRYfQLt56GcwcvricEl9VcHAHAJILgkENTmZG6WuDw==" + }, + { + "type": 2, + "height": 2, + "round": 1, + "block_id": { + "hash": "B7FB22262EE63921EB260E10A285AC14A96CC794F6298EABB7155DD1B4EF22CF", + "parts": { + "total": 1, + "hash": "F801520DB85754F18002773752C0020605B3C25A357339826C12D71A1522475D" + } + }, + "timestamp": "2019-04-18T06:07:07.738348307Z", + "validator_address": "B0FBB52FF7EE93CC476DFE6B74FA1FC88584F30D", + "validator_index": 9, + "signature": "zGrjAbOy20MQXk00aj/w5AkG7Bkbl11PQlZjKmEwKlerUXmLXDWBQ8Yez/bVvYJdfZLUek6WGPwvq7bXu4o5AA==" + }, + null + ] + } + ` + + //header1, err := parseHeaderFromJson(header1Json) + //assert.Nil(t, err) + header2, err := parseHeaderFromJson(header2Json) + assert.Nil(t, err) + lastCommit1, err := parseCommitFromJson(lastCommitJson1) + assert.Nil(t, err) + + header3, err := parseHeaderFromJson(headerJson3) + assert.Nil(t, err) + lastCommit2, err := 
parseCommitFromJson(lastCommitJson2) + assert.Nil(t, err) + + testcases := []struct { + header *types.Header + lastCommit *types.Commit + expectedBlockHeight int64 + expectedUnconfirmedHeaderNumber int + }{ + //{header1, nil, 0, 1}, + {header2, lastCommit1, 1, 1}, + {header3, lastCommit2, 2, 1}, + } + + // header chain + headerChain := new(LatestHeaderChain) + headerChain.LatestBlock = nil + headerChain.UnconfirmedBlocks = []*types.Block{} + + var isResult bool + + for _, tc := range testcases { + fmt.Printf("Receive header with height %v\n", tc.header.Height) + newBlock := &types.Block{ + Header: *tc.header, + LastCommit: tc.lastCommit, + } + headerChain, isResult, err = headerChain.AppendBlock(newBlock, MainnetBNBChainID) + assert.Nil(t, err) + assert.Equal(t, true, isResult) + assert.Equal(t, tc.expectedUnconfirmedHeaderNumber, len(headerChain.UnconfirmedBlocks)) + assert.Equal(t, tc.expectedBlockHeight, headerChain.LatestBlock.Height) + } +} \ No newline at end of file diff --git a/relaying/bnb/log.go b/relaying/bnb/log.go new file mode 100644 index 0000000000..18c07655bd --- /dev/null +++ b/relaying/bnb/log.go @@ -0,0 +1,14 @@ +package bnb + +import "github.com/incognitochain/incognito-chain/common" + +type RelayingLogger struct { + log common.Logger +} + +func (logger *RelayingLogger) Init(inst common.Logger) { + logger.log = inst +} + +// Global instant to use +var Logger = RelayingLogger{} diff --git a/relaying/bnb/proof.go b/relaying/bnb/proof.go new file mode 100644 index 0000000000..cf86014135 --- /dev/null +++ b/relaying/bnb/proof.go @@ -0,0 +1,112 @@ +package bnb + +import ( + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/binance-chain/go-sdk/client/rpc" + bnbtx "github.com/binance-chain/go-sdk/types/tx" + "github.com/incognitochain/incognito-chain/dataaccessobject/rawdbv2" + "github.com/incognitochain/incognito-chain/incdb" + "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/types" +) + 
+func getProofByTxHash(txHashStr string, url string) (*types.TxProof, *BNBRelayingError) { + txHash, err := hex.DecodeString(txHashStr) + if err != nil { + return nil, NewBNBRelayingError(UnexpectedErr, err) + } + + client := client.NewHTTP(url, "/websocket") + err = client.Start() + if err != nil { + // handle error + } + defer client.Stop() + tx, err := client.Tx(txHash, true) + //fmt.Printf("tx: %+v\n", tx) + + return &tx.Proof, nil +} + +func getTxsInBlockHeight(blockHeight int64, url string) (*types.Txs, *BNBRelayingError) { + block, err := GetBlock(blockHeight, url) + if err != nil { + return nil, err + } + + return &block.Txs, nil +} + +// GetBlock call API to url to get bnb block by blockHeight +func GetBlock(blockHeight int64, url string) (*types.Block, *BNBRelayingError) { + client := client.NewHTTP(url, "/websocket") + err := client.Start() + if err != nil { + // handle error + } + defer client.Stop() + block, err := client.Block(&blockHeight) + fmt.Printf("block: %+v\n", block) + + return block.Block, nil +} + +type BNBProof struct { + Proof *types.TxProof + BlockHeight int64 +} + +// buildProof creates a proof for tx at indexTx in block height +// Call API get all txs in a block height and build proof from those txs by Tendermint's code +func (p *BNBProof) Build(indexTx int, blockHeight int64, url string) *BNBRelayingError { + txs, err := getTxsInBlockHeight(blockHeight, url) + if err != nil { + return err + } + + proof := txs.Proof(indexTx) + + p.BlockHeight = blockHeight + p.Proof = &proof + + return nil +} + +func (p *BNBProof) Verify(db incdb.Database) (bool, *BNBRelayingError) { + dataHash, err := rawdbv2.GetBNBDataHashByBlockHeight(db, uint64(p.BlockHeight)) + if err != nil { + return false, NewBNBRelayingError(GetBNBDataHashErr, err) + } + err = p.Proof.Validate(dataHash) + if err != nil { + return false, NewBNBRelayingError(InvalidTxProofErr, err) + } + return true, nil +} + +func ParseBNBProofFromB64EncodeStr(b64EncodedStr string) (*BNBProof, 
*BNBRelayingError) { + jsonBytes, err := base64.StdEncoding.DecodeString(b64EncodedStr) + if err != nil { + return nil, NewBNBRelayingError(UnexpectedErr, err) + } + + proof := BNBProof{} + err = json.Unmarshal(jsonBytes, &proof) + if err != nil { + return nil, NewBNBRelayingError(UnexpectedErr, err) + } + + return &proof, nil +} + +func ParseTxFromData(data []byte) (*bnbtx.StdTx, *BNBRelayingError) { + tx, err := rpc.ParseTx(bnbtx.Cdc, data) + if err != nil { + return nil, NewBNBRelayingError(UnexpectedErr, err) + } + stdTx := tx.(bnbtx.StdTx) + return &stdTx, nil +} diff --git a/relaying/bnb/proof_test.go b/relaying/bnb/proof_test.go new file mode 100644 index 0000000000..c69f19470f --- /dev/null +++ b/relaying/bnb/proof_test.go @@ -0,0 +1,69 @@ +package bnb + +import ( + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/binance-chain/go-sdk/types/msg" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestGetProofByTxHash(t *testing.T){ + txProof, err := getProofByTxHash("421B68266AC570DEC49A12B1DDA0518D59205F4A874A24DB0F9448D4E03720A3", MainnetURLRemote) + assert.Nil(t, err) + fmt.Printf("txProof %v\n", txProof.Data) + + tx, err := ParseTxFromData(txProof.Data) + assert.Nil(t, err) + fmt.Printf("tx %+v\n", tx) +} + +func TestParseBNBTxFromData(t *testing.T) { + data, _ := hex.DecodeString("A702F0625DEE0A482A2C87FA0A200A141C4693E2455A9DA63C5D8F1240BE3D8466CD0E4612080A03424E4210904E12200A141C4693E2455A9DA63C5D8F1240BE3D8466CD0E4612080A03424E4210904E12710A26EB5AE98721037985B53085AEF69B8B481B5BF35BDE7B20DBF98DB970909048F725849412E3AC12408120FDA9DD3326D440C8D058851AFB5830BC6E12EF896B513EE0095D47273DFF0A640382A848E9B628F6B24672515C33776ABE38EAA5FA188DC8467DA1A63C3618A25C20D0EC031A647B2270726F746F636F6C223A22616C6570682D6F6666636861696E222C2276657273696F6E223A312C22636F6E74656E74223A22516D63644C5A6853736D47364C72445A44397364616A647077616266345978355650483950376152393765324D52227D") + tx, err := ParseTxFromData(data) + + 
senderAddress := "bnb1r3rf8cj9t2w6v0za3ufyp03as3nv6rjxteku6g" + receiverAddress := "bnb1r3rf8cj9t2w6v0za3ufyp03as3nv6rjxteku6g" + amount := int64(10000) + assert.Nil(t, err) + assert.Equal(t, senderAddress, tx.Msgs[0].(msg.SendMsg).Inputs[0].Address.String()) + assert.Equal(t, receiverAddress, tx.Msgs[0].(msg.SendMsg).Outputs[0].Address.String()) + + outputCoins := tx.Msgs[0].(msg.SendMsg).Outputs[0].Coins + actualAmount := int64(0) + for _, coin := range outputCoins{ + actualAmount += coin.Amount + } + assert.Equal(t, amount, actualAmount) +} + +func TestBNBProof(t *testing.T) { + txIndex := 0 + blockHeight := int64(60479432) + + // build bnb proof + bnbProof := new(BNBProof) + err := bnbProof.Build(txIndex, blockHeight, MainnetURLRemote) + assert.Nil(t, err) + assert.Equal(t, blockHeight, bnbProof.BlockHeight) + + // verify bnb proof + isValid, err := bnbProof.Verify(nil) + assert.Nil(t, err) + assert.Equal(t, true, isValid) + + // encode bnb proof to string + jsonStr, _ := json.Marshal(bnbProof) + b64EncodeProof := base64.StdEncoding.EncodeToString(jsonStr) + fmt.Printf("b64EncodeProof: %+v\n", b64EncodeProof) + + // decode bnb proof from string + bnbProof2, err := ParseBNBProofFromB64EncodeStr(b64EncodeProof) + assert.Nil(t, err) + + isValid2, err := bnbProof2.Verify(nil) + assert.Nil(t, err) + assert.Equal(t, true, isValid2) +} \ No newline at end of file diff --git a/relaying/bnb/utils.go b/relaying/bnb/utils.go new file mode 100644 index 0000000000..e47e23638d --- /dev/null +++ b/relaying/bnb/utils.go @@ -0,0 +1,161 @@ +package bnb + +import ( + "encoding/hex" + "errors" + "github.com/binance-chain/go-sdk/common/bech32" + "github.com/binance-chain/go-sdk/common/types" + tdmtypes "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" + "time" +) + +func GetAccAddressString(accAddress *types.AccAddress, chainID string) (string, error) { + switch chainID { + case TestnetBNBChainID: + { + bech32Addr, err := 
bech32.ConvertAndEncode(types.TestNetwork.Bech32Prefixes(), accAddress.Bytes()) + if err != nil { + return "", err + } + return bech32Addr, nil + } + case MainnetBNBChainID: + { + bech32Addr, err := bech32.ConvertAndEncode(types.ProdNetwork.Bech32Prefixes(), accAddress.Bytes()) + if err != nil { + return "", err + } + return bech32Addr, nil + } + default: + return "", errors.New("Invalid network chainID") + } +} + +func GetGenesisBNBHeaderBlockHeight(chainID string) (int64, error) { + switch chainID { + case TestnetBNBChainID: + { + return TestnetGenesisBlockHeight, nil + } + case MainnetBNBChainID: + { + return MainnetGenesisBlockHeight, nil + } + default: + return int64(0), errors.New("Invalid network chainID") + } +} + +func createGenesisHeaderChain(chainID string) (*tdmtypes.Block, error) { + if chainID == MainnetBNBChainID { + return getGenesisBNBBlockMainnet(), nil + } else if chainID == TestnetBNBChainID { + return getGenesisBNBBlockTestnet(), nil + } + + return nil, errors.New("Invalid network chainID") +} + +// getGenesisBNBBlockMainnet returns Block 79394120 from Binance mainnet +func getGenesisBNBBlockMainnet() *tdmtypes.Block { + lastBlockIDHash, _ := hex.DecodeString("B0930A48A06AB07BBDDA67CD8AFBFA9F9D1659561585D763AB71D01CDF5039DF") + partsHeaderHash, _ := hex.DecodeString("3CF1C5B9DE88425D37DF9DD7FCF0F6203BCAE5D13B1B47758DE1076B8BAFC5D8") + lastCommitHash, _ := hex.DecodeString("E42713455A1D9A30F1F3507394656A9C864AADF56EF62535F7B35ACC59BB40A0") + //dataHash, _ := hex.DecodeString("") + validatorsHash, _ := hex.DecodeString("43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49") + nextValidatorsHash, _ := hex.DecodeString("43C53A50D8653EF8CF1E5716DA68120FB51B636DC6D111EC3277B098ECD42D49") + consensusHash, _ := hex.DecodeString("294D8FBD0B94B767A7EBA9840F299A3586DA7FE6B5DEAD3B7EECBA193C400F93") + appHash, _ := hex.DecodeString("1F0659D2683CEA03D7B74426D6565CE2BFC2FEA7FD8F99A8CEB27B4FFAAB62DF") + lastResultsHash, _ := 
hex.DecodeString("6E340B9CFFB37A989CA544E6BB780A2C78901D3FB33738768511A30617AFA01D") + //evidenceHash, _ := hex.DecodeString("") + proposerAddress, _ := hex.DecodeString("14CFCE69B645F3F88BAF08EA5B77FA521E4480F9") + blkTime := time.Time{} + blkTime.UnmarshalText([]byte("2020-04-06T07:01:28.00123013Z")) + + return &tdmtypes.Block{ + Header: tdmtypes.Header{ + Version: version.Consensus{ + Block: 10, + App: 0, + }, + ChainID: MainnetBNBChainID, + Height: 79394120, + Time: blkTime, + NumTxs: 0, + TotalTxs: 76030277, + LastBlockID: tdmtypes.BlockID{ + Hash: lastBlockIDHash, + PartsHeader: tdmtypes.PartSetHeader{ + Total: 1, + Hash: partsHeaderHash, + }, + }, + LastCommitHash: lastCommitHash, + DataHash: []byte{}, + ValidatorsHash: validatorsHash, + + NextValidatorsHash: nextValidatorsHash, + ConsensusHash: consensusHash, + AppHash: appHash, + LastResultsHash: lastResultsHash, + EvidenceHash: []byte{}, + ProposerAddress: proposerAddress, + }, + Data: tdmtypes.Data{}, + Evidence: tdmtypes.EvidenceData{}, + LastCommit: &tdmtypes.Commit{}, + } +} + +// getGenesisBNBBlockMainnet returns Block 75955500 from Binance testnet +func getGenesisBNBBlockTestnet() *tdmtypes.Block { + lastBlockIDHash, _ := hex.DecodeString("D157C93E92BE2309EBD00FBED7DA9DDDACDA0CC8880D5378251F8253BE5A39B3") + partsHeaderHash, _ := hex.DecodeString("38B304DC260622222FCB36C1A713005142956C4C9F984B05DD6AFAE9D546DB41") + lastCommitHash, _ := hex.DecodeString("0AAC3C8D6A50792CA69361C4FFA6E03FBB8F2CAEC988E49EBE05A9F3061CA7E6") + dataHash, _ := hex.DecodeString("EF38E0A2BA343259087FF49E0A709825F4271CFBC824B637D7BE37687E6F4AEA") + validatorsHash, _ := hex.DecodeString("80D9AB0FC10D18CA0E0832D5F4C063C5489EC1443DFB738252D038A82131B27A") + nextValidatorsHash, _ := hex.DecodeString("80D9AB0FC10D18CA0E0832D5F4C063C5489EC1443DFB738252D038A82131B27A") + consensusHash, _ := hex.DecodeString("294D8FBD0B94B767A7EBA9840F299A3586DA7FE6B5DEAD3B7EECBA193C400F93") + appHash, _ := 
hex.DecodeString("DCB96B9F17459C78AAD15715DDEB9F3C7803C5CAFC0CF12393539C92F90FAEAB") + //lastResultsHash, _ := hex.DecodeString("") + //evidenceHash, _ := hex.DecodeString("") + proposerAddress, _ := hex.DecodeString("37EF19AF29679B368D2B9E9DE3F8769B35786676") + blkTime := time.Time{} + blkTime.UnmarshalText([]byte("2020-04-08T11:00:38.832568688Z")) + + return &tdmtypes.Block{ + Header: tdmtypes.Header{ + Version: version.Consensus{ + Block: 10, + App: 0, + }, + ChainID: TestnetBNBChainID, + Height: 75955500, + Time: blkTime, + NumTxs: 1, + TotalTxs: 50470541, + LastBlockID: tdmtypes.BlockID{ + Hash: lastBlockIDHash, + PartsHeader: tdmtypes.PartSetHeader{ + Total: 1, + Hash: partsHeaderHash, + }, + }, + LastCommitHash: lastCommitHash, + DataHash: dataHash, + ValidatorsHash: validatorsHash, + + NextValidatorsHash: nextValidatorsHash, + ConsensusHash: consensusHash, + AppHash: appHash, + LastResultsHash: []byte{}, + EvidenceHash: []byte{}, + ProposerAddress: proposerAddress, + }, + Data: tdmtypes.Data{}, + Evidence: tdmtypes.EvidenceData{}, + LastCommit: &tdmtypes.Commit{}, + } +} diff --git a/relaying/bnb/validator.go b/relaying/bnb/validator.go new file mode 100644 index 0000000000..a59f77942a --- /dev/null +++ b/relaying/bnb/validator.go @@ -0,0 +1,208 @@ +package bnb + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/types" + "strings" +) + +// Mainnet - Binance + +// address = hexEncode(sha256(pubKey)[:20]) +var MainnetValidatorAddresses = []string{ + "1175946A48EAA473868A0A6F52E6C66CCAF472EA", + "14CFCE69B645F3F88BAF08EA5B77FA521E4480F9", + "17B42E8F284D3CA0E420262F89CD76C749BB12C9", + "3CD4AABABDDEB7ABFEA9618732E331077A861D2B", + "414FB3BBA216AF84C47E07D6EBAA2DCFC3563A2F", + "71F253E6FEA9EDD4B4753F5483549FE4F0F3A21C", + "7235EF143D20FC0ABC427615D83014BB02D7C06C", + "A71E5CD078B8C5C7B1AF88BCE84DD70B0557D93E", + 
"A9157B3FA6EB4C1E396B9B746E95327A07DC42E5", + "B0FBB52FF7EE93CC476DFE6B74FA1FC88584F30D", + "B7707D9F593C62E85BB9E1A2366D12A97CD5DFF2", +} + +// public key on ed25519 curve (base64 encoded) +var MainnetValidatorB64EncodePubKeys = []string{ + "03adih94tMF6ll96MNQYH6u9H5afRtPI6Dta1IRUIdg=", + "K6ToFUL0N7euH4o13bIzx4mo3CJzQ3fZttY68cpAO2E=", + "342oxav9s4WVORMIu3HloeCqvcHQzzgxXVDWvpObJgY=", + "tmGe3KQUNISAAoHWmLcMk16RUq1Xsx2FwF8vefZLOfM=", + "lEbRSthsjS10eAsIRxEAAaHC4lLu3+pHU+u7/OOiL1I=", + "A1PGOfgMyAFZRENtqxAyJF1E+RLtwx72aP+fSkXNBZk=", + "6B03l+BUTDpxjh8F8Pt4IhLiSOeEwahRvofneuDbIw4=", + "Xj/NowvRnUXEtzaI2jXn2h/OfGhZssHyDtUgLSQUTj4=", + "sGpZotdb9dAU/OfJmbXnHnqWCHD3JYR9S6MjW66qCO8=", + "DJEOL+ZQ5OAUBrMxC0iftgqEvD/1xb7jpW1YmLaorzI=", + "cfLXuOwci5mmU0KbARjNIB95T0CdD+pNZbG2YvKwAGM=", +} + +// MainnetValidatorPubKeyBytes are results from base-64 decoding MainnetValidatorB64EncodePubKeys +var MainnetValidatorPubKeyBytes = [][]byte{ + {211, 118, 157, 138, 31, 120, 180, 193, 122, 150, 95, 122, 48, 212, 24, 31, 171, 189, 31, 150, 159, 70, 211, 200, 232, 59, 90, 212, 132, 84, 33, 216}, + {43, 164, 232, 21, 66, 244, 55, 183, 174, 31, 138, 53, 221, 178, 51, 199, 137, 168, 220, 34, 115, 67, 119, 217, 182, 214, 58, 241, 202, 64, 59, 97}, + {223, 141, 168, 197, 171, 253, 179, 133, 149, 57, 19, 8, 187, 113, 229, 161, 224, 170, 189, 193, 208, 207, 56, 49, 93, 80, 214, 190, 147, 155, 38, 6}, + {182, 97, 158, 220, 164, 20, 52, 132, 128, 2, 129, 214, 152, 183, 12, 147, 94, 145, 82, 173, 87, 179, 29, 133, 192, 95, 47, 121, 246, 75, 57, 243}, + {148, 70, 209, 74, 216, 108, 141, 45, 116, 120, 11, 8, 71, 17, 0, 1, 161, 194, 226, 82, 238, 223, 234, 71, 83, 235, 187, 252, 227, 162, 47, 82}, + {3, 83, 198, 57, 248, 12, 200, 1, 89, 68, 67, 109, 171, 16, 50, 36, 93, 68, 249, 18, 237, 195, 30, 246, 104, 255, 159, 74, 69, 205, 5, 153}, + {232, 29, 55, 151, 224, 84, 76, 58, 113, 142, 31, 5, 240, 251, 120, 34, 18, 226, 72, 231, 132, 193, 168, 81, 190, 135, 231, 122, 224, 219, 35, 14}, + {94, 63, 
205, 163, 11, 209, 157, 69, 196, 183, 54, 136, 218, 53, 231, 218, 31, 206, 124, 104, 89, 178, 193, 242, 14, 213, 32, 45, 36, 20, 78, 62}, + {176, 106, 89, 162, 215, 91, 245, 208, 20, 252, 231, 201, 153, 181, 231, 30, 122, 150, 8, 112, 247, 37, 132, 125, 75, 163, 35, 91, 174, 170, 8, 239}, + {12, 145, 14, 47, 230, 80, 228, 224, 20, 6, 179, 49, 11, 72, 159, 182, 10, 132, 188, 63, 245, 197, 190, 227, 165, 109, 88, 152, 182, 168, 175, 50}, + {113, 242, 215, 184, 236, 28, 139, 153, 166, 83, 66, 155, 1, 24, 205, 32, 31, 121, 79, 64, 157, 15, 234, 77, 101, 177, 182, 98, 242, 176, 0, 99}, +} + +var MainnetValidatorVotingPowers = []int64{ + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, +} + +// SHA256 returns the SHA256 of the bz. +// todo: need to be moved to common package +func SHA256(data []byte) []byte { + h := sha256.Sum256(data) + return h[:] +} + +// DecodePublicKeyValidator decodes encoded public key to public key in bytes array +func DecodePublicKeyValidator() error { + MainnetValidatorPubKeyBytes = make([][]byte, len(MainnetValidatorB64EncodePubKeys)) + for i, item := range MainnetValidatorB64EncodePubKeys { + bytes, err := base64.StdEncoding.DecodeString(item) + if err != nil { + return err + } + + // check public key bytes to address + pubKeyHash := SHA256(bytes) + addTmpStr := strings.ToUpper(hex.EncodeToString(pubKeyHash[0:20])) + + if addTmpStr == MainnetValidatorAddresses[i] { + MainnetValidatorPubKeyBytes[i] = bytes + } else{ + fmt.Printf("Public key is wrong %v\n", i) + } + } + + fmt.Printf("MainnetValidatorPubKeyBytes %#v\n", MainnetValidatorPubKeyBytes) + return nil +} + +// Local + +// address = hexEncode(sha256(pubKey)[:20]) +//var ValidatorAddressesTestnet = []string{ +// "87E733422966685C1B24F42A3184AC959EC49A4C", +//} +// +//// public key on ed25519 curve (base64 encoded) +//var ValidatorB64EncodePubKeysTestnet 
= []string{ +// "uND4Li1FIzpmjmEe9RZGZlKr53zLP8ZHUP8DSQCZpN4=", +//} +// +//// MainnetValidatorPubKeyBytes are results from base-64 decoding MainnetValidatorB64EncodePubKeys +//var ValidatorPubKeyBytesTestnet = [][]byte{ +// []byte{0xb8, 0xd0, 0xf8, 0x2e, 0x2d, 0x45, 0x23, 0x3a, 0x66, 0x8e, 0x61, 0x1e, 0xf5, 0x16, 0x46, 0x66, 0x52, 0xab, 0xe7, 0x7c, 0xcb, 0x3f, 0xc6, 0x47, 0x50, 0xff, 0x3, 0x49, 0x0, 0x99, 0xa4, 0xde}, +//} +// +//var ValidatorVotingPowersTestnet = []int64{ +// 1000000000000, +//} + + +// Testnet +var ValidatorAddressesTestnet = []string{ + "06FD60078EB4C2356137DD50036597DB267CF616", + "18E69CC672973992BB5F76D049A5B2C5DDF77436", + "344C39BB8F4512D6CAB1F6AAFAC1811EF9D8AFDF", + "37EF19AF29679B368D2B9E9DE3F8769B35786676", + "62633D9DB7ED78E951F79913FDC8231AA77EC12B", + "7B343E041CA130000A8BC00C35152BD7E7740037", + "91844D296BD8E591448EFC65FD6AD51A888D58FA", + "B3727172CE6473BC780298A2D66C12F1A14F5B2A", + "B6F20C7FAA2B2F6F24518FA02B71CB5F4A09FBA3", + "E0DD72609CC106210D1AA13936CB67B93A0AEE21", + "FC3108DC3814888F4187452182BC1BAF83B71BC9", +} + +// MainnetValidatorPubKeyBytes are results from base-64 decoding MainnetValidatorB64EncodePubKeys +var ValidatorPubKeyBytesTestnet = [][]byte{ + {225,124,190,156,32,205,207,223,135,107,59,18,151,141,50,100,160,7,252,170,167,28,76,219,112,29,158,188,3,35,244,79}, + {24,78,123,16,61,52,196,16,3,249,184,100,213,248,193,173,218,155,208,67,107,37,59,179,200,68,188,115,156,30,119,201}, + {77,66,10,234,132,62,146,160,207,230,157,137,105,109,255,104,39,118,159,156,181,42,36,154,245,55,206,137,191,42,75,116}, + {189,3,222,159,138,178,158,40,0,9,78,21,63,172,111,105,108,250,81,37,54,201,194,248,4,220,178,194,196,228,174,214}, + {143,74,116,160,115,81,137,93,223,55,48,87,185,143,174,109,250,242,205,33,243,122,6,62,25,96,16,120,254,71,13,83}, + {74,93,71,83,235,121,249,46,128,239,226,45,247,172,164,246,102,164,244,75,248,28,83,108,74,9,212,185,197,182,84,181}, + 
{200,14,154,190,247,255,67,156,16,198,143,232,241,48,61,237,223,197,39,113,140,59,55,216,186,104,7,68,110,60,130,122}, + {145,66,175,204,105,27,124,192,93,38,199,176,190,12,139,70,65,130,148,23,23,48,224,121,243,132,253,226,250,80,186,252}, + {73,178,136,228,235,187,58,40,28,45,84,111,195,2,83,213,186,240,137,147,182,229,210,149,251,120,122,91,49,74,41,142}, + {4,34,67,57,104,143,1,46,100,157,228,142,36,24,128,9,46,170,143,106,160,244,241,75,252,249,224,199,105,23,192,182}, + {64,52,179,124,237,168,160,191,19,177,171,174,238,122,143,147,131,84,32,153,165,84,210,25,185,61,12,230,158,57,112,232}, +} + +var ValidatorVotingPowersTestnet = []int64{ + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, + 1000000000000, +} + +func NewFixedValidators(chainID string) (map[string]*types.Validator, error) { + if chainID == MainnetBNBChainID { + if len(MainnetValidatorAddresses) != len(MainnetValidatorPubKeyBytes) || len(MainnetValidatorAddresses) != len(MainnetValidatorVotingPowers) { + return nil, errors.New("invalid validator set data") + } + validators := make(map[string]*types.Validator, len(MainnetValidatorAddresses)) + for i, addressStr := range MainnetValidatorAddresses { + var pubKey ed25519.PubKeyEd25519 + copy(pubKey[:], MainnetValidatorPubKeyBytes[i]) + validators[addressStr] = &types.Validator{ + PubKey: pubKey, + VotingPower: MainnetValidatorVotingPowers[i], + } + } + return validators, nil + } else if chainID == TestnetBNBChainID { + if len(ValidatorAddressesTestnet) != len(ValidatorPubKeyBytesTestnet) || len(ValidatorAddressesTestnet) != len(ValidatorVotingPowersTestnet) { + return nil, errors.New("invalid validator set data") + } + validators := make(map[string]*types.Validator, len(ValidatorAddressesTestnet)) + for i, addressStr := range ValidatorAddressesTestnet { + var pubKey ed25519.PubKeyEd25519 + copy(pubKey[:], 
ValidatorPubKeyBytesTestnet[i]) + validators[addressStr] = &types.Validator{ + PubKey: pubKey, + VotingPower: ValidatorVotingPowersTestnet[i], + } + } + return validators, nil + } + + return nil, errors.New("Invalid network chainID") +} + +var validatorsMainnet, _ = NewFixedValidators(MainnetBNBChainID) +var validatorsTestnet, _ = NewFixedValidators(TestnetBNBChainID) + diff --git a/relaying/bnb/validator_test.go b/relaying/bnb/validator_test.go new file mode 100644 index 0000000000..6c2c213400 --- /dev/null +++ b/relaying/bnb/validator_test.go @@ -0,0 +1,14 @@ +package bnb + +import ( + "encoding/base64" + "fmt" + "testing" +) + +func TestDecodePubKeyValidator(t *testing.T){ + b64EncodePubKey := "uND4Li1FIzpmjmEe9RZGZlKr53zLP8ZHUP8DSQCZpN4=" + + pubKeyBytes, _ := base64.StdEncoding.DecodeString(b64EncodePubKey) + fmt.Printf("pubKeyBytes: %#v\n", pubKeyBytes) +} \ No newline at end of file diff --git a/relaying/btc/accept.go b/relaying/btc/accept.go new file mode 100644 index 0000000000..c5d6f286a0 --- /dev/null +++ b/relaying/btc/accept.go @@ -0,0 +1,78 @@ +package btcrelaying + +import ( + "fmt" + + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcutil" +) + +func (b *BlockChain) maybeAcceptBlockV2(block *btcutil.Block, flags BehaviorFlags) (bool, error) { + // The height of this block is one more than the referenced previous + // block. + prevHash := &block.MsgBlock().Header.PrevBlock + prevNode := b.index.LookupNode(prevHash) + if prevNode == nil { + str := fmt.Sprintf("previous block %s is unknown", prevHash) + return false, ruleError(ErrPreviousBlockUnknown, str) + } else if b.index.NodeStatus(prevNode).KnownInvalid() { + str := fmt.Sprintf("previous block %s is known to be invalid", prevHash) + return false, ruleError(ErrInvalidAncestorBlock, str) + } + + blockHeight := prevNode.height + 1 + block.SetHeight(blockHeight) + + // The block must pass all of the validation rules which depend on the + // position of the block within the block chain. 
+ err := b.checkBlockContext(block, prevNode, flags) + if err != nil { + return false, err + } + + // Insert the block into the database if it's not already there. Even + // though it is possible the block will ultimately fail to connect, it + // has already passed all proof-of-work and validity tests which means + // it would be prohibitively expensive for an attacker to fill up the + // disk with a bunch of blocks that fail to connect. This is necessary + // since it allows block download to be decoupled from the much more + // expensive connection logic. It also has some other nice properties + // such as making blocks that never become part of the main chain or + // blocks that fail to connect available for further analysis. + err = b.db.Update(func(dbTx database.Tx) error { + return dbStoreBlock(dbTx, block) + }) + if err != nil { + return false, err + } + + // Create a new block node for the block and add it to the node index. Even + // if the block ultimately gets connected to the main chain, it starts out + // on a side chain. + blockHeader := &block.MsgBlock().Header + newNode := newBlockNode(blockHeader, prevNode) + newNode.status = statusDataStored + + b.index.AddNode(newNode) + err = b.index.flushToDB() + if err != nil { + return false, err + } + + // Connect the passed block to the chain while respecting proper chain + // selection according to the chain with the most proof of work. This + // also handles validation of the transaction scripts. + isMainChain, err := b.connectBestChainV2(newNode, block, flags) + if err != nil { + return false, err + } + + // Notify the caller that the new block was accepted into the block + // chain. The caller would typically want to react by relaying the + // inventory to other peers. 
+ b.chainLock.Unlock() + b.sendNotification(NTBlockAccepted, block) + b.chainLock.Lock() + + return isMainChain, nil +} diff --git a/relaying/btc/blockindex.go b/relaying/btc/blockindex.go new file mode 100644 index 0000000000..5c0ab9300b --- /dev/null +++ b/relaying/btc/blockindex.go @@ -0,0 +1,344 @@ +package btcrelaying + +import ( + "math/big" + "sort" + "sync" + "time" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcd/wire" +) + +// blockStatus is a bit field representing the validation state of the block. +type blockStatus byte + +const ( + // statusDataStored indicates that the block's payload is stored on disk. + statusDataStored blockStatus = 1 << iota + + // statusValid indicates that the block has been fully validated. + statusValid + + // statusValidateFailed indicates that the block has failed validation. + statusValidateFailed + + // statusInvalidAncestor indicates that one of the block's ancestors has + // has failed validation, thus the block is also invalid. + statusInvalidAncestor + + // statusNone indicates that the block has no validation state flags set. + // + // NOTE: This must be defined last in order to avoid influencing iota. + statusNone blockStatus = 0 +) + +// HaveData returns whether the full block data is stored in the database. This +// will return false for a block node where only the header is downloaded or +// kept. +func (status blockStatus) HaveData() bool { + return status&statusDataStored != 0 +} + +// KnownValid returns whether the block is known to be valid. This will return +// false for a valid block that has not been fully validated yet. +func (status blockStatus) KnownValid() bool { + return status&statusValid != 0 +} + +// KnownInvalid returns whether the block is known to be invalid. This may be +// because the block itself failed validation or any of its ancestors is +// invalid. 
This will return false for invalid blocks that have not been proven +// invalid yet. +func (status blockStatus) KnownInvalid() bool { + return status&(statusValidateFailed|statusInvalidAncestor) != 0 +} + +// blockNode represents a block within the block chain and is primarily used to +// aid in selecting the best chain to be the main chain. The main chain is +// stored into the block database. +type blockNode struct { + // NOTE: Additions, deletions, or modifications to the order of the + // definitions in this struct should not be changed without considering + // how it affects alignment on 64-bit platforms. The current order is + // specifically crafted to result in minimal padding. There will be + // hundreds of thousands of these in memory, so a few extra bytes of + // padding adds up. + + // parent is the parent block for this node. + parent *blockNode + + // hash is the double sha 256 of the block. + hash chainhash.Hash + + // workSum is the total amount of work in the chain up to and including + // this node. + workSum *big.Int + + // height is the position in the block chain. + height int32 + + // Some fields from block headers to aid in best chain selection and + // reconstructing headers from memory. These must be treated as + // immutable and are intentionally ordered to avoid padding on 64-bit + // platforms. + version int32 + bits uint32 + nonce uint32 + timestamp int64 + merkleRoot chainhash.Hash + + // status is a bitfield representing the validation state of the block. The + // status field, unlike the other fields, may be written to and so should + // only be accessed using the concurrent-safe NodeStatus method on + // blockIndex once the node has been added to the global index. + status blockStatus +} + +// initBlockNode initializes a block node from the given header and parent node, +// calculating the height and workSum from the respective fields on the parent. +// This function is NOT safe for concurrent access. 
It must only be called when +// initially creating a node. +func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parent *blockNode) { + *node = blockNode{ + hash: blockHeader.BlockHash(), + workSum: CalcWork(blockHeader.Bits), + version: blockHeader.Version, + bits: blockHeader.Bits, + nonce: blockHeader.Nonce, + timestamp: blockHeader.Timestamp.Unix(), + merkleRoot: blockHeader.MerkleRoot, + } + if parent != nil { + node.parent = parent + node.height = parent.height + 1 + node.workSum = node.workSum.Add(parent.workSum, node.workSum) + } +} + +// newBlockNode returns a new block node for the given block header and parent +// node, calculating the height and workSum from the respective fields on the +// parent. This function is NOT safe for concurrent access. +func newBlockNode(blockHeader *wire.BlockHeader, parent *blockNode) *blockNode { + var node blockNode + initBlockNode(&node, blockHeader, parent) + return &node +} + +// Header constructs a block header from the node and returns it. +// +// This function is safe for concurrent access. +func (node *blockNode) Header() wire.BlockHeader { + // No lock is needed because all accessed fields are immutable. + prevHash := &zeroHash + if node.parent != nil { + prevHash = &node.parent.hash + } + return wire.BlockHeader{ + Version: node.version, + PrevBlock: *prevHash, + MerkleRoot: node.merkleRoot, + Timestamp: time.Unix(node.timestamp, 0), + Bits: node.bits, + Nonce: node.nonce, + } +} + +// Ancestor returns the ancestor block node at the provided height by following +// the chain backwards from this node. The returned block will be nil when a +// height is requested that is after the height of the passed node or is less +// than zero. +// +// This function is safe for concurrent access. 
+func (node *blockNode) Ancestor(height int32) *blockNode { + if height < 0 || height > node.height { + return nil + } + + n := node + for ; n != nil && n.height != height; n = n.parent { + // Intentionally left blank + } + + return n +} + +// RelativeAncestor returns the ancestor block node a relative 'distance' blocks +// before this node. This is equivalent to calling Ancestor with the node's +// height minus provided distance. +// +// This function is safe for concurrent access. +func (node *blockNode) RelativeAncestor(distance int32) *blockNode { + return node.Ancestor(node.height - distance) +} + +// CalcPastMedianTime calculates the median time of the previous few blocks +// prior to, and including, the block node. +// +// This function is safe for concurrent access. +func (node *blockNode) CalcPastMedianTime() time.Time { + // Create a slice of the previous few block timestamps used to calculate + // the median per the number defined by the constant medianTimeBlocks. + timestamps := make([]int64, medianTimeBlocks) + numNodes := 0 + iterNode := node + for i := 0; i < medianTimeBlocks && iterNode != nil; i++ { + timestamps[i] = iterNode.timestamp + numNodes++ + + iterNode = iterNode.parent + } + + // Prune the slice to the actual number of available timestamps which + // will be fewer than desired near the beginning of the block chain + // and sort them. + timestamps = timestamps[:numNodes] + sort.Sort(timeSorter(timestamps)) + + // NOTE: The consensus rules incorrectly calculate the median for even + // numbers of blocks. A true median averages the middle two elements + // for a set with an even number of elements in it. Since the constant + // for the previous number of blocks to be used is odd, this is only an + // issue for a few blocks near the beginning of the chain. 
I suspect + // this is an optimization even though the result is slightly wrong for + // a few of the first blocks since after the first few blocks, there + // will always be an odd number of blocks in the set per the constant. + // + // This code follows suit to ensure the same rules are used, however, be + // aware that should the medianTimeBlocks constant ever be changed to an + // even number, this code will be wrong. + medianTimestamp := timestamps[numNodes/2] + return time.Unix(medianTimestamp, 0) +} + +// blockIndex provides facilities for keeping track of an in-memory index of the +// block chain. Although the name block chain suggests a single chain of +// blocks, it is actually a tree-shaped structure where any node can have +// multiple children. However, there can only be one active branch which does +// indeed form a chain from the tip all the way back to the genesis block. +type blockIndex struct { + // The following fields are set when the instance is created and can't + // be changed afterwards, so there is no need to protect them with a + // separate mutex. + db database.DB + chainParams *chaincfg.Params + + sync.RWMutex + index map[chainhash.Hash]*blockNode + dirty map[*blockNode]struct{} +} + +// newBlockIndex returns a new empty instance of a block index. The index will +// be dynamically populated as block nodes are loaded from the database and +// manually added. +func newBlockIndex(db database.DB, chainParams *chaincfg.Params) *blockIndex { + return &blockIndex{ + db: db, + chainParams: chainParams, + index: make(map[chainhash.Hash]*blockNode), + dirty: make(map[*blockNode]struct{}), + } +} + +// HaveBlock returns whether or not the block index contains the provided hash. +// +// This function is safe for concurrent access. +func (bi *blockIndex) HaveBlock(hash *chainhash.Hash) bool { + bi.RLock() + _, hasBlock := bi.index[*hash] + bi.RUnlock() + return hasBlock +} + +// LookupNode returns the block node identified by the provided hash. 
It will +// return nil if there is no entry for the hash. +// +// This function is safe for concurrent access. +func (bi *blockIndex) LookupNode(hash *chainhash.Hash) *blockNode { + bi.RLock() + node := bi.index[*hash] + bi.RUnlock() + return node +} + +// AddNode adds the provided node to the block index and marks it as dirty. +// Duplicate entries are not checked so it is up to caller to avoid adding them. +// +// This function is safe for concurrent access. +func (bi *blockIndex) AddNode(node *blockNode) { + bi.Lock() + bi.addNode(node) + bi.dirty[node] = struct{}{} + bi.Unlock() +} + +// addNode adds the provided node to the block index, but does not mark it as +// dirty. This can be used while initializing the block index. +// +// This function is NOT safe for concurrent access. +func (bi *blockIndex) addNode(node *blockNode) { + bi.index[node.hash] = node +} + +// NodeStatus provides concurrent-safe access to the status field of a node. +// +// This function is safe for concurrent access. +func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus { + bi.RLock() + status := node.status + bi.RUnlock() + return status +} + +// SetStatusFlags flips the provided status flags on the block node to on, +// regardless of whether they were on or off previously. This does not unset any +// flags currently on. +// +// This function is safe for concurrent access. +func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) { + bi.Lock() + node.status |= flags + bi.dirty[node] = struct{}{} + bi.Unlock() +} + +// UnsetStatusFlags flips the provided status flags on the block node to off, +// regardless of whether they were on or off previously. +// +// This function is safe for concurrent access. +func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) { + bi.Lock() + node.status &^= flags + bi.dirty[node] = struct{}{} + bi.Unlock() +} + +// flushToDB writes all dirty block nodes to the database. 
If all writes +// succeed, this clears the dirty set. +func (bi *blockIndex) flushToDB() error { + bi.Lock() + if len(bi.dirty) == 0 { + bi.Unlock() + return nil + } + + err := bi.db.Update(func(dbTx database.Tx) error { + for node := range bi.dirty { + err := dbStoreBlockNode(dbTx, node) + if err != nil { + return err + } + } + return nil + }) + + // If write was successful, clear the dirty set. + if err == nil { + bi.dirty = make(map[*blockNode]struct{}) + } + + bi.Unlock() + return err +} diff --git a/relaying/btc/chain.go b/relaying/btc/chain.go new file mode 100644 index 0000000000..457e1100ac --- /dev/null +++ b/relaying/btc/chain.go @@ -0,0 +1,2013 @@ +package btcrelaying + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +const ( + // maxOrphanBlocks is the maximum number of orphan blocks that can be + // queued. + maxOrphanBlocks = 100 +) + +// BlockLocator is used to help locate a specific block. The algorithm for +// building the block locator is to add the hashes in reverse order until +// the genesis block is reached. In order to keep the list of locator hashes +// to a reasonable number of entries, first the most recent previous 12 block +// hashes are added, then the step is doubled each loop iteration to +// exponentially decrease the number of hashes as a function of the distance +// from the block being located. +// +// For example, assume a block chain with a side chain as depicted below: +// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 +// \-> 16a -> 17a +// +// The block locator for block 17a would be the hashes of blocks: +// [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis] +type BlockLocator []*chainhash.Hash + +// orphanBlock represents a block that we don't yet have the parent for. 
It +// is a normal block plus an expiration time to prevent caching the orphan +// forever. +type orphanBlock struct { + block *btcutil.Block + expiration time.Time +} + +// BestState houses information about the current best block and other info +// related to the state of the main chain as it exists from the point of view of +// the current best block. +// +// The BestSnapshot method can be used to obtain access to this information +// in a concurrent safe manner and the data will not be changed out from under +// the caller when chain state changes occur as the function name implies. +// However, the returned snapshot must be treated as immutable since it is +// shared by all callers. +type BestState struct { + Hash chainhash.Hash // The hash of the block. + Height int32 // The height of the block. + Bits uint32 // The difficulty bits of the block. + BlockSize uint64 // The size of the block. + BlockWeight uint64 // The weight of the block. + NumTxns uint64 // The number of txns in the block. + TotalTxns uint64 // The total number of txns in the chain. + MedianTime time.Time // Median time as per CalcPastMedianTime. +} + +// newBestState returns a new best stats instance for the given parameters. +func newBestState(node *blockNode, blockSize, blockWeight, numTxns, + totalTxns uint64, medianTime time.Time) *BestState { + + return &BestState{ + Hash: node.hash, + Height: node.height, + Bits: node.bits, + BlockSize: blockSize, + BlockWeight: blockWeight, + NumTxns: numTxns, + TotalTxns: totalTxns, + MedianTime: medianTime, + } +} + +// BlockChain provides functions for working with the bitcoin block chain. +// It includes functionality such as rejecting duplicate blocks, ensuring blocks +// follow all rules, orphan handling, checkpoint handling, and best chain +// selection with reorganization. 
+type BlockChain struct { + // The following fields are set when the instance is created and can't + // be changed afterwards, so there is no need to protect them with a + // separate mutex. + checkpoints []chaincfg.Checkpoint + checkpointsByHeight map[int32]*chaincfg.Checkpoint + db database.DB + chainParams *chaincfg.Params + timeSource MedianTimeSource + sigCache *txscript.SigCache + indexManager IndexManager + hashCache *txscript.HashCache + + // The following fields are calculated based upon the provided chain + // parameters. They are also set when the instance is created and + // can't be changed afterwards, so there is no need to protect them with + // a separate mutex. + minRetargetTimespan int64 // target timespan / adjustment factor + maxRetargetTimespan int64 // target timespan * adjustment factor + blocksPerRetarget int32 // target timespan / target time per block + + // chainLock protects concurrent access to the vast majority of the + // fields in this struct below this point. + chainLock sync.RWMutex + + // These fields are related to the memory block index. They both have + // their own locks, however they are often also protected by the chain + // lock to help prevent logic races when blocks are being processed. + // + // index houses the entire block index in memory. The block index is + // a tree-shaped structure. + // + // bestChain tracks the current active chain by making use of an + // efficient chain view into the block index. + index *blockIndex + bestChain *chainView + + // These fields are related to handling of orphan blocks. They are + // protected by a combination of the chain lock and the orphan lock. + orphanLock sync.RWMutex + orphans map[chainhash.Hash]*orphanBlock + prevOrphans map[chainhash.Hash][]*orphanBlock + oldestOrphan *orphanBlock + + // These fields are related to checkpoint handling. They are protected + // by the chain lock. 
+ nextCheckpoint *chaincfg.Checkpoint + checkpointNode *blockNode + + // The state is used as a fairly efficient way to cache information + // about the current best chain state that is returned to callers when + // requested. It operates on the principle of MVCC such that any time a + // new block becomes the best block, the state pointer is replaced with + // a new struct and the old state is left untouched. In this way, + // multiple callers can be pointing to different best chain states. + // This is acceptable for most callers because the state is only being + // queried at a specific point in time. + // + // In addition, some of the fields are stored in the database so the + // chain state can be quickly reconstructed on load. + stateLock sync.RWMutex + stateSnapshot *BestState + + // The following caches are used to efficiently keep track of the + // current deployment threshold state of each rule change deployment. + // + // This information is stored in the database so it can be quickly + // reconstructed on load. + // + // warningCaches caches the current deployment threshold state for blocks + // in each of the **possible** deployments. This is used in order to + // detect when new unrecognized rule changes are being voted on and/or + // have been activated such as will be the case when older versions of + // the software are being used + // + // deploymentCaches caches the current deployment threshold state for + // blocks in each of the actively defined deployments. + warningCaches []thresholdStateCache + deploymentCaches []thresholdStateCache + + // The following fields are used to determine if certain warnings have + // already been shown. + // + // unknownRulesWarned refers to warnings due to unknown rules being + // activated. + // + // unknownVersionsWarned refers to warnings due to unknown versions + // being mined. 
+ unknownRulesWarned bool + unknownVersionsWarned bool + + // The notifications field stores a slice of callbacks to be executed on + // certain blockchain events. + notificationsLock sync.RWMutex + notifications []NotificationCallback +} + +func (b *BlockChain) GetChainParams() *chaincfg.Params { + return b.chainParams +} + +// HaveBlock returns whether or not the chain instance has the block represented +// by the passed hash. This includes checking the various places a block can +// be like part of the main chain, on a side chain, or in the orphan pool. +// +// This function is safe for concurrent access. +func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) { + exists, err := b.blockExists(hash) + if err != nil { + return false, err + } + return exists || b.IsKnownOrphan(hash), nil +} + +// IsKnownOrphan returns whether the passed hash is currently a known orphan. +// Keep in mind that only a limited number of orphans are held onto for a +// limited amount of time, so this function must not be used as an absolute +// way to test if a block is an orphan block. A full block (as opposed to just +// its hash) must be passed to ProcessBlock for that purpose. However, calling +// ProcessBlock with an orphan that already exists results in an error, so this +// function provides a mechanism for a caller to intelligently detect *recent* +// duplicate orphans and react accordingly. +// +// This function is safe for concurrent access. +func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool { + // Protect concurrent access. Using a read lock only so multiple + // readers can query without blocking each other. + b.orphanLock.RLock() + _, exists := b.orphans[*hash] + b.orphanLock.RUnlock() + + return exists +} + +// GetOrphanRoot returns the head of the chain for the provided hash from the +// map of orphan blocks. +// +// This function is safe for concurrent access. 
+func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash { + // Protect concurrent access. Using a read lock only so multiple + // readers can query without blocking each other. + b.orphanLock.RLock() + defer b.orphanLock.RUnlock() + + // Keep looping while the parent of each orphaned block is + // known and is an orphan itself. + orphanRoot := hash + prevHash := hash + for { + orphan, exists := b.orphans[*prevHash] + if !exists { + break + } + orphanRoot = prevHash + prevHash = &orphan.block.MsgBlock().Header.PrevBlock + } + + return orphanRoot +} + +// removeOrphanBlock removes the passed orphan block from the orphan pool and +// previous orphan index. +func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) { + // Protect concurrent access. + b.orphanLock.Lock() + defer b.orphanLock.Unlock() + + // Remove the orphan block from the orphan pool. + orphanHash := orphan.block.Hash() + delete(b.orphans, *orphanHash) + + // Remove the reference from the previous orphan index too. An indexing + // for loop is intentionally used over a range here as range does not + // reevaluate the slice on each iteration nor does it adjust the index + // for the modified slice. + prevHash := &orphan.block.MsgBlock().Header.PrevBlock + orphans := b.prevOrphans[*prevHash] + for i := 0; i < len(orphans); i++ { + hash := orphans[i].block.Hash() + if hash.IsEqual(orphanHash) { + copy(orphans[i:], orphans[i+1:]) + orphans[len(orphans)-1] = nil + orphans = orphans[:len(orphans)-1] + i-- + } + } + b.prevOrphans[*prevHash] = orphans + + // Remove the map entry altogether if there are no longer any orphans + // which depend on the parent hash. + if len(b.prevOrphans[*prevHash]) == 0 { + delete(b.prevOrphans, *prevHash) + } +} + +// addOrphanBlock adds the passed block (which is already determined to be +// an orphan prior calling this function) to the orphan pool. It lazily cleans +// up any expired blocks so a separate cleanup poller doesn't need to be run. 
+// It also imposes a maximum limit on the number of outstanding orphan +// blocks and will remove the oldest received orphan block if the limit is +// exceeded. +func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { + // Remove expired orphan blocks. + for _, oBlock := range b.orphans { + if time.Now().After(oBlock.expiration) { + b.removeOrphanBlock(oBlock) + continue + } + + // Update the oldest orphan block pointer so it can be discarded + // in case the orphan pool fills up. + if b.oldestOrphan == nil || oBlock.expiration.Before(b.oldestOrphan.expiration) { + b.oldestOrphan = oBlock + } + } + + // Limit orphan blocks to prevent memory exhaustion. + if len(b.orphans)+1 > maxOrphanBlocks { + // Remove the oldest orphan to make room for the new one. + b.removeOrphanBlock(b.oldestOrphan) + b.oldestOrphan = nil + } + + // Protect concurrent access. This is intentionally done here instead + // of near the top since removeOrphanBlock does its own locking and + // the range iterator is not invalidated by removing map entries. + b.orphanLock.Lock() + defer b.orphanLock.Unlock() + + // Insert the block into the orphan map with an expiration time + // 1 hour from now. + expiration := time.Now().Add(time.Hour) + oBlock := &orphanBlock{ + block: block, + expiration: expiration, + } + b.orphans[*block.Hash()] = oBlock + + // Add to previous hash lookup index for faster dependency lookups. + prevHash := &block.MsgBlock().Header.PrevBlock + b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock) +} + +// SequenceLock represents the converted relative lock-time in seconds, and +// absolute block-height for a transaction input's relative lock-times. +// According to SequenceLock, after the referenced input has been confirmed +// within a block, a transaction spending that input can be included into a +// block either after 'seconds' (according to past median time), or once the +// 'BlockHeight' has been reached. 
+type SequenceLock struct { + Seconds int64 + BlockHeight int32 +} + +// CalcSequenceLock computes a relative lock-time SequenceLock for the passed +// transaction using the passed UtxoViewpoint to obtain the past median time +// for blocks in which the referenced inputs of the transactions were included +// within. The generated SequenceLock lock can be used in conjunction with a +// block height, and adjusted median block time to determine if all the inputs +// referenced within a transaction have reached sufficient maturity allowing +// the candidate transaction to be included in a block. +// +// This function is safe for concurrent access. +func (b *BlockChain) CalcSequenceLock(tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) { + b.chainLock.Lock() + defer b.chainLock.Unlock() + + return b.calcSequenceLock(b.bestChain.Tip(), tx, utxoView, mempool) +} + +// calcSequenceLock computes the relative lock-times for the passed +// transaction. See the exported version, CalcSequenceLock for further details. +// +// This function MUST be called with the chain state lock held (for writes). +func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) { + // A value of -1 for each relative lock type represents a relative time + // lock value that will allow a transaction to be included in a block + // at any given height or time. This value is returned as the relative + // lock time in the case that BIP 68 is disabled, or has not yet been + // activated. + sequenceLock := &SequenceLock{Seconds: -1, BlockHeight: -1} + + // The sequence locks semantics are always active for transactions + // within the mempool. + csvSoftforkActive := mempool + + // If we're performing block validation, then we need to query the BIP9 + // state. + if !csvSoftforkActive { + // Obtain the latest BIP9 version bits state for the + // CSV-package soft-fork deployment. 
The adherence of sequence + // locks depends on the current soft-fork state. + csvState, err := b.deploymentState(node.parent, chaincfg.DeploymentCSV) + if err != nil { + return nil, err + } + csvSoftforkActive = csvState == ThresholdActive + } + + // If the transaction's version is less than 2, and BIP 68 has not yet + // been activated then sequence locks are disabled. Additionally, + // sequence locks don't apply to coinbase transactions. Therefore, we + // return sequence lock values of -1 indicating that this transaction + // can be included within a block at any given height or time. + mTx := tx.MsgTx() + sequenceLockActive := mTx.Version >= 2 && csvSoftforkActive + if !sequenceLockActive || IsCoinBase(tx) { + return sequenceLock, nil + } + + // Grab the next height from the PoV of the passed blockNode to use for + // inputs present in the mempool. + nextHeight := node.height + 1 + + for txInIndex, txIn := range mTx.TxIn { + utxo := utxoView.LookupEntry(txIn.PreviousOutPoint) + if utxo == nil { + str := fmt.Sprintf("output %v referenced from "+ + "transaction %s:%d either does not exist or "+ + "has already been spent", txIn.PreviousOutPoint, + tx.Hash(), txInIndex) + return sequenceLock, ruleError(ErrMissingTxOut, str) + } + + // If the input height is set to the mempool height, then we + // assume the transaction makes it into the next block when + // evaluating its sequence locks. + inputHeight := utxo.BlockHeight() + if inputHeight == 0x7fffffff { + inputHeight = nextHeight + } + + // Given a sequence number, we apply the relative time lock + // mask in order to obtain the time lock delta required before + // this input can be spent. + sequenceNum := txIn.Sequence + relativeLock := int64(sequenceNum & wire.SequenceLockTimeMask) + + switch { + // Relative time locks are disabled for this input, so we can + // skip any further calculation. 
+ case sequenceNum&wire.SequenceLockTimeDisabled == wire.SequenceLockTimeDisabled: + continue + case sequenceNum&wire.SequenceLockTimeIsSeconds == wire.SequenceLockTimeIsSeconds: + // This input requires a relative time lock expressed + // in seconds before it can be spent. Therefore, we + // need to query for the block prior to the one in + // which this input was included within so we can + // compute the past median time for the block prior to + // the one which included this referenced output. + prevInputHeight := inputHeight - 1 + if prevInputHeight < 0 { + prevInputHeight = 0 + } + blockNode := node.Ancestor(prevInputHeight) + medianTime := blockNode.CalcPastMedianTime() + + // Time based relative time-locks as defined by BIP 68 + // have a time granularity of RelativeLockSeconds, so + // we shift left by this amount to convert to the + // proper relative time-lock. We also subtract one from + // the relative lock to maintain the original lockTime + // semantics. + timeLockSeconds := (relativeLock << wire.SequenceLockTimeGranularity) - 1 + timeLock := medianTime.Unix() + timeLockSeconds + if timeLock > sequenceLock.Seconds { + sequenceLock.Seconds = timeLock + } + default: + // The relative lock-time for this input is expressed + // in blocks so we calculate the relative offset from + // the input's height as its converted absolute + // lock-time. We subtract one from the relative lock in + // order to maintain the original lockTime semantics. + blockHeight := inputHeight + int32(relativeLock-1) + if blockHeight > sequenceLock.BlockHeight { + sequenceLock.BlockHeight = blockHeight + } + } + } + + return sequenceLock, nil +} + +// LockTimeToSequence converts the passed relative locktime to a sequence +// number in accordance to BIP-68. 
+// See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki +// * (Compatibility) +func LockTimeToSequence(isSeconds bool, locktime uint32) uint32 { + // If we're expressing the relative lock time in blocks, then the + // corresponding sequence number is simply the desired input age. + if !isSeconds { + return locktime + } + + // Set the 22nd bit which indicates the lock time is in seconds, then + // shift the locktime over by 9 since the time granularity is in + // 512-second intervals (2^9). This results in a max lock-time of + // 33,553,920 seconds, or 1.1 years. + return wire.SequenceLockTimeIsSeconds | + locktime>>wire.SequenceLockTimeGranularity +} + +// getReorganizeNodes finds the fork point between the main chain and the passed +// node and returns a list of block nodes that would need to be detached from +// the main chain and a list of block nodes that would need to be attached to +// the fork point (which will be the end of the main chain after detaching the +// returned list of block nodes) in order to reorganize the chain such that the +// passed node is the new end of the main chain. The lists will be empty if the +// passed node is not on a side chain. +// +// This function may modify node statuses in the block index without flushing. +// +// This function MUST be called with the chain state lock held (for reads). +func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) { + attachNodes := list.New() + detachNodes := list.New() + + // Do not reorganize to a known invalid chain. Ancestors deeper than the + // direct parent are checked below but this is a quick check before doing + // more unnecessary work. + if b.index.NodeStatus(node.parent).KnownInvalid() { + b.index.SetStatusFlags(node, statusInvalidAncestor) + return detachNodes, attachNodes + } + + // Find the fork point (if any) adding each block to the list of nodes + // to attach to the main tree. 
Push them onto the list in reverse order + // so they are attached in the appropriate order when iterating the list + // later. + forkNode := b.bestChain.FindFork(node) + invalidChain := false + for n := node; n != nil && n != forkNode; n = n.parent { + if b.index.NodeStatus(n).KnownInvalid() { + invalidChain = true + break + } + attachNodes.PushFront(n) + } + + // If any of the node's ancestors are invalid, unwind attachNodes, marking + // each one as invalid for future reference. + if invalidChain { + var next *list.Element + for e := attachNodes.Front(); e != nil; e = next { + next = e.Next() + n := attachNodes.Remove(e).(*blockNode) + b.index.SetStatusFlags(n, statusInvalidAncestor) + } + return detachNodes, attachNodes + } + + // Start from the end of the main chain and work backwards until the + // common ancestor adding each block to the list of nodes to detach from + // the main chain. + for n := b.bestChain.Tip(); n != nil && n != forkNode; n = n.parent { + detachNodes.PushBack(n) + } + + return detachNodes, attachNodes +} + +// connectBlock handles connecting the passed node/block to the end of the main +// (best) chain. +// +// This passed utxo view must have all referenced txos the block spends marked +// as spent and all of the new txos the block creates added to it. In addition, +// the passed stxos slice must be populated with all of the information for the +// spent txos. This approach is used because the connection validation that +// must happen prior to calling this function requires the same details, so +// it would be inefficient to repeat it. +// +// This function MUST be called with the chain state lock held (for writes). +func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, + view *UtxoViewpoint, stxos []SpentTxOut) error { + + // Make sure it's extending the end of the best chain. 
+ prevHash := &block.MsgBlock().Header.PrevBlock + if !prevHash.IsEqual(&b.bestChain.Tip().hash) { + return AssertError("connectBlock must be called with a block " + + "that extends the main chain") + } + + // Sanity check the correct number of stxos are provided. + if len(stxos) != countSpentOutputs(block) { + return AssertError("connectBlock called with inconsistent " + + "spent transaction out information") + } + + // No warnings about unknown rules or versions until the chain is + // current. + if b.isCurrent() { + // Warn if any unknown new rules are either about to activate or + // have already been activated. + if err := b.warnUnknownRuleActivations(node); err != nil { + return err + } + + // Warn if a high enough percentage of the last blocks have + // unexpected versions. + if err := b.warnUnknownVersions(node); err != nil { + return err + } + } + + // Write any block status changes to DB before updating best state. + err := b.index.flushToDB() + if err != nil { + return err + } + + // Generate a new best state snapshot that will be used to update the + // database and later memory if all database updates are successful. + b.stateLock.RLock() + curTotalTxns := b.stateSnapshot.TotalTxns + b.stateLock.RUnlock() + numTxns := uint64(len(block.MsgBlock().Transactions)) + blockSize := uint64(block.MsgBlock().SerializeSize()) + blockWeight := uint64(GetBlockWeight(block)) + state := newBestState(node, blockSize, blockWeight, numTxns, + curTotalTxns+numTxns, node.CalcPastMedianTime()) + + // Atomically insert info into the database. + err = b.db.Update(func(dbTx database.Tx) error { + // Update best block state. + err := dbPutBestState(dbTx, state, node.workSum) + if err != nil { + return err + } + + // Add the block hash and height to the block index which tracks + // the main chain. + err = dbPutBlockIndex(dbTx, block.Hash(), node.height) + if err != nil { + return err + } + + // Update the utxo set using the state of the utxo view. 
This + // entails removing all of the utxos spent and adding the new + // ones created by the block. + err = dbPutUtxoView(dbTx, view) + if err != nil { + return err + } + + // Update the transaction spend journal by adding a record for + // the block that contains all txos spent by it. + err = dbPutSpendJournalEntry(dbTx, block.Hash(), stxos) + if err != nil { + return err + } + + // Allow the index manager to call each of the currently active + // optional indexes with the block being connected so they can + // update themselves accordingly. + if b.indexManager != nil { + err := b.indexManager.ConnectBlock(dbTx, block, stxos) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + // Prune fully spent entries and mark all entries in the view unmodified + // now that the modifications have been committed to the database. + view.commit() + + // This node is now the end of the best chain. + b.bestChain.SetTip(node) + + // Update the state for the best block. Notice how this replaces the + // entire struct instead of updating the existing one. This effectively + // allows the old version to act as a snapshot which callers can use + // freely without needing to hold a lock for the duration. See the + // comments on the state variable for more details. + b.stateLock.Lock() + b.stateSnapshot = state + b.stateLock.Unlock() + + // Notify the caller that the block was connected to the main chain. + // The caller would typically want to react with actions such as + // updating wallets. + b.chainLock.Unlock() + b.sendNotification(NTBlockConnected, block) + b.chainLock.Lock() + + return nil +} + +func (b *BlockChain) connectBlockV2(node *blockNode, block *btcutil.Block, + view *UtxoViewpoint) error { + + // Make sure it's extending the end of the best chain. 
+ prevHash := &block.MsgBlock().Header.PrevBlock + if !prevHash.IsEqual(&b.bestChain.Tip().hash) { + return AssertError("connectBlock must be called with a block " + + "that extends the main chain") + } + + // No warnings about unknown rules or versions until the chain is + // current. + if b.isCurrent() { + // Warn if any unknown new rules are either about to activate or + // have already been activated. + if err := b.warnUnknownRuleActivations(node); err != nil { + return err + } + + // Warn if a high enough percentage of the last blocks have + // unexpected versions. + if err := b.warnUnknownVersions(node); err != nil { + return err + } + } + + // Write any block status changes to DB before updating best state. + err := b.index.flushToDB() + if err != nil { + return err + } + + // Generate a new best state snapshot that will be used to update the + // database and later memory if all database updates are successful. + b.stateLock.RLock() + curTotalTxns := b.stateSnapshot.TotalTxns + b.stateLock.RUnlock() + numTxns := uint64(len(block.MsgBlock().Transactions)) + blockSize := uint64(block.MsgBlock().SerializeSize()) + blockWeight := uint64(GetBlockWeight(block)) + state := newBestState(node, blockSize, blockWeight, numTxns, + curTotalTxns+numTxns, node.CalcPastMedianTime()) + + // Atomically insert info into the database. + err = b.db.Update(func(dbTx database.Tx) error { + // Update best block state. + err := dbPutBestState(dbTx, state, node.workSum) + if err != nil { + return err + } + + // Add the block hash and height to the block index which tracks + // the main chain. + err = dbPutBlockIndex(dbTx, block.Hash(), node.height) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return err + } + + // Prune fully spent entries and mark all entries in the view unmodified + // now that the modifications have been committed to the database. + view.commit() + + // This node is now the end of the best chain. 
+ b.bestChain.SetTip(node) + + // Update the state for the best block. Notice how this replaces the + // entire struct instead of updating the existing one. This effectively + // allows the old version to act as a snapshot which callers can use + // freely without needing to hold a lock for the duration. See the + // comments on the state variable for more details. + b.stateLock.Lock() + b.stateSnapshot = state + b.stateLock.Unlock() + + return nil +} + +// disconnectBlock handles disconnecting the passed node/block from the end of +// the main (best) chain. +// +// This function MUST be called with the chain state lock held (for writes). +func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { + // Make sure the node being disconnected is the end of the best chain. + if !node.hash.IsEqual(&b.bestChain.Tip().hash) { + return AssertError("disconnectBlock must be called with the " + + "block at the end of the main chain") + } + + // Load the previous block since some details for it are needed below. + prevNode := node.parent + var prevBlock *btcutil.Block + err := b.db.View(func(dbTx database.Tx) error { + var err error + prevBlock, err = dbFetchBlockByNode(dbTx, prevNode) + return err + }) + if err != nil { + return err + } + + // Write any block status changes to DB before updating best state. + err = b.index.flushToDB() + if err != nil { + return err + } + + // Generate a new best state snapshot that will be used to update the + // database and later memory if all database updates are successful. 
 + b.stateLock.RLock() + curTotalTxns := b.stateSnapshot.TotalTxns + b.stateLock.RUnlock() + numTxns := uint64(len(prevBlock.MsgBlock().Transactions)) + blockSize := uint64(prevBlock.MsgBlock().SerializeSize()) + blockWeight := uint64(GetBlockWeight(prevBlock)) + newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions)) + state := newBestState(prevNode, blockSize, blockWeight, numTxns, + newTotalTxns, prevNode.CalcPastMedianTime()) + + err = b.db.Update(func(dbTx database.Tx) error { + // Update best block state. + err := dbPutBestState(dbTx, state, node.workSum) + if err != nil { + return err + } + + // Remove the block hash and height from the block index which + // tracks the main chain. + err = dbRemoveBlockIndex(dbTx, block.Hash(), node.height) + if err != nil { + return err + } + + // Update the utxo set using the state of the utxo view. This + // entails restoring all of the utxos spent and removing the new + // ones created by the block. + err = dbPutUtxoView(dbTx, view) + if err != nil { + return err + } + + // Before we delete the spend journal entry for this block, + // we'll fetch it as is so the indexers can utilize if needed. + stxos, err := dbFetchSpendJournalEntry(dbTx, block) + if err != nil { + return err + } + + // Update the transaction spend journal by removing the record + // that contains all txos spent by the block. + err = dbRemoveSpendJournalEntry(dbTx, block.Hash()) + if err != nil { + return err + } + + // Allow the index manager to call each of the currently active + // optional indexes with the block being disconnected so they + // can update themselves accordingly. + if b.indexManager != nil { + err := b.indexManager.DisconnectBlock(dbTx, block, stxos) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + // Prune fully spent entries and mark all entries in the view unmodified + // now that the modifications have been committed to the database. 
+ view.commit() + + // This node's parent is now the end of the best chain. + b.bestChain.SetTip(node.parent) + + // Update the state for the best block. Notice how this replaces the + // entire struct instead of updating the existing one. This effectively + // allows the old version to act as a snapshot which callers can use + // freely without needing to hold a lock for the duration. See the + // comments on the state variable for more details. + b.stateLock.Lock() + b.stateSnapshot = state + b.stateLock.Unlock() + + // Notify the caller that the block was disconnected from the main + // chain. The caller would typically want to react with actions such as + // updating wallets. + b.chainLock.Unlock() + b.sendNotification(NTBlockDisconnected, block) + b.chainLock.Lock() + + return nil +} + +func (b *BlockChain) disconnectBlockV2(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { + // Make sure the node being disconnected is the end of the best chain. + if !node.hash.IsEqual(&b.bestChain.Tip().hash) { + return AssertError("disconnectBlock must be called with the " + + "block at the end of the main chain") + } + + // Load the previous block since some details for it are needed below. + prevNode := node.parent + var prevBlock *btcutil.Block + err := b.db.View(func(dbTx database.Tx) error { + var err error + prevBlock, err = dbFetchBlockByNode(dbTx, prevNode) + return err + }) + if err != nil { + return err + } + + // Write any block status changes to DB before updating best state. + err = b.index.flushToDB() + if err != nil { + return err + } + + // Generate a new best state snapshot that will be used to update the + // database and later memory if all database updates are successful. 
+ b.stateLock.RLock() + curTotalTxns := b.stateSnapshot.TotalTxns + b.stateLock.RUnlock() + numTxns := uint64(len(prevBlock.MsgBlock().Transactions)) + blockSize := uint64(prevBlock.MsgBlock().SerializeSize()) + blockWeight := uint64(GetBlockWeight(prevBlock)) + newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions)) + state := newBestState(prevNode, blockSize, blockWeight, numTxns, + newTotalTxns, prevNode.CalcPastMedianTime()) + + err = b.db.Update(func(dbTx database.Tx) error { + // Update best block state. + err := dbPutBestState(dbTx, state, node.workSum) + if err != nil { + return err + } + + // Remove the block hash and height from the block index which + // tracks the main chain. + err = dbRemoveBlockIndex(dbTx, block.Hash(), node.height) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return err + } + + // Prune fully spent entries and mark all entries in the view unmodified + // now that the modifications have been committed to the database. + view.commit() + + // This node's parent is now the end of the best chain. + b.bestChain.SetTip(node.parent) + + // Update the state for the best block. Notice how this replaces the + // entire struct instead of updating the existing one. This effectively + // allows the old version to act as a snapshot which callers can use + // freely without needing to hold a lock for the duration. See the + // comments on the state variable for more details. + b.stateLock.Lock() + b.stateSnapshot = state + b.stateLock.Unlock() + + return nil +} + +// countSpentOutputs returns the number of utxos the passed block spends. +func countSpentOutputs(block *btcutil.Block) int { + // Exclude the coinbase transaction since it can't spend anything. + var numSpent int + for _, tx := range block.Transactions()[1:] { + numSpent += len(tx.MsgTx().TxIn) + } + return numSpent +} + +func countSpentOutputsV2(block *btcutil.Block) int { + // Exclude the coinbase transaction since it can't spend anything. 
+ var numSpent int + for _, tx := range block.Transactions()[1:] { + numSpent += len(tx.MsgTx().TxIn) + } + return numSpent +} + +func (b *BlockChain) reorganizeChainV2(detachNodes, attachNodes *list.List) error { + // Nothing to do if no reorganize nodes were provided. + if detachNodes.Len() == 0 && attachNodes.Len() == 0 { + return nil + } + + // Ensure the provided nodes match the current best chain. + tip := b.bestChain.Tip() + if detachNodes.Len() != 0 { + firstDetachNode := detachNodes.Front().Value.(*blockNode) + if firstDetachNode.hash != tip.hash { + return AssertError(fmt.Sprintf("reorganize nodes to detach are "+ + "not for the current best chain -- first detach node %v, "+ + "current chain %v", &firstDetachNode.hash, &tip.hash)) + } + } + + // Ensure the provided nodes are for the same fork point. + if attachNodes.Len() != 0 && detachNodes.Len() != 0 { + firstAttachNode := attachNodes.Front().Value.(*blockNode) + lastDetachNode := detachNodes.Back().Value.(*blockNode) + if firstAttachNode.parent.hash != lastDetachNode.parent.hash { + return AssertError(fmt.Sprintf("reorganize nodes do not have the "+ + "same fork point -- first attach parent %v, last detach "+ + "parent %v", &firstAttachNode.parent.hash, + &lastDetachNode.parent.hash)) + } + } + + // Track the old and new best chains heads. + oldBest := tip + newBest := tip + + // All of the blocks to detach and related spend journal entries needed + // to unspend transaction outputs in the blocks being disconnected must + // be loaded from the database during the reorg check phase below and + // then they are needed again when doing the actual database updates. + // Rather than doing two loads, cache the loaded data into these slices. + detachBlocks := make([]*btcutil.Block, 0, detachNodes.Len()) + // detachSpentTxOuts := make([][]SpentTxOut, 0, detachNodes.Len()) + attachBlocks := make([]*btcutil.Block, 0, attachNodes.Len()) + + // Disconnect all of the blocks back to the point of the fork. 
This + // entails loading the blocks and their associated spent txos from the + // database and using that information to unspend all of the spent txos + // and remove the utxos created by the blocks. + view := NewUtxoViewpoint() + view.SetBestHash(&oldBest.hash) + for e := detachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + var block *btcutil.Block + err := b.db.View(func(dbTx database.Tx) error { + var err error + block, err = dbFetchBlockByNode(dbTx, n) + return err + }) + if err != nil { + return err + } + if n.hash != *block.Hash() { + return AssertError(fmt.Sprintf("detach block node hash %v (height "+ + "%v) does not match previous parent block hash %v", &n.hash, + n.height, block.Hash())) + } + + // Load all of the utxos referenced by the block that aren't + // already in the view. + // err = view.fetchInputUtxos(b.db, block) + // if err != nil { + // return err + // } + + // Load all of the spent txos for the block from the spend + // journal. + // var stxos []SpentTxOut + // err = b.db.View(func(dbTx database.Tx) error { + // stxos, err = dbFetchSpendJournalEntry(dbTx, block) + // return err + // }) + // if err != nil { + // return err + // } + + // Store the loaded block and spend journal entry for later. + detachBlocks = append(detachBlocks, block) + // detachSpentTxOuts = append(detachSpentTxOuts, stxos) + + // err = view.disconnectTransactions(b.db, block, stxos) + // if err != nil { + // return err + // } + + newBest = n.parent + } + + // Set the fork point only if there are nodes to attach since otherwise + // blocks are only being disconnected and thus there is no fork point. + var forkNode *blockNode + if attachNodes.Len() > 0 { + forkNode = newBest + } + + // Perform several checks to verify each block that needs to be attached + // to the main chain can be connected without violating any rules and + // without actually connecting the block. 
+ // + // NOTE: These checks could be done directly when connecting a block, + // however the downside to that approach is that if any of these checks + // fail after disconnecting some blocks or attaching others, all of the + // operations have to be rolled back to get the chain back into the + // state it was before the rule violation (or other failure). There are + // at least a couple of ways accomplish that rollback, but both involve + // tweaking the chain and/or database. This approach catches these + // issues before ever modifying the chain. + for e := attachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + + var block *btcutil.Block + err := b.db.View(func(dbTx database.Tx) error { + var err error + block, err = dbFetchBlockByNode(dbTx, n) + return err + }) + if err != nil { + return err + } + + // Store the loaded block for later. + attachBlocks = append(attachBlocks, block) + + // Skip checks if node has already been fully validated. Although + // checkConnectBlock gets skipped, we still need to update the UTXO + // view. + // if b.index.NodeStatus(n).KnownValid() { + // err = view.fetchInputUtxos(b.db, block) + // if err != nil { + // return err + // } + // err = view.connectTransactions(block, nil) + // if err != nil { + // return err + // } + + // newBest = n + // continue + // } + + // Notice the spent txout details are not requested here and + // thus will not be generated. This is done because the state + // is not being immediately written to the database, so it is + // not needed. + // + // In the case the block is determined to be invalid due to a + // rule violation, mark it as invalid and mark all of its + // descendants as having an invalid ancestor. 
+ err = b.checkConnectBlockV2(n, block, view) + if err != nil { + if _, ok := err.(RuleError); ok { + b.index.SetStatusFlags(n, statusValidateFailed) + for de := e.Next(); de != nil; de = de.Next() { + dn := de.Value.(*blockNode) + b.index.SetStatusFlags(dn, statusInvalidAncestor) + } + } + fmt.Println("Error with checkConnectBlockV2 inside reorganizeChainV2") + return err + } + b.index.SetStatusFlags(n, statusValid) + + newBest = n + } + + // Reset the view for the actual connection code below. This is + // required because the view was previously modified when checking if + // the reorg would be successful and the connection code requires the + // view to be valid from the viewpoint of each block being connected or + // disconnected. + view = NewUtxoViewpoint() + view.SetBestHash(&b.bestChain.Tip().hash) + + // Disconnect blocks from the main chain. + for i, e := 0, detachNodes.Front(); e != nil; i, e = i+1, e.Next() { + n := e.Value.(*blockNode) + block := detachBlocks[i] + + // Load all of the utxos referenced by the block that aren't + // already in the view. + // err := view.fetchInputUtxos(b.db, block) + // if err != nil { + // return err + // } + + // Update the view to unspend all of the spent txos and remove + // the utxos created by the block. + // err = view.disconnectTransactions(b.db, block, + // detachSpentTxOuts[i]) + // if err != nil { + // return err + // } + + // Update the database and chain state. + err := b.disconnectBlockV2(n, block, view) + if err != nil { + return err + } + } + + // Connect the new best chain blocks. + for i, e := 0, attachNodes.Front(); e != nil; i, e = i+1, e.Next() { + n := e.Value.(*blockNode) + block := attachBlocks[i] + + // Load all of the utxos referenced by the block that aren't + // already in the view. 
+ // err := view.fetchInputUtxos(b.db, block) + // if err != nil { + // return err + // } + + // Update the view to mark all utxos referenced by the block + // as spent and add all transactions being created by this block + // to it. Also, provide an stxo slice so the spent txout + // details are generated. + // stxos := make([]SpentTxOut, 0, countSpentOutputs(block)) + // err = view.connectTransactions(block, &stxos) + // if err != nil { + // return err + // } + + // Update the database and chain state. + err := b.connectBlockV2(n, block, view) + if err != nil { + return err + } + } + + // Log the point where the chain forked and old and new best chain + // heads. + if forkNode != nil { + log.Infof("REORGANIZE: Chain forks at %v (height %v)", forkNode.hash, + forkNode.height) + } + log.Infof("REORGANIZE: Old best chain head was %v (height %v)", + &oldBest.hash, oldBest.height) + log.Infof("REORGANIZE: New best chain head is %v (height %v)", + newBest.hash, newBest.height) + + return nil +} + +func (b *BlockChain) connectBestChainV2(node *blockNode, block *btcutil.Block, flags BehaviorFlags) (bool, error) { + + fastAdd := flags&BFFastAdd == BFFastAdd + + flushIndexState := func() { + if writeErr := b.index.flushToDB(); writeErr != nil { + log.Warnf("Error flushing block index changes to disk: %v", + writeErr) + } + } + + // We are extending the main (best) chain with a new block. This is the + // most common case. + parentHash := &block.MsgBlock().Header.PrevBlock + if parentHash.IsEqual(&b.bestChain.Tip().hash) { + // Perform several checks to verify the block can be connected + // to the main chain without violating any rules and without + // actually connecting the block. 
+ view := NewUtxoViewpoint() + view.SetBestHash(parentHash) + + err := b.checkConnectBlockV2(node, block, view) + if err == nil { + b.index.SetStatusFlags(node, statusValid) + } else if _, ok := err.(RuleError); ok { + b.index.SetStatusFlags(node, statusValidateFailed) + } else { + return false, err + } + + flushIndexState() + + if err != nil { + fmt.Println("Error with checkConnectBlockV2 inside connectBestChainV2") + return false, err + } + // Connect the block to the main chain. + err = b.connectBlockV2(node, block, view) + if err != nil { + // If we got hit with a rule error, then we'll mark + // that status of the block as invalid and flush the + // index state to disk before returning with the error. + if _, ok := err.(RuleError); ok { + b.index.SetStatusFlags( + node, statusValidateFailed, + ) + } + + flushIndexState() + + return false, err + } + + // If this is fast add, or this block node isn't yet marked as + // valid, then we'll update its status and flush the state to + // disk again. + if !b.index.NodeStatus(node).KnownValid() { + b.index.SetStatusFlags(node, statusValid) + flushIndexState() + } + + return true, nil + } + if fastAdd { + log.Warnf("fastAdd set in the side chain case? %v\n", + block.Hash()) + } + + // We're extending (or creating) a side chain, but the cumulative + // work for this new side chain is not enough to make it the new chain. + if node.workSum.Cmp(b.bestChain.Tip().workSum) <= 0 { + // Log information about how the block is forking the chain. 
+		fork := b.bestChain.FindFork(node)
+		if fork.hash.IsEqual(parentHash) {
+			log.Infof("FORK: Block %v forks the chain at height %d"+
+				"/block %v, but does not cause a reorganize",
+				node.hash, fork.height, fork.hash)
+		} else {
+			log.Infof("EXTEND FORK: Block %v extends a side chain "+
+				"which forks the chain at height %d/block %v",
+				node.hash, fork.height, fork.hash)
+		}
+
+		return false, nil
+	}
+
+	// We're extending (or creating) a side chain and the cumulative work
+	// for this new side chain is more than the old best chain, so this side
+	// chain needs to become the main chain. In order to accomplish that,
+	// find the common ancestor of both sides of the fork, disconnect the
+	// blocks that form the (now) old fork from the main chain, and attach
+	// the blocks that form the new chain to the main chain starting at the
+	// common ancestor (the point where the chain forked).
+	detachNodes, attachNodes := b.getReorganizeNodes(node)
+
+	// Reorganize the chain.
+	log.Infof("REORGANIZE: Block %v is causing a reorganize.", node.hash)
+	err := b.reorganizeChainV2(detachNodes, attachNodes)
+
+	// Either getReorganizeNodes or reorganizeChain could have made unsaved
+	// changes to the block index, so flush regardless of whether there was an
+	// error. The index would only be dirty if the block failed to connect, so
+	// we can ignore any errors writing.
+	if writeErr := b.index.flushToDB(); writeErr != nil {
+		log.Warnf("Error flushing block index changes to disk: %v", writeErr)
+	}
+
+	return err == nil, err
+}
+
+
+// isCurrent returns whether or not the chain believes it is current. Several
+// factors are used to guess, but the key factors that allow the chain to
+// believe it is current are:
+//  - Latest block height is after the latest checkpoint (if enabled)
+//  - Latest block has a timestamp newer than 24 hours ago
+//
+// This function MUST be called with the chain state lock held (for reads).
+func (b *BlockChain) isCurrent() bool { + // Not current if the latest main (best) chain height is before the + // latest known good checkpoint (when checkpoints are enabled). + checkpoint := b.LatestCheckpoint() + if checkpoint != nil && b.bestChain.Tip().height < checkpoint.Height { + return false + } + + // Not current if the latest best block has a timestamp before 24 hours + // ago. + // + // The chain appears to be current if none of the checks reported + // otherwise. + minus24Hours := b.timeSource.AdjustedTime().Add(-24 * time.Hour).Unix() + return b.bestChain.Tip().timestamp >= minus24Hours +} + +// IsCurrent returns whether or not the chain believes it is current. Several +// factors are used to guess, but the key factors that allow the chain to +// believe it is current are: +// - Latest block height is after the latest checkpoint (if enabled) +// - Latest block has a timestamp newer than 24 hours ago +// +// This function is safe for concurrent access. +func (b *BlockChain) IsCurrent() bool { + b.chainLock.RLock() + defer b.chainLock.RUnlock() + + return b.isCurrent() +} + +// BestSnapshot returns information about the current best chain block and +// related state as of the current point in time. The returned instance must be +// treated as immutable since it is shared by all callers. +// +// This function is safe for concurrent access. +func (b *BlockChain) BestSnapshot() *BestState { + b.stateLock.RLock() + snapshot := b.stateSnapshot + b.stateLock.RUnlock() + return snapshot +} + +// HeaderByHash returns the block header identified by the given hash or an +// error if it doesn't exist. Note that this will return headers from both the +// main and side chains. 
+func (b *BlockChain) HeaderByHash(hash *chainhash.Hash) (wire.BlockHeader, error) { + node := b.index.LookupNode(hash) + if node == nil { + err := fmt.Errorf("block %s is not known", hash) + return wire.BlockHeader{}, err + } + + return node.Header(), nil +} + +// MainChainHasBlock returns whether or not the block with the given hash is in +// the main chain. +// +// This function is safe for concurrent access. +func (b *BlockChain) MainChainHasBlock(hash *chainhash.Hash) bool { + node := b.index.LookupNode(hash) + return node != nil && b.bestChain.Contains(node) +} + +// BlockLocatorFromHash returns a block locator for the passed block hash. +// See BlockLocator for details on the algorithm used to create a block locator. +// +// In addition to the general algorithm referenced above, this function will +// return the block locator for the latest known tip of the main (best) chain if +// the passed hash is not currently known. +// +// This function is safe for concurrent access. +func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator { + b.chainLock.RLock() + node := b.index.LookupNode(hash) + locator := b.bestChain.blockLocator(node) + b.chainLock.RUnlock() + return locator +} + +// LatestBlockLocator returns a block locator for the latest known tip of the +// main (best) chain. +// +// This function is safe for concurrent access. +func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) { + b.chainLock.RLock() + locator := b.bestChain.BlockLocator(nil) + b.chainLock.RUnlock() + return locator, nil +} + +// BlockHeightByHash returns the height of the block with the given hash in the +// main chain. +// +// This function is safe for concurrent access. 
+func (b *BlockChain) BlockHeightByHash(hash *chainhash.Hash) (int32, error) { + node := b.index.LookupNode(hash) + if node == nil || !b.bestChain.Contains(node) { + str := fmt.Sprintf("block %s is not in the main chain", hash) + return 0, errNotInMainChain(str) + } + + return node.height, nil +} + +// BlockHashByHeight returns the hash of the block at the given height in the +// main chain. +// +// This function is safe for concurrent access. +func (b *BlockChain) BlockHashByHeight(blockHeight int32) (*chainhash.Hash, error) { + node := b.bestChain.NodeByHeight(blockHeight) + if node == nil { + str := fmt.Sprintf("no block at height %d exists", blockHeight) + return nil, errNotInMainChain(str) + + } + + return &node.hash, nil +} + +// HeightRange returns a range of block hashes for the given start and end +// heights. It is inclusive of the start height and exclusive of the end +// height. The end height will be limited to the current main chain height. +// +// This function is safe for concurrent access. +func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash, error) { + // Ensure requested heights are sane. + if startHeight < 0 { + return nil, fmt.Errorf("start height of fetch range must not "+ + "be less than zero - got %d", startHeight) + } + if endHeight < startHeight { + return nil, fmt.Errorf("end height of fetch range must not "+ + "be less than the start height - got start %d, end %d", + startHeight, endHeight) + } + + // There is nothing to do when the start and end heights are the same, + // so return now to avoid the chain view lock. + if startHeight == endHeight { + return nil, nil + } + + // Grab a lock on the chain view to prevent it from changing due to a + // reorg while building the hashes. + b.bestChain.mtx.Lock() + defer b.bestChain.mtx.Unlock() + + // When the requested start height is after the most recent best chain + // height, there is nothing to do. 
+ latestHeight := b.bestChain.tip().height + if startHeight > latestHeight { + return nil, nil + } + + // Limit the ending height to the latest height of the chain. + if endHeight > latestHeight+1 { + endHeight = latestHeight + 1 + } + + // Fetch as many as are available within the specified range. + hashes := make([]chainhash.Hash, 0, endHeight-startHeight) + for i := startHeight; i < endHeight; i++ { + hashes = append(hashes, b.bestChain.nodeByHeight(i).hash) + } + return hashes, nil +} + +// HeightToHashRange returns a range of block hashes for the given start height +// and end hash, inclusive on both ends. The hashes are for all blocks that are +// ancestors of endHash with height greater than or equal to startHeight. The +// end hash must belong to a block that is known to be valid. +// +// This function is safe for concurrent access. +func (b *BlockChain) HeightToHashRange(startHeight int32, + endHash *chainhash.Hash, maxResults int) ([]chainhash.Hash, error) { + + endNode := b.index.LookupNode(endHash) + if endNode == nil { + return nil, fmt.Errorf("no known block header with hash %v", endHash) + } + if !b.index.NodeStatus(endNode).KnownValid() { + return nil, fmt.Errorf("block %v is not yet validated", endHash) + } + endHeight := endNode.height + + if startHeight < 0 { + return nil, fmt.Errorf("start height (%d) is below 0", startHeight) + } + if startHeight > endHeight { + return nil, fmt.Errorf("start height (%d) is past end height (%d)", + startHeight, endHeight) + } + + resultsLength := int(endHeight - startHeight + 1) + if resultsLength > maxResults { + return nil, fmt.Errorf("number of results (%d) would exceed max (%d)", + resultsLength, maxResults) + } + + // Walk backwards from endHeight to startHeight, collecting block hashes. 
+ node := endNode + hashes := make([]chainhash.Hash, resultsLength) + for i := resultsLength - 1; i >= 0; i-- { + hashes[i] = node.hash + node = node.parent + } + return hashes, nil +} + +// IntervalBlockHashes returns hashes for all blocks that are ancestors of +// endHash where the block height is a positive multiple of interval. +// +// This function is safe for concurrent access. +func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int, +) ([]chainhash.Hash, error) { + + endNode := b.index.LookupNode(endHash) + if endNode == nil { + return nil, fmt.Errorf("no known block header with hash %v", endHash) + } + if !b.index.NodeStatus(endNode).KnownValid() { + return nil, fmt.Errorf("block %v is not yet validated", endHash) + } + endHeight := endNode.height + + resultsLength := int(endHeight) / interval + hashes := make([]chainhash.Hash, resultsLength) + + b.bestChain.mtx.Lock() + defer b.bestChain.mtx.Unlock() + + blockNode := endNode + for index := int(endHeight) / interval; index > 0; index-- { + // Use the bestChain chainView for faster lookups once lookup intersects + // the best chain. + blockHeight := int32(index * interval) + if b.bestChain.contains(blockNode) { + blockNode = b.bestChain.nodeByHeight(blockHeight) + } else { + blockNode = blockNode.Ancestor(blockHeight) + } + + hashes[index-1] = blockNode.hash + } + + return hashes, nil +} + +// locateInventory returns the node of the block after the first known block in +// the locator along with the number of subsequent nodes needed to either reach +// the provided stop hash or the provided max number of entries. 
+// +// In addition, there are two special cases: +// +// - When no locators are provided, the stop hash is treated as a request for +// that block, so it will either return the node associated with the stop hash +// if it is known, or nil if it is unknown +// - When locators are provided, but none of them are known, nodes starting +// after the genesis block will be returned +// +// This is primarily a helper function for the locateBlocks and locateHeaders +// functions. +// +// This function MUST be called with the chain state lock held (for reads). +func (b *BlockChain) locateInventory(locator BlockLocator, hashStop *chainhash.Hash, maxEntries uint32) (*blockNode, uint32) { + // There are no block locators so a specific block is being requested + // as identified by the stop hash. + stopNode := b.index.LookupNode(hashStop) + if len(locator) == 0 { + if stopNode == nil { + // No blocks with the stop hash were found so there is + // nothing to do. + return nil, 0 + } + return stopNode, 1 + } + + // Find the most recent locator block hash in the main chain. In the + // case none of the hashes in the locator are in the main chain, fall + // back to the genesis block. + startNode := b.bestChain.Genesis() + for _, hash := range locator { + node := b.index.LookupNode(hash) + if node != nil && b.bestChain.Contains(node) { + startNode = node + break + } + } + + // Start at the block after the most recently known block. When there + // is no next block it means the most recently known block is the tip of + // the best chain, so there is nothing more to do. + startNode = b.bestChain.Next(startNode) + if startNode == nil { + return nil, 0 + } + + // Calculate how many entries are needed. 
+ total := uint32((b.bestChain.Tip().height - startNode.height) + 1) + if stopNode != nil && b.bestChain.Contains(stopNode) && + stopNode.height >= startNode.height { + + total = uint32((stopNode.height - startNode.height) + 1) + } + if total > maxEntries { + total = maxEntries + } + + return startNode, total +} + +// locateBlocks returns the hashes of the blocks after the first known block in +// the locator until the provided stop hash is reached, or up to the provided +// max number of block hashes. +// +// See the comment on the exported function for more details on special cases. +// +// This function MUST be called with the chain state lock held (for reads). +func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash { + // Find the node after the first known block in the locator and the + // total number of nodes after it needed while respecting the stop hash + // and max entries. + node, total := b.locateInventory(locator, hashStop, maxHashes) + if total == 0 { + return nil + } + + // Populate and return the found hashes. + hashes := make([]chainhash.Hash, 0, total) + for i := uint32(0); i < total; i++ { + hashes = append(hashes, node.hash) + node = b.bestChain.Next(node) + } + return hashes +} + +// LocateBlocks returns the hashes of the blocks after the first known block in +// the locator until the provided stop hash is reached, or up to the provided +// max number of block hashes. +// +// In addition, there are two special cases: +// +// - When no locators are provided, the stop hash is treated as a request for +// that block, so it will either return the stop hash itself if it is known, +// or nil if it is unknown +// - When locators are provided, but none of them are known, hashes starting +// after the genesis block will be returned +// +// This function is safe for concurrent access. 
+func (b *BlockChain) LocateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash { + b.chainLock.RLock() + hashes := b.locateBlocks(locator, hashStop, maxHashes) + b.chainLock.RUnlock() + return hashes +} + +// locateHeaders returns the headers of the blocks after the first known block +// in the locator until the provided stop hash is reached, or up to the provided +// max number of block headers. +// +// See the comment on the exported function for more details on special cases. +// +// This function MUST be called with the chain state lock held (for reads). +func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Hash, maxHeaders uint32) []wire.BlockHeader { + // Find the node after the first known block in the locator and the + // total number of nodes after it needed while respecting the stop hash + // and max entries. + node, total := b.locateInventory(locator, hashStop, maxHeaders) + if total == 0 { + return nil + } + + // Populate and return the found headers. + headers := make([]wire.BlockHeader, 0, total) + for i := uint32(0); i < total; i++ { + headers = append(headers, node.Header()) + node = b.bestChain.Next(node) + } + return headers +} + +// LocateHeaders returns the headers of the blocks after the first known block +// in the locator until the provided stop hash is reached, or up to a max of +// wire.MaxBlockHeadersPerMsg headers. +// +// In addition, there are two special cases: +// +// - When no locators are provided, the stop hash is treated as a request for +// that header, so it will either return the header for the stop hash itself +// if it is known, or nil if it is unknown +// - When locators are provided, but none of them are known, headers starting +// after the genesis block will be returned +// +// This function is safe for concurrent access. 
+func (b *BlockChain) LocateHeaders(locator BlockLocator, hashStop *chainhash.Hash) []wire.BlockHeader {
+	b.chainLock.RLock()
+	headers := b.locateHeaders(locator, hashStop, wire.MaxBlockHeadersPerMsg)
+	b.chainLock.RUnlock()
+	return headers
+}
+
+// IndexManager provides a generic interface that is called when blocks are
+// connected and disconnected to and from the tip of the main chain for the
+// purpose of supporting optional indexes.
+type IndexManager interface {
+	// Init is invoked during chain initialize in order to allow the index
+	// manager to initialize itself and any indexes it is managing. The
+	// channel parameter specifies a channel the caller can close to signal
+	// that the process should be interrupted. It can be nil if that
+	// behavior is not desired.
+	Init(*BlockChain, <-chan struct{}) error
+
+	// ConnectBlock is invoked when a new block has been connected to the
+	// main chain. The set of output spent within a block is also passed in
+	// so indexers can access the previous output scripts input spent if
+	// required.
+	ConnectBlock(database.Tx, *btcutil.Block, []SpentTxOut) error
+
+	// DisconnectBlock is invoked when a block has been disconnected from
+	// the main chain. The set of outputs scripts that were spent within
+	// this block is also returned so indexers can clean up the prior index
+	// state for this block.
+	DisconnectBlock(database.Tx, *btcutil.Block, []SpentTxOut) error
+}
+
+// Config is a descriptor which specifies the blockchain instance configuration.
+type Config struct {
+	// DB defines the database which houses the blocks and will be used to
+	// store all metadata created by this package such as the utxo set.
+	//
+	// This field is required.
+	DB database.DB
+
+	// Interrupt specifies a channel the caller can close to signal that
+	// long running operations, such as catching up indexes or performing
+	// database migrations, should be interrupted.
+	//
+	// This field can be nil if the caller does not desire the behavior.
+	Interrupt <-chan struct{}
+
+	// ChainParams identifies which chain parameters the chain is associated
+	// with.
+	//
+	// This field is required.
+	ChainParams *chaincfg.Params
+
+	// Checkpoints hold caller-defined checkpoints that should be added to
+	// the default checkpoints in ChainParams. Checkpoints must be sorted
+	// by height.
+	//
+	// This field can be nil if the caller does not wish to specify any
+	// checkpoints.
+	Checkpoints []chaincfg.Checkpoint
+
+	// TimeSource defines the median time source to use for things such as
+	// block processing and determining whether or not the chain is current.
+	//
+	// The caller is expected to keep a reference to the time source as well
+	// and add time samples from other peers on the network so the local
+	// time is adjusted to be in agreement with other peers.
+	TimeSource MedianTimeSource
+
+	// SigCache defines a signature cache to use when validating
+	// signatures. This is typically most useful when individual
+	// transactions are already being validated prior to their inclusion in
+	// a block such as what is usually done via a transaction memory pool.
+	//
+	// This field can be nil if the caller is not interested in using a
+	// signature cache.
+	SigCache *txscript.SigCache
+
+	// IndexManager defines an index manager to use when initializing the
+	// chain and connecting and disconnecting blocks.
+	//
+	// This field can be nil if the caller does not wish to make use of an
+	// index manager.
+	IndexManager IndexManager
+
+	// HashCache defines a transaction hash mid-state cache to use when
+	// validating transactions. This cache has the potential to greatly
+	// speed up transaction validation as re-using the pre-calculated
+	// mid-state eliminates the O(N^2) validation complexity due to the
+	// SigHashAll flag.
+	//
+	// This field can be nil if the caller is not interested in using a
+	// hash cache.
+ HashCache *txscript.HashCache +} + +// New returns a BlockChain instance using the provided configuration details. +func New(config *Config) (*BlockChain, error) { + // Enforce required config fields. + if config.DB == nil { + return nil, AssertError("blockchain.New database is nil") + } + if config.ChainParams == nil { + return nil, AssertError("blockchain.New chain parameters nil") + } + if config.TimeSource == nil { + return nil, AssertError("blockchain.New timesource is nil") + } + + // Generate a checkpoint by height map from the provided checkpoints + // and assert the provided checkpoints are sorted by height as required. + var checkpointsByHeight map[int32]*chaincfg.Checkpoint + var prevCheckpointHeight int32 + if len(config.Checkpoints) > 0 { + checkpointsByHeight = make(map[int32]*chaincfg.Checkpoint) + for i := range config.Checkpoints { + checkpoint := &config.Checkpoints[i] + if checkpoint.Height <= prevCheckpointHeight { + return nil, AssertError("blockchain.New " + + "checkpoints are not sorted by height") + } + + checkpointsByHeight[checkpoint.Height] = checkpoint + prevCheckpointHeight = checkpoint.Height + } + } + + params := config.ChainParams + targetTimespan := int64(params.TargetTimespan / time.Second) + targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second) + adjustmentFactor := params.RetargetAdjustmentFactor + b := BlockChain{ + checkpoints: config.Checkpoints, + checkpointsByHeight: checkpointsByHeight, + db: config.DB, + chainParams: params, + timeSource: config.TimeSource, + sigCache: config.SigCache, + indexManager: config.IndexManager, + minRetargetTimespan: targetTimespan / adjustmentFactor, + maxRetargetTimespan: targetTimespan * adjustmentFactor, + blocksPerRetarget: int32(targetTimespan / targetTimePerBlock), + index: newBlockIndex(config.DB, params), + hashCache: config.HashCache, + bestChain: newChainView(nil), + orphans: make(map[chainhash.Hash]*orphanBlock), + prevOrphans: 
make(map[chainhash.Hash][]*orphanBlock), + warningCaches: newThresholdCaches(vbNumBits), + deploymentCaches: newThresholdCaches(chaincfg.DefinedDeployments), + } + + // Initialize the chain state from the passed database. When the db + // does not yet contain any chain state, both it and the chain state + // will be initialized to contain only the genesis block. + if err := b.initChainState(); err != nil { + return nil, err + } + + // Perform any upgrades to the various chain-specific buckets as needed. + if err := b.maybeUpgradeDbBuckets(config.Interrupt); err != nil { + return nil, err + } + + // Initialize and catch up all of the currently active optional indexes + // as needed. + if config.IndexManager != nil { + err := config.IndexManager.Init(&b, config.Interrupt) + if err != nil { + return nil, err + } + } + + // Initialize rule change threshold state caches. + if err := b.initThresholdCaches(); err != nil { + return nil, err + } + + bestNode := b.bestChain.Tip() + log.Infof("Chain state (height %d, hash %v, totaltx %d, work %v)", + bestNode.height, bestNode.hash, b.stateSnapshot.TotalTxns, + bestNode.workSum) + + return &b, nil +} + +func NewV2(config *Config, genBlk *wire.MsgBlock) (*BlockChain, error) { + // Enforce required config fields. + if config.DB == nil { + return nil, AssertError("blockchain.New database is nil") + } + if config.ChainParams == nil { + return nil, AssertError("blockchain.New chain parameters nil") + } + if config.TimeSource == nil { + return nil, AssertError("blockchain.New timesource is nil") + } + + // Generate a checkpoint by height map from the provided checkpoints + // and assert the provided checkpoints are sorted by height as required. 
+ var checkpointsByHeight map[int32]*chaincfg.Checkpoint + var prevCheckpointHeight int32 + if len(config.Checkpoints) > 0 { + checkpointsByHeight = make(map[int32]*chaincfg.Checkpoint) + for i := range config.Checkpoints { + checkpoint := &config.Checkpoints[i] + if checkpoint.Height <= prevCheckpointHeight { + return nil, AssertError("blockchain.New " + + "checkpoints are not sorted by height") + } + + checkpointsByHeight[checkpoint.Height] = checkpoint + prevCheckpointHeight = checkpoint.Height + } + } + + params := config.ChainParams + targetTimespan := int64(params.TargetTimespan / time.Second) + targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second) + adjustmentFactor := params.RetargetAdjustmentFactor + b := BlockChain{ + checkpoints: config.Checkpoints, + checkpointsByHeight: checkpointsByHeight, + db: config.DB, + chainParams: params, + timeSource: config.TimeSource, + sigCache: config.SigCache, + indexManager: config.IndexManager, + minRetargetTimespan: targetTimespan / adjustmentFactor, + maxRetargetTimespan: targetTimespan * adjustmentFactor, + blocksPerRetarget: int32(targetTimespan / targetTimePerBlock), + index: newBlockIndex(config.DB, params), + hashCache: config.HashCache, + bestChain: newChainView(nil), + orphans: make(map[chainhash.Hash]*orphanBlock), + prevOrphans: make(map[chainhash.Hash][]*orphanBlock), + warningCaches: newThresholdCaches(vbNumBits), + deploymentCaches: newThresholdCaches(chaincfg.DefinedDeployments), + } + + // Initialize the chain state from the passed database. When the db + // does not yet contain any chain state, both it and the chain state + // will be initialized to contain only the genesis block. + if err := b.initChainStateV2(genBlk); err != nil { + return nil, err + } + + // Perform any upgrades to the various chain-specific buckets as needed. 
+ if err := b.maybeUpgradeDbBuckets(config.Interrupt); err != nil { + return nil, err + } + + // Initialize and catch up all of the currently active optional indexes + // as needed. + if config.IndexManager != nil { + err := config.IndexManager.Init(&b, config.Interrupt) + if err != nil { + return nil, err + } + } + + // Initialize rule change threshold state caches. + if err := b.initThresholdCaches(); err != nil { + return nil, err + } + + bestNode := b.bestChain.Tip() + log.Infof("Chain state (height %d, hash %v, totaltx %d, work %v)", + bestNode.height, bestNode.hash, b.stateSnapshot.TotalTxns, + bestNode.workSum) + + return &b, nil +} + diff --git a/relaying/btc/chain_test.go b/relaying/btc/chain_test.go new file mode 100644 index 0000000000..38836590fb --- /dev/null +++ b/relaying/btc/chain_test.go @@ -0,0 +1,1638 @@ +package btcrelaying + +import ( + "fmt" + "testing" + "time" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +// Block100000 defines block 100,000 of the block chain. It is used to +// test Block operations. +var Block100000 = wire.MsgBlock{ + Header: wire.BlockHeader{ + Version: 1, + PrevBlock: chainhash.Hash([32]byte{ // Make go vet happy. + 0x50, 0x12, 0x01, 0x19, 0x17, 0x2a, 0x61, 0x04, + 0x21, 0xa6, 0xc3, 0x01, 0x1d, 0xd3, 0x30, 0xd9, + 0xdf, 0x07, 0xb6, 0x36, 0x16, 0xc2, 0xcc, 0x1f, + 0x1c, 0xd0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + }), // 000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250 + MerkleRoot: chainhash.Hash([32]byte{ // Make go vet happy. 
+ 0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0, + 0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22, + 0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85, + 0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3, + }), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766 + Timestamp: time.Unix(1293623863, 0), // 2010-12-29 11:57:43 +0000 UTC + Bits: 0x1b04864c, // 453281356 + Nonce: 0x10572b0f, // 274148111 + }, + Transactions: []*wire.MsgTx{ + { + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{ + 0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02, + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0x12a05f200, // 5000000000 + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25, + 0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73, + 0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7, + 0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16, + 0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24, + 0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed, + 0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28, + 0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf, + 0x84, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + { + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash([32]byte{ // Make go vet happy. 
+ 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0x2123e300, // 556000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + { + Value: 0x108e20f00, // 4444000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + { + Version: 1, + TxIn: 
[]*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash([32]byte{ // Make go vet happy. + 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, + 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, + 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, + 0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf, + }), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3 + Index: 1, + }, + SignatureScript: []byte{ + 0x47, // OP_DATA_71 + 0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf, + 0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5, + 0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34, + 0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31, + 0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee, + 0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f, + 0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c, + 0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e, + 0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01, + 0x41, // OP_DATA_65 + 0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78, + 0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5, + 0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39, + 0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21, + 0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee, + 0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3, + 0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95, + 0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85, + 0x0f, // 65-byte pubkey + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0xf4240, // 1000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04, + 0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d, + 0xad, 0xbe, 0x7e, 0x10, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + { + Value: 0x11d260c0, // 299000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1, + 0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab, + 0xb3, 0x40, 0x9c, 0xd9, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + 
}, + }, + }, + LockTime: 0, + }, + { + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash([32]byte{ // Make go vet happy. + 0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, + 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, + 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, + 0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4, + }), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2, + 0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c, + 0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd, + 0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f, + 0xd1, 0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00, + 0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14, + 0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb, + 0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c, + 0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97, + 0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18, + 0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17, + 0x54, 0x07, 0xc0, 0x94, 0x84, 0xce, 0x96, 0x94, + 0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65, + 0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f, + 0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce, + 0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f, + 0xbb, // 65-byte pubkey + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0xf4240, // 1000000 + PkScript: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7, + 0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b, + 0xf2, 0xeb, 0x9e, 0xe0, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + }, + }, +} + + +func TestReorganizeChainV2(t *testing.T) { + // Load up blocks such that there is a side chain. 
+ // (genesis block) -> 1 -> 2 -> 3 + // \-> 2a -> 3a -> 4a + testFiles := []string{ + "blk_0_to_4.dat.bz2", + "blk_3A.dat.bz2", + "blk_4A.dat.bz2", + "blk_5A.dat.bz2", + } + + var blocks []*btcutil.Block + var genBlk *wire.MsgBlock + for _, file := range testFiles { + blockTmp, err := loadBlocks(file) + if err != nil { + t.Errorf("Error loading file: %v\n", err) + return + } + blocks = append(blocks, blockTmp...) + } + + for _, block := range blocks { + if block.MsgBlock().BlockHash().String() == "00000000ebe5ec3e94d8dfe18100e5c0f3b1955bc6107fbe24d95732b814551b" { + block.MsgBlock().ClearTransactions() + genBlk = block.MsgBlock() + break + } + } + + fmt.Println(genBlk) + + chain, err := GetChain("haveblock", + &chaincfg.MainNetParams) + if err != nil { + t.Errorf("Failed to setup chain instance: %v", err) + return + } + // defer teardownFunc() + + // Since we're not dealing with the real block chain, set the coinbase + // maturity to 1. + chain.TstSetCoinbaseMaturity(1) + + for i := 1; i < len(blocks); i++ { + blocks[i].MsgBlock().ClearTransactions() + _, isOrphan, err := chain.ProcessBlockV2(blocks[i], BFNone) + + if err != nil { + t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) + return + } + if isOrphan { + t.Errorf("ProcessBlock incorrectly returned block %v "+ + "is an orphan\n", i) + return + } + fmt.Println("===== FINISHED PROCESS BLOCK ========") + } + + // Insert an orphan block. + _, isOrphan, err := chain.ProcessBlockV2(btcutil.NewBlock(&Block100000), + BFNone) + if err != nil { + t.Errorf("Unable to process block: %v", err) + return + } + if !isOrphan { + t.Errorf("ProcessBlock indicated block is an not orphan when " + + "it should be\n") + return + } + + tests := []struct { + hash string + want bool + }{ + // Genesis block should be present (in the main chain). 
+		{hash: chaincfg.MainNetParams.GenesisHash.String(), want: true},
+
+		// genesis block
+		{hash: "00000000ebe5ec3e94d8dfe18100e5c0f3b1955bc6107fbe24d95732b814551b", want: true},
+
+		// Block from the loaded test chain should be present (in the main chain).
+		{hash: "00000000952ccb1bf9b799fcd0cc654dd48363f76781f8b1c61dbf1696c39f97", want: true},
+
+		// Block from the loaded test chain should be present (in the main chain).
+		{hash: "00000000bc3589303953766cc9364130cb97bc3749bae170f476d45f1e23f850", want: true},
+
+		// Block from the loaded test chain should be present (in the main chain).
+		{hash: "000000002f264d6504013e73b9c913de9098d4d771c1bb219af475d2a01b128e", want: true},
+
+		// Block from the loaded test chain should be present (in the main chain).
+		{hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true},
+
+		// Block from the loaded test chain should be present (in the main chain).
+		{hash: "00000000551dc04c148242d1f648802577df8cf7d4e1b469211016280204a2bf", want: true},
+
+		// Random hashes should not be available.
+		{hash: "123", want: false},
+
+		// Block from the loaded test chain should be present (in the main chain).
+		{hash: "00000000195f85184e77c18914bd0febd11278d950f5e4731a38f71ed79f044e", want: true},
+
+		// Block 100000 should be present (as an orphan).
+ {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true}, + } + + for i, test := range tests { + hash, err := chainhash.NewHashFromStr(test.hash) + if err != nil { + t.Errorf("NewHashFromStr: %v", err) + continue + } + + result, err := chain.HaveBlock(hash) + if err != nil { + t.Errorf("HaveBlock #%d unexpected error: %v", i, err) + return + } + if result != test.want { + t.Errorf("HaveBlock #%d got %v want %v", i, result, + test.want) + continue + } + + fmt.Println("=== check mainchain: ", test.hash, chain.MainChainHasBlock(hash)) + + blk, err := chain.BlockByHash(hash) + if err != nil { + fmt.Println("Get block by hash error: ", err) + continue + } + fmt.Println("===== Prev block hash : ", blk.MsgBlock().Header.PrevBlock.String()) + fmt.Println("===== Merkle root : ", blk.MsgBlock().Header.MerkleRoot.String()) + fmt.Println("===== Block hash : ", blk.MsgBlock().BlockHash()) + fmt.Println("===== Bits : ", blk.MsgBlock().Header.Bits) + fmt.Println("===== Nonce : ", blk.MsgBlock().Header.Nonce) + } + + fmt.Println("Finished test have block!!!!", chain.BestSnapshot().Hash, chain.BestSnapshot().Height) + + var initialized, hasBlockIndex bool + chain.db.View(func(dbTx database.Tx) error { + fmt.Println("chainStateKeyName: ", string(chainStateKeyName)) + fmt.Println("blockIndexBucketName: ", string(blockIndexBucketName)) + initialized = dbTx.Metadata().Get(chainStateKeyName) != nil + hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil + return nil + }) + fmt.Println("initialized, hasBlockIndex: ", initialized, hasBlockIndex) + + chain.db.Close() // must have this statement because db is obtaining write lock + + // newChain, err := GetChain("haveblock", + // &chaincfg.MainNetParams) + // if err != nil { + // t.Errorf("Failed to get chain instance: %v", err) + // return + // } + // fmt.Println("info on new chain: ", newChain.BestSnapshot().Hash, newChain.BestSnapshot().Height) + // newChain.db.Close() +} + +// func 
TestReorganizeChain(t *testing.T) { +// // Load up blocks such that there is a side chain. +// // (genesis block) -> 1 -> 2 -> 3 -> 4 +// // \-> 3a -> 4a -> 5a +// testFiles := []string{ +// "blk_0_to_4.dat.bz2", +// "blk_3A.dat.bz2", +// "blk_4A.dat.bz2", +// "blk_5A.dat.bz2", +// } + +// var blocks []*btcutil.Block +// for _, file := range testFiles { +// blockTmp, err := loadBlocks(file) +// if err != nil { +// t.Errorf("Error loading file: %v\n", err) +// return +// } +// blocks = append(blocks, blockTmp...) +// } + +// // Create a new database and chain instance to run tests against. +// chain, _, err := chainSetup("haveblock", +// &chaincfg.MainNetParams) +// if err != nil { +// t.Errorf("Failed to setup chain instance: %v", err) +// return +// } +// // defer teardownFunc() + +// // Since we're not dealing with the real block chain, set the coinbase +// // maturity to 1. +// chain.TstSetCoinbaseMaturity(1) + +// for i := 1; i < len(blocks); i++ { +// blocks[i].MsgBlock().ClearTransactions() +// _, isOrphan, err := chain.ProcessBlockV2(blocks[i], BFNone) + +// if err != nil { +// t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) +// return +// } +// if isOrphan { +// t.Errorf("ProcessBlock incorrectly returned block %v "+ +// "is an orphan\n", i) +// return +// } +// fmt.Println("===== FINISHED PROCESS BLOCK ========") +// } + +// // Insert an orphan block. +// _, isOrphan, err := chain.ProcessBlockV2(btcutil.NewBlock(&Block100000), +// BFNone) +// if err != nil { +// t.Errorf("Unable to process block: %v", err) +// return +// } +// if !isOrphan { +// t.Errorf("ProcessBlock indicated block is an not orphan when " + +// "it should be\n") +// return +// } + +// tests := []struct { +// hash string +// want bool +// }{ +// // Genesis block should be present (in the main chain). +// {hash: chaincfg.MainNetParams.GenesisHash.String(), want: true}, + +// // Block 3a should be present (on a side chain). 
+// {hash: "00000000ebe5ec3e94d8dfe18100e5c0f3b1955bc6107fbe24d95732b814551b", want: true}, + +// // Block 100000 should be present (as an orphan). +// {hash: "00000000952ccb1bf9b799fcd0cc654dd48363f76781f8b1c61dbf1696c39f97", want: true}, + +// // Block 100000 should be present (as an orphan). +// {hash: "00000000bc3589303953766cc9364130cb97bc3749bae170f476d45f1e23f850", want: true}, + +// // Block 100000 should be present (as an orphan). +// {hash: "000000002f264d6504013e73b9c913de9098d4d771c1bb219af475d2a01b128e", want: true}, + +// // Block 100000 should be present (as an orphan). +// {hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true}, + +// // Block 100000 should be present (as an orphan). +// {hash: "00000000551dc04c148242d1f648802577df8cf7d4e1b469211016280204a2bf", want: true}, + +// // Random hashes should not be available. +// {hash: "123", want: false}, + +// // Block 100000 should be present (as an orphan). +// {hash: "00000000195f85184e77c18914bd0febd11278d950f5e4731a38f71ed79f044e", want: true}, + +// // Block 100000 should be present (as an orphan). 
+// {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true}, +// } + +// for i, test := range tests { +// hash, err := chainhash.NewHashFromStr(test.hash) +// if err != nil { +// t.Errorf("NewHashFromStr: %v", err) +// continue +// } + +// result, err := chain.HaveBlock(hash) +// if err != nil { +// t.Errorf("HaveBlock #%d unexpected error: %v", i, err) +// return +// } +// if result != test.want { +// t.Errorf("HaveBlock #%d got %v want %v", i, result, +// test.want) +// continue +// } + +// fmt.Println("=== check mainchain: ", test.hash, chain.MainChainHasBlock(hash)) + +// blk, err := chain.BlockByHash(hash) +// if err != nil { +// fmt.Println("Get block by hash error: ", err) +// continue +// } +// fmt.Println("===== Prev block hash : ", blk.MsgBlock().Header.PrevBlock.String()) +// fmt.Println("===== Merkle root : ", blk.MsgBlock().Header.MerkleRoot.String()) +// fmt.Println("===== Block hash : ", blk.MsgBlock().BlockHash()) +// fmt.Println("===== Bits : ", blk.MsgBlock().Header.Bits) +// fmt.Println("===== Nonce : ", blk.MsgBlock().Header.Nonce) +// } + +// fmt.Println("Finished test have block!!!!", chain.BestSnapshot().Hash, chain.BestSnapshot().Height) + +// var initialized, hasBlockIndex bool +// chain.db.View(func(dbTx database.Tx) error { +// fmt.Println("chainStateKeyName: ", string(chainStateKeyName)) +// fmt.Println("blockIndexBucketName: ", string(blockIndexBucketName)) +// initialized = dbTx.Metadata().Get(chainStateKeyName) != nil +// hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil +// return nil +// }) +// fmt.Println("initialized, hasBlockIndex: ", initialized, hasBlockIndex) + +// chain.db.Close() // must have this statement because db is obtaining write lock + +// // newChain, err := GetChain("haveblock", +// // &chaincfg.MainNetParams) +// // if err != nil { +// // t.Errorf("Failed to get chain instance: %v", err) +// // return +// // } +// // fmt.Println("info on new chain: ", 
newChain.BestSnapshot().Hash, newChain.BestSnapshot().Height) +// // newChain.db.Close() +// } + +func TestGetChainInfo(t *testing.T) { + testFiles := []string{ + "blk_0_to_4.dat.bz2", + "blk_3A.dat.bz2", + "blk_4A.dat.bz2", + "blk_5A.dat.bz2", + } + + var blocks []*btcutil.Block + var genBlk *wire.MsgBlock + for _, file := range testFiles { + blockTmp, err := loadBlocks(file) + if err != nil { + t.Errorf("Error loading file: %v\n", err) + return + } + blocks = append(blocks, blockTmp...) + } + + for _, block := range blocks { + if block.MsgBlock().BlockHash().String() == "00000000ebe5ec3e94d8dfe18100e5c0f3b1955bc6107fbe24d95732b814551b" { + block.MsgBlock().ClearTransactions() + genBlk = block.MsgBlock() + break + } + } + fmt.Println(genBlk) + + chain, err := GetChain("haveblock", + &chaincfg.MainNetParams) + if err != nil { + t.Errorf("Failed to get chain instance: %v", err) + return + } + fmt.Println("info on new new chain: ", chain.BestSnapshot().Hash, chain.BestSnapshot().Height) + + tests := []struct { + hash string + want bool + }{ + // Genesis block should be present (in the main chain). + {hash: chaincfg.MainNetParams.GenesisHash.String(), want: true}, + + // genesis block + {hash: "00000000ebe5ec3e94d8dfe18100e5c0f3b1955bc6107fbe24d95732b814551b", want: true}, + + // Block 100000 should be present (as an orphan). + {hash: "00000000952ccb1bf9b799fcd0cc654dd48363f76781f8b1c61dbf1696c39f97", want: true}, + + // Block 100000 should be present (as an orphan). + {hash: "00000000bc3589303953766cc9364130cb97bc3749bae170f476d45f1e23f850", want: true}, + + // Block 100000 should be present (as an orphan). + {hash: "000000002f264d6504013e73b9c913de9098d4d771c1bb219af475d2a01b128e", want: true}, + + // Block 100000 should be present (as an orphan). + {hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true}, + + // Block 100000 should be present (as an orphan). 
+ {hash: "00000000551dc04c148242d1f648802577df8cf7d4e1b469211016280204a2bf", want: true}, + + // Random hashes should not be available. + {hash: "123", want: false}, + + // Block 100000 should be present (as an orphan). + {hash: "00000000195f85184e77c18914bd0febd11278d950f5e4731a38f71ed79f044e", want: true}, + + // Block 100000 should be present (as an orphan). + {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: false}, + } + + for i, test := range tests { + hash, err := chainhash.NewHashFromStr(test.hash) + if err != nil { + t.Errorf("NewHashFromStr: %v", err) + continue + } + + result, err := chain.HaveBlock(hash) + if err != nil { + t.Errorf("HaveBlock #%d unexpected error: %v", i, err) + return + } + if result != test.want { + t.Errorf("HaveBlock #%d got %v want %v", i, result, + test.want) + continue + } + + fmt.Println("=== check mainchain: ", test.hash, chain.MainChainHasBlock(hash)) + } +} + +// // TestHaveBlock tests the HaveBlock API to ensure proper functionality. +// func TestHaveBlock(t *testing.T) { +// // Load up blocks such that there is a side chain. +// // (genesis block) -> 1 -> 2 -> 3 -> 4 +// // \-> 3a +// testFiles := []string{ +// "blk_0_to_4.dat.bz2", +// "blk_3A.dat.bz2", +// } + +// var blocks []*btcutil.Block +// for _, file := range testFiles { +// blockTmp, err := loadBlocks(file) +// if err != nil { +// t.Errorf("Error loading file: %v\n", err) +// return +// } +// blocks = append(blocks, blockTmp...) +// } + +// // Create a new database and chain instance to run tests against. +// chain, teardownFunc, err := chainSetup("haveblock", +// &chaincfg.MainNetParams) +// if err != nil { +// t.Errorf("Failed to setup chain instance: %v", err) +// return +// } +// defer teardownFunc() + +// // Since we're not dealing with the real block chain, set the coinbase +// // maturity to 1. 
+// chain.TstSetCoinbaseMaturity(1) + +// for i := 1; i < len(blocks); i++ { +// blocks[i].MsgBlock().ClearTransactions() +// isMainChain, isOrphan, err := chain.ProcessBlock(blocks[i], BFNone) + +// // bb, _ := json.Marshal(blocks[i].MsgBlock()) +// // fmt.Println("===== Block contain: ", string(bb)) +// fmt.Println("===== isMainChain, isOrphan: ", isMainChain, isOrphan) +// fmt.Println("===== Prev block hash : ", blocks[i].MsgBlock().Header.PrevBlock.String()) +// fmt.Println("===== Block hash : ", blocks[i].MsgBlock().BlockHash()) +// fmt.Println("===== Bits : ", blocks[i].MsgBlock().Header.Bits) +// fmt.Println("===== FINISHED PROCESS BLOCK ========") + +// if err != nil { +// t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) +// return +// } +// if isOrphan { +// t.Errorf("ProcessBlock incorrectly returned block %v "+ +// "is an orphan\n", i) +// return +// } +// } + +// // Insert an orphan block. +// _, isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000), +// BFNone) +// if err != nil { +// t.Errorf("Unable to process block: %v", err) +// return +// } +// if !isOrphan { +// t.Errorf("ProcessBlock indicated block is an not orphan when " + +// "it should be\n") +// return +// } + +// tests := []struct { +// hash string +// want bool +// }{ +// // Genesis block should be present (in the main chain). +// {hash: chaincfg.MainNetParams.GenesisHash.String(), want: true}, + +// // Block 3a should be present (on a side chain). +// {hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true}, + +// // Block 100000 should be present (as an orphan). +// {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true}, + +// // Random hashes should not be available. 
+// {hash: "123", want: false}, +// } + +// for i, test := range tests { +// hash, err := chainhash.NewHashFromStr(test.hash) +// if err != nil { +// t.Errorf("NewHashFromStr: %v", err) +// continue +// } + +// result, err := chain.HaveBlock(hash) +// if err != nil { +// t.Errorf("HaveBlock #%d unexpected error: %v", i, err) +// return +// } +// if result != test.want { +// t.Errorf("HaveBlock #%d got %v want %v", i, result, +// test.want) +// continue +// } +// } +// fmt.Println("Finished test have block!!!!") +// } + +// // TestCalcSequenceLock tests the LockTimeToSequence function, and the +// // CalcSequenceLock method of a Chain instance. The tests exercise several +// // combinations of inputs to the CalcSequenceLock function in order to ensure +// // the returned SequenceLocks are correct for each test instance. +// func TestCalcSequenceLock(t *testing.T) { +// netParams := &chaincfg.SimNetParams + +// // We need to activate CSV in order to test the processing logic, so +// // manually craft the block version that's used to signal the soft-fork +// // activation. +// csvBit := netParams.Deployments[chaincfg.DeploymentCSV].BitNumber +// blockVersion := int32(0x20000000 | (uint32(1) << csvBit)) + +// // Generate enough synthetic blocks to activate CSV. +// chain := newFakeChain(netParams) +// node := chain.bestChain.Tip() +// blockTime := node.Header().Timestamp +// numBlocksToActivate := (netParams.MinerConfirmationWindow * 3) +// for i := uint32(0); i < numBlocksToActivate; i++ { +// blockTime = blockTime.Add(time.Second) +// node = newFakeNode(node, blockVersion, 0, blockTime) +// chain.index.AddNode(node) +// chain.bestChain.SetTip(node) +// } + +// // Create a utxo view with a fake utxo for the inputs used in the +// // transactions created below. This utxo is added such that it has an +// // age of 4 blocks. 
+// targetTx := btcutil.NewTx(&wire.MsgTx{ +// TxOut: []*wire.TxOut{{ +// PkScript: nil, +// Value: 10, +// }}, +// }) +// utxoView := NewUtxoViewpoint() +// utxoView.AddTxOuts(targetTx, int32(numBlocksToActivate)-4) +// utxoView.SetBestHash(&node.hash) + +// // Create a utxo that spends the fake utxo created above for use in the +// // transactions created in the tests. It has an age of 4 blocks. Note +// // that the sequence lock heights are always calculated from the same +// // point of view that they were originally calculated from for a given +// // utxo. That is to say, the height prior to it. +// utxo := wire.OutPoint{ +// Hash: *targetTx.Hash(), +// Index: 0, +// } +// prevUtxoHeight := int32(numBlocksToActivate) - 4 + +// // Obtain the median time past from the PoV of the input created above. +// // The MTP for the input is the MTP from the PoV of the block *prior* +// // to the one that included it. +// medianTime := node.RelativeAncestor(5).CalcPastMedianTime().Unix() + +// // The median time calculated from the PoV of the best block in the +// // test chain. For unconfirmed inputs, this value will be used since +// // the MTP will be calculated from the PoV of the yet-to-be-mined +// // block. +// nextMedianTime := node.CalcPastMedianTime().Unix() +// nextBlockHeight := int32(numBlocksToActivate) + 1 + +// // Add an additional transaction which will serve as our unconfirmed +// // output. +// unConfTx := &wire.MsgTx{ +// TxOut: []*wire.TxOut{{ +// PkScript: nil, +// Value: 5, +// }}, +// } +// unConfUtxo := wire.OutPoint{ +// Hash: unConfTx.TxHash(), +// Index: 0, +// } + +// // Adding a utxo with a height of 0x7fffffff indicates that the output +// // is currently unmined. 
+// utxoView.AddTxOuts(btcutil.NewTx(unConfTx), 0x7fffffff) + +// tests := []struct { +// tx *wire.MsgTx +// view *UtxoViewpoint +// mempool bool +// want *SequenceLock +// }{ +// // A transaction of version one should disable sequence locks +// // as the new sequence number semantics only apply to +// // transactions version 2 or higher. +// { +// tx: &wire.MsgTx{ +// Version: 1, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 3), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: -1, +// BlockHeight: -1, +// }, +// }, +// // A transaction with a single input with max sequence number. +// // This sequence number has the high bit set, so sequence locks +// // should be disabled. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: wire.MaxTxInSequenceNum, +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: -1, +// BlockHeight: -1, +// }, +// }, +// // A transaction with a single input whose lock time is +// // expressed in seconds. However, the specified lock time is +// // below the required floor for time based lock times since +// // they have time granularity of 512 seconds. As a result, the +// // seconds lock-time should be just before the median time of +// // the targeted block. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 2), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: medianTime - 1, +// BlockHeight: -1, +// }, +// }, +// // A transaction with a single input whose lock time is +// // expressed in seconds. The number of seconds should be 1023 +// // seconds after the median past time of the last block in the +// // chain. 
+// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 1024), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: medianTime + 1023, +// BlockHeight: -1, +// }, +// }, +// // A transaction with multiple inputs. The first input has a +// // lock time expressed in seconds. The second input has a +// // sequence lock in blocks with a value of 4. The last input +// // has a sequence number with a value of 5, but has the disable +// // bit set. So the first lock should be selected as it's the +// // latest lock that isn't disabled. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 2560), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 4), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 5) | +// wire.SequenceLockTimeDisabled, +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: medianTime + (5 << wire.SequenceLockTimeGranularity) - 1, +// BlockHeight: prevUtxoHeight + 3, +// }, +// }, +// // Transaction with a single input. The input's sequence number +// // encodes a relative lock-time in blocks (3 blocks). The +// // sequence lock should have a value of -1 for seconds, but a +// // height of 2 meaning it can be included at height 3. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 3), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: -1, +// BlockHeight: prevUtxoHeight + 2, +// }, +// }, +// // A transaction with two inputs with lock times expressed in +// // seconds. The selected sequence lock value for seconds should +// // be the time further in the future. 
+// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 5120), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 2560), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: medianTime + (10 << wire.SequenceLockTimeGranularity) - 1, +// BlockHeight: -1, +// }, +// }, +// // A transaction with two inputs with lock times expressed in +// // blocks. The selected sequence lock value for blocks should +// // be the height further in the future, so a height of 10 +// // indicating it can be included at height 11. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 1), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 11), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: -1, +// BlockHeight: prevUtxoHeight + 10, +// }, +// }, +// // A transaction with multiple inputs. Two inputs are time +// // based, and the other two are block based. The lock lying +// // further into the future for both inputs should be chosen. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 2560), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(true, 6656), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 3), +// }, { +// PreviousOutPoint: utxo, +// Sequence: LockTimeToSequence(false, 9), +// }}, +// }, +// view: utxoView, +// want: &SequenceLock{ +// Seconds: medianTime + (13 << wire.SequenceLockTimeGranularity) - 1, +// BlockHeight: prevUtxoHeight + 8, +// }, +// }, +// // A transaction with a single unconfirmed input. As the input +// // is confirmed, the height of the input should be interpreted +// // as the height of the *next* block. 
So, a 2 block relative +// // lock means the sequence lock should be for 1 block after the +// // *next* block height, indicating it can be included 2 blocks +// // after that. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: unConfUtxo, +// Sequence: LockTimeToSequence(false, 2), +// }}, +// }, +// view: utxoView, +// mempool: true, +// want: &SequenceLock{ +// Seconds: -1, +// BlockHeight: nextBlockHeight + 1, +// }, +// }, +// // A transaction with a single unconfirmed input. The input has +// // a time based lock, so the lock time should be based off the +// // MTP of the *next* block. +// { +// tx: &wire.MsgTx{ +// Version: 2, +// TxIn: []*wire.TxIn{{ +// PreviousOutPoint: unConfUtxo, +// Sequence: LockTimeToSequence(true, 1024), +// }}, +// }, +// view: utxoView, +// mempool: true, +// want: &SequenceLock{ +// Seconds: nextMedianTime + 1023, +// BlockHeight: -1, +// }, +// }, +// } + +// t.Logf("Running %v SequenceLock tests", len(tests)) +// for i, test := range tests { +// utilTx := btcutil.NewTx(test.tx) +// seqLock, err := chain.CalcSequenceLock(utilTx, test.view, test.mempool) +// if err != nil { +// t.Fatalf("test #%d, unable to calc sequence lock: %v", i, err) +// } + +// if seqLock.Seconds != test.want.Seconds { +// t.Fatalf("test #%d got %v seconds want %v seconds", +// i, seqLock.Seconds, test.want.Seconds) +// } +// if seqLock.BlockHeight != test.want.BlockHeight { +// t.Fatalf("test #%d got height of %v want height of %v ", +// i, seqLock.BlockHeight, test.want.BlockHeight) +// } +// } +// } + +// // nodeHashes is a convenience function that returns the hashes for all of the +// // passed indexes of the provided nodes. It is used to construct expected hash +// // slices in the tests. 
+// func nodeHashes(nodes []*blockNode, indexes ...int) []chainhash.Hash { +// hashes := make([]chainhash.Hash, 0, len(indexes)) +// for _, idx := range indexes { +// hashes = append(hashes, nodes[idx].hash) +// } +// return hashes +// } + +// // nodeHeaders is a convenience function that returns the headers for all of +// // the passed indexes of the provided nodes. It is used to construct expected +// // located headers in the tests. +// func nodeHeaders(nodes []*blockNode, indexes ...int) []wire.BlockHeader { +// headers := make([]wire.BlockHeader, 0, len(indexes)) +// for _, idx := range indexes { +// headers = append(headers, nodes[idx].Header()) +// } +// return headers +// } + +// // TestLocateInventory ensures that locating inventory via the LocateHeaders and +// // LocateBlocks functions behaves as expected. +// func TestLocateInventory(t *testing.T) { +// // Construct a synthetic block chain with a block index consisting of +// // the following structure. +// // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 +// // \-> 16a -> 17a +// tip := tstTip +// chain := newFakeChain(&chaincfg.MainNetParams) +// branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18) +// branch1Nodes := chainedNodes(branch0Nodes[14], 2) +// for _, node := range branch0Nodes { +// chain.index.AddNode(node) +// } +// for _, node := range branch1Nodes { +// chain.index.AddNode(node) +// } +// chain.bestChain.SetTip(tip(branch0Nodes)) + +// // Create chain views for different branches of the overall chain to +// // simulate a local and remote node on different parts of the chain. +// localView := newChainView(tip(branch0Nodes)) +// remoteView := newChainView(tip(branch1Nodes)) + +// // Create a chain view for a completely unrelated block chain to +// // simulate a remote node on a totally different chain. 
+// unrelatedBranchNodes := chainedNodes(nil, 5) +// unrelatedView := newChainView(tip(unrelatedBranchNodes)) + +// tests := []struct { +// name string +// locator BlockLocator // locator for requested inventory +// hashStop chainhash.Hash // stop hash for locator +// maxAllowed uint32 // max to locate, 0 = wire const +// headers []wire.BlockHeader // expected located headers +// hashes []chainhash.Hash // expected located hashes +// }{ +// { +// // Empty block locators and unknown stop hash. No +// // inventory should be located. +// name: "no locators, no stop", +// locator: nil, +// hashStop: chainhash.Hash{}, +// headers: nil, +// hashes: nil, +// }, +// { +// // Empty block locators and stop hash in side chain. +// // The expected result is the requested block. +// name: "no locators, stop in side", +// locator: nil, +// hashStop: tip(branch1Nodes).hash, +// headers: nodeHeaders(branch1Nodes, 1), +// hashes: nodeHashes(branch1Nodes, 1), +// }, +// { +// // Empty block locators and stop hash in main chain. +// // The expected result is the requested block. +// name: "no locators, stop in main", +// locator: nil, +// hashStop: branch0Nodes[12].hash, +// headers: nodeHeaders(branch0Nodes, 12), +// hashes: nodeHashes(branch0Nodes, 12), +// }, +// { +// // Locators based on remote being on side chain and a +// // stop hash local node doesn't know about. The +// // expected result is the blocks after the fork point in +// // the main chain and the stop hash has no effect. +// name: "remote side chain, unknown stop", +// locator: remoteView.BlockLocator(nil), +// hashStop: chainhash.Hash{0x01}, +// headers: nodeHeaders(branch0Nodes, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 15, 16, 17), +// }, +// { +// // Locators based on remote being on side chain and a +// // stop hash in side chain. The expected result is the +// // blocks after the fork point in the main chain and the +// // stop hash has no effect. 
+// name: "remote side chain, stop in side", +// locator: remoteView.BlockLocator(nil), +// hashStop: tip(branch1Nodes).hash, +// headers: nodeHeaders(branch0Nodes, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 15, 16, 17), +// }, +// { +// // Locators based on remote being on side chain and a +// // stop hash in main chain, but before fork point. The +// // expected result is the blocks after the fork point in +// // the main chain and the stop hash has no effect. +// name: "remote side chain, stop in main before", +// locator: remoteView.BlockLocator(nil), +// hashStop: branch0Nodes[13].hash, +// headers: nodeHeaders(branch0Nodes, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 15, 16, 17), +// }, +// { +// // Locators based on remote being on side chain and a +// // stop hash in main chain, but exactly at the fork +// // point. The expected result is the blocks after the +// // fork point in the main chain and the stop hash has no +// // effect. +// name: "remote side chain, stop in main exact", +// locator: remoteView.BlockLocator(nil), +// hashStop: branch0Nodes[14].hash, +// headers: nodeHeaders(branch0Nodes, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 15, 16, 17), +// }, +// { +// // Locators based on remote being on side chain and a +// // stop hash in main chain just after the fork point. +// // The expected result is the blocks after the fork +// // point in the main chain up to and including the stop +// // hash. +// name: "remote side chain, stop in main after", +// locator: remoteView.BlockLocator(nil), +// hashStop: branch0Nodes[15].hash, +// headers: nodeHeaders(branch0Nodes, 15), +// hashes: nodeHashes(branch0Nodes, 15), +// }, +// { +// // Locators based on remote being on side chain and a +// // stop hash in main chain some time after the fork +// // point. The expected result is the blocks after the +// // fork point in the main chain up to and including the +// // stop hash. 
+// name: "remote side chain, stop in main after more", +// locator: remoteView.BlockLocator(nil), +// hashStop: branch0Nodes[16].hash, +// headers: nodeHeaders(branch0Nodes, 15, 16), +// hashes: nodeHashes(branch0Nodes, 15, 16), +// }, +// { +// // Locators based on remote being on main chain in the +// // past and a stop hash local node doesn't know about. +// // The expected result is the blocks after the known +// // point in the main chain and the stop hash has no +// // effect. +// name: "remote main chain past, unknown stop", +// locator: localView.BlockLocator(branch0Nodes[12]), +// hashStop: chainhash.Hash{0x01}, +// headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17), +// }, +// { +// // Locators based on remote being on main chain in the +// // past and a stop hash in a side chain. The expected +// // result is the blocks after the known point in the +// // main chain and the stop hash has no effect. +// name: "remote main chain past, stop in side", +// locator: localView.BlockLocator(branch0Nodes[12]), +// hashStop: tip(branch1Nodes).hash, +// headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17), +// }, +// { +// // Locators based on remote being on main chain in the +// // past and a stop hash in the main chain before that +// // point. The expected result is the blocks after the +// // known point in the main chain and the stop hash has +// // no effect. +// name: "remote main chain past, stop in main before", +// locator: localView.BlockLocator(branch0Nodes[12]), +// hashStop: branch0Nodes[11].hash, +// headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17), +// }, +// { +// // Locators based on remote being on main chain in the +// // past and a stop hash in the main chain exactly at that +// // point. 
The expected result is the blocks after the +// // known point in the main chain and the stop hash has +// // no effect. +// name: "remote main chain past, stop in main exact", +// locator: localView.BlockLocator(branch0Nodes[12]), +// hashStop: branch0Nodes[12].hash, +// headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17), +// }, +// { +// // Locators based on remote being on main chain in the +// // past and a stop hash in the main chain just after +// // that point. The expected result is the blocks after +// // the known point in the main chain and the stop hash +// // has no effect. +// name: "remote main chain past, stop in main after", +// locator: localView.BlockLocator(branch0Nodes[12]), +// hashStop: branch0Nodes[13].hash, +// headers: nodeHeaders(branch0Nodes, 13), +// hashes: nodeHashes(branch0Nodes, 13), +// }, +// { +// // Locators based on remote being on main chain in the +// // past and a stop hash in the main chain some time +// // after that point. The expected result is the blocks +// // after the known point in the main chain and the stop +// // hash has no effect. +// name: "remote main chain past, stop in main after more", +// locator: localView.BlockLocator(branch0Nodes[12]), +// hashStop: branch0Nodes[15].hash, +// headers: nodeHeaders(branch0Nodes, 13, 14, 15), +// hashes: nodeHashes(branch0Nodes, 13, 14, 15), +// }, +// { +// // Locators based on remote being at exactly the same +// // point in the main chain and a stop hash local node +// // doesn't know about. The expected result is no +// // located inventory. +// name: "remote main chain same, unknown stop", +// locator: localView.BlockLocator(nil), +// hashStop: chainhash.Hash{0x01}, +// headers: nil, +// hashes: nil, +// }, +// { +// // Locators based on remote being at exactly the same +// // point in the main chain and a stop hash at exactly +// // the same point. The expected result is no located +// // inventory. 
+// name: "remote main chain same, stop same point", +// locator: localView.BlockLocator(nil), +// hashStop: tip(branch0Nodes).hash, +// headers: nil, +// hashes: nil, +// }, +// { +// // Locators from remote that don't include any blocks +// // the local node knows. This would happen if the +// // remote node is on a completely separate chain that +// // isn't rooted with the same genesis block. The +// // expected result is the blocks after the genesis +// // block. +// name: "remote unrelated chain", +// locator: unrelatedView.BlockLocator(nil), +// hashStop: chainhash.Hash{}, +// headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, +// 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, +// 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), +// }, +// { +// // Locators from remote for second block in main chain +// // and no stop hash, but with an overridden max limit. +// // The expected result is the blocks after the second +// // block limited by the max. +// name: "remote genesis", +// locator: locatorHashes(branch0Nodes, 0), +// hashStop: chainhash.Hash{}, +// maxAllowed: 3, +// headers: nodeHeaders(branch0Nodes, 1, 2, 3), +// hashes: nodeHashes(branch0Nodes, 1, 2, 3), +// }, +// { +// // Poorly formed locator. +// // +// // Locator from remote that only includes a single +// // block on a side chain the local node knows. The +// // expected result is the blocks after the genesis +// // block since even though the block is known, it is on +// // a side chain and there are no more locators to find +// // the fork point. +// name: "weak locator, single known side block", +// locator: locatorHashes(branch1Nodes, 1), +// hashStop: chainhash.Hash{}, +// headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, +// 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, +// 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), +// }, +// { +// // Poorly formed locator. 
+// // +// // Locator from remote that only includes multiple +// // blocks on a side chain the local node knows however +// // none in the main chain. The expected result is the +// // blocks after the genesis block since even though the +// // blocks are known, they are all on a side chain and +// // there are no more locators to find the fork point. +// name: "weak locator, multiple known side blocks", +// locator: locatorHashes(branch1Nodes, 1), +// hashStop: chainhash.Hash{}, +// headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, +// 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), +// hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6, +// 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), +// }, +// { +// // Poorly formed locator. +// // +// // Locator from remote that only includes multiple +// // blocks on a side chain the local node knows however +// // none in the main chain but includes a stop hash in +// // the main chain. The expected result is the blocks +// // after the genesis block up to the stop hash since +// // even though the blocks are known, they are all on a +// // side chain and there are no more locators to find the +// // fork point. +// name: "weak locator, multiple known side blocks, stop in main", +// locator: locatorHashes(branch1Nodes, 1), +// hashStop: branch0Nodes[5].hash, +// headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5), +// hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5), +// }, +// } +// for _, test := range tests { +// // Ensure the expected headers are located. +// var headers []wire.BlockHeader +// if test.maxAllowed != 0 { +// // Need to use the unexported function to override the +// // max allowed for headers. 
+// chain.chainLock.RLock() +// headers = chain.locateHeaders(test.locator, +// &test.hashStop, test.maxAllowed) +// chain.chainLock.RUnlock() +// } else { +// headers = chain.LocateHeaders(test.locator, +// &test.hashStop) +// } +// if !reflect.DeepEqual(headers, test.headers) { +// t.Errorf("%s: unxpected headers -- got %v, want %v", +// test.name, headers, test.headers) +// continue +// } + +// // Ensure the expected block hashes are located. +// maxAllowed := uint32(wire.MaxBlocksPerMsg) +// if test.maxAllowed != 0 { +// maxAllowed = test.maxAllowed +// } +// hashes := chain.LocateBlocks(test.locator, &test.hashStop, +// maxAllowed) +// if !reflect.DeepEqual(hashes, test.hashes) { +// t.Errorf("%s: unxpected hashes -- got %v, want %v", +// test.name, hashes, test.hashes) +// continue +// } +// } +// } + +// // TestHeightToHashRange ensures that fetching a range of block hashes by start +// // height and end hash works as expected. +// func TestHeightToHashRange(t *testing.T) { +// // Construct a synthetic block chain with a block index consisting of +// // the following structure. +// // genesis -> 1 -> 2 -> ... 
-> 15 -> 16 -> 17 -> 18 +// // \-> 16a -> 17a -> 18a (unvalidated) +// tip := tstTip +// chain := newFakeChain(&chaincfg.MainNetParams) +// branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18) +// branch1Nodes := chainedNodes(branch0Nodes[14], 3) +// for _, node := range branch0Nodes { +// chain.index.SetStatusFlags(node, statusValid) +// chain.index.AddNode(node) +// } +// for _, node := range branch1Nodes { +// if node.height < 18 { +// chain.index.SetStatusFlags(node, statusValid) +// } +// chain.index.AddNode(node) +// } +// chain.bestChain.SetTip(tip(branch0Nodes)) + +// tests := []struct { +// name string +// startHeight int32 // locator for requested inventory +// endHash chainhash.Hash // stop hash for locator +// maxResults int // max to locate, 0 = wire const +// hashes []chainhash.Hash // expected located hashes +// expectError bool +// }{ +// { +// name: "blocks below tip", +// startHeight: 11, +// endHash: branch0Nodes[14].hash, +// maxResults: 10, +// hashes: nodeHashes(branch0Nodes, 10, 11, 12, 13, 14), +// }, +// { +// name: "blocks on main chain", +// startHeight: 15, +// endHash: branch0Nodes[17].hash, +// maxResults: 10, +// hashes: nodeHashes(branch0Nodes, 14, 15, 16, 17), +// }, +// { +// name: "blocks on stale chain", +// startHeight: 15, +// endHash: branch1Nodes[1].hash, +// maxResults: 10, +// hashes: append(nodeHashes(branch0Nodes, 14), +// nodeHashes(branch1Nodes, 0, 1)...), +// }, +// { +// name: "invalid start height", +// startHeight: 19, +// endHash: branch0Nodes[17].hash, +// maxResults: 10, +// expectError: true, +// }, +// { +// name: "too many results", +// startHeight: 1, +// endHash: branch0Nodes[17].hash, +// maxResults: 10, +// expectError: true, +// }, +// { +// name: "unvalidated block", +// startHeight: 15, +// endHash: branch1Nodes[2].hash, +// maxResults: 10, +// expectError: true, +// }, +// } +// for _, test := range tests { +// hashes, err := chain.HeightToHashRange(test.startHeight, &test.endHash, +// 
test.maxResults) +// if err != nil { +// if !test.expectError { +// t.Errorf("%s: unexpected error: %v", test.name, err) +// } +// continue +// } + +// if !reflect.DeepEqual(hashes, test.hashes) { +// t.Errorf("%s: unxpected hashes -- got %v, want %v", +// test.name, hashes, test.hashes) +// } +// } +// } + +// // TestIntervalBlockHashes ensures that fetching block hashes at specified +// // intervals by end hash works as expected. +// func TestIntervalBlockHashes(t *testing.T) { +// // Construct a synthetic block chain with a block index consisting of +// // the following structure. +// // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 +// // \-> 16a -> 17a -> 18a (unvalidated) +// tip := tstTip +// chain := newFakeChain(&chaincfg.MainNetParams) +// branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18) +// branch1Nodes := chainedNodes(branch0Nodes[14], 3) +// for _, node := range branch0Nodes { +// chain.index.SetStatusFlags(node, statusValid) +// chain.index.AddNode(node) +// } +// for _, node := range branch1Nodes { +// if node.height < 18 { +// chain.index.SetStatusFlags(node, statusValid) +// } +// chain.index.AddNode(node) +// } +// chain.bestChain.SetTip(tip(branch0Nodes)) + +// tests := []struct { +// name string +// endHash chainhash.Hash +// interval int +// hashes []chainhash.Hash +// expectError bool +// }{ +// { +// name: "blocks on main chain", +// endHash: branch0Nodes[17].hash, +// interval: 8, +// hashes: nodeHashes(branch0Nodes, 7, 15), +// }, +// { +// name: "blocks on stale chain", +// endHash: branch1Nodes[1].hash, +// interval: 8, +// hashes: append(nodeHashes(branch0Nodes, 7), +// nodeHashes(branch1Nodes, 0)...), +// }, +// { +// name: "no results", +// endHash: branch0Nodes[17].hash, +// interval: 20, +// hashes: []chainhash.Hash{}, +// }, +// { +// name: "unvalidated block", +// endHash: branch1Nodes[2].hash, +// interval: 8, +// expectError: true, +// }, +// } +// for _, test := range tests { +// hashes, err := 
chain.IntervalBlockHashes(&test.endHash, test.interval) +// if err != nil { +// if !test.expectError { +// t.Errorf("%s: unexpected error: %v", test.name, err) +// } +// continue +// } + +// if !reflect.DeepEqual(hashes, test.hashes) { +// t.Errorf("%s: unxpected hashes -- got %v, want %v", +// test.name, hashes, test.hashes) +// } +// } +// } diff --git a/relaying/btc/chainio.go b/relaying/btc/chainio.go new file mode 100644 index 0000000000..53502ebfe5 --- /dev/null +++ b/relaying/btc/chainio.go @@ -0,0 +1,1681 @@ +package btcrelaying + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + "sync" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +const ( + // blockHdrSize is the size of a block header. This is simply the + // constant from wire and is only provided here for convenience since + // wire.MaxBlockHeaderPayload is quite long. + blockHdrSize = wire.MaxBlockHeaderPayload + + // latestUtxoSetBucketVersion is the current version of the utxo set + // bucket that is used to track all unspent outputs. + latestUtxoSetBucketVersion = 2 + + // latestSpendJournalBucketVersion is the current version of the spend + // journal bucket that is used to track all spent transactions for use + // in reorgs. + latestSpendJournalBucketVersion = 1 +) + +var ( + // blockIndexBucketName is the name of the db bucket used to house to the + // block headers and contextual information. + blockIndexBucketName = []byte("blockheaderidx") + + // hashIndexBucketName is the name of the db bucket used to house to the + // block hash -> block height index. + hashIndexBucketName = []byte("hashidx") + + // heightIndexBucketName is the name of the db bucket used to house to + // the block height -> block hash index. + heightIndexBucketName = []byte("heightidx") + + // chainStateKeyName is the name of the db key used to store the best + // chain state. 
+ chainStateKeyName = []byte("chainstate") + + // spendJournalVersionKeyName is the name of the db key used to store + // the version of the spend journal currently in the database. + spendJournalVersionKeyName = []byte("spendjournalversion") + + // spendJournalBucketName is the name of the db bucket used to house + // transactions outputs that are spent in each block. + spendJournalBucketName = []byte("spendjournal") + + // utxoSetVersionKeyName is the name of the db key used to store the + // version of the utxo set currently in the database. + utxoSetVersionKeyName = []byte("utxosetversion") + + // utxoSetBucketName is the name of the db bucket used to house the + // unspent transaction output set. + utxoSetBucketName = []byte("utxosetv2") + + // byteOrder is the preferred byte order used for serializing numeric + // fields for storage in the database. + byteOrder = binary.LittleEndian +) + +// errNotInMainChain signifies that a block hash or height that is not in the +// main chain was requested. +type errNotInMainChain string + +// Error implements the error interface. +func (e errNotInMainChain) Error() string { + return string(e) +} + +// isNotInMainChainErr returns whether or not the passed error is an +// errNotInMainChain error. +func isNotInMainChainErr(err error) bool { + _, ok := err.(errNotInMainChain) + return ok +} + +// errDeserialize signifies that a problem was encountered when deserializing +// data. +type errDeserialize string + +// Error implements the error interface. +func (e errDeserialize) Error() string { + return string(e) +} + +// isDeserializeErr returns whether or not the passed error is an errDeserialize +// error. +func isDeserializeErr(err error) bool { + _, ok := err.(errDeserialize) + return ok +} + +// isDbBucketNotFoundErr returns whether or not the passed error is a +// database.Error with an error code of database.ErrBucketNotFound. 
+func isDbBucketNotFoundErr(err error) bool { + dbErr, ok := err.(database.Error) + return ok && dbErr.ErrorCode == database.ErrBucketNotFound +} + +// dbFetchVersion fetches an individual version with the given key from the +// metadata bucket. It is primarily used to track versions on entities such as +// buckets. It returns zero if the provided key does not exist. +func dbFetchVersion(dbTx database.Tx, key []byte) uint32 { + serialized := dbTx.Metadata().Get(key) + if serialized == nil { + return 0 + } + + return byteOrder.Uint32(serialized[:]) +} + +// dbPutVersion uses an existing database transaction to update the provided +// key in the metadata bucket to the given version. It is primarily used to +// track versions on entities such as buckets. +func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error { + var serialized [4]byte + byteOrder.PutUint32(serialized[:], version) + return dbTx.Metadata().Put(key, serialized[:]) +} + +// dbFetchOrCreateVersion uses an existing database transaction to attempt to +// fetch the provided key from the metadata bucket as a version and in the case +// it doesn't exist, it adds the entry with the provided default version and +// returns that. This is useful during upgrades to automatically handle loading +// and adding version keys as necessary. +func dbFetchOrCreateVersion(dbTx database.Tx, key []byte, defaultVersion uint32) (uint32, error) { + version := dbFetchVersion(dbTx, key) + if version == 0 { + version = defaultVersion + err := dbPutVersion(dbTx, key, version) + if err != nil { + return 0, err + } + } + + return version, nil +} + +// ----------------------------------------------------------------------------- +// The transaction spend journal consists of an entry for each block connected +// to the main chain which contains the transaction outputs the block spends +// serialized such that the order is the reverse of the order they were spent. 
+// +// This is required because reorganizing the chain necessarily entails +// disconnecting blocks to get back to the point of the fork which implies +// unspending all of the transaction outputs that each block previously spent. +// Since the utxo set, by definition, only contains unspent transaction outputs, +// the spent transaction outputs must be resurrected from somewhere. There is +// more than one way this could be done, however this is the most straight +// forward method that does not require having a transaction index and unpruned +// blockchain. +// +// NOTE: This format is NOT self describing. The additional details such as +// the number of entries (transaction inputs) are expected to come from the +// block itself and the utxo set (for legacy entries). The rationale in doing +// this is to save space. This is also the reason the spent outputs are +// serialized in the reverse order they are spent because later transactions are +// allowed to spend outputs from earlier ones in the same block. +// +// The reserved field below used to keep track of the version of the containing +// transaction when the height in the header code was non-zero, however the +// height is always non-zero now, but keeping the extra reserved field allows +// backwards compatibility. +// +// The serialized format is: +// +// [
],... +// +// Field Type Size +// header code VLQ variable +// reserved byte 1 +// compressed txout +// compressed amount VLQ variable +// compressed script []byte variable +// +// The serialized header code format is: +// bit 0 - containing transaction is a coinbase +// bits 1-x - height of the block that contains the spent txout +// +// Example 1: +// From block 170 in main blockchain. +// +// 1300320511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c +// <><><------------------------------------------------------------------> +// | | | +// | reserved compressed txout +// header code +// +// - header code: 0x13 (coinbase, height 9) +// - reserved: 0x00 +// - compressed txout 0: +// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC) +// - 0x05: special script type pay-to-pubkey +// - 0x11...5c: x-coordinate of the pubkey +// +// Example 2: +// Adapted from block 100025 in main blockchain. +// +// 8b99700091f20f006edbc6c4d31bae9f1ccc38538a114bf42de65e868b99700086c64700b2fb57eadf61e106a100a7445a8c3f67898841ec +// <----><><----------------------------------------------><----><><----------------------------------------------> +// | | | | | | +// | reserved compressed txout | reserved compressed txout +// header code header code +// +// - Last spent output: +// - header code: 0x8b9970 (not coinbase, height 100024) +// - reserved: 0x00 +// - compressed txout: +// - 0x91f20f: VLQ-encoded compressed amount for 34405000000 (344.05 BTC) +// - 0x00: special script type pay-to-pubkey-hash +// - 0x6e...86: pubkey hash +// - Second to last spent output: +// - header code: 0x8b9970 (not coinbase, height 100024) +// - reserved: 0x00 +// - compressed txout: +// - 0x86c647: VLQ-encoded compressed amount for 13761000000 (137.61 BTC) +// - 0x00: special script type pay-to-pubkey-hash +// - 0xb2...ec: pubkey hash +// ----------------------------------------------------------------------------- + +// SpentTxOut contains a spent transaction output and 
potentially additional
// contextual information such as whether or not it was contained in a coinbase
// transaction, the version of the transaction it was contained in, and which
// block height the containing transaction was included in.  As described in
// the comments above, the additional contextual information will only be valid
// when this spent txout is spending the last unspent output of the containing
// transaction.
type SpentTxOut struct {
	// Amount is the amount of the output.
	Amount int64

	// PkScript is the public key script for the output.
	PkScript []byte

	// Height is the height of the block containing the creating tx.
	Height int32

	// IsCoinBase denotes whether the creating tx is a coinbase.
	IsCoinBase bool
}

// FetchSpendJournal attempts to retrieve the spend journal, or the set of
// outputs spent for the target block. This provides a view of all the outputs
// that will be consumed once the target block is connected to the end of the
// main chain.
//
// This function is safe for concurrent access.
func (b *BlockChain) FetchSpendJournal(targetBlock *btcutil.Block) ([]SpentTxOut, error) {
	b.chainLock.RLock()
	defer b.chainLock.RUnlock()

	var spendEntries []SpentTxOut
	err := b.db.View(func(dbTx database.Tx) error {
		var err error

		spendEntries, err = dbFetchSpendJournalEntry(dbTx, targetBlock)
		return err
	})
	if err != nil {
		return nil, err
	}

	return spendEntries, nil
}

// spentTxOutHeaderCode returns the calculated header code to be used when
// serializing the provided stxo entry.
func spentTxOutHeaderCode(stxo *SpentTxOut) uint64 {
	// As described in the serialization format comments, the header code
	// encodes the height shifted over one bit and the coinbase flag in the
	// lowest bit.
+ headerCode := uint64(stxo.Height) << 1 + if stxo.IsCoinBase { + headerCode |= 0x01 + } + + return headerCode +} + +// spentTxOutSerializeSize returns the number of bytes it would take to +// serialize the passed stxo according to the format described above. +func spentTxOutSerializeSize(stxo *SpentTxOut) int { + size := serializeSizeVLQ(spentTxOutHeaderCode(stxo)) + if stxo.Height > 0 { + // The legacy v1 spend journal format conditionally tracked the + // containing transaction version when the height was non-zero, + // so this is required for backwards compat. + size += serializeSizeVLQ(0) + } + return size + compressedTxOutSize(uint64(stxo.Amount), stxo.PkScript) +} + +// putSpentTxOut serializes the passed stxo according to the format described +// above directly into the passed target byte slice. The target byte slice must +// be at least large enough to handle the number of bytes returned by the +// SpentTxOutSerializeSize function or it will panic. +func putSpentTxOut(target []byte, stxo *SpentTxOut) int { + headerCode := spentTxOutHeaderCode(stxo) + offset := putVLQ(target, headerCode) + if stxo.Height > 0 { + // The legacy v1 spend journal format conditionally tracked the + // containing transaction version when the height was non-zero, + // so this is required for backwards compat. + offset += putVLQ(target[offset:], 0) + } + return offset + putCompressedTxOut(target[offset:], uint64(stxo.Amount), + stxo.PkScript) +} + +// decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed +// by other data, into the passed stxo struct. It returns the number of bytes +// read. +func decodeSpentTxOut(serialized []byte, stxo *SpentTxOut) (int, error) { + // Ensure there are bytes to decode. + if len(serialized) == 0 { + return 0, errDeserialize("no serialized bytes") + } + + // Deserialize the header code. 
+ code, offset := deserializeVLQ(serialized) + if offset >= len(serialized) { + return offset, errDeserialize("unexpected end of data after " + + "header code") + } + + // Decode the header code. + // + // Bit 0 indicates containing transaction is a coinbase. + // Bits 1-x encode height of containing transaction. + stxo.IsCoinBase = code&0x01 != 0 + stxo.Height = int32(code >> 1) + if stxo.Height > 0 { + // The legacy v1 spend journal format conditionally tracked the + // containing transaction version when the height was non-zero, + // so this is required for backwards compat. + _, bytesRead := deserializeVLQ(serialized[offset:]) + offset += bytesRead + if offset >= len(serialized) { + return offset, errDeserialize("unexpected end of data " + + "after reserved") + } + } + + // Decode the compressed txout. + amount, pkScript, bytesRead, err := decodeCompressedTxOut( + serialized[offset:]) + offset += bytesRead + if err != nil { + return offset, errDeserialize(fmt.Sprintf("unable to decode "+ + "txout: %v", err)) + } + stxo.Amount = int64(amount) + stxo.PkScript = pkScript + return offset, nil +} + +// deserializeSpendJournalEntry decodes the passed serialized byte slice into a +// slice of spent txouts according to the format described in detail above. +// +// Since the serialization format is not self describing, as noted in the +// format comments, this function also requires the transactions that spend the +// txouts. +func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]SpentTxOut, error) { + // Calculate the total number of stxos. + var numStxos int + for _, tx := range txns { + numStxos += len(tx.TxIn) + } + + // When a block has no spent txouts there is nothing to serialize. + if len(serialized) == 0 { + // Ensure the block actually has no stxos. This should never + // happen unless there is database corruption or an empty entry + // erroneously made its way into the database. 
+ if numStxos != 0 { + return nil, AssertError(fmt.Sprintf("mismatched spend "+ + "journal serialization - no serialization for "+ + "expected %d stxos", numStxos)) + } + + return nil, nil + } + + // Loop backwards through all transactions so everything is read in + // reverse order to match the serialization order. + stxoIdx := numStxos - 1 + offset := 0 + stxos := make([]SpentTxOut, numStxos) + for txIdx := len(txns) - 1; txIdx > -1; txIdx-- { + tx := txns[txIdx] + + // Loop backwards through all of the transaction inputs and read + // the associated stxo. + for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- { + txIn := tx.TxIn[txInIdx] + stxo := &stxos[stxoIdx] + stxoIdx-- + + n, err := decodeSpentTxOut(serialized[offset:], stxo) + offset += n + if err != nil { + return nil, errDeserialize(fmt.Sprintf("unable "+ + "to decode stxo for %v: %v", + txIn.PreviousOutPoint, err)) + } + } + } + + return stxos, nil +} + +// serializeSpendJournalEntry serializes all of the passed spent txouts into a +// single byte slice according to the format described in detail above. +func serializeSpendJournalEntry(stxos []SpentTxOut) []byte { + if len(stxos) == 0 { + return nil + } + + // Calculate the size needed to serialize the entire journal entry. + var size int + for i := range stxos { + size += spentTxOutSerializeSize(&stxos[i]) + } + serialized := make([]byte, size) + + // Serialize each individual stxo directly into the slice in reverse + // order one after the other. + var offset int + for i := len(stxos) - 1; i > -1; i-- { + offset += putSpentTxOut(serialized[offset:], &stxos[i]) + } + + return serialized +} + +// dbFetchSpendJournalEntry fetches the spend journal entry for the passed block +// and deserializes it into a slice of spent txout entries. +// +// NOTE: Legacy entries will not have the coinbase flag or height set unless it +// was the final output spend in the containing transaction. 
It is up to the +// caller to handle this properly by looking the information up in the utxo set. +func dbFetchSpendJournalEntry(dbTx database.Tx, block *btcutil.Block) ([]SpentTxOut, error) { + // Exclude the coinbase transaction since it can't spend anything. + spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) + serialized := spendBucket.Get(block.Hash()[:]) + blockTxns := block.MsgBlock().Transactions[1:] + stxos, err := deserializeSpendJournalEntry(serialized, blockTxns) + if err != nil { + // Ensure any deserialization errors are returned as database + // corruption errors. + if isDeserializeErr(err) { + return nil, database.Error{ + ErrorCode: database.ErrCorruption, + Description: fmt.Sprintf("corrupt spend "+ + "information for %v: %v", block.Hash(), + err), + } + } + + return nil, err + } + + return stxos, nil +} + +// dbPutSpendJournalEntry uses an existing database transaction to update the +// spend journal entry for the given block hash using the provided slice of +// spent txouts. The spent txouts slice must contain an entry for every txout +// the transactions in the block spend in the order they are spent. +func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []SpentTxOut) error { + spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) + serialized := serializeSpendJournalEntry(stxos) + return spendBucket.Put(blockHash[:], serialized) +} + +// dbRemoveSpendJournalEntry uses an existing database transaction to remove the +// spend journal entry for the passed block hash. 
+func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error { + spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) + return spendBucket.Delete(blockHash[:]) +} + +// ----------------------------------------------------------------------------- +// The unspent transaction output (utxo) set consists of an entry for each +// unspent output using a format that is optimized to reduce space using domain +// specific compression algorithms. This format is a slightly modified version +// of the format used in Bitcoin Core. +// +// Each entry is keyed by an outpoint as specified below. It is important to +// note that the key encoding uses a VLQ, which employs an MSB encoding so +// iteration of utxos when doing byte-wise comparisons will produce them in +// order. +// +// The serialized key format is: +// +// +// Field Type Size +// hash chainhash.Hash chainhash.HashSize +// output index VLQ variable +// +// The serialized value format is: +// +//
+// +// Field Type Size +// header code VLQ variable +// compressed txout +// compressed amount VLQ variable +// compressed script []byte variable +// +// The serialized header code format is: +// bit 0 - containing transaction is a coinbase +// bits 1-x - height of the block that contains the unspent txout +// +// Example 1: +// From tx in main blockchain: +// Blk 1, 0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098:0 +// +// 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52 +// <><------------------------------------------------------------------> +// | | +// header code compressed txout +// +// - header code: 0x03 (coinbase, height 1) +// - compressed txout: +// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC) +// - 0x04: special script type pay-to-pubkey +// - 0x96...52: x-coordinate of the pubkey +// +// Example 2: +// From tx in main blockchain: +// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2 +// +// 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58 +// <----><------------------------------------------> +// | | +// header code compressed txout +// +// - header code: 0x8cf316 (not coinbase, height 113931) +// - compressed txout: +// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC) +// - 0x00: special script type pay-to-pubkey-hash +// - 0xb8...58: pubkey hash +// +// Example 3: +// From tx in main blockchain: +// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22 +// +// a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6 +// <----><--------------------------------------------------> +// | | +// header code compressed txout +// +// - header code: 0xa8a258 (not coinbase, height 338156) +// - compressed txout: +// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC) +// - 0x01: special script type pay-to-script-hash +// - 0x1d...e6: script hash +// 
----------------------------------------------------------------------------- + +// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes +// to serialize as a VLQ. +var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1) + +// outpointKeyPool defines a concurrent safe free list of byte slices used to +// provide temporary buffers for outpoint database keys. +var outpointKeyPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, chainhash.HashSize+maxUint32VLQSerializeSize) + return &b // Pointer to slice to avoid boxing alloc. + }, +} + +// outpointKey returns a key suitable for use as a database key in the utxo set +// while making use of a free list. A new buffer is allocated if there are not +// already any available on the free list. The returned byte slice should be +// returned to the free list by using the recycleOutpointKey function when the +// caller is done with it _unless_ the slice will need to live for longer than +// the caller can calculate such as when used to write to the database. +func outpointKey(outpoint wire.OutPoint) *[]byte { + // A VLQ employs an MSB encoding, so they are useful not only to reduce + // the amount of storage space, but also so iteration of utxos when + // doing byte-wise comparisons will produce them in order. + key := outpointKeyPool.Get().(*[]byte) + idx := uint64(outpoint.Index) + *key = (*key)[:chainhash.HashSize+serializeSizeVLQ(idx)] + copy(*key, outpoint.Hash[:]) + putVLQ((*key)[chainhash.HashSize:], idx) + return key +} + +// recycleOutpointKey puts the provided byte slice, which should have been +// obtained via the outpointKey function, back on the free list. +func recycleOutpointKey(key *[]byte) { + outpointKeyPool.Put(key) +} + +// utxoEntryHeaderCode returns the calculated header code to be used when +// serializing the provided utxo entry. 
+func utxoEntryHeaderCode(entry *UtxoEntry) (uint64, error) { + if entry.IsSpent() { + return 0, AssertError("attempt to serialize spent utxo header") + } + + // As described in the serialization format comments, the header code + // encodes the height shifted over one bit and the coinbase flag in the + // lowest bit. + headerCode := uint64(entry.BlockHeight()) << 1 + if entry.IsCoinBase() { + headerCode |= 0x01 + } + + return headerCode, nil +} + +// serializeUtxoEntry returns the entry serialized to a format that is suitable +// for long-term storage. The format is described in detail above. +func serializeUtxoEntry(entry *UtxoEntry) ([]byte, error) { + // Spent outputs have no serialization. + if entry.IsSpent() { + return nil, nil + } + + // Encode the header code. + headerCode, err := utxoEntryHeaderCode(entry) + if err != nil { + return nil, err + } + + // Calculate the size needed to serialize the entry. + size := serializeSizeVLQ(headerCode) + + compressedTxOutSize(uint64(entry.Amount()), entry.PkScript()) + + // Serialize the header code followed by the compressed unspent + // transaction output. + serialized := make([]byte, size) + offset := putVLQ(serialized, headerCode) + offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()), + entry.PkScript()) + + return serialized, nil +} + +// deserializeUtxoEntry decodes a utxo entry from the passed serialized byte +// slice into a new UtxoEntry using a format that is suitable for long-term +// storage. The format is described in detail above. +func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) { + // Deserialize the header code. + code, offset := deserializeVLQ(serialized) + if offset >= len(serialized) { + return nil, errDeserialize("unexpected end of data after header") + } + + // Decode the header code. + // + // Bit 0 indicates whether the containing transaction is a coinbase. + // Bits 1-x encode height of containing transaction. 
+ isCoinBase := code&0x01 != 0 + blockHeight := int32(code >> 1) + + // Decode the compressed unspent transaction output. + amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:]) + if err != nil { + return nil, errDeserialize(fmt.Sprintf("unable to decode "+ + "utxo: %v", err)) + } + + entry := &UtxoEntry{ + amount: int64(amount), + pkScript: pkScript, + blockHeight: blockHeight, + packedFlags: 0, + } + if isCoinBase { + entry.packedFlags |= tfCoinBase + } + + return entry, nil +} + +// dbFetchUtxoEntryByHash attempts to find and fetch a utxo for the given hash. +// It uses a cursor and seek to try and do this as efficiently as possible. +// +// When there are no entries for the provided hash, nil will be returned for the +// both the entry and the error. +func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) { + // Attempt to find an entry by seeking for the hash along with a zero + // index. Due to the fact the keys are serialized as , + // where the index uses an MSB encoding, if there are any entries for + // the hash at all, one will be found. + cursor := dbTx.Metadata().Bucket(utxoSetBucketName).Cursor() + key := outpointKey(wire.OutPoint{Hash: *hash, Index: 0}) + ok := cursor.Seek(*key) + recycleOutpointKey(key) + if !ok { + return nil, nil + } + + // An entry was found, but it could just be an entry with the next + // highest hash after the requested one, so make sure the hashes + // actually match. + cursorKey := cursor.Key() + if len(cursorKey) < chainhash.HashSize { + return nil, nil + } + if !bytes.Equal(hash[:], cursorKey[:chainhash.HashSize]) { + return nil, nil + } + + return deserializeUtxoEntry(cursor.Value()) +} + +// dbFetchUtxoEntry uses an existing database transaction to fetch the specified +// transaction output from the utxo set. +// +// When there is no entry for the provided output, nil will be returned for both +// the entry and the error. 
+func dbFetchUtxoEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, error) { + // Fetch the unspent transaction output information for the passed + // transaction output. Return now when there is no entry. + key := outpointKey(outpoint) + utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) + serializedUtxo := utxoBucket.Get(*key) + recycleOutpointKey(key) + if serializedUtxo == nil { + return nil, nil + } + + // A non-nil zero-length entry means there is an entry in the database + // for a spent transaction output which should never be the case. + if len(serializedUtxo) == 0 { + return nil, AssertError(fmt.Sprintf("database contains entry "+ + "for spent tx output %v", outpoint)) + } + + // Deserialize the utxo entry and return it. + entry, err := deserializeUtxoEntry(serializedUtxo) + if err != nil { + // Ensure any deserialization errors are returned as database + // corruption errors. + if isDeserializeErr(err) { + return nil, database.Error{ + ErrorCode: database.ErrCorruption, + Description: fmt.Sprintf("corrupt utxo entry "+ + "for %v: %v", outpoint, err), + } + } + + return nil, err + } + + return entry, nil +} + +// dbPutUtxoView uses an existing database transaction to update the utxo set +// in the database based on the provided utxo view contents and state. In +// particular, only the entries that have been marked as modified are written +// to the database. +func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error { + utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) + for outpoint, entry := range view.entries { + // No need to update the database if the entry was not modified. + if entry == nil || !entry.isModified() { + continue + } + + // Remove the utxo entry if it is spent. + if entry.IsSpent() { + key := outpointKey(outpoint) + err := utxoBucket.Delete(*key) + recycleOutpointKey(key) + if err != nil { + return err + } + + continue + } + + // Serialize and store the utxo entry. 
+ serialized, err := serializeUtxoEntry(entry) + if err != nil { + return err + } + key := outpointKey(outpoint) + err = utxoBucket.Put(*key, serialized) + // NOTE: The key is intentionally not recycled here since the + // database interface contract prohibits modifications. It will + // be garbage collected normally when the database is done with + // it. + if err != nil { + return err + } + } + + return nil +} + +// ----------------------------------------------------------------------------- +// The block index consists of two buckets with an entry for every block in the +// main chain. One bucket is for the hash to height mapping and the other is +// for the height to hash mapping. +// +// The serialized format for values in the hash to height bucket is: +// +// +// Field Type Size +// height uint32 4 bytes +// +// The serialized format for values in the height to hash bucket is: +// +// +// Field Type Size +// hash chainhash.Hash chainhash.HashSize +// ----------------------------------------------------------------------------- + +// dbPutBlockIndex uses an existing database transaction to update or add the +// block index entries for the hash to height and height to hash mappings for +// the provided values. +func dbPutBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error { + // Serialize the height for use in the index entries. + var serializedHeight [4]byte + byteOrder.PutUint32(serializedHeight[:], uint32(height)) + + // Add the block hash to height mapping to the index. + meta := dbTx.Metadata() + hashIndex := meta.Bucket(hashIndexBucketName) + if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil { + return err + } + + // Add the block height to hash mapping to the index. 
+ heightIndex := meta.Bucket(heightIndexBucketName) + return heightIndex.Put(serializedHeight[:], hash[:]) +} + +// dbRemoveBlockIndex uses an existing database transaction remove block index +// entries from the hash to height and height to hash mappings for the provided +// values. +func dbRemoveBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error { + // Remove the block hash to height mapping. + meta := dbTx.Metadata() + hashIndex := meta.Bucket(hashIndexBucketName) + if err := hashIndex.Delete(hash[:]); err != nil { + return err + } + + // Remove the block height to hash mapping. + var serializedHeight [4]byte + byteOrder.PutUint32(serializedHeight[:], uint32(height)) + heightIndex := meta.Bucket(heightIndexBucketName) + return heightIndex.Delete(serializedHeight[:]) +} + +// dbFetchHeightByHash uses an existing database transaction to retrieve the +// height for the provided hash from the index. +func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) { + meta := dbTx.Metadata() + hashIndex := meta.Bucket(hashIndexBucketName) + serializedHeight := hashIndex.Get(hash[:]) + if serializedHeight == nil { + str := fmt.Sprintf("block %s is not in the main chain", hash) + return 0, errNotInMainChain(str) + } + + return int32(byteOrder.Uint32(serializedHeight)), nil +} + +// dbFetchHashByHeight uses an existing database transaction to retrieve the +// hash for the provided height from the index. 
+func dbFetchHashByHeight(dbTx database.Tx, height int32) (*chainhash.Hash, error) { + var serializedHeight [4]byte + byteOrder.PutUint32(serializedHeight[:], uint32(height)) + + meta := dbTx.Metadata() + heightIndex := meta.Bucket(heightIndexBucketName) + hashBytes := heightIndex.Get(serializedHeight[:]) + if hashBytes == nil { + str := fmt.Sprintf("no block at height %d exists", height) + return nil, errNotInMainChain(str) + } + + var hash chainhash.Hash + copy(hash[:], hashBytes) + return &hash, nil +} + +// ----------------------------------------------------------------------------- +// The best chain state consists of the best block hash and height, the total +// number of transactions up to and including those in the best block, and the +// accumulated work sum up to and including the best block. +// +// The serialized format is: +// +// +// +// Field Type Size +// block hash chainhash.Hash chainhash.HashSize +// block height uint32 4 bytes +// total txns uint64 8 bytes +// work sum length uint32 4 bytes +// work sum big.Int work sum length +// ----------------------------------------------------------------------------- + +// bestChainState represents the data to be stored the database for the current +// best chain state. +type bestChainState struct { + hash chainhash.Hash + height uint32 + totalTxns uint64 + workSum *big.Int +} + +// serializeBestChainState returns the serialization of the passed block best +// chain state. This is data to be stored in the chain state bucket. +func serializeBestChainState(state bestChainState) []byte { + // Calculate the full size needed to serialize the chain state. + workSumBytes := state.workSum.Bytes() + workSumBytesLen := uint32(len(workSumBytes)) + serializedLen := chainhash.HashSize + 4 + 8 + 4 + workSumBytesLen + + // Serialize the chain state. 
	serializedData := make([]byte, serializedLen)
	copy(serializedData[0:chainhash.HashSize], state.hash[:])
	offset := uint32(chainhash.HashSize)
	byteOrder.PutUint32(serializedData[offset:], state.height)
	offset += 4
	byteOrder.PutUint64(serializedData[offset:], state.totalTxns)
	offset += 8
	byteOrder.PutUint32(serializedData[offset:], workSumBytesLen)
	offset += 4
	copy(serializedData[offset:], workSumBytes)
	return serializedData[:]
}

// deserializeBestChainState deserializes the passed serialized best chain
// state.  This is data stored in the chain state bucket and is updated after
// every block is connected or disconnected from the main chain.
func deserializeBestChainState(serializedData []byte) (bestChainState, error) {
	// Ensure the serialized data has enough bytes to properly deserialize
	// the hash, height, total transactions, and work sum length.
	if len(serializedData) < chainhash.HashSize+16 {
		return bestChainState{}, database.Error{
			ErrorCode:   database.ErrCorruption,
			Description: "corrupt best chain state",
		}
	}

	state := bestChainState{}
	copy(state.hash[:], serializedData[0:chainhash.HashSize])
	offset := uint32(chainhash.HashSize)
	state.height = byteOrder.Uint32(serializedData[offset : offset+4])
	offset += 4
	state.totalTxns = byteOrder.Uint64(serializedData[offset : offset+8])
	offset += 8
	workSumBytesLen := byteOrder.Uint32(serializedData[offset : offset+4])
	offset += 4

	// Ensure the serialized data has enough bytes to deserialize the work
	// sum.
+ if uint32(len(serializedData[offset:])) < workSumBytesLen { + return bestChainState{}, database.Error{ + ErrorCode: database.ErrCorruption, + Description: "corrupt best chain state", + } + } + workSumBytes := serializedData[offset : offset+workSumBytesLen] + state.workSum = new(big.Int).SetBytes(workSumBytes) + + return state, nil +} + +// dbPutBestState uses an existing database transaction to update the best chain +// state with the given parameters. +func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error { + // Serialize the current best chain state. + serializedData := serializeBestChainState(bestChainState{ + hash: snapshot.Hash, + height: uint32(snapshot.Height), + totalTxns: snapshot.TotalTxns, + workSum: workSum, + }) + + // Store the current best chain state into the database. + return dbTx.Metadata().Put(chainStateKeyName, serializedData) +} + +// createChainState initializes both the database and the chain state to the +// genesis block. This includes creating the necessary buckets and inserting +// the genesis block, so it must only be called on an uninitialized database. +func (b *BlockChain) createChainState() error { + // Create a new node from the genesis block and set it as the best node. + genesisBlock := btcutil.NewBlock(b.chainParams.GenesisBlock) + genesisBlock.SetHeight(0) + header := &genesisBlock.MsgBlock().Header + node := newBlockNode(header, nil) + node.status = statusDataStored | statusValid + b.bestChain.SetTip(node) + + // Add the new node to the index which is used for faster lookups. + b.index.addNode(node) + + // Initialize the state related to the best block. Since it is the + // genesis block, use its timestamp for the median time. 
+ numTxns := uint64(len(genesisBlock.MsgBlock().Transactions)) + blockSize := uint64(genesisBlock.MsgBlock().SerializeSize()) + blockWeight := uint64(GetBlockWeight(genesisBlock)) + b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns, + numTxns, time.Unix(node.timestamp, 0)) + + // Create the initial the database chain state including creating the + // necessary index buckets and inserting the genesis block. + err := b.db.Update(func(dbTx database.Tx) error { + meta := dbTx.Metadata() + + // Create the bucket that houses the block index data. + _, err := meta.CreateBucket(blockIndexBucketName) + if err != nil { + return err + } + + // Create the bucket that houses the chain block hash to height + // index. + _, err = meta.CreateBucket(hashIndexBucketName) + if err != nil { + return err + } + + // Create the bucket that houses the chain block height to hash + // index. + _, err = meta.CreateBucket(heightIndexBucketName) + if err != nil { + return err + } + + // Create the bucket that houses the spend journal data and + // store its version. + _, err = meta.CreateBucket(spendJournalBucketName) + if err != nil { + return err + } + err = dbPutVersion(dbTx, utxoSetVersionKeyName, + latestUtxoSetBucketVersion) + if err != nil { + return err + } + + // Create the bucket that houses the utxo set and store its + // version. Note that the genesis block coinbase transaction is + // intentionally not inserted here since it is not spendable by + // consensus rules. + _, err = meta.CreateBucket(utxoSetBucketName) + if err != nil { + return err + } + err = dbPutVersion(dbTx, spendJournalVersionKeyName, + latestSpendJournalBucketVersion) + if err != nil { + return err + } + + // Save the genesis block to the block index database. + err = dbStoreBlockNode(dbTx, node) + if err != nil { + return err + } + + // Add the genesis block hash to height and height to hash + // mappings to the index. 
+ err = dbPutBlockIndex(dbTx, &node.hash, node.height) + if err != nil { + return err + } + + // Store the current best chain state into the database. + err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum) + if err != nil { + return err + } + + // Store the genesis block into the database. + return dbStoreBlock(dbTx, genesisBlock) + }) + return err +} + +func (b *BlockChain) createChainStateV2(genBlk *wire.MsgBlock) error { + // Create a new node from the genesis block and set it as the best node. + genesisBlock := btcutil.NewBlock(genBlk) + genesisBlock.SetHeight(0) + header := &genesisBlock.MsgBlock().Header + node := newBlockNode(header, nil) + node.status = statusDataStored | statusValid + b.bestChain.SetTip(node) + + // Add the new node to the index which is used for faster lookups. + b.index.addNode(node) + + // Initialize the state related to the best block. Since it is the + // genesis block, use its timestamp for the median time. + numTxns := uint64(len(genesisBlock.MsgBlock().Transactions)) + blockSize := uint64(genesisBlock.MsgBlock().SerializeSize()) + blockWeight := uint64(GetBlockWeight(genesisBlock)) + b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns, + numTxns, time.Unix(node.timestamp, 0)) + + // Create the initial the database chain state including creating the + // necessary index buckets and inserting the genesis block. + err := b.db.Update(func(dbTx database.Tx) error { + meta := dbTx.Metadata() + + // Create the bucket that houses the block index data. + _, err := meta.CreateBucket(blockIndexBucketName) + if err != nil { + return err + } + + // Create the bucket that houses the chain block hash to height + // index. + _, err = meta.CreateBucket(hashIndexBucketName) + if err != nil { + return err + } + + // Create the bucket that houses the chain block height to hash + // index. 
+ _, err = meta.CreateBucket(heightIndexBucketName) + if err != nil { + return err + } + + // Create the bucket that houses the spend journal data and + // store its version. + _, err = meta.CreateBucket(spendJournalBucketName) + if err != nil { + return err + } + err = dbPutVersion(dbTx, utxoSetVersionKeyName, + latestUtxoSetBucketVersion) + if err != nil { + return err + } + + // Create the bucket that houses the utxo set and store its + // version. Note that the genesis block coinbase transaction is + // intentionally not inserted here since it is not spendable by + // consensus rules. + _, err = meta.CreateBucket(utxoSetBucketName) + if err != nil { + return err + } + err = dbPutVersion(dbTx, spendJournalVersionKeyName, + latestSpendJournalBucketVersion) + if err != nil { + return err + } + + // Save the genesis block to the block index database. + err = dbStoreBlockNode(dbTx, node) + if err != nil { + return err + } + + // Add the genesis block hash to height and height to hash + // mappings to the index. + err = dbPutBlockIndex(dbTx, &node.hash, node.height) + if err != nil { + return err + } + + // Store the current best chain state into the database. + err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum) + if err != nil { + return err + } + + // Store the genesis block into the database. + return dbStoreBlock(dbTx, genesisBlock) + }) + return err +} + +// initChainState attempts to load and initialize the chain state from the +// database. When the db does not yet contain any chain state, both it and the +// chain state are initialized to the genesis block. +func (b *BlockChain) initChainState() error { + // Determine the state of the chain database. We may need to initialize + // everything from scratch or upgrade certain buckets. 
+ var initialized, hasBlockIndex bool + err := b.db.View(func(dbTx database.Tx) error { + initialized = dbTx.Metadata().Get(chainStateKeyName) != nil + hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil + return nil + }) + if err != nil { + return err + } + + if !initialized { + // At this point the database has not already been initialized, so + // initialize both it and the chain state to the genesis block. + return b.createChainState() + } + + if !hasBlockIndex { + err := migrateBlockIndex(b.db) + if err != nil { + return nil + } + } + + // Attempt to load the chain state from the database. + err = b.db.View(func(dbTx database.Tx) error { + // Fetch the stored chain state from the database metadata. + // When it doesn't exist, it means the database hasn't been + // initialized for use with chain yet, so break out now to allow + // that to happen under a writable database transaction. + serializedData := dbTx.Metadata().Get(chainStateKeyName) + log.Tracef("Serialized chain state: %x", serializedData) + state, err := deserializeBestChainState(serializedData) + if err != nil { + return err + } + + // Load all of the headers from the data for the known best + // chain and construct the block index accordingly. Since the + // number of nodes are already known, perform a single alloc + // for them versus a whole bunch of little ones to reduce + // pressure on the GC. + log.Infof("Loading block index...") + + blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName) + + // Determine how many blocks will be loaded into the index so we can + // allocate the right amount. 
		// Count the entries so the node slice can be allocated in one
		// shot rather than grown incrementally.
		var blockCount int32
		cursor := blockIndexBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			blockCount++
		}
		blockNodes := make([]blockNode, blockCount)

		var i int32
		var lastNode *blockNode
		cursor = blockIndexBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			header, status, err := deserializeBlockRow(cursor.Value())
			if err != nil {
				return err
			}

			// Determine the parent block node. Since we iterate block headers
			// in order of height, if the blocks are mostly linear there is a
			// very good chance the previous header processed is the parent.
			var parent *blockNode
			if lastNode == nil {
				// NOTE: the stored genesis header's PrevBlock is the empty
				// hash, so restore it from the chain parameters' genesis
				// block before hashing and comparing below.
				header.PrevBlock = b.chainParams.GenesisBlock.Header.PrevBlock
				blockHash := header.BlockHash()

				if !blockHash.IsEqual(b.chainParams.GenesisHash) {
					return AssertError(fmt.Sprintf("initChainState: Expected "+
						"first entry in block index to be genesis block, "+
						"found %s", blockHash))
				}
			} else if header.PrevBlock == lastNode.hash {
				// Since we iterate block headers in order of height, if the
				// blocks are mostly linear there is a very good chance the
				// previous header processed is the parent.
				parent = lastNode
			} else {
				parent = b.index.LookupNode(&header.PrevBlock)
				if parent == nil {
					return AssertError(fmt.Sprintf("initChainState: Could "+
						"not find parent for block %s", header.BlockHash()))
				}
			}

			// Initialize the block node for the block, connect it,
			// and add it to the block index.
			node := &blockNodes[i]
			initBlockNode(node, header, parent)
			node.status = status
			b.index.addNode(node)

			lastNode = node
			i++
		}

		// Set the best chain view to the stored best state.
+ tip := b.index.LookupNode(&state.hash) + if tip == nil { + return AssertError(fmt.Sprintf("initChainState: cannot find "+ + "chain tip %s in block index", state.hash)) + } + b.bestChain.SetTip(tip) + + // Load the raw block bytes for the best block. + blockBytes, err := dbTx.FetchBlock(&state.hash) + if err != nil { + return err + } + var block wire.MsgBlock + err = block.Deserialize(bytes.NewReader(blockBytes)) + if err != nil { + return err + } + + // As a final consistency check, we'll run through all the + // nodes which are ancestors of the current chain tip, and mark + // them as valid if they aren't already marked as such. This + // is a safe assumption as all the block before the current tip + // are valid by definition. + for iterNode := tip; iterNode != nil; iterNode = iterNode.parent { + // If this isn't already marked as valid in the index, then + // we'll mark it as valid now to ensure consistency once + // we're up and running. + if !iterNode.status.KnownValid() { + log.Infof("Block %v (height=%v) ancestor of "+ + "chain tip not marked as valid, "+ + "upgrading to valid for consistency", + iterNode.hash, iterNode.height) + + b.index.SetStatusFlags(iterNode, statusValid) + } + } + + // Initialize the state related to the best block. + blockSize := uint64(len(blockBytes)) + blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block))) + numTxns := uint64(len(block.Transactions)) + b.stateSnapshot = newBestState(tip, blockSize, blockWeight, + numTxns, state.totalTxns, tip.CalcPastMedianTime()) + + return nil + }) + if err != nil { + return err + } + + // As we might have updated the index after it was loaded, we'll + // attempt to flush the index to the DB. This will only result in a + // write if the elements are dirty, so it'll usually be a noop. + return b.index.flushToDB() +} + +func (b *BlockChain) initChainStateV2(genBlk *wire.MsgBlock) error { + // Determine the state of the chain database. 
We may need to initialize + // everything from scratch or upgrade certain buckets. + var initialized, hasBlockIndex bool + err := b.db.View(func(dbTx database.Tx) error { + initialized = dbTx.Metadata().Get(chainStateKeyName) != nil + hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil + return nil + }) + if err != nil { + return err + } + + if !initialized { + // At this point the database has not already been initialized, so + // initialize both it and the chain state to the genesis block. + return b.createChainStateV2(genBlk) + } + + if !hasBlockIndex { + err := migrateBlockIndex(b.db) + if err != nil { + return nil + } + } + + // Attempt to load the chain state from the database. + err = b.db.View(func(dbTx database.Tx) error { + // Fetch the stored chain state from the database metadata. + // When it doesn't exist, it means the database hasn't been + // initialized for use with chain yet, so break out now to allow + // that to happen under a writable database transaction. + serializedData := dbTx.Metadata().Get(chainStateKeyName) + log.Tracef("Serialized chain state: %x", serializedData) + state, err := deserializeBestChainState(serializedData) + if err != nil { + return err + } + + // Load all of the headers from the data for the known best + // chain and construct the block index accordingly. Since the + // number of nodes are already known, perform a single alloc + // for them versus a whole bunch of little ones to reduce + // pressure on the GC. + log.Infof("Loading block index...") + + blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName) + + // Determine how many blocks will be loaded into the index so we can + // allocate the right amount. 
+ var blockCount int32 + cursor := blockIndexBucket.Cursor() + for ok := cursor.First(); ok; ok = cursor.Next() { + blockCount++ + } + blockNodes := make([]blockNode, blockCount) + + var i int32 + var lastNode *blockNode + cursor = blockIndexBucket.Cursor() + for ok := cursor.First(); ok; ok = cursor.Next() { + header, status, err := deserializeBlockRow(cursor.Value()) + if err != nil { + return err + } + header.PrevBlock = genBlk.Header.PrevBlock + + // Determine the parent block node. Since we iterate block headers + // in order of height, if the blocks are mostly linear there is a + // very good chance the previous header processed is the parent. + var parent *blockNode + if lastNode == nil { + fmt.Println("first header prev hash: ", header.PrevBlock.String()) + fmt.Println("first header merkle root: ", header.MerkleRoot.String()) + fmt.Println("first header merkle root: ", header.MerkleRoot.String()) + + fmt.Println("genesis prev hash: ", genBlk.Header.PrevBlock.String()) + fmt.Println("genesis merkle root: ", genBlk.Header.MerkleRoot.String()) + fmt.Println("genesis timestamp: ", genBlk.Header.Timestamp.Unix()) + fmt.Println("genesis bits: ", genBlk.Header.Bits) + fmt.Println("genesis nonce: ", genBlk.Header.Nonce) + blockHash := header.BlockHash() + genHash := genBlk.Header.BlockHash() + fmt.Println("fuck: ", blockHash.String(), genHash.String()) + if !blockHash.IsEqual(&genHash) { + return AssertError(fmt.Sprintf("initChainState: Expected "+ + "first entry in block index to be genesis block, "+ + "found %s", blockHash)) + } + } else if header.PrevBlock == lastNode.hash { + // Since we iterate block headers in order of height, if the + // blocks are mostly linear there is a very good chance the + // previous header processed is the parent. 
				parent = lastNode
			} else {
				parent = b.index.LookupNode(&header.PrevBlock)
				if parent == nil {
					return AssertError(fmt.Sprintf("initChainState: Could "+
						"not find parent for block %s", header.BlockHash()))
				}
			}

			// Initialize the block node for the block, connect it,
			// and add it to the block index.
			node := &blockNodes[i]
			initBlockNode(node, header, parent)
			node.status = status
			b.index.addNode(node)

			lastNode = node
			i++
		}

		// Set the best chain view to the stored best state.
		tip := b.index.LookupNode(&state.hash)
		if tip == nil {
			return AssertError(fmt.Sprintf("initChainState: cannot find "+
				"chain tip %s in block index", state.hash))
		}
		b.bestChain.SetTip(tip)

		// Load the raw block bytes for the best block.
		blockBytes, err := dbTx.FetchBlock(&state.hash)
		if err != nil {
			return err
		}
		var block wire.MsgBlock
		err = block.Deserialize(bytes.NewReader(blockBytes))
		if err != nil {
			return err
		}

		// As a final consistency check, we'll run through all the
		// nodes which are ancestors of the current chain tip, and mark
		// them as valid if they aren't already marked as such. This
		// is a safe assumption as all the block before the current tip
		// are valid by definition.
		for iterNode := tip; iterNode != nil; iterNode = iterNode.parent {
			// If this isn't already marked as valid in the index, then
			// we'll mark it as valid now to ensure consistency once
			// we're up and running.
			if !iterNode.status.KnownValid() {
				log.Infof("Block %v (height=%v) ancestor of "+
					"chain tip not marked as valid, "+
					"upgrading to valid for consistency",
					iterNode.hash, iterNode.height)

				b.index.SetStatusFlags(iterNode, statusValid)
			}
		}

		// Initialize the state related to the best block.
		blockSize := uint64(len(blockBytes))
		blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block)))
		numTxns := uint64(len(block.Transactions))
		b.stateSnapshot = newBestState(tip, blockSize, blockWeight,
			numTxns, state.totalTxns, tip.CalcPastMedianTime())

		return nil
	})
	if err != nil {
		return err
	}

	// As we might have updated the index after it was loaded, we'll
	// attempt to flush the index to the DB. This will only result in a
	// write if the elements are dirty, so it'll usually be a noop.
	return b.index.flushToDB()
}

// deserializeBlockRow parses a value in the block index bucket into a block
// header and block status bitfield.
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
	buffer := bytes.NewReader(blockRow)

	var header wire.BlockHeader
	err := header.Deserialize(buffer)
	if err != nil {
		return nil, statusNone, err
	}

	// The single status byte immediately follows the serialized header.
	statusByte, err := buffer.ReadByte()
	if err != nil {
		return nil, statusNone, err
	}

	return &header, blockStatus(statusByte), nil
}

// dbFetchHeaderByHash uses an existing database transaction to retrieve the
// block header for the provided hash.
func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) {
	headerBytes, err := dbTx.FetchBlockHeader(hash)
	if err != nil {
		return nil, err
	}

	var header wire.BlockHeader
	err = header.Deserialize(bytes.NewReader(headerBytes))
	if err != nil {
		return nil, err
	}

	return &header, nil
}

// dbFetchHeaderByHeight uses an existing database transaction to retrieve the
// block header for the provided height.
func dbFetchHeaderByHeight(dbTx database.Tx, height int32) (*wire.BlockHeader, error) {
	// Resolve the height to a hash first, then reuse the hash-based fetch.
	hash, err := dbFetchHashByHeight(dbTx, height)
	if err != nil {
		return nil, err
	}

	return dbFetchHeaderByHash(dbTx, hash)
}

// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a btcutil.Block
// with the height set.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*btcutil.Block, error) {
	// Load the raw block bytes from the database.
	blockBytes, err := dbTx.FetchBlock(&node.hash)
	if err != nil {
		return nil, err
	}

	// Create the encapsulated block and set the height appropriately.
	block, err := btcutil.NewBlockFromBytes(blockBytes)
	if err != nil {
		return nil, err
	}
	block.SetHeight(node.height)

	return block, nil
}

// dbStoreBlockNode stores the block header and validation status to the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
	// Serialize block data to be stored: the header followed by a single
	// status byte.
	w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
	header := node.Header()
	err := header.Serialize(w)
	if err != nil {
		return err
	}
	err = w.WriteByte(byte(node.status))
	if err != nil {
		return err
	}
	value := w.Bytes()

	// Write block header data to block index bucket.
	blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
	key := blockIndexKey(&node.hash, uint32(node.height))
	return blockIndexBucket.Put(key, value)
}

// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *btcutil.Block) error {
	hasBlock, err := dbTx.HasBlock(block.Hash())
	if err != nil {
		return err
	}
	if hasBlock {
		// Already stored; storing again would be wasted work.
		return nil
	}
	return dbTx.StoreBlock(block)
}

// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte {
	indexKey := make([]byte, chainhash.HashSize+4)
	binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
	copy(indexKey[4:chainhash.HashSize+4], blockHash[:])
	return indexKey
}

// BlockByHeight returns the block at the given height in the main chain.
//
// This function is safe for concurrent access.
func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) {
	// Lookup the block height in the best chain.
	node := b.bestChain.NodeByHeight(blockHeight)
	if node == nil {
		str := fmt.Sprintf("no block at height %d exists", blockHeight)
		return nil, errNotInMainChain(str)
	}

	// Load the block from the database and return it.
	var block *btcutil.Block
	err := b.db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByNode(dbTx, node)
		return err
	})
	return block, err
}

// BlockByHash returns the block from the main chain with the given hash with
// the appropriate chain height set.
//
// This function is safe for concurrent access.
func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*btcutil.Block, error) {
	// Lookup the block hash in block index and ensure it is in the best
	// chain.
	node := b.index.LookupNode(hash)
	if node == nil || !b.bestChain.Contains(node) {
		str := fmt.Sprintf("block %s is not in the main chain", hash)
		return nil, errNotInMainChain(str)
	}

	// Load the block from the database and return it.
	var block *btcutil.Block
	err := b.db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByNode(dbTx, node)
		return err
	})
	return block, err
}
diff --git a/relaying/btc/chainview.go b/relaying/btc/chainview.go
new file mode 100644
index 0000000000..7d7b85f2c4
--- /dev/null
+++ b/relaying/btc/chainview.go
@@ -0,0 +1,419 @@
package btcrelaying

import (
	"sync"
)

// approxNodesPerWeek is an approximation of the number of new blocks there are
// in a week on average.
const approxNodesPerWeek = 6 * 24 * 7

// log2FloorMasks defines the masks to use when quickly calculating
// floor(log2(x)) in a constant log2(32) = 5 steps, where x is a uint32, using
// shifts. They are derived from (2^(2^x) - 1) * (2^(2^x)), for x in 4..0.
var log2FloorMasks = []uint32{0xffff0000, 0xff00, 0xf0, 0xc, 0x2}

// fastLog2Floor calculates and returns floor(log2(x)) in a constant 5 steps.
// Note fastLog2Floor(0) returns 0 since log2(0) is undefined.
func fastLog2Floor(n uint32) uint8 {
	rv := uint8(0)
	exponent := uint8(16)
	for i := 0; i < 5; i++ {
		if n&log2FloorMasks[i] != 0 {
			rv += exponent
			n >>= exponent
		}
		exponent >>= 1
	}
	return rv
}

// chainView provides a flat view of a specific branch of the block chain from
// its tip back to the genesis block and provides various convenience functions
// for comparing chains.
//
// For example, assume a block chain with a side chain as depicted below:
//   genesis -> 1 -> 2 -> 3 -> 4  -> 5 ->  6  -> 7  -> 8
//                         \-> 4a -> 5a -> 6a
//
// The chain view for the branch ending in 6a consists of:
//   genesis -> 1 -> 2 -> 3 -> 4a -> 5a -> 6a
type chainView struct {
	mtx   sync.Mutex
	nodes []*blockNode
}

// newChainView returns a new chain view for the given tip block node. Passing
// nil as the tip will result in a chain view that is not initialized. The tip
// can be updated at any time via the setTip function.
func newChainView(tip *blockNode) *chainView {
	// The mutex is intentionally not held since this is a constructor.
	var c chainView
	c.setTip(tip)
	return &c
}

// genesis returns the genesis block for the chain view. This only differs from
// the exported version in that it is up to the caller to ensure the lock is
// held.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) genesis() *blockNode {
	if len(c.nodes) == 0 {
		return nil
	}

	return c.nodes[0]
}

// Genesis returns the genesis block for the chain view.
//
// This function is safe for concurrent access.
func (c *chainView) Genesis() *blockNode {
	c.mtx.Lock()
	genesis := c.genesis()
	c.mtx.Unlock()
	return genesis
}

// tip returns the current tip block node for the chain view. It will return
// nil if there is no tip. This only differs from the exported version in that
// it is up to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) tip() *blockNode {
	if len(c.nodes) == 0 {
		return nil
	}

	return c.nodes[len(c.nodes)-1]
}

// Tip returns the current tip block node for the chain view. It will return
// nil if there is no tip.
//
// This function is safe for concurrent access.
func (c *chainView) Tip() *blockNode {
	c.mtx.Lock()
	tip := c.tip()
	c.mtx.Unlock()
	return tip
}

// setTip sets the chain view to use the provided block node as the current tip
// and ensures the view is consistent by populating it with the nodes obtained
// by walking backwards all the way to genesis block as necessary. Further
// calls will only perform the minimum work needed, so switching between chain
// tips is efficient. This only differs from the exported version in that it is
// up to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for writes).
func (c *chainView) setTip(node *blockNode) {
	if node == nil {
		// Keep the backing array around for potential future use.
		c.nodes = c.nodes[:0]
		return
	}

	// Create or resize the slice that will hold the block nodes to the
	// provided tip height. When creating the slice, it is created with
	// some additional capacity for the underlying array as append would do
	// in order to reduce overhead when extending the chain later. As long
	// as the underlying array already has enough capacity, simply expand or
	// contract the slice accordingly. The additional capacity is chosen
	// such that the array should only have to be extended about once a
	// week.
	needed := node.height + 1
	if int32(cap(c.nodes)) < needed {
		nodes := make([]*blockNode, needed, needed+approxNodesPerWeek)
		copy(nodes, c.nodes)
		c.nodes = nodes
	} else {
		prevLen := int32(len(c.nodes))
		c.nodes = c.nodes[0:needed]
		// Nil out any stale entries when the view shrinks then regrows.
		for i := prevLen; i < needed; i++ {
			c.nodes[i] = nil
		}
	}

	// Walk backwards from the new tip, stopping early once the remainder
	// of the branch already matches the view.
	for node != nil && c.nodes[node.height] != node {
		c.nodes[node.height] = node
		node = node.parent
	}
}

// SetTip sets the chain view to use the provided block node as the current tip
// and ensures the view is consistent by populating it with the nodes obtained
// by walking backwards all the way to genesis block as necessary. Further
// calls will only perform the minimum work needed, so switching between chain
// tips is efficient.
//
// This function is safe for concurrent access.
func (c *chainView) SetTip(node *blockNode) {
	c.mtx.Lock()
	c.setTip(node)
	c.mtx.Unlock()
}

// height returns the height of the tip of the chain view. It will return -1 if
// there is no tip (which only happens if the chain view has not been
// initialized). This only differs from the exported version in that it is up
// to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) height() int32 {
	return int32(len(c.nodes) - 1)
}

// Height returns the height of the tip of the chain view.
// It will return -1 if
// there is no tip (which only happens if the chain view has not been
// initialized).
//
// This function is safe for concurrent access.
func (c *chainView) Height() int32 {
	c.mtx.Lock()
	height := c.height()
	c.mtx.Unlock()
	return height
}

// nodeByHeight returns the block node at the specified height. Nil will be
// returned if the height does not exist. This only differs from the exported
// version in that it is up to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) nodeByHeight(height int32) *blockNode {
	if height < 0 || height >= int32(len(c.nodes)) {
		return nil
	}

	return c.nodes[height]
}

// NodeByHeight returns the block node at the specified height. Nil will be
// returned if the height does not exist.
//
// This function is safe for concurrent access.
func (c *chainView) NodeByHeight(height int32) *blockNode {
	c.mtx.Lock()
	node := c.nodeByHeight(height)
	c.mtx.Unlock()
	return node
}

// Equals returns whether or not two chain views are the same. Uninitialized
// views (tip set to nil) are considered equal.
//
// This function is safe for concurrent access.
func (c *chainView) Equals(other *chainView) bool {
	c.mtx.Lock()
	other.mtx.Lock()
	equals := len(c.nodes) == len(other.nodes) && c.tip() == other.tip()
	other.mtx.Unlock()
	c.mtx.Unlock()
	return equals
}

// contains returns whether or not the chain view contains the passed block
// node. This only differs from the exported version in that it is up to the
// caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) contains(node *blockNode) bool {
	return c.nodeByHeight(node.height) == node
}

// Contains returns whether or not the chain view contains the passed block
// node.
//
// This function is safe for concurrent access.
func (c *chainView) Contains(node *blockNode) bool {
	c.mtx.Lock()
	contains := c.contains(node)
	c.mtx.Unlock()
	return contains
}

// next returns the successor to the provided node for the chain view. It will
// return nil if there is no successor or the provided node is not part of the
// view. This only differs from the exported version in that it is up to the
// caller to ensure the lock is held.
//
// See the comment on the exported function for more details.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) next(node *blockNode) *blockNode {
	if node == nil || !c.contains(node) {
		return nil
	}

	return c.nodeByHeight(node.height + 1)
}

// Next returns the successor to the provided node for the chain view. It will
// return nil if there is no successor or the provided node is not part of the
// view.
//
// For example, assume a block chain with a side chain as depicted below:
//   genesis -> 1 -> 2 -> 3 -> 4  -> 5 ->  6  -> 7  -> 8
//                         \-> 4a -> 5a -> 6a
//
// Further, assume the view is for the longer chain depicted above. That is to
// say it consists of:
//   genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8
//
// Invoking this function with block node 5 would return block node 6 while
// invoking it with block node 5a would return nil since that node is not part
// of the view.
//
// This function is safe for concurrent access.
func (c *chainView) Next(node *blockNode) *blockNode {
	c.mtx.Lock()
	next := c.next(node)
	c.mtx.Unlock()
	return next
}

// findFork returns the final common block between the provided node and the
// chain view. It will return nil if there is no common block. This only
// differs from the exported version in that it is up to the caller to ensure
// the lock is held.
//
// See the exported FindFork comments for more details.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) findFork(node *blockNode) *blockNode {
	// No fork point for node that doesn't exist.
	if node == nil {
		return nil
	}

	// When the height of the passed node is higher than the height of the
	// tip of the current chain view, walk backwards through the nodes of
	// the other chain until the heights match (or there are no more nodes
	// in which case there is no common node between the two).
	//
	// NOTE: This isn't strictly necessary as the following section will
	// find the node as well, however, it is more efficient to avoid the
	// contains check since it is already known that the common node can't
	// possibly be past the end of the current chain view. It also allows
	// this code to take advantage of any potential future optimizations to
	// the Ancestor function such as using an O(log n) skip list.
	chainHeight := c.height()
	if node.height > chainHeight {
		node = node.Ancestor(chainHeight)
	}

	// Walk the other chain backwards as long as the current one does not
	// contain the node or there are no more nodes in which case there is no
	// common node between the two.
	for node != nil && !c.contains(node) {
		node = node.parent
	}

	return node
}

// FindFork returns the final common block between the provided node and the
// chain view. It will return nil if there is no common block.
//
// For example, assume a block chain with a side chain as depicted below:
//   genesis -> 1 -> 2 -> ... -> 5 -> 6  -> 7  -> 8
//                                \-> 6a -> 7a
//
// Further, assume the view is for the longer chain depicted above. That is to
// say it consists of:
//   genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8.
//
// Invoking this function with block node 7a would return block node 5 while
// invoking it with block node 7 would return itself since it is already part of
// the branch formed by the view.
//
// This function is safe for concurrent access.
func (c *chainView) FindFork(node *blockNode) *blockNode {
	c.mtx.Lock()
	fork := c.findFork(node)
	c.mtx.Unlock()
	return fork
}

// blockLocator returns a block locator for the passed block node. The passed
// node can be nil in which case the block locator for the current tip
// associated with the view will be returned. This only differs from the
// exported version in that it is up to the caller to ensure the lock is held.
//
// See the exported BlockLocator function comments for more details.
//
// This function MUST be called with the view mutex locked (for reads).
func (c *chainView) blockLocator(node *blockNode) BlockLocator {
	// Use the current tip if requested.
	if node == nil {
		node = c.tip()
	}
	if node == nil {
		return nil
	}

	// Calculate the max number of entries that will ultimately be in the
	// block locator. See the description of the algorithm for how these
	// numbers are derived.
	var maxEntries uint8
	if node.height <= 12 {
		maxEntries = uint8(node.height) + 1
	} else {
		// Requested hash itself + previous 10 entries + genesis block.
		// Then floor(log2(height-10)) entries for the skip portion.
		adjustedHeight := uint32(node.height) - 10
		maxEntries = 12 + fastLog2Floor(adjustedHeight)
	}
	locator := make(BlockLocator, 0, maxEntries)

	step := int32(1)
	for node != nil {
		locator = append(locator, &node.hash)

		// Nothing more to add once the genesis block has been added.
		if node.height == 0 {
			break
		}

		// Calculate height of previous node to include ensuring the
		// final node is the genesis block.
		height := node.height - step
		if height < 0 {
			height = 0
		}

		// When the node is in the current chain view, all of its
		// ancestors must be too, so use a much faster O(1) lookup in
		// that case. Otherwise, fall back to walking backwards through
		// the nodes of the other chain to the correct ancestor.
		if c.contains(node) {
			node = c.nodes[height]
		} else {
			node = node.Ancestor(height)
		}

		// Once 11 entries have been included, start doubling the
		// distance between included hashes.
		if len(locator) > 10 {
			step *= 2
		}
	}

	return locator
}

// BlockLocator returns a block locator for the passed block node. The passed
// node can be nil in which case the block locator for the current tip
// associated with the view will be returned.
//
// See the BlockLocator type for details on the algorithm used to create a block
// locator.
//
// This function is safe for concurrent access.
func (c *chainView) BlockLocator(node *blockNode) BlockLocator {
	c.mtx.Lock()
	locator := c.blockLocator(node)
	c.mtx.Unlock()
	return locator
}
diff --git a/relaying/btc/checkpoints.go b/relaying/btc/checkpoints.go
new file mode 100644
index 0000000000..cad0751e3b
--- /dev/null
+++ b/relaying/btc/checkpoints.go
@@ -0,0 +1,257 @@
package btcrelaying

import (
	"fmt"
	"time"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcutil"
)

// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016

// newHashFromStr converts the passed big-endian hex string into a
// chainhash.Hash. It only differs from the one available in chainhash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *chainhash.Hash {
	hash, _ := chainhash.NewHashFromStr(hexStr)
	return hash
}

// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (b *BlockChain) Checkpoints() []chaincfg.Checkpoint {
	return b.checkpoints
}

// HasCheckpoints returns whether this BlockChain has checkpoints defined.
//
// This function is safe for concurrent access.
func (b *BlockChain) HasCheckpoints() bool {
	return len(b.checkpoints) > 0
}

// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint {
	if !b.HasCheckpoints() {
		return nil
	}
	// Checkpoints are ordered by height, so the last one is the latest.
	return &b.checkpoints[len(b.checkpoints)-1]
}

// verifyCheckpoint returns whether the passed block height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block height.
func (b *BlockChain) verifyCheckpoint(height int32, hash *chainhash.Hash) bool {
	if !b.HasCheckpoints() {
		return true
	}

	// Nothing to check if there is no checkpoint data for the block height.
	checkpoint, exists := b.checkpointsByHeight[height]
	if !exists {
		return true
	}

	if !checkpoint.Hash.IsEqual(hash) {
		return false
	}

	log.Infof("Verified checkpoint at height %d/block %s", checkpoint.Height,
		checkpoint.Hash)
	return true
}

// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the chain lock held (for reads).
func (b *BlockChain) findPreviousCheckpoint() (*blockNode, error) {
	if !b.HasCheckpoints() {
		return nil, nil
	}

	// Perform the initial search to find and cache the latest known
	// checkpoint if the best chain is not known yet or we haven't already
	// previously searched.
	// The cache lives in b.checkpointNode (latest reached checkpoint) and
	// b.nextCheckpoint (next one expected, nil when past the last).
	checkpoints := b.checkpoints
	numCheckpoints := len(checkpoints)
	if b.checkpointNode == nil && b.nextCheckpoint == nil {
		// Loop backwards through the available checkpoints to find one
		// that is already available.
		for i := numCheckpoints - 1; i >= 0; i-- {
			node := b.index.LookupNode(checkpoints[i].Hash)
			if node == nil || !b.bestChain.Contains(node) {
				continue
			}

			// Checkpoint found. Cache it for future lookups and
			// set the next expected checkpoint accordingly.
			b.checkpointNode = node
			if i < numCheckpoints-1 {
				b.nextCheckpoint = &checkpoints[i+1]
			}
			return b.checkpointNode, nil
		}

		// No known latest checkpoint. This will only happen on blocks
		// before the first known checkpoint. So, set the next expected
		// checkpoint to the first checkpoint and return the fact there
		// is no latest known checkpoint block.
		b.nextCheckpoint = &checkpoints[0]
		return nil, nil
	}

	// At this point we've already searched for the latest known checkpoint,
	// so when there is no next checkpoint, the current checkpoint lockin
	// will always be the latest known checkpoint.
	if b.nextCheckpoint == nil {
		return b.checkpointNode, nil
	}

	// When there is a next checkpoint and the height of the current best
	// chain does not exceed it, the current checkpoint lockin is still
	// the latest known checkpoint.
	if b.bestChain.Tip().height < b.nextCheckpoint.Height {
		return b.checkpointNode, nil
	}

	// We've reached or exceeded the next checkpoint height. Note that
	// once a checkpoint lockin has been reached, forks are prevented from
	// any blocks before the checkpoint, so we don't have to worry about the
	// checkpoint going away out from under us due to a chain reorganize.

	// Cache the latest known checkpoint for future lookups. Note that if
	// this lookup fails something is very wrong since the chain has already
	// passed the checkpoint which was verified as accurate before inserting
	// it.
	checkpointNode := b.index.LookupNode(b.nextCheckpoint.Hash)
	if checkpointNode == nil {
		return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
			"failed lookup of known good block node %s",
			b.nextCheckpoint.Hash))
	}
	b.checkpointNode = checkpointNode

	// Set the next expected checkpoint. A linear scan locates the slot of
	// the checkpoint just reached so its successor (if any) becomes next.
	checkpointIndex := -1
	for i := numCheckpoints - 1; i >= 0; i-- {
		if checkpoints[i].Hash.IsEqual(b.nextCheckpoint.Hash) {
			checkpointIndex = i
			break
		}
	}
	b.nextCheckpoint = nil
	if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
		b.nextCheckpoint = &checkpoints[checkpointIndex+1]
	}

	return b.checkpointNode, nil
}

// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *btcutil.Tx) bool {
	// Check all of the output public key scripts for non-standard scripts.
	// Note: only outputs are inspected here; input scripts are not checked.
	for _, txOut := range tx.MsgTx().TxOut {
		scriptClass := txscript.GetScriptClass(txOut.PkScript)
		if scriptClass == txscript.NonStandardTy {
			return true
		}
	}
	return false
}

// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) {
	b.chainLock.RLock()
	defer b.chainLock.RUnlock()

	// A checkpoint must be in the main chain.
	node := b.index.LookupNode(block.Hash())
	if node == nil || !b.bestChain.Contains(node) {
		return false, nil
	}

	// Ensure the height of the passed block and the entry for the block in
	// the main chain match. This should always be the case unless the
	// caller provided an invalid block.
	if node.height != block.Height() {
		return false, fmt.Errorf("passed block height of %d does not "+
			"match the main chain height of %d", block.Height(),
			node.height)
	}

	// A checkpoint must be at least CheckpointConfirmations blocks
	// before the end of the main chain.
	mainChainHeight := b.bestChain.Tip().height
	if node.height > (mainChainHeight - CheckpointConfirmations) {
		return false, nil
	}

	// A checkpoint must have at least one block after it.
	//
	// This should always succeed since the check above already made sure it
	// is CheckpointConfirmations back, but be safe in case the constant
	// changes.
	nextNode := b.bestChain.Next(node)
	if nextNode == nil {
		return false, nil
	}

	// A checkpoint must have at least one block before it.
	if node.parent == nil {
		return false, nil
	}

	// A checkpoint must have timestamps for the block and the blocks on
	// either side of it in order (due to the median time allowance this is
	// not always the case).
	prevTime := time.Unix(node.parent.timestamp, 0)
	curTime := block.MsgBlock().Header.Timestamp
	nextTime := time.Unix(nextNode.timestamp, 0)
	if prevTime.After(curTime) || nextTime.Before(curTime) {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
		if isNonstandardTransaction(tx) {
			return false, nil
		}
	}

	// All of the checks passed, so the block is a candidate.
	return true, nil
}
diff --git a/relaying/btc/common.go b/relaying/btc/common.go
new file mode 100644
index 0000000000..095893f5e5
--- /dev/null
+++ b/relaying/btc/common.go
@@ -0,0 +1,484 @@
package btcrelaying

import (
	"compress/bzip2"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/ffldb"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
)

const (
	// testDbType is the database backend type to use for the tests.
	testDbType = "ffldb"

	// testDbRoot is the root directory used to create all test databases.
	testDbRoot = "btcdbs"

	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.MainNet
)

// fileExists returns whether or not the named file or directory exists.
+func fileExists(name string) bool { + if _, err := os.Stat(name); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} + +// isSupportedDbType returns whether or not the passed database type is +// currently supported. +func isSupportedDbType(dbType string) bool { + supportedDrivers := database.SupportedDrivers() + for _, driver := range supportedDrivers { + if dbType == driver { + return true + } + } + + return false +} + +// loadBlocks reads files containing bitcoin block data (gzipped but otherwise +// in the format bitcoind writes) from disk and returns them as an array of +// btcutil.Block. This is largely borrowed from the test code in btcdb. +func loadBlocks(filename string) (blocks []*btcutil.Block, err error) { + filename = filepath.Join("testdata/", filename) + + var network = wire.MainNet + var dr io.Reader + var fi io.ReadCloser + + fi, err = os.Open(filename) + if err != nil { + return + } + + if strings.HasSuffix(filename, ".bz2") { + dr = bzip2.NewReader(fi) + } else { + dr = fi + } + defer fi.Close() + + var block *btcutil.Block + + err = nil + for height := int64(1); err == nil; height++ { + var rintbuf uint32 + err = binary.Read(dr, binary.LittleEndian, &rintbuf) + if err == io.EOF { + // hit end of file at expected offset: no warning + height-- + err = nil + break + } + if err != nil { + break + } + if rintbuf != uint32(network) { + break + } + err = binary.Read(dr, binary.LittleEndian, &rintbuf) + blocklen := rintbuf + + rbytes := make([]byte, blocklen) + + // read block + dr.Read(rbytes) + + block, err = btcutil.NewBlockFromBytes(rbytes) + if err != nil { + return + } + blocks = append(blocks, block) + } + + return +} + +// chainSetup is used to create a new db and chain instance with the genesis +// block already inserted. In addition to the new chain instance, it returns +// a teardown function the caller should invoke when done testing to clean up. 
+func chainSetup(dbName string, params *chaincfg.Params) (*BlockChain, func(), error) { + if !isSupportedDbType(testDbType) { + return nil, nil, fmt.Errorf("unsupported db type %v", testDbType) + } + + // Handle memory database specially since it doesn't need the disk + // specific handling. + var db database.DB + var teardown func() + if testDbType == "memdb" { + ndb, err := database.Create(testDbType) + if err != nil { + return nil, nil, fmt.Errorf("error creating db: %v", err) + } + db = ndb + + // Setup a teardown function for cleaning up. This function is + // returned to the caller to be invoked when it is done testing. + teardown = func() { + db.Close() + } + } else { + // Create the root directory for test databases. + if !fileExists(testDbRoot) { + if err := os.MkdirAll(testDbRoot, 0700); err != nil { + err := fmt.Errorf("unable to create test db "+ + "root: %v", err) + return nil, nil, err + } + } + + // Create a new database to store the accepted blocks into. + dbPath := filepath.Join(testDbRoot, dbName) + // _ = os.RemoveAll(dbPath) + ndb, err := database.Create(testDbType, dbPath, blockDataNet) + if err != nil { + fmt.Println("--------- come here -----------") + return nil, nil, fmt.Errorf("error creating db: %v", err) + } + db = ndb + + // Setup a teardown function for cleaning up. This function is + // returned to the caller to be invoked when it is done testing. + teardown = func() { + db.Close() + os.RemoveAll(dbPath) + os.RemoveAll(testDbRoot) + } + } + + // Copy the chain params to ensure any modifications the tests do to + // the chain parameters do not affect the global instance. + paramsCopy := *params + + // Create the main chain instance. 
	chain, err := New(&Config{
		DB:          db,
		ChainParams: &paramsCopy,
		Checkpoints: nil,
		TimeSource:  NewMedianTime(),
		SigCache:    txscript.NewSigCache(1000),
	})
	if err != nil {
		// Clean up the freshly-created database before returning.
		teardown()
		err := fmt.Errorf("failed to create chain instance: %v", err)
		return nil, nil, err
	}
	return chain, teardown, nil
}

// GetChainV2 opens the block database at dbPath (creating it first when it
// does not exist yet) and returns a btcrelaying chain instance backed by it.
func GetChainV2(dbPath string, params *chaincfg.Params) (*BlockChain, error) {
	if !isSupportedDbType(testDbType) {
		return nil, fmt.Errorf("unsupported db type %v", testDbType)
	}

	var db database.DB
	// dbPath := filepath.Join(testDbRoot, dbName)
	ndb, err := database.Open(testDbType, dbPath, blockDataNet)
	if err != nil {
		// Return the error if it's not because the database doesn't
		// exist.
		if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
			database.ErrDbDoesNotExist {
			return nil, err
		}

		// Create the db if it does not exist.
		ndb, err = database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, fmt.Errorf("error creating db: %v", err)
		}
	}
	db = ndb

	// Copy the chain params to ensure any modifications the tests do to
	// the chain parameters do not affect the global instance.
	paramsCopy := *params

	// Create the main chain instance.
	chain, err := New(&Config{
		DB:          db,
		ChainParams: &paramsCopy,
		Checkpoints: nil,
		TimeSource:  NewMedianTime(),
		SigCache:    txscript.NewSigCache(1000),
	})
	if err != nil {
		err := fmt.Errorf("failed to create chain instance: %v", err)
		return nil, err
	}
	return chain, nil
}

// GetChain opens (or creates) the block database named dbName under the test
// db root and returns a btcrelaying chain instance backed by it.
func GetChain(dbName string, params *chaincfg.Params) (*BlockChain, error) {
	if !isSupportedDbType(testDbType) {
		return nil, fmt.Errorf("unsupported db type %v", testDbType)
	}

	// Handle memory database specially since it doesn't need the disk
	// specific handling.
	var db database.DB
	if testDbType == "memdb" {
		ndb, err := database.Create(testDbType)
		if err != nil {
			return nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb
	} else {
		dbPath := filepath.Join(testDbRoot, dbName)
		ndb, err := database.Open(testDbType, dbPath, blockDataNet)
		if err != nil {
			// Return the error if it's not because the database doesn't
			// exist.
			if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
				database.ErrDbDoesNotExist {
				return nil, err
			}

			// Create the db if it does not exist.
			err = os.MkdirAll(testDbRoot, 0700)
			if err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %v", err)
				return nil, err
			}
			ndb, err = database.Create(testDbType, dbPath, blockDataNet)
			if err != nil {
				return nil, fmt.Errorf("error creating db: %v", err)
			}
		}
		db = ndb
	}

	// Copy the chain params to ensure any modifications the tests do to
	// the chain parameters do not affect the global instance.
	paramsCopy := *params

	// Create the main chain instance.
	chain, err := New(&Config{
		DB:          db,
		ChainParams: &paramsCopy,
		Checkpoints: nil,
		TimeSource:  NewMedianTime(),
		SigCache:    txscript.NewSigCache(1000),
	})
	if err != nil {
		err := fmt.Errorf("failed to create chain instance: %v", err)
		return nil, err
	}
	return chain, nil
}

// loadUtxoView returns a utxo view loaded from a file.
func loadUtxoView(filename string) (*UtxoViewpoint, error) {
	// The utxostore file format is:
	// <tx hash><output index><serialized utxo len><serialized utxo>
	//
	// The output index and serialized utxo len are little endian uint32s
	// and the serialized utxo uses the format described in chainio.go.

	filename = filepath.Join("testdata", filename)
	fi, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	// Choose read based on whether the file is compressed or not.
	var r io.Reader
	if strings.HasSuffix(filename, ".bz2") {
		r = bzip2.NewReader(fi)
	} else {
		r = fi
	}
	defer fi.Close()

	view := NewUtxoViewpoint()
	for {
		// Hash of the utxo entry. A clean EOF here marks the end of the
		// file; EOF anywhere later in the record is an error.
		var hash chainhash.Hash
		_, err := io.ReadAtLeast(r, hash[:], len(hash[:]))
		if err != nil {
			// Expected EOF at the right offset.
			if err == io.EOF {
				break
			}
			return nil, err
		}

		// Output index of the utxo entry.
		var index uint32
		err = binary.Read(r, binary.LittleEndian, &index)
		if err != nil {
			return nil, err
		}

		// Num of serialized utxo entry bytes.
		var numBytes uint32
		err = binary.Read(r, binary.LittleEndian, &numBytes)
		if err != nil {
			return nil, err
		}

		// Serialized utxo entry.
		serialized := make([]byte, numBytes)
		_, err = io.ReadAtLeast(r, serialized, int(numBytes))
		if err != nil {
			return nil, err
		}

		// Deserialize it and add it to the view.
		entry, err := deserializeUtxoEntry(serialized)
		if err != nil {
			return nil, err
		}
		view.Entries()[wire.OutPoint{Hash: hash, Index: index}] = entry
	}

	return view, nil
}

// convertUtxoStore reads a utxostore from the legacy format and writes it back
// out using the latest format. It is only useful for converting utxostore data
// used in the tests, which has already been done. However, the code is left
// available for future reference.
func convertUtxoStore(r io.Reader, w io.Writer) error {
	// The old utxostore file format was:
	// <tx hash><serialized utxo len><serialized utxo>
	//
	// The serialized utxo len was a little endian uint32 and the serialized
	// utxo uses the format described in upgrade.go.

	littleEndian := binary.LittleEndian
	for {
		// Hash of the utxo entry.
		var hash chainhash.Hash
		_, err := io.ReadAtLeast(r, hash[:], len(hash[:]))
		if err != nil {
			// Expected EOF at the right offset.
			if err == io.EOF {
				break
			}
			return err
		}

		// Num of serialized utxo entry bytes.
		var numBytes uint32
		err = binary.Read(r, littleEndian, &numBytes)
		if err != nil {
			return err
		}

		// Serialized utxo entry.
		serialized := make([]byte, numBytes)
		_, err = io.ReadAtLeast(r, serialized, int(numBytes))
		if err != nil {
			return err
		}

		// Deserialize the entry. The legacy format packs all outputs of
		// one transaction into a single record.
		entries, err := deserializeUtxoEntryV0(serialized)
		if err != nil {
			return err
		}

		// Loop through all of the utxos and write them out in the new
		// format.
		for outputIdx, entry := range entries {
			// Reserialize the entries using the new format.
			serialized, err := serializeUtxoEntry(entry)
			if err != nil {
				return err
			}

			// Write the hash of the utxo entry.
			_, err = w.Write(hash[:])
			if err != nil {
				return err
			}

			// Write the output index of the utxo entry.
			// NOTE(review): binary.Write requires a fixed-size
			// type; confirm deserializeUtxoEntryV0's map key is one
			// (e.g. uint32), since a plain int would error here.
			err = binary.Write(w, littleEndian, outputIdx)
			if err != nil {
				return err
			}

			// Write num of serialized utxo entry bytes.
			err = binary.Write(w, littleEndian, uint32(len(serialized)))
			if err != nil {
				return err
			}

			// Write the serialized utxo.
			_, err = w.Write(serialized)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// TstSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (b *BlockChain) TstSetCoinbaseMaturity(maturity uint16) {
	b.chainParams.CoinbaseMaturity = maturity
}

// newFakeChain returns a chain that is usable for synthetic tests. It is
// important to note that this chain has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
func newFakeChain(params *chaincfg.Params) *BlockChain {
	// Create a genesis block node and block index populated with it
	// for use when creating the fake chain below.
	node := newBlockNode(&params.GenesisBlock.Header, nil)
	index := newBlockIndex(nil, params)
	index.AddNode(node)

	// Precompute the retargeting bounds from the chain params, expressed
	// in seconds.
	targetTimespan := int64(params.TargetTimespan / time.Second)
	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
	adjustmentFactor := params.RetargetAdjustmentFactor
	return &BlockChain{
		chainParams:         params,
		timeSource:          NewMedianTime(),
		minRetargetTimespan: targetTimespan / adjustmentFactor,
		maxRetargetTimespan: targetTimespan * adjustmentFactor,
		blocksPerRetarget:   int32(targetTimespan / targetTimePerBlock),
		index:               index,
		bestChain:           newChainView(node),
		warningCaches:       newThresholdCaches(vbNumBits),
		deploymentCaches:    newThresholdCaches(chaincfg.DefinedDeployments),
	}
}

// newFakeNode creates a block node connected to the passed parent with the
// provided fields populated and fake values for the other fields.
func newFakeNode(parent *blockNode, blockVersion int32, bits uint32, timestamp time.Time) *blockNode {
	// Make up a header and create a block node from it.
	header := &wire.BlockHeader{
		Version:   blockVersion,
		PrevBlock: parent.hash,
		Bits:      bits,
		Timestamp: timestamp,
	}
	return newBlockNode(header, parent)
}
diff --git a/relaying/btc/compress.go b/relaying/btc/compress.go
new file mode 100644
index 0000000000..844fe52a8f
--- /dev/null
+++ b/relaying/btc/compress.go
@@ -0,0 +1,582 @@
package btcrelaying

import (
	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/txscript"
)

// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one.
// In addition, to ensure there are no redundant encodings, an offset is
// subtracted every time a group of 7 bits is shifted out. Therefore each
// integer can be represented in exactly one way, and each representation
// stands for exactly one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
//           0 -> [0x00]
//         127 -> [0x7f]                 * Max 1-byte value
//         128 -> [0x80 0x00]
//         129 -> [0x80 0x01]
//         255 -> [0x80 0x7f]
//         256 -> [0x81 0x00]
//       16511 -> [0xff 0x7f]            * Max 2-byte value
//       16512 -> [0x80 0x80 0x00]
//       32895 -> [0x80 0xff 0x7f]
//     2113663 -> [0xff 0xff 0x7f]       * Max 3-byte value
//   270549119 -> [0xff 0xff 0xff 0x7f]  * Max 4-byte value
//      2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
//   https://en.wikipedia.org/wiki/Variable-length_quantity
//   http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------

// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
	// Each additional byte is needed while more than 7 bits remain; the
	// offset of 1 mirrors the encoding's redundancy-elimination step.
	size := 1
	for n > 0x7f {
		n = (n >> 7) - 1
		size++
	}
	return size
}

// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
	// Emit 7-bit groups least-significant first. Only the group written
	// first (the eventual final byte) leaves the continuation bit clear.
	idx := 0
	for {
		mask := byte(0x80)
		if idx == 0 {
			mask = 0x00
		}
		target[idx] = byte(n&0x7f) | mask
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
		idx++
	}

	// The groups were produced in reverse, so flip them in place to get
	// the MSB-first wire order.
	for lo, hi := 0, idx; lo < hi; lo, hi = lo+1, hi-1 {
		target[lo], target[hi] = target[hi], target[lo]
	}

	return idx + 1
}

// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var value uint64
	bytesRead := 0
	for _, octet := range serialized {
		bytesRead++
		value = value<<7 | uint64(octet&0x7f)
		if octet&0x80 == 0 {
			// Continuation bit clear: this was the final byte.
			break
		}
		// Undo the offset subtracted during encoding.
		value++
	}

	return value, bytesRead
}

// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
//
// The general serialized format is:
//