Merge #7730: Remove priority estimation
0bd581a add release notes for removal of priority estimation (Alex Morcos)
b2322e0 Remove priority estimation (Alex Morcos)
laanwj committed Nov 7, 2016
2 parents 1e50d22 + 0bd581a commit 3c03dc2
Showing 6 changed files with 110 additions and 251 deletions.
9 changes: 9 additions & 0 deletions doc/release-notes.md
@@ -48,6 +48,15 @@ Low-level RPC changes
an optional third arg, which was always ignored. Make sure to never pass more
than two arguments.

Removal of Priority Estimation
------------------------------

- Estimation of "priority" needed for a transaction to be included within a target
number of blocks has been removed. The rpc calls are deprecated and will either
return -1 or 1e24 appropriately. The format for fee_estimates.dat has also
changed to no longer save these priority estimates. It will automatically be
converted to the new format which is not readable by prior versions of the
software.

0.14.0 Change log
=================
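As the release note above says, the deprecated RPC calls now report fixed sentinel values. A minimal sketch of how calling code might interpret the corresponding estimator entry points after this change (CheckDeprecatedPriorityCalls is a hypothetical helper; it assumes the policy/fees.h interface shown in the src/policy/fees.cpp diff below):

// Sketch only, not part of this commit.
#include <cassert>
#include "policy/fees.h"
#include "txmempool.h"

void CheckDeprecatedPriorityCalls(CBlockPolicyEstimator& estimator, const CTxMemPool& pool)
{
    // estimatePriority now always reports "no estimate available".
    double pri = estimator.estimatePriority(6);
    assert(pri == -1);

    // estimateSmartPriority returns INF_PRIORITY (the ~1e24 sentinel mentioned
    // above) when the mempool is enforcing a minimum feerate -- i.e. no "free"
    // transactions are being accepted -- and -1 otherwise.
    int answerFoundAtTarget = 0;
    double smartPri = estimator.estimateSmartPriority(6, &answerFoundAtTarget, pool);
    assert(smartPri == -1 || smartPri == INF_PRIORITY);
}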
168 changes: 32 additions & 136 deletions src/policy/fees.cpp
@@ -14,10 +14,9 @@
#include "util.h"

void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets,
unsigned int maxConfirms, double _decay, std::string _dataTypeString)
unsigned int maxConfirms, double _decay)
{
decay = _decay;
dataTypeString = _dataTypeString;
for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
buckets.push_back(defaultBuckets[i]);
bucketMap[defaultBuckets[i]] = i;
@@ -87,10 +86,10 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,

int maxbucketindex = buckets.size() - 1;

// requireGreater means we are looking for the lowest fee/priority such that all higher
// values pass, so we start at maxbucketindex (highest fee) and look at successively
// requireGreater means we are looking for the lowest feerate such that all higher
// values pass, so we start at maxbucketindex (highest feerate) and look at successively
// smaller buckets until we reach failure. Otherwise, we are looking for the highest
// fee/priority such that all lower values fail, and we go in the opposite direction.
// feerate such that all lower values fail, and we go in the opposite direction.
unsigned int startbucket = requireGreater ? maxbucketindex : 0;
int step = requireGreater ? -1 : 1;

@@ -107,7 +106,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
bool foundAnswer = false;
unsigned int bins = unconfTxs.size();

// Start counting from highest(default) or lowest fee/pri transactions
// Start counting from highest(default) or lowest feerate transactions
for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
curFarBucket = bucket;
nConf += confAvg[confTarget - 1][bucket];
@@ -145,8 +144,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
double median = -1;
double txSum = 0;

// Calculate the "average" fee of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average fee from that bucket
// Calculate the "average" feerate of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average feerate from that bucket
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
@@ -166,8 +165,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
}
}

LogPrint("estimatefee", "%3d: For conf success %s %4.2f need %s %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
confTarget, requireGreater ? ">" : "<", successBreakPoint, dataTypeString,
LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
confTarget, requireGreater ? ">" : "<", successBreakPoint,
requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);

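The comment above describes a compromise: locate the bucket holding the median transaction, then report that bucket's average feerate. The loop that implements it is collapsed in this view; the sketch below illustrates the idea with made-up per-bucket totals (txCtAvg[b] is the decayed tx count recorded in bucket b and avg[b] the sum of feerates recorded there, as in TxConfirmStats):

// Illustrative sketch only; not the code from this file.
#include <cstdio>
#include <vector>

double MedianBucketAvgFeerate(const std::vector<double>& txCtAvg,
                              const std::vector<double>& avg,
                              unsigned int minBucket, unsigned int maxBucket)
{
    double totalTxs = 0;
    for (unsigned int b = minBucket; b <= maxBucket; b++)
        totalTxs += txCtAvg[b];
    double remaining = totalTxs / 2;
    for (unsigned int b = minBucket; b <= maxBucket; b++) {
        if (txCtAvg[b] < remaining) {
            remaining -= txCtAvg[b];   // median tx lies in a later bucket
        } else {
            // Bucket containing the median tx: report its average feerate.
            return txCtAvg[b] ? avg[b] / txCtAvg[b] : -1;
        }
    }
    return -1;
}

int main()
{
    std::vector<double> txCtAvg = {5, 20, 40, 10};               // txs per bucket
    std::vector<double> avg     = {5000, 40000, 120000, 50000};  // summed feerates
    // 75 txs in total, so the median falls in bucket 2: 120000 / 40 = 3000.
    std::printf("%.1f\n", MedianBucketAvgFeerate(txCtAvg, avg, 0, 3));
    return 0;
}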
@@ -200,10 +199,10 @@ void TxConfirmStats::Read(CAutoFile& filein)
filein >> fileBuckets;
numBuckets = fileBuckets.size();
if (numBuckets <= 1 || numBuckets > 1000)
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 fee/pri buckets");
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
filein >> fileAvg;
if (fileAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri average bucket count");
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
filein >> fileTxCtAvg;
if (fileTxCtAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
@@ -213,9 +212,9 @@ void TxConfirmStats::Read(CAutoFile& filein)
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
for (unsigned int i = 0; i < maxConfirms; i++) {
if (fileConfAvg[i].size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri conf average bucket count");
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
}
// Now that we've processed the entire fee estimate data file and not
// Now that we've processed the entire feerate estimate data file and not
// thrown any errors, we can copy it to our data structures
decay = fileDecay;
buckets = fileBuckets;
@@ -242,16 +241,15 @@ void TxConfirmStats::Read(CAutoFile& filein)
for (unsigned int i = 0; i < buckets.size(); i++)
bucketMap[buckets[i]] = i;

LogPrint("estimatefee", "Reading estimates: %u %s buckets counting confirms up to %u blocks\n",
numBuckets, dataTypeString, maxConfirms);
LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
numBuckets, maxConfirms);
}

unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
{
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
unsigned int blockIndex = nBlockHeight % unconfTxs.size();
unconfTxs[blockIndex][bucketindex]++;
LogPrint("estimatefee", "adding to %s", dataTypeString);
return bucketindex;
}

@@ -291,12 +289,10 @@ void CBlockPolicyEstimator::removeTx(uint256 hash)
hash.ToString().c_str());
return;
}
TxConfirmStats *stats = pos->second.stats;
unsigned int entryHeight = pos->second.blockHeight;
unsigned int bucketIndex = pos->second.bucketIndex;

if (stats != NULL)
stats->removeTx(entryHeight, nBestSeenHeight, bucketIndex);
feeStats.removeTx(entryHeight, nBestSeenHeight, bucketIndex);
mapMemPoolTxs.erase(hash);
}

@@ -309,45 +305,14 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee)
vfeelist.push_back(bucketBoundary);
}
vfeelist.push_back(INF_FEERATE);
feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "FeeRate");

minTrackedPriority = AllowFreeThreshold() < MIN_PRIORITY ? MIN_PRIORITY : AllowFreeThreshold();
std::vector<double> vprilist;
for (double bucketBoundary = minTrackedPriority; bucketBoundary <= MAX_PRIORITY; bucketBoundary *= PRI_SPACING) {
vprilist.push_back(bucketBoundary);
}
vprilist.push_back(INF_PRIORITY);
priStats.Initialize(vprilist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Priority");

feeUnlikely = CFeeRate(0);
feeLikely = CFeeRate(INF_FEERATE);
priUnlikely = 0;
priLikely = INF_PRIORITY;
}

bool CBlockPolicyEstimator::isFeeDataPoint(const CFeeRate &fee, double pri)
{
if ((pri < minTrackedPriority && fee >= minTrackedFee) ||
(pri < priUnlikely && fee > feeLikely)) {
return true;
}
return false;
}

bool CBlockPolicyEstimator::isPriDataPoint(const CFeeRate &fee, double pri)
{
if ((fee < minTrackedFee && pri >= minTrackedPriority) ||
(fee < feeUnlikely && pri > priLikely)) {
return true;
}
return false;
feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
}

void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate)
{
unsigned int txHeight = entry.GetHeight();
uint256 hash = entry.GetTx().GetHash();
if (mapMemPoolTxs[hash].stats != NULL) {
if (mapMemPoolTxs.count(hash)) {
LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
hash.ToString().c_str());
return;
@@ -371,30 +336,11 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
return;
}

// Fees are stored and reported as BTC-per-kb:
// Feerates are stored and reported as BTC-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());

// Want the priority of the tx at confirmation. However we don't know
// what that will be and its too hard to continue updating it
// so use starting priority as a proxy
double curPri = entry.GetPriority(txHeight);
mapMemPoolTxs[hash].blockHeight = txHeight;

LogPrint("estimatefee", "Blockpolicy mempool tx %s ", hash.ToString().substr(0,10));
// Record this as a priority estimate
if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
mapMemPoolTxs[hash].stats = &priStats;
mapMemPoolTxs[hash].bucketIndex = priStats.NewTx(txHeight, curPri);
}
// Record this as a fee estimate
else if (isFeeDataPoint(feeRate, curPri)) {
mapMemPoolTxs[hash].stats = &feeStats;
mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
}
else {
LogPrint("estimatefee", "not adding");
}
LogPrint("estimatefee", "\n");
mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
}

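The comment in processTransaction above notes that feerates are stored and reported as BTC-per-kB; concretely, CFeeRate(fee, size) tracks satoshis per 1000 bytes of transaction size. A standalone sketch of that arithmetic (the FeePerK helper and the numbers are illustrative, not from this commit):

// Mirrors, approximately, what CFeeRate(nFee, nBytes).GetFeePerK() reports.
#include <cstdio>

long long FeePerK(long long nFeeSatoshis, long long nTxBytes)
{
    return nTxBytes > 0 ? nFeeSatoshis * 1000 / nTxBytes : 0;
}

int main()
{
    // A 226-byte transaction paying 4,520 satoshis -> 20,000 sat/kB.
    std::printf("%lld sat/kB\n", FeePerK(4520, 226));
    return 0;
}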
void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry)
@@ -417,21 +363,10 @@ void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
return;
}

// Fees are stored and reported as BTC-per-kb:
// Feerates are stored and reported as BTC-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());

// Want the priority of the tx at confirmation. The priority when it
// entered the mempool could easily be very small and change quickly
double curPri = entry.GetPriority(nBlockHeight);

// Record this as a priority estimate
if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
priStats.Record(blocksToConfirm, curPri);
}
// Record this as a fee estimate
else if (isFeeDataPoint(feeRate, curPri)) {
feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
}
feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
}

void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
@@ -452,41 +387,15 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
if (!fCurrentEstimate)
return;

// Update the dynamic cutoffs
// a fee/priority is "likely" the reason your tx was included in a block if >85% of such tx's
// were confirmed in 2 blocks and is "unlikely" if <50% were confirmed in 10 blocks
LogPrint("estimatefee", "Blockpolicy recalculating dynamic cutoffs:\n");
priLikely = priStats.EstimateMedianVal(2, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBlockHeight);
if (priLikely == -1)
priLikely = INF_PRIORITY;

double feeLikelyEst = feeStats.EstimateMedianVal(2, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBlockHeight);
if (feeLikelyEst == -1)
feeLikely = CFeeRate(INF_FEERATE);
else
feeLikely = CFeeRate(feeLikelyEst);

priUnlikely = priStats.EstimateMedianVal(10, SUFFICIENT_PRITXS, UNLIKELY_PCT, false, nBlockHeight);
if (priUnlikely == -1)
priUnlikely = 0;

double feeUnlikelyEst = feeStats.EstimateMedianVal(10, SUFFICIENT_FEETXS, UNLIKELY_PCT, false, nBlockHeight);
if (feeUnlikelyEst == -1)
feeUnlikely = CFeeRate(0);
else
feeUnlikely = CFeeRate(feeUnlikelyEst);

// Clear the current block states
// Clear the current block state
feeStats.ClearCurrent(nBlockHeight);
priStats.ClearCurrent(nBlockHeight);

// Repopulate the current block states
for (unsigned int i = 0; i < entries.size(); i++)
processBlockTx(nBlockHeight, entries[i]);

// Update all exponential averages with the current block states
// Update all exponential averages with the current block state
feeStats.UpdateMovingAverages();
priStats.UpdateMovingAverages();

LogPrint("estimatefee", "Blockpolicy after updating estimates for %u confirmed entries, new mempool map size %u\n",
entries.size(), mapMemPoolTxs.size());
@@ -522,7 +431,7 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget - 1;

// If mempool is limiting txs , return at least the min fee from the mempool
// If mempool is limiting txs , return at least the min feerate from the mempool
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
if (minPoolFee > 0 && minPoolFee > median)
return CFeeRate(minPoolFee);
@@ -535,51 +444,38 @@

double CBlockPolicyEstimator::estimatePriority(int confTarget)
{
// Return failure if trying to analyze a target we're not tracking
if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
return -1;

return priStats.EstimateMedianVal(confTarget, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
return -1;
}

double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool)
{
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget;
// Return failure if trying to analyze a target we're not tracking
if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
return -1;

// If mempool is limiting txs, no priority txs are allowed
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
if (minPoolFee > 0)
return INF_PRIORITY;

double median = -1;
while (median < 0 && (unsigned int)confTarget <= priStats.GetMaxConfirms()) {
median = priStats.EstimateMedianVal(confTarget++, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
}

if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget - 1;

return median;
return -1;
}

void CBlockPolicyEstimator::Write(CAutoFile& fileout)
{
fileout << nBestSeenHeight;
feeStats.Write(fileout);
priStats.Write(fileout);
}

void CBlockPolicyEstimator::Read(CAutoFile& filein)
void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion)
{
int nFileBestSeenHeight;
filein >> nFileBestSeenHeight;
feeStats.Read(filein);
priStats.Read(filein);
nBestSeenHeight = nFileBestSeenHeight;
if (nFileVersion < 139900) {
TxConfirmStats priStats;
priStats.Read(filein);
}
}

FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee)
