1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2016 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
6 #include "policy/fees.h"
7 #include "policy/policy.h"
10 #include "clientversion.h"
11 #include "primitives/transaction.h"
14 #include "txmempool.h"
// Sentinel feerate larger than any real feerate; used as the upper bound of
// the final catch-all bucket (see the CBlockPolicyEstimator constructor).
static constexpr double INF_FEERATE = 1e99;
20 * We will instantiate an instance of this class to track transactions that were
21 * included in a block. We will lump transactions into a bucket according to their
22 * approximate feerate and then track how long it took for those txs to be included in a block
24 * The tracking of unconfirmed (mempool) transactions is completely independent of the
25 * historical tracking of transactions that have been confirmed in a block.
30 //Define the buckets we will group transactions into
31 const std::vector
<double>& buckets
; // The upper-bound of the range for the bucket (inclusive)
32 const std::map
<double, unsigned int>& bucketMap
; // Map of bucket upper-bound to index into all vectors by bucket
35 // Count the total # of txs in each bucket
36 // Track the historical moving average of this total over blocks
37 std::vector
<double> txCtAvg
;
39 // Count the total # of txs confirmed within Y blocks in each bucket
40 // Track the historical moving average of theses totals over blocks
41 std::vector
<std::vector
<double>> confAvg
; // confAvg[Y][X]
43 // Track moving avg of txs which have been evicted from the mempool
44 // after failing to be confirmed within Y blocks
45 std::vector
<std::vector
<double>> failAvg
; // failAvg[Y][X]
47 // Sum the total feerate of all tx's in each bucket
48 // Track the historical moving average of this total over blocks
49 std::vector
<double> avg
;
51 // Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
52 // Combine the total value with the tx counts to calculate the avg feerate per bucket
56 // Resolution (# of blocks) with which confirmations are tracked
59 // Mempool counts of outstanding transactions
60 // For each bucket X, track the number of transactions in the mempool
61 // that are unconfirmed for each possible confirmation value Y
62 std::vector
<std::vector
<int> > unconfTxs
; //unconfTxs[Y][X]
63 // transactions still unconfirmed after GetMaxConfirms for each bucket
64 std::vector
<int> oldUnconfTxs
;
66 void resizeInMemoryCounters(size_t newbuckets
);
70 * Create new TxConfirmStats. This is called by BlockPolicyEstimator's
71 * constructor with default values.
72 * @param defaultBuckets contains the upper limits for the bucket boundaries
73 * @param maxConfirms max number of confirms to track
74 * @param decay how much to decay the historical moving average per block
76 TxConfirmStats(const std::vector
<double>& defaultBuckets
, const std::map
<double, unsigned int>& defaultBucketMap
,
77 unsigned int maxPeriods
, double decay
, unsigned int scale
);
79 /** Roll the circular buffer for unconfirmed txs*/
80 void ClearCurrent(unsigned int nBlockHeight
);
83 * Record a new transaction data point in the current block stats
84 * @param blocksToConfirm the number of blocks it took this transaction to confirm
85 * @param val the feerate of the transaction
86 * @warning blocksToConfirm is 1-based and has to be >= 1
88 void Record(int blocksToConfirm
, double val
);
90 /** Record a new transaction entering the mempool*/
91 unsigned int NewTx(unsigned int nBlockHeight
, double val
);
93 /** Remove a transaction from mempool tracking stats*/
94 void removeTx(unsigned int entryHeight
, unsigned int nBestSeenHeight
,
95 unsigned int bucketIndex
, bool inBlock
);
97 /** Update our estimates by decaying our historical moving average and updating
98 with the data gathered from the current block */
99 void UpdateMovingAverages();
102 * Calculate a feerate estimate. Find the lowest value bucket (or range of buckets
103 * to make sure we have enough data points) whose transactions still have sufficient likelihood
104 * of being confirmed within the target number of confirmations
105 * @param confTarget target number of confirmations
106 * @param sufficientTxVal required average number of transactions per block in a bucket range
107 * @param minSuccess the success probability we require
108 * @param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
109 * return the highest feerate such that all lower values fail minSuccess
110 * @param nBlockHeight the current block height
112 double EstimateMedianVal(int confTarget
, double sufficientTxVal
,
113 double minSuccess
, bool requireGreater
, unsigned int nBlockHeight
,
114 EstimationResult
*result
= nullptr) const;
116 /** Return the max number of confirms we're tracking */
117 unsigned int GetMaxConfirms() const { return scale
* confAvg
.size(); }
119 /** Write state of estimation data to a file*/
120 void Write(CAutoFile
& fileout
) const;
123 * Read saved state of estimation data from a file and replace all internal data structures and
124 * variables with this state.
126 void Read(CAutoFile
& filein
, int nFileVersion
, size_t numBuckets
);
130 TxConfirmStats::TxConfirmStats(const std::vector
<double>& defaultBuckets
,
131 const std::map
<double, unsigned int>& defaultBucketMap
,
132 unsigned int maxPeriods
, double _decay
, unsigned int _scale
)
133 : buckets(defaultBuckets
), bucketMap(defaultBucketMap
)
137 confAvg
.resize(maxPeriods
);
138 for (unsigned int i
= 0; i
< maxPeriods
; i
++) {
139 confAvg
[i
].resize(buckets
.size());
141 failAvg
.resize(maxPeriods
);
142 for (unsigned int i
= 0; i
< maxPeriods
; i
++) {
143 failAvg
[i
].resize(buckets
.size());
146 txCtAvg
.resize(buckets
.size());
147 avg
.resize(buckets
.size());
149 resizeInMemoryCounters(buckets
.size());
152 void TxConfirmStats::resizeInMemoryCounters(size_t newbuckets
) {
153 // newbuckets must be passed in because the buckets referred to during Read have not been updated yet.
154 unconfTxs
.resize(GetMaxConfirms());
155 for (unsigned int i
= 0; i
< unconfTxs
.size(); i
++) {
156 unconfTxs
[i
].resize(newbuckets
);
158 oldUnconfTxs
.resize(newbuckets
);
161 // Roll the unconfirmed txs circular buffer
162 void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight
)
164 for (unsigned int j
= 0; j
< buckets
.size(); j
++) {
165 oldUnconfTxs
[j
] += unconfTxs
[nBlockHeight
%unconfTxs
.size()][j
];
166 unconfTxs
[nBlockHeight
%unconfTxs
.size()][j
] = 0;
171 void TxConfirmStats::Record(int blocksToConfirm
, double val
)
173 // blocksToConfirm is 1-based
174 if (blocksToConfirm
< 1)
176 int periodsToConfirm
= (blocksToConfirm
+ scale
- 1)/scale
;
177 unsigned int bucketindex
= bucketMap
.lower_bound(val
)->second
;
178 for (size_t i
= periodsToConfirm
; i
<= confAvg
.size(); i
++) {
179 confAvg
[i
- 1][bucketindex
]++;
181 txCtAvg
[bucketindex
]++;
182 avg
[bucketindex
] += val
;
185 void TxConfirmStats::UpdateMovingAverages()
187 for (unsigned int j
= 0; j
< buckets
.size(); j
++) {
188 for (unsigned int i
= 0; i
< confAvg
.size(); i
++)
189 confAvg
[i
][j
] = confAvg
[i
][j
] * decay
;
190 for (unsigned int i
= 0; i
< failAvg
.size(); i
++)
191 failAvg
[i
][j
] = failAvg
[i
][j
] * decay
;
192 avg
[j
] = avg
[j
] * decay
;
193 txCtAvg
[j
] = txCtAvg
[j
] * decay
;
197 // returns -1 on error conditions
198 double TxConfirmStats::EstimateMedianVal(int confTarget
, double sufficientTxVal
,
199 double successBreakPoint
, bool requireGreater
,
200 unsigned int nBlockHeight
, EstimationResult
*result
) const
202 // Counters for a bucket (or range of buckets)
203 double nConf
= 0; // Number of tx's confirmed within the confTarget
204 double totalNum
= 0; // Total number of tx's that were ever confirmed
205 int extraNum
= 0; // Number of tx's still in mempool for confTarget or longer
206 double failNum
= 0; // Number of tx's that were never confirmed but removed from the mempool after confTarget
207 int periodTarget
= (confTarget
+ scale
- 1)/scale
;
209 int maxbucketindex
= buckets
.size() - 1;
211 // requireGreater means we are looking for the lowest feerate such that all higher
212 // values pass, so we start at maxbucketindex (highest feerate) and look at successively
213 // smaller buckets until we reach failure. Otherwise, we are looking for the highest
214 // feerate such that all lower values fail, and we go in the opposite direction.
215 unsigned int startbucket
= requireGreater
? maxbucketindex
: 0;
216 int step
= requireGreater
? -1 : 1;
218 // We'll combine buckets until we have enough samples.
219 // The near and far variables will define the range we've combined
220 // The best variables are the last range we saw which still had a high
221 // enough confirmation rate to count as success.
222 // The cur variables are the current range we're counting.
223 unsigned int curNearBucket
= startbucket
;
224 unsigned int bestNearBucket
= startbucket
;
225 unsigned int curFarBucket
= startbucket
;
226 unsigned int bestFarBucket
= startbucket
;
228 bool foundAnswer
= false;
229 unsigned int bins
= unconfTxs
.size();
230 bool newBucketRange
= true;
232 EstimatorBucket passBucket
;
233 EstimatorBucket failBucket
;
235 // Start counting from highest(default) or lowest feerate transactions
236 for (int bucket
= startbucket
; bucket
>= 0 && bucket
<= maxbucketindex
; bucket
+= step
) {
237 if (newBucketRange
) {
238 curNearBucket
= bucket
;
239 newBucketRange
= false;
241 curFarBucket
= bucket
;
242 nConf
+= confAvg
[periodTarget
- 1][bucket
];
243 totalNum
+= txCtAvg
[bucket
];
244 failNum
+= failAvg
[periodTarget
- 1][bucket
];
245 for (unsigned int confct
= confTarget
; confct
< GetMaxConfirms(); confct
++)
246 extraNum
+= unconfTxs
[(nBlockHeight
- confct
)%bins
][bucket
];
247 extraNum
+= oldUnconfTxs
[bucket
];
248 // If we have enough transaction data points in this range of buckets,
249 // we can test for success
250 // (Only count the confirmed data points, so that each confirmation count
251 // will be looking at the same amount of data and same bucket breaks)
252 if (totalNum
>= sufficientTxVal
/ (1 - decay
)) {
253 double curPct
= nConf
/ (totalNum
+ failNum
+ extraNum
);
255 // Check to see if we are no longer getting confirmed at the success rate
256 if ((requireGreater
&& curPct
< successBreakPoint
) || (!requireGreater
&& curPct
> successBreakPoint
)) {
257 if (passing
== true) {
258 // First time we hit a failure record the failed bucket
259 unsigned int failMinBucket
= std::min(curNearBucket
, curFarBucket
);
260 unsigned int failMaxBucket
= std::max(curNearBucket
, curFarBucket
);
261 failBucket
.start
= failMinBucket
? buckets
[failMinBucket
- 1] : 0;
262 failBucket
.end
= buckets
[failMaxBucket
];
263 failBucket
.withinTarget
= nConf
;
264 failBucket
.totalConfirmed
= totalNum
;
265 failBucket
.inMempool
= extraNum
;
266 failBucket
.leftMempool
= failNum
;
271 // Otherwise update the cumulative stats, and the bucket variables
272 // and reset the counters
274 failBucket
= EstimatorBucket(); // Reset any failed bucket, currently passing
277 passBucket
.withinTarget
= nConf
;
279 passBucket
.totalConfirmed
= totalNum
;
281 passBucket
.inMempool
= extraNum
;
282 passBucket
.leftMempool
= failNum
;
285 bestNearBucket
= curNearBucket
;
286 bestFarBucket
= curFarBucket
;
287 newBucketRange
= true;
295 // Calculate the "average" feerate of the best bucket range that met success conditions
296 // Find the bucket with the median transaction and then report the average feerate from that bucket
297 // This is a compromise between finding the median which we can't since we don't save all tx's
298 // and reporting the average which is less accurate
299 unsigned int minBucket
= std::min(bestNearBucket
, bestFarBucket
);
300 unsigned int maxBucket
= std::max(bestNearBucket
, bestFarBucket
);
301 for (unsigned int j
= minBucket
; j
<= maxBucket
; j
++) {
304 if (foundAnswer
&& txSum
!= 0) {
306 for (unsigned int j
= minBucket
; j
<= maxBucket
; j
++) {
307 if (txCtAvg
[j
] < txSum
)
309 else { // we're in the right bucket
310 median
= avg
[j
] / txCtAvg
[j
];
315 passBucket
.start
= minBucket
? buckets
[minBucket
-1] : 0;
316 passBucket
.end
= buckets
[maxBucket
];
319 // If we were passing until we reached last few buckets with insufficient data, then report those as failed
320 if (passing
&& !newBucketRange
) {
321 unsigned int failMinBucket
= std::min(curNearBucket
, curFarBucket
);
322 unsigned int failMaxBucket
= std::max(curNearBucket
, curFarBucket
);
323 failBucket
.start
= failMinBucket
? buckets
[failMinBucket
- 1] : 0;
324 failBucket
.end
= buckets
[failMaxBucket
];
325 failBucket
.withinTarget
= nConf
;
326 failBucket
.totalConfirmed
= totalNum
;
327 failBucket
.inMempool
= extraNum
;
328 failBucket
.leftMempool
= failNum
;
331 LogPrint(BCLog::ESTIMATEFEE
, "FeeEst: %d %s%.0f%% decay %.5f: feerate: %g from (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
332 confTarget
, requireGreater
? ">" : "<", 100.0 * successBreakPoint
, decay
,
333 median
, passBucket
.start
, passBucket
.end
,
334 100 * passBucket
.withinTarget
/ (passBucket
.totalConfirmed
+ passBucket
.inMempool
+ passBucket
.leftMempool
),
335 passBucket
.withinTarget
, passBucket
.totalConfirmed
, passBucket
.inMempool
, passBucket
.leftMempool
,
336 failBucket
.start
, failBucket
.end
,
337 100 * failBucket
.withinTarget
/ (failBucket
.totalConfirmed
+ failBucket
.inMempool
+ failBucket
.leftMempool
),
338 failBucket
.withinTarget
, failBucket
.totalConfirmed
, failBucket
.inMempool
, failBucket
.leftMempool
);
342 result
->pass
= passBucket
;
343 result
->fail
= failBucket
;
344 result
->decay
= decay
;
345 result
->scale
= scale
;
350 void TxConfirmStats::Write(CAutoFile
& fileout
) const
360 void TxConfirmStats::Read(CAutoFile
& filein
, int nFileVersion
, size_t numBuckets
)
362 // Read data file and do some very basic sanity checking
363 // buckets and bucketMap are not updated yet, so don't access them
364 // If there is a read failure, we'll just discard this entire object anyway
365 size_t maxConfirms
, maxPeriods
;
367 // The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
368 if (nFileVersion
>= 149900) {
370 if (decay
<= 0 || decay
>= 1) {
371 throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
377 if (avg
.size() != numBuckets
) {
378 throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
381 if (txCtAvg
.size() != numBuckets
) {
382 throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
385 maxPeriods
= confAvg
.size();
386 maxConfirms
= scale
* maxPeriods
;
388 if (maxConfirms
<= 0 || maxConfirms
> 6 * 24 * 7) { // one week
389 throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
391 for (unsigned int i
= 0; i
< maxPeriods
; i
++) {
392 if (confAvg
[i
].size() != numBuckets
) {
393 throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
397 if (nFileVersion
>= 149900) {
399 if (maxPeriods
!= failAvg
.size()) {
400 throw std::runtime_error("Corrupt estimates file. Mismatch in confirms tracked for failures");
402 for (unsigned int i
= 0; i
< maxPeriods
; i
++) {
403 if (failAvg
[i
].size() != numBuckets
) {
404 throw std::runtime_error("Corrupt estimates file. Mismatch in one of failure average bucket counts");
408 failAvg
.resize(confAvg
.size());
409 for (unsigned int i
= 0; i
< failAvg
.size(); i
++) {
410 failAvg
[i
].resize(numBuckets
);
414 // Resize the current block variables which aren't stored in the data file
415 // to match the number of confirms and buckets
416 resizeInMemoryCounters(numBuckets
);
418 LogPrint(BCLog::ESTIMATEFEE
, "Reading estimates: %u buckets counting confirms up to %u blocks\n",
419 numBuckets
, maxConfirms
);
422 unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight
, double val
)
424 unsigned int bucketindex
= bucketMap
.lower_bound(val
)->second
;
425 unsigned int blockIndex
= nBlockHeight
% unconfTxs
.size();
426 unconfTxs
[blockIndex
][bucketindex
]++;
430 void TxConfirmStats::removeTx(unsigned int entryHeight
, unsigned int nBestSeenHeight
, unsigned int bucketindex
, bool inBlock
)
432 //nBestSeenHeight is not updated yet for the new block
433 int blocksAgo
= nBestSeenHeight
- entryHeight
;
434 if (nBestSeenHeight
== 0) // the BlockPolicyEstimator hasn't seen any blocks yet
437 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy error, blocks ago is negative for mempool tx\n");
438 return; //This can't happen because we call this with our best seen height, no entries can have higher
441 if (blocksAgo
>= (int)unconfTxs
.size()) {
442 if (oldUnconfTxs
[bucketindex
] > 0) {
443 oldUnconfTxs
[bucketindex
]--;
445 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
450 unsigned int blockIndex
= entryHeight
% unconfTxs
.size();
451 if (unconfTxs
[blockIndex
][bucketindex
] > 0) {
452 unconfTxs
[blockIndex
][bucketindex
]--;
454 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
455 blockIndex
, bucketindex
);
458 if (!inBlock
&& (unsigned int)blocksAgo
>= scale
) { // Only counts as a failure if not confirmed for entire period
459 unsigned int periodsAgo
= blocksAgo
/ scale
;
460 for (size_t i
= 0; i
< periodsAgo
&& i
< failAvg
.size(); i
++) {
461 failAvg
[i
][bucketindex
]++;
466 // This function is called from CTxMemPool::removeUnchecked to ensure
467 // txs removed from the mempool for any reason are no longer
468 // tracked. Txs that were part of a block have already been removed in
469 // processBlockTx to ensure they are never double tracked, but it is
470 // of no harm to try to remove them again.
471 bool CBlockPolicyEstimator::removeTx(uint256 hash
, bool inBlock
)
473 LOCK(cs_feeEstimator
);
474 std::map
<uint256
, TxStatsInfo
>::iterator pos
= mapMemPoolTxs
.find(hash
);
475 if (pos
!= mapMemPoolTxs
.end()) {
476 feeStats
->removeTx(pos
->second
.blockHeight
, nBestSeenHeight
, pos
->second
.bucketIndex
, inBlock
);
477 shortStats
->removeTx(pos
->second
.blockHeight
, nBestSeenHeight
, pos
->second
.bucketIndex
, inBlock
);
478 longStats
->removeTx(pos
->second
.blockHeight
, nBestSeenHeight
, pos
->second
.bucketIndex
, inBlock
);
479 mapMemPoolTxs
.erase(hash
);
486 CBlockPolicyEstimator::CBlockPolicyEstimator()
487 : nBestSeenHeight(0), firstRecordedHeight(0), historicalFirst(0), historicalBest(0), trackedTxs(0), untrackedTxs(0)
489 static_assert(MIN_BUCKET_FEERATE
> 0, "Min feerate must be nonzero");
490 size_t bucketIndex
= 0;
491 for (double bucketBoundary
= MIN_BUCKET_FEERATE
; bucketBoundary
<= MAX_BUCKET_FEERATE
; bucketBoundary
*= FEE_SPACING
, bucketIndex
++) {
492 buckets
.push_back(bucketBoundary
);
493 bucketMap
[bucketBoundary
] = bucketIndex
;
495 buckets
.push_back(INF_FEERATE
);
496 bucketMap
[INF_FEERATE
] = bucketIndex
;
497 assert(bucketMap
.size() == buckets
.size());
499 feeStats
= new TxConfirmStats(buckets
, bucketMap
, MED_BLOCK_PERIODS
, MED_DECAY
, MED_SCALE
);
500 shortStats
= new TxConfirmStats(buckets
, bucketMap
, SHORT_BLOCK_PERIODS
, SHORT_DECAY
, SHORT_SCALE
);
501 longStats
= new TxConfirmStats(buckets
, bucketMap
, LONG_BLOCK_PERIODS
, LONG_DECAY
, LONG_SCALE
);
504 CBlockPolicyEstimator::~CBlockPolicyEstimator()
511 void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry
& entry
, bool validFeeEstimate
)
513 LOCK(cs_feeEstimator
);
514 unsigned int txHeight
= entry
.GetHeight();
515 uint256 hash
= entry
.GetTx().GetHash();
516 if (mapMemPoolTxs
.count(hash
)) {
517 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy error mempool tx %s already being tracked\n",
518 hash
.ToString().c_str());
522 if (txHeight
!= nBestSeenHeight
) {
523 // Ignore side chains and re-orgs; assuming they are random they don't
524 // affect the estimate. We'll potentially double count transactions in 1-block reorgs.
525 // Ignore txs if BlockPolicyEstimator is not in sync with chainActive.Tip().
526 // It will be synced next time a block is processed.
530 // Only want to be updating estimates when our blockchain is synced,
531 // otherwise we'll miscalculate how many blocks its taking to get included.
532 if (!validFeeEstimate
) {
538 // Feerates are stored and reported as BTC-per-kb:
539 CFeeRate
feeRate(entry
.GetFee(), entry
.GetTxSize());
541 mapMemPoolTxs
[hash
].blockHeight
= txHeight
;
542 unsigned int bucketIndex
= feeStats
->NewTx(txHeight
, (double)feeRate
.GetFeePerK());
543 mapMemPoolTxs
[hash
].bucketIndex
= bucketIndex
;
544 unsigned int bucketIndex2
= shortStats
->NewTx(txHeight
, (double)feeRate
.GetFeePerK());
545 assert(bucketIndex
== bucketIndex2
);
546 unsigned int bucketIndex3
= longStats
->NewTx(txHeight
, (double)feeRate
.GetFeePerK());
547 assert(bucketIndex
== bucketIndex3
);
550 bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight
, const CTxMemPoolEntry
* entry
)
552 if (!removeTx(entry
->GetTx().GetHash(), true)) {
553 // This transaction wasn't being tracked for fee estimation
557 // How many blocks did it take for miners to include this transaction?
558 // blocksToConfirm is 1-based, so a transaction included in the earliest
559 // possible block has confirmation count of 1
560 int blocksToConfirm
= nBlockHeight
- entry
->GetHeight();
561 if (blocksToConfirm
<= 0) {
562 // This can't happen because we don't process transactions from a block with a height
563 // lower than our greatest seen height
564 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy error Transaction had negative blocksToConfirm\n");
568 // Feerates are stored and reported as BTC-per-kb:
569 CFeeRate
feeRate(entry
->GetFee(), entry
->GetTxSize());
571 feeStats
->Record(blocksToConfirm
, (double)feeRate
.GetFeePerK());
572 shortStats
->Record(blocksToConfirm
, (double)feeRate
.GetFeePerK());
573 longStats
->Record(blocksToConfirm
, (double)feeRate
.GetFeePerK());
577 void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight
,
578 std::vector
<const CTxMemPoolEntry
*>& entries
)
580 LOCK(cs_feeEstimator
);
581 if (nBlockHeight
<= nBestSeenHeight
) {
582 // Ignore side chains and re-orgs; assuming they are random
583 // they don't affect the estimate.
584 // And if an attacker can re-org the chain at will, then
585 // you've got much bigger problems than "attacker can influence
586 // transaction fees."
590 // Must update nBestSeenHeight in sync with ClearCurrent so that
591 // calls to removeTx (via processBlockTx) correctly calculate age
592 // of unconfirmed txs to remove from tracking.
593 nBestSeenHeight
= nBlockHeight
;
595 // Update unconfirmed circular buffer
596 feeStats
->ClearCurrent(nBlockHeight
);
597 shortStats
->ClearCurrent(nBlockHeight
);
598 longStats
->ClearCurrent(nBlockHeight
);
600 // Decay all exponential averages
601 feeStats
->UpdateMovingAverages();
602 shortStats
->UpdateMovingAverages();
603 longStats
->UpdateMovingAverages();
605 unsigned int countedTxs
= 0;
606 // Update averages with data points from current block
607 for (const auto& entry
: entries
) {
608 if (processBlockTx(nBlockHeight
, entry
))
612 if (firstRecordedHeight
== 0 && countedTxs
> 0) {
613 firstRecordedHeight
= nBestSeenHeight
;
614 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy first recorded height %u\n", firstRecordedHeight
);
618 LogPrint(BCLog::ESTIMATEFEE
, "Blockpolicy estimates updated by %u of %u block txs, since last block %u of %u tracked, mempool map size %u, max target %u from %s\n",
619 countedTxs
, entries
.size(), trackedTxs
, trackedTxs
+ untrackedTxs
, mapMemPoolTxs
.size(),
620 MaxUsableEstimate(), HistoricalBlockSpan() > BlockSpan() ? "historical" : "current");
626 CFeeRate
CBlockPolicyEstimator::estimateFee(int confTarget
) const
628 // It's not possible to get reasonable estimates for confTarget of 1
632 return estimateRawFee(confTarget
, DOUBLE_SUCCESS_PCT
, FeeEstimateHorizon::MED_HALFLIFE
);
635 CFeeRate
CBlockPolicyEstimator::estimateRawFee(int confTarget
, double successThreshold
, FeeEstimateHorizon horizon
, EstimationResult
* result
) const
637 TxConfirmStats
* stats
;
638 double sufficientTxs
= SUFFICIENT_FEETXS
;
640 case FeeEstimateHorizon::SHORT_HALFLIFE
: {
642 sufficientTxs
= SUFFICIENT_TXS_SHORT
;
645 case FeeEstimateHorizon::MED_HALFLIFE
: {
649 case FeeEstimateHorizon::LONG_HALFLIFE
: {
658 LOCK(cs_feeEstimator
);
659 // Return failure if trying to analyze a target we're not tracking
660 if (confTarget
<= 0 || (unsigned int)confTarget
> stats
->GetMaxConfirms())
662 if (successThreshold
> 1)
665 double median
= stats
->EstimateMedianVal(confTarget
, sufficientTxs
, successThreshold
, true, nBestSeenHeight
, result
);
670 return CFeeRate(median
);
673 unsigned int CBlockPolicyEstimator::BlockSpan() const
675 if (firstRecordedHeight
== 0) return 0;
676 assert(nBestSeenHeight
>= firstRecordedHeight
);
678 return nBestSeenHeight
- firstRecordedHeight
;
681 unsigned int CBlockPolicyEstimator::HistoricalBlockSpan() const
683 if (historicalFirst
== 0) return 0;
684 assert(historicalBest
>= historicalFirst
);
686 if (nBestSeenHeight
- historicalBest
> OLDEST_ESTIMATE_HISTORY
) return 0;
688 return historicalBest
- historicalFirst
;
691 unsigned int CBlockPolicyEstimator::MaxUsableEstimate() const
693 // Block spans are divided by 2 to make sure there are enough potential failing data points for the estimate
694 return std::min(longStats
->GetMaxConfirms(), std::max(BlockSpan(), HistoricalBlockSpan()) / 2);
697 /** Return a fee estimate at the required successThreshold from the shortest
698 * time horizon which tracks confirmations up to the desired target. If
699 * checkShorterHorizon is requested, also allow short time horizon estimates
700 * for a lower target to reduce the given answer */
701 double CBlockPolicyEstimator::estimateCombinedFee(unsigned int confTarget
, double successThreshold
, bool checkShorterHorizon
) const
703 double estimate
= -1;
704 if (confTarget
>= 1 && confTarget
<= longStats
->GetMaxConfirms()) {
705 // Find estimate from shortest time horizon possible
706 if (confTarget
<= shortStats
->GetMaxConfirms()) { // short horizon
707 estimate
= shortStats
->EstimateMedianVal(confTarget
, SUFFICIENT_TXS_SHORT
, successThreshold
, true, nBestSeenHeight
);
709 else if (confTarget
<= feeStats
->GetMaxConfirms()) { // medium horizon
710 estimate
= feeStats
->EstimateMedianVal(confTarget
, SUFFICIENT_FEETXS
, successThreshold
, true, nBestSeenHeight
);
712 else { // long horizon
713 estimate
= longStats
->EstimateMedianVal(confTarget
, SUFFICIENT_FEETXS
, successThreshold
, true, nBestSeenHeight
);
715 if (checkShorterHorizon
) {
716 // If a lower confTarget from a more recent horizon returns a lower answer use it.
717 if (confTarget
> feeStats
->GetMaxConfirms()) {
718 double medMax
= feeStats
->EstimateMedianVal(feeStats
->GetMaxConfirms(), SUFFICIENT_FEETXS
, successThreshold
, true, nBestSeenHeight
);
719 if (medMax
> 0 && (estimate
== -1 || medMax
< estimate
))
722 if (confTarget
> shortStats
->GetMaxConfirms()) {
723 double shortMax
= shortStats
->EstimateMedianVal(shortStats
->GetMaxConfirms(), SUFFICIENT_TXS_SHORT
, successThreshold
, true, nBestSeenHeight
);
724 if (shortMax
> 0 && (estimate
== -1 || shortMax
< estimate
))
732 /** Ensure that for a conservative estimate, the DOUBLE_SUCCESS_PCT is also met
733 * at 2 * target for any longer time horizons.
735 double CBlockPolicyEstimator::estimateConservativeFee(unsigned int doubleTarget
) const
737 double estimate
= -1;
738 if (doubleTarget
<= shortStats
->GetMaxConfirms()) {
739 estimate
= feeStats
->EstimateMedianVal(doubleTarget
, SUFFICIENT_FEETXS
, DOUBLE_SUCCESS_PCT
, true, nBestSeenHeight
);
741 if (doubleTarget
<= feeStats
->GetMaxConfirms()) {
742 double longEstimate
= longStats
->EstimateMedianVal(doubleTarget
, SUFFICIENT_FEETXS
, DOUBLE_SUCCESS_PCT
, true, nBestSeenHeight
);
743 if (longEstimate
> estimate
) {
744 estimate
= longEstimate
;
750 /** estimateSmartFee returns the max of the feerates calculated with a 60%
751 * threshold required at target / 2, an 85% threshold required at target and a
752 * 95% threshold required at 2 * target. Each calculation is performed at the
753 * shortest time horizon which tracks the required target. Conservative
754 * estimates, however, required the 95% threshold at 2 * target be met for any
755 * longer time horizons also.
757 CFeeRate
CBlockPolicyEstimator::estimateSmartFee(int confTarget
, int *answerFoundAtTarget
, const CTxMemPool
& pool
, bool conservative
) const
759 if (answerFoundAtTarget
)
760 *answerFoundAtTarget
= confTarget
;
764 LOCK(cs_feeEstimator
);
766 // Return failure if trying to analyze a target we're not tracking
767 if (confTarget
<= 0 || (unsigned int)confTarget
> longStats
->GetMaxConfirms())
770 // It's not possible to get reasonable estimates for confTarget of 1
774 unsigned int maxUsableEstimate
= MaxUsableEstimate();
775 if (maxUsableEstimate
<= 1)
778 if ((unsigned int)confTarget
> maxUsableEstimate
) {
779 confTarget
= maxUsableEstimate
;
782 assert(confTarget
> 0); //estimateCombinedFee and estimateConservativeFee take unsigned ints
784 /** true is passed to estimateCombined fee for target/2 and target so
785 * that we check the max confirms for shorter time horizons as well.
786 * This is necessary to preserve monotonically increasing estimates.
787 * For non-conservative estimates we do the same thing for 2*target, but
788 * for conservative estimates we want to skip these shorter horizons
789 * checks for 2*target becuase we are taking the max over all time
790 * horizons so we already have monotonically increasing estimates and
791 * the purpose of conservative estimates is not to let short term
792 * fluctuations lower our estimates by too much.
794 double halfEst
= estimateCombinedFee(confTarget
/2, HALF_SUCCESS_PCT
, true);
795 double actualEst
= estimateCombinedFee(confTarget
, SUCCESS_PCT
, true);
796 double doubleEst
= estimateCombinedFee(2 * confTarget
, DOUBLE_SUCCESS_PCT
, !conservative
);
798 if (actualEst
> median
) {
801 if (doubleEst
> median
) {
805 if (conservative
|| median
== -1) {
806 double consEst
= estimateConservativeFee(2 * confTarget
);
807 if (consEst
> median
) {
811 } // Must unlock cs_feeEstimator before taking mempool locks
813 if (answerFoundAtTarget
)
814 *answerFoundAtTarget
= confTarget
;
816 // If mempool is limiting txs , return at least the min feerate from the mempool
817 CAmount minPoolFee
= pool
.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000).GetFeePerK();
818 if (minPoolFee
> 0 && minPoolFee
> median
)
819 return CFeeRate(minPoolFee
);
824 return CFeeRate(median
);
828 bool CBlockPolicyEstimator::Write(CAutoFile
& fileout
) const
831 LOCK(cs_feeEstimator
);
832 fileout
<< 149900; // version required to read: 0.14.99 or later
833 fileout
<< CLIENT_VERSION
; // version that wrote the file
834 fileout
<< nBestSeenHeight
;
835 if (BlockSpan() > HistoricalBlockSpan()/2) {
836 fileout
<< firstRecordedHeight
<< nBestSeenHeight
;
839 fileout
<< historicalFirst
<< historicalBest
;
842 feeStats
->Write(fileout
);
843 shortStats
->Write(fileout
);
844 longStats
->Write(fileout
);
846 catch (const std::exception
&) {
847 LogPrintf("CBlockPolicyEstimator::Write(): unable to write policy estimator data (non-fatal)\n");
853 bool CBlockPolicyEstimator::Read(CAutoFile
& filein
)
856 LOCK(cs_feeEstimator
);
857 int nVersionRequired
, nVersionThatWrote
;
858 unsigned int nFileBestSeenHeight
, nFileHistoricalFirst
, nFileHistoricalBest
;
859 filein
>> nVersionRequired
>> nVersionThatWrote
;
860 if (nVersionRequired
> CLIENT_VERSION
)
861 return error("CBlockPolicyEstimator::Read(): up-version (%d) fee estimate file", nVersionRequired
);
863 // Read fee estimates file into temporary variables so existing data
864 // structures aren't corrupted if there is an exception.
865 filein
>> nFileBestSeenHeight
;
867 if (nVersionThatWrote
< 149900) {
868 // Read the old fee estimates file for temporary use, but then discard. Will start collecting data from scratch.
869 // decay is stored before buckets in old versions, so pre-read decay and pass into TxConfirmStats constructor
872 if (tempDecay
<= 0 || tempDecay
>= 1)
873 throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
875 std::vector
<double> tempBuckets
;
876 filein
>> tempBuckets
;
877 size_t tempNum
= tempBuckets
.size();
878 if (tempNum
<= 1 || tempNum
> 1000)
879 throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
881 std::map
<double, unsigned int> tempMap
;
883 std::unique_ptr
<TxConfirmStats
> tempFeeStats(new TxConfirmStats(tempBuckets
, tempMap
, MED_BLOCK_PERIODS
, tempDecay
, 1));
884 tempFeeStats
->Read(filein
, nVersionThatWrote
, tempNum
);
885 // if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
888 for (unsigned int i
= 0; i
< tempBuckets
.size(); i
++) {
889 tempMap
[tempBuckets
[i
]] = i
;
892 else { // nVersionThatWrote >= 149900
893 filein
>> nFileHistoricalFirst
>> nFileHistoricalBest
;
894 if (nFileHistoricalFirst
> nFileHistoricalBest
|| nFileHistoricalBest
> nFileBestSeenHeight
) {
895 throw std::runtime_error("Corrupt estimates file. Historical block range for estimates is invalid");
897 std::vector
<double> fileBuckets
;
898 filein
>> fileBuckets
;
899 size_t numBuckets
= fileBuckets
.size();
900 if (numBuckets
<= 1 || numBuckets
> 1000)
901 throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
903 std::unique_ptr
<TxConfirmStats
> fileFeeStats(new TxConfirmStats(buckets
, bucketMap
, MED_BLOCK_PERIODS
, MED_DECAY
, MED_SCALE
));
904 std::unique_ptr
<TxConfirmStats
> fileShortStats(new TxConfirmStats(buckets
, bucketMap
, SHORT_BLOCK_PERIODS
, SHORT_DECAY
, SHORT_SCALE
));
905 std::unique_ptr
<TxConfirmStats
> fileLongStats(new TxConfirmStats(buckets
, bucketMap
, LONG_BLOCK_PERIODS
, LONG_DECAY
, LONG_SCALE
));
906 fileFeeStats
->Read(filein
, nVersionThatWrote
, numBuckets
);
907 fileShortStats
->Read(filein
, nVersionThatWrote
, numBuckets
);
908 fileLongStats
->Read(filein
, nVersionThatWrote
, numBuckets
);
910 // Fee estimates file parsed correctly
911 // Copy buckets from file and refresh our bucketmap
912 buckets
= fileBuckets
;
914 for (unsigned int i
= 0; i
< buckets
.size(); i
++) {
915 bucketMap
[buckets
[i
]] = i
;
918 // Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap
922 feeStats
= fileFeeStats
.release();
923 shortStats
= fileShortStats
.release();
924 longStats
= fileLongStats
.release();
926 nBestSeenHeight
= nFileBestSeenHeight
;
927 historicalFirst
= nFileHistoricalFirst
;
928 historicalBest
= nFileHistoricalBest
;
931 catch (const std::exception
& e
) {
932 LogPrintf("CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal): %s\n",e
.what());
938 void CBlockPolicyEstimator::FlushUnconfirmed(CTxMemPool
& pool
) {
939 int64_t startclear
= GetTimeMicros();
940 std::vector
<uint256
> txids
;
941 pool
.queryHashes(txids
);
942 LOCK(cs_feeEstimator
);
943 for (auto& txid
: txids
) {
944 removeTx(txid
, false);
946 int64_t endclear
= GetTimeMicros();
947 LogPrint(BCLog::ESTIMATEFEE
, "Recorded %u unconfirmed txs from mempool in %ld micros\n",txids
.size(), endclear
- startclear
);
950 FeeFilterRounder::FeeFilterRounder(const CFeeRate
& minIncrementalFee
)
952 CAmount minFeeLimit
= std::max(CAmount(1), minIncrementalFee
.GetFeePerK() / 2);
954 for (double bucketBoundary
= minFeeLimit
; bucketBoundary
<= MAX_FILTER_FEERATE
; bucketBoundary
*= FEE_FILTER_SPACING
) {
955 feeset
.insert(bucketBoundary
);
959 CAmount
FeeFilterRounder::round(CAmount currentMinFee
)
961 std::set
<double>::iterator it
= feeset
.lower_bound(currentMinFee
);
962 if ((it
!= feeset
.begin() && insecure_rand
.rand32() % 3 != 0) || it
== feeset
.end()) {