1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2015 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
9 #include "arith_uint256.h"
10 #include "chainparams.h"
11 #include "checkpoints.h"
12 #include "checkqueue.h"
13 #include "consensus/consensus.h"
14 #include "consensus/merkle.h"
15 #include "consensus/validation.h"
18 #include "merkleblock.h"
20 #include "policy/fees.h"
21 #include "policy/policy.h"
23 #include "primitives/block.h"
24 #include "primitives/transaction.h"
26 #include "script/script.h"
27 #include "script/sigcache.h"
28 #include "script/standard.h"
29 #include "tinyformat.h"
31 #include "txmempool.h"
32 #include "ui_interface.h"
35 #include "utilmoneystr.h"
36 #include "utilstrencodings.h"
37 #include "validationinterface.h"
38 #include "versionbits.h"
43 #include <boost/algorithm/string/replace.hpp>
44 #include <boost/algorithm/string/join.hpp>
45 #include <boost/filesystem.hpp>
46 #include <boost/filesystem/fstream.hpp>
47 #include <boost/math/distributions/poisson.hpp>
48 #include <boost/thread.hpp>
53 # error "Bitcoin cannot be compiled without assertions."
// Global validation / relay state. The odd line breaks and leading numerals
// below are extraction artifacts (original source line numbers fused into the
// text); tokens are preserved byte-for-byte.
// NOTE(review): the embedded line numbers jump (e.g. 82 -> 85), so some
// declarations were dropped by the extraction — reconcile against upstream
// before compiling.
60 CCriticalSection cs_main
;
62 BlockMap mapBlockIndex
;
// Best header we have seen so far (may be ahead of the validated tip).
64 CBlockIndex
*pindexBestHeader
= NULL
;
65 int64_t nTimeBestReceived
= 0;
66 CWaitableCriticalSection csBestBlock
;
67 CConditionVariable cvBlockChange
;
68 int nScriptCheckThreads
= 0;
69 bool fImporting
= false;
70 bool fReindex
= false;
71 bool fTxIndex
= false;
72 bool fHavePruned
= false;
73 bool fPruneMode
= false;
74 bool fIsBareMultisigStd
= DEFAULT_PERMIT_BAREMULTISIG
;
75 bool fRequireStandard
= true;
76 unsigned int nBytesPerSigOp
= DEFAULT_BYTES_PER_SIGOP
;
77 bool fCheckBlockIndex
= false;
78 bool fCheckpointsEnabled
= DEFAULT_CHECKPOINTS_ENABLED
;
// Coin-cache budget; presumably ~entries * bytes-per-entry — TODO confirm units.
79 size_t nCoinCacheUsage
= 5000 * 300;
80 uint64_t nPruneTarget
= 0;
81 int64_t nMaxTipAge
= DEFAULT_MAX_TIP_AGE
;
82 bool fEnableReplacement
= DEFAULT_ENABLE_REPLACEMENT
;
// Fee policy globals; minRelayTxFee is referenced below by mempool/filterRounder,
// so its construction must precede theirs (static init order within this TU).
85 CFeeRate minRelayTxFee
= CFeeRate(DEFAULT_MIN_RELAY_TX_FEE
);
86 CAmount maxTxFee
= DEFAULT_TRANSACTION_MAXFEE
;
88 CTxMemPool
mempool(::minRelayTxFee
);
89 FeeFilterRounder
filterRounder(::minRelayTxFee
);
// Orphan transactions (missing parents), keyed by txid; both maps guarded by cs_main.
95 map
<uint256
, COrphanTx
> mapOrphanTransactions
GUARDED_BY(cs_main
);
96 map
<uint256
, set
<uint256
> > mapOrphanTransactionsByPrev
GUARDED_BY(cs_main
);
97 void EraseOrphansFor(NodeId peer
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
);
100 * Returns true if there are nRequired or more blocks of minVersion or above
101 * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
103 static bool IsSuperMajority(int minVersion
, const CBlockIndex
* pstart
, unsigned nRequired
, const Consensus::Params
& consensusParams
);
104 static void CheckBlockIndex(const Consensus::Params
& consensusParams
);
106 /** Constant stuff for coinbase transactions we create: */
107 CScript COINBASE_FLAGS
;
109 const string strMessageMagic
= "Bitcoin Signed Message:\n";
// Strict-weak ordering for block-index candidates: more chain work sorts
// first, then earlier receipt (lower nSequenceId), then pointer address as a
// deterministic tie breaker.
// NOTE(review): the trailing equality case (return false) and closing braces
// were dropped by the extraction — restore from upstream before compiling.
114 struct CBlockIndexWorkComparator
116 bool operator()(CBlockIndex
*pa
, CBlockIndex
*pb
) const {
117 // First sort by most total work, ...
118 if (pa
->nChainWork
> pb
->nChainWork
) return false;
119 if (pa
->nChainWork
< pb
->nChainWork
) return true;
121 // ... then by earliest time received, ...
122 if (pa
->nSequenceId
< pb
->nSequenceId
) return false;
123 if (pa
->nSequenceId
> pb
->nSequenceId
) return true;
125 // Use pointer address as tie breaker (should only happen with blocks
126 // loaded from disk, as those all have id 0).
127 if (pa
< pb
) return false;
128 if (pa
> pb
) return true;
// Block index with the most work among those known to be invalid, if any.
135 CBlockIndex
*pindexBestInvalid
;
// File-local bookkeeping for block download / relay. Extraction artifacts as
// above: embedded line numbers and split tokens are preserved unchanged.
138 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
139 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
140 * missing the data for the block.
142 set
<CBlockIndex
*, CBlockIndexWorkComparator
> setBlockIndexCandidates
;
143 /** Number of nodes with fSyncStarted. */
144 int nSyncStarted
= 0;
145 /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
146 * Pruned nodes may have entries where B is missing data.
148 multimap
<CBlockIndex
*, CBlockIndex
*> mapBlocksUnlinked
;
150 CCriticalSection cs_LastBlockFile
;
151 std::vector
<CBlockFileInfo
> vinfoBlockFile
;
152 int nLastBlockFile
= 0;
153 /** Global flag to indicate we should check to see if there are
154 * block/undo files that should be deleted. Set on startup
155 * or if we allocate more file space when we're in prune mode
157 bool fCheckForPruning
= false;
160 * Every received block is assigned a unique and increasing identifier, so we
161 * know which one to give priority in case of a fork.
163 CCriticalSection cs_nBlockSequenceId
;
164 /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
165 uint32_t nBlockSequenceId
= 1;
168 * Sources of received blocks, saved to be able to send them reject
169 * messages or ban them when processing happens afterwards. Protected by
172 map
<uint256
, NodeId
> mapBlockSource
;
175 * Filter for transactions that were recently rejected by
176 * AcceptToMemoryPool. These are not rerequested until the chain tip
177 * changes, at which point the entire filter is reset. Protected by
180 * Without this filter we'd be re-requesting txs from each of our peers,
181 * increasing bandwidth consumption considerably. For instance, with 100
182 * peers, half of which relay a tx we don't accept, that might be a 50x
183 * bandwidth increase. A flooding attacker attempting to roll-over the
184 * filter using minimum-sized, 60byte, transactions might manage to send
185 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
186 * two minute window to send invs to us.
188 * Decreasing the false positive rate is fairly cheap, so we pick one in a
189 * million to make it highly unlikely for users to have issues with this
192 * Memory used: 1.3 MB
194 boost::scoped_ptr
<CRollingBloomFilter
> recentRejects
;
195 uint256 hashRecentRejectsChainTip
;
197 /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
// NOTE(review): the QueuedBlock struct header (and its hash member) appear to
// have been dropped here; only two members survive.
200 CBlockIndex
* pindex
; //!< Optional.
201 bool fValidatedHeaders
; //!< Whether this block has validated headers at the time of request.
203 map
<uint256
, pair
<NodeId
, list
<QueuedBlock
>::iterator
> > mapBlocksInFlight
;
205 /** Number of preferable block download peers. */
206 int nPreferredDownload
= 0;
208 /** Dirty block index entries. */
209 set
<CBlockIndex
*> setDirtyBlockIndex
;
211 /** Dirty block file entries. */
212 set
<int> setDirtyFileInfo
;
214 /** Number of peers from which we're downloading blocks. */
215 int nPeersWithValidatedDownloads
= 0;
217 /** Relay map, protected by cs_main. */
218 typedef std::map
<uint256
, std::shared_ptr
<const CTransaction
>> MapRelay
;
220 /** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main). */
221 std::deque
<std::pair
<int64_t, MapRelay::iterator
>> vRelayExpiration
;
// Per-peer validation state (CBlockReject + CNodeState) and its constructor.
// NOTE(review): several member declarations (nMisbehavior, fShouldBan, name,
// address, fSyncStarted, nBlocksInFlight, fPreferHeaders) and the constructor
// signature were dropped by the extraction — the initializer lines below
// reference them, so restore from upstream before compiling.
224 //////////////////////////////////////////////////////////////////////////////
226 // Registration of network node signals.
231 struct CBlockReject
{
232 unsigned char chRejectCode
;
233 string strRejectReason
;
238 * Maintain validation-specific state about nodes, protected by cs_main, instead
239 * by CNode's own locks. This simplifies asynchronous operation, where
240 * processing of incoming data is done after the ProcessMessage call returns,
241 * and we're no longer holding the node's locks.
244 //! The peer's address
246 //! Whether we have a fully established connection.
247 bool fCurrentlyConnected
;
248 //! Accumulated misbehaviour score for this peer.
250 //! Whether this peer should be disconnected and banned (unless whitelisted).
252 //! String name of this peer (debugging/logging purposes).
254 //! List of asynchronously-determined block rejections to notify this peer about.
255 std::vector
<CBlockReject
> rejects
;
256 //! The best known block we know this peer has announced.
257 CBlockIndex
*pindexBestKnownBlock
;
258 //! The hash of the last unknown block this peer has announced.
259 uint256 hashLastUnknownBlock
;
260 //! The last full block we both have.
261 CBlockIndex
*pindexLastCommonBlock
;
262 //! The best header we have sent our peer.
263 CBlockIndex
*pindexBestHeaderSent
;
264 //! Whether we've started headers synchronization with this peer.
266 //! Since when we're stalling block download progress (in microseconds), or 0.
267 int64_t nStallingSince
;
268 list
<QueuedBlock
> vBlocksInFlight
;
269 //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
270 int64_t nDownloadingSince
;
272 int nBlocksInFlightValidHeaders
;
273 //! Whether we consider this a preferred download peer.
274 bool fPreferredDownload
;
275 //! Whether this peer wants invs or headers (when possible) for block announcements.
// Default-constructor body: zero/null every field so a fresh peer starts clean.
279 fCurrentlyConnected
= false;
282 pindexBestKnownBlock
= NULL
;
283 hashLastUnknownBlock
.SetNull();
284 pindexLastCommonBlock
= NULL
;
285 pindexBestHeaderSent
= NULL
;
286 fSyncStarted
= false;
288 nDownloadingSince
= 0;
290 nBlocksInFlightValidHeaders
= 0;
291 fPreferredDownload
= false;
292 fPreferHeaders
= false;
296 /** Map maintaining per-node state. Requires cs_main. */
297 map
<NodeId
, CNodeState
> mapNodeState
;
// State(): look up the CNodeState for a peer id in mapNodeState.
// NOTE(review): original lines 303-309 are missing (presumably the NULL /
// &it->second returns and the GetHeight() wrapper header) — the stray
// "return chainActive.Height();" below belongs to that dropped GetHeight().
300 CNodeState
*State(NodeId pnode
) {
301 map
<NodeId
, CNodeState
>::iterator it
= mapNodeState
.find(pnode
);
302 if (it
== mapNodeState
.end())
310 return chainActive
.Height();
// Recompute the peer's preferred-download flag and keep the global
// nPreferredDownload counter consistent (subtract old value, add new).
313 void UpdatePreferredDownload(CNode
* node
, CNodeState
* state
)
315 nPreferredDownload
-= state
->fPreferredDownload
;
317 // Whether this node should be marked as a preferred download node.
318 state
->fPreferredDownload
= (!node
->fInbound
|| node
->fWhitelisted
) && !node
->fOneShot
&& !node
->fClient
;
320 nPreferredDownload
+= state
->fPreferredDownload
;
// InitializeNode: create the per-peer CNodeState entry and record the peer's
// name/address for later logging and ban handling.
323 void InitializeNode(NodeId nodeid
, const CNode
*pnode
) {
325 CNodeState
&state
= mapNodeState
.insert(std::make_pair(nodeid
, CNodeState())).first
->second
;
326 state
.name
= pnode
->addrName
;
327 state
.address
= pnode
->addr
;
// FinalizeNode: undo everything InitializeNode and the download tracker did
// for this peer — drop in-flight blocks, orphans from this peer, and the
// preferred-download / validated-download counters — then erase its state.
// NOTE(review): several original lines are missing here (e.g. 333, 335-336,
// the nSyncStarted decrement body at 334); restore from upstream.
330 void FinalizeNode(NodeId nodeid
) {
332 CNodeState
*state
= State(nodeid
);
334 if (state
->fSyncStarted
)
// Remember well-behaved, fully-connected peers in the address manager.
337 if (state
->nMisbehavior
== 0 && state
->fCurrentlyConnected
) {
338 AddressCurrentlyConnected(state
->address
);
341 BOOST_FOREACH(const QueuedBlock
& entry
, state
->vBlocksInFlight
) {
342 mapBlocksInFlight
.erase(entry
.hash
);
344 EraseOrphansFor(nodeid
);
345 nPreferredDownload
-= state
->fPreferredDownload
;
346 nPeersWithValidatedDownloads
-= (state
->nBlocksInFlightValidHeaders
!= 0);
347 assert(nPeersWithValidatedDownloads
>= 0);
349 mapNodeState
.erase(nodeid
);
351 if (mapNodeState
.empty()) {
352 // Do a consistency check after the last peer is removed.
353 assert(mapBlocksInFlight
.empty());
354 assert(nPreferredDownload
== 0);
355 assert(nPeersWithValidatedDownloads
== 0);
// MarkBlockAsReceived: remove a block from the in-flight tracking structures,
// updating the owning peer's counters (valid-header count, download timer,
// stalling marker) and the global nPeersWithValidatedDownloads.
360 // Returns a bool indicating whether we requested this block.
361 bool MarkBlockAsReceived(const uint256
& hash
) {
362 map
<uint256
, pair
<NodeId
, list
<QueuedBlock
>::iterator
> >::iterator itInFlight
= mapBlocksInFlight
.find(hash
);
363 if (itInFlight
!= mapBlocksInFlight
.end()) {
364 CNodeState
*state
= State(itInFlight
->second
.first
);
365 state
->nBlocksInFlightValidHeaders
-= itInFlight
->second
.second
->fValidatedHeaders
;
366 if (state
->nBlocksInFlightValidHeaders
== 0 && itInFlight
->second
.second
->fValidatedHeaders
) {
367 // Last validated block on the queue was received.
368 nPeersWithValidatedDownloads
--;
370 if (state
->vBlocksInFlight
.begin() == itInFlight
->second
.second
) {
371 // First block on the queue was received, update the start download time for the next one
372 state
->nDownloadingSince
= std::max(state
->nDownloadingSince
, GetTimeMicros());
374 state
->vBlocksInFlight
.erase(itInFlight
->second
.second
);
375 state
->nBlocksInFlight
--;
376 state
->nStallingSince
= 0;
377 mapBlocksInFlight
.erase(itInFlight
);
// MarkBlockAsInFlight: register a (re)requested block against a peer; the
// prior MarkBlockAsReceived call makes the operation idempotent.
384 void MarkBlockAsInFlight(NodeId nodeid
, const uint256
& hash
, const Consensus::Params
& consensusParams
, CBlockIndex
*pindex
= NULL
) {
385 CNodeState
*state
= State(nodeid
);
386 assert(state
!= NULL
);
388 // Make sure it's not listed somewhere already.
389 MarkBlockAsReceived(hash
);
// fValidatedHeaders is true exactly when a CBlockIndex is known (pindex != NULL).
391 QueuedBlock newentry
= {hash
, pindex
, pindex
!= NULL
};
392 list
<QueuedBlock
>::iterator it
= state
->vBlocksInFlight
.insert(state
->vBlocksInFlight
.end(), newentry
);
393 state
->nBlocksInFlight
++;
394 state
->nBlocksInFlightValidHeaders
+= newentry
.fValidatedHeaders
;
395 if (state
->nBlocksInFlight
== 1) {
396 // We're starting a block download (batch) from this peer.
397 state
->nDownloadingSince
= GetTimeMicros();
399 if (state
->nBlocksInFlightValidHeaders
== 1 && pindex
!= NULL
) {
400 nPeersWithValidatedDownloads
++;
402 mapBlocksInFlight
[hash
] = std::make_pair(nodeid
, it
);
405 /** Check whether the last unknown block a peer advertised is not yet known. */
// Promote hashLastUnknownBlock to pindexBestKnownBlock once the header
// becomes known (nChainWork > 0 means the index entry is linked).
406 void ProcessBlockAvailability(NodeId nodeid
) {
407 CNodeState
*state
= State(nodeid
);
408 assert(state
!= NULL
);
410 if (!state
->hashLastUnknownBlock
.IsNull()) {
411 BlockMap::iterator itOld
= mapBlockIndex
.find(state
->hashLastUnknownBlock
);
412 if (itOld
!= mapBlockIndex
.end() && itOld
->second
->nChainWork
> 0) {
413 if (state
->pindexBestKnownBlock
== NULL
|| itOld
->second
->nChainWork
>= state
->pindexBestKnownBlock
->nChainWork
)
414 state
->pindexBestKnownBlock
= itOld
->second
;
415 state
->hashLastUnknownBlock
.SetNull();
420 /** Update tracking information about which blocks a peer is assumed to have. */
421 void UpdateBlockAvailability(NodeId nodeid
, const uint256
&hash
) {
422 CNodeState
*state
= State(nodeid
);
423 assert(state
!= NULL
);
425 ProcessBlockAvailability(nodeid
);
427 BlockMap::iterator it
= mapBlockIndex
.find(hash
);
428 if (it
!= mapBlockIndex
.end() && it
->second
->nChainWork
> 0) {
429 // An actually better block was announced.
430 if (state
->pindexBestKnownBlock
== NULL
|| it
->second
->nChainWork
>= state
->pindexBestKnownBlock
->nChainWork
)
431 state
->pindexBestKnownBlock
= it
->second
;
433 // An unknown block was announced; just assume that the latest one is the best one.
434 state
->hashLastUnknownBlock
= hash
;
// True when our tip is recent (within ~20 target spacings of adjusted time),
// i.e. direct block fetch is sensible rather than bulk IBD.
439 bool CanDirectFetch(const Consensus::Params
&consensusParams
)
441 return chainActive
.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams
.nPowTargetSpacing
* 20;
// PeerHasHeader: the peer is known to have pindex if it is an ancestor of
// either the best block we know it has, or the best header we sent it.
// NOTE(review): the "return true/false" lines were dropped by the extraction.
445 bool PeerHasHeader(CNodeState
*state
, CBlockIndex
*pindex
)
447 if (state
->pindexBestKnownBlock
&& pindex
== state
->pindexBestKnownBlock
->GetAncestor(pindex
->nHeight
))
449 if (state
->pindexBestHeaderSent
&& pindex
== state
->pindexBestHeaderSent
->GetAncestor(pindex
->nHeight
))
454 /** Find the last common ancestor two blocks have.
455 * Both pa and pb must be non-NULL. */
// Equalize heights first, then walk both chains back in lockstep.
// NOTE(review): the loop body (pa = pa->pprev; pb = pb->pprev;), the final
// assert and the return were dropped by the extraction (lines 461-472 jump).
456 CBlockIndex
* LastCommonAncestor(CBlockIndex
* pa
, CBlockIndex
* pb
) {
457 if (pa
->nHeight
> pb
->nHeight
) {
458 pa
= pa
->GetAncestor(pb
->nHeight
);
459 } else if (pb
->nHeight
> pa
->nHeight
) {
460 pb
= pb
->GetAncestor(pa
->nHeight
);
463 while (pa
!= pb
&& pa
&& pb
) {
468 // Eventually all chain branches meet at the genesis block.
473 /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
474 * at most count entries. */
// Core block-download scheduler: walks forward from the last common block
// toward the peer's best known block within a sliding window, skipping blocks
// we already have or that are in flight, and reports the staller peer id.
475 void FindNextBlocksToDownload(NodeId nodeid
, unsigned int count
, std::vector
<CBlockIndex
*>& vBlocks
, NodeId
& nodeStaller
) {
479 vBlocks
.reserve(vBlocks
.size() + count
);
480 CNodeState
*state
= State(nodeid
);
481 assert(state
!= NULL
);
483 // Make sure pindexBestKnownBlock is up to date, we'll need it.
484 ProcessBlockAvailability(nodeid
);
486 if (state
->pindexBestKnownBlock
== NULL
|| state
->pindexBestKnownBlock
->nChainWork
< chainActive
.Tip()->nChainWork
) {
487 // This peer has nothing interesting.
491 if (state
->pindexLastCommonBlock
== NULL
) {
492 // Bootstrap quickly by guessing a parent of our best tip is the forking point.
493 // Guessing wrong in either direction is not a problem.
494 state
->pindexLastCommonBlock
= chainActive
[std::min(state
->pindexBestKnownBlock
->nHeight
, chainActive
.Height())];
497 // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
498 // of its current tip anymore. Go back enough to fix that.
499 state
->pindexLastCommonBlock
= LastCommonAncestor(state
->pindexLastCommonBlock
, state
->pindexBestKnownBlock
);
500 if (state
->pindexLastCommonBlock
== state
->pindexBestKnownBlock
)
503 std::vector
<CBlockIndex
*> vToFetch
;
504 CBlockIndex
*pindexWalk
= state
->pindexLastCommonBlock
;
505 // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
506 // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
507 // download that next block if the window were 1 larger.
508 int nWindowEnd
= state
->pindexLastCommonBlock
->nHeight
+ BLOCK_DOWNLOAD_WINDOW
;
509 int nMaxHeight
= std::min
<int>(state
->pindexBestKnownBlock
->nHeight
, nWindowEnd
+ 1);
510 NodeId waitingfor
= -1;
511 while (pindexWalk
->nHeight
< nMaxHeight
) {
512 // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
513 // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
514 // as iterating over ~100 CBlockIndex* entries anyway.
515 int nToFetch
= std::min(nMaxHeight
- pindexWalk
->nHeight
, std::max
<int>(count
- vBlocks
.size(), 128));
516 vToFetch
.resize(nToFetch
);
517 pindexWalk
= state
->pindexBestKnownBlock
->GetAncestor(pindexWalk
->nHeight
+ nToFetch
);
518 vToFetch
[nToFetch
- 1] = pindexWalk
;
// Fill vToFetch backwards via pprev links — cheaper than repeated GetAncestor.
519 for (unsigned int i
= nToFetch
- 1; i
> 0; i
--) {
520 vToFetch
[i
- 1] = vToFetch
[i
]->pprev
;
523 // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
524 // are not yet downloaded and not in flight to vBlocks. In the mean time, update
525 // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
526 // already part of our chain (and therefore don't need it even if pruned).
527 BOOST_FOREACH(CBlockIndex
* pindex
, vToFetch
) {
528 if (!pindex
->IsValid(BLOCK_VALID_TREE
)) {
529 // We consider the chain that this peer is on invalid.
532 if (pindex
->nStatus
& BLOCK_HAVE_DATA
|| chainActive
.Contains(pindex
)) {
533 if (pindex
->nChainTx
)
534 state
->pindexLastCommonBlock
= pindex
;
535 } else if (mapBlocksInFlight
.count(pindex
->GetBlockHash()) == 0) {
536 // The block is not already downloaded, and not yet in flight.
537 if (pindex
->nHeight
> nWindowEnd
) {
538 // We reached the end of the window.
539 if (vBlocks
.size() == 0 && waitingfor
!= nodeid
) {
540 // We aren't able to fetch anything, but we would be if the download window was one larger.
541 nodeStaller
= waitingfor
;
545 vBlocks
.push_back(pindex
);
546 if (vBlocks
.size() == count
) {
549 } else if (waitingfor
== -1) {
550 // This is the first already-in-flight block.
551 waitingfor
= mapBlocksInFlight
[pindex
->GetBlockHash()].first
;
// GetNodeStateStats: copy a peer's misbehavior score, sync/common heights and
// in-flight block heights into stats for RPC reporting.
// NOTE(review): the NULL-state early return (orig lines 562-563) was dropped
// by the extraction.
559 bool GetNodeStateStats(NodeId nodeid
, CNodeStateStats
&stats
) {
561 CNodeState
*state
= State(nodeid
);
564 stats
.nMisbehavior
= state
->nMisbehavior
;
565 stats
.nSyncHeight
= state
->pindexBestKnownBlock
? state
->pindexBestKnownBlock
->nHeight
: -1;
566 stats
.nCommonHeight
= state
->pindexLastCommonBlock
? state
->pindexLastCommonBlock
->nHeight
: -1;
567 BOOST_FOREACH(const QueuedBlock
& queue
, state
->vBlocksInFlight
) {
569 stats
.vHeightInFlight
.push_back(queue
.pindex
->nHeight
);
// Wire this translation unit's handlers into the network layer's signal hub.
574 void RegisterNodeSignals(CNodeSignals
& nodeSignals
)
576 nodeSignals
.GetHeight
.connect(&GetHeight
);
577 nodeSignals
.ProcessMessages
.connect(&ProcessMessages
);
578 nodeSignals
.SendMessages
.connect(&SendMessages
);
579 nodeSignals
.InitializeNode
.connect(&InitializeNode
);
580 nodeSignals
.FinalizeNode
.connect(&FinalizeNode
);
// Exact inverse of RegisterNodeSignals; keep the two lists in sync.
583 void UnregisterNodeSignals(CNodeSignals
& nodeSignals
)
585 nodeSignals
.GetHeight
.disconnect(&GetHeight
);
586 nodeSignals
.ProcessMessages
.disconnect(&ProcessMessages
);
587 nodeSignals
.SendMessages
.disconnect(&SendMessages
);
588 nodeSignals
.InitializeNode
.disconnect(&InitializeNode
);
589 nodeSignals
.FinalizeNode
.disconnect(&FinalizeNode
);
// FindForkInGlobalIndex: walk the locator's hashes (best-first) and return the
// first one that lies on the given chain; fall back to genesis.
// NOTE(review): the "return pindex;" inside the Contains() branch (orig lines
// 601-603) was dropped by the extraction.
592 CBlockIndex
* FindForkInGlobalIndex(const CChain
& chain
, const CBlockLocator
& locator
)
594 // Find the first block the caller has in the main chain
595 BOOST_FOREACH(const uint256
& hash
, locator
.vHave
) {
596 BlockMap::iterator mi
= mapBlockIndex
.find(hash
);
597 if (mi
!= mapBlockIndex
.end())
599 CBlockIndex
* pindex
= (*mi
).second
;
600 if (chain
.Contains(pindex
))
604 return chain
.Genesis();
// Global UTXO cache for the active tip and the block-index database handle;
// both are initialized during startup elsewhere.
607 CCoinsViewCache
*pcoinsTip
= NULL
;
608 CBlockTreeDB
*pblocktree
= NULL
;
610 //////////////////////////////////////////////////////////////////////////////
612 // mapOrphanTransactions
// AddOrphanTx: stash a transaction whose inputs are unknown, indexed by its
// own hash and by each prevout hash, attributed to the announcing peer.
// NOTE(review): the size-limit constant and early returns (orig lines
// 616-634 gaps) were dropped by the extraction.
615 bool AddOrphanTx(const CTransaction
& tx
, NodeId peer
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
617 uint256 hash
= tx
.GetHash();
618 if (mapOrphanTransactions
.count(hash
))
621 // Ignore big transactions, to avoid a
622 // send-big-orphans memory exhaustion attack. If a peer has a legitimate
623 // large transaction with a missing parent then we assume
624 // it will rebroadcast it later, after the parent transaction(s)
625 // have been mined or received.
626 // 10,000 orphans, each of which is at most 5,000 bytes big is
627 // at most 500 megabytes of orphans:
628 unsigned int sz
= tx
.GetSerializeSize(SER_NETWORK
, CTransaction::CURRENT_VERSION
);
631 LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz
, hash
.ToString());
635 mapOrphanTransactions
[hash
].tx
= tx
;
636 mapOrphanTransactions
[hash
].fromPeer
= peer
;
637 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
638 mapOrphanTransactionsByPrev
[txin
.prevout
.hash
].insert(hash
);
640 LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash
.ToString(),
641 mapOrphanTransactions
.size(), mapOrphanTransactionsByPrev
.size());
// EraseOrphanTx: remove one orphan and unlink it from every by-prevout
// bucket, deleting buckets that become empty.
645 void static EraseOrphanTx(uint256 hash
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
647 map
<uint256
, COrphanTx
>::iterator it
= mapOrphanTransactions
.find(hash
);
648 if (it
== mapOrphanTransactions
.end())
650 BOOST_FOREACH(const CTxIn
& txin
, it
->second
.tx
.vin
)
652 map
<uint256
, set
<uint256
> >::iterator itPrev
= mapOrphanTransactionsByPrev
.find(txin
.prevout
.hash
);
653 if (itPrev
== mapOrphanTransactionsByPrev
.end())
655 itPrev
->second
.erase(hash
);
656 if (itPrev
->second
.empty())
657 mapOrphanTransactionsByPrev
.erase(itPrev
);
659 mapOrphanTransactions
.erase(it
);
// EraseOrphansFor: drop all orphans attributed to a disconnecting peer.
// NOTE(review): the nErased declaration/increment was dropped by the
// extraction (referenced below).
662 void EraseOrphansFor(NodeId peer
)
665 map
<uint256
, COrphanTx
>::iterator iter
= mapOrphanTransactions
.begin();
666 while (iter
!= mapOrphanTransactions
.end())
668 map
<uint256
, COrphanTx
>::iterator maybeErase
= iter
++; // increment to avoid iterator becoming invalid
669 if (maybeErase
->second
.fromPeer
== peer
)
671 EraseOrphanTx(maybeErase
->second
.tx
.GetHash());
675 if (nErased
> 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased
, peer
);
// LimitOrphanTxSize: evict uniformly-random orphans until under the cap;
// lower_bound on a random hash gives an unbiased pick from the ordered map.
679 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
681 unsigned int nEvicted
= 0;
682 while (mapOrphanTransactions
.size() > nMaxOrphans
)
684 // Evict a random orphan:
685 uint256 randomhash
= GetRandHash();
686 map
<uint256
, COrphanTx
>::iterator it
= mapOrphanTransactions
.lower_bound(randomhash
);
687 if (it
== mapOrphanTransactions
.end())
688 it
= mapOrphanTransactions
.begin();
689 EraseOrphanTx(it
->first
);
// IsFinalTx: nLockTime semantics — a value below LOCKTIME_THRESHOLD is a
// block height, otherwise a unix time; a lock that has passed, or all inputs
// having SEQUENCE_FINAL, makes the tx final.
// NOTE(review): the "return true/false" lines (orig 698, 700, 703-706) were
// dropped by the extraction.
695 bool IsFinalTx(const CTransaction
&tx
, int nBlockHeight
, int64_t nBlockTime
)
697 if (tx
.nLockTime
== 0)
699 if ((int64_t)tx
.nLockTime
< ((int64_t)tx
.nLockTime
< LOCKTIME_THRESHOLD
? (int64_t)nBlockHeight
: nBlockTime
))
701 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
) {
702 if (!(txin
.nSequence
== CTxIn::SEQUENCE_FINAL
))
// CheckFinalTx: wrapper evaluating finality against the *next* block
// (tip height + 1, and tip median-time-past when BIP113 flag is set).
708 bool CheckFinalTx(const CTransaction
&tx
, int flags
)
710 AssertLockHeld(cs_main
);
712 // By convention a negative value for flags indicates that the
713 // current network-enforced consensus rules should be used. In
714 // a future soft-fork scenario that would mean checking which
715 // rules would be enforced for the next block and setting the
716 // appropriate flags. At the present time no soft-forks are
717 // scheduled, so no flags are set.
718 flags
= std::max(flags
, 0);
720 // CheckFinalTx() uses chainActive.Height()+1 to evaluate
721 // nLockTime because when IsFinalTx() is called within
722 // CBlock::AcceptBlock(), the height of the block *being*
723 // evaluated is what is used. Thus if we want to know if a
724 // transaction can be part of the *next* block, we need to call
725 // IsFinalTx() with one more than chainActive.Height().
726 const int nBlockHeight
= chainActive
.Height() + 1;
728 // BIP113 will require that time-locked transactions have nLockTime set to
729 // less than the median time of the previous block they're contained in.
730 // When the next block is created its previous block will be the current
731 // chain tip, so we use that to calculate the median time passed to
732 // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
// NOTE(review): the ": GetAdjustedTime()" alternative of this conditional
// (orig lines 735-736) was dropped by the extraction.
733 const int64_t nBlockTime
= (flags
& LOCKTIME_MEDIAN_TIME_PAST
)
734 ? chainActive
.Tip()->GetMedianTimePast()
737 return IsFinalTx(tx
, nBlockHeight
, nBlockTime
);
741 * Calculates the block height and previous block's median time past at
742 * which the transaction will be considered final in the context of BIP 68.
743 * Also removes from the vector of input heights any entries which did not
744 * correspond to sequence locked inputs as they do not affect the calculation.
// Returns (min height, min time) — the *last invalid* height/time in
// nLockTime semantics; -1/-1 means no constraint.
// NOTE(review): the nMinHeight declaration (orig line ~755) was dropped by
// the extraction — it is used below.
746 static std::pair
<int, int64_t> CalculateSequenceLocks(const CTransaction
&tx
, int flags
, std::vector
<int>* prevHeights
, const CBlockIndex
& block
)
748 assert(prevHeights
->size() == tx
.vin
.size());
750 // Will be set to the equivalent height- and time-based nLockTime
751 // values that would be necessary to satisfy all relative lock-
752 // time constraints given our view of block chain history.
753 // The semantics of nLockTime are the last invalid height/time, so
754 // use -1 to have the effect of any height or time being valid.
756 int64_t nMinTime
= -1;
758 // tx.nVersion is signed integer so requires cast to unsigned otherwise
759 // we would be doing a signed comparison and half the range of nVersion
760 // wouldn't support BIP 68.
761 bool fEnforceBIP68
= static_cast<uint32_t>(tx
.nVersion
) >= 2
762 && flags
& LOCKTIME_VERIFY_SEQUENCE
;
764 // Do not enforce sequence numbers as a relative lock time
765 // unless we have been instructed to
766 if (!fEnforceBIP68
) {
767 return std::make_pair(nMinHeight
, nMinTime
);
770 for (size_t txinIndex
= 0; txinIndex
< tx
.vin
.size(); txinIndex
++) {
771 const CTxIn
& txin
= tx
.vin
[txinIndex
];
773 // Sequence numbers with the most significant bit set are not
774 // treated as relative lock-times, nor are they given any
775 // consensus-enforced meaning at this point.
776 if (txin
.nSequence
& CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG
) {
777 // The height of this input is not relevant for sequence locks
778 (*prevHeights
)[txinIndex
] = 0;
782 int nCoinHeight
= (*prevHeights
)[txinIndex
];
784 if (txin
.nSequence
& CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG
) {
785 int64_t nCoinTime
= block
.GetAncestor(std::max(nCoinHeight
-1, 0))->GetMedianTimePast();
786 // NOTE: Subtract 1 to maintain nLockTime semantics
787 // BIP 68 relative lock times have the semantics of calculating
788 // the first block or time at which the transaction would be
789 // valid. When calculating the effective block time or height
790 // for the entire transaction, we switch to using the
791 // semantics of nLockTime which is the last invalid block
792 // time or height. Thus we subtract 1 from the calculated
795 // Time-based relative lock-times are measured from the
796 // smallest allowed timestamp of the block containing the
797 // txout being spent, which is the median time past of the
799 nMinTime
= std::max(nMinTime
, nCoinTime
+ (int64_t)((txin
.nSequence
& CTxIn::SEQUENCE_LOCKTIME_MASK
) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY
) - 1);
801 nMinHeight
= std::max(nMinHeight
, nCoinHeight
+ (int)(txin
.nSequence
& CTxIn::SEQUENCE_LOCKTIME_MASK
) - 1);
805 return std::make_pair(nMinHeight
, nMinTime
);
// EvaluateSequenceLocks: a lockPair is satisfied for `block` iff its height
// is below block.nHeight and its time below the previous block's MTP.
// NOTE(review): the pprev assert and return lines (orig 809-810, 813-815)
// were dropped by the extraction.
808 static bool EvaluateSequenceLocks(const CBlockIndex
& block
, std::pair
<int, int64_t> lockPair
)
811 int64_t nBlockTime
= block
.pprev
->GetMedianTimePast();
812 if (lockPair
.first
>= block
.nHeight
|| lockPair
.second
>= nBlockTime
)
// Convenience wrapper: calculate and evaluate in one step.
818 bool SequenceLocks(const CTransaction
&tx
, int flags
, std::vector
<int>* prevHeights
, const CBlockIndex
& block
)
820 return EvaluateSequenceLocks(block
, CalculateSequenceLocks(tx
, flags
, prevHeights
, block
));
// TestLockPointValidity: cached LockPoints remain valid only while their
// maxInputBlock is still on the active chain.
// NOTE(review): the return statements (orig 833-836, 838) were dropped by
// the extraction.
823 bool TestLockPointValidity(const LockPoints
* lp
)
825 AssertLockHeld(cs_main
);
827 // If there are relative lock times then the maxInputBlock will be set
828 // If there are no relative lock times, the LockPoints don't depend on the chain
829 if (lp
->maxInputBlock
) {
830 // Check whether chainActive is an extension of the block at which the LockPoints
831 // calculation was valid. If not LockPoints are no longer valid
832 if (!chainActive
.Contains(lp
->maxInputBlock
)) {
837 // LockPoints still valid
// CheckSequenceLocks: evaluate BIP68 locks for a mempool transaction against
// a hypothetical next block (tip height + 1), optionally reusing or filling
// the caller's LockPoints cache.
// NOTE(review): the `CBlockIndex index;` local and the `CCoins coins;` local
// (orig lines ~847, ~870) were dropped by the extraction — both are used
// below; restore from upstream.
841 bool CheckSequenceLocks(const CTransaction
&tx
, int flags
, LockPoints
* lp
, bool useExistingLockPoints
)
843 AssertLockHeld(cs_main
);
844 AssertLockHeld(mempool
.cs
);
846 CBlockIndex
* tip
= chainActive
.Tip();
849 // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
850 // height based locks because when SequenceLocks() is called within
851 // ConnectBlock(), the height of the block *being*
852 // evaluated is what is used.
853 // Thus if we want to know if a transaction can be part of the
854 // *next* block, we need to use one more than chainActive.Height()
855 index
.nHeight
= tip
->nHeight
+ 1;
857 std::pair
<int, int64_t> lockPair
;
858 if (useExistingLockPoints
) {
860 lockPair
.first
= lp
->height
;
861 lockPair
.second
= lp
->time
;
864 // pcoinsTip contains the UTXO set for chainActive.Tip()
865 CCoinsViewMemPool
viewMemPool(pcoinsTip
, mempool
);
866 std::vector
<int> prevheights
;
867 prevheights
.resize(tx
.vin
.size());
868 for (size_t txinIndex
= 0; txinIndex
< tx
.vin
.size(); txinIndex
++) {
869 const CTxIn
& txin
= tx
.vin
[txinIndex
];
871 if (!viewMemPool
.GetCoins(txin
.prevout
.hash
, coins
)) {
872 return error("%s: Missing input", __func__
);
874 if (coins
.nHeight
== MEMPOOL_HEIGHT
) {
875 // Assume all mempool transaction confirm in the next block
876 prevheights
[txinIndex
] = tip
->nHeight
+ 1;
878 prevheights
[txinIndex
] = coins
.nHeight
;
881 lockPair
= CalculateSequenceLocks(tx
, flags
, &prevheights
, index
);
883 lp
->height
= lockPair
.first
;
884 lp
->time
= lockPair
.second
;
885 // Also store the hash of the block with the highest height of
886 // all the blocks which have sequence locked prevouts.
887 // This hash needs to still be on the chain
888 // for these LockPoint calculations to be valid
889 // Note: It is impossible to correctly calculate a maxInputBlock
890 // if any of the sequence locked inputs depend on unconfirmed txs,
891 // except in the special case where the relative lock time/height
892 // is 0, which is equivalent to no sequence lock. Since we assume
893 // input height of tip+1 for mempool txs and test the resulting
894 // lockPair from CalculateSequenceLocks against tip+1. We know
895 // EvaluateSequenceLocks will fail if there was a non-zero sequence
896 // lock on a mempool input, so we can use the return value of
897 // CheckSequenceLocks to indicate the LockPoints validity
898 int maxInputHeight
= 0;
899 BOOST_FOREACH(int height
, prevheights
) {
900 // Can ignore mempool inputs since we'll fail if they had non-zero locks
901 if (height
!= tip
->nHeight
+1) {
902 maxInputHeight
= std::max(maxInputHeight
, height
);
905 lp
->maxInputBlock
= tip
->GetAncestor(maxInputHeight
);
908 return EvaluateSequenceLocks(index
, lockPair
);
912 unsigned int GetLegacySigOpCount(const CTransaction
& tx
)
914 unsigned int nSigOps
= 0;
915 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
917 nSigOps
+= txin
.scriptSig
.GetSigOpCount(false);
919 BOOST_FOREACH(const CTxOut
& txout
, tx
.vout
)
921 nSigOps
+= txout
.scriptPubKey
.GetSigOpCount(false);
926 unsigned int GetP2SHSigOpCount(const CTransaction
& tx
, const CCoinsViewCache
& inputs
)
931 unsigned int nSigOps
= 0;
932 for (unsigned int i
= 0; i
< tx
.vin
.size(); i
++)
934 const CTxOut
&prevout
= inputs
.GetOutputFor(tx
.vin
[i
]);
935 if (prevout
.scriptPubKey
.IsPayToScriptHash())
936 nSigOps
+= prevout
.scriptPubKey
.GetSigOpCount(tx
.vin
[i
].scriptSig
);
948 bool CheckTransaction(const CTransaction
& tx
, CValidationState
&state
)
950 // Basic checks that don't depend on any context
952 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-vin-empty");
954 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-vout-empty");
956 if (::GetSerializeSize(tx
, SER_NETWORK
, PROTOCOL_VERSION
) > MAX_BLOCK_SIZE
)
957 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-oversize");
959 // Check for negative or overflow output values
960 CAmount nValueOut
= 0;
961 BOOST_FOREACH(const CTxOut
& txout
, tx
.vout
)
963 if (txout
.nValue
< 0)
964 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-vout-negative");
965 if (txout
.nValue
> MAX_MONEY
)
966 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-vout-toolarge");
967 nValueOut
+= txout
.nValue
;
968 if (!MoneyRange(nValueOut
))
969 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-txouttotal-toolarge");
972 // Check for duplicate inputs
973 set
<COutPoint
> vInOutPoints
;
974 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
976 if (vInOutPoints
.count(txin
.prevout
))
977 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-inputs-duplicate");
978 vInOutPoints
.insert(txin
.prevout
);
983 if (tx
.vin
[0].scriptSig
.size() < 2 || tx
.vin
[0].scriptSig
.size() > 100)
984 return state
.DoS(100, false, REJECT_INVALID
, "bad-cb-length");
988 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
989 if (txin
.prevout
.IsNull())
990 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-prevout-null");
996 void LimitMempoolSize(CTxMemPool
& pool
, size_t limit
, unsigned long age
) {
997 int expired
= pool
.Expire(GetTime() - age
);
999 LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired
);
1001 std::vector
<uint256
> vNoSpendsRemaining
;
1002 pool
.TrimToSize(limit
, &vNoSpendsRemaining
);
1003 BOOST_FOREACH(const uint256
& removed
, vNoSpendsRemaining
)
1004 pcoinsTip
->Uncache(removed
);
1007 /** Convert CValidationState to a human-readable message for logging */
1008 std::string
FormatStateMessage(const CValidationState
&state
)
1010 return strprintf("%s%s (code %i)",
1011 state
.GetRejectReason(),
1012 state
.GetDebugMessage().empty() ? "" : ", "+state
.GetDebugMessage(),
1013 state
.GetRejectCode());
1016 bool AcceptToMemoryPoolWorker(CTxMemPool
& pool
, CValidationState
& state
, const CTransaction
& tx
, bool fLimitFree
,
1017 bool* pfMissingInputs
, bool fOverrideMempoolLimit
, const CAmount
& nAbsurdFee
,
1018 std::vector
<uint256
>& vHashTxnToUncache
)
1020 const uint256 hash
= tx
.GetHash();
1021 AssertLockHeld(cs_main
);
1022 if (pfMissingInputs
)
1023 *pfMissingInputs
= false;
1025 if (!CheckTransaction(tx
, state
))
1026 return false; // state filled in by CheckTransaction
1028 // Coinbase is only valid in a block, not as a loose transaction
1029 if (tx
.IsCoinBase())
1030 return state
.DoS(100, false, REJECT_INVALID
, "coinbase");
1032 // Rather not work on nonstandard transactions (unless -testnet/-regtest)
1034 if (fRequireStandard
&& !IsStandardTx(tx
, reason
))
1035 return state
.DoS(0, false, REJECT_NONSTANDARD
, reason
);
1037 // Don't relay version 2 transactions until CSV is active, and we can be
1038 // sure that such transactions will be mined (unless we're on
1039 // -testnet/-regtest).
1040 const CChainParams
& chainparams
= Params();
1041 if (fRequireStandard
&& tx
.nVersion
>= 2 && VersionBitsTipState(chainparams
.GetConsensus(), Consensus::DEPLOYMENT_CSV
) != THRESHOLD_ACTIVE
) {
1042 return state
.DoS(0, false, REJECT_NONSTANDARD
, "premature-version2-tx");
1045 // Only accept nLockTime-using transactions that can be mined in the next
1046 // block; we don't want our mempool filled up with transactions that can't
1048 if (!CheckFinalTx(tx
, STANDARD_LOCKTIME_VERIFY_FLAGS
))
1049 return state
.DoS(0, false, REJECT_NONSTANDARD
, "non-final");
1051 // is it already in the memory pool?
1052 if (pool
.exists(hash
))
1053 return state
.Invalid(false, REJECT_ALREADY_KNOWN
, "txn-already-in-mempool");
1055 // Check for conflicts with in-memory transactions
1056 set
<uint256
> setConflicts
;
1058 LOCK(pool
.cs
); // protect pool.mapNextTx
1059 BOOST_FOREACH(const CTxIn
&txin
, tx
.vin
)
1061 auto itConflicting
= pool
.mapNextTx
.find(txin
.prevout
);
1062 if (itConflicting
!= pool
.mapNextTx
.end())
1064 const CTransaction
*ptxConflicting
= itConflicting
->second
;
1065 if (!setConflicts
.count(ptxConflicting
->GetHash()))
1067 // Allow opt-out of transaction replacement by setting
1068 // nSequence >= maxint-1 on all inputs.
1070 // maxint-1 is picked to still allow use of nLockTime by
1071 // non-replacable transactions. All inputs rather than just one
1072 // is for the sake of multi-party protocols, where we don't
1073 // want a single party to be able to disable replacement.
1075 // The opt-out ignores descendants as anyone relying on
1076 // first-seen mempool behavior should be checking all
1077 // unconfirmed ancestors anyway; doing otherwise is hopelessly
1079 bool fReplacementOptOut
= true;
1080 if (fEnableReplacement
)
1082 BOOST_FOREACH(const CTxIn
&txin
, ptxConflicting
->vin
)
1084 if (txin
.nSequence
< std::numeric_limits
<unsigned int>::max()-1)
1086 fReplacementOptOut
= false;
1091 if (fReplacementOptOut
)
1092 return state
.Invalid(false, REJECT_CONFLICT
, "txn-mempool-conflict");
1094 setConflicts
.insert(ptxConflicting
->GetHash());
1102 CCoinsViewCache
view(&dummy
);
1104 CAmount nValueIn
= 0;
1108 CCoinsViewMemPool
viewMemPool(pcoinsTip
, pool
);
1109 view
.SetBackend(viewMemPool
);
1111 // do we already have it?
1112 bool fHadTxInCache
= pcoinsTip
->HaveCoinsInCache(hash
);
1113 if (view
.HaveCoins(hash
)) {
1115 vHashTxnToUncache
.push_back(hash
);
1116 return state
.Invalid(false, REJECT_ALREADY_KNOWN
, "txn-already-known");
1119 // do all inputs exist?
1120 // Note that this does not check for the presence of actual outputs (see the next check for that),
1121 // and only helps with filling in pfMissingInputs (to determine missing vs spent).
1122 BOOST_FOREACH(const CTxIn txin
, tx
.vin
) {
1123 if (!pcoinsTip
->HaveCoinsInCache(txin
.prevout
.hash
))
1124 vHashTxnToUncache
.push_back(txin
.prevout
.hash
);
1125 if (!view
.HaveCoins(txin
.prevout
.hash
)) {
1126 if (pfMissingInputs
)
1127 *pfMissingInputs
= true;
1128 return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
1132 // are the actual inputs available?
1133 if (!view
.HaveInputs(tx
))
1134 return state
.Invalid(false, REJECT_DUPLICATE
, "bad-txns-inputs-spent");
1136 // Bring the best block into scope
1137 view
.GetBestBlock();
1139 nValueIn
= view
.GetValueIn(tx
);
1141 // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
1142 view
.SetBackend(dummy
);
1144 // Only accept BIP68 sequence locked transactions that can be mined in the next
1145 // block; we don't want our mempool filled up with transactions that can't
1147 // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
1148 // CoinsViewCache instead of create its own
1149 if (!CheckSequenceLocks(tx
, STANDARD_LOCKTIME_VERIFY_FLAGS
, &lp
))
1150 return state
.DoS(0, false, REJECT_NONSTANDARD
, "non-BIP68-final");
1153 // Check for non-standard pay-to-script-hash in inputs
1154 if (fRequireStandard
&& !AreInputsStandard(tx
, view
))
1155 return state
.Invalid(false, REJECT_NONSTANDARD
, "bad-txns-nonstandard-inputs");
1157 unsigned int nSigOps
= GetLegacySigOpCount(tx
);
1158 nSigOps
+= GetP2SHSigOpCount(tx
, view
);
1160 CAmount nValueOut
= tx
.GetValueOut();
1161 CAmount nFees
= nValueIn
-nValueOut
;
1162 // nModifiedFees includes any fee deltas from PrioritiseTransaction
1163 CAmount nModifiedFees
= nFees
;
1164 double nPriorityDummy
= 0;
1165 pool
.ApplyDeltas(hash
, nPriorityDummy
, nModifiedFees
);
1167 CAmount inChainInputValue
;
1168 double dPriority
= view
.GetPriority(tx
, chainActive
.Height(), inChainInputValue
);
1170 // Keep track of transactions that spend a coinbase, which we re-scan
1171 // during reorgs to ensure COINBASE_MATURITY is still met.
1172 bool fSpendsCoinbase
= false;
1173 BOOST_FOREACH(const CTxIn
&txin
, tx
.vin
) {
1174 const CCoins
*coins
= view
.AccessCoins(txin
.prevout
.hash
);
1175 if (coins
->IsCoinBase()) {
1176 fSpendsCoinbase
= true;
1181 CTxMemPoolEntry
entry(tx
, nFees
, GetTime(), dPriority
, chainActive
.Height(), pool
.HasNoInputsOf(tx
), inChainInputValue
, fSpendsCoinbase
, nSigOps
, lp
);
1182 unsigned int nSize
= entry
.GetTxSize();
1184 // Check that the transaction doesn't have an excessive number of
1185 // sigops, making it impossible to mine. Since the coinbase transaction
1186 // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
1187 // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
1188 // merely non-standard transaction.
1189 if ((nSigOps
> MAX_STANDARD_TX_SIGOPS
) || (nBytesPerSigOp
&& nSigOps
> nSize
/ nBytesPerSigOp
))
1190 return state
.DoS(0, false, REJECT_NONSTANDARD
, "bad-txns-too-many-sigops", false,
1191 strprintf("%d", nSigOps
));
1193 CAmount mempoolRejectFee
= pool
.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000).GetFee(nSize
);
1194 if (mempoolRejectFee
> 0 && nModifiedFees
< mempoolRejectFee
) {
1195 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "mempool min fee not met", false, strprintf("%d < %d", nFees
, mempoolRejectFee
));
1196 } else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY
) && nModifiedFees
< ::minRelayTxFee
.GetFee(nSize
) && !AllowFree(entry
.GetPriority(chainActive
.Height() + 1))) {
1197 // Require that free transactions have sufficient priority to be mined in the next block.
1198 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "insufficient priority");
1201 // Continuously rate-limit free (really, very-low-fee) transactions
1202 // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
1203 // be annoying or make others' transactions take longer to confirm.
1204 if (fLimitFree
&& nModifiedFees
< ::minRelayTxFee
.GetFee(nSize
))
1206 static CCriticalSection csFreeLimiter
;
1207 static double dFreeCount
;
1208 static int64_t nLastTime
;
1209 int64_t nNow
= GetTime();
1211 LOCK(csFreeLimiter
);
1213 // Use an exponentially decaying ~10-minute window:
1214 dFreeCount
*= pow(1.0 - 1.0/600.0, (double)(nNow
- nLastTime
));
1216 // -limitfreerelay unit is thousand-bytes-per-minute
1217 // At default rate it would take over a month to fill 1GB
1218 if (dFreeCount
+ nSize
>= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY
) * 10 * 1000)
1219 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "rate limited free transaction");
1220 LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount
, dFreeCount
+nSize
);
1221 dFreeCount
+= nSize
;
1224 if (nAbsurdFee
&& nFees
> nAbsurdFee
)
1225 return state
.Invalid(false,
1226 REJECT_HIGHFEE
, "absurdly-high-fee",
1227 strprintf("%d > %d", nFees
, nAbsurdFee
));
1229 // Calculate in-mempool ancestors, up to a limit.
1230 CTxMemPool::setEntries setAncestors
;
1231 size_t nLimitAncestors
= GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT
);
1232 size_t nLimitAncestorSize
= GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT
)*1000;
1233 size_t nLimitDescendants
= GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT
);
1234 size_t nLimitDescendantSize
= GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT
)*1000;
1235 std::string errString
;
1236 if (!pool
.CalculateMemPoolAncestors(entry
, setAncestors
, nLimitAncestors
, nLimitAncestorSize
, nLimitDescendants
, nLimitDescendantSize
, errString
)) {
1237 return state
.DoS(0, false, REJECT_NONSTANDARD
, "too-long-mempool-chain", false, errString
);
1240 // A transaction that spends outputs that would be replaced by it is invalid. Now
1241 // that we have the set of all ancestors we can detect this
1242 // pathological case by making sure setConflicts and setAncestors don't
1244 BOOST_FOREACH(CTxMemPool::txiter ancestorIt
, setAncestors
)
1246 const uint256
&hashAncestor
= ancestorIt
->GetTx().GetHash();
1247 if (setConflicts
.count(hashAncestor
))
1249 return state
.DoS(10, false,
1250 REJECT_INVALID
, "bad-txns-spends-conflicting-tx", false,
1251 strprintf("%s spends conflicting transaction %s",
1253 hashAncestor
.ToString()));
1257 // Check if it's economically rational to mine this transaction rather
1258 // than the ones it replaces.
1259 CAmount nConflictingFees
= 0;
1260 size_t nConflictingSize
= 0;
1261 uint64_t nConflictingCount
= 0;
1262 CTxMemPool::setEntries allConflicting
;
1264 // If we don't hold the lock allConflicting might be incomplete; the
1265 // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
1266 // mempool consistency for us.
1268 if (setConflicts
.size())
1270 CFeeRate
newFeeRate(nModifiedFees
, nSize
);
1271 set
<uint256
> setConflictsParents
;
1272 const int maxDescendantsToVisit
= 100;
1273 CTxMemPool::setEntries setIterConflicting
;
1274 BOOST_FOREACH(const uint256
&hashConflicting
, setConflicts
)
1276 CTxMemPool::txiter mi
= pool
.mapTx
.find(hashConflicting
);
1277 if (mi
== pool
.mapTx
.end())
1280 // Save these to avoid repeated lookups
1281 setIterConflicting
.insert(mi
);
1283 // Don't allow the replacement to reduce the feerate of the
1286 // We usually don't want to accept replacements with lower
1287 // feerates than what they replaced as that would lower the
1288 // feerate of the next block. Requiring that the feerate always
1289 // be increased is also an easy-to-reason about way to prevent
1290 // DoS attacks via replacements.
1292 // The mining code doesn't (currently) take children into
1293 // account (CPFP) so we only consider the feerates of
1294 // transactions being directly replaced, not their indirect
1295 // descendants. While that does mean high feerate children are
1296 // ignored when deciding whether or not to replace, we do
1297 // require the replacement to pay more overall fees too,
1298 // mitigating most cases.
1299 CFeeRate
oldFeeRate(mi
->GetModifiedFee(), mi
->GetTxSize());
1300 if (newFeeRate
<= oldFeeRate
)
1302 return state
.DoS(0, false,
1303 REJECT_INSUFFICIENTFEE
, "insufficient fee", false,
1304 strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
1306 newFeeRate
.ToString(),
1307 oldFeeRate
.ToString()));
1310 BOOST_FOREACH(const CTxIn
&txin
, mi
->GetTx().vin
)
1312 setConflictsParents
.insert(txin
.prevout
.hash
);
1315 nConflictingCount
+= mi
->GetCountWithDescendants();
1317 // This potentially overestimates the number of actual descendants
1318 // but we just want to be conservative to avoid doing too much
1320 if (nConflictingCount
<= maxDescendantsToVisit
) {
1321 // If not too many to replace, then calculate the set of
1322 // transactions that would have to be evicted
1323 BOOST_FOREACH(CTxMemPool::txiter it
, setIterConflicting
) {
1324 pool
.CalculateDescendants(it
, allConflicting
);
1326 BOOST_FOREACH(CTxMemPool::txiter it
, allConflicting
) {
1327 nConflictingFees
+= it
->GetModifiedFee();
1328 nConflictingSize
+= it
->GetTxSize();
1331 return state
.DoS(0, false,
1332 REJECT_NONSTANDARD
, "too many potential replacements", false,
1333 strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
1336 maxDescendantsToVisit
));
1339 for (unsigned int j
= 0; j
< tx
.vin
.size(); j
++)
1341 // We don't want to accept replacements that require low
1342 // feerate junk to be mined first. Ideally we'd keep track of
1343 // the ancestor feerates and make the decision based on that,
1344 // but for now requiring all new inputs to be confirmed works.
1345 if (!setConflictsParents
.count(tx
.vin
[j
].prevout
.hash
))
1347 // Rather than check the UTXO set - potentially expensive -
1348 // it's cheaper to just check if the new input refers to a
1349 // tx that's in the mempool.
1350 if (pool
.mapTx
.find(tx
.vin
[j
].prevout
.hash
) != pool
.mapTx
.end())
1351 return state
.DoS(0, false,
1352 REJECT_NONSTANDARD
, "replacement-adds-unconfirmed", false,
1353 strprintf("replacement %s adds unconfirmed input, idx %d",
1354 hash
.ToString(), j
));
1358 // The replacement must pay greater fees than the transactions it
1359 // replaces - if we did the bandwidth used by those conflicting
1360 // transactions would not be paid for.
1361 if (nModifiedFees
< nConflictingFees
)
1363 return state
.DoS(0, false,
1364 REJECT_INSUFFICIENTFEE
, "insufficient fee", false,
1365 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
1366 hash
.ToString(), FormatMoney(nModifiedFees
), FormatMoney(nConflictingFees
)));
1369 // Finally in addition to paying more fees than the conflicts the
1370 // new transaction must pay for its own bandwidth.
1371 CAmount nDeltaFees
= nModifiedFees
- nConflictingFees
;
1372 if (nDeltaFees
< ::minRelayTxFee
.GetFee(nSize
))
1374 return state
.DoS(0, false,
1375 REJECT_INSUFFICIENTFEE
, "insufficient fee", false,
1376 strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
1378 FormatMoney(nDeltaFees
),
1379 FormatMoney(::minRelayTxFee
.GetFee(nSize
))));
1383 // Check against previous transactions
1384 // This is done last to help prevent CPU exhaustion denial-of-service attacks.
1385 if (!CheckInputs(tx
, state
, view
, true, STANDARD_SCRIPT_VERIFY_FLAGS
, true))
1386 return false; // state filled in by CheckInputs
1388 // Check again against just the consensus-critical mandatory script
1389 // verification flags, in case of bugs in the standard flags that cause
1390 // transactions to pass as valid when they're actually invalid. For
1391 // instance the STRICTENC flag was incorrectly allowing certain
1392 // CHECKSIG NOT scripts to pass, even though they were invalid.
1394 // There is a similar check in CreateNewBlock() to prevent creating
1395 // invalid blocks, however allowing such transactions into the mempool
1396 // can be exploited as a DoS attack.
1397 if (!CheckInputs(tx
, state
, view
, true, MANDATORY_SCRIPT_VERIFY_FLAGS
, true))
1399 return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
1400 __func__
, hash
.ToString(), FormatStateMessage(state
));
1403 // Remove conflicting transactions from the mempool
1404 BOOST_FOREACH(const CTxMemPool::txiter it
, allConflicting
)
1406 LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
1407 it
->GetTx().GetHash().ToString(),
1409 FormatMoney(nModifiedFees
- nConflictingFees
),
1410 (int)nSize
- (int)nConflictingSize
);
1412 pool
.RemoveStaged(allConflicting
, false);
1414 // Store transaction in memory
1415 pool
.addUnchecked(hash
, entry
, setAncestors
, !IsInitialBlockDownload());
1417 // trim mempool and check if tx was trimmed
1418 if (!fOverrideMempoolLimit
) {
1419 LimitMempoolSize(pool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
1420 if (!pool
.exists(hash
))
1421 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "mempool full");
1425 SyncWithWallets(tx
, NULL
, NULL
);
1430 bool AcceptToMemoryPool(CTxMemPool
& pool
, CValidationState
&state
, const CTransaction
&tx
, bool fLimitFree
,
1431 bool* pfMissingInputs
, bool fOverrideMempoolLimit
, const CAmount nAbsurdFee
)
1433 std::vector
<uint256
> vHashTxToUncache
;
1434 bool res
= AcceptToMemoryPoolWorker(pool
, state
, tx
, fLimitFree
, pfMissingInputs
, fOverrideMempoolLimit
, nAbsurdFee
, vHashTxToUncache
);
1436 BOOST_FOREACH(const uint256
& hashTx
, vHashTxToUncache
)
1437 pcoinsTip
->Uncache(hashTx
);
1442 /** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
1443 bool GetTransaction(const uint256
&hash
, CTransaction
&txOut
, const Consensus::Params
& consensusParams
, uint256
&hashBlock
, bool fAllowSlow
)
1445 CBlockIndex
*pindexSlow
= NULL
;
1449 std::shared_ptr
<const CTransaction
> ptx
= mempool
.get(hash
);
1458 if (pblocktree
->ReadTxIndex(hash
, postx
)) {
1459 CAutoFile
file(OpenBlockFile(postx
, true), SER_DISK
, CLIENT_VERSION
);
1461 return error("%s: OpenBlockFile failed", __func__
);
1462 CBlockHeader header
;
1465 fseek(file
.Get(), postx
.nTxOffset
, SEEK_CUR
);
1467 } catch (const std::exception
& e
) {
1468 return error("%s: Deserialize or I/O error - %s", __func__
, e
.what());
1470 hashBlock
= header
.GetHash();
1471 if (txOut
.GetHash() != hash
)
1472 return error("%s: txid mismatch", __func__
);
1477 if (fAllowSlow
) { // use coin database to locate block that contains transaction, and scan it
1480 const CCoinsViewCache
& view
= *pcoinsTip
;
1481 const CCoins
* coins
= view
.AccessCoins(hash
);
1483 nHeight
= coins
->nHeight
;
1486 pindexSlow
= chainActive
[nHeight
];
1491 if (ReadBlockFromDisk(block
, pindexSlow
, consensusParams
)) {
1492 BOOST_FOREACH(const CTransaction
&tx
, block
.vtx
) {
1493 if (tx
.GetHash() == hash
) {
1495 hashBlock
= pindexSlow
->GetBlockHash();
1510 //////////////////////////////////////////////////////////////////////////////
1512 // CBlock and CBlockIndex
1515 bool WriteBlockToDisk(const CBlock
& block
, CDiskBlockPos
& pos
, const CMessageHeader::MessageStartChars
& messageStart
)
1517 // Open history file to append
1518 CAutoFile
fileout(OpenBlockFile(pos
), SER_DISK
, CLIENT_VERSION
);
1519 if (fileout
.IsNull())
1520 return error("WriteBlockToDisk: OpenBlockFile failed");
1522 // Write index header
1523 unsigned int nSize
= fileout
.GetSerializeSize(block
);
1524 fileout
<< FLATDATA(messageStart
) << nSize
;
1527 long fileOutPos
= ftell(fileout
.Get());
1529 return error("WriteBlockToDisk: ftell failed");
1530 pos
.nPos
= (unsigned int)fileOutPos
;
1536 bool ReadBlockFromDisk(CBlock
& block
, const CDiskBlockPos
& pos
, const Consensus::Params
& consensusParams
)
1540 // Open history file to read
1541 CAutoFile
filein(OpenBlockFile(pos
, true), SER_DISK
, CLIENT_VERSION
);
1542 if (filein
.IsNull())
1543 return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos
.ToString());
1549 catch (const std::exception
& e
) {
1550 return error("%s: Deserialize or I/O error - %s at %s", __func__
, e
.what(), pos
.ToString());
1554 if (!CheckProofOfWork(block
.GetHash(), block
.nBits
, consensusParams
))
1555 return error("ReadBlockFromDisk: Errors in block header at %s", pos
.ToString());
1560 bool ReadBlockFromDisk(CBlock
& block
, const CBlockIndex
* pindex
, const Consensus::Params
& consensusParams
)
1562 if (!ReadBlockFromDisk(block
, pindex
->GetBlockPos(), consensusParams
))
1564 if (block
.GetHash() != pindex
->GetBlockHash())
1565 return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1566 pindex
->ToString(), pindex
->GetBlockPos().ToString());
1570 CAmount
GetBlockSubsidy(int nHeight
, const Consensus::Params
& consensusParams
)
1572 int halvings
= nHeight
/ consensusParams
.nSubsidyHalvingInterval
;
1573 // Force block reward to zero when right shift is undefined.
1577 CAmount nSubsidy
= 50 * COIN
;
1578 // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1579 nSubsidy
>>= halvings
;
1583 bool IsInitialBlockDownload()
1585 const CChainParams
& chainParams
= Params();
1587 // Once this function has returned false, it must remain false.
1588 static std::atomic
<bool> latchToFalse
{false};
1589 // Optimization: pre-test latch before taking the lock.
1590 if (latchToFalse
.load(std::memory_order_relaxed
))
1594 if (latchToFalse
.load(std::memory_order_relaxed
))
1596 if (fImporting
|| fReindex
)
1598 if (fCheckpointsEnabled
&& chainActive
.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams
.Checkpoints()))
1600 bool state
= (chainActive
.Height() < pindexBestHeader
->nHeight
- 24 * 6 ||
1601 std::max(chainActive
.Tip()->GetBlockTime(), pindexBestHeader
->GetBlockTime()) < GetTime() - nMaxTipAge
);
1603 latchToFalse
.store(true, std::memory_order_relaxed
);
1607 bool fLargeWorkForkFound
= false;
1608 bool fLargeWorkInvalidChainFound
= false;
1609 CBlockIndex
*pindexBestForkTip
= NULL
, *pindexBestForkBase
= NULL
;
1611 static void AlertNotify(const std::string
& strMessage
)
1613 uiInterface
.NotifyAlertChanged();
1614 std::string strCmd
= GetArg("-alertnotify", "");
1615 if (strCmd
.empty()) return;
1617 // Alert text should be plain ascii coming from a trusted source, but to
1618 // be safe we first strip anything not in safeChars, then add single quotes around
1619 // the whole string before passing it to the shell:
1620 std::string
singleQuote("'");
1621 std::string safeStatus
= SanitizeString(strMessage
);
1622 safeStatus
= singleQuote
+safeStatus
+singleQuote
;
1623 boost::replace_all(strCmd
, "%s", safeStatus
);
1625 boost::thread
t(runCommand
, strCmd
); // thread runs free
1628 void CheckForkWarningConditions()
1630 AssertLockHeld(cs_main
);
1631 // Before we get past initial download, we cannot reliably alert about forks
1632 // (we assume we don't get stuck on a fork before the last checkpoint)
1633 if (IsInitialBlockDownload())
1636 // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
1637 // of our head, drop it
1638 if (pindexBestForkTip
&& chainActive
.Height() - pindexBestForkTip
->nHeight
>= 72)
1639 pindexBestForkTip
= NULL
;
1641 if (pindexBestForkTip
|| (pindexBestInvalid
&& pindexBestInvalid
->nChainWork
> chainActive
.Tip()->nChainWork
+ (GetBlockProof(*chainActive
.Tip()) * 6)))
1643 if (!fLargeWorkForkFound
&& pindexBestForkBase
)
1645 std::string warning
= std::string("'Warning: Large-work fork detected, forking after block ") +
1646 pindexBestForkBase
->phashBlock
->ToString() + std::string("'");
1647 AlertNotify(warning
);
1649 if (pindexBestForkTip
&& pindexBestForkBase
)
1651 LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__
,
1652 pindexBestForkBase
->nHeight
, pindexBestForkBase
->phashBlock
->ToString(),
1653 pindexBestForkTip
->nHeight
, pindexBestForkTip
->phashBlock
->ToString());
1654 fLargeWorkForkFound
= true;
1658 LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__
);
1659 fLargeWorkInvalidChainFound
= true;
1664 fLargeWorkForkFound
= false;
1665 fLargeWorkInvalidChainFound
= false;
1669 void CheckForkWarningConditionsOnNewFork(CBlockIndex
* pindexNewForkTip
)
1671 AssertLockHeld(cs_main
);
1672 // If we are on a fork that is sufficiently large, set a warning flag
1673 CBlockIndex
* pfork
= pindexNewForkTip
;
1674 CBlockIndex
* plonger
= chainActive
.Tip();
1675 while (pfork
&& pfork
!= plonger
)
1677 while (plonger
&& plonger
->nHeight
> pfork
->nHeight
)
1678 plonger
= plonger
->pprev
;
1679 if (pfork
== plonger
)
1681 pfork
= pfork
->pprev
;
1684 // We define a condition where we should warn the user about as a fork of at least 7 blocks
1685 // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
1686 // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
1687 // hash rate operating on the fork.
1688 // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
1689 // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
1690 // the 7-block condition and from this always have the most-likely-to-cause-warning fork
1691 if (pfork
&& (!pindexBestForkTip
|| (pindexBestForkTip
&& pindexNewForkTip
->nHeight
> pindexBestForkTip
->nHeight
)) &&
1692 pindexNewForkTip
->nChainWork
- pfork
->nChainWork
> (GetBlockProof(*pfork
) * 7) &&
1693 chainActive
.Height() - pindexNewForkTip
->nHeight
< 72)
1695 pindexBestForkTip
= pindexNewForkTip
;
1696 pindexBestForkBase
= pfork
;
1699 CheckForkWarningConditions();
1702 // Requires cs_main.
1703 void Misbehaving(NodeId pnode
, int howmuch
)
1708 CNodeState
*state
= State(pnode
);
1712 state
->nMisbehavior
+= howmuch
;
1713 int banscore
= GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD
);
1714 if (state
->nMisbehavior
>= banscore
&& state
->nMisbehavior
- howmuch
< banscore
)
1716 LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__
, state
->name
, state
->nMisbehavior
-howmuch
, state
->nMisbehavior
);
1717 state
->fShouldBan
= true;
1719 LogPrintf("%s: %s (%d -> %d)\n", __func__
, state
->name
, state
->nMisbehavior
-howmuch
, state
->nMisbehavior
);
1722 void static InvalidChainFound(CBlockIndex
* pindexNew
)
1724 if (!pindexBestInvalid
|| pindexNew
->nChainWork
> pindexBestInvalid
->nChainWork
)
1725 pindexBestInvalid
= pindexNew
;
1727 LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__
,
1728 pindexNew
->GetBlockHash().ToString(), pindexNew
->nHeight
,
1729 log(pindexNew
->nChainWork
.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
1730 pindexNew
->GetBlockTime()));
1731 CBlockIndex
*tip
= chainActive
.Tip();
1733 LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__
,
1734 tip
->GetBlockHash().ToString(), chainActive
.Height(), log(tip
->nChainWork
.getdouble())/log(2.0),
1735 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip
->GetBlockTime()));
1736 CheckForkWarningConditions();
1739 void static InvalidBlockFound(CBlockIndex
*pindex
, const CValidationState
&state
) {
1741 if (state
.IsInvalid(nDoS
)) {
1742 std::map
<uint256
, NodeId
>::iterator it
= mapBlockSource
.find(pindex
->GetBlockHash());
1743 if (it
!= mapBlockSource
.end() && State(it
->second
)) {
1744 assert (state
.GetRejectCode() < REJECT_INTERNAL
); // Blocks are never rejected with internal reject codes
1745 CBlockReject reject
= {(unsigned char)state
.GetRejectCode(), state
.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH
), pindex
->GetBlockHash()};
1746 State(it
->second
)->rejects
.push_back(reject
);
1748 Misbehaving(it
->second
, nDoS
);
1751 if (!state
.CorruptionPossible()) {
1752 pindex
->nStatus
|= BLOCK_FAILED_VALID
;
1753 setDirtyBlockIndex
.insert(pindex
);
1754 setBlockIndexCandidates
.erase(pindex
);
1755 InvalidChainFound(pindex
);
1759 void UpdateCoins(const CTransaction
& tx
, CCoinsViewCache
& inputs
, CTxUndo
&txundo
, int nHeight
)
1761 // mark inputs spent
1762 if (!tx
.IsCoinBase()) {
1763 txundo
.vprevout
.reserve(tx
.vin
.size());
1764 BOOST_FOREACH(const CTxIn
&txin
, tx
.vin
) {
1765 CCoinsModifier coins
= inputs
.ModifyCoins(txin
.prevout
.hash
);
1766 unsigned nPos
= txin
.prevout
.n
;
1768 if (nPos
>= coins
->vout
.size() || coins
->vout
[nPos
].IsNull())
1770 // mark an outpoint spent, and construct undo information
1771 txundo
.vprevout
.push_back(CTxInUndo(coins
->vout
[nPos
]));
1773 if (coins
->vout
.size() == 0) {
1774 CTxInUndo
& undo
= txundo
.vprevout
.back();
1775 undo
.nHeight
= coins
->nHeight
;
1776 undo
.fCoinBase
= coins
->fCoinBase
;
1777 undo
.nVersion
= coins
->nVersion
;
1782 inputs
.ModifyNewCoins(tx
.GetHash(), tx
.IsCoinBase())->FromTx(tx
, nHeight
);
1785 void UpdateCoins(const CTransaction
& tx
, CCoinsViewCache
& inputs
, int nHeight
)
1788 UpdateCoins(tx
, inputs
, txundo
, nHeight
);
1791 bool CScriptCheck::operator()() {
1792 const CScript
&scriptSig
= ptxTo
->vin
[nIn
].scriptSig
;
1793 if (!VerifyScript(scriptSig
, scriptPubKey
, nFlags
, CachingTransactionSignatureChecker(ptxTo
, nIn
, cacheStore
), &error
)) {
1799 int GetSpendHeight(const CCoinsViewCache
& inputs
)
1802 CBlockIndex
* pindexPrev
= mapBlockIndex
.find(inputs
.GetBestBlock())->second
;
1803 return pindexPrev
->nHeight
+ 1;
1806 namespace Consensus
{
1807 bool CheckTxInputs(const CTransaction
& tx
, CValidationState
& state
, const CCoinsViewCache
& inputs
, int nSpendHeight
)
1809 // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
1810 // for an attacker to attempt to split the network.
1811 if (!inputs
.HaveInputs(tx
))
1812 return state
.Invalid(false, 0, "", "Inputs unavailable");
1814 CAmount nValueIn
= 0;
1816 for (unsigned int i
= 0; i
< tx
.vin
.size(); i
++)
1818 const COutPoint
&prevout
= tx
.vin
[i
].prevout
;
1819 const CCoins
*coins
= inputs
.AccessCoins(prevout
.hash
);
1822 // If prev is coinbase, check that it's matured
1823 if (coins
->IsCoinBase()) {
1824 if (nSpendHeight
- coins
->nHeight
< COINBASE_MATURITY
)
1825 return state
.Invalid(false,
1826 REJECT_INVALID
, "bad-txns-premature-spend-of-coinbase",
1827 strprintf("tried to spend coinbase at depth %d", nSpendHeight
- coins
->nHeight
));
1830 // Check for negative or overflow input values
1831 nValueIn
+= coins
->vout
[prevout
.n
].nValue
;
1832 if (!MoneyRange(coins
->vout
[prevout
.n
].nValue
) || !MoneyRange(nValueIn
))
1833 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-inputvalues-outofrange");
1837 if (nValueIn
< tx
.GetValueOut())
1838 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-in-belowout", false,
1839 strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn
), FormatMoney(tx
.GetValueOut())));
1841 // Tally transaction fees
1842 CAmount nTxFee
= nValueIn
- tx
.GetValueOut();
1844 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-fee-negative");
1846 if (!MoneyRange(nFees
))
1847 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-fee-outofrange");
1850 }// namespace Consensus
1852 bool CheckInputs(const CTransaction
& tx
, CValidationState
&state
, const CCoinsViewCache
&inputs
, bool fScriptChecks
, unsigned int flags
, bool cacheStore
, std::vector
<CScriptCheck
> *pvChecks
)
1854 if (!tx
.IsCoinBase())
1856 if (!Consensus::CheckTxInputs(tx
, state
, inputs
, GetSpendHeight(inputs
)))
1860 pvChecks
->reserve(tx
.vin
.size());
1862 // The first loop above does all the inexpensive checks.
1863 // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
1864 // Helps prevent CPU exhaustion attacks.
1866 // Skip ECDSA signature verification when connecting blocks before the
1867 // last block chain checkpoint. Assuming the checkpoints are valid this
1868 // is safe because block merkle hashes are still computed and checked,
1869 // and any change will be caught at the next checkpoint. Of course, if
1870 // the checkpoint is for a chain that's invalid due to false scriptSigs
1871 // this optimisation would allow an invalid chain to be accepted.
1872 if (fScriptChecks
) {
1873 for (unsigned int i
= 0; i
< tx
.vin
.size(); i
++) {
1874 const COutPoint
&prevout
= tx
.vin
[i
].prevout
;
1875 const CCoins
* coins
= inputs
.AccessCoins(prevout
.hash
);
1879 CScriptCheck
check(*coins
, tx
, i
, flags
, cacheStore
);
1881 pvChecks
->push_back(CScriptCheck());
1882 check
.swap(pvChecks
->back());
1883 } else if (!check()) {
1884 if (flags
& STANDARD_NOT_MANDATORY_VERIFY_FLAGS
) {
1885 // Check whether the failure was caused by a
1886 // non-mandatory script verification check, such as
1887 // non-standard DER encodings or non-null dummy
1888 // arguments; if so, don't trigger DoS protection to
1889 // avoid splitting the network between upgraded and
1890 // non-upgraded nodes.
1891 CScriptCheck
check2(*coins
, tx
, i
,
1892 flags
& ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS
, cacheStore
);
1894 return state
.Invalid(false, REJECT_NONSTANDARD
, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check
.GetScriptError())));
1896 // Failures of other flags indicate a transaction that is
1897 // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
1898 // such nodes as they are not following the protocol. That
1899 // said during an upgrade careful thought should be taken
1900 // as to the correct behavior - we may want to continue
1901 // peering with non-upgraded nodes even after soft-fork
1902 // super-majority signaling has occurred.
1903 return state
.DoS(100,false, REJECT_INVALID
, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check
.GetScriptError())));
1914 bool UndoWriteToDisk(const CBlockUndo
& blockundo
, CDiskBlockPos
& pos
, const uint256
& hashBlock
, const CMessageHeader::MessageStartChars
& messageStart
)
1916 // Open history file to append
1917 CAutoFile
fileout(OpenUndoFile(pos
), SER_DISK
, CLIENT_VERSION
);
1918 if (fileout
.IsNull())
1919 return error("%s: OpenUndoFile failed", __func__
);
1921 // Write index header
1922 unsigned int nSize
= fileout
.GetSerializeSize(blockundo
);
1923 fileout
<< FLATDATA(messageStart
) << nSize
;
1926 long fileOutPos
= ftell(fileout
.Get());
1928 return error("%s: ftell failed", __func__
);
1929 pos
.nPos
= (unsigned int)fileOutPos
;
1930 fileout
<< blockundo
;
1932 // calculate & write checksum
1933 CHashWriter
hasher(SER_GETHASH
, PROTOCOL_VERSION
);
1934 hasher
<< hashBlock
;
1935 hasher
<< blockundo
;
1936 fileout
<< hasher
.GetHash();
1941 bool UndoReadFromDisk(CBlockUndo
& blockundo
, const CDiskBlockPos
& pos
, const uint256
& hashBlock
)
1943 // Open history file to read
1944 CAutoFile
filein(OpenUndoFile(pos
, true), SER_DISK
, CLIENT_VERSION
);
1945 if (filein
.IsNull())
1946 return error("%s: OpenBlockFile failed", __func__
);
1949 uint256 hashChecksum
;
1951 filein
>> blockundo
;
1952 filein
>> hashChecksum
;
1954 catch (const std::exception
& e
) {
1955 return error("%s: Deserialize or I/O error - %s", __func__
, e
.what());
1959 CHashWriter
hasher(SER_GETHASH
, PROTOCOL_VERSION
);
1960 hasher
<< hashBlock
;
1961 hasher
<< blockundo
;
1962 if (hashChecksum
!= hasher
.GetHash())
1963 return error("%s: Checksum mismatch", __func__
);
1968 /** Abort with a message */
1969 bool AbortNode(const std::string
& strMessage
, const std::string
& userMessage
="")
1971 strMiscWarning
= strMessage
;
1972 LogPrintf("*** %s\n", strMessage
);
1973 uiInterface
.ThreadSafeMessageBox(
1974 userMessage
.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage
,
1975 "", CClientUIInterface::MSG_ERROR
);
1980 bool AbortNode(CValidationState
& state
, const std::string
& strMessage
, const std::string
& userMessage
="")
1982 AbortNode(strMessage
, userMessage
);
1983 return state
.Error(strMessage
);
1989 * Apply the undo operation of a CTxInUndo to the given chain state.
1990 * @param undo The undo object.
1991 * @param view The coins view to which to apply the changes.
1992 * @param out The out point that corresponds to the tx input.
1993 * @return True on success.
1995 static bool ApplyTxInUndo(const CTxInUndo
& undo
, CCoinsViewCache
& view
, const COutPoint
& out
)
1999 CCoinsModifier coins
= view
.ModifyCoins(out
.hash
);
2000 if (undo
.nHeight
!= 0) {
2001 // undo data contains height: this is the last output of the prevout tx being spent
2002 if (!coins
->IsPruned())
2003 fClean
= fClean
&& error("%s: undo data overwriting existing transaction", __func__
);
2005 coins
->fCoinBase
= undo
.fCoinBase
;
2006 coins
->nHeight
= undo
.nHeight
;
2007 coins
->nVersion
= undo
.nVersion
;
2009 if (coins
->IsPruned())
2010 fClean
= fClean
&& error("%s: undo data adding output to missing transaction", __func__
);
2012 if (coins
->IsAvailable(out
.n
))
2013 fClean
= fClean
&& error("%s: undo data overwriting existing output", __func__
);
2014 if (coins
->vout
.size() < out
.n
+1)
2015 coins
->vout
.resize(out
.n
+1);
2016 coins
->vout
[out
.n
] = undo
.txout
;
2021 bool DisconnectBlock(const CBlock
& block
, CValidationState
& state
, const CBlockIndex
* pindex
, CCoinsViewCache
& view
, bool* pfClean
)
2023 assert(pindex
->GetBlockHash() == view
.GetBestBlock());
2030 CBlockUndo blockUndo
;
2031 CDiskBlockPos pos
= pindex
->GetUndoPos();
2033 return error("DisconnectBlock(): no undo data available");
2034 if (!UndoReadFromDisk(blockUndo
, pos
, pindex
->pprev
->GetBlockHash()))
2035 return error("DisconnectBlock(): failure reading undo data");
2037 if (blockUndo
.vtxundo
.size() + 1 != block
.vtx
.size())
2038 return error("DisconnectBlock(): block and undo data inconsistent");
2040 // undo transactions in reverse order
2041 for (int i
= block
.vtx
.size() - 1; i
>= 0; i
--) {
2042 const CTransaction
&tx
= block
.vtx
[i
];
2043 uint256 hash
= tx
.GetHash();
2045 // Check that all outputs are available and match the outputs in the block itself
2048 CCoinsModifier outs
= view
.ModifyCoins(hash
);
2049 outs
->ClearUnspendable();
2051 CCoins
outsBlock(tx
, pindex
->nHeight
);
2052 // The CCoins serialization does not serialize negative numbers.
2053 // No network rules currently depend on the version here, so an inconsistency is harmless
2054 // but it must be corrected before txout nversion ever influences a network rule.
2055 if (outsBlock
.nVersion
< 0)
2056 outs
->nVersion
= outsBlock
.nVersion
;
2057 if (*outs
!= outsBlock
)
2058 fClean
= fClean
&& error("DisconnectBlock(): added transaction mismatch? database corrupted");
2065 if (i
> 0) { // not coinbases
2066 const CTxUndo
&txundo
= blockUndo
.vtxundo
[i
-1];
2067 if (txundo
.vprevout
.size() != tx
.vin
.size())
2068 return error("DisconnectBlock(): transaction and undo data inconsistent");
2069 for (unsigned int j
= tx
.vin
.size(); j
-- > 0;) {
2070 const COutPoint
&out
= tx
.vin
[j
].prevout
;
2071 const CTxInUndo
&undo
= txundo
.vprevout
[j
];
2072 if (!ApplyTxInUndo(undo
, view
, out
))
2078 // move best block pointer to prevout block
2079 view
.SetBestBlock(pindex
->pprev
->GetBlockHash());
2089 void static FlushBlockFile(bool fFinalize
= false)
2091 LOCK(cs_LastBlockFile
);
2093 CDiskBlockPos
posOld(nLastBlockFile
, 0);
2095 FILE *fileOld
= OpenBlockFile(posOld
);
2098 TruncateFile(fileOld
, vinfoBlockFile
[nLastBlockFile
].nSize
);
2099 FileCommit(fileOld
);
2103 fileOld
= OpenUndoFile(posOld
);
2106 TruncateFile(fileOld
, vinfoBlockFile
[nLastBlockFile
].nUndoSize
);
2107 FileCommit(fileOld
);
2112 bool FindUndoPos(CValidationState
&state
, int nFile
, CDiskBlockPos
&pos
, unsigned int nAddSize
);
2114 static CCheckQueue
<CScriptCheck
> scriptcheckqueue(128);
2116 void ThreadScriptCheck() {
2117 RenameThread("bitcoin-scriptch");
2118 scriptcheckqueue
.Thread();
2122 // Called periodically asynchronously; alerts if it smells like
2123 // we're being fed a bad chain (blocks being generated much
2124 // too slowly or too quickly).
2126 void PartitionCheck(bool (*initialDownloadCheck
)(), CCriticalSection
& cs
, const CBlockIndex
*const &bestHeader
,
2127 int64_t nPowTargetSpacing
)
2129 if (bestHeader
== NULL
|| initialDownloadCheck()) return;
2131 static int64_t lastAlertTime
= 0;
2132 int64_t now
= GetAdjustedTime();
2133 if (lastAlertTime
> now
-60*60*24) return; // Alert at most once per day
2135 const int SPAN_HOURS
=4;
2136 const int SPAN_SECONDS
=SPAN_HOURS
*60*60;
2137 int BLOCKS_EXPECTED
= SPAN_SECONDS
/ nPowTargetSpacing
;
2139 boost::math::poisson_distribution
<double> poisson(BLOCKS_EXPECTED
);
2141 std::string strWarning
;
2142 int64_t startTime
= GetAdjustedTime()-SPAN_SECONDS
;
2145 const CBlockIndex
* i
= bestHeader
;
2147 while (i
->GetBlockTime() >= startTime
) {
2150 if (i
== NULL
) return; // Ran out of chain, we must not be fully sync'ed
2153 // How likely is it to find that many by chance?
2154 double p
= boost::math::pdf(poisson
, nBlocks
);
2156 LogPrint("partitioncheck", "%s: Found %d blocks in the last %d hours\n", __func__
, nBlocks
, SPAN_HOURS
);
2157 LogPrint("partitioncheck", "%s: likelihood: %g\n", __func__
, p
);
2159 // Aim for one false-positive about every fifty years of normal running:
2160 const int FIFTY_YEARS
= 50*365*24*60*60;
2161 double alertThreshold
= 1.0 / (FIFTY_YEARS
/ SPAN_SECONDS
);
2163 if (p
<= alertThreshold
&& nBlocks
< BLOCKS_EXPECTED
)
2165 // Many fewer blocks than expected: alert!
2166 strWarning
= strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
2167 nBlocks
, SPAN_HOURS
, BLOCKS_EXPECTED
);
2169 else if (p
<= alertThreshold
&& nBlocks
> BLOCKS_EXPECTED
)
2171 // Many more blocks than expected: alert!
2172 strWarning
= strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
2173 nBlocks
, SPAN_HOURS
, BLOCKS_EXPECTED
);
2175 if (!strWarning
.empty())
2177 strMiscWarning
= strWarning
;
2178 AlertNotify(strWarning
);
2179 lastAlertTime
= now
;
2183 // Protected by cs_main
2184 VersionBitsCache versionbitscache
;
2186 int32_t ComputeBlockVersion(const CBlockIndex
* pindexPrev
, const Consensus::Params
& params
)
2189 int32_t nVersion
= VERSIONBITS_TOP_BITS
;
2191 for (int i
= 0; i
< (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS
; i
++) {
2192 ThresholdState state
= VersionBitsState(pindexPrev
, params
, (Consensus::DeploymentPos
)i
, versionbitscache
);
2193 if (state
== THRESHOLD_LOCKED_IN
|| state
== THRESHOLD_STARTED
) {
2194 nVersion
|= VersionBitsMask(params
, (Consensus::DeploymentPos
)i
);
2202 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
2204 class WarningBitsConditionChecker
: public AbstractThresholdConditionChecker
2210 WarningBitsConditionChecker(int bitIn
) : bit(bitIn
) {}
2212 int64_t BeginTime(const Consensus::Params
& params
) const { return 0; }
2213 int64_t EndTime(const Consensus::Params
& params
) const { return std::numeric_limits
<int64_t>::max(); }
2214 int Period(const Consensus::Params
& params
) const { return params
.nMinerConfirmationWindow
; }
2215 int Threshold(const Consensus::Params
& params
) const { return params
.nRuleChangeActivationThreshold
; }
2217 bool Condition(const CBlockIndex
* pindex
, const Consensus::Params
& params
) const
2219 return ((pindex
->nVersion
& VERSIONBITS_TOP_MASK
) == VERSIONBITS_TOP_BITS
) &&
2220 ((pindex
->nVersion
>> bit
) & 1) != 0 &&
2221 ((ComputeBlockVersion(pindex
->pprev
, params
) >> bit
) & 1) == 0;
2225 // Protected by cs_main
2226 static ThresholdConditionCache warningcache
[VERSIONBITS_NUM_BITS
];
2228 static int64_t nTimeCheck
= 0;
2229 static int64_t nTimeForks
= 0;
2230 static int64_t nTimeVerify
= 0;
2231 static int64_t nTimeConnect
= 0;
2232 static int64_t nTimeIndex
= 0;
2233 static int64_t nTimeCallbacks
= 0;
2234 static int64_t nTimeTotal
= 0;
2236 bool ConnectBlock(const CBlock
& block
, CValidationState
& state
, CBlockIndex
* pindex
,
2237 CCoinsViewCache
& view
, const CChainParams
& chainparams
, bool fJustCheck
)
2239 AssertLockHeld(cs_main
);
2241 int64_t nTimeStart
= GetTimeMicros();
2243 // Check it again in case a previous version let a bad block in
2244 if (!CheckBlock(block
, state
, chainparams
.GetConsensus(), GetAdjustedTime(), !fJustCheck
, !fJustCheck
))
2245 return error("%s: Consensus::CheckBlock: %s", __func__
, FormatStateMessage(state
));
2247 // verify that the view's current state corresponds to the previous block
2248 uint256 hashPrevBlock
= pindex
->pprev
== NULL
? uint256() : pindex
->pprev
->GetBlockHash();
2249 assert(hashPrevBlock
== view
.GetBestBlock());
2251 // Special case for the genesis block, skipping connection of its transactions
2252 // (its coinbase is unspendable)
2253 if (block
.GetHash() == chainparams
.GetConsensus().hashGenesisBlock
) {
2255 view
.SetBestBlock(pindex
->GetBlockHash());
2259 bool fScriptChecks
= true;
2260 if (fCheckpointsEnabled
) {
2261 CBlockIndex
*pindexLastCheckpoint
= Checkpoints::GetLastCheckpoint(chainparams
.Checkpoints());
2262 if (pindexLastCheckpoint
&& pindexLastCheckpoint
->GetAncestor(pindex
->nHeight
) == pindex
) {
2263 // This block is an ancestor of a checkpoint: disable script checks
2264 fScriptChecks
= false;
2268 int64_t nTime1
= GetTimeMicros(); nTimeCheck
+= nTime1
- nTimeStart
;
2269 LogPrint("bench", " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1
- nTimeStart
), nTimeCheck
* 0.000001);
2271 // Do not allow blocks that contain transactions which 'overwrite' older transactions,
2272 // unless those are already completely spent.
2273 // If such overwrites are allowed, coinbases and transactions depending upon those
2274 // can be duplicated to remove the ability to spend the first instance -- even after
2275 // being sent to another address.
2276 // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
2277 // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
2278 // already refuses previously-known transaction ids entirely.
2279 // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
2280 // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
2281 // two in the chain that violate it. This prevents exploiting the issue against nodes during their
2282 // initial block download.
2283 bool fEnforceBIP30
= (!pindex
->phashBlock
) || // Enforce on CreateNewBlock invocations which don't have a hash.
2284 !((pindex
->nHeight
==91842 && pindex
->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
2285 (pindex
->nHeight
==91880 && pindex
->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
2287 // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
2288 // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
2289 // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
2290 // before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further
2291 // duplicate transactions descending from the known pairs either.
2292 // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
2293 CBlockIndex
*pindexBIP34height
= pindex
->pprev
->GetAncestor(chainparams
.GetConsensus().BIP34Height
);
2294 //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
2295 fEnforceBIP30
= fEnforceBIP30
&& (!pindexBIP34height
|| !(pindexBIP34height
->GetBlockHash() == chainparams
.GetConsensus().BIP34Hash
));
2297 if (fEnforceBIP30
) {
2298 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
) {
2299 const CCoins
* coins
= view
.AccessCoins(tx
.GetHash());
2300 if (coins
&& !coins
->IsPruned())
2301 return state
.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
2302 REJECT_INVALID
, "bad-txns-BIP30");
2306 // BIP16 didn't become active until Apr 1 2012
2307 int64_t nBIP16SwitchTime
= 1333238400;
2308 bool fStrictPayToScriptHash
= (pindex
->GetBlockTime() >= nBIP16SwitchTime
);
2310 unsigned int flags
= fStrictPayToScriptHash
? SCRIPT_VERIFY_P2SH
: SCRIPT_VERIFY_NONE
;
2312 // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks,
2313 // when 75% of the network has upgraded:
2314 if (block
.nVersion
>= 3 && IsSuperMajority(3, pindex
->pprev
, chainparams
.GetConsensus().nMajorityEnforceBlockUpgrade
, chainparams
.GetConsensus())) {
2315 flags
|= SCRIPT_VERIFY_DERSIG
;
2318 // Start enforcing CHECKLOCKTIMEVERIFY, (BIP65) for block.nVersion=4
2319 // blocks, when 75% of the network has upgraded:
2320 if (block
.nVersion
>= 4 && IsSuperMajority(4, pindex
->pprev
, chainparams
.GetConsensus().nMajorityEnforceBlockUpgrade
, chainparams
.GetConsensus())) {
2321 flags
|= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY
;
2324 // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
2325 int nLockTimeFlags
= 0;
2326 if (VersionBitsState(pindex
->pprev
, chainparams
.GetConsensus(), Consensus::DEPLOYMENT_CSV
, versionbitscache
) == THRESHOLD_ACTIVE
) {
2327 flags
|= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY
;
2328 nLockTimeFlags
|= LOCKTIME_VERIFY_SEQUENCE
;
2331 int64_t nTime2
= GetTimeMicros(); nTimeForks
+= nTime2
- nTime1
;
2332 LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2
- nTime1
), nTimeForks
* 0.000001);
2334 CBlockUndo blockundo
;
2336 CCheckQueueControl
<CScriptCheck
> control(fScriptChecks
&& nScriptCheckThreads
? &scriptcheckqueue
: NULL
);
2338 std::vector
<int> prevheights
;
2341 unsigned int nSigOps
= 0;
2342 CDiskTxPos
pos(pindex
->GetBlockPos(), GetSizeOfCompactSize(block
.vtx
.size()));
2343 std::vector
<std::pair
<uint256
, CDiskTxPos
> > vPos
;
2344 vPos
.reserve(block
.vtx
.size());
2345 blockundo
.vtxundo
.reserve(block
.vtx
.size() - 1);
2346 for (unsigned int i
= 0; i
< block
.vtx
.size(); i
++)
2348 const CTransaction
&tx
= block
.vtx
[i
];
2350 nInputs
+= tx
.vin
.size();
2351 nSigOps
+= GetLegacySigOpCount(tx
);
2352 if (nSigOps
> MAX_BLOCK_SIGOPS
)
2353 return state
.DoS(100, error("ConnectBlock(): too many sigops"),
2354 REJECT_INVALID
, "bad-blk-sigops");
2356 if (!tx
.IsCoinBase())
2358 if (!view
.HaveInputs(tx
))
2359 return state
.DoS(100, error("ConnectBlock(): inputs missing/spent"),
2360 REJECT_INVALID
, "bad-txns-inputs-missingorspent");
2362 // Check that transaction is BIP68 final
2363 // BIP68 lock checks (as opposed to nLockTime checks) must
2364 // be in ConnectBlock because they require the UTXO set
2365 prevheights
.resize(tx
.vin
.size());
2366 for (size_t j
= 0; j
< tx
.vin
.size(); j
++) {
2367 prevheights
[j
] = view
.AccessCoins(tx
.vin
[j
].prevout
.hash
)->nHeight
;
2370 if (!SequenceLocks(tx
, nLockTimeFlags
, &prevheights
, *pindex
)) {
2371 return state
.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__
),
2372 REJECT_INVALID
, "bad-txns-nonfinal");
2375 if (fStrictPayToScriptHash
)
2377 // Add in sigops done by pay-to-script-hash inputs;
2378 // this is to prevent a "rogue miner" from creating
2379 // an incredibly-expensive-to-validate block.
2380 nSigOps
+= GetP2SHSigOpCount(tx
, view
);
2381 if (nSigOps
> MAX_BLOCK_SIGOPS
)
2382 return state
.DoS(100, error("ConnectBlock(): too many sigops"),
2383 REJECT_INVALID
, "bad-blk-sigops");
2386 nFees
+= view
.GetValueIn(tx
)-tx
.GetValueOut();
2388 std::vector
<CScriptCheck
> vChecks
;
2389 bool fCacheResults
= fJustCheck
; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
2390 if (!CheckInputs(tx
, state
, view
, fScriptChecks
, flags
, fCacheResults
, nScriptCheckThreads
? &vChecks
: NULL
))
2391 return error("ConnectBlock(): CheckInputs on %s failed with %s",
2392 tx
.GetHash().ToString(), FormatStateMessage(state
));
2393 control
.Add(vChecks
);
2398 blockundo
.vtxundo
.push_back(CTxUndo());
2400 UpdateCoins(tx
, view
, i
== 0 ? undoDummy
: blockundo
.vtxundo
.back(), pindex
->nHeight
);
2402 vPos
.push_back(std::make_pair(tx
.GetHash(), pos
));
2403 pos
.nTxOffset
+= ::GetSerializeSize(tx
, SER_DISK
, CLIENT_VERSION
);
2405 int64_t nTime3
= GetTimeMicros(); nTimeConnect
+= nTime3
- nTime2
;
2406 LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block
.vtx
.size(), 0.001 * (nTime3
- nTime2
), 0.001 * (nTime3
- nTime2
) / block
.vtx
.size(), nInputs
<= 1 ? 0 : 0.001 * (nTime3
- nTime2
) / (nInputs
-1), nTimeConnect
* 0.000001);
2408 CAmount blockReward
= nFees
+ GetBlockSubsidy(pindex
->nHeight
, chainparams
.GetConsensus());
2409 if (block
.vtx
[0].GetValueOut() > blockReward
)
2410 return state
.DoS(100,
2411 error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
2412 block
.vtx
[0].GetValueOut(), blockReward
),
2413 REJECT_INVALID
, "bad-cb-amount");
2415 if (!control
.Wait())
2416 return state
.DoS(100, false);
2417 int64_t nTime4
= GetTimeMicros(); nTimeVerify
+= nTime4
- nTime2
;
2418 LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs
- 1, 0.001 * (nTime4
- nTime2
), nInputs
<= 1 ? 0 : 0.001 * (nTime4
- nTime2
) / (nInputs
-1), nTimeVerify
* 0.000001);
2423 // Write undo information to disk
2424 if (pindex
->GetUndoPos().IsNull() || !pindex
->IsValid(BLOCK_VALID_SCRIPTS
))
2426 if (pindex
->GetUndoPos().IsNull()) {
2428 if (!FindUndoPos(state
, pindex
->nFile
, pos
, ::GetSerializeSize(blockundo
, SER_DISK
, CLIENT_VERSION
) + 40))
2429 return error("ConnectBlock(): FindUndoPos failed");
2430 if (!UndoWriteToDisk(blockundo
, pos
, pindex
->pprev
->GetBlockHash(), chainparams
.MessageStart()))
2431 return AbortNode(state
, "Failed to write undo data");
2433 // update nUndoPos in block index
2434 pindex
->nUndoPos
= pos
.nPos
;
2435 pindex
->nStatus
|= BLOCK_HAVE_UNDO
;
2438 pindex
->RaiseValidity(BLOCK_VALID_SCRIPTS
);
2439 setDirtyBlockIndex
.insert(pindex
);
2443 if (!pblocktree
->WriteTxIndex(vPos
))
2444 return AbortNode(state
, "Failed to write transaction index");
2446 // add this block to the view's block chain
2447 view
.SetBestBlock(pindex
->GetBlockHash());
2449 int64_t nTime5
= GetTimeMicros(); nTimeIndex
+= nTime5
- nTime4
;
2450 LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5
- nTime4
), nTimeIndex
* 0.000001);
2452 // Watch for changes to the previous coinbase transaction.
2453 static uint256 hashPrevBestCoinBase
;
2454 GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase
);
2455 hashPrevBestCoinBase
= block
.vtx
[0].GetHash();
2457 int64_t nTime6
= GetTimeMicros(); nTimeCallbacks
+= nTime6
- nTime5
;
2458 LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6
- nTime5
), nTimeCallbacks
* 0.000001);
// How aggressively FlushStateToDisk should write/flush state.
// (FLUSH_STATE_NONE and FLUSH_STATE_ALWAYS are used by the flush helpers below.)
enum FlushStateMode {
    FLUSH_STATE_NONE,
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
    FLUSH_STATE_ALWAYS
};
2471 * Update the on-disk chain state.
2472 * The caches and indexes are flushed depending on the mode we're called with
2473 * if they're too large, if it's been a while since the last write,
2474 * or always and in all cases if we're in prune mode and are deleting files.
2476 bool static FlushStateToDisk(CValidationState
&state
, FlushStateMode mode
) {
2477 const CChainParams
& chainparams
= Params();
2478 LOCK2(cs_main
, cs_LastBlockFile
);
2479 static int64_t nLastWrite
= 0;
2480 static int64_t nLastFlush
= 0;
2481 static int64_t nLastSetChain
= 0;
2482 std::set
<int> setFilesToPrune
;
2483 bool fFlushForPrune
= false;
2485 if (fPruneMode
&& fCheckForPruning
&& !fReindex
) {
2486 FindFilesToPrune(setFilesToPrune
, chainparams
.PruneAfterHeight());
2487 fCheckForPruning
= false;
2488 if (!setFilesToPrune
.empty()) {
2489 fFlushForPrune
= true;
2491 pblocktree
->WriteFlag("prunedblockfiles", true);
2496 int64_t nNow
= GetTimeMicros();
2497 // Avoid writing/flushing immediately after startup.
2498 if (nLastWrite
== 0) {
2501 if (nLastFlush
== 0) {
2504 if (nLastSetChain
== 0) {
2505 nLastSetChain
= nNow
;
2507 size_t cacheSize
= pcoinsTip
->DynamicMemoryUsage();
2508 // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
2509 bool fCacheLarge
= mode
== FLUSH_STATE_PERIODIC
&& cacheSize
* (10.0/9) > nCoinCacheUsage
;
2510 // The cache is over the limit, we have to write now.
2511 bool fCacheCritical
= mode
== FLUSH_STATE_IF_NEEDED
&& cacheSize
> nCoinCacheUsage
;
2512 // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
2513 bool fPeriodicWrite
= mode
== FLUSH_STATE_PERIODIC
&& nNow
> nLastWrite
+ (int64_t)DATABASE_WRITE_INTERVAL
* 1000000;
2514 // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
2515 bool fPeriodicFlush
= mode
== FLUSH_STATE_PERIODIC
&& nNow
> nLastFlush
+ (int64_t)DATABASE_FLUSH_INTERVAL
* 1000000;
2516 // Combine all conditions that result in a full cache flush.
2517 bool fDoFullFlush
= (mode
== FLUSH_STATE_ALWAYS
) || fCacheLarge
|| fCacheCritical
|| fPeriodicFlush
|| fFlushForPrune
;
2518 // Write blocks and block index to disk.
2519 if (fDoFullFlush
|| fPeriodicWrite
) {
2520 // Depend on nMinDiskSpace to ensure we can write block index
2521 if (!CheckDiskSpace(0))
2522 return state
.Error("out of disk space");
2523 // First make sure all block and undo data is flushed to disk.
2525 // Then update all block file information (which may refer to block and undo files).
2527 std::vector
<std::pair
<int, const CBlockFileInfo
*> > vFiles
;
2528 vFiles
.reserve(setDirtyFileInfo
.size());
2529 for (set
<int>::iterator it
= setDirtyFileInfo
.begin(); it
!= setDirtyFileInfo
.end(); ) {
2530 vFiles
.push_back(make_pair(*it
, &vinfoBlockFile
[*it
]));
2531 setDirtyFileInfo
.erase(it
++);
2533 std::vector
<const CBlockIndex
*> vBlocks
;
2534 vBlocks
.reserve(setDirtyBlockIndex
.size());
2535 for (set
<CBlockIndex
*>::iterator it
= setDirtyBlockIndex
.begin(); it
!= setDirtyBlockIndex
.end(); ) {
2536 vBlocks
.push_back(*it
);
2537 setDirtyBlockIndex
.erase(it
++);
2539 if (!pblocktree
->WriteBatchSync(vFiles
, nLastBlockFile
, vBlocks
)) {
2540 return AbortNode(state
, "Files to write to block index database");
2543 // Finally remove any pruned files
2545 UnlinkPrunedFiles(setFilesToPrune
);
2548 // Flush best chain related state. This can only be done if the blocks / block index write was also done.
2550 // Typical CCoins structures on disk are around 128 bytes in size.
2551 // Pushing a new one to the database can cause it to be written
2552 // twice (once in the log, and once in the tables). This is already
2553 // an overestimation, as most will delete an existing entry or
2554 // overwrite one. Still, use a conservative safety factor of 2.
2555 if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip
->GetCacheSize()))
2556 return state
.Error("out of disk space");
2557 // Flush the chainstate (which may refer to block index entries).
2558 if (!pcoinsTip
->Flush())
2559 return AbortNode(state
, "Failed to write to coin database");
2562 if (fDoFullFlush
|| ((mode
== FLUSH_STATE_ALWAYS
|| mode
== FLUSH_STATE_PERIODIC
) && nNow
> nLastSetChain
+ (int64_t)DATABASE_WRITE_INTERVAL
* 1000000)) {
2563 // Update best block in wallet (so we can detect restored wallets).
2564 GetMainSignals().SetBestChain(chainActive
.GetLocator());
2565 nLastSetChain
= nNow
;
2567 } catch (const std::runtime_error
& e
) {
2568 return AbortNode(state
, std::string("System error while flushing: ") + e
.what());
2573 void FlushStateToDisk() {
2574 CValidationState state
;
2575 FlushStateToDisk(state
, FLUSH_STATE_ALWAYS
);
2578 void PruneAndFlush() {
2579 CValidationState state
;
2580 fCheckForPruning
= true;
2581 FlushStateToDisk(state
, FLUSH_STATE_NONE
);
2584 /** Update chainActive and related internal data structures. */
// NOTE(review): mangled extraction with elided lines (e.g. braces, the
// declarations of nUpgraded used below) — code kept byte-identical.
// Sets the new tip, bumps mempool/timestamp bookkeeping, wakes waiters on
// cvBlockChange, then (outside IBD) scans versionbits warning state and the
// last 100 block versions to warn about unknown upgrades, and logs the new tip.
2585 void static UpdateTip(CBlockIndex
*pindexNew
, const CChainParams
& chainParams
) {
2586 chainActive
.SetTip(pindexNew
);
2589 nTimeBestReceived
= GetTime();
2590 mempool
.AddTransactionsUpdated(1);
2592 cvBlockChange
.notify_all();
2594 static bool fWarned
= false;
2595 std::vector
<std::string
> warningMessages
;
2596 if (!IsInitialBlockDownload())
// Check each versionbits deployment bit for unknown ACTIVE/LOCKED_IN rules.
2599 const CBlockIndex
* pindex
= chainActive
.Tip();
2600 for (int bit
= 0; bit
< VERSIONBITS_NUM_BITS
; bit
++) {
2601 WarningBitsConditionChecker
checker(bit
);
2602 ThresholdState state
= checker
.GetStateFor(pindex
, chainParams
.GetConsensus(), warningcache
[bit
]);
2603 if (state
== THRESHOLD_ACTIVE
|| state
== THRESHOLD_LOCKED_IN
) {
2604 if (state
== THRESHOLD_ACTIVE
) {
2605 strMiscWarning
= strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit
);
2607 AlertNotify(strMiscWarning
);
2611 warningMessages
.push_back(strprintf("unknown new rules are about to activate (versionbit %i)", bit
));
2615 // Check the version of the last 100 blocks to see if we need to upgrade:
2616 for (int i
= 0; i
< 100 && pindex
!= NULL
; i
++)
2618 int32_t nExpectedVersion
= ComputeBlockVersion(pindex
->pprev
, chainParams
.GetConsensus())
;
2619 if (pindex
->nVersion
> VERSIONBITS_LAST_OLD_BLOCK_VERSION
&& (pindex
->nVersion
& ~nExpectedVersion
) != 0)
2621 pindex
= pindex
->pprev
;
// nUpgraded counts blocks with unexpected version bits set; its declaration
// and increment are among the elided lines — TODO confirm against upstream.
2624 warningMessages
.push_back(strprintf("%d of last 100 blocks have unexpected version", nUpgraded
));
2625 if (nUpgraded
> 100/2)
2627 // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
2628 strMiscWarning
= _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
2630 AlertNotify(strMiscWarning
);
2635 LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utx)", __func__
,
2636 chainActive
.Tip()->GetBlockHash().ToString(), chainActive
.Height(), chainActive
.Tip()->nVersion
,
2637 log(chainActive
.Tip()->nChainWork
.getdouble())/log(2.0), (unsigned long)chainActive
.Tip()->nChainTx
,
2638 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive
.Tip()->GetBlockTime()),
2639 Checkpoints::GuessVerificationProgress(chainParams
.Checkpoints(), chainActive
.Tip()), pcoinsTip
->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip
->GetCacheSize());
2640 if (!warningMessages
.empty())
2641 LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages
, ", "));
2646 /** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
// NOTE(review): mangled extraction; several original lines elided (e.g. the
// CBlock declaration read at 2653, braces). Code kept byte-identical.
// Flow: read tip block from disk, undo it against a coins cache layered on
// pcoinsTip, flush if needed, resurrect its non-coinbase txs into the mempool,
// update the tip to pprev, then notify wallets.
2647 bool static DisconnectTip(CValidationState
& state
, const CChainParams
& chainparams
)
2649 CBlockIndex
*pindexDelete
= chainActive
.Tip();
2650 assert(pindexDelete
);
2651 // Read block from disk.
2653 if (!ReadBlockFromDisk(block
, pindexDelete
, chainparams
.GetConsensus()))
2654 return AbortNode(state
, "Failed to read block");
2655 // Apply the block atomically to the chain state.
2656 int64_t nStart
= GetTimeMicros();
2658 CCoinsViewCache
view(pcoinsTip
);
2659 if (!DisconnectBlock(block
, state
, pindexDelete
, view
))
2660 return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete
->GetBlockHash().ToString());
2661 assert(view
.Flush());
2663 LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart
) * 0.001);
2664 // Write the chain state to disk, if necessary.
2665 if (!FlushStateToDisk(state
, FLUSH_STATE_IF_NEEDED
))
2667 // Resurrect mempool transactions from the disconnected block.
2668 std::vector
<uint256
> vHashUpdate
;
2669 BOOST_FOREACH(const CTransaction
&tx
, block
.vtx
) {
2670 // ignore validation errors in resurrected transactions
2671 list
<CTransaction
> removed
;
2672 CValidationState stateDummy
;
2673 if (tx
.IsCoinBase() || !AcceptToMemoryPool(mempool
, stateDummy
, tx
, false, NULL
, true)) {
2674 mempool
.removeRecursive(tx
, removed
);
2675 } else if (mempool
.exists(tx
.GetHash())) {
2676 vHashUpdate
.push_back(tx
.GetHash());
2679 // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
2680 // no in-mempool children, which is generally not true when adding
2681 // previously-confirmed transactions back to the mempool.
2682 // UpdateTransactionsFromBlock finds descendants of any transactions in this
2683 // block that were added back and cleans up the mempool state.
2684 mempool
.UpdateTransactionsFromBlock(vHashUpdate
);
2685 // Update chainActive and related variables.
2686 UpdateTip(pindexDelete
->pprev
, chainparams
);
2687 // Let wallets know transactions went from 1-confirmed to
2688 // 0-confirmed or conflicted:
2689 BOOST_FOREACH(const CTransaction
&tx
, block
.vtx
) {
2690 SyncWithWallets(tx
, pindexDelete
->pprev
, NULL
);
// Cumulative microsecond counters accumulated by ConnectTip() below and
// reported through its LogPrint("bench", ...) lines.
2695 static int64_t nTimeReadFromDisk
= 0;
2696 static int64_t nTimeConnectTotal
= 0;
2697 static int64_t nTimeFlush
= 0;
2698 static int64_t nTimeChainState
= 0;
2699 static int64_t nTimePostConnect
= 0;
// NOTE(review): mangled extraction; elided lines include the local CBlock used
// when pblock is NULL, the declarations of nTime3/nTimeTotal, and braces.
// Code kept byte-identical; comments only. Flow: assert pindexNew extends the
// tip, load the block (disk or pblock), ConnectBlock into a coins cache,
// flush, trim conflicting mempool txs, UpdateTip, notify wallets, log timings.
2702 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
2703 * corresponding to pindexNew, to bypass loading it again from disk.
2705 bool static ConnectTip(CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
* pindexNew
, const CBlock
* pblock
)
2707 assert(pindexNew
->pprev
== chainActive
.Tip());
2708 // Read block from disk.
2709 int64_t nTime1
= GetTimeMicros();
2712 if (!ReadBlockFromDisk(block
, pindexNew
, chainparams
.GetConsensus()))
2713 return AbortNode(state
, "Failed to read block");
2716 // Apply the block atomically to the chain state.
2717 int64_t nTime2
= GetTimeMicros(); nTimeReadFromDisk
+= nTime2
- nTime1
;
2719 LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2
- nTime1
) * 0.001, nTimeReadFromDisk
* 0.000001);
2721 CCoinsViewCache
view(pcoinsTip
);
2722 bool rv
= ConnectBlock(*pblock
, state
, pindexNew
, view
, chainparams
);
2723 GetMainSignals().BlockChecked(*pblock
, state
);
2725 if (state
.IsInvalid())
2726 InvalidBlockFound(pindexNew
, state
);
2727 return error("ConnectTip(): ConnectBlock %s failed", pindexNew
->GetBlockHash().ToString());
2729 mapBlockSource
.erase(pindexNew
->GetBlockHash());
2730 nTime3
= GetTimeMicros(); nTimeConnectTotal
+= nTime3
- nTime2
;
2731 LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3
- nTime2
) * 0.001, nTimeConnectTotal
* 0.000001);
2732 assert(view
.Flush());
2734 int64_t nTime4
= GetTimeMicros(); nTimeFlush
+= nTime4
- nTime3
;
2735 LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4
- nTime3
) * 0.001, nTimeFlush
* 0.000001);
2736 // Write the chain state to disk, if necessary.
2737 if (!FlushStateToDisk(state
, FLUSH_STATE_IF_NEEDED
))
2739 int64_t nTime5
= GetTimeMicros(); nTimeChainState
+= nTime5
- nTime4
;
2740 LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5
- nTime4
) * 0.001, nTimeChainState
* 0.000001);
2741 // Remove conflicting transactions from the mempool.
2742 list
<CTransaction
> txConflicted
;
2743 mempool
.removeForBlock(pblock
->vtx
, pindexNew
->nHeight
, txConflicted
, !IsInitialBlockDownload());
2744 // Update chainActive & related variables.
2745 UpdateTip(pindexNew
, chainparams
);
2746 // Tell wallet about transactions that went from mempool
2748 BOOST_FOREACH(const CTransaction
&tx
, txConflicted
) {
2749 SyncWithWallets(tx
, pindexNew
, NULL
);
2751 // ... and about transactions that got confirmed:
2752 BOOST_FOREACH(const CTransaction
&tx
, pblock
->vtx
) {
2753 SyncWithWallets(tx
, pindexNew
, pblock
);
2756 int64_t nTime6
= GetTimeMicros(); nTimePostConnect
+= nTime6
- nTime5
; nTimeTotal
+= nTime6
- nTime1
;
2757 LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6
- nTime5
) * 0.001, nTimePostConnect
* 0.000001);
2758 LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6
- nTime1
) * 0.001, nTimeTotal
* 0.000001);
// NOTE(review): mangled extraction; the enclosing do/while loop and several
// braces are elided. Code kept byte-identical; comments only.
// Walks setBlockIndexCandidates from highest work downward; for each
// candidate, verifies every ancestor back to chainActive is connectable
// (has data, not failed). Failed/missing candidates are pruned from the set
// (missing-data ones parked in mapBlocksUnlinked for retry).
2763 * Return the tip of the chain with the most work in it, that isn't
2764 * known to be invalid (it's however far from certain to be valid).
2766 static CBlockIndex
* FindMostWorkChain() {
2768 CBlockIndex
*pindexNew
= NULL
;
2770 // Find the best candidate header.
2772 std::set
<CBlockIndex
*, CBlockIndexWorkComparator
>::reverse_iterator it
= setBlockIndexCandidates
.rbegin();
2773 if (it
== setBlockIndexCandidates
.rend())
2778 // Check whether all blocks on the path between the currently active chain and the candidate are valid.
2779 // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
2780 CBlockIndex
*pindexTest
= pindexNew
;
2781 bool fInvalidAncestor
= false;
2782 while (pindexTest
&& !chainActive
.Contains(pindexTest
)) {
2783 assert(pindexTest
->nChainTx
|| pindexTest
->nHeight
== 0);
2785 // Pruned nodes may have entries in setBlockIndexCandidates for
2786 // which block files have been deleted. Remove those as candidates
2787 // for the most work chain if we come across them; we can't switch
2788 // to a chain unless we have all the non-active-chain parent blocks.
2789 bool fFailedChain
= pindexTest
->nStatus
& BLOCK_FAILED_MASK
;
2790 bool fMissingData
= !(pindexTest
->nStatus
& BLOCK_HAVE_DATA
);
2791 if (fFailedChain
|| fMissingData
) {
2792 // Candidate chain is not usable (either invalid or missing data)
2793 if (fFailedChain
&& (pindexBestInvalid
== NULL
|| pindexNew
->nChainWork
> pindexBestInvalid
->nChainWork
))
2794 pindexBestInvalid
= pindexNew
;
2795 CBlockIndex
*pindexFailed
= pindexNew
;
2796 // Remove the entire chain from the set.
2797 while (pindexTest
!= pindexFailed
) {
2799 pindexFailed
->nStatus
|= BLOCK_FAILED_CHILD
;
2800 } else if (fMissingData
) {
2801 // If we're missing data, then add back to mapBlocksUnlinked,
2802 // so that if the block arrives in the future we can try adding
2803 // to setBlockIndexCandidates again.
2804 mapBlocksUnlinked
.insert(std::make_pair(pindexFailed
->pprev
, pindexFailed
));
2806 setBlockIndexCandidates
.erase(pindexFailed
);
2807 pindexFailed
= pindexFailed
->pprev
;
2809 setBlockIndexCandidates
.erase(pindexTest
);
2810 fInvalidAncestor
= true;
2813 pindexTest
= pindexTest
->pprev
;
2815 if (!fInvalidAncestor
)
2820 /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
// NOTE(review): mangled extraction; a brace line is elided. Comments only.
// Uses the set's work comparator to drop every candidate strictly worse than
// chainActive.Tip(); the tip (or a descendant being worked toward) remains.
2821 static void PruneBlockIndexCandidates() {
2822 // Note that we can't delete the current block itself, as we may need to return to it later in case a
2823 // reorganization to a better block fails.
2824 std::set
<CBlockIndex
*, CBlockIndexWorkComparator
>::iterator it
= setBlockIndexCandidates
.begin();
2825 while (it
!= setBlockIndexCandidates
.end() && setBlockIndexCandidates
.value_comp()(*it
, chainActive
.Tip())) {
2826 setBlockIndexCandidates
.erase(it
++);
2828 // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2829 assert(!setBlockIndexCandidates
.empty());
// NOTE(review): mangled extraction; return statements, braces and some error
// paths are elided. Code kept byte-identical; comments only.
// One reorg step: disconnect tips not on the path to pindexMostWork, then
// connect up to 32 blocks toward it; on consensus failure marks the chain
// invalid and reports via fInvalidFound; re-limits the mempool after any
// disconnects and fires fork-warning checks.
2833 * Try to make some progress towards making pindexMostWork the active block.
2834 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
2836 static bool ActivateBestChainStep(CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
* pindexMostWork
, const CBlock
* pblock
, bool& fInvalidFound
)
2838 AssertLockHeld(cs_main
);
2839 const CBlockIndex
*pindexOldTip
= chainActive
.Tip();
2840 const CBlockIndex
*pindexFork
= chainActive
.FindFork(pindexMostWork
);
2842 // Disconnect active blocks which are no longer in the best chain.
2843 bool fBlocksDisconnected
= false;
2844 while (chainActive
.Tip() && chainActive
.Tip() != pindexFork
) {
2845 if (!DisconnectTip(state
, chainparams
))
2847 fBlocksDisconnected
= true;
2850 // Build list of new blocks to connect.
2851 std::vector
<CBlockIndex
*> vpindexToConnect
;
2852 bool fContinue
= true;
2853 int nHeight
= pindexFork
? pindexFork
->nHeight
: -1;
2854 while (fContinue
&& nHeight
!= pindexMostWork
->nHeight
) {
2855 // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
2856 // a few blocks along the way.
2857 int nTargetHeight
= std::min(nHeight
+ 32, pindexMostWork
->nHeight
);
2858 vpindexToConnect
.clear();
2859 vpindexToConnect
.reserve(nTargetHeight
- nHeight
);
2860 CBlockIndex
*pindexIter
= pindexMostWork
->GetAncestor(nTargetHeight
);
2861 while (pindexIter
&& pindexIter
->nHeight
!= nHeight
) {
2862 vpindexToConnect
.push_back(pindexIter
);
2863 pindexIter
= pindexIter
->pprev
;
2865 nHeight
= nTargetHeight
;
2867 // Connect new blocks.
2868 BOOST_REVERSE_FOREACH(CBlockIndex
*pindexConnect
, vpindexToConnect
) {
2869 if (!ConnectTip(state
, chainparams
, pindexConnect
, pindexConnect
== pindexMostWork
? pblock
: NULL
)) {
2870 if (state
.IsInvalid()) {
2871 // The block violates a consensus rule.
2872 if (!state
.CorruptionPossible())
2873 InvalidChainFound(vpindexToConnect
.back());
2874 state
= CValidationState();
2875 fInvalidFound
= true;
2879 // A system error occurred (disk space, database error, ...).
2883 PruneBlockIndexCandidates();
2884 if (!pindexOldTip
|| chainActive
.Tip()->nChainWork
> pindexOldTip
->nChainWork
) {
2885 // We're in a better position than we were. Return temporarily to release the lock.
2893 if (fBlocksDisconnected
) {
2894 mempool
.removeForReorg(pcoinsTip
, chainActive
.Tip()->nHeight
+ 1, STANDARD_LOCKTIME_VERIFY_FLAGS
);
2895 LimitMempoolSize(mempool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
2897 mempool
.check(pcoinsTip
);
2899 // Callbacks/notifications for a new best chain.
2901 CheckForkWarningConditionsOnNewFork(vpindexToConnect
.back());
2903 CheckForkWarningConditions();
// Notify the UI when the best known header (best candidate) changes.
// NOTE(review): mangled extraction; the cs_main lock scope and braces are
// elided. Code kept byte-identical; comments only.
2908 static void NotifyHeaderTip() {
2909 bool fNotify
= false;
2910 bool fInitialBlockDownload
= false;
// Remembers the last header we notified about, to suppress duplicates.
2911 static CBlockIndex
* pindexHeaderOld
= NULL
;
2912 CBlockIndex
* pindexHeader
= NULL
;
2915 if (!setBlockIndexCandidates
.empty()) {
2916 pindexHeader
= *setBlockIndexCandidates
.rbegin();
2918 if (pindexHeader
!= pindexHeaderOld
) {
2920 fInitialBlockDownload
= IsInitialBlockDownload();
2921 pindexHeaderOld
= pindexHeader
;
2924 // Send block tip changed notifications without cs_main
2926 uiInterface
.NotifyHeaderTip(fInitialBlockDownload
, pindexHeader
);
// NOTE(review): mangled extraction; the outer do { } while loop braces, the
// LOCK(cs_main) scope, nNewHeight declaration and break statements are elided.
// Code kept byte-identical; comments only.
// Repeatedly calls ActivateBestChainStep until the tip equals the most-work
// candidate, then (outside cs_main) notifies the UI, relays new block hashes
// to peers, signals listeners, and periodically flushes to disk.
2931 * Make the best chain active, in multiple steps. The result is either failure
2932 * or an activated best chain. pblock is either NULL or a pointer to a block
2933 * that is already loaded (to avoid loading it again from disk).
2935 bool ActivateBestChain(CValidationState
&state
, const CChainParams
& chainparams
, const CBlock
*pblock
) {
2936 CBlockIndex
*pindexMostWork
= NULL
;
2937 CBlockIndex
*pindexNewTip
= NULL
;
2939 boost::this_thread::interruption_point();
2940 if (ShutdownRequested())
2943 const CBlockIndex
*pindexFork
;
2944 bool fInitialDownload
;
2948 CBlockIndex
*pindexOldTip
= chainActive
.Tip();
2949 if (pindexMostWork
== NULL
) {
2950 pindexMostWork
= FindMostWorkChain();
2953 // Whether we have anything to do at all.
2954 if (pindexMostWork
== NULL
|| pindexMostWork
== chainActive
.Tip())
2957 bool fInvalidFound
= false;
2958 if (!ActivateBestChainStep(state
, chainparams
, pindexMostWork
, pblock
&& pblock
->GetHash() == pindexMostWork
->GetBlockHash() ? pblock
: NULL
, fInvalidFound
))
2961 if (fInvalidFound
) {
2962 // Wipe cache, we may need another branch now.
2963 pindexMostWork
= NULL
;
2965 pindexNewTip
= chainActive
.Tip();
2966 pindexFork
= chainActive
.FindFork(pindexOldTip
);
2967 fInitialDownload
= IsInitialBlockDownload();
2968 nNewHeight
= chainActive
.Height();
2970 // When we reach this point, we switched to a new tip (stored in pindexNewTip).
2972 // Notifications/callbacks that can run without cs_main
2973 // Always notify the UI if a new block tip was connected
2974 if (pindexFork
!= pindexNewTip
) {
2975 uiInterface
.NotifyBlockTip(fInitialDownload
, pindexNewTip
);
2977 if (!fInitialDownload
) {
2978 // Find the hashes of all blocks that weren't previously in the best chain.
2979 std::vector
<uint256
> vHashes
;
2980 CBlockIndex
*pindexToAnnounce
= pindexNewTip
;
2981 while (pindexToAnnounce
!= pindexFork
) {
2982 vHashes
.push_back(pindexToAnnounce
->GetBlockHash());
2983 pindexToAnnounce
= pindexToAnnounce
->pprev
;
2984 if (vHashes
.size() == MAX_BLOCKS_TO_ANNOUNCE
) {
2985 // Limit announcements in case of a huge reorganization.
2986 // Rely on the peer's synchronization mechanism in that case.
2990 // Relay inventory, but don't relay old inventory during initial block download.
2991 int nBlockEstimate
= 0;
2992 if (fCheckpointsEnabled
)
2993 nBlockEstimate
= Checkpoints::GetTotalBlocksEstimate(chainparams
.Checkpoints());
2996 BOOST_FOREACH(CNode
* pnode
, vNodes
) {
2997 if (nNewHeight
> (pnode
->nStartingHeight
!= -1 ? pnode
->nStartingHeight
- 2000 : nBlockEstimate
)) {
2998 BOOST_REVERSE_FOREACH(const uint256
& hash
, vHashes
) {
2999 pnode
->PushBlockHash(hash
);
3004 // Notify external listeners about the new tip.
3005 if (!vHashes
.empty()) {
3006 GetMainSignals().UpdatedBlockTip(pindexNewTip
);
3010 } while (pindexNewTip
!= pindexMostWork
);
3011 CheckBlockIndex(chainparams
.GetConsensus());
3013 // Write changes periodically to disk, after relay.
3014 if (!FlushStateToDisk(state
, FLUSH_STATE_PERIODIC
)) {
// Mark a block (and, while it stays in the active chain, its descendants) as
// invalid, disconnecting the chain back past it, then restore any
// still-viable candidates and re-limit the mempool.
// NOTE(review): mangled extraction; braces, return statements and iterator
// increments are elided. Code kept byte-identical; comments only.
3021 bool InvalidateBlock(CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
*pindex
)
3023 AssertLockHeld(cs_main
);
3025 // Mark the block itself as invalid.
3026 pindex
->nStatus
|= BLOCK_FAILED_VALID
;
3027 setDirtyBlockIndex
.insert(pindex
);
3028 setBlockIndexCandidates
.erase(pindex
);
3030 while (chainActive
.Contains(pindex
)) {
3031 CBlockIndex
*pindexWalk
= chainActive
.Tip();
3032 pindexWalk
->nStatus
|= BLOCK_FAILED_CHILD
;
3033 setDirtyBlockIndex
.insert(pindexWalk
);
3034 setBlockIndexCandidates
.erase(pindexWalk
);
3035 // ActivateBestChain considers blocks already in chainActive
3036 // unconditionally valid already, so force disconnect away from it.
3037 if (!DisconnectTip(state
, chainparams
)) {
3038 mempool
.removeForReorg(pcoinsTip
, chainActive
.Tip()->nHeight
+ 1, STANDARD_LOCKTIME_VERIFY_FLAGS
);
3043 LimitMempoolSize(mempool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
3045 // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
3047 BlockMap::iterator it
= mapBlockIndex
.begin();
3048 while (it
!= mapBlockIndex
.end()) {
3049 if (it
->second
->IsValid(BLOCK_VALID_TRANSACTIONS
) && it
->second
->nChainTx
&& !setBlockIndexCandidates
.value_comp()(it
->second
, chainActive
.Tip())) {
3050 setBlockIndexCandidates
.insert(it
->second
);
3055 InvalidChainFound(pindex
);
3056 mempool
.removeForReorg(pcoinsTip
, chainActive
.Tip()->nHeight
+ 1, STANDARD_LOCKTIME_VERIFY_FLAGS
);
// Clear BLOCK_FAILED_* flags from a block, its descendants, and its
// ancestors, re-adding newly viable entries to setBlockIndexCandidates.
// NOTE(review): mangled extraction; braces, iterator increments and the
// return are elided. Code kept byte-identical; comments only.
3060 bool ResetBlockFailureFlags(CBlockIndex
*pindex
) {
3061 AssertLockHeld(cs_main
);
3063 int nHeight
= pindex
->nHeight
;
3065 // Remove the invalidity flag from this block and all its descendants.
3066 BlockMap::iterator it
= mapBlockIndex
.begin();
3067 while (it
!= mapBlockIndex
.end()) {
3068 if (!it
->second
->IsValid() && it
->second
->GetAncestor(nHeight
) == pindex
) {
3069 it
->second
->nStatus
&= ~BLOCK_FAILED_MASK
;
3070 setDirtyBlockIndex
.insert(it
->second
);
3071 if (it
->second
->IsValid(BLOCK_VALID_TRANSACTIONS
) && it
->second
->nChainTx
&& setBlockIndexCandidates
.value_comp()(chainActive
.Tip(), it
->second
)) {
3072 setBlockIndexCandidates
.insert(it
->second
);
3074 if (it
->second
== pindexBestInvalid
) {
3075 // Reset invalid block marker if it was pointing to one of those.
3076 pindexBestInvalid
= NULL
;
3082 // Remove the invalidity flag from all ancestors too.
3083 while (pindex
!= NULL
) {
3084 if (pindex
->nStatus
& BLOCK_FAILED_MASK
) {
3085 pindex
->nStatus
&= ~BLOCK_FAILED_MASK
;
3086 setDirtyBlockIndex
.insert(pindex
);
3088 pindex
= pindex
->pprev
;
// Create (or return existing) a CBlockIndex entry for a header: links pprev,
// computes height and cumulative chain work, raises validity to
// BLOCK_VALID_TREE, and updates pindexBestHeader. NOTE(review): mangled
// extraction; return statements and braces elided. Comments only.
3093 CBlockIndex
* AddToBlockIndex(const CBlockHeader
& block
)
3095 // Check for duplicate
3096 uint256 hash
= block
.GetHash();
3097 BlockMap::iterator it
= mapBlockIndex
.find(hash
);
3098 if (it
!= mapBlockIndex
.end())
3101 // Construct new block index object
3102 CBlockIndex
* pindexNew
= new CBlockIndex(block
);
3104 // We assign the sequence id to blocks only when the full data is available,
3105 // to avoid miners withholding blocks but broadcasting headers, to get a
3106 // competitive advantage.
3107 pindexNew
->nSequenceId
= 0;
// phashBlock points at the key stored in mapBlockIndex (stable storage).
3108 BlockMap::iterator mi
= mapBlockIndex
.insert(make_pair(hash
, pindexNew
)).first
;
3109 pindexNew
->phashBlock
= &((*mi
).first
);
3110 BlockMap::iterator miPrev
= mapBlockIndex
.find(block
.hashPrevBlock
);
3111 if (miPrev
!= mapBlockIndex
.end())
3113 pindexNew
->pprev
= (*miPrev
).second
;
3114 pindexNew
->nHeight
= pindexNew
->pprev
->nHeight
+ 1;
3115 pindexNew
->BuildSkip();
3117 pindexNew
->nChainWork
= (pindexNew
->pprev
? pindexNew
->pprev
->nChainWork
: 0) + GetBlockProof(*pindexNew
);
3118 pindexNew
->RaiseValidity(BLOCK_VALID_TREE
);
3119 if (pindexBestHeader
== NULL
|| pindexBestHeader
->nChainWork
< pindexNew
->nChainWork
)
3120 pindexBestHeader
= pindexNew
;
3122 setDirtyBlockIndex
.insert(pindexNew
);
3127 /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
// NOTE(review): mangled extraction; braces and queue.pop_front()-style lines
// are elided. Code kept byte-identical; comments only.
// Records file position and tx count; if all parents have nChainTx, walks the
// descendant tree (via mapBlocksUnlinked) assigning nChainTx/nSequenceId and
// inserting connectable blocks into setBlockIndexCandidates; otherwise parks
// the block in mapBlocksUnlinked until its parent's data arrives.
3128 bool ReceivedBlockTransactions(const CBlock
&block
, CValidationState
& state
, CBlockIndex
*pindexNew
, const CDiskBlockPos
& pos
)
3130 pindexNew
->nTx
= block
.vtx
.size();
3131 pindexNew
->nChainTx
= 0;
3132 pindexNew
->nFile
= pos
.nFile
;
3133 pindexNew
->nDataPos
= pos
.nPos
;
3134 pindexNew
->nUndoPos
= 0;
3135 pindexNew
->nStatus
|= BLOCK_HAVE_DATA
;
3136 pindexNew
->RaiseValidity(BLOCK_VALID_TRANSACTIONS
);
3137 setDirtyBlockIndex
.insert(pindexNew
);
3139 if (pindexNew
->pprev
== NULL
|| pindexNew
->pprev
->nChainTx
) {
3140 // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
3141 deque
<CBlockIndex
*> queue
;
3142 queue
.push_back(pindexNew
);
3144 // Recursively process any descendant blocks that now may be eligible to be connected.
3145 while (!queue
.empty()) {
3146 CBlockIndex
*pindex
= queue
.front();
3148 pindex
->nChainTx
= (pindex
->pprev
? pindex
->pprev
->nChainTx
: 0) + pindex
->nTx
;
3150 LOCK(cs_nBlockSequenceId
);
3151 pindex
->nSequenceId
= nBlockSequenceId
++;
3153 if (chainActive
.Tip() == NULL
|| !setBlockIndexCandidates
.value_comp()(pindex
, chainActive
.Tip())) {
3154 setBlockIndexCandidates
.insert(pindex
);
3156 std::pair
<std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
, std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
> range
= mapBlocksUnlinked
.equal_range(pindex
);
3157 while (range
.first
!= range
.second
) {
3158 std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator it
= range
.first
;
3159 queue
.push_back(it
->second
);
3161 mapBlocksUnlinked
.erase(it
);
3165 if (pindexNew
->pprev
&& pindexNew
->pprev
->IsValid(BLOCK_VALID_TREE
)) {
3166 mapBlocksUnlinked
.insert(std::make_pair(pindexNew
->pprev
, pindexNew
));
// Choose a file/offset in the blk?????.dat series for nAddSize new bytes:
// rolls to the next file when MAX_BLOCKFILE_SIZE would be exceeded, updates
// per-file stats, and pre-allocates in BLOCKFILE_CHUNK_SIZE chunks (checking
// disk space). When fKnown, the position is already fixed (reindex path).
// NOTE(review): mangled extraction; braces, nFile++/pos.nFile assignments and
// returns are elided. Code kept byte-identical; comments only.
3173 bool FindBlockPos(CValidationState
&state
, CDiskBlockPos
&pos
, unsigned int nAddSize
, unsigned int nHeight
, uint64_t nTime
, bool fKnown
= false)
3175 LOCK(cs_LastBlockFile
);
3177 unsigned int nFile
= fKnown
? pos
.nFile
: nLastBlockFile
;
3178 if (vinfoBlockFile
.size() <= nFile
) {
3179 vinfoBlockFile
.resize(nFile
+ 1);
3183 while (vinfoBlockFile
[nFile
].nSize
+ nAddSize
>= MAX_BLOCKFILE_SIZE
) {
3185 if (vinfoBlockFile
.size() <= nFile
) {
3186 vinfoBlockFile
.resize(nFile
+ 1);
3190 pos
.nPos
= vinfoBlockFile
[nFile
].nSize
;
3193 if ((int)nFile
!= nLastBlockFile
) {
3195 LogPrintf("Leaving block file %i: %s\n", nLastBlockFile
, vinfoBlockFile
[nLastBlockFile
].ToString());
3197 FlushBlockFile(!fKnown
);
3198 nLastBlockFile
= nFile
;
3201 vinfoBlockFile
[nFile
].AddBlock(nHeight
, nTime
);
3203 vinfoBlockFile
[nFile
].nSize
= std::max(pos
.nPos
+ nAddSize
, vinfoBlockFile
[nFile
].nSize
);
3205 vinfoBlockFile
[nFile
].nSize
+= nAddSize
;
3208 unsigned int nOldChunks
= (pos
.nPos
+ BLOCKFILE_CHUNK_SIZE
- 1) / BLOCKFILE_CHUNK_SIZE
;
3209 unsigned int nNewChunks
= (vinfoBlockFile
[nFile
].nSize
+ BLOCKFILE_CHUNK_SIZE
- 1) / BLOCKFILE_CHUNK_SIZE
;
3210 if (nNewChunks
> nOldChunks
) {
3212 fCheckForPruning
= true;
3213 if (CheckDiskSpace(nNewChunks
* BLOCKFILE_CHUNK_SIZE
- pos
.nPos
)) {
3214 FILE *file
= OpenBlockFile(pos
);
3216 LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks
* BLOCKFILE_CHUNK_SIZE
, pos
.nFile
);
3217 AllocateFileRange(file
, pos
.nPos
, nNewChunks
* BLOCKFILE_CHUNK_SIZE
- pos
.nPos
);
3222 return state
.Error("out of disk space");
3226 setDirtyFileInfo
.insert(nFile
);
// Reserve nAddSize bytes in the rev?????.dat undo file paired with nFile,
// pre-allocating in UNDOFILE_CHUNK_SIZE chunks with a disk-space check.
// NOTE(review): mangled extraction; braces and returns are elided.
// Code kept byte-identical; comments only.
3230 bool FindUndoPos(CValidationState
&state
, int nFile
, CDiskBlockPos
&pos
, unsigned int nAddSize
)
3234 LOCK(cs_LastBlockFile
);
3236 unsigned int nNewSize
;
3237 pos
.nPos
= vinfoBlockFile
[nFile
].nUndoSize
;
3238 nNewSize
= vinfoBlockFile
[nFile
].nUndoSize
+= nAddSize
;
3239 setDirtyFileInfo
.insert(nFile
);
3241 unsigned int nOldChunks
= (pos
.nPos
+ UNDOFILE_CHUNK_SIZE
- 1) / UNDOFILE_CHUNK_SIZE
;
3242 unsigned int nNewChunks
= (nNewSize
+ UNDOFILE_CHUNK_SIZE
- 1) / UNDOFILE_CHUNK_SIZE
;
3243 if (nNewChunks
> nOldChunks
) {
3245 fCheckForPruning
= true;
3246 if (CheckDiskSpace(nNewChunks
* UNDOFILE_CHUNK_SIZE
- pos
.nPos
)) {
3247 FILE *file
= OpenUndoFile(pos
);
3249 LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks
* UNDOFILE_CHUNK_SIZE
, pos
.nFile
);
3250 AllocateFileRange(file
, pos
.nPos
, nNewChunks
* UNDOFILE_CHUNK_SIZE
- pos
.nPos
);
3255 return state
.Error("out of disk space");
// Context-free header checks: proof of work (when fCheckPOW) and a
// timestamp-not-too-far-in-the-future bound (2 hours past nAdjustedTime).
// NOTE(review): braces/return elided by the extraction; comments only.
3261 bool CheckBlockHeader(const CBlockHeader
& block
, CValidationState
& state
, const Consensus::Params
& consensusParams
, int64_t nAdjustedTime
, bool fCheckPOW
)
3263 // Check proof of work matches claimed amount
3264 if (fCheckPOW
&& !CheckProofOfWork(block
.GetHash(), block
.nBits
, consensusParams
))
3265 return state
.DoS(50, false, REJECT_INVALID
, "high-hash", false, "proof of work failed");
3268 if (block
.GetBlockTime() > nAdjustedTime
+ 2 * 60 * 60)
3269 return state
.Invalid(false, REJECT_INVALID
, "time-too-new", "block timestamp too far in the future");
// Context-free block checks: header PoW/time, merkle root (with the
// CVE-2012-2459 duplicate-tx malleability check), size limits, exactly one
// coinbase (first tx), per-tx CheckTransaction, and the legacy sigop cap.
// Sets block.fChecked on full success so repeat checks can be skipped.
// NOTE(review): mangled extraction; the fChecked early-return, the `mutated`
// declaration and several braces are elided. Code kept byte-identical.
3274 bool CheckBlock(const CBlock
& block
, CValidationState
& state
, const Consensus::Params
& consensusParams
, int64_t nAdjustedTime
, bool fCheckPOW
, bool fCheckMerkleRoot
)
3276 // These are checks that are independent of context.
3281 // Check that the header is valid (particularly PoW). This is mostly
3282 // redundant with the call in AcceptBlockHeader.
3283 if (!CheckBlockHeader(block
, state
, consensusParams
, nAdjustedTime
, fCheckPOW
))
3286 // Check the merkle root.
3287 if (fCheckMerkleRoot
) {
3289 uint256 hashMerkleRoot2
= BlockMerkleRoot(block
, &mutated
);
3290 if (block
.hashMerkleRoot
!= hashMerkleRoot2
)
3291 return state
.DoS(100, false, REJECT_INVALID
, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");
3293 // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3294 // of transactions in a block without affecting the merkle root of a block,
3295 // while still invalidating it.
3297 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-duplicate", true, "duplicate transaction");
3300 // All potential-corruption validation must be done before we do any
3301 // transaction validation, as otherwise we may mark the header as invalid
3302 // because we receive the wrong transactions for it.
3305 if (block
.vtx
.empty() || block
.vtx
.size() > MAX_BLOCK_SIZE
|| ::GetSerializeSize(block
, SER_NETWORK
, PROTOCOL_VERSION
) > MAX_BLOCK_SIZE
)
3306 return state
.DoS(100, false, REJECT_INVALID
, "bad-blk-length", false, "size limits failed");
3308 // First transaction must be coinbase, the rest must not be
3309 if (block
.vtx
.empty() || !block
.vtx
[0].IsCoinBase())
3310 return state
.DoS(100, false, REJECT_INVALID
, "bad-cb-missing", false, "first tx is not coinbase");
3311 for (unsigned int i
= 1; i
< block
.vtx
.size(); i
++)
3312 if (block
.vtx
[i
].IsCoinBase())
3313 return state
.DoS(100, false, REJECT_INVALID
, "bad-cb-multiple", false, "more than one coinbase");
3315 // Check transactions
3316 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
)
3317 if (!CheckTransaction(tx
, state
))
3318 return state
.Invalid(false, state
.GetRejectCode(), state
.GetRejectReason(),
3319 strprintf("Transaction check failed (tx hash %s) %s", tx
.GetHash().ToString(), state
.GetDebugMessage()));
3321 unsigned int nSigOps
= 0;
3322 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
)
3324 nSigOps
+= GetLegacySigOpCount(tx
);
3326 if (nSigOps
> MAX_BLOCK_SIGOPS
)
3327 return state
.DoS(100, false, REJECT_INVALID
, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
3329 if (fCheckPOW
&& fCheckMerkleRoot
)
3330 block
.fChecked
= true;
// Reject blocks that would fork the chain before the last checkpoint height.
// The genesis-prev case is exempt. NOTE(review): braces and returns elided by
// the extraction; code kept byte-identical, comments only.
3335 static bool CheckIndexAgainstCheckpoint(const CBlockIndex
* pindexPrev
, CValidationState
& state
, const CChainParams
& chainparams
, const uint256
& hash
)
3337 if (*pindexPrev
->phashBlock
== chainparams
.GetConsensus().hashGenesisBlock
)
3340 int nHeight
= pindexPrev
->nHeight
+1;
3341 // Don't accept any forks from the main chain prior to last checkpoint
3342 CBlockIndex
* pcheckpoint
= Checkpoints::GetLastCheckpoint(chainparams
.Checkpoints());
3343 if (pcheckpoint
&& nHeight
< pcheckpoint
->nHeight
)
3344 return state
.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__
, nHeight
));
// Header checks that need the previous block: required difficulty (nBits),
// timestamp above prev's median-time-past, and rejection of obsolete block
// versions (2/3/4) once a supermajority has upgraded.
// NOTE(review): braces/returns elided by the extraction; comments only.
3349 bool ContextualCheckBlockHeader(const CBlockHeader
& block
, CValidationState
& state
, const Consensus::Params
& consensusParams
, CBlockIndex
* const pindexPrev
)
3351 // Check proof of work
3352 if (block
.nBits
!= GetNextWorkRequired(pindexPrev
, &block
, consensusParams
))
3353 return state
.DoS(100, false, REJECT_INVALID
, "bad-diffbits", false, "incorrect proof of work");
3355 // Check timestamp against prev
3356 if (block
.GetBlockTime() <= pindexPrev
->GetMedianTimePast())
3357 return state
.Invalid(false, REJECT_INVALID
, "time-too-old", "block's timestamp is too early");
3359 // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
3360 for (int32_t version
= 2; version
< 5; ++version
) // check for version 2, 3 and 4 upgrades
3361 if (block
.nVersion
< version
&& IsSuperMajority(version
, pindexPrev
, consensusParams
.nMajorityRejectBlockOutdated
, consensusParams
))
3362 return state
.Invalid(false, REJECT_OBSOLETE
, strprintf("bad-version(0x%08x)", version
- 1),
3363 strprintf("rejected nVersion=0x%08x block", version
- 1));
// Block checks that need the previous block: all txs final under the
// applicable locktime cutoff (median-time-past once BIP113/CSV is ACTIVE),
// and the BIP34 coinbase-height rule once v2 blocks have a supermajority.
// NOTE(review): braces/returns elided by the extraction; comments only.
3368 bool ContextualCheckBlock(const CBlock
& block
, CValidationState
& state
, CBlockIndex
* const pindexPrev
)
3370 const int nHeight
= pindexPrev
== NULL
? 0 : pindexPrev
->nHeight
+ 1;
3371 const Consensus::Params
& consensusParams
= Params().GetConsensus();
3373 // Start enforcing BIP113 (Median Time Past) using versionbits logic.
3374 int nLockTimeFlags
= 0;
3375 if (VersionBitsState(pindexPrev
, consensusParams
, Consensus::DEPLOYMENT_CSV
, versionbitscache
) == THRESHOLD_ACTIVE
) {
3376 nLockTimeFlags
|= LOCKTIME_MEDIAN_TIME_PAST
;
3379 int64_t nLockTimeCutoff
= (nLockTimeFlags
& LOCKTIME_MEDIAN_TIME_PAST
)
3380 ? pindexPrev
->GetMedianTimePast()
3381 : block
.GetBlockTime();
3383 // Check that all transactions are finalized
3384 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
) {
3385 if (!IsFinalTx(tx
, nHeight
, nLockTimeCutoff
)) {
3386 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-nonfinal", false, "non-final transaction");
3390 // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
3391 // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
3392 if (block
.nVersion
>= 2 && IsSuperMajority(2, pindexPrev
, consensusParams
.nMajorityEnforceBlockUpgrade
, consensusParams
))
3394 CScript expect
= CScript() << nHeight
;
3395 if (block
.vtx
[0].vin
[0].scriptSig
.size() < expect
.size() ||
3396 !std::equal(expect
.begin(), expect
.end(), block
.vtx
[0].vin
[0].scriptSig
.begin())) {
3397 return state
.DoS(100, false, REJECT_INVALID
, "bad-cb-height", false, "block height mismatch in coinbase");
// Validate a header and add it to mapBlockIndex: duplicate/invalid lookup,
// context-free CheckBlockHeader, prev-block existence and validity,
// checkpoint check, contextual header checks, then AddToBlockIndex.
// NOTE(review): mangled extraction; the *ppindex assignment, returns and
// braces are elided. Code kept byte-identical; comments only.
3404 static bool AcceptBlockHeader(const CBlockHeader
& block
, CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
** ppindex
=NULL
)
3406 AssertLockHeld(cs_main
);
3407 // Check for duplicate
3408 uint256 hash
= block
.GetHash();
3409 BlockMap::iterator miSelf
= mapBlockIndex
.find(hash
);
3410 CBlockIndex
*pindex
= NULL
;
3411 if (hash
!= chainparams
.GetConsensus().hashGenesisBlock
) {
3413 if (miSelf
!= mapBlockIndex
.end()) {
3414 // Block header is already known.
3415 pindex
= miSelf
->second
;
3418 if (pindex
->nStatus
& BLOCK_FAILED_MASK
)
3419 return state
.Invalid(error("%s: block %s is marked invalid", __func__
, hash
.ToString()), 0, "duplicate");
3423 if (!CheckBlockHeader(block
, state
, chainparams
.GetConsensus(), GetAdjustedTime()))
3424 return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__
, hash
.ToString(), FormatStateMessage(state
));
3426 // Get prev block index
3427 CBlockIndex
* pindexPrev
= NULL
;
3428 BlockMap::iterator mi
= mapBlockIndex
.find(block
.hashPrevBlock
);
3429 if (mi
== mapBlockIndex
.end())
3430 return state
.DoS(10, error("%s: prev block not found", __func__
), 0, "bad-prevblk");
3431 pindexPrev
= (*mi
).second
;
3432 if (pindexPrev
->nStatus
& BLOCK_FAILED_MASK
)
3433 return state
.DoS(100, error("%s: prev block invalid", __func__
), REJECT_INVALID
, "bad-prevblk");
3436 if (fCheckpointsEnabled
&& !CheckIndexAgainstCheckpoint(pindexPrev
, state
, chainparams
, hash
))
3437 return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__
, state
.GetRejectReason().c_str());
3439 if (!ContextualCheckBlockHeader(block
, state
, chainparams
.GetConsensus(), pindexPrev
))
3440 return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__
, hash
.ToString(), FormatStateMessage(state
));
3443 pindex
= AddToBlockIndex(block
);
3451 /** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
3452 static bool AcceptBlock(const CBlock
& block
, CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
** ppindex
, bool fRequested
, const CDiskBlockPos
* dbp
)
3454 AssertLockHeld(cs_main
);
3456 CBlockIndex
*pindexDummy
= NULL
;
3457 CBlockIndex
*&pindex
= ppindex
? *ppindex
: pindexDummy
;
3459 if (!AcceptBlockHeader(block
, state
, chainparams
, &pindex
))
3462 // Try to process all requested blocks that we don't have, but only
3463 // process an unrequested block if it's new and has enough work to
3464 // advance our tip, and isn't too many blocks ahead.
3465 bool fAlreadyHave
= pindex
->nStatus
& BLOCK_HAVE_DATA
;
3466 bool fHasMoreWork
= (chainActive
.Tip() ? pindex
->nChainWork
> chainActive
.Tip()->nChainWork
: true);
3467 // Blocks that are too out-of-order needlessly limit the effectiveness of
3468 // pruning, because pruning will not delete block files that contain any
3469 // blocks which are too close in height to the tip. Apply this test
3470 // regardless of whether pruning is enabled; it should generally be safe to
3471 // not process unrequested blocks.
3472 bool fTooFarAhead
= (pindex
->nHeight
> int(chainActive
.Height() + MIN_BLOCKS_TO_KEEP
));
3474 // TODO: deal better with return value and error conditions for duplicate
3475 // and unrequested blocks.
3476 if (fAlreadyHave
) return true;
3477 if (!fRequested
) { // If we didn't ask for it:
3478 if (pindex
->nTx
!= 0) return true; // This is a previously-processed block that was pruned
3479 if (!fHasMoreWork
) return true; // Don't process less-work chains
3480 if (fTooFarAhead
) return true; // Block height is too high
3483 if ((!CheckBlock(block
, state
, chainparams
.GetConsensus(), GetAdjustedTime())) || !ContextualCheckBlock(block
, state
, pindex
->pprev
)) {
3484 if (state
.IsInvalid() && !state
.CorruptionPossible()) {
3485 pindex
->nStatus
|= BLOCK_FAILED_VALID
;
3486 setDirtyBlockIndex
.insert(pindex
);
3488 return error("%s: %s", __func__
, FormatStateMessage(state
));
3491 int nHeight
= pindex
->nHeight
;
3493 // Write block to history file
3495 unsigned int nBlockSize
= ::GetSerializeSize(block
, SER_DISK
, CLIENT_VERSION
);
3496 CDiskBlockPos blockPos
;
3499 if (!FindBlockPos(state
, blockPos
, nBlockSize
+8, nHeight
, block
.GetBlockTime(), dbp
!= NULL
))
3500 return error("AcceptBlock(): FindBlockPos failed");
3502 if (!WriteBlockToDisk(block
, blockPos
, chainparams
.MessageStart()))
3503 AbortNode(state
, "Failed to write block");
3504 if (!ReceivedBlockTransactions(block
, state
, pindex
, blockPos
))
3505 return error("AcceptBlock(): ReceivedBlockTransactions failed");
3506 } catch (const std::runtime_error
& e
) {
3507 return AbortNode(state
, std::string("System error: ") + e
.what());
3510 if (fCheckForPruning
)
3511 FlushStateToDisk(state
, FLUSH_STATE_NONE
); // we just allocated more disk space for block files
3516 static bool IsSuperMajority(int minVersion
, const CBlockIndex
* pstart
, unsigned nRequired
, const Consensus::Params
& consensusParams
)
3518 unsigned int nFound
= 0;
3519 for (int i
= 0; i
< consensusParams
.nMajorityWindow
&& nFound
< nRequired
&& pstart
!= NULL
; i
++)
3521 if (pstart
->nVersion
>= minVersion
)
3523 pstart
= pstart
->pprev
;
3525 return (nFound
>= nRequired
);
3529 bool ProcessNewBlock(CValidationState
& state
, const CChainParams
& chainparams
, const CNode
* pfrom
, const CBlock
* pblock
, bool fForceProcessing
, const CDiskBlockPos
* dbp
)
3533 bool fRequested
= MarkBlockAsReceived(pblock
->GetHash());
3534 fRequested
|= fForceProcessing
;
3537 CBlockIndex
*pindex
= NULL
;
3538 bool ret
= AcceptBlock(*pblock
, state
, chainparams
, &pindex
, fRequested
, dbp
);
3539 if (pindex
&& pfrom
) {
3540 mapBlockSource
[pindex
->GetBlockHash()] = pfrom
->GetId();
3542 CheckBlockIndex(chainparams
.GetConsensus());
3544 return error("%s: AcceptBlock FAILED", __func__
);
3549 if (!ActivateBestChain(state
, chainparams
, pblock
))
3550 return error("%s: ActivateBestChain failed", __func__
);
3555 bool TestBlockValidity(CValidationState
& state
, const CChainParams
& chainparams
, const CBlock
& block
, CBlockIndex
* pindexPrev
, bool fCheckPOW
, bool fCheckMerkleRoot
)
3557 AssertLockHeld(cs_main
);
3558 assert(pindexPrev
&& pindexPrev
== chainActive
.Tip());
3559 if (fCheckpointsEnabled
&& !CheckIndexAgainstCheckpoint(pindexPrev
, state
, chainparams
, block
.GetHash()))
3560 return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__
, state
.GetRejectReason().c_str());
3562 CCoinsViewCache
viewNew(pcoinsTip
);
3563 CBlockIndex
indexDummy(block
);
3564 indexDummy
.pprev
= pindexPrev
;
3565 indexDummy
.nHeight
= pindexPrev
->nHeight
+ 1;
3567 // NOTE: CheckBlockHeader is called by CheckBlock
3568 if (!ContextualCheckBlockHeader(block
, state
, chainparams
.GetConsensus(), pindexPrev
))
3569 return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__
, FormatStateMessage(state
));
3570 if (!CheckBlock(block
, state
, chainparams
.GetConsensus(), GetAdjustedTime(), fCheckPOW
, fCheckMerkleRoot
))
3571 return error("%s: Consensus::CheckBlock: %s", __func__
, FormatStateMessage(state
));
3572 if (!ContextualCheckBlock(block
, state
, pindexPrev
))
3573 return error("%s: Consensus::ContextualCheckBlock: %s", __func__
, FormatStateMessage(state
));
3574 if (!ConnectBlock(block
, state
, &indexDummy
, viewNew
, chainparams
, true))
3576 assert(state
.IsValid());
3582 * BLOCK PRUNING CODE
3585 /* Calculate the amount of disk space the block & undo files currently use */
3586 uint64_t CalculateCurrentUsage()
3588 uint64_t retval
= 0;
3589 BOOST_FOREACH(const CBlockFileInfo
&file
, vinfoBlockFile
) {
3590 retval
+= file
.nSize
+ file
.nUndoSize
;
3595 /* Prune a block file (modify associated database entries)*/
3596 void PruneOneBlockFile(const int fileNumber
)
3598 for (BlockMap::iterator it
= mapBlockIndex
.begin(); it
!= mapBlockIndex
.end(); ++it
) {
3599 CBlockIndex
* pindex
= it
->second
;
3600 if (pindex
->nFile
== fileNumber
) {
3601 pindex
->nStatus
&= ~BLOCK_HAVE_DATA
;
3602 pindex
->nStatus
&= ~BLOCK_HAVE_UNDO
;
3604 pindex
->nDataPos
= 0;
3605 pindex
->nUndoPos
= 0;
3606 setDirtyBlockIndex
.insert(pindex
);
3608 // Prune from mapBlocksUnlinked -- any block we prune would have
3609 // to be downloaded again in order to consider its chain, at which
3610 // point it would be considered as a candidate for
3611 // mapBlocksUnlinked or setBlockIndexCandidates.
3612 std::pair
<std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
, std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
> range
= mapBlocksUnlinked
.equal_range(pindex
->pprev
);
3613 while (range
.first
!= range
.second
) {
3614 std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator it
= range
.first
;
3616 if (it
->second
== pindex
) {
3617 mapBlocksUnlinked
.erase(it
);
3623 vinfoBlockFile
[fileNumber
].SetNull();
3624 setDirtyFileInfo
.insert(fileNumber
);
3628 void UnlinkPrunedFiles(std::set
<int>& setFilesToPrune
)
3630 for (set
<int>::iterator it
= setFilesToPrune
.begin(); it
!= setFilesToPrune
.end(); ++it
) {
3631 CDiskBlockPos
pos(*it
, 0);
3632 boost::filesystem::remove(GetBlockPosFilename(pos
, "blk"));
3633 boost::filesystem::remove(GetBlockPosFilename(pos
, "rev"));
3634 LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__
, *it
);
3638 /* Calculate the block/rev files that should be deleted to remain under target*/
3639 void FindFilesToPrune(std::set
<int>& setFilesToPrune
, uint64_t nPruneAfterHeight
)
3641 LOCK2(cs_main
, cs_LastBlockFile
);
3642 if (chainActive
.Tip() == NULL
|| nPruneTarget
== 0) {
3645 if ((uint64_t)chainActive
.Tip()->nHeight
<= nPruneAfterHeight
) {
3649 unsigned int nLastBlockWeCanPrune
= chainActive
.Tip()->nHeight
- MIN_BLOCKS_TO_KEEP
;
3650 uint64_t nCurrentUsage
= CalculateCurrentUsage();
3651 // We don't check to prune until after we've allocated new space for files
3652 // So we should leave a buffer under our target to account for another allocation
3653 // before the next pruning.
3654 uint64_t nBuffer
= BLOCKFILE_CHUNK_SIZE
+ UNDOFILE_CHUNK_SIZE
;
3655 uint64_t nBytesToPrune
;
3658 if (nCurrentUsage
+ nBuffer
>= nPruneTarget
) {
3659 for (int fileNumber
= 0; fileNumber
< nLastBlockFile
; fileNumber
++) {
3660 nBytesToPrune
= vinfoBlockFile
[fileNumber
].nSize
+ vinfoBlockFile
[fileNumber
].nUndoSize
;
3662 if (vinfoBlockFile
[fileNumber
].nSize
== 0)
3665 if (nCurrentUsage
+ nBuffer
< nPruneTarget
) // are we below our target?
3668 // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
3669 if (vinfoBlockFile
[fileNumber
].nHeightLast
> nLastBlockWeCanPrune
)
3672 PruneOneBlockFile(fileNumber
);
3673 // Queue up the files for removal
3674 setFilesToPrune
.insert(fileNumber
);
3675 nCurrentUsage
-= nBytesToPrune
;
3680 LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
3681 nPruneTarget
/1024/1024, nCurrentUsage
/1024/1024,
3682 ((int64_t)nPruneTarget
- (int64_t)nCurrentUsage
)/1024/1024,
3683 nLastBlockWeCanPrune
, count
);
3686 bool CheckDiskSpace(uint64_t nAdditionalBytes
)
3688 uint64_t nFreeBytesAvailable
= boost::filesystem::space(GetDataDir()).available
;
3690 // Check for nMinDiskSpace bytes (currently 50MB)
3691 if (nFreeBytesAvailable
< nMinDiskSpace
+ nAdditionalBytes
)
3692 return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
3697 FILE* OpenDiskFile(const CDiskBlockPos
&pos
, const char *prefix
, bool fReadOnly
)
3701 boost::filesystem::path path
= GetBlockPosFilename(pos
, prefix
);
3702 boost::filesystem::create_directories(path
.parent_path());
3703 FILE* file
= fopen(path
.string().c_str(), "rb+");
3704 if (!file
&& !fReadOnly
)
3705 file
= fopen(path
.string().c_str(), "wb+");
3707 LogPrintf("Unable to open file %s\n", path
.string());
3711 if (fseek(file
, pos
.nPos
, SEEK_SET
)) {
3712 LogPrintf("Unable to seek to position %u of %s\n", pos
.nPos
, path
.string());
3720 FILE* OpenBlockFile(const CDiskBlockPos
&pos
, bool fReadOnly
) {
3721 return OpenDiskFile(pos
, "blk", fReadOnly
);
3724 FILE* OpenUndoFile(const CDiskBlockPos
&pos
, bool fReadOnly
) {
3725 return OpenDiskFile(pos
, "rev", fReadOnly
);
3728 boost::filesystem::path
GetBlockPosFilename(const CDiskBlockPos
&pos
, const char *prefix
)
3730 return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix
, pos
.nFile
);
3733 CBlockIndex
* InsertBlockIndex(uint256 hash
)
3739 BlockMap::iterator mi
= mapBlockIndex
.find(hash
);
3740 if (mi
!= mapBlockIndex
.end())
3741 return (*mi
).second
;
3744 CBlockIndex
* pindexNew
= new CBlockIndex();
3746 throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
3747 mi
= mapBlockIndex
.insert(make_pair(hash
, pindexNew
)).first
;
3748 pindexNew
->phashBlock
= &((*mi
).first
);
3753 bool static LoadBlockIndexDB()
3755 const CChainParams
& chainparams
= Params();
3756 if (!pblocktree
->LoadBlockIndexGuts(InsertBlockIndex
))
3759 boost::this_thread::interruption_point();
3761 // Calculate nChainWork
3762 vector
<pair
<int, CBlockIndex
*> > vSortedByHeight
;
3763 vSortedByHeight
.reserve(mapBlockIndex
.size());
3764 BOOST_FOREACH(const PAIRTYPE(uint256
, CBlockIndex
*)& item
, mapBlockIndex
)
3766 CBlockIndex
* pindex
= item
.second
;
3767 vSortedByHeight
.push_back(make_pair(pindex
->nHeight
, pindex
));
3769 sort(vSortedByHeight
.begin(), vSortedByHeight
.end());
3770 BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex
*)& item
, vSortedByHeight
)
3772 CBlockIndex
* pindex
= item
.second
;
3773 pindex
->nChainWork
= (pindex
->pprev
? pindex
->pprev
->nChainWork
: 0) + GetBlockProof(*pindex
);
3774 // We can link the chain of blocks for which we've received transactions at some point.
3775 // Pruned nodes may have deleted the block.
3776 if (pindex
->nTx
> 0) {
3777 if (pindex
->pprev
) {
3778 if (pindex
->pprev
->nChainTx
) {
3779 pindex
->nChainTx
= pindex
->pprev
->nChainTx
+ pindex
->nTx
;
3781 pindex
->nChainTx
= 0;
3782 mapBlocksUnlinked
.insert(std::make_pair(pindex
->pprev
, pindex
));
3785 pindex
->nChainTx
= pindex
->nTx
;
3788 if (pindex
->IsValid(BLOCK_VALID_TRANSACTIONS
) && (pindex
->nChainTx
|| pindex
->pprev
== NULL
))
3789 setBlockIndexCandidates
.insert(pindex
);
3790 if (pindex
->nStatus
& BLOCK_FAILED_MASK
&& (!pindexBestInvalid
|| pindex
->nChainWork
> pindexBestInvalid
->nChainWork
))
3791 pindexBestInvalid
= pindex
;
3793 pindex
->BuildSkip();
3794 if (pindex
->IsValid(BLOCK_VALID_TREE
) && (pindexBestHeader
== NULL
|| CBlockIndexWorkComparator()(pindexBestHeader
, pindex
)))
3795 pindexBestHeader
= pindex
;
3798 // Load block file info
3799 pblocktree
->ReadLastBlockFile(nLastBlockFile
);
3800 vinfoBlockFile
.resize(nLastBlockFile
+ 1);
3801 LogPrintf("%s: last block file = %i\n", __func__
, nLastBlockFile
);
3802 for (int nFile
= 0; nFile
<= nLastBlockFile
; nFile
++) {
3803 pblocktree
->ReadBlockFileInfo(nFile
, vinfoBlockFile
[nFile
]);
3805 LogPrintf("%s: last block file info: %s\n", __func__
, vinfoBlockFile
[nLastBlockFile
].ToString());
3806 for (int nFile
= nLastBlockFile
+ 1; true; nFile
++) {
3807 CBlockFileInfo info
;
3808 if (pblocktree
->ReadBlockFileInfo(nFile
, info
)) {
3809 vinfoBlockFile
.push_back(info
);
3815 // Check presence of blk files
3816 LogPrintf("Checking all blk files are present...\n");
3817 set
<int> setBlkDataFiles
;
3818 BOOST_FOREACH(const PAIRTYPE(uint256
, CBlockIndex
*)& item
, mapBlockIndex
)
3820 CBlockIndex
* pindex
= item
.second
;
3821 if (pindex
->nStatus
& BLOCK_HAVE_DATA
) {
3822 setBlkDataFiles
.insert(pindex
->nFile
);
3825 for (std::set
<int>::iterator it
= setBlkDataFiles
.begin(); it
!= setBlkDataFiles
.end(); it
++)
3827 CDiskBlockPos
pos(*it
, 0);
3828 if (CAutoFile(OpenBlockFile(pos
, true), SER_DISK
, CLIENT_VERSION
).IsNull()) {
3833 // Check whether we have ever pruned block & undo files
3834 pblocktree
->ReadFlag("prunedblockfiles", fHavePruned
);
3836 LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
3838 // Check whether we need to continue reindexing
3839 bool fReindexing
= false;
3840 pblocktree
->ReadReindexing(fReindexing
);
3841 fReindex
|= fReindexing
;
3843 // Check whether we have a transaction index
3844 pblocktree
->ReadFlag("txindex", fTxIndex
);
3845 LogPrintf("%s: transaction index %s\n", __func__
, fTxIndex
? "enabled" : "disabled");
3847 // Load pointer to end of best chain
3848 BlockMap::iterator it
= mapBlockIndex
.find(pcoinsTip
->GetBestBlock());
3849 if (it
== mapBlockIndex
.end())
3851 chainActive
.SetTip(it
->second
);
3853 PruneBlockIndexCandidates();
3855 LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__
,
3856 chainActive
.Tip()->GetBlockHash().ToString(), chainActive
.Height(),
3857 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive
.Tip()->GetBlockTime()),
3858 Checkpoints::GuessVerificationProgress(chainparams
.Checkpoints(), chainActive
.Tip()));
3863 CVerifyDB::CVerifyDB()
3865 uiInterface
.ShowProgress(_("Verifying blocks..."), 0);
3868 CVerifyDB::~CVerifyDB()
3870 uiInterface
.ShowProgress("", 100);
3873 bool CVerifyDB::VerifyDB(const CChainParams
& chainparams
, CCoinsView
*coinsview
, int nCheckLevel
, int nCheckDepth
)
3876 if (chainActive
.Tip() == NULL
|| chainActive
.Tip()->pprev
== NULL
)
3879 // Verify blocks in the best chain
3880 if (nCheckDepth
<= 0)
3881 nCheckDepth
= 1000000000; // suffices until the year 19000
3882 if (nCheckDepth
> chainActive
.Height())
3883 nCheckDepth
= chainActive
.Height();
3884 nCheckLevel
= std::max(0, std::min(4, nCheckLevel
));
3885 LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth
, nCheckLevel
);
3886 CCoinsViewCache
coins(coinsview
);
3887 CBlockIndex
* pindexState
= chainActive
.Tip();
3888 CBlockIndex
* pindexFailure
= NULL
;
3889 int nGoodTransactions
= 0;
3890 CValidationState state
;
3892 LogPrintf("[0%]...");
3893 for (CBlockIndex
* pindex
= chainActive
.Tip(); pindex
&& pindex
->pprev
; pindex
= pindex
->pprev
)
3895 boost::this_thread::interruption_point();
3896 int percentageDone
= std::max(1, std::min(99, (int)(((double)(chainActive
.Height() - pindex
->nHeight
)) / (double)nCheckDepth
* (nCheckLevel
>= 4 ? 50 : 100))));
3897 if (reportDone
< percentageDone
/10) {
3898 // report every 10% step
3899 LogPrintf("[%d%%]...", percentageDone
);
3900 reportDone
= percentageDone
/10;
3902 uiInterface
.ShowProgress(_("Verifying blocks..."), percentageDone
);
3903 if (pindex
->nHeight
< chainActive
.Height()-nCheckDepth
)
3905 if (fPruneMode
&& !(pindex
->nStatus
& BLOCK_HAVE_DATA
)) {
3906 // If pruning, only go back as far as we have data.
3907 LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex
->nHeight
);
3911 // check level 0: read from disk
3912 if (!ReadBlockFromDisk(block
, pindex
, chainparams
.GetConsensus()))
3913 return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3914 // check level 1: verify block validity
3915 if (nCheckLevel
>= 1 && !CheckBlock(block
, state
, chainparams
.GetConsensus(), GetAdjustedTime()))
3916 return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__
,
3917 pindex
->nHeight
, pindex
->GetBlockHash().ToString(), FormatStateMessage(state
));
3918 // check level 2: verify undo validity
3919 if (nCheckLevel
>= 2 && pindex
) {
3921 CDiskBlockPos pos
= pindex
->GetUndoPos();
3922 if (!pos
.IsNull()) {
3923 if (!UndoReadFromDisk(undo
, pos
, pindex
->pprev
->GetBlockHash()))
3924 return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3927 // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
3928 if (nCheckLevel
>= 3 && pindex
== pindexState
&& (coins
.DynamicMemoryUsage() + pcoinsTip
->DynamicMemoryUsage()) <= nCoinCacheUsage
) {
3930 if (!DisconnectBlock(block
, state
, pindex
, coins
, &fClean
))
3931 return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3932 pindexState
= pindex
->pprev
;
3934 nGoodTransactions
= 0;
3935 pindexFailure
= pindex
;
3937 nGoodTransactions
+= block
.vtx
.size();
3939 if (ShutdownRequested())
3943 return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive
.Height() - pindexFailure
->nHeight
+ 1, nGoodTransactions
);
3945 // check level 4: try reconnecting blocks
3946 if (nCheckLevel
>= 4) {
3947 CBlockIndex
*pindex
= pindexState
;
3948 while (pindex
!= chainActive
.Tip()) {
3949 boost::this_thread::interruption_point();
3950 uiInterface
.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive
.Height() - pindex
->nHeight
)) / (double)nCheckDepth
* 50))));
3951 pindex
= chainActive
.Next(pindex
);
3953 if (!ReadBlockFromDisk(block
, pindex
, chainparams
.GetConsensus()))
3954 return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3955 if (!ConnectBlock(block
, state
, pindex
, coins
, chainparams
))
3956 return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3960 LogPrintf("[DONE].\n");
3961 LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive
.Height() - pindexState
->nHeight
, nGoodTransactions
);
3966 void UnloadBlockIndex()
3969 setBlockIndexCandidates
.clear();
3970 chainActive
.SetTip(NULL
);
3971 pindexBestInvalid
= NULL
;
3972 pindexBestHeader
= NULL
;
3974 mapOrphanTransactions
.clear();
3975 mapOrphanTransactionsByPrev
.clear();
3977 mapBlocksUnlinked
.clear();
3978 vinfoBlockFile
.clear();
3980 nBlockSequenceId
= 1;
3981 mapBlockSource
.clear();
3982 mapBlocksInFlight
.clear();
3983 nPreferredDownload
= 0;
3984 setDirtyBlockIndex
.clear();
3985 setDirtyFileInfo
.clear();
3986 mapNodeState
.clear();
3987 recentRejects
.reset(NULL
);
3988 versionbitscache
.Clear();
3989 for (int b
= 0; b
< VERSIONBITS_NUM_BITS
; b
++) {
3990 warningcache
[b
].clear();
3993 BOOST_FOREACH(BlockMap::value_type
& entry
, mapBlockIndex
) {
3994 delete entry
.second
;
3996 mapBlockIndex
.clear();
3997 fHavePruned
= false;
4000 bool LoadBlockIndex()
4002 // Load block index from databases
4003 if (!fReindex
&& !LoadBlockIndexDB())
4008 bool InitBlockIndex(const CChainParams
& chainparams
)
4012 // Initialize global variables that cannot be constructed at startup.
4013 recentRejects
.reset(new CRollingBloomFilter(120000, 0.000001));
4015 // Check whether we're already initialized
4016 if (chainActive
.Genesis() != NULL
)
4019 // Use the provided setting for -txindex in the new database
4020 fTxIndex
= GetBoolArg("-txindex", DEFAULT_TXINDEX
);
4021 pblocktree
->WriteFlag("txindex", fTxIndex
);
4022 LogPrintf("Initializing databases...\n");
4024 // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
4027 CBlock
&block
= const_cast<CBlock
&>(chainparams
.GenesisBlock());
4028 // Start new block file
4029 unsigned int nBlockSize
= ::GetSerializeSize(block
, SER_DISK
, CLIENT_VERSION
);
4030 CDiskBlockPos blockPos
;
4031 CValidationState state
;
4032 if (!FindBlockPos(state
, blockPos
, nBlockSize
+8, 0, block
.GetBlockTime()))
4033 return error("LoadBlockIndex(): FindBlockPos failed");
4034 if (!WriteBlockToDisk(block
, blockPos
, chainparams
.MessageStart()))
4035 return error("LoadBlockIndex(): writing genesis block to disk failed");
4036 CBlockIndex
*pindex
= AddToBlockIndex(block
);
4037 if (!ReceivedBlockTransactions(block
, state
, pindex
, blockPos
))
4038 return error("LoadBlockIndex(): genesis block not accepted");
4039 if (!ActivateBestChain(state
, chainparams
, &block
))
4040 return error("LoadBlockIndex(): genesis block cannot be activated");
4041 // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
4042 return FlushStateToDisk(state
, FLUSH_STATE_ALWAYS
);
4043 } catch (const std::runtime_error
& e
) {
4044 return error("LoadBlockIndex(): failed to initialize block database: %s", e
.what());
4051 bool LoadExternalBlockFile(const CChainParams
& chainparams
, FILE* fileIn
, CDiskBlockPos
*dbp
)
4053 // Map of disk positions for blocks with unknown parent (only used for reindex)
4054 static std::multimap
<uint256
, CDiskBlockPos
> mapBlocksUnknownParent
;
4055 int64_t nStart
= GetTimeMillis();
4059 // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
4060 CBufferedFile
blkdat(fileIn
, 2*MAX_BLOCK_SIZE
, MAX_BLOCK_SIZE
+8, SER_DISK
, CLIENT_VERSION
);
4061 uint64_t nRewind
= blkdat
.GetPos();
4062 while (!blkdat
.eof()) {
4063 boost::this_thread::interruption_point();
4065 blkdat
.SetPos(nRewind
);
4066 nRewind
++; // start one byte further next time, in case of failure
4067 blkdat
.SetLimit(); // remove former limit
4068 unsigned int nSize
= 0;
4071 unsigned char buf
[MESSAGE_START_SIZE
];
4072 blkdat
.FindByte(chainparams
.MessageStart()[0]);
4073 nRewind
= blkdat
.GetPos()+1;
4074 blkdat
>> FLATDATA(buf
);
4075 if (memcmp(buf
, chainparams
.MessageStart(), MESSAGE_START_SIZE
))
4079 if (nSize
< 80 || nSize
> MAX_BLOCK_SIZE
)
4081 } catch (const std::exception
&) {
4082 // no valid block header found; don't complain
4087 uint64_t nBlockPos
= blkdat
.GetPos();
4089 dbp
->nPos
= nBlockPos
;
4090 blkdat
.SetLimit(nBlockPos
+ nSize
);
4091 blkdat
.SetPos(nBlockPos
);
4094 nRewind
= blkdat
.GetPos();
4096 // detect out of order blocks, and store them for later
4097 uint256 hash
= block
.GetHash();
4098 if (hash
!= chainparams
.GetConsensus().hashGenesisBlock
&& mapBlockIndex
.find(block
.hashPrevBlock
) == mapBlockIndex
.end()) {
4099 LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__
, hash
.ToString(),
4100 block
.hashPrevBlock
.ToString());
4102 mapBlocksUnknownParent
.insert(std::make_pair(block
.hashPrevBlock
, *dbp
));
4106 // process in case the block isn't known yet
4107 if (mapBlockIndex
.count(hash
) == 0 || (mapBlockIndex
[hash
]->nStatus
& BLOCK_HAVE_DATA
) == 0) {
4109 CValidationState state
;
4110 if (AcceptBlock(block
, state
, chainparams
, NULL
, true, dbp
))
4112 if (state
.IsError())
4114 } else if (hash
!= chainparams
.GetConsensus().hashGenesisBlock
&& mapBlockIndex
[hash
]->nHeight
% 1000 == 0) {
4115 LogPrint("reindex", "Block Import: already had block %s at height %d\n", hash
.ToString(), mapBlockIndex
[hash
]->nHeight
);
4118 // Activate the genesis block so normal node progress can continue
4119 if (hash
== chainparams
.GetConsensus().hashGenesisBlock
) {
4120 CValidationState state
;
4121 if (!ActivateBestChain(state
, chainparams
)) {
4128 // Recursively process earlier encountered successors of this block
4129 deque
<uint256
> queue
;
4130 queue
.push_back(hash
);
4131 while (!queue
.empty()) {
4132 uint256 head
= queue
.front();
4134 std::pair
<std::multimap
<uint256
, CDiskBlockPos
>::iterator
, std::multimap
<uint256
, CDiskBlockPos
>::iterator
> range
= mapBlocksUnknownParent
.equal_range(head
);
4135 while (range
.first
!= range
.second
) {
4136 std::multimap
<uint256
, CDiskBlockPos
>::iterator it
= range
.first
;
4137 if (ReadBlockFromDisk(block
, it
->second
, chainparams
.GetConsensus()))
4139 LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__
, block
.GetHash().ToString(),
4142 CValidationState dummy
;
4143 if (AcceptBlock(block
, dummy
, chainparams
, NULL
, true, &it
->second
))
4146 queue
.push_back(block
.GetHash());
4150 mapBlocksUnknownParent
.erase(it
);
4154 } catch (const std::exception
& e
) {
4155 LogPrintf("%s: Deserialize or I/O error - %s\n", __func__
, e
.what());
4158 } catch (const std::runtime_error
& e
) {
4159 AbortNode(std::string("System error: ") + e
.what());
4162 LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded
, GetTimeMillis() - nStart
);
4166 void static CheckBlockIndex(const Consensus::Params
& consensusParams
)
4168 if (!fCheckBlockIndex
) {
4174 // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
4175 // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
4176 // iterating the block tree require that chainActive has been initialized.)
4177 if (chainActive
.Height() < 0) {
4178 assert(mapBlockIndex
.size() <= 1);
4182 // Build forward-pointing map of the entire block tree.
4183 std::multimap
<CBlockIndex
*,CBlockIndex
*> forward
;
4184 for (BlockMap::iterator it
= mapBlockIndex
.begin(); it
!= mapBlockIndex
.end(); it
++) {
4185 forward
.insert(std::make_pair(it
->second
->pprev
, it
->second
));
4188 assert(forward
.size() == mapBlockIndex
.size());
4190 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> rangeGenesis
= forward
.equal_range(NULL
);
4191 CBlockIndex
*pindex
= rangeGenesis
.first
->second
;
4192 rangeGenesis
.first
++;
4193 assert(rangeGenesis
.first
== rangeGenesis
.second
); // There is only one index entry with parent NULL.
4195 // Iterate over the entire block tree, using depth-first search.
4196 // Along the way, remember whether there are blocks on the path from genesis
4197 // block being explored which are the first to have certain properties.
4200 CBlockIndex
* pindexFirstInvalid
= NULL
; // Oldest ancestor of pindex which is invalid.
4201 CBlockIndex
* pindexFirstMissing
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
4202 CBlockIndex
* pindexFirstNeverProcessed
= NULL
; // Oldest ancestor of pindex for which nTx == 0.
4203 CBlockIndex
* pindexFirstNotTreeValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
4204 CBlockIndex
* pindexFirstNotTransactionsValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
4205 CBlockIndex
* pindexFirstNotChainValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
4206 CBlockIndex
* pindexFirstNotScriptsValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
4207 while (pindex
!= NULL
) {
4209 if (pindexFirstInvalid
== NULL
&& pindex
->nStatus
& BLOCK_FAILED_VALID
) pindexFirstInvalid
= pindex
;
4210 if (pindexFirstMissing
== NULL
&& !(pindex
->nStatus
& BLOCK_HAVE_DATA
)) pindexFirstMissing
= pindex
;
4211 if (pindexFirstNeverProcessed
== NULL
&& pindex
->nTx
== 0) pindexFirstNeverProcessed
= pindex
;
4212 if (pindex
->pprev
!= NULL
&& pindexFirstNotTreeValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_TREE
) pindexFirstNotTreeValid
= pindex
;
4213 if (pindex
->pprev
!= NULL
&& pindexFirstNotTransactionsValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_TRANSACTIONS
) pindexFirstNotTransactionsValid
= pindex
;
4214 if (pindex
->pprev
!= NULL
&& pindexFirstNotChainValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_CHAIN
) pindexFirstNotChainValid
= pindex
;
4215 if (pindex
->pprev
!= NULL
&& pindexFirstNotScriptsValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_SCRIPTS
) pindexFirstNotScriptsValid
= pindex
;
4217 // Begin: actual consistency checks.
4218 if (pindex
->pprev
== NULL
) {
4219 // Genesis block checks.
4220 assert(pindex
->GetBlockHash() == consensusParams
.hashGenesisBlock
); // Genesis block's hash must match.
4221 assert(pindex
== chainActive
.Genesis()); // The current active chain's genesis block must be this block.
4223 if (pindex
->nChainTx
== 0) assert(pindex
->nSequenceId
== 0); // nSequenceId can't be set for blocks that aren't linked
4224 // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
4225 // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
4227 // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
4228 assert(!(pindex
->nStatus
& BLOCK_HAVE_DATA
) == (pindex
->nTx
== 0));
4229 assert(pindexFirstMissing
== pindexFirstNeverProcessed
);
4231 // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
4232 if (pindex
->nStatus
& BLOCK_HAVE_DATA
) assert(pindex
->nTx
> 0);
4234 if (pindex
->nStatus
& BLOCK_HAVE_UNDO
) assert(pindex
->nStatus
& BLOCK_HAVE_DATA
);
4235 assert(((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_TRANSACTIONS
) == (pindex
->nTx
> 0)); // This is pruning-independent.
4236 // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
4237 assert((pindexFirstNeverProcessed
!= NULL
) == (pindex
->nChainTx
== 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
4238 assert((pindexFirstNotTransactionsValid
!= NULL
) == (pindex
->nChainTx
== 0));
4239 assert(pindex
->nHeight
== nHeight
); // nHeight must be consistent.
4240 assert(pindex
->pprev
== NULL
|| pindex
->nChainWork
>= pindex
->pprev
->nChainWork
); // For every block except the genesis block, the chainwork must be larger than the parent's.
4241 assert(nHeight
< 2 || (pindex
->pskip
&& (pindex
->pskip
->nHeight
< nHeight
))); // The pskip pointer must point back for all but the first 2 blocks.
4242 assert(pindexFirstNotTreeValid
== NULL
); // All mapBlockIndex entries must at least be TREE valid
4243 if ((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_TREE
) assert(pindexFirstNotTreeValid
== NULL
); // TREE valid implies all parents are TREE valid
4244 if ((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_CHAIN
) assert(pindexFirstNotChainValid
== NULL
); // CHAIN valid implies all parents are CHAIN valid
4245 if ((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_SCRIPTS
) assert(pindexFirstNotScriptsValid
== NULL
); // SCRIPTS valid implies all parents are SCRIPTS valid
4246 if (pindexFirstInvalid
== NULL
) {
4247 // Checks for not-invalid blocks.
4248 assert((pindex
->nStatus
& BLOCK_FAILED_MASK
) == 0); // The failed mask cannot be set for blocks without invalid parents.
4250 if (!CBlockIndexWorkComparator()(pindex
, chainActive
.Tip()) && pindexFirstNeverProcessed
== NULL
) {
4251 if (pindexFirstInvalid
== NULL
) {
4252 // If this block sorts at least as good as the current tip and
4253 // is valid and we have all data for its parents, it must be in
4254 // setBlockIndexCandidates. chainActive.Tip() must also be there
4255 // even if some data has been pruned.
4256 if (pindexFirstMissing
== NULL
|| pindex
== chainActive
.Tip()) {
4257 assert(setBlockIndexCandidates
.count(pindex
));
4259 // If some parent is missing, then it could be that this block was in
4260 // setBlockIndexCandidates but had to be removed because of the missing data.
4261 // In this case it must be in mapBlocksUnlinked -- see test below.
4263 } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
4264 assert(setBlockIndexCandidates
.count(pindex
) == 0);
4266 // Check whether this block is in mapBlocksUnlinked.
4267 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> rangeUnlinked
= mapBlocksUnlinked
.equal_range(pindex
->pprev
);
4268 bool foundInUnlinked
= false;
4269 while (rangeUnlinked
.first
!= rangeUnlinked
.second
) {
4270 assert(rangeUnlinked
.first
->first
== pindex
->pprev
);
4271 if (rangeUnlinked
.first
->second
== pindex
) {
4272 foundInUnlinked
= true;
4275 rangeUnlinked
.first
++;
4277 if (pindex
->pprev
&& (pindex
->nStatus
& BLOCK_HAVE_DATA
) && pindexFirstNeverProcessed
!= NULL
&& pindexFirstInvalid
== NULL
) {
4278 // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
4279 assert(foundInUnlinked
);
4281 if (!(pindex
->nStatus
& BLOCK_HAVE_DATA
)) assert(!foundInUnlinked
); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
4282 if (pindexFirstMissing
== NULL
) assert(!foundInUnlinked
); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
4283 if (pindex
->pprev
&& (pindex
->nStatus
& BLOCK_HAVE_DATA
) && pindexFirstNeverProcessed
== NULL
&& pindexFirstMissing
!= NULL
) {
4284 // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
4285 assert(fHavePruned
); // We must have pruned.
4286 // This block may have entered mapBlocksUnlinked if:
4287 // - it has a descendant that at some point had more work than the
4289 // - we tried switching to that descendant but were missing
4290 // data for some intermediate block between chainActive and the
4292 // So if this block is itself better than chainActive.Tip() and it wasn't in
4293 // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
4294 if (!CBlockIndexWorkComparator()(pindex
, chainActive
.Tip()) && setBlockIndexCandidates
.count(pindex
) == 0) {
4295 if (pindexFirstInvalid
== NULL
) {
4296 assert(foundInUnlinked
);
4300 // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
4301 // End: actual consistency checks.
4303 // Try descending into the first subnode.
4304 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> range
= forward
.equal_range(pindex
);
4305 if (range
.first
!= range
.second
) {
4306 // A subnode was found.
4307 pindex
= range
.first
->second
;
4311 // This is a leaf node.
4312 // Move upwards until we reach a node of which we have not yet visited the last child.
4314 // We are going to either move to a parent or a sibling of pindex.
4315 // If pindex was the first with a certain property, unset the corresponding variable.
4316 if (pindex
== pindexFirstInvalid
) pindexFirstInvalid
= NULL
;
4317 if (pindex
== pindexFirstMissing
) pindexFirstMissing
= NULL
;
4318 if (pindex
== pindexFirstNeverProcessed
) pindexFirstNeverProcessed
= NULL
;
4319 if (pindex
== pindexFirstNotTreeValid
) pindexFirstNotTreeValid
= NULL
;
4320 if (pindex
== pindexFirstNotTransactionsValid
) pindexFirstNotTransactionsValid
= NULL
;
4321 if (pindex
== pindexFirstNotChainValid
) pindexFirstNotChainValid
= NULL
;
4322 if (pindex
== pindexFirstNotScriptsValid
) pindexFirstNotScriptsValid
= NULL
;
4324 CBlockIndex
* pindexPar
= pindex
->pprev
;
4325 // Find which child we just visited.
4326 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> rangePar
= forward
.equal_range(pindexPar
);
4327 while (rangePar
.first
->second
!= pindex
) {
4328 assert(rangePar
.first
!= rangePar
.second
); // Our parent must have at least the node we're coming from as child.
4331 // Proceed to the next one.
4333 if (rangePar
.first
!= rangePar
.second
) {
4334 // Move to the sibling.
4335 pindex
= rangePar
.first
->second
;
4346 // Check that we actually traversed the entire map.
4347 assert(nNodes
== forward
.size());
4350 std::string
GetWarnings(const std::string
& strFor
)
4352 string strStatusBar
;
4356 if (!CLIENT_VERSION_IS_RELEASE
) {
4357 strStatusBar
= "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
4358 strGUI
= _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
4361 if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE
))
4362 strStatusBar
= strRPC
= strGUI
= "testsafemode enabled";
4364 // Misc warnings like out of disk space and clock is wrong
4365 if (strMiscWarning
!= "")
4367 strStatusBar
= strGUI
= strMiscWarning
;
4370 if (fLargeWorkForkFound
)
4372 strStatusBar
= strRPC
= "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
4373 strGUI
= _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
4375 else if (fLargeWorkInvalidChainFound
)
4377 strStatusBar
= strRPC
= "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
4378 strGUI
= _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
4381 if (strFor
== "gui")
4383 else if (strFor
== "statusbar")
4384 return strStatusBar
;
4385 else if (strFor
== "rpc")
4387 assert(!"GetWarnings(): invalid parameter");
4398 //////////////////////////////////////////////////////////////////////////////
4404 bool static AlreadyHave(const CInv
& inv
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
4410 assert(recentRejects
);
4411 if (chainActive
.Tip()->GetBlockHash() != hashRecentRejectsChainTip
)
4413 // If the chain tip has changed previously rejected transactions
4414 // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
4415 // or a double-spend. Reset the rejects filter and give those
4416 // txs a second chance.
4417 hashRecentRejectsChainTip
= chainActive
.Tip()->GetBlockHash();
4418 recentRejects
->reset();
4421 // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude
4422 // requesting or processing some txs which have already been included in a block
4423 return recentRejects
->contains(inv
.hash
) ||
4424 mempool
.exists(inv
.hash
) ||
4425 mapOrphanTransactions
.count(inv
.hash
) ||
4426 pcoinsTip
->HaveCoinsInCache(inv
.hash
);
4429 return mapBlockIndex
.count(inv
.hash
);
4431 // Don't know what it is, just say we already got one
4435 void static ProcessGetData(CNode
* pfrom
, const Consensus::Params
& consensusParams
)
4437 std::deque
<CInv
>::iterator it
= pfrom
->vRecvGetData
.begin();
4439 vector
<CInv
> vNotFound
;
4443 while (it
!= pfrom
->vRecvGetData
.end()) {
4444 // Don't bother if send buffer is too full to respond anyway
4445 if (pfrom
->nSendSize
>= SendBufferSize())
4448 const CInv
&inv
= *it
;
4450 boost::this_thread::interruption_point();
4453 if (inv
.type
== MSG_BLOCK
|| inv
.type
== MSG_FILTERED_BLOCK
)
4456 BlockMap::iterator mi
= mapBlockIndex
.find(inv
.hash
);
4457 if (mi
!= mapBlockIndex
.end())
4459 if (chainActive
.Contains(mi
->second
)) {
4462 static const int nOneMonth
= 30 * 24 * 60 * 60;
4463 // To prevent fingerprinting attacks, only send blocks outside of the active
4464 // chain if they are valid, and no more than a month older (both in time, and in
4465 // best equivalent proof of work) than the best header chain we know about.
4466 send
= mi
->second
->IsValid(BLOCK_VALID_SCRIPTS
) && (pindexBestHeader
!= NULL
) &&
4467 (pindexBestHeader
->GetBlockTime() - mi
->second
->GetBlockTime() < nOneMonth
) &&
4468 (GetBlockProofEquivalentTime(*pindexBestHeader
, *mi
->second
, *pindexBestHeader
, consensusParams
) < nOneMonth
);
4470 LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__
, pfrom
->GetId());
4474 // disconnect node in case we have reached the outbound limit for serving historical blocks
4475 // never disconnect whitelisted nodes
4476 static const int nOneWeek
= 7 * 24 * 60 * 60; // assume > 1 week = historical
4477 if (send
&& CNode::OutboundTargetReached(true) && ( ((pindexBestHeader
!= NULL
) && (pindexBestHeader
->GetBlockTime() - mi
->second
->GetBlockTime() > nOneWeek
)) || inv
.type
== MSG_FILTERED_BLOCK
) && !pfrom
->fWhitelisted
)
4479 LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom
->GetId());
4482 pfrom
->fDisconnect
= true;
4485 // Pruned nodes may have deleted the block, so check whether
4486 // it's available before trying to send.
4487 if (send
&& (mi
->second
->nStatus
& BLOCK_HAVE_DATA
))
4489 // Send block from disk
4491 if (!ReadBlockFromDisk(block
, (*mi
).second
, consensusParams
))
4492 assert(!"cannot load block from disk");
4493 if (inv
.type
== MSG_BLOCK
)
4494 pfrom
->PushMessage(NetMsgType::BLOCK
, block
);
4495 else // MSG_FILTERED_BLOCK)
4497 LOCK(pfrom
->cs_filter
);
4500 CMerkleBlock
merkleBlock(block
, *pfrom
->pfilter
);
4501 pfrom
->PushMessage(NetMsgType::MERKLEBLOCK
, merkleBlock
);
4502 // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
4503 // This avoids hurting performance by pointlessly requiring a round-trip
4504 // Note that there is currently no way for a node to request any single transactions we didn't send here -
4505 // they must either disconnect and retry or request the full block.
4506 // Thus, the protocol spec specified allows for us to provide duplicate txn here,
4507 // however we MUST always provide at least what the remote peer needs
4508 typedef std::pair
<unsigned int, uint256
> PairType
;
4509 BOOST_FOREACH(PairType
& pair
, merkleBlock
.vMatchedTxn
)
4510 pfrom
->PushMessage(NetMsgType::TX
, block
.vtx
[pair
.first
]);
4516 // Trigger the peer node to send a getblocks request for the next batch of inventory
4517 if (inv
.hash
== pfrom
->hashContinue
)
4519 // Bypass PushInventory, this must send even if redundant,
4520 // and we want it right after the last block so they don't
4521 // wait for other stuff first.
4523 vInv
.push_back(CInv(MSG_BLOCK
, chainActive
.Tip()->GetBlockHash()));
4524 pfrom
->PushMessage(NetMsgType::INV
, vInv
);
4525 pfrom
->hashContinue
.SetNull();
4529 else if (inv
.type
== MSG_TX
)
4531 // Send stream from relay memory
4533 auto mi
= mapRelay
.find(inv
.hash
);
4534 if (mi
!= mapRelay
.end()) {
4535 pfrom
->PushMessage(NetMsgType::TX
, *mi
->second
);
4537 } else if (pfrom
->timeLastMempoolReq
) {
4538 auto txinfo
= mempool
.info(inv
.hash
);
4539 // To protect privacy, do not answer getdata using the mempool when
4540 // that TX couldn't have been INVed in reply to a MEMPOOL request.
4541 if (txinfo
.tx
&& txinfo
.nTime
<= pfrom
->timeLastMempoolReq
) {
4542 pfrom
->PushMessage(NetMsgType::TX
, *txinfo
.tx
);
4547 vNotFound
.push_back(inv
);
4551 // Track requests for our stuff.
4552 GetMainSignals().Inventory(inv
.hash
);
4554 if (inv
.type
== MSG_BLOCK
|| inv
.type
== MSG_FILTERED_BLOCK
)
4559 pfrom
->vRecvGetData
.erase(pfrom
->vRecvGetData
.begin(), it
);
4561 if (!vNotFound
.empty()) {
4562 // Let the peer know that we didn't find what it asked for, so it doesn't
4563 // have to wait around forever. Currently only SPV clients actually care
4564 // about this message: it's needed when they are recursively walking the
4565 // dependencies of relevant unconfirmed transactions. SPV clients want to
4566 // do that because they want to know about (and store and rebroadcast and
4567 // risk analyze) the dependencies of transactions relevant to them, without
4568 // having to download the entire memory pool.
4569 pfrom
->PushMessage(NetMsgType::NOTFOUND
, vNotFound
);
4573 bool static ProcessMessage(CNode
* pfrom
, string strCommand
, CDataStream
& vRecv
, int64_t nTimeReceived
, const CChainParams
& chainparams
)
4575 LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand
), vRecv
.size(), pfrom
->id
);
4576 if (mapArgs
.count("-dropmessagestest") && GetRand(atoi(mapArgs
["-dropmessagestest"])) == 0)
4578 LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
4583 if (!(nLocalServices
& NODE_BLOOM
) &&
4584 (strCommand
== NetMsgType::FILTERLOAD
||
4585 strCommand
== NetMsgType::FILTERADD
||
4586 strCommand
== NetMsgType::FILTERCLEAR
))
4588 if (pfrom
->nVersion
>= NO_BLOOM_VERSION
) {
4590 Misbehaving(pfrom
->GetId(), 100);
4593 pfrom
->fDisconnect
= true;
4599 if (strCommand
== NetMsgType::VERSION
)
4601 // Each connection can only send one version message
4602 if (pfrom
->nVersion
!= 0)
4604 pfrom
->PushMessage(NetMsgType::REJECT
, strCommand
, REJECT_DUPLICATE
, string("Duplicate version message"));
4606 Misbehaving(pfrom
->GetId(), 1);
4613 uint64_t nNonce
= 1;
4614 uint64_t nServiceInt
;
4615 vRecv
>> pfrom
->nVersion
>> nServiceInt
>> nTime
>> addrMe
;
4616 pfrom
->nServices
= ServiceFlags(nServiceInt
);
4617 if (!pfrom
->fInbound
)
4619 addrman
.SetServices(pfrom
->addr
, pfrom
->nServices
);
4621 if (pfrom
->nServicesExpected
& ~pfrom
->nServices
)
4623 LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom
->id
, pfrom
->nServices
, pfrom
->nServicesExpected
);
4624 pfrom
->PushMessage(NetMsgType::REJECT
, strCommand
, REJECT_NONSTANDARD
,
4625 strprintf("Expected to offer services %08x", pfrom
->nServicesExpected
));
4626 pfrom
->fDisconnect
= true;
4630 if (pfrom
->nVersion
< MIN_PEER_PROTO_VERSION
)
4632 // disconnect from peers older than this proto version
4633 LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom
->id
, pfrom
->nVersion
);
4634 pfrom
->PushMessage(NetMsgType::REJECT
, strCommand
, REJECT_OBSOLETE
,
4635 strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION
));
4636 pfrom
->fDisconnect
= true;
4640 if (pfrom
->nVersion
== 10300)
4641 pfrom
->nVersion
= 300;
4643 vRecv
>> addrFrom
>> nNonce
;
4644 if (!vRecv
.empty()) {
4645 vRecv
>> LIMITED_STRING(pfrom
->strSubVer
, MAX_SUBVERSION_LENGTH
);
4646 pfrom
->cleanSubVer
= SanitizeString(pfrom
->strSubVer
);
4648 if (!vRecv
.empty()) {
4649 vRecv
>> pfrom
->nStartingHeight
;
4652 LOCK(pfrom
->cs_filter
);
4654 vRecv
>> pfrom
->fRelayTxes
; // set to true after we get the first filter* message
4656 pfrom
->fRelayTxes
= true;
4659 // Disconnect if we connected to ourself
4660 if (nNonce
== nLocalHostNonce
&& nNonce
> 1)
4662 LogPrintf("connected to self at %s, disconnecting\n", pfrom
->addr
.ToString());
4663 pfrom
->fDisconnect
= true;
4667 pfrom
->addrLocal
= addrMe
;
4668 if (pfrom
->fInbound
&& addrMe
.IsRoutable())
4673 // Be shy and don't send version until we hear
4674 if (pfrom
->fInbound
)
4675 pfrom
->PushVersion();
4677 pfrom
->fClient
= !(pfrom
->nServices
& NODE_NETWORK
);
4679 // Potentially mark this peer as a preferred download peer.
4682 UpdatePreferredDownload(pfrom
, State(pfrom
->GetId()));
4686 pfrom
->PushMessage(NetMsgType::VERACK
);
4687 pfrom
->ssSend
.SetVersion(min(pfrom
->nVersion
, PROTOCOL_VERSION
));
4689 if (!pfrom
->fInbound
)
4691 // Advertise our address
4692 if (fListen
&& !IsInitialBlockDownload())
4694 CAddress addr
= GetLocalAddress(&pfrom
->addr
);
4695 if (addr
.IsRoutable())
4697 LogPrintf("ProcessMessages: advertising address %s\n", addr
.ToString());
4698 pfrom
->PushAddress(addr
);
4699 } else if (IsPeerAddrLocalGood(pfrom
)) {
4700 addr
.SetIP(pfrom
->addrLocal
);
4701 LogPrintf("ProcessMessages: advertising address %s\n", addr
.ToString());
4702 pfrom
->PushAddress(addr
);
4706 // Get recent addresses
4707 if (pfrom
->fOneShot
|| pfrom
->nVersion
>= CADDR_TIME_VERSION
|| addrman
.size() < 1000)
4709 pfrom
->PushMessage(NetMsgType::GETADDR
);
4710 pfrom
->fGetAddr
= true;
4712 addrman
.Good(pfrom
->addr
);
4714 if (((CNetAddr
)pfrom
->addr
) == (CNetAddr
)addrFrom
)
4716 addrman
.Add(addrFrom
, addrFrom
);
4717 addrman
.Good(addrFrom
);
4721 pfrom
->fSuccessfullyConnected
= true;
4725 remoteAddr
= ", peeraddr=" + pfrom
->addr
.ToString();
4727 LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
4728 pfrom
->cleanSubVer
, pfrom
->nVersion
,
4729 pfrom
->nStartingHeight
, addrMe
.ToString(), pfrom
->id
,
4732 int64_t nTimeOffset
= nTime
- GetTime();
4733 pfrom
->nTimeOffset
= nTimeOffset
;
4734 AddTimeData(pfrom
->addr
, nTimeOffset
);
4738 else if (pfrom
->nVersion
== 0)
4740 // Must have a version message before anything else
4742 Misbehaving(pfrom
->GetId(), 1);
4747 else if (strCommand
== NetMsgType::VERACK
)
4749 pfrom
->SetRecvVersion(min(pfrom
->nVersion
, PROTOCOL_VERSION
));
4751 // Mark this node as currently connected, so we update its timestamp later.
4752 if (pfrom
->fNetworkNode
) {
4754 State(pfrom
->GetId())->fCurrentlyConnected
= true;
4757 if (pfrom
->nVersion
>= SENDHEADERS_VERSION
) {
4758 // Tell our peer we prefer to receive headers rather than inv's
4759 // We send this to non-NODE NETWORK peers as well, because even
4760 // non-NODE NETWORK peers can announce blocks (such as pruning
4762 pfrom
->PushMessage(NetMsgType::SENDHEADERS
);
4767 else if (strCommand
== NetMsgType::ADDR
)
4769 vector
<CAddress
> vAddr
;
4772 // Don't want addr from older versions unless seeding
4773 if (pfrom
->nVersion
< CADDR_TIME_VERSION
&& addrman
.size() > 1000)
4775 if (vAddr
.size() > 1000)
4778 Misbehaving(pfrom
->GetId(), 20);
4779 return error("message addr size() = %u", vAddr
.size());
4782 // Store the new addresses
4783 vector
<CAddress
> vAddrOk
;
4784 int64_t nNow
= GetAdjustedTime();
4785 int64_t nSince
= nNow
- 10 * 60;
4786 BOOST_FOREACH(CAddress
& addr
, vAddr
)
4788 boost::this_thread::interruption_point();
4790 if (!(addr
.nServices
& NODE_NETWORK
))
4793 if (addr
.nTime
<= 100000000 || addr
.nTime
> nNow
+ 10 * 60)
4794 addr
.nTime
= nNow
- 5 * 24 * 60 * 60;
4795 pfrom
->AddAddressKnown(addr
);
4796 bool fReachable
= IsReachable(addr
);
4797 if (addr
.nTime
> nSince
&& !pfrom
->fGetAddr
&& vAddr
.size() <= 10 && addr
.IsRoutable())
4799 // Relay to a limited number of other nodes
4802 // Use deterministic randomness to send to the same nodes for 24 hours
4803 // at a time so the addrKnowns of the chosen nodes prevent repeats
4804 static const uint64_t salt0
= GetRand(std::numeric_limits
<uint64_t>::max());
4805 static const uint64_t salt1
= GetRand(std::numeric_limits
<uint64_t>::max());
4806 uint64_t hashAddr
= addr
.GetHash();
4807 multimap
<uint64_t, CNode
*> mapMix
;
4808 const CSipHasher hasher
= CSipHasher(salt0
, salt1
).Write(hashAddr
<< 32).Write((GetTime() + hashAddr
) / (24*60*60));
4809 BOOST_FOREACH(CNode
* pnode
, vNodes
)
4811 if (pnode
->nVersion
< CADDR_TIME_VERSION
)
4813 uint64_t hashKey
= CSipHasher(hasher
).Write(pnode
->id
).Finalize();
4814 mapMix
.insert(make_pair(hashKey
, pnode
));
4816 int nRelayNodes
= fReachable
? 2 : 1; // limited relaying of addresses outside our network(s)
4817 for (multimap
<uint64_t, CNode
*>::iterator mi
= mapMix
.begin(); mi
!= mapMix
.end() && nRelayNodes
-- > 0; ++mi
)
4818 ((*mi
).second
)->PushAddress(addr
);
4821 // Do not store addresses outside our network
4823 vAddrOk
.push_back(addr
);
4825 addrman
.Add(vAddrOk
, pfrom
->addr
, 2 * 60 * 60);
4826 if (vAddr
.size() < 1000)
4827 pfrom
->fGetAddr
= false;
4828 if (pfrom
->fOneShot
)
4829 pfrom
->fDisconnect
= true;
4832 else if (strCommand
== NetMsgType::SENDHEADERS
)
4835 State(pfrom
->GetId())->fPreferHeaders
= true;
4839 else if (strCommand
== NetMsgType::INV
)
4843 if (vInv
.size() > MAX_INV_SZ
)
4846 Misbehaving(pfrom
->GetId(), 20);
4847 return error("message inv size() = %u", vInv
.size());
4850 bool fBlocksOnly
= !fRelayTxes
;
4852 // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
4853 if (pfrom
->fWhitelisted
&& GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY
))
4854 fBlocksOnly
= false;
4858 std::vector
<CInv
> vToFetch
;
4860 for (unsigned int nInv
= 0; nInv
< vInv
.size(); nInv
++)
4862 const CInv
&inv
= vInv
[nInv
];
4864 boost::this_thread::interruption_point();
4866 bool fAlreadyHave
= AlreadyHave(inv
);
4867 LogPrint("net", "got inv: %s %s peer=%d\n", inv
.ToString(), fAlreadyHave
? "have" : "new", pfrom
->id
);
4869 if (inv
.type
== MSG_BLOCK
) {
4870 UpdateBlockAvailability(pfrom
->GetId(), inv
.hash
);
4871 if (!fAlreadyHave
&& !fImporting
&& !fReindex
&& !mapBlocksInFlight
.count(inv
.hash
)) {
4872 // First request the headers preceding the announced block. In the normal fully-synced
4873 // case where a new block is announced that succeeds the current tip (no reorganization),
4874 // there are no such headers.
4875 // Secondly, and only when we are close to being synced, we request the announced block directly,
4876 // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
4877 // time the block arrives, the header chain leading up to it is already validated. Not
4878 // doing this will result in the received block being rejected as an orphan in case it is
4879 // not a direct successor.
4880 pfrom
->PushMessage(NetMsgType::GETHEADERS
, chainActive
.GetLocator(pindexBestHeader
), inv
.hash
);
4881 CNodeState
*nodestate
= State(pfrom
->GetId());
4882 if (CanDirectFetch(chainparams
.GetConsensus()) &&
4883 nodestate
->nBlocksInFlight
< MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
4884 vToFetch
.push_back(inv
);
4885 // Mark block as in flight already, even though the actual "getdata" message only goes out
4886 // later (within the same cs_main lock, though).
4887 MarkBlockAsInFlight(pfrom
->GetId(), inv
.hash
, chainparams
.GetConsensus());
4889 LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader
->nHeight
, inv
.hash
.ToString(), pfrom
->id
);
4894 pfrom
->AddInventoryKnown(inv
);
4896 LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv
.hash
.ToString(), pfrom
->id
);
4897 else if (!fAlreadyHave
&& !fImporting
&& !fReindex
&& !IsInitialBlockDownload())
4901 // Track requests for our stuff
4902 GetMainSignals().Inventory(inv
.hash
);
4904 if (pfrom
->nSendSize
> (SendBufferSize() * 2)) {
4905 Misbehaving(pfrom
->GetId(), 50);
4906 return error("send buffer size() = %u", pfrom
->nSendSize
);
4910 if (!vToFetch
.empty())
4911 pfrom
->PushMessage(NetMsgType::GETDATA
, vToFetch
);
4915 else if (strCommand
== NetMsgType::GETDATA
)
4919 if (vInv
.size() > MAX_INV_SZ
)
4922 Misbehaving(pfrom
->GetId(), 20);
4923 return error("message getdata size() = %u", vInv
.size());
4926 if (fDebug
|| (vInv
.size() != 1))
4927 LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv
.size(), pfrom
->id
);
4929 if ((fDebug
&& vInv
.size() > 0) || (vInv
.size() == 1))
4930 LogPrint("net", "received getdata for: %s peer=%d\n", vInv
[0].ToString(), pfrom
->id
);
4932 pfrom
->vRecvGetData
.insert(pfrom
->vRecvGetData
.end(), vInv
.begin(), vInv
.end());
4933 ProcessGetData(pfrom
, chainparams
.GetConsensus());
4937 else if (strCommand
== NetMsgType::GETBLOCKS
)
4939 CBlockLocator locator
;
4941 vRecv
>> locator
>> hashStop
;
4945 // Find the last block the caller has in the main chain
4946 CBlockIndex
* pindex
= FindForkInGlobalIndex(chainActive
, locator
);
4948 // Send the rest of the chain
4950 pindex
= chainActive
.Next(pindex
);
4952 LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex
? pindex
->nHeight
: -1), hashStop
.IsNull() ? "end" : hashStop
.ToString(), nLimit
, pfrom
->id
);
4953 for (; pindex
; pindex
= chainActive
.Next(pindex
))
4955 if (pindex
->GetBlockHash() == hashStop
)
4957 LogPrint("net", " getblocks stopping at %d %s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
4960 // If pruning, don't inv blocks unless we have on disk and are likely to still have
4961 // for some reasonable time window (1 hour) that block relay might require.
4962 const int nPrunedBlocksLikelyToHave
= MIN_BLOCKS_TO_KEEP
- 3600 / chainparams
.GetConsensus().nPowTargetSpacing
;
4963 if (fPruneMode
&& (!(pindex
->nStatus
& BLOCK_HAVE_DATA
) || pindex
->nHeight
<= chainActive
.Tip()->nHeight
- nPrunedBlocksLikelyToHave
))
4965 LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
4968 pfrom
->PushInventory(CInv(MSG_BLOCK
, pindex
->GetBlockHash()));
4971 // When this block is requested, we'll send an inv that'll
4972 // trigger the peer to getblocks the next batch of inventory.
4973 LogPrint("net", " getblocks stopping at limit %d %s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
4974 pfrom
->hashContinue
= pindex
->GetBlockHash();
4981 else if (strCommand
== NetMsgType::GETHEADERS
)
4983 CBlockLocator locator
;
4985 vRecv
>> locator
>> hashStop
;
4988 if (IsInitialBlockDownload() && !pfrom
->fWhitelisted
) {
4989 LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom
->id
);
4993 CNodeState
*nodestate
= State(pfrom
->GetId());
4994 CBlockIndex
* pindex
= NULL
;
4995 if (locator
.IsNull())
4997 // If locator is null, return the hashStop block
4998 BlockMap::iterator mi
= mapBlockIndex
.find(hashStop
);
4999 if (mi
== mapBlockIndex
.end())
5001 pindex
= (*mi
).second
;
5005 // Find the last block the caller has in the main chain
5006 pindex
= FindForkInGlobalIndex(chainActive
, locator
);
5008 pindex
= chainActive
.Next(pindex
);
5011 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
5012 vector
<CBlock
> vHeaders
;
5013 int nLimit
= MAX_HEADERS_RESULTS
;
5014 LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex
? pindex
->nHeight
: -1), hashStop
.ToString(), pfrom
->id
);
5015 for (; pindex
; pindex
= chainActive
.Next(pindex
))
5017 vHeaders
.push_back(pindex
->GetBlockHeader());
5018 if (--nLimit
<= 0 || pindex
->GetBlockHash() == hashStop
)
5021 // pindex can be NULL either if we sent chainActive.Tip() OR
5022 // if our peer has chainActive.Tip() (and thus we are sending an empty
5023 // headers message). In both cases it's safe to update
5024 // pindexBestHeaderSent to be our tip.
5025 nodestate
->pindexBestHeaderSent
= pindex
? pindex
: chainActive
.Tip();
5026 pfrom
->PushMessage(NetMsgType::HEADERS
, vHeaders
);
5030 else if (strCommand
== NetMsgType::TX
)
5032 // Stop processing the transaction early if
5033 // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
5034 if (!fRelayTxes
&& (!pfrom
->fWhitelisted
|| !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY
)))
5036 LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom
->id
);
5040 vector
<uint256
> vWorkQueue
;
5041 vector
<uint256
> vEraseQueue
;
5045 CInv
inv(MSG_TX
, tx
.GetHash());
5046 pfrom
->AddInventoryKnown(inv
);
5050 bool fMissingInputs
= false;
5051 CValidationState state
;
5053 pfrom
->setAskFor
.erase(inv
.hash
);
5054 mapAlreadyAskedFor
.erase(inv
.hash
);
5056 if (!AlreadyHave(inv
) && AcceptToMemoryPool(mempool
, state
, tx
, true, &fMissingInputs
)) {
5057 mempool
.check(pcoinsTip
);
5058 RelayTransaction(tx
);
5059 vWorkQueue
.push_back(inv
.hash
);
5061 LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
5063 tx
.GetHash().ToString(),
5064 mempool
.size(), mempool
.DynamicMemoryUsage() / 1000);
5066 // Recursively process any orphan transactions that depended on this one
5067 set
<NodeId
> setMisbehaving
;
5068 for (unsigned int i
= 0; i
< vWorkQueue
.size(); i
++)
5070 map
<uint256
, set
<uint256
> >::iterator itByPrev
= mapOrphanTransactionsByPrev
.find(vWorkQueue
[i
]);
5071 if (itByPrev
== mapOrphanTransactionsByPrev
.end())
5073 for (set
<uint256
>::iterator mi
= itByPrev
->second
.begin();
5074 mi
!= itByPrev
->second
.end();
5077 const uint256
& orphanHash
= *mi
;
5078 const CTransaction
& orphanTx
= mapOrphanTransactions
[orphanHash
].tx
;
5079 NodeId fromPeer
= mapOrphanTransactions
[orphanHash
].fromPeer
;
5080 bool fMissingInputs2
= false;
5081 // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
5082 // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
5083 // anyone relaying LegitTxX banned)
5084 CValidationState stateDummy
;
5087 if (setMisbehaving
.count(fromPeer
))
5089 if (AcceptToMemoryPool(mempool
, stateDummy
, orphanTx
, true, &fMissingInputs2
)) {
5090 LogPrint("mempool", " accepted orphan tx %s\n", orphanHash
.ToString());
5091 RelayTransaction(orphanTx
);
5092 vWorkQueue
.push_back(orphanHash
);
5093 vEraseQueue
.push_back(orphanHash
);
5095 else if (!fMissingInputs2
)
5098 if (stateDummy
.IsInvalid(nDos
) && nDos
> 0)
5100 // Punish peer that gave us an invalid orphan tx
5101 Misbehaving(fromPeer
, nDos
);
5102 setMisbehaving
.insert(fromPeer
);
5103 LogPrint("mempool", " invalid orphan tx %s\n", orphanHash
.ToString());
5105 // Has inputs but not accepted to mempool
5106 // Probably non-standard or insufficient fee/priority
5107 LogPrint("mempool", " removed orphan tx %s\n", orphanHash
.ToString());
5108 vEraseQueue
.push_back(orphanHash
);
5109 assert(recentRejects
);
5110 recentRejects
->insert(orphanHash
);
5112 mempool
.check(pcoinsTip
);
5116 BOOST_FOREACH(uint256 hash
, vEraseQueue
)
5117 EraseOrphanTx(hash
);
5119 else if (fMissingInputs
)
5121 AddOrphanTx(tx
, pfrom
->GetId());
5123 // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
5124 unsigned int nMaxOrphanTx
= (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS
));
5125 unsigned int nEvicted
= LimitOrphanTxSize(nMaxOrphanTx
);
5127 LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted
);
5129 assert(recentRejects
);
5130 recentRejects
->insert(tx
.GetHash());
5132 if (pfrom
->fWhitelisted
&& GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY
)) {
5133 // Always relay transactions received from whitelisted peers, even
5134 // if they were already in the mempool or rejected from it due
5135 // to policy, allowing the node to function as a gateway for
5136 // nodes hidden behind it.
5138 // Never relay transactions that we would assign a non-zero DoS
5139 // score for, as we expect peers to do the same with us in that
5142 if (!state
.IsInvalid(nDoS
) || nDoS
== 0) {
5143 LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx
.GetHash().ToString(), pfrom
->id
);
5144 RelayTransaction(tx
);
5146 LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx
.GetHash().ToString(), pfrom
->id
, FormatStateMessage(state
));
5151 if (state
.IsInvalid(nDoS
))
5153 LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx
.GetHash().ToString(),
5155 FormatStateMessage(state
));
5156 if (state
.GetRejectCode() < REJECT_INTERNAL
) // Never send AcceptToMemoryPool's internal codes over P2P
5157 pfrom
->PushMessage(NetMsgType::REJECT
, strCommand
, (unsigned char)state
.GetRejectCode(),
5158 state
.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH
), inv
.hash
);
5160 Misbehaving(pfrom
->GetId(), nDoS
);
5162 FlushStateToDisk(state
, FLUSH_STATE_PERIODIC
);
5166 else if (strCommand
== NetMsgType::HEADERS
&& !fImporting
&& !fReindex
) // Ignore headers received while importing
5168 std::vector
<CBlockHeader
> headers
;
5170 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
5171 unsigned int nCount
= ReadCompactSize(vRecv
);
5172 if (nCount
> MAX_HEADERS_RESULTS
) {
5174 Misbehaving(pfrom
->GetId(), 20);
5175 return error("headers message size = %u", nCount
);
5177 headers
.resize(nCount
);
5178 for (unsigned int n
= 0; n
< nCount
; n
++) {
5179 vRecv
>> headers
[n
];
5180 ReadCompactSize(vRecv
); // ignore tx count; assume it is 0.
5187 // Nothing interesting. Stop asking this peers for more headers.
5191 // If we already know the last header in the message, then it contains
5192 // no new information for us. In this case, we do not request
5193 // more headers later. This prevents multiple chains of redundant
5194 // getheader requests from running in parallel if triggered by incoming
5195 // blocks while the node is still in initial headers sync.
5196 const bool hasNewHeaders
= (mapBlockIndex
.count(headers
.back().GetHash()) == 0);
5198 CBlockIndex
*pindexLast
= NULL
;
5199 BOOST_FOREACH(const CBlockHeader
& header
, headers
) {
5200 CValidationState state
;
5201 if (pindexLast
!= NULL
&& header
.hashPrevBlock
!= pindexLast
->GetBlockHash()) {
5202 Misbehaving(pfrom
->GetId(), 20);
5203 return error("non-continuous headers sequence");
5205 if (!AcceptBlockHeader(header
, state
, chainparams
, &pindexLast
)) {
5207 if (state
.IsInvalid(nDoS
)) {
5209 Misbehaving(pfrom
->GetId(), nDoS
);
5210 return error("invalid header received");
5216 UpdateBlockAvailability(pfrom
->GetId(), pindexLast
->GetBlockHash());
5218 if (nCount
== MAX_HEADERS_RESULTS
&& pindexLast
&& hasNewHeaders
) {
5219 // Headers message had its maximum size; the peer may have more headers.
5220 // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
5221 // from there instead.
5222 LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast
->nHeight
, pfrom
->id
, pfrom
->nStartingHeight
);
5223 pfrom
->PushMessage(NetMsgType::GETHEADERS
, chainActive
.GetLocator(pindexLast
), uint256());
5226 bool fCanDirectFetch
= CanDirectFetch(chainparams
.GetConsensus());
5227 CNodeState
*nodestate
= State(pfrom
->GetId());
5228 // If this set of headers is valid and ends in a block with at least as
5229 // much work as our tip, download as much as possible.
5230 if (fCanDirectFetch
&& pindexLast
->IsValid(BLOCK_VALID_TREE
) && chainActive
.Tip()->nChainWork
<= pindexLast
->nChainWork
) {
5231 vector
<CBlockIndex
*> vToFetch
;
5232 CBlockIndex
*pindexWalk
= pindexLast
;
5233 // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
5234 while (pindexWalk
&& !chainActive
.Contains(pindexWalk
) && vToFetch
.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
5235 if (!(pindexWalk
->nStatus
& BLOCK_HAVE_DATA
) &&
5236 !mapBlocksInFlight
.count(pindexWalk
->GetBlockHash())) {
5237 // We don't have this block, and it's not yet in flight.
5238 vToFetch
.push_back(pindexWalk
);
5240 pindexWalk
= pindexWalk
->pprev
;
5242 // If pindexWalk still isn't on our main chain, we're looking at a
5243 // very large reorg at a time we think we're close to caught up to
5244 // the main chain -- this shouldn't really happen. Bail out on the
5245 // direct fetch and rely on parallel download instead.
5246 if (!chainActive
.Contains(pindexWalk
)) {
5247 LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
5248 pindexLast
->GetBlockHash().ToString(),
5249 pindexLast
->nHeight
);
5251 vector
<CInv
> vGetData
;
5252 // Download as much as possible, from earliest to latest.
5253 BOOST_REVERSE_FOREACH(CBlockIndex
*pindex
, vToFetch
) {
5254 if (nodestate
->nBlocksInFlight
>= MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
5255 // Can't download any more from this peer
5258 vGetData
.push_back(CInv(MSG_BLOCK
, pindex
->GetBlockHash()));
5259 MarkBlockAsInFlight(pfrom
->GetId(), pindex
->GetBlockHash(), chainparams
.GetConsensus(), pindex
);
5260 LogPrint("net", "Requesting block %s from peer=%d\n",
5261 pindex
->GetBlockHash().ToString(), pfrom
->id
);
5263 if (vGetData
.size() > 1) {
5264 LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
5265 pindexLast
->GetBlockHash().ToString(), pindexLast
->nHeight
);
5267 if (vGetData
.size() > 0) {
5268 pfrom
->PushMessage(NetMsgType::GETDATA
, vGetData
);
5273 CheckBlockIndex(chainparams
.GetConsensus());
5279 else if (strCommand
== NetMsgType::BLOCK
&& !fImporting
&& !fReindex
) // Ignore blocks received while importing
5284 LogPrint("net", "received block %s peer=%d\n", block
.GetHash().ToString(), pfrom
->id
);
5286 CValidationState state
;
5287 // Process all blocks from whitelisted peers, even if not requested,
5288 // unless we're still syncing with the network.
5289 // Such an unrequested block may still be processed, subject to the
5290 // conditions in AcceptBlock().
5291 bool forceProcessing
= pfrom
->fWhitelisted
&& !IsInitialBlockDownload();
5292 ProcessNewBlock(state
, chainparams
, pfrom
, &block
, forceProcessing
, NULL
);
5294 if (state
.IsInvalid(nDoS
)) {
5295 assert (state
.GetRejectCode() < REJECT_INTERNAL
); // Blocks are never rejected with internal reject codes
5296 pfrom
->PushMessage(NetMsgType::REJECT
, strCommand
, (unsigned char)state
.GetRejectCode(),
5297 state
.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH
), block
.GetHash());
5300 Misbehaving(pfrom
->GetId(), nDoS
);
5307 else if (strCommand
== NetMsgType::GETADDR
)
5309 // This asymmetric behavior for inbound and outbound connections was introduced
5310 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
5311 // to users' AddrMan and later request them by sending getaddr messages.
5312 // Making nodes which are behind NAT and can only make outgoing connections ignore
5313 // the getaddr message mitigates the attack.
5314 if (!pfrom
->fInbound
) {
5315 LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom
->id
);
5319 // Only send one GetAddr response per connection to reduce resource waste
5320 // and discourage addr stamping of INV announcements.
5321 if (pfrom
->fSentAddr
) {
5322 LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom
->id
);
5325 pfrom
->fSentAddr
= true;
5327 pfrom
->vAddrToSend
.clear();
5328 vector
<CAddress
> vAddr
= addrman
.GetAddr();
5329 BOOST_FOREACH(const CAddress
&addr
, vAddr
)
5330 pfrom
->PushAddress(addr
);
5334 else if (strCommand
== NetMsgType::MEMPOOL
)
5336 if (!(nLocalServices
& NODE_BLOOM
) && !pfrom
->fWhitelisted
)
5338 LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom
->GetId());
5339 pfrom
->fDisconnect
= true;
5343 if (CNode::OutboundTargetReached(false) && !pfrom
->fWhitelisted
)
5345 LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom
->GetId());
5346 pfrom
->fDisconnect
= true;
5350 LOCK(pfrom
->cs_inventory
);
5351 pfrom
->fSendMempool
= true;
5355 else if (strCommand
== NetMsgType::PING
)
5357 if (pfrom
->nVersion
> BIP0031_VERSION
)
5361 // Echo the message back with the nonce. This allows for two useful features:
5363 // 1) A remote node can quickly check if the connection is operational
5364 // 2) Remote nodes can measure the latency of the network thread. If this node
5365 // is overloaded it won't respond to pings quickly and the remote node can
5366 // avoid sending us more work, like chain download requests.
5368 // The nonce stops the remote getting confused between different pings: without
5369 // it, if the remote node sends a ping once per second and this node takes 5
5370 // seconds to respond to each, the 5th ping the remote sends would appear to
5371 // return very quickly.
5372 pfrom
->PushMessage(NetMsgType::PONG
, nonce
);
5377 else if (strCommand
== NetMsgType::PONG
)
5379 int64_t pingUsecEnd
= nTimeReceived
;
5381 size_t nAvail
= vRecv
.in_avail();
5382 bool bPingFinished
= false;
5383 std::string sProblem
;
5385 if (nAvail
>= sizeof(nonce
)) {
5388 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
5389 if (pfrom
->nPingNonceSent
!= 0) {
5390 if (nonce
== pfrom
->nPingNonceSent
) {
5391 // Matching pong received, this ping is no longer outstanding
5392 bPingFinished
= true;
5393 int64_t pingUsecTime
= pingUsecEnd
- pfrom
->nPingUsecStart
;
5394 if (pingUsecTime
> 0) {
5395 // Successful ping time measurement, replace previous
5396 pfrom
->nPingUsecTime
= pingUsecTime
;
5397 pfrom
->nMinPingUsecTime
= std::min(pfrom
->nMinPingUsecTime
, pingUsecTime
);
5399 // This should never happen
5400 sProblem
= "Timing mishap";
5403 // Nonce mismatches are normal when pings are overlapping
5404 sProblem
= "Nonce mismatch";
5406 // This is most likely a bug in another implementation somewhere; cancel this ping
5407 bPingFinished
= true;
5408 sProblem
= "Nonce zero";
5412 sProblem
= "Unsolicited pong without ping";
5415 // This is most likely a bug in another implementation somewhere; cancel this ping
5416 bPingFinished
= true;
5417 sProblem
= "Short payload";
5420 if (!(sProblem
.empty())) {
5421 LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
5424 pfrom
->nPingNonceSent
,
5428 if (bPingFinished
) {
5429 pfrom
->nPingNonceSent
= 0;
5434 else if (strCommand
== NetMsgType::FILTERLOAD
)
5436 CBloomFilter filter
;
5439 LOCK(pfrom
->cs_filter
);
5441 if (!filter
.IsWithinSizeConstraints())
5443 // There is no excuse for sending a too-large filter
5445 Misbehaving(pfrom
->GetId(), 100);
5449 delete pfrom
->pfilter
;
5450 pfrom
->pfilter
= new CBloomFilter(filter
);
5451 pfrom
->pfilter
->UpdateEmptyFull();
5453 pfrom
->fRelayTxes
= true;
5457 else if (strCommand
== NetMsgType::FILTERADD
)
5459 vector
<unsigned char> vData
;
5462 // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
5463 // and thus, the maximum size any matched object can have) in a filteradd message
5464 if (vData
.size() > MAX_SCRIPT_ELEMENT_SIZE
)
5467 Misbehaving(pfrom
->GetId(), 100);
5469 LOCK(pfrom
->cs_filter
);
5471 pfrom
->pfilter
->insert(vData
);
5475 Misbehaving(pfrom
->GetId(), 100);
5481 else if (strCommand
== NetMsgType::FILTERCLEAR
)
5483 LOCK(pfrom
->cs_filter
);
5484 delete pfrom
->pfilter
;
5485 pfrom
->pfilter
= new CBloomFilter();
5486 pfrom
->fRelayTxes
= true;
5490 else if (strCommand
== NetMsgType::REJECT
)
5494 string strMsg
; unsigned char ccode
; string strReason
;
5495 vRecv
>> LIMITED_STRING(strMsg
, CMessageHeader::COMMAND_SIZE
) >> ccode
>> LIMITED_STRING(strReason
, MAX_REJECT_MESSAGE_LENGTH
);
5498 ss
<< strMsg
<< " code " << itostr(ccode
) << ": " << strReason
;
5500 if (strMsg
== NetMsgType::BLOCK
|| strMsg
== NetMsgType::TX
)
5504 ss
<< ": hash " << hash
.ToString();
5506 LogPrint("net", "Reject %s\n", SanitizeString(ss
.str()));
5507 } catch (const std::ios_base::failure
&) {
5508 // Avoid feedback loops by preventing reject messages from triggering a new reject message.
5509 LogPrint("net", "Unparseable reject message received\n");
5514 else if (strCommand
== NetMsgType::FEEFILTER
) {
5515 CAmount newFeeFilter
= 0;
5516 vRecv
>> newFeeFilter
;
5517 if (MoneyRange(newFeeFilter
)) {
5519 LOCK(pfrom
->cs_feeFilter
);
5520 pfrom
->minFeeFilter
= newFeeFilter
;
5522 LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter
).ToString(), pfrom
->id
);
5527 // Ignore unknown commands for extensibility
5528 LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand
), pfrom
->id
);
5536 // requires LOCK(cs_vRecvMsg)
// Drain and dispatch the completed network messages queued on this peer.
// Returns false (via fOk, whose return statement lies outside this span)
// when processing should pause; requires LOCK(cs_vRecvMsg) per the caller
// contract noted above this function.
5537 bool ProcessMessages(CNode
* pfrom
)
5539 const CChainParams
& chainparams
= Params();
5541 // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
5545 // (4) message start
// Service any queued getdata requests first, and bail out early if some
// remain, so responses keep their original order.
5553 if (!pfrom
->vRecvGetData
.empty())
5554 ProcessGetData(pfrom
, chainparams
.GetConsensus());
5556 // this maintains the order of responses
5557 if (!pfrom
->vRecvGetData
.empty()) return fOk
;
// Walk the receive queue until the peer disconnects or it is exhausted.
5559 std::deque
<CNetMessage
>::iterator it
= pfrom
->vRecvMsg
.begin();
5560 while (!pfrom
->fDisconnect
&& it
!= pfrom
->vRecvMsg
.end()) {
5561 // Don't bother if send buffer is too full to respond anyway
5562 if (pfrom
->nSendSize
>= SendBufferSize())
5566 CNetMessage
& msg
= *it
;
5569 // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
5570 // msg.hdr.nMessageSize, msg.vRecv.size(),
5571 // msg.complete() ? "Y" : "N");
5573 // end, if an incomplete message is found
5574 if (!msg
.complete())
5577 // at this point, any failure means we can delete the current message
5580 // Scan for message start
// Reject messages whose network magic bytes do not match this chain.
5581 if (memcmp(msg
.hdr
.pchMessageStart
, chainparams
.MessageStart(), MESSAGE_START_SIZE
) != 0) {
5582 LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg
.hdr
.GetCommand()), pfrom
->id
);
// Validate the message header itself (command string, declared size).
5588 CMessageHeader
& hdr
= msg
.hdr
;
5589 if (!hdr
.IsValid(chainparams
.MessageStart()))
5591 LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr
.GetCommand()), pfrom
->id
);
5594 string strCommand
= hdr
.GetCommand();
5597 unsigned int nMessageSize
= hdr
.nMessageSize
;
// Verify the payload checksum: hash the first nMessageSize bytes of the
// payload and compare its leading 32 bits against hdr.nChecksum.
5600 CDataStream
& vRecv
= msg
.vRecv
;
5601 uint256 hash
= Hash(vRecv
.begin(), vRecv
.begin() + nMessageSize
);
5602 unsigned int nChecksum
= ReadLE32((unsigned char*)&hash
);
5603 if (nChecksum
!= hdr
.nChecksum
)
5605 LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__
,
5606 SanitizeString(strCommand
), nMessageSize
, nChecksum
, hdr
.nChecksum
);
// Dispatch the validated message to the per-command handler.
5614 fRet
= ProcessMessage(pfrom
, strCommand
, vRecv
, msg
.nTime
, chainparams
);
5615 boost::this_thread::interruption_point();
// Deserialization errors: answer with a REJECT_MALFORMED reject message,
// then classify the failure by its exception text for logging.
5617 catch (const std::ios_base::failure
& e
)
5619 pfrom
->PushMessage(NetMsgType::REJECT
, strCommand
, REJECT_MALFORMED
, string("error parsing message"));
5620 if (strstr(e
.what(), "end of data"))
5622 // Allow exceptions from under-length message on vRecv
5623 LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__
, SanitizeString(strCommand
), nMessageSize
, e
.what());
5625 else if (strstr(e
.what(), "size too large"))
5627 // Allow exceptions from over-long size
5628 LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__
, SanitizeString(strCommand
), nMessageSize
, e
.what());
5630 else if (strstr(e
.what(), "non-canonical ReadCompactSize()"))
5632 // Allow exceptions from non-canonical encoding
5633 LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__
, SanitizeString(strCommand
), nMessageSize
, e
.what());
5637 PrintExceptionContinue(&e
, "ProcessMessages()");
// Thread interruption is re-thrown implicitly by not being swallowed here;
// other exception types are logged and processing continues.
5640 catch (const boost::thread_interrupted
&) {
5643 catch (const std::exception
& e
) {
5644 PrintExceptionContinue(&e
, "ProcessMessages()");
5646 PrintExceptionContinue(NULL
, "ProcessMessages()");
5650 LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__
, SanitizeString(strCommand
), nMessageSize
, pfrom
->id
);
5655 // In case the connection got shut down, its receive buffer was wiped
// Erase the messages consumed this pass, but only if the peer is still
// connected (a disconnect clears vRecvMsg, invalidating `it`).
5656 if (!pfrom
->fDisconnect
)
5657 pfrom
->vRecvMsg
.erase(pfrom
->vRecvMsg
.begin(), it
);
5662 class CompareInvMempoolOrder
5666 CompareInvMempoolOrder(CTxMemPool
*mempool
)
5671 bool operator()(std::set
<uint256
>::iterator a
, std::set
<uint256
>::iterator b
)
5673 /* As std::make_heap produces a max-heap, we want the entries with the
5674 * fewest ancestors/highest fee to sort later. */
5675 return mp
->CompareDepthAndScore(*b
, *a
);
5679 bool SendMessages(CNode
* pto
)
5681 const Consensus::Params
& consensusParams
= Params().GetConsensus();
5683 // Don't send anything until we get its version message
5684 if (pto
->nVersion
== 0)
5690 bool pingSend
= false;
5691 if (pto
->fPingQueued
) {
5692 // RPC ping request by user
5695 if (pto
->nPingNonceSent
== 0 && pto
->nPingUsecStart
+ PING_INTERVAL
* 1000000 < GetTimeMicros()) {
5696 // Ping automatically sent as a latency probe & keepalive.
5701 while (nonce
== 0) {
5702 GetRandBytes((unsigned char*)&nonce
, sizeof(nonce
));
5704 pto
->fPingQueued
= false;
5705 pto
->nPingUsecStart
= GetTimeMicros();
5706 if (pto
->nVersion
> BIP0031_VERSION
) {
5707 pto
->nPingNonceSent
= nonce
;
5708 pto
->PushMessage(NetMsgType::PING
, nonce
);
5710 // Peer is too old to support ping command with nonce, pong will never arrive.
5711 pto
->nPingNonceSent
= 0;
5712 pto
->PushMessage(NetMsgType::PING
);
5716 TRY_LOCK(cs_main
, lockMain
); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
5720 // Address refresh broadcast
5721 int64_t nNow
= GetTimeMicros();
5722 if (!IsInitialBlockDownload() && pto
->nNextLocalAddrSend
< nNow
) {
5723 AdvertiseLocal(pto
);
5724 pto
->nNextLocalAddrSend
= PoissonNextSend(nNow
, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
);
5730 if (pto
->nNextAddrSend
< nNow
) {
5731 pto
->nNextAddrSend
= PoissonNextSend(nNow
, AVG_ADDRESS_BROADCAST_INTERVAL
);
5732 vector
<CAddress
> vAddr
;
5733 vAddr
.reserve(pto
->vAddrToSend
.size());
5734 BOOST_FOREACH(const CAddress
& addr
, pto
->vAddrToSend
)
5736 if (!pto
->addrKnown
.contains(addr
.GetKey()))
5738 pto
->addrKnown
.insert(addr
.GetKey());
5739 vAddr
.push_back(addr
);
5740 // receiver rejects addr messages larger than 1000
5741 if (vAddr
.size() >= 1000)
5743 pto
->PushMessage(NetMsgType::ADDR
, vAddr
);
5748 pto
->vAddrToSend
.clear();
5750 pto
->PushMessage(NetMsgType::ADDR
, vAddr
);
5751 // we only send the big addr message once
5752 if (pto
->vAddrToSend
.capacity() > 40)
5753 pto
->vAddrToSend
.shrink_to_fit();
5756 CNodeState
&state
= *State(pto
->GetId());
5757 if (state
.fShouldBan
) {
5758 if (pto
->fWhitelisted
)
5759 LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto
->addr
.ToString());
5761 pto
->fDisconnect
= true;
5762 if (pto
->addr
.IsLocal())
5763 LogPrintf("Warning: not banning local peer %s!\n", pto
->addr
.ToString());
5766 CNode::Ban(pto
->addr
, BanReasonNodeMisbehaving
);
5769 state
.fShouldBan
= false;
5772 BOOST_FOREACH(const CBlockReject
& reject
, state
.rejects
)
5773 pto
->PushMessage(NetMsgType::REJECT
, (string
)NetMsgType::BLOCK
, reject
.chRejectCode
, reject
.strRejectReason
, reject
.hashBlock
);
5774 state
.rejects
.clear();
5777 if (pindexBestHeader
== NULL
)
5778 pindexBestHeader
= chainActive
.Tip();
5779 bool fFetch
= state
.fPreferredDownload
|| (nPreferredDownload
== 0 && !pto
->fClient
&& !pto
->fOneShot
); // Download if this is a nice peer, or we have no nice peers and this one might do.
5780 if (!state
.fSyncStarted
&& !pto
->fClient
&& !fImporting
&& !fReindex
) {
5781 // Only actively request headers from a single peer, unless we're close to today.
5782 if ((nSyncStarted
== 0 && fFetch
) || pindexBestHeader
->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
5783 state
.fSyncStarted
= true;
5785 const CBlockIndex
*pindexStart
= pindexBestHeader
;
5786 /* If possible, start at the block preceding the currently
5787 best known header. This ensures that we always get a
5788 non-empty list of headers back as long as the peer
5789 is up-to-date. With a non-empty response, we can initialise
5790 the peer's known best block. This wouldn't be possible
5791 if we requested starting at pindexBestHeader and
5792 got back an empty response. */
5793 if (pindexStart
->pprev
)
5794 pindexStart
= pindexStart
->pprev
;
5795 LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart
->nHeight
, pto
->id
, pto
->nStartingHeight
);
5796 pto
->PushMessage(NetMsgType::GETHEADERS
, chainActive
.GetLocator(pindexStart
), uint256());
5800 // Resend wallet transactions that haven't gotten in a block yet
5801 // Except during reindex, importing and IBD, when old wallet
5802 // transactions become unconfirmed and spams other nodes.
5803 if (!fReindex
&& !fImporting
&& !IsInitialBlockDownload())
5805 GetMainSignals().Broadcast(nTimeBestReceived
);
5809 // Try sending block announcements via headers
5812 // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
5813 // list of block hashes we're relaying, and our peer wants
5814 // headers announcements, then find the first header
5815 // not yet known to our peer but would connect, and send.
5816 // If no header would connect, or if we have too many
5817 // blocks, or if the peer doesn't want headers, just
5818 // add all to the inv queue.
5819 LOCK(pto
->cs_inventory
);
5820 vector
<CBlock
> vHeaders
;
5821 bool fRevertToInv
= (!state
.fPreferHeaders
|| pto
->vBlockHashesToAnnounce
.size() > MAX_BLOCKS_TO_ANNOUNCE
);
5822 CBlockIndex
*pBestIndex
= NULL
; // last header queued for delivery
5823 ProcessBlockAvailability(pto
->id
); // ensure pindexBestKnownBlock is up-to-date
5825 if (!fRevertToInv
) {
5826 bool fFoundStartingHeader
= false;
5827 // Try to find first header that our peer doesn't have, and
5828 // then send all headers past that one. If we come across any
5829 // headers that aren't on chainActive, give up.
5830 BOOST_FOREACH(const uint256
&hash
, pto
->vBlockHashesToAnnounce
) {
5831 BlockMap::iterator mi
= mapBlockIndex
.find(hash
);
5832 assert(mi
!= mapBlockIndex
.end());
5833 CBlockIndex
*pindex
= mi
->second
;
5834 if (chainActive
[pindex
->nHeight
] != pindex
) {
5835 // Bail out if we reorged away from this block
5836 fRevertToInv
= true;
5839 if (pBestIndex
!= NULL
&& pindex
->pprev
!= pBestIndex
) {
5840 // This means that the list of blocks to announce don't
5841 // connect to each other.
5842 // This shouldn't really be possible to hit during
5843 // regular operation (because reorgs should take us to
5844 // a chain that has some block not on the prior chain,
5845 // which should be caught by the prior check), but one
5846 // way this could happen is by using invalidateblock /
5847 // reconsiderblock repeatedly on the tip, causing it to
5848 // be added multiple times to vBlockHashesToAnnounce.
5849 // Robustly deal with this rare situation by reverting
5851 fRevertToInv
= true;
5854 pBestIndex
= pindex
;
5855 if (fFoundStartingHeader
) {
5856 // add this to the headers message
5857 vHeaders
.push_back(pindex
->GetBlockHeader());
5858 } else if (PeerHasHeader(&state
, pindex
)) {
5859 continue; // keep looking for the first new block
5860 } else if (pindex
->pprev
== NULL
|| PeerHasHeader(&state
, pindex
->pprev
)) {
5861 // Peer doesn't have this header but they do have the prior one.
5862 // Start sending headers.
5863 fFoundStartingHeader
= true;
5864 vHeaders
.push_back(pindex
->GetBlockHeader());
5866 // Peer doesn't have this header or the prior one -- nothing will
5867 // connect, so bail out.
5868 fRevertToInv
= true;
5874 // If falling back to using an inv, just try to inv the tip.
5875 // The last entry in vBlockHashesToAnnounce was our tip at some point
5877 if (!pto
->vBlockHashesToAnnounce
.empty()) {
5878 const uint256
&hashToAnnounce
= pto
->vBlockHashesToAnnounce
.back();
5879 BlockMap::iterator mi
= mapBlockIndex
.find(hashToAnnounce
);
5880 assert(mi
!= mapBlockIndex
.end());
5881 CBlockIndex
*pindex
= mi
->second
;
5883 // Warn if we're announcing a block that is not on the main chain.
5884 // This should be very rare and could be optimized out.
5885 // Just log for now.
5886 if (chainActive
[pindex
->nHeight
] != pindex
) {
5887 LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
5888 hashToAnnounce
.ToString(), chainActive
.Tip()->GetBlockHash().ToString());
5891 // If the peer's chain has this block, don't inv it back.
5892 if (!PeerHasHeader(&state
, pindex
)) {
5893 pto
->PushInventory(CInv(MSG_BLOCK
, hashToAnnounce
));
5894 LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__
,
5895 pto
->id
, hashToAnnounce
.ToString());
5898 } else if (!vHeaders
.empty()) {
5899 if (vHeaders
.size() > 1) {
5900 LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__
,
5902 vHeaders
.front().GetHash().ToString(),
5903 vHeaders
.back().GetHash().ToString(), pto
->id
);
5905 LogPrint("net", "%s: sending header %s to peer=%d\n", __func__
,
5906 vHeaders
.front().GetHash().ToString(), pto
->id
);
5908 pto
->PushMessage(NetMsgType::HEADERS
, vHeaders
);
5909 state
.pindexBestHeaderSent
= pBestIndex
;
5911 pto
->vBlockHashesToAnnounce
.clear();
5915 // Message: inventory
5919 LOCK(pto
->cs_inventory
);
5920 vInv
.reserve(std::max
<size_t>(pto
->vInventoryBlockToSend
.size(), INVENTORY_BROADCAST_MAX
));
5923 BOOST_FOREACH(const uint256
& hash
, pto
->vInventoryBlockToSend
) {
5924 vInv
.push_back(CInv(MSG_BLOCK
, hash
));
5925 if (vInv
.size() == MAX_INV_SZ
) {
5926 pto
->PushMessage(NetMsgType::INV
, vInv
);
5930 pto
->vInventoryBlockToSend
.clear();
5932 // Check whether periodic sends should happen
5933 bool fSendTrickle
= pto
->fWhitelisted
;
5934 if (pto
->nNextInvSend
< nNow
) {
5935 fSendTrickle
= true;
5936 // Use half the delay for outbound peers, as there is less privacy concern for them.
5937 pto
->nNextInvSend
= PoissonNextSend(nNow
, INVENTORY_BROADCAST_INTERVAL
>> !pto
->fInbound
);
5940 // Time to send but the peer has requested we not relay transactions.
5942 LOCK(pto
->cs_filter
);
5943 if (!pto
->fRelayTxes
) pto
->setInventoryTxToSend
.clear();
5946 // Respond to BIP35 mempool requests
5947 if (fSendTrickle
&& pto
->fSendMempool
) {
5948 auto vtxinfo
= mempool
.infoAll();
5949 pto
->fSendMempool
= false;
5950 CAmount filterrate
= 0;
5952 LOCK(pto
->cs_feeFilter
);
5953 filterrate
= pto
->minFeeFilter
;
5956 LOCK(pto
->cs_filter
);
5958 for (const auto& txinfo
: vtxinfo
) {
5959 const uint256
& hash
= txinfo
.tx
->GetHash();
5960 CInv
inv(MSG_TX
, hash
);
5961 pto
->setInventoryTxToSend
.erase(hash
);
5963 if (txinfo
.feeRate
.GetFeePerK() < filterrate
)
5967 if (!pto
->pfilter
->IsRelevantAndUpdate(*txinfo
.tx
)) continue;
5969 pto
->filterInventoryKnown
.insert(hash
);
5970 vInv
.push_back(inv
);
5971 if (vInv
.size() == MAX_INV_SZ
) {
5972 pto
->PushMessage(NetMsgType::INV
, vInv
);
5976 pto
->timeLastMempoolReq
= GetTime();
5979 // Determine transactions to relay
5981 // Produce a vector with all candidates for sending
5982 vector
<std::set
<uint256
>::iterator
> vInvTx
;
5983 vInvTx
.reserve(pto
->setInventoryTxToSend
.size());
5984 for (std::set
<uint256
>::iterator it
= pto
->setInventoryTxToSend
.begin(); it
!= pto
->setInventoryTxToSend
.end(); it
++) {
5985 vInvTx
.push_back(it
);
5987 CAmount filterrate
= 0;
5989 LOCK(pto
->cs_feeFilter
);
5990 filterrate
= pto
->minFeeFilter
;
5992 // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
5993 // A heap is used so that not all items need sorting if only a few are being sent.
5994 CompareInvMempoolOrder
compareInvMempoolOrder(&mempool
);
5995 std::make_heap(vInvTx
.begin(), vInvTx
.end(), compareInvMempoolOrder
);
5996 // No reason to drain out at many times the network's capacity,
5997 // especially since we have many peers and some will draw much shorter delays.
5998 unsigned int nRelayedTransactions
= 0;
5999 LOCK(pto
->cs_filter
);
6000 while (!vInvTx
.empty() && nRelayedTransactions
< INVENTORY_BROADCAST_MAX
) {
6001 // Fetch the top element from the heap
6002 std::pop_heap(vInvTx
.begin(), vInvTx
.end(), compareInvMempoolOrder
);
6003 std::set
<uint256
>::iterator it
= vInvTx
.back();
6006 // Remove it from the to-be-sent set
6007 pto
->setInventoryTxToSend
.erase(it
);
6008 // Check if not in the filter already
6009 if (pto
->filterInventoryKnown
.contains(hash
)) {
6012 // Not in the mempool anymore? don't bother sending it.
6013 auto txinfo
= mempool
.info(hash
);
6017 if (filterrate
&& txinfo
.feeRate
.GetFeePerK() < filterrate
) {
6020 if (pto
->pfilter
&& !pto
->pfilter
->IsRelevantAndUpdate(*txinfo
.tx
)) continue;
6022 vInv
.push_back(CInv(MSG_TX
, hash
));
6023 nRelayedTransactions
++;
6025 // Expire old relay messages
6026 while (!vRelayExpiration
.empty() && vRelayExpiration
.front().first
< nNow
)
6028 mapRelay
.erase(vRelayExpiration
.front().second
);
6029 vRelayExpiration
.pop_front();
6032 auto ret
= mapRelay
.insert(std::make_pair(hash
, std::move(txinfo
.tx
)));
6034 vRelayExpiration
.push_back(std::make_pair(nNow
+ 15 * 60 * 1000000, ret
.first
));
6037 if (vInv
.size() == MAX_INV_SZ
) {
6038 pto
->PushMessage(NetMsgType::INV
, vInv
);
6041 pto
->filterInventoryKnown
.insert(hash
);
6046 pto
->PushMessage(NetMsgType::INV
, vInv
);
6048 // Detect whether we're stalling
6049 nNow
= GetTimeMicros();
6050 if (!pto
->fDisconnect
&& state
.nStallingSince
&& state
.nStallingSince
< nNow
- 1000000 * BLOCK_STALLING_TIMEOUT
) {
6051 // Stalling only triggers when the block download window cannot move. During normal steady state,
6052 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
6053 // should only happen during initial block download.
6054 LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto
->id
);
6055 pto
->fDisconnect
= true;
6057 // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
6058 // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
6059 // We compensate for other peers to prevent killing off peers due to our own downstream link
6060 // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
6061 // to unreasonably increase our timeout.
6062 if (!pto
->fDisconnect
&& state
.vBlocksInFlight
.size() > 0) {
6063 QueuedBlock
&queuedBlock
= state
.vBlocksInFlight
.front();
6064 int nOtherPeersWithValidatedDownloads
= nPeersWithValidatedDownloads
- (state
.nBlocksInFlightValidHeaders
> 0);
6065 if (nNow
> state
.nDownloadingSince
+ consensusParams
.nPowTargetSpacing
* (BLOCK_DOWNLOAD_TIMEOUT_BASE
+ BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
* nOtherPeersWithValidatedDownloads
)) {
6066 LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock
.hash
.ToString(), pto
->id
);
6067 pto
->fDisconnect
= true;
6072 // Message: getdata (blocks)
6074 vector
<CInv
> vGetData
;
6075 if (!pto
->fDisconnect
&& !pto
->fClient
&& (fFetch
|| !IsInitialBlockDownload()) && state
.nBlocksInFlight
< MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
6076 vector
<CBlockIndex
*> vToDownload
;
6077 NodeId staller
= -1;
6078 FindNextBlocksToDownload(pto
->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER
- state
.nBlocksInFlight
, vToDownload
, staller
);
6079 BOOST_FOREACH(CBlockIndex
*pindex
, vToDownload
) {
6080 vGetData
.push_back(CInv(MSG_BLOCK
, pindex
->GetBlockHash()));
6081 MarkBlockAsInFlight(pto
->GetId(), pindex
->GetBlockHash(), consensusParams
, pindex
);
6082 LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex
->GetBlockHash().ToString(),
6083 pindex
->nHeight
, pto
->id
);
6085 if (state
.nBlocksInFlight
== 0 && staller
!= -1) {
6086 if (State(staller
)->nStallingSince
== 0) {
6087 State(staller
)->nStallingSince
= nNow
;
6088 LogPrint("net", "Stall started peer=%d\n", staller
);
6094 // Message: getdata (non-blocks)
6096 while (!pto
->fDisconnect
&& !pto
->mapAskFor
.empty() && (*pto
->mapAskFor
.begin()).first
<= nNow
)
6098 const CInv
& inv
= (*pto
->mapAskFor
.begin()).second
;
6099 if (!AlreadyHave(inv
))
6102 LogPrint("net", "Requesting %s peer=%d\n", inv
.ToString(), pto
->id
);
6103 vGetData
.push_back(inv
);
6104 if (vGetData
.size() >= 1000)
6106 pto
->PushMessage(NetMsgType::GETDATA
, vGetData
);
6110 //If we're not going to ask, don't expect a response.
6111 pto
->setAskFor
.erase(inv
.hash
);
6113 pto
->mapAskFor
.erase(pto
->mapAskFor
.begin());
6115 if (!vGetData
.empty())
6116 pto
->PushMessage(NetMsgType::GETDATA
, vGetData
);
6119 // Message: feefilter
6121 // We don't want white listed peers to filter txs to us if we have -whitelistforcerelay
6122 if (pto
->nVersion
>= FEEFILTER_VERSION
&& GetBoolArg("-feefilter", DEFAULT_FEEFILTER
) &&
6123 !(pto
->fWhitelisted
&& GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY
))) {
6124 CAmount currentFilter
= mempool
.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000).GetFeePerK();
6125 int64_t timeNow
= GetTimeMicros();
6126 if (timeNow
> pto
->nextSendTimeFeeFilter
) {
6127 CAmount filterToSend
= filterRounder
.round(currentFilter
);
6128 if (filterToSend
!= pto
->lastSentFeeFilter
) {
6129 pto
->PushMessage(NetMsgType::FEEFILTER
, filterToSend
);
6130 pto
->lastSentFeeFilter
= filterToSend
;
6132 pto
->nextSendTimeFeeFilter
= PoissonNextSend(timeNow
, AVG_FEEFILTER_BROADCAST_INTERVAL
);
6134 // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
6135 // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
6136 else if (timeNow
+ MAX_FEEFILTER_CHANGE_DELAY
* 1000000 < pto
->nextSendTimeFeeFilter
&&
6137 (currentFilter
< 3 * pto
->lastSentFeeFilter
/ 4 || currentFilter
> 4 * pto
->lastSentFeeFilter
/ 3)) {
6138 pto
->nextSendTimeFeeFilter
= timeNow
+ (insecure_rand() % MAX_FEEFILTER_CHANGE_DELAY
) * 1000000;
6145 std::string
CBlockFileInfo::ToString() const {
6146 return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks
, nSize
, nHeightFirst
, nHeightLast
, DateTimeStrFormat("%Y-%m-%d", nTimeFirst
), DateTimeStrFormat("%Y-%m-%d", nTimeLast
));
// Report the current versionbits (BIP9) threshold state of the given
// deployment, evaluated at the active chain tip via the shared
// versionbitscache.
//
// NOTE(review): reads chainActive and versionbitscache, so presumably the
// caller must hold cs_main (the extraction dropped the interior lines where
// a LOCK(cs_main) would normally sit) -- confirm against the full file.
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}
6161 BlockMap::iterator it1
= mapBlockIndex
.begin();
6162 for (; it1
!= mapBlockIndex
.end(); it1
++)
6163 delete (*it1
).second
;
6164 mapBlockIndex
.clear();
6166 // orphan transactions
6167 mapOrphanTransactions
.clear();
6168 mapOrphanTransactionsByPrev
.clear();
6170 } instance_of_cmaincleanup
;