1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2014 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
10 #include "arith_uint256.h"
11 #include "chainparams.h"
12 #include "checkpoints.h"
13 #include "checkqueue.h"
14 #include "consensus/consensus.h"
15 #include "consensus/merkle.h"
16 #include "consensus/validation.h"
19 #include "merkleblock.h"
21 #include "policy/policy.h"
23 #include "primitives/block.h"
24 #include "primitives/transaction.h"
25 #include "script/script.h"
26 #include "script/sigcache.h"
27 #include "script/standard.h"
28 #include "tinyformat.h"
30 #include "txmempool.h"
31 #include "ui_interface.h"
34 #include "utilmoneystr.h"
35 #include "utilstrencodings.h"
36 #include "validationinterface.h"
40 #include <boost/algorithm/string/replace.hpp>
41 #include <boost/filesystem.hpp>
42 #include <boost/filesystem/fstream.hpp>
43 #include <boost/math/distributions/poisson.hpp>
44 #include <boost/thread.hpp>
49 # error "Bitcoin cannot be compiled without assertions."
// Global validation state: definitions for the externs declared in the headers.
// Initializers give the startup defaults; most flags are overridden from
// command-line options during init (not visible in this file section).

CCriticalSection cs_main;                 // guards most of the validation state below

BlockMap mapBlockIndex;                   // all known block headers, keyed by block hash
CBlockIndex *pindexBestHeader = NULL;     // presumably the most-work validated header — verify against setters elsewhere
int64_t nTimeBestReceived = 0;            // NOTE(review): looks like the time the best block was received — confirm at update sites
CWaitableCriticalSection csBestBlock;     // paired with cvBlockChange below
CConditionVariable cvBlockChange;         // presumably signalled on best-block change — verify against notifiers
int nScriptCheckThreads = 0;              // number of parallel script-verification threads (0 = disabled)
bool fImporting = false;                  // set while -loadblock style import is in progress — confirm at call sites
bool fReindex = false;                    // set while the block index is being rebuilt
bool fTxIndex = false;                    // whether the full transaction index is enabled
bool fHavePruned = false;                 // whether any block files have ever been deleted
bool fPruneMode = false;                  // whether pruning is enabled
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG; // treat bare multisig outputs as standard?
bool fRequireStandard = true;             // reject non-standard transactions (see AcceptToMemoryPoolWorker)
bool fCheckBlockIndex = false;            // enable expensive CheckBlockIndex() consistency checks
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;      // default coins-cache budget (bytes)
uint64_t nPruneTarget = 0;                // prune target in bytes (0 = no target)
bool fAlerts = DEFAULT_ALERTS;            // whether to process alert messages

/** Fees smaller than this (in satoshi) are considered zero fee (for relaying, mining and transaction creation) */
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);

// The global transaction memory pool; constructed with the global minimum relay feerate.
CTxMemPool mempool(::minRelayTxFee);
87 map
<uint256
, COrphanTx
> mapOrphanTransactions
GUARDED_BY(cs_main
);;
88 map
<uint256
, set
<uint256
> > mapOrphanTransactionsByPrev
GUARDED_BY(cs_main
);;
89 void EraseOrphansFor(NodeId peer
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
);
/**
 * Returns true if there are nRequired or more blocks of minVersion or above
 * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
 */
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart,
                            unsigned nRequired, const Consensus::Params& consensusParams);
// Consistency check of the in-memory block index (presumably gated by
// fCheckBlockIndex — confirm against the definition later in this file).
static void CheckBlockIndex(const Consensus::Params& consensusParams);

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

// Magic prefix prepended to messages before signing, so that a signed message
// can never be mistaken for (part of) a transaction signature.
const string strMessageMagic = "Bitcoin Signed Message:\n";
106 struct CBlockIndexWorkComparator
108 bool operator()(CBlockIndex
*pa
, CBlockIndex
*pb
) const {
109 // First sort by most total work, ...
110 if (pa
->nChainWork
> pb
->nChainWork
) return false;
111 if (pa
->nChainWork
< pb
->nChainWork
) return true;
113 // ... then by earliest time received, ...
114 if (pa
->nSequenceId
< pb
->nSequenceId
) return false;
115 if (pa
->nSequenceId
> pb
->nSequenceId
) return true;
117 // Use pointer address as tie breaker (should only happen with blocks
118 // loaded from disk, as those all have id 0).
119 if (pa
< pb
) return false;
120 if (pa
> pb
) return true;
// Internal (translation-unit local) validation state. Unless noted otherwise,
// the entries below are protected by cs_main.

// The most-work block index entry known to be invalid, if any.
CBlockIndex *pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
 * missing the data for the block.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 * Pruned nodes may have entries where B is missing data.
 */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;   // per-block-file info; presumably guarded by cs_LastBlockFile — confirm
int nLastBlockFile = 0;                        // index of the block file currently being written
/** Global flag to indicate we should check to see if there are
 * block/undo files that should be deleted. Set on startup
 * or if we allocate more file space when we're in prune mode.
 */
bool fCheckForPruning = false;

/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
uint32_t nBlockSequenceId = 1;

/**
 * Sources of received blocks, saved to be able to send them reject
 * messages or ban them when processing happens afterwards. Protected by
 * cs_main.
 */
map<uint256, NodeId> mapBlockSource;

/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset. Protected by
 * cs_main.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 */
boost::scoped_ptr<CRollingBloomFilter> recentRejects;
// Chain tip observed when recentRejects was last reset.
uint256 hashRecentRejectsChainTip;
189 /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
192 CBlockIndex
*pindex
; //! Optional.
193 int64_t nTime
; //! Time of "getdata" request in microseconds.
194 bool fValidatedHeaders
; //! Whether this block has validated headers at the time of request.
195 int64_t nTimeDisconnect
; //! The timeout for this block request (for disconnecting a slow peer)
// For every block currently being downloaded: which peer we asked, and the
// position of the request in that peer's in-flight list. Protected by cs_main.
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;

/** Number of blocks in flight with validated headers. */
int nQueuedValidatedHeaders = 0;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
set<int> setDirtyFileInfo;
212 //////////////////////////////////////////////////////////////////////////////
214 // Registration of network node signals.
219 struct CBlockReject
{
220 unsigned char chRejectCode
;
221 string strRejectReason
;
226 * Maintain validation-specific state about nodes, protected by cs_main, instead
227 * by CNode's own locks. This simplifies asynchronous operation, where
228 * processing of incoming data is done after the ProcessMessage call returns,
229 * and we're no longer holding the node's locks.
232 //! The peer's address
234 //! Whether we have a fully established connection.
235 bool fCurrentlyConnected
;
236 //! Accumulated misbehaviour score for this peer.
238 //! Whether this peer should be disconnected and banned (unless whitelisted).
240 //! String name of this peer (debugging/logging purposes).
242 //! List of asynchronously-determined block rejections to notify this peer about.
243 std::vector
<CBlockReject
> rejects
;
244 //! The best known block we know this peer has announced.
245 CBlockIndex
*pindexBestKnownBlock
;
246 //! The hash of the last unknown block this peer has announced.
247 uint256 hashLastUnknownBlock
;
248 //! The last full block we both have.
249 CBlockIndex
*pindexLastCommonBlock
;
250 //! The best header we have sent our peer.
251 CBlockIndex
*pindexBestHeaderSent
;
252 //! Whether we've started headers synchronization with this peer.
254 //! Since when we're stalling block download progress (in microseconds), or 0.
255 int64_t nStallingSince
;
256 list
<QueuedBlock
> vBlocksInFlight
;
258 int nBlocksInFlightValidHeaders
;
259 //! Whether we consider this a preferred download peer.
260 bool fPreferredDownload
;
261 //! Whether this peer wants invs or headers (when possible) for block announcements.
265 fCurrentlyConnected
= false;
268 pindexBestKnownBlock
= NULL
;
269 hashLastUnknownBlock
.SetNull();
270 pindexLastCommonBlock
= NULL
;
271 pindexBestHeaderSent
= NULL
;
272 fSyncStarted
= false;
275 nBlocksInFlightValidHeaders
= 0;
276 fPreferredDownload
= false;
277 fPreferHeaders
= false;
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;
285 CNodeState
*State(NodeId pnode
) {
286 map
<NodeId
, CNodeState
>::iterator it
= mapNodeState
.find(pnode
);
287 if (it
== mapNodeState
.end())
295 return chainActive
.Height();
298 void UpdatePreferredDownload(CNode
* node
, CNodeState
* state
)
300 nPreferredDownload
-= state
->fPreferredDownload
;
302 // Whether this node should be marked as a preferred download node.
303 state
->fPreferredDownload
= (!node
->fInbound
|| node
->fWhitelisted
) && !node
->fOneShot
&& !node
->fClient
;
305 nPreferredDownload
+= state
->fPreferredDownload
;
308 // Returns time at which to timeout block request (nTime in microseconds)
309 int64_t GetBlockTimeout(int64_t nTime
, int nValidatedQueuedBefore
, const Consensus::Params
&consensusParams
)
311 return nTime
+ 500000 * consensusParams
.nPowTargetSpacing
* (4 + nValidatedQueuedBefore
);
314 void InitializeNode(NodeId nodeid
, const CNode
*pnode
) {
316 CNodeState
&state
= mapNodeState
.insert(std::make_pair(nodeid
, CNodeState())).first
->second
;
317 state
.name
= pnode
->addrName
;
318 state
.address
= pnode
->addr
;
321 void FinalizeNode(NodeId nodeid
) {
323 CNodeState
*state
= State(nodeid
);
325 if (state
->fSyncStarted
)
328 if (state
->nMisbehavior
== 0 && state
->fCurrentlyConnected
) {
329 AddressCurrentlyConnected(state
->address
);
332 BOOST_FOREACH(const QueuedBlock
& entry
, state
->vBlocksInFlight
)
333 mapBlocksInFlight
.erase(entry
.hash
);
334 EraseOrphansFor(nodeid
);
335 nPreferredDownload
-= state
->fPreferredDownload
;
337 mapNodeState
.erase(nodeid
);
341 // Returns a bool indicating whether we requested this block.
342 bool MarkBlockAsReceived(const uint256
& hash
) {
343 map
<uint256
, pair
<NodeId
, list
<QueuedBlock
>::iterator
> >::iterator itInFlight
= mapBlocksInFlight
.find(hash
);
344 if (itInFlight
!= mapBlocksInFlight
.end()) {
345 CNodeState
*state
= State(itInFlight
->second
.first
);
346 nQueuedValidatedHeaders
-= itInFlight
->second
.second
->fValidatedHeaders
;
347 state
->nBlocksInFlightValidHeaders
-= itInFlight
->second
.second
->fValidatedHeaders
;
348 state
->vBlocksInFlight
.erase(itInFlight
->second
.second
);
349 state
->nBlocksInFlight
--;
350 state
->nStallingSince
= 0;
351 mapBlocksInFlight
.erase(itInFlight
);
358 void MarkBlockAsInFlight(NodeId nodeid
, const uint256
& hash
, const Consensus::Params
& consensusParams
, CBlockIndex
*pindex
= NULL
) {
359 CNodeState
*state
= State(nodeid
);
360 assert(state
!= NULL
);
362 // Make sure it's not listed somewhere already.
363 MarkBlockAsReceived(hash
);
365 int64_t nNow
= GetTimeMicros();
366 QueuedBlock newentry
= {hash
, pindex
, nNow
, pindex
!= NULL
, GetBlockTimeout(nNow
, nQueuedValidatedHeaders
, consensusParams
)};
367 nQueuedValidatedHeaders
+= newentry
.fValidatedHeaders
;
368 list
<QueuedBlock
>::iterator it
= state
->vBlocksInFlight
.insert(state
->vBlocksInFlight
.end(), newentry
);
369 state
->nBlocksInFlight
++;
370 state
->nBlocksInFlightValidHeaders
+= newentry
.fValidatedHeaders
;
371 mapBlocksInFlight
[hash
] = std::make_pair(nodeid
, it
);
374 /** Check whether the last unknown block a peer advertized is not yet known. */
375 void ProcessBlockAvailability(NodeId nodeid
) {
376 CNodeState
*state
= State(nodeid
);
377 assert(state
!= NULL
);
379 if (!state
->hashLastUnknownBlock
.IsNull()) {
380 BlockMap::iterator itOld
= mapBlockIndex
.find(state
->hashLastUnknownBlock
);
381 if (itOld
!= mapBlockIndex
.end() && itOld
->second
->nChainWork
> 0) {
382 if (state
->pindexBestKnownBlock
== NULL
|| itOld
->second
->nChainWork
>= state
->pindexBestKnownBlock
->nChainWork
)
383 state
->pindexBestKnownBlock
= itOld
->second
;
384 state
->hashLastUnknownBlock
.SetNull();
389 /** Update tracking information about which blocks a peer is assumed to have. */
390 void UpdateBlockAvailability(NodeId nodeid
, const uint256
&hash
) {
391 CNodeState
*state
= State(nodeid
);
392 assert(state
!= NULL
);
394 ProcessBlockAvailability(nodeid
);
396 BlockMap::iterator it
= mapBlockIndex
.find(hash
);
397 if (it
!= mapBlockIndex
.end() && it
->second
->nChainWork
> 0) {
398 // An actually better block was announced.
399 if (state
->pindexBestKnownBlock
== NULL
|| it
->second
->nChainWork
>= state
->pindexBestKnownBlock
->nChainWork
)
400 state
->pindexBestKnownBlock
= it
->second
;
402 // An unknown block was announced; just assume that the latest one is the best one.
403 state
->hashLastUnknownBlock
= hash
;
408 bool CanDirectFetch(const Consensus::Params
&consensusParams
)
410 return chainActive
.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams
.nPowTargetSpacing
* 20;
414 bool PeerHasHeader(CNodeState
*state
, CBlockIndex
*pindex
)
416 if (state
->pindexBestKnownBlock
&& pindex
== state
->pindexBestKnownBlock
->GetAncestor(pindex
->nHeight
))
418 if (state
->pindexBestHeaderSent
&& pindex
== state
->pindexBestHeaderSent
->GetAncestor(pindex
->nHeight
))
423 /** Find the last common ancestor two blocks have.
424 * Both pa and pb must be non-NULL. */
425 CBlockIndex
* LastCommonAncestor(CBlockIndex
* pa
, CBlockIndex
* pb
) {
426 if (pa
->nHeight
> pb
->nHeight
) {
427 pa
= pa
->GetAncestor(pb
->nHeight
);
428 } else if (pb
->nHeight
> pa
->nHeight
) {
429 pb
= pb
->GetAncestor(pa
->nHeight
);
432 while (pa
!= pb
&& pa
&& pb
) {
437 // Eventually all chain branches meet at the genesis block.
442 /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
443 * at most count entries. */
444 void FindNextBlocksToDownload(NodeId nodeid
, unsigned int count
, std::vector
<CBlockIndex
*>& vBlocks
, NodeId
& nodeStaller
) {
448 vBlocks
.reserve(vBlocks
.size() + count
);
449 CNodeState
*state
= State(nodeid
);
450 assert(state
!= NULL
);
452 // Make sure pindexBestKnownBlock is up to date, we'll need it.
453 ProcessBlockAvailability(nodeid
);
455 if (state
->pindexBestKnownBlock
== NULL
|| state
->pindexBestKnownBlock
->nChainWork
< chainActive
.Tip()->nChainWork
) {
456 // This peer has nothing interesting.
460 if (state
->pindexLastCommonBlock
== NULL
) {
461 // Bootstrap quickly by guessing a parent of our best tip is the forking point.
462 // Guessing wrong in either direction is not a problem.
463 state
->pindexLastCommonBlock
= chainActive
[std::min(state
->pindexBestKnownBlock
->nHeight
, chainActive
.Height())];
466 // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
467 // of its current tip anymore. Go back enough to fix that.
468 state
->pindexLastCommonBlock
= LastCommonAncestor(state
->pindexLastCommonBlock
, state
->pindexBestKnownBlock
);
469 if (state
->pindexLastCommonBlock
== state
->pindexBestKnownBlock
)
472 std::vector
<CBlockIndex
*> vToFetch
;
473 CBlockIndex
*pindexWalk
= state
->pindexLastCommonBlock
;
474 // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
475 // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
476 // download that next block if the window were 1 larger.
477 int nWindowEnd
= state
->pindexLastCommonBlock
->nHeight
+ BLOCK_DOWNLOAD_WINDOW
;
478 int nMaxHeight
= std::min
<int>(state
->pindexBestKnownBlock
->nHeight
, nWindowEnd
+ 1);
479 NodeId waitingfor
= -1;
480 while (pindexWalk
->nHeight
< nMaxHeight
) {
481 // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
482 // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
483 // as iterating over ~100 CBlockIndex* entries anyway.
484 int nToFetch
= std::min(nMaxHeight
- pindexWalk
->nHeight
, std::max
<int>(count
- vBlocks
.size(), 128));
485 vToFetch
.resize(nToFetch
);
486 pindexWalk
= state
->pindexBestKnownBlock
->GetAncestor(pindexWalk
->nHeight
+ nToFetch
);
487 vToFetch
[nToFetch
- 1] = pindexWalk
;
488 for (unsigned int i
= nToFetch
- 1; i
> 0; i
--) {
489 vToFetch
[i
- 1] = vToFetch
[i
]->pprev
;
492 // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
493 // are not yet downloaded and not in flight to vBlocks. In the mean time, update
494 // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
495 // already part of our chain (and therefore don't need it even if pruned).
496 BOOST_FOREACH(CBlockIndex
* pindex
, vToFetch
) {
497 if (!pindex
->IsValid(BLOCK_VALID_TREE
)) {
498 // We consider the chain that this peer is on invalid.
501 if (pindex
->nStatus
& BLOCK_HAVE_DATA
|| chainActive
.Contains(pindex
)) {
502 if (pindex
->nChainTx
)
503 state
->pindexLastCommonBlock
= pindex
;
504 } else if (mapBlocksInFlight
.count(pindex
->GetBlockHash()) == 0) {
505 // The block is not already downloaded, and not yet in flight.
506 if (pindex
->nHeight
> nWindowEnd
) {
507 // We reached the end of the window.
508 if (vBlocks
.size() == 0 && waitingfor
!= nodeid
) {
509 // We aren't able to fetch anything, but we would be if the download window was one larger.
510 nodeStaller
= waitingfor
;
514 vBlocks
.push_back(pindex
);
515 if (vBlocks
.size() == count
) {
518 } else if (waitingfor
== -1) {
519 // This is the first already-in-flight block.
520 waitingfor
= mapBlocksInFlight
[pindex
->GetBlockHash()].first
;
528 bool GetNodeStateStats(NodeId nodeid
, CNodeStateStats
&stats
) {
530 CNodeState
*state
= State(nodeid
);
533 stats
.nMisbehavior
= state
->nMisbehavior
;
534 stats
.nSyncHeight
= state
->pindexBestKnownBlock
? state
->pindexBestKnownBlock
->nHeight
: -1;
535 stats
.nCommonHeight
= state
->pindexLastCommonBlock
? state
->pindexLastCommonBlock
->nHeight
: -1;
536 BOOST_FOREACH(const QueuedBlock
& queue
, state
->vBlocksInFlight
) {
538 stats
.vHeightInFlight
.push_back(queue
.pindex
->nHeight
);
543 void RegisterNodeSignals(CNodeSignals
& nodeSignals
)
545 nodeSignals
.GetHeight
.connect(&GetHeight
);
546 nodeSignals
.ProcessMessages
.connect(&ProcessMessages
);
547 nodeSignals
.SendMessages
.connect(&SendMessages
);
548 nodeSignals
.InitializeNode
.connect(&InitializeNode
);
549 nodeSignals
.FinalizeNode
.connect(&FinalizeNode
);
552 void UnregisterNodeSignals(CNodeSignals
& nodeSignals
)
554 nodeSignals
.GetHeight
.disconnect(&GetHeight
);
555 nodeSignals
.ProcessMessages
.disconnect(&ProcessMessages
);
556 nodeSignals
.SendMessages
.disconnect(&SendMessages
);
557 nodeSignals
.InitializeNode
.disconnect(&InitializeNode
);
558 nodeSignals
.FinalizeNode
.disconnect(&FinalizeNode
);
561 CBlockIndex
* FindForkInGlobalIndex(const CChain
& chain
, const CBlockLocator
& locator
)
563 // Find the first block the caller has in the main chain
564 BOOST_FOREACH(const uint256
& hash
, locator
.vHave
) {
565 BlockMap::iterator mi
= mapBlockIndex
.find(hash
);
566 if (mi
!= mapBlockIndex
.end())
568 CBlockIndex
* pindex
= (*mi
).second
;
569 if (chain
.Contains(pindex
))
573 return chain
.Genesis();
// Global UTXO cache on top of the on-disk chainstate. NULL until initialized
// elsewhere (init code) — verify lifetime against the initializer.
CCoinsViewCache *pcoinsTip = NULL;
// Global block-index database handle; NULL until initialized elsewhere.
CBlockTreeDB *pblocktree = NULL;
579 //////////////////////////////////////////////////////////////////////////////
581 // mapOrphanTransactions
584 bool AddOrphanTx(const CTransaction
& tx
, NodeId peer
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
586 uint256 hash
= tx
.GetHash();
587 if (mapOrphanTransactions
.count(hash
))
590 // Ignore big transactions, to avoid a
591 // send-big-orphans memory exhaustion attack. If a peer has a legitimate
592 // large transaction with a missing parent then we assume
593 // it will rebroadcast it later, after the parent transaction(s)
594 // have been mined or received.
595 // 10,000 orphans, each of which is at most 5,000 bytes big is
596 // at most 500 megabytes of orphans:
597 unsigned int sz
= tx
.GetSerializeSize(SER_NETWORK
, CTransaction::CURRENT_VERSION
);
600 LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz
, hash
.ToString());
604 mapOrphanTransactions
[hash
].tx
= tx
;
605 mapOrphanTransactions
[hash
].fromPeer
= peer
;
606 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
607 mapOrphanTransactionsByPrev
[txin
.prevout
.hash
].insert(hash
);
609 LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash
.ToString(),
610 mapOrphanTransactions
.size(), mapOrphanTransactionsByPrev
.size());
614 void static EraseOrphanTx(uint256 hash
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
616 map
<uint256
, COrphanTx
>::iterator it
= mapOrphanTransactions
.find(hash
);
617 if (it
== mapOrphanTransactions
.end())
619 BOOST_FOREACH(const CTxIn
& txin
, it
->second
.tx
.vin
)
621 map
<uint256
, set
<uint256
> >::iterator itPrev
= mapOrphanTransactionsByPrev
.find(txin
.prevout
.hash
);
622 if (itPrev
== mapOrphanTransactionsByPrev
.end())
624 itPrev
->second
.erase(hash
);
625 if (itPrev
->second
.empty())
626 mapOrphanTransactionsByPrev
.erase(itPrev
);
628 mapOrphanTransactions
.erase(it
);
631 void EraseOrphansFor(NodeId peer
)
634 map
<uint256
, COrphanTx
>::iterator iter
= mapOrphanTransactions
.begin();
635 while (iter
!= mapOrphanTransactions
.end())
637 map
<uint256
, COrphanTx
>::iterator maybeErase
= iter
++; // increment to avoid iterator becoming invalid
638 if (maybeErase
->second
.fromPeer
== peer
)
640 EraseOrphanTx(maybeErase
->second
.tx
.GetHash());
644 if (nErased
> 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased
, peer
);
648 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
650 unsigned int nEvicted
= 0;
651 while (mapOrphanTransactions
.size() > nMaxOrphans
)
653 // Evict a random orphan:
654 uint256 randomhash
= GetRandHash();
655 map
<uint256
, COrphanTx
>::iterator it
= mapOrphanTransactions
.lower_bound(randomhash
);
656 if (it
== mapOrphanTransactions
.end())
657 it
= mapOrphanTransactions
.begin();
658 EraseOrphanTx(it
->first
);
664 bool IsFinalTx(const CTransaction
&tx
, int nBlockHeight
, int64_t nBlockTime
)
666 if (tx
.nLockTime
== 0)
668 if ((int64_t)tx
.nLockTime
< ((int64_t)tx
.nLockTime
< LOCKTIME_THRESHOLD
? (int64_t)nBlockHeight
: nBlockTime
))
670 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
676 bool CheckFinalTx(const CTransaction
&tx
, int flags
)
678 AssertLockHeld(cs_main
);
680 // By convention a negative value for flags indicates that the
681 // current network-enforced consensus rules should be used. In
682 // a future soft-fork scenario that would mean checking which
683 // rules would be enforced for the next block and setting the
684 // appropriate flags. At the present time no soft-forks are
685 // scheduled, so no flags are set.
686 flags
= std::max(flags
, 0);
688 // CheckFinalTx() uses chainActive.Height()+1 to evaluate
689 // nLockTime because when IsFinalTx() is called within
690 // CBlock::AcceptBlock(), the height of the block *being*
691 // evaluated is what is used. Thus if we want to know if a
692 // transaction can be part of the *next* block, we need to call
693 // IsFinalTx() with one more than chainActive.Height().
694 const int nBlockHeight
= chainActive
.Height() + 1;
696 // BIP113 will require that time-locked transactions have nLockTime set to
697 // less than the median time of the previous block they're contained in.
698 // When the next block is created its previous block will be the current
699 // chain tip, so we use that to calculate the median time passed to
700 // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
701 const int64_t nBlockTime
= (flags
& LOCKTIME_MEDIAN_TIME_PAST
)
702 ? chainActive
.Tip()->GetMedianTimePast()
705 return IsFinalTx(tx
, nBlockHeight
, nBlockTime
);
708 unsigned int GetLegacySigOpCount(const CTransaction
& tx
)
710 unsigned int nSigOps
= 0;
711 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
713 nSigOps
+= txin
.scriptSig
.GetSigOpCount(false);
715 BOOST_FOREACH(const CTxOut
& txout
, tx
.vout
)
717 nSigOps
+= txout
.scriptPubKey
.GetSigOpCount(false);
722 unsigned int GetP2SHSigOpCount(const CTransaction
& tx
, const CCoinsViewCache
& inputs
)
727 unsigned int nSigOps
= 0;
728 for (unsigned int i
= 0; i
< tx
.vin
.size(); i
++)
730 const CTxOut
&prevout
= inputs
.GetOutputFor(tx
.vin
[i
]);
731 if (prevout
.scriptPubKey
.IsPayToScriptHash())
732 nSigOps
+= prevout
.scriptPubKey
.GetSigOpCount(tx
.vin
[i
].scriptSig
);
744 bool CheckTransaction(const CTransaction
& tx
, CValidationState
&state
)
746 // Basic checks that don't depend on any context
748 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-vin-empty");
750 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-vout-empty");
752 if (::GetSerializeSize(tx
, SER_NETWORK
, PROTOCOL_VERSION
) > MAX_BLOCK_SIZE
)
753 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-oversize");
755 // Check for negative or overflow output values
756 CAmount nValueOut
= 0;
757 BOOST_FOREACH(const CTxOut
& txout
, tx
.vout
)
759 if (txout
.nValue
< 0)
760 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-vout-negative");
761 if (txout
.nValue
> MAX_MONEY
)
762 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-vout-toolarge");
763 nValueOut
+= txout
.nValue
;
764 if (!MoneyRange(nValueOut
))
765 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-txouttotal-toolarge");
768 // Check for duplicate inputs
769 set
<COutPoint
> vInOutPoints
;
770 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
772 if (vInOutPoints
.count(txin
.prevout
))
773 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-inputs-duplicate");
774 vInOutPoints
.insert(txin
.prevout
);
779 if (tx
.vin
[0].scriptSig
.size() < 2 || tx
.vin
[0].scriptSig
.size() > 100)
780 return state
.DoS(100, false, REJECT_INVALID
, "bad-cb-length");
784 BOOST_FOREACH(const CTxIn
& txin
, tx
.vin
)
785 if (txin
.prevout
.IsNull())
786 return state
.DoS(10, false, REJECT_INVALID
, "bad-txns-prevout-null");
792 void LimitMempoolSize(CTxMemPool
& pool
, size_t limit
, unsigned long age
) {
793 int expired
= pool
.Expire(GetTime() - age
);
795 LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired
);
797 std::vector
<uint256
> vNoSpendsRemaining
;
798 pool
.TrimToSize(limit
, &vNoSpendsRemaining
);
799 BOOST_FOREACH(const uint256
& removed
, vNoSpendsRemaining
)
800 pcoinsTip
->Uncache(removed
);
803 CAmount
GetMinRelayFee(const CTransaction
& tx
, const CTxMemPool
& pool
, unsigned int nBytes
, bool fAllowFree
)
805 uint256 hash
= tx
.GetHash();
806 double dPriorityDelta
= 0;
807 CAmount nFeeDelta
= 0;
808 pool
.ApplyDeltas(hash
, dPriorityDelta
, nFeeDelta
);
809 if (dPriorityDelta
> 0 || nFeeDelta
> 0)
812 CAmount nMinFee
= ::minRelayTxFee
.GetFee(nBytes
);
816 // There is a free transaction area in blocks created by most miners,
817 // * If we are relaying we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000
818 // to be considered to fall into this category. We don't want to encourage sending
819 // multiple transactions instead of one big transaction to avoid fees.
820 if (nBytes
< (DEFAULT_BLOCK_PRIORITY_SIZE
- 1000))
824 if (!MoneyRange(nMinFee
))
829 /** Convert CValidationState to a human-readable message for logging */
830 std::string
FormatStateMessage(const CValidationState
&state
)
832 return strprintf("%s%s (code %i)",
833 state
.GetRejectReason(),
834 state
.GetDebugMessage().empty() ? "" : ", "+state
.GetDebugMessage(),
835 state
.GetRejectCode());
838 bool AcceptToMemoryPoolWorker(CTxMemPool
& pool
, CValidationState
&state
, const CTransaction
&tx
, bool fLimitFree
,
839 bool* pfMissingInputs
, bool fOverrideMempoolLimit
, bool fRejectAbsurdFee
,
840 std::vector
<uint256
>& vHashTxnToUncache
)
842 AssertLockHeld(cs_main
);
844 *pfMissingInputs
= false;
846 if (!CheckTransaction(tx
, state
))
849 // Coinbase is only valid in a block, not as a loose transaction
851 return state
.DoS(100, false, REJECT_INVALID
, "coinbase");
853 // Rather not work on nonstandard transactions (unless -testnet/-regtest)
855 if (fRequireStandard
&& !IsStandardTx(tx
, reason
))
856 return state
.DoS(0, false, REJECT_NONSTANDARD
, reason
);
858 // Only accept nLockTime-using transactions that can be mined in the next
859 // block; we don't want our mempool filled up with transactions that can't
861 if (!CheckFinalTx(tx
, STANDARD_LOCKTIME_VERIFY_FLAGS
))
862 return state
.DoS(0, false, REJECT_NONSTANDARD
, "non-final");
864 // is it already in the memory pool?
865 uint256 hash
= tx
.GetHash();
866 if (pool
.exists(hash
))
867 return state
.Invalid(false, REJECT_ALREADY_KNOWN
, "txn-already-in-mempool");
869 // Check for conflicts with in-memory transactions
870 set
<uint256
> setConflicts
;
872 LOCK(pool
.cs
); // protect pool.mapNextTx
873 BOOST_FOREACH(const CTxIn
&txin
, tx
.vin
)
875 if (pool
.mapNextTx
.count(txin
.prevout
))
877 const CTransaction
*ptxConflicting
= pool
.mapNextTx
[txin
.prevout
].ptx
;
878 if (!setConflicts
.count(ptxConflicting
->GetHash()))
880 // Allow opt-out of transaction replacement by setting
881 // nSequence >= maxint-1 on all inputs.
883 // maxint-1 is picked to still allow use of nLockTime by
884 // non-replacable transactions. All inputs rather than just one
885 // is for the sake of multi-party protocols, where we don't
886 // want a single party to be able to disable replacement.
888 // The opt-out ignores descendants as anyone relying on
889 // first-seen mempool behavior should be checking all
890 // unconfirmed ancestors anyway; doing otherwise is hopelessly
892 bool fReplacementOptOut
= true;
893 BOOST_FOREACH(const CTxIn
&txin
, ptxConflicting
->vin
)
895 if (txin
.nSequence
< std::numeric_limits
<unsigned int>::max()-1)
897 fReplacementOptOut
= false;
901 if (fReplacementOptOut
)
902 return state
.Invalid(false, REJECT_CONFLICT
, "txn-mempool-conflict");
904 setConflicts
.insert(ptxConflicting
->GetHash());
912 CCoinsViewCache
view(&dummy
);
914 CAmount nValueIn
= 0;
917 CCoinsViewMemPool
viewMemPool(pcoinsTip
, pool
);
918 view
.SetBackend(viewMemPool
);
920 // do we already have it?
921 bool fHadTxInCache
= pcoinsTip
->HaveCoinsInCache(hash
);
922 if (view
.HaveCoins(hash
)) {
924 vHashTxnToUncache
.push_back(hash
);
925 return state
.Invalid(false, REJECT_ALREADY_KNOWN
, "txn-already-known");
928 // do all inputs exist?
929 // Note that this does not check for the presence of actual outputs (see the next check for that),
930 // and only helps with filling in pfMissingInputs (to determine missing vs spent).
931 BOOST_FOREACH(const CTxIn txin
, tx
.vin
) {
932 if (!pcoinsTip
->HaveCoinsInCache(txin
.prevout
.hash
))
933 vHashTxnToUncache
.push_back(txin
.prevout
.hash
);
934 if (!view
.HaveCoins(txin
.prevout
.hash
)) {
936 *pfMissingInputs
= true;
937 return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
941 // are the actual inputs available?
942 if (!view
.HaveInputs(tx
))
943 return state
.Invalid(false, REJECT_DUPLICATE
, "bad-txns-inputs-spent");
945 // Bring the best block into scope
948 nValueIn
= view
.GetValueIn(tx
);
950 // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
951 view
.SetBackend(dummy
);
954 // Check for non-standard pay-to-script-hash in inputs
955 if (fRequireStandard
&& !AreInputsStandard(tx
, view
))
956 return state
.Invalid(false, REJECT_NONSTANDARD
, "bad-txns-nonstandard-inputs");
958 // Check that the transaction doesn't have an excessive number of
959 // sigops, making it impossible to mine. Since the coinbase transaction
960 // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
961 // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
962 // merely non-standard transaction.
963 unsigned int nSigOps
= GetLegacySigOpCount(tx
);
964 nSigOps
+= GetP2SHSigOpCount(tx
, view
);
965 if (nSigOps
> MAX_STANDARD_TX_SIGOPS
)
966 return state
.DoS(0, false, REJECT_NONSTANDARD
, "bad-txns-too-many-sigops", false,
967 strprintf("%d > %d", nSigOps
, MAX_STANDARD_TX_SIGOPS
));
969 CAmount nValueOut
= tx
.GetValueOut();
970 CAmount nFees
= nValueIn
-nValueOut
;
971 CAmount inChainInputValue
;
972 double dPriority
= view
.GetPriority(tx
, chainActive
.Height(), inChainInputValue
);
974 // Keep track of transactions that spend a coinbase, which we re-scan
975 // during reorgs to ensure COINBASE_MATURITY is still met.
976 bool fSpendsCoinbase
= false;
977 BOOST_FOREACH(const CTxIn
&txin
, tx
.vin
) {
978 const CCoins
*coins
= view
.AccessCoins(txin
.prevout
.hash
);
979 if (coins
->IsCoinBase()) {
980 fSpendsCoinbase
= true;
985 CTxMemPoolEntry
entry(tx
, nFees
, GetTime(), dPriority
, chainActive
.Height(), pool
.HasNoInputsOf(tx
), inChainInputValue
, fSpendsCoinbase
, nSigOps
);
986 unsigned int nSize
= entry
.GetTxSize();
988 // Don't accept it if it can't get into a block
989 CAmount txMinFee
= GetMinRelayFee(tx
, pool
, nSize
, true);
990 if (fLimitFree
&& nFees
< txMinFee
)
991 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "insufficient fee", false,
992 strprintf("%d < %d", nFees
, txMinFee
));
994 CAmount mempoolRejectFee
= pool
.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000).GetFee(nSize
);
995 if (mempoolRejectFee
> 0 && nFees
< mempoolRejectFee
) {
996 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "mempool min fee not met", false, strprintf("%d < %d", nFees
, mempoolRejectFee
));
997 } else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY
) && nFees
< ::minRelayTxFee
.GetFee(nSize
) && !AllowFree(entry
.GetPriority(chainActive
.Height() + 1))) {
998 // Require that free transactions have sufficient priority to be mined in the next block.
999 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "insufficient priority");
1002 // Continuously rate-limit free (really, very-low-fee) transactions
1003 // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
1004 // be annoying or make others' transactions take longer to confirm.
1005 if (fLimitFree
&& nFees
< ::minRelayTxFee
.GetFee(nSize
))
1007 static CCriticalSection csFreeLimiter
;
1008 static double dFreeCount
;
1009 static int64_t nLastTime
;
1010 int64_t nNow
= GetTime();
1012 LOCK(csFreeLimiter
);
1014 // Use an exponentially decaying ~10-minute window:
1015 dFreeCount
*= pow(1.0 - 1.0/600.0, (double)(nNow
- nLastTime
));
1017 // -limitfreerelay unit is thousand-bytes-per-minute
1018 // At default rate it would take over a month to fill 1GB
1019 if (dFreeCount
>= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY
) * 10 * 1000)
1020 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "rate limited free transaction");
1021 LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount
, dFreeCount
+nSize
);
1022 dFreeCount
+= nSize
;
1025 if (fRejectAbsurdFee
&& nFees
> ::minRelayTxFee
.GetFee(nSize
) * 10000)
1026 return state
.Invalid(false,
1027 REJECT_HIGHFEE
, "absurdly-high-fee",
1028 strprintf("%d > %d", nFees
, ::minRelayTxFee
.GetFee(nSize
) * 10000));
1030 // Calculate in-mempool ancestors, up to a limit.
1031 CTxMemPool::setEntries setAncestors
;
1032 size_t nLimitAncestors
= GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT
);
1033 size_t nLimitAncestorSize
= GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT
)*1000;
1034 size_t nLimitDescendants
= GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT
);
1035 size_t nLimitDescendantSize
= GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT
)*1000;
1036 std::string errString
;
1037 if (!pool
.CalculateMemPoolAncestors(entry
, setAncestors
, nLimitAncestors
, nLimitAncestorSize
, nLimitDescendants
, nLimitDescendantSize
, errString
)) {
1038 return state
.DoS(0, false, REJECT_NONSTANDARD
, "too-long-mempool-chain", false, errString
);
1041 // A transaction that spends outputs that would be replaced by it is invalid. Now
1042 // that we have the set of all ancestors we can detect this
1043 // pathological case by making sure setConflicts and setAncestors don't
1045 BOOST_FOREACH(CTxMemPool::txiter ancestorIt
, setAncestors
)
1047 const uint256
&hashAncestor
= ancestorIt
->GetTx().GetHash();
1048 if (setConflicts
.count(hashAncestor
))
1050 return state
.DoS(10, error("AcceptToMemoryPool: %s spends conflicting transaction %s",
1052 hashAncestor
.ToString()),
1053 REJECT_INVALID
, "bad-txns-spends-conflicting-tx");
1057 // Check if it's economically rational to mine this transaction rather
1058 // than the ones it replaces.
1059 CAmount nConflictingFees
= 0;
1060 size_t nConflictingSize
= 0;
1061 uint64_t nConflictingCount
= 0;
1062 CTxMemPool::setEntries allConflicting
;
1064 // If we don't hold the lock allConflicting might be incomplete; the
1065 // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
1066 // mempool consistency for us.
1068 if (setConflicts
.size())
1070 CFeeRate
newFeeRate(nFees
, nSize
);
1071 set
<uint256
> setConflictsParents
;
1072 const int maxDescendantsToVisit
= 100;
1073 CTxMemPool::setEntries setIterConflicting
;
1074 BOOST_FOREACH(const uint256
&hashConflicting
, setConflicts
)
1076 CTxMemPool::txiter mi
= pool
.mapTx
.find(hashConflicting
);
1077 if (mi
== pool
.mapTx
.end())
1080 // Save these to avoid repeated lookups
1081 setIterConflicting
.insert(mi
);
1083 // If this entry is "dirty", then we don't have descendant
1084 // state for this transaction, which means we probably have
1085 // lots of in-mempool descendants.
1086 // Don't allow replacements of dirty transactions, to ensure
1087 // that we don't spend too much time walking descendants.
1088 // This should be rare.
1089 if (mi
->IsDirty()) {
1091 error("AcceptToMemoryPool: rejecting replacement %s; cannot replace tx %s with untracked descendants",
1093 mi
->GetTx().GetHash().ToString()),
1094 REJECT_NONSTANDARD
, "too many potential replacements");
1097 // Don't allow the replacement to reduce the feerate of the
1100 // We usually don't want to accept replacements with lower
1101 // feerates than what they replaced as that would lower the
1102 // feerate of the next block. Requiring that the feerate always
1103 // be increased is also an easy-to-reason about way to prevent
1104 // DoS attacks via replacements.
1106 // The mining code doesn't (currently) take children into
1107 // account (CPFP) so we only consider the feerates of
1108 // transactions being directly replaced, not their indirect
1109 // descendants. While that does mean high feerate children are
1110 // ignored when deciding whether or not to replace, we do
1111 // require the replacement to pay more overall fees too,
1112 // mitigating most cases.
1113 CFeeRate
oldFeeRate(mi
->GetFee(), mi
->GetTxSize());
1114 if (newFeeRate
<= oldFeeRate
)
1117 error("AcceptToMemoryPool: rejecting replacement %s; new feerate %s <= old feerate %s",
1119 newFeeRate
.ToString(),
1120 oldFeeRate
.ToString()),
1121 REJECT_INSUFFICIENTFEE
, "insufficient fee");
1124 BOOST_FOREACH(const CTxIn
&txin
, mi
->GetTx().vin
)
1126 setConflictsParents
.insert(txin
.prevout
.hash
);
1129 nConflictingCount
+= mi
->GetCountWithDescendants();
1131 // This potentially overestimates the number of actual descendants
1132 // but we just want to be conservative to avoid doing too much
1134 if (nConflictingCount
<= maxDescendantsToVisit
) {
1135 // If not too many to replace, then calculate the set of
1136 // transactions that would have to be evicted
1137 BOOST_FOREACH(CTxMemPool::txiter it
, setIterConflicting
) {
1138 pool
.CalculateDescendants(it
, allConflicting
);
1140 BOOST_FOREACH(CTxMemPool::txiter it
, allConflicting
) {
1141 nConflictingFees
+= it
->GetFee();
1142 nConflictingSize
+= it
->GetTxSize();
1146 error("AcceptToMemoryPool: rejecting replacement %s; too many potential replacements (%d > %d)\n",
1149 maxDescendantsToVisit
),
1150 REJECT_NONSTANDARD
, "too many potential replacements");
1153 for (unsigned int j
= 0; j
< tx
.vin
.size(); j
++)
1155 // We don't want to accept replacements that require low
1156 // feerate junk to be mined first. Ideally we'd keep track of
1157 // the ancestor feerates and make the decision based on that,
1158 // but for now requiring all new inputs to be confirmed works.
1159 if (!setConflictsParents
.count(tx
.vin
[j
].prevout
.hash
))
1161 // Rather than check the UTXO set - potentially expensive -
1162 // it's cheaper to just check if the new input refers to a
1163 // tx that's in the mempool.
1164 if (pool
.mapTx
.find(tx
.vin
[j
].prevout
.hash
) != pool
.mapTx
.end())
1165 return state
.DoS(0, error("AcceptToMemoryPool: replacement %s adds unconfirmed input, idx %d",
1166 hash
.ToString(), j
),
1167 REJECT_NONSTANDARD
, "replacement-adds-unconfirmed");
1171 // The replacement must pay greater fees than the transactions it
1172 // replaces - if we did the bandwidth used by those conflicting
1173 // transactions would not be paid for.
1174 if (nFees
< nConflictingFees
)
1176 return state
.DoS(0, error("AcceptToMemoryPool: rejecting replacement %s, less fees than conflicting txs; %s < %s",
1177 hash
.ToString(), FormatMoney(nFees
), FormatMoney(nConflictingFees
)),
1178 REJECT_INSUFFICIENTFEE
, "insufficient fee");
1181 // Finally in addition to paying more fees than the conflicts the
1182 // new transaction must pay for its own bandwidth.
1183 CAmount nDeltaFees
= nFees
- nConflictingFees
;
1184 if (nDeltaFees
< ::minRelayTxFee
.GetFee(nSize
))
1187 error("AcceptToMemoryPool: rejecting replacement %s, not enough additional fees to relay; %s < %s",
1189 FormatMoney(nDeltaFees
),
1190 FormatMoney(::minRelayTxFee
.GetFee(nSize
))),
1191 REJECT_INSUFFICIENTFEE
, "insufficient fee");
1195 // Check against previous transactions
1196 // This is done last to help prevent CPU exhaustion denial-of-service attacks.
1197 if (!CheckInputs(tx
, state
, view
, true, STANDARD_SCRIPT_VERIFY_FLAGS
, true))
1200 // Check again against just the consensus-critical mandatory script
1201 // verification flags, in case of bugs in the standard flags that cause
1202 // transactions to pass as valid when they're actually invalid. For
1203 // instance the STRICTENC flag was incorrectly allowing certain
1204 // CHECKSIG NOT scripts to pass, even though they were invalid.
1206 // There is a similar check in CreateNewBlock() to prevent creating
1207 // invalid blocks, however allowing such transactions into the mempool
1208 // can be exploited as a DoS attack.
1209 if (!CheckInputs(tx
, state
, view
, true, MANDATORY_SCRIPT_VERIFY_FLAGS
, true))
1211 return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
1212 __func__
, hash
.ToString(), FormatStateMessage(state
));
1215 // Remove conflicting transactions from the mempool
1216 BOOST_FOREACH(const CTxMemPool::txiter it
, allConflicting
)
1218 LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
1219 it
->GetTx().GetHash().ToString(),
1221 FormatMoney(nFees
- nConflictingFees
),
1222 (int)nSize
- (int)nConflictingSize
);
1224 pool
.RemoveStaged(allConflicting
);
1226 // Store transaction in memory
1227 pool
.addUnchecked(hash
, entry
, setAncestors
, !IsInitialBlockDownload());
1229 // trim mempool and check if tx was trimmed
1230 if (!fOverrideMempoolLimit
) {
1231 LimitMempoolSize(pool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
1232 if (!pool
.exists(hash
))
1233 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "mempool full");
1237 SyncWithWallets(tx
, NULL
);
1242 bool AcceptToMemoryPool(CTxMemPool
& pool
, CValidationState
&state
, const CTransaction
&tx
, bool fLimitFree
,
1243 bool* pfMissingInputs
, bool fOverrideMempoolLimit
, bool fRejectAbsurdFee
)
1245 std::vector
<uint256
> vHashTxToUncache
;
1246 bool res
= AcceptToMemoryPoolWorker(pool
, state
, tx
, fLimitFree
, pfMissingInputs
, fOverrideMempoolLimit
, fRejectAbsurdFee
, vHashTxToUncache
);
1248 BOOST_FOREACH(const uint256
& hashTx
, vHashTxToUncache
)
1249 pcoinsTip
->Uncache(hashTx
);
1254 /** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
1255 bool GetTransaction(const uint256
&hash
, CTransaction
&txOut
, const Consensus::Params
& consensusParams
, uint256
&hashBlock
, bool fAllowSlow
)
1257 CBlockIndex
*pindexSlow
= NULL
;
1261 if (mempool
.lookup(hash
, txOut
))
1268 if (pblocktree
->ReadTxIndex(hash
, postx
)) {
1269 CAutoFile
file(OpenBlockFile(postx
, true), SER_DISK
, CLIENT_VERSION
);
1271 return error("%s: OpenBlockFile failed", __func__
);
1272 CBlockHeader header
;
1275 fseek(file
.Get(), postx
.nTxOffset
, SEEK_CUR
);
1277 } catch (const std::exception
& e
) {
1278 return error("%s: Deserialize or I/O error - %s", __func__
, e
.what());
1280 hashBlock
= header
.GetHash();
1281 if (txOut
.GetHash() != hash
)
1282 return error("%s: txid mismatch", __func__
);
1287 if (fAllowSlow
) { // use coin database to locate block that contains transaction, and scan it
1290 CCoinsViewCache
&view
= *pcoinsTip
;
1291 const CCoins
* coins
= view
.AccessCoins(hash
);
1293 nHeight
= coins
->nHeight
;
1296 pindexSlow
= chainActive
[nHeight
];
1301 if (ReadBlockFromDisk(block
, pindexSlow
, consensusParams
)) {
1302 BOOST_FOREACH(const CTransaction
&tx
, block
.vtx
) {
1303 if (tx
.GetHash() == hash
) {
1305 hashBlock
= pindexSlow
->GetBlockHash();
1320 //////////////////////////////////////////////////////////////////////////////
1322 // CBlock and CBlockIndex
1325 bool WriteBlockToDisk(const CBlock
& block
, CDiskBlockPos
& pos
, const CMessageHeader::MessageStartChars
& messageStart
)
1327 // Open history file to append
1328 CAutoFile
fileout(OpenBlockFile(pos
), SER_DISK
, CLIENT_VERSION
);
1329 if (fileout
.IsNull())
1330 return error("WriteBlockToDisk: OpenBlockFile failed");
1332 // Write index header
1333 unsigned int nSize
= fileout
.GetSerializeSize(block
);
1334 fileout
<< FLATDATA(messageStart
) << nSize
;
1337 long fileOutPos
= ftell(fileout
.Get());
1339 return error("WriteBlockToDisk: ftell failed");
1340 pos
.nPos
= (unsigned int)fileOutPos
;
1346 bool ReadBlockFromDisk(CBlock
& block
, const CDiskBlockPos
& pos
, const Consensus::Params
& consensusParams
)
1350 // Open history file to read
1351 CAutoFile
filein(OpenBlockFile(pos
, true), SER_DISK
, CLIENT_VERSION
);
1352 if (filein
.IsNull())
1353 return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos
.ToString());
1359 catch (const std::exception
& e
) {
1360 return error("%s: Deserialize or I/O error - %s at %s", __func__
, e
.what(), pos
.ToString());
1364 if (!CheckProofOfWork(block
.GetHash(), block
.nBits
, consensusParams
))
1365 return error("ReadBlockFromDisk: Errors in block header at %s", pos
.ToString());
1370 bool ReadBlockFromDisk(CBlock
& block
, const CBlockIndex
* pindex
, const Consensus::Params
& consensusParams
)
1372 if (!ReadBlockFromDisk(block
, pindex
->GetBlockPos(), consensusParams
))
1374 if (block
.GetHash() != pindex
->GetBlockHash())
1375 return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1376 pindex
->ToString(), pindex
->GetBlockPos().ToString());
1380 CAmount
GetBlockSubsidy(int nHeight
, const Consensus::Params
& consensusParams
)
1382 int halvings
= nHeight
/ consensusParams
.nSubsidyHalvingInterval
;
1383 // Force block reward to zero when right shift is undefined.
1387 CAmount nSubsidy
= 50 * COIN
;
1388 // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1389 nSubsidy
>>= halvings
;
1393 bool IsInitialBlockDownload()
1395 const CChainParams
& chainParams
= Params();
1397 if (fImporting
|| fReindex
)
1399 if (fCheckpointsEnabled
&& chainActive
.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams
.Checkpoints()))
1401 static bool lockIBDState
= false;
1404 bool state
= (chainActive
.Height() < pindexBestHeader
->nHeight
- 24 * 6 ||
1405 pindexBestHeader
->GetBlockTime() < GetTime() - chainParams
.MaxTipAge());
1407 lockIBDState
= true;
1411 bool fLargeWorkForkFound
= false;
1412 bool fLargeWorkInvalidChainFound
= false;
1413 CBlockIndex
*pindexBestForkTip
= NULL
, *pindexBestForkBase
= NULL
;
1415 void CheckForkWarningConditions()
1417 AssertLockHeld(cs_main
);
1418 // Before we get past initial download, we cannot reliably alert about forks
1419 // (we assume we don't get stuck on a fork before the last checkpoint)
1420 if (IsInitialBlockDownload())
1423 // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
1424 // of our head, drop it
1425 if (pindexBestForkTip
&& chainActive
.Height() - pindexBestForkTip
->nHeight
>= 72)
1426 pindexBestForkTip
= NULL
;
1428 if (pindexBestForkTip
|| (pindexBestInvalid
&& pindexBestInvalid
->nChainWork
> chainActive
.Tip()->nChainWork
+ (GetBlockProof(*chainActive
.Tip()) * 6)))
1430 if (!fLargeWorkForkFound
&& pindexBestForkBase
)
1432 std::string warning
= std::string("'Warning: Large-work fork detected, forking after block ") +
1433 pindexBestForkBase
->phashBlock
->ToString() + std::string("'");
1434 CAlert::Notify(warning
, true);
1436 if (pindexBestForkTip
&& pindexBestForkBase
)
1438 LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__
,
1439 pindexBestForkBase
->nHeight
, pindexBestForkBase
->phashBlock
->ToString(),
1440 pindexBestForkTip
->nHeight
, pindexBestForkTip
->phashBlock
->ToString());
1441 fLargeWorkForkFound
= true;
1445 LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__
);
1446 fLargeWorkInvalidChainFound
= true;
1451 fLargeWorkForkFound
= false;
1452 fLargeWorkInvalidChainFound
= false;
1456 void CheckForkWarningConditionsOnNewFork(CBlockIndex
* pindexNewForkTip
)
1458 AssertLockHeld(cs_main
);
1459 // If we are on a fork that is sufficiently large, set a warning flag
1460 CBlockIndex
* pfork
= pindexNewForkTip
;
1461 CBlockIndex
* plonger
= chainActive
.Tip();
1462 while (pfork
&& pfork
!= plonger
)
1464 while (plonger
&& plonger
->nHeight
> pfork
->nHeight
)
1465 plonger
= plonger
->pprev
;
1466 if (pfork
== plonger
)
1468 pfork
= pfork
->pprev
;
1471 // We define a condition where we should warn the user about as a fork of at least 7 blocks
1472 // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
1473 // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
1474 // hash rate operating on the fork.
1475 // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
1476 // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
1477 // the 7-block condition and from this always have the most-likely-to-cause-warning fork
1478 if (pfork
&& (!pindexBestForkTip
|| (pindexBestForkTip
&& pindexNewForkTip
->nHeight
> pindexBestForkTip
->nHeight
)) &&
1479 pindexNewForkTip
->nChainWork
- pfork
->nChainWork
> (GetBlockProof(*pfork
) * 7) &&
1480 chainActive
.Height() - pindexNewForkTip
->nHeight
< 72)
1482 pindexBestForkTip
= pindexNewForkTip
;
1483 pindexBestForkBase
= pfork
;
1486 CheckForkWarningConditions();
1489 // Requires cs_main.
1490 void Misbehaving(NodeId pnode
, int howmuch
)
1495 CNodeState
*state
= State(pnode
);
1499 state
->nMisbehavior
+= howmuch
;
1500 int banscore
= GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD
);
1501 if (state
->nMisbehavior
>= banscore
&& state
->nMisbehavior
- howmuch
< banscore
)
1503 LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__
, state
->name
, state
->nMisbehavior
-howmuch
, state
->nMisbehavior
);
1504 state
->fShouldBan
= true;
1506 LogPrintf("%s: %s (%d -> %d)\n", __func__
, state
->name
, state
->nMisbehavior
-howmuch
, state
->nMisbehavior
);
1509 void static InvalidChainFound(CBlockIndex
* pindexNew
)
1511 if (!pindexBestInvalid
|| pindexNew
->nChainWork
> pindexBestInvalid
->nChainWork
)
1512 pindexBestInvalid
= pindexNew
;
1514 LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__
,
1515 pindexNew
->GetBlockHash().ToString(), pindexNew
->nHeight
,
1516 log(pindexNew
->nChainWork
.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
1517 pindexNew
->GetBlockTime()));
1518 CBlockIndex
*tip
= chainActive
.Tip();
1520 LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__
,
1521 tip
->GetBlockHash().ToString(), chainActive
.Height(), log(tip
->nChainWork
.getdouble())/log(2.0),
1522 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip
->GetBlockTime()));
1523 CheckForkWarningConditions();
1526 void static InvalidBlockFound(CBlockIndex
*pindex
, const CValidationState
&state
) {
1528 if (state
.IsInvalid(nDoS
)) {
1529 std::map
<uint256
, NodeId
>::iterator it
= mapBlockSource
.find(pindex
->GetBlockHash());
1530 if (it
!= mapBlockSource
.end() && State(it
->second
)) {
1531 assert (state
.GetRejectCode() < REJECT_INTERNAL
); // Blocks are never rejected with internal reject codes
1532 CBlockReject reject
= {(unsigned char)state
.GetRejectCode(), state
.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH
), pindex
->GetBlockHash()};
1533 State(it
->second
)->rejects
.push_back(reject
);
1535 Misbehaving(it
->second
, nDoS
);
1538 if (!state
.CorruptionPossible()) {
1539 pindex
->nStatus
|= BLOCK_FAILED_VALID
;
1540 setDirtyBlockIndex
.insert(pindex
);
1541 setBlockIndexCandidates
.erase(pindex
);
1542 InvalidChainFound(pindex
);
1546 void UpdateCoins(const CTransaction
& tx
, CValidationState
&state
, CCoinsViewCache
&inputs
, CTxUndo
&txundo
, int nHeight
)
1548 // mark inputs spent
1549 if (!tx
.IsCoinBase()) {
1550 txundo
.vprevout
.reserve(tx
.vin
.size());
1551 BOOST_FOREACH(const CTxIn
&txin
, tx
.vin
) {
1552 CCoinsModifier coins
= inputs
.ModifyCoins(txin
.prevout
.hash
);
1553 unsigned nPos
= txin
.prevout
.n
;
1555 if (nPos
>= coins
->vout
.size() || coins
->vout
[nPos
].IsNull())
1557 // mark an outpoint spent, and construct undo information
1558 txundo
.vprevout
.push_back(CTxInUndo(coins
->vout
[nPos
]));
1560 if (coins
->vout
.size() == 0) {
1561 CTxInUndo
& undo
= txundo
.vprevout
.back();
1562 undo
.nHeight
= coins
->nHeight
;
1563 undo
.fCoinBase
= coins
->fCoinBase
;
1564 undo
.nVersion
= coins
->nVersion
;
1568 inputs
.ModifyNewCoins(tx
.GetHash())->FromTx(tx
, nHeight
);
1571 // add outputs for coinbase tx
1572 // In this case call the full ModifyCoins which will do a database
1573 // lookup to be sure the coins do not already exist otherwise we do not
1574 // know whether to mark them fresh or not. We want the duplicate coinbases
1575 // before BIP30 to still be properly overwritten.
1576 inputs
.ModifyCoins(tx
.GetHash())->FromTx(tx
, nHeight
);
1580 void UpdateCoins(const CTransaction
& tx
, CValidationState
&state
, CCoinsViewCache
&inputs
, int nHeight
)
1583 UpdateCoins(tx
, state
, inputs
, txundo
, nHeight
);
1586 bool CScriptCheck::operator()() {
1587 const CScript
&scriptSig
= ptxTo
->vin
[nIn
].scriptSig
;
1588 if (!VerifyScript(scriptSig
, scriptPubKey
, nFlags
, CachingTransactionSignatureChecker(ptxTo
, nIn
, cacheStore
), &error
)) {
1594 int GetSpendHeight(const CCoinsViewCache
& inputs
)
1597 CBlockIndex
* pindexPrev
= mapBlockIndex
.find(inputs
.GetBestBlock())->second
;
1598 return pindexPrev
->nHeight
+ 1;
1601 namespace Consensus
{
1602 bool CheckTxInputs(const CTransaction
& tx
, CValidationState
& state
, const CCoinsViewCache
& inputs
, int nSpendHeight
)
1604 // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
1605 // for an attacker to attempt to split the network.
1606 if (!inputs
.HaveInputs(tx
))
1607 return state
.Invalid(false, 0, "", "Inputs unavailable");
1609 CAmount nValueIn
= 0;
1611 for (unsigned int i
= 0; i
< tx
.vin
.size(); i
++)
1613 const COutPoint
&prevout
= tx
.vin
[i
].prevout
;
1614 const CCoins
*coins
= inputs
.AccessCoins(prevout
.hash
);
1617 // If prev is coinbase, check that it's matured
1618 if (coins
->IsCoinBase()) {
1619 if (nSpendHeight
- coins
->nHeight
< COINBASE_MATURITY
)
1620 return state
.Invalid(false,
1621 REJECT_INVALID
, "bad-txns-premature-spend-of-coinbase",
1622 strprintf("tried to spend coinbase at depth %d", nSpendHeight
- coins
->nHeight
));
1625 // Check for negative or overflow input values
1626 nValueIn
+= coins
->vout
[prevout
.n
].nValue
;
1627 if (!MoneyRange(coins
->vout
[prevout
.n
].nValue
) || !MoneyRange(nValueIn
))
1628 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-inputvalues-outofrange");
1632 if (nValueIn
< tx
.GetValueOut())
1633 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-in-belowout", false,
1634 strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn
), FormatMoney(tx
.GetValueOut())));
1636 // Tally transaction fees
1637 CAmount nTxFee
= nValueIn
- tx
.GetValueOut();
1639 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-fee-negative");
1641 if (!MoneyRange(nFees
))
1642 return state
.DoS(100, false, REJECT_INVALID
, "bad-txns-fee-outofrange");
1645 }// namespace Consensus
1647 bool CheckInputs(const CTransaction
& tx
, CValidationState
&state
, const CCoinsViewCache
&inputs
, bool fScriptChecks
, unsigned int flags
, bool cacheStore
, std::vector
<CScriptCheck
> *pvChecks
)
1649 if (!tx
.IsCoinBase())
1651 if (!Consensus::CheckTxInputs(tx
, state
, inputs
, GetSpendHeight(inputs
)))
1655 pvChecks
->reserve(tx
.vin
.size());
1657 // The first loop above does all the inexpensive checks.
1658 // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
1659 // Helps prevent CPU exhaustion attacks.
1661 // Skip ECDSA signature verification when connecting blocks
1662 // before the last block chain checkpoint. This is safe because block merkle hashes are
1663 // still computed and checked, and any change will be caught at the next checkpoint.
1664 if (fScriptChecks
) {
1665 for (unsigned int i
= 0; i
< tx
.vin
.size(); i
++) {
1666 const COutPoint
&prevout
= tx
.vin
[i
].prevout
;
1667 const CCoins
* coins
= inputs
.AccessCoins(prevout
.hash
);
1671 CScriptCheck
check(*coins
, tx
, i
, flags
, cacheStore
);
1673 pvChecks
->push_back(CScriptCheck());
1674 check
.swap(pvChecks
->back());
1675 } else if (!check()) {
1676 if (flags
& STANDARD_NOT_MANDATORY_VERIFY_FLAGS
) {
1677 // Check whether the failure was caused by a
1678 // non-mandatory script verification check, such as
1679 // non-standard DER encodings or non-null dummy
1680 // arguments; if so, don't trigger DoS protection to
1681 // avoid splitting the network between upgraded and
1682 // non-upgraded nodes.
1683 CScriptCheck
check(*coins
, tx
, i
,
1684 flags
& ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS
, cacheStore
);
1686 return state
.Invalid(false, REJECT_NONSTANDARD
, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check
.GetScriptError())));
1688 // Failures of other flags indicate a transaction that is
1689 // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
1690 // such nodes as they are not following the protocol. That
1691 // said during an upgrade careful thought should be taken
1692 // as to the correct behavior - we may want to continue
1693 // peering with non-upgraded nodes even after a soft-fork
1694 // super-majority vote has passed.
1695 return state
.DoS(100,false, REJECT_INVALID
, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check
.GetScriptError())));
1706 bool UndoWriteToDisk(const CBlockUndo
& blockundo
, CDiskBlockPos
& pos
, const uint256
& hashBlock
, const CMessageHeader::MessageStartChars
& messageStart
)
1708 // Open history file to append
1709 CAutoFile
fileout(OpenUndoFile(pos
), SER_DISK
, CLIENT_VERSION
);
1710 if (fileout
.IsNull())
1711 return error("%s: OpenUndoFile failed", __func__
);
1713 // Write index header
1714 unsigned int nSize
= fileout
.GetSerializeSize(blockundo
);
1715 fileout
<< FLATDATA(messageStart
) << nSize
;
1718 long fileOutPos
= ftell(fileout
.Get());
1720 return error("%s: ftell failed", __func__
);
1721 pos
.nPos
= (unsigned int)fileOutPos
;
1722 fileout
<< blockundo
;
1724 // calculate & write checksum
1725 CHashWriter
hasher(SER_GETHASH
, PROTOCOL_VERSION
);
1726 hasher
<< hashBlock
;
1727 hasher
<< blockundo
;
1728 fileout
<< hasher
.GetHash();
1733 bool UndoReadFromDisk(CBlockUndo
& blockundo
, const CDiskBlockPos
& pos
, const uint256
& hashBlock
)
1735 // Open history file to read
1736 CAutoFile
filein(OpenUndoFile(pos
, true), SER_DISK
, CLIENT_VERSION
);
1737 if (filein
.IsNull())
1738 return error("%s: OpenBlockFile failed", __func__
);
1741 uint256 hashChecksum
;
1743 filein
>> blockundo
;
1744 filein
>> hashChecksum
;
1746 catch (const std::exception
& e
) {
1747 return error("%s: Deserialize or I/O error - %s", __func__
, e
.what());
1751 CHashWriter
hasher(SER_GETHASH
, PROTOCOL_VERSION
);
1752 hasher
<< hashBlock
;
1753 hasher
<< blockundo
;
1754 if (hashChecksum
!= hasher
.GetHash())
1755 return error("%s: Checksum mismatch", __func__
);
1760 /** Abort with a message */
1761 bool AbortNode(const std::string
& strMessage
, const std::string
& userMessage
="")
1763 strMiscWarning
= strMessage
;
1764 LogPrintf("*** %s\n", strMessage
);
1765 uiInterface
.ThreadSafeMessageBox(
1766 userMessage
.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage
,
1767 "", CClientUIInterface::MSG_ERROR
);
1772 bool AbortNode(CValidationState
& state
, const std::string
& strMessage
, const std::string
& userMessage
="")
1774 AbortNode(strMessage
, userMessage
);
1775 return state
.Error(strMessage
);
1781 * Apply the undo operation of a CTxInUndo to the given chain state.
1782 * @param undo The undo object.
1783 * @param view The coins view to which to apply the changes.
1784 * @param out The out point that corresponds to the tx input.
1785 * @return True on success.
1787 static bool ApplyTxInUndo(const CTxInUndo
& undo
, CCoinsViewCache
& view
, const COutPoint
& out
)
1791 CCoinsModifier coins
= view
.ModifyCoins(out
.hash
);
1792 if (undo
.nHeight
!= 0) {
1793 // undo data contains height: this is the last output of the prevout tx being spent
1794 if (!coins
->IsPruned())
1795 fClean
= fClean
&& error("%s: undo data overwriting existing transaction", __func__
);
1797 coins
->fCoinBase
= undo
.fCoinBase
;
1798 coins
->nHeight
= undo
.nHeight
;
1799 coins
->nVersion
= undo
.nVersion
;
1801 if (coins
->IsPruned())
1802 fClean
= fClean
&& error("%s: undo data adding output to missing transaction", __func__
);
1804 if (coins
->IsAvailable(out
.n
))
1805 fClean
= fClean
&& error("%s: undo data overwriting existing output", __func__
);
1806 if (coins
->vout
.size() < out
.n
+1)
1807 coins
->vout
.resize(out
.n
+1);
1808 coins
->vout
[out
.n
] = undo
.txout
;
/**
 * Undo the effects of a block on the coins view (reverse of ConnectBlock).
 * @param block   The block being disconnected.
 * @param state   Validation state (receives fatal errors).
 * @param pindex  Block index entry for `block`; must be the view's best block.
 * @param view    Coins view to roll back.
 * @param pfClean If non-NULL, receives whether the undo was fully consistent
 *                and the function only fails on fatal errors; if NULL, any
 *                inconsistency makes the function return false.
 */
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    bool fClean = true;

    if (pfClean)
        *pfClean = false;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock(): no undo data available");
    if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock(): failure reading undo data");

    // One undo record per non-coinbase transaction.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock(): block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = block.vtx[i];
        uint256 hash = tx.GetHash();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        {
        CCoinsModifier outs = view.ModifyCoins(hash);
        outs->ClearUnspendable();

        CCoins outsBlock(tx, pindex->nHeight);
        // The CCoins serialization does not serialize negative numbers.
        // No network rules currently depend on the version here, so an inconsistency is harmless
        // but it must be corrected before txout nversion ever influences a network rule.
        if (outsBlock.nVersion < 0)
            outs->nVersion = outsBlock.nVersion;
        if (*outs != outsBlock)
            fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");

        // remove outputs
        outs->Clear();
        }

        // restore inputs
        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock(): transaction and undo data inconsistent");
            // Restore spent prevouts in reverse input order.
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                if (!ApplyTxInUndo(undo, view, out))
                    fClean = false;
            }
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    if (pfClean) {
        *pfClean = fClean;
        return true;
    }

    return fClean;
}
/**
 * Flush (and optionally finalize/truncate) the current block file and its
 * matching undo file so their contents are durable on disk.
 * @param fFinalize Truncate both files to their recorded sizes before committing
 *                  (used when the file is full and a new one is started).
 */
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE *fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }

    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }
}
// Forward declaration: reserve nAddSize bytes in the undo file for block file
// nFile, returning the position in `pos` (definition appears later in this file).
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
// Queue of script-verification jobs shared between the validation thread and
// the script-check worker threads; jobs are handed out in batches of up to 128.
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
// Entry point for a script-verification worker thread: set the thread name
// and process jobs from scriptcheckqueue until shutdown.
void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
}
// Called periodically asynchronously; alerts if it smells like
// we're being fed a bad chain (blocks being generated much
// too slowly or too quickly).
//
// Counts blocks in the last SPAN_HOURS hours of headers and compares against a
// Poisson model with mean SPAN_SECONDS / nPowTargetSpacing; fires a warning
// (at most once per day) when the observed count is improbably low or high.
void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
                    int64_t nPowTargetSpacing)
{
    if (bestHeader == NULL || initialDownloadCheck()) return;

    static int64_t lastAlertTime = 0;
    int64_t now = GetAdjustedTime();
    if (lastAlertTime > now-60*60*24) return; // Alert at most once per day

    const int SPAN_HOURS=4;
    const int SPAN_SECONDS=SPAN_HOURS*60*60;
    int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;

    boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);

    std::string strWarning;
    int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;

    LOCK(cs);
    // Walk back through the header chain counting blocks inside the window.
    const CBlockIndex* i = bestHeader;
    int nBlocks = 0;
    while (i->GetBlockTime() >= startTime) {
        ++nBlocks;
        i = i->pprev;
        if (i == NULL) return; // Ran out of chain, we must not be fully sync'ed
    }

    // How likely is it to find that many by chance?
    double p = boost::math::pdf(poisson, nBlocks);

    LogPrint("partitioncheck", "%s: Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
    LogPrint("partitioncheck", "%s: likelihood: %g\n", __func__, p);

    // Aim for one false-positive about every fifty years of normal running:
    const int FIFTY_YEARS = 50*365*24*60*60;
    double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);

    if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
    {
        // Many fewer blocks than expected: alert!
        strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
    }
    else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
    {
        // Many more blocks than expected: alert!
        strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
    }
    if (!strWarning.empty())
    {
        strMiscWarning = strWarning;
        CAlert::Notify(strWarning, true);
        lastAlertTime = now;
    }
}
// Cumulative per-phase timings (microseconds) for ConnectBlock, reported via
// the "bench" log category.
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
/**
 * Apply the effects of `block` on the coins view (and the on-disk tx index /
 * undo data unless fJustCheck). Validates all transactions, sigop limits,
 * script checks (possibly parallelized), and the coinbase subsidy.
 * @param fJustCheck When true, verify only: nothing is written to disk and
 *                   the best-block pointer is not moved.
 */
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck)
{
    const CChainParams& chainparams = Params();
    AssertLockHeld(cs_main);

    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    if (!CheckBlock(block, state, !fJustCheck, !fJustCheck))
        return false;

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck)
            view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    bool fScriptChecks = true;
    if (fCheckpointsEnabled) {
        CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
        if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
            // This block is an ancestor of a checkpoint: disable script checks
            fScriptChecks = false;
        }
    }

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint("bench", "    - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
                          !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                           (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
    CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
    //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));

    if (fEnforceBIP30) {
        BOOST_FOREACH(const CTransaction& tx, block.vtx) {
            const CCoins* coins = view.AccessCoins(tx.GetHash());
            if (coins && !coins->IsPruned())
                return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                                 REJECT_INVALID, "bad-txns-BIP30");
        }
    }

    // BIP16 didn't become active until Apr 1 2012
    int64_t nBIP16SwitchTime = 1333238400;
    bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);

    unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

    // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks,
    // when 75% of the network has upgraded:
    if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY, (BIP65) for block.nVersion=4
    // blocks, when 75% of the network has upgraded:
    if (block.nVersion >= 4 && IsSuperMajority(4, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint("bench", "    - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);

    CBlockUndo blockundo;

    // Hand script checks to worker threads only when enabled and not checkpointed-away.
    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    CAmount nFees = 0;
    int nInputs = 0;
    unsigned int nSigOps = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = block.vtx[i];

        nInputs += tx.vin.size();
        nSigOps += GetLegacySigOpCount(tx);
        if (nSigOps > MAX_BLOCK_SIGOPS)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        if (!tx.IsCoinBase())
        {
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
                                 REJECT_INVALID, "bad-txns-inputs-missingorspent");

            if (fStrictPayToScriptHash)
            {
                // Add in sigops done by pay-to-script-hash inputs;
                // this is to prevent a "rogue miner" from creating
                // an incredibly-expensive-to-validate block.
                nSigOps += GetP2SHSigOpCount(tx, view);
                if (nSigOps > MAX_BLOCK_SIGOPS)
                    return state.DoS(100, error("ConnectBlock(): too many sigops"),
                                     REJECT_INVALID, "bad-blk-sigops");
            }

            nFees += view.GetValueIn(tx)-tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, nScriptCheckThreads ? &vChecks : NULL))
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                    tx.GetHash().ToString(), FormatStateMessage(state));
            control.Add(vChecks);
        }

        // The coinbase (i == 0) gets a throwaway undo record; all others are
        // recorded in blockundo for a later DisconnectBlock.
        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, state, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }
    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint("bench", "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);

    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0].GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0].GetValueOut(), blockReward),
                               REJECT_INVALID, "bad-cb-amount");

    // Wait for the parallel script checks to finish before declaring success.
    if (!control.Wait())
        return state.DoS(100, false);
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint("bench", "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);

    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
    {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos pos;
            if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock(): FindUndoPos failed");
            if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
                return AbortNode(state, "Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return AbortNode(state, "Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint("bench", "    - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
    static uint256 hashPrevBestCoinBase;
    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
    hashPrevBestCoinBase = block.vtx[0].GetHash();

    int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
    LogPrint("bench", "    - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);

    return true;
}
/** How aggressively FlushStateToDisk should behave, in increasing order. */
enum FlushStateMode {
    FLUSH_STATE_NONE,
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
    FLUSH_STATE_ALWAYS
};
/**
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with
 * if they're too large, if it's been a while since the last write,
 * or always and in all cases if we're in prune mode and are deleting files.
 */
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
    const CChainParams& chainparams = Params();
    LOCK2(cs_main, cs_LastBlockFile);
    // Timestamps (microseconds) of the last index write / chainstate flush /
    // best-chain locator update, used to rate-limit periodic work.
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
    bool fFlushForPrune = false;
    try {
    if (fPruneMode && fCheckForPruning && !fReindex) {
        FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
        fCheckForPruning = false;
        if (!setFilesToPrune.empty()) {
            fFlushForPrune = true;
            if (!fHavePruned) {
                // Record permanently that this datadir has pruned block files.
                pblocktree->WriteFlag("prunedblockfiles", true);
                fHavePruned = true;
            }
        }
    }
    int64_t nNow = GetTimeMicros();
    // Avoid writing/flushing immediately after startup.
    if (nLastWrite == 0) {
        nLastWrite = nNow;
    }
    if (nLastFlush == 0) {
        nLastFlush = nNow;
    }
    if (nLastSetChain == 0) {
        nLastSetChain = nNow;
    }
    size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
    // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
    // The cache is over the limit, we have to write now.
    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
    // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
    bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
    // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
    bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
    // Combine all conditions that result in a full cache flush.
    bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
    // Write blocks and block index to disk.
    if (fDoFullFlush || fPeriodicWrite) {
        // Depend on nMinDiskSpace to ensure we can write block index
        if (!CheckDiskSpace(0))
            return state.Error("out of disk space");
        // First make sure all block and undo data is flushed to disk.
        FlushBlockFile();
        // Then update all block file information (which may refer to block and undo files).
        {
            std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
            vFiles.reserve(setDirtyFileInfo.size());
            for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
                setDirtyFileInfo.erase(it++);
            }
            std::vector<const CBlockIndex*> vBlocks;
            vBlocks.reserve(setDirtyBlockIndex.size());
            for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                vBlocks.push_back(*it);
                setDirtyBlockIndex.erase(it++);
            }
            if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                return AbortNode(state, "Files to write to block index database");
            }
        }
        // Finally remove any pruned files
        if (fFlushForPrune)
            UnlinkPrunedFiles(setFilesToPrune);
        nLastWrite = nNow;
    }
    // Flush best chain related state. This can only be done if the blocks / block index write was also done.
    if (fDoFullFlush) {
        // Typical CCoins structures on disk are around 128 bytes in size.
        // Pushing a new one to the database can cause it to be written
        // twice (once in the log, and once in the tables). This is already
        // an overestimation, as most will delete an existing entry or
        // overwrite one. Still, use a conservative safety factor of 2.
        if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
            return state.Error("out of disk space");
        // Flush the chainstate (which may refer to block index entries).
        if (!pcoinsTip->Flush())
            return AbortNode(state, "Failed to write to coin database");
        nLastFlush = nNow;
    }
    if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().SetBestChain(chainActive.GetLocator());
        nLastSetChain = nNow;
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
// Unconditionally flush all chain state (block index and chainstate) to disk.
void FlushStateToDisk() {
    CValidationState state;
    FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}
// Request pruning of old block files on the next flush, then trigger that
// flush (FLUSH_STATE_NONE: prune-driven flushing only).
void PruneAndFlush() {
    CValidationState state;
    fCheckForPruning = true;
    FlushStateToDisk(state, FLUSH_STATE_NONE);
}
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew) {
    const CChainParams& chainParams = Params();
    chainActive.SetTip(pindexNew);

    // New best block
    nTimeBestReceived = GetTime();
    mempool.AddTransactionsUpdated(1);

    LogPrintf("%s: new best=%s  height=%d  log2_work=%.8g  tx=%lu  date=%s progress=%f  cache=%.1fMiB(%utx)\n", __func__,
      chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
      Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());

    cvBlockChange.notify_all();

    // Check the version of the last 100 blocks to see if we need to upgrade:
    static bool fWarned = false;
    if (!IsInitialBlockDownload() && !fWarned)
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = chainActive.Tip();
        for (int i = 0; i < 100 && pindex != NULL; i++)
        {
            if (pindex->nVersion > CBlock::CURRENT_VERSION)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::CURRENT_VERSION);
        if (nUpgraded > 100/2)
        {
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: This version is obsolete; upgrade required!");
            CAlert::Notify(strMiscWarning, true);
            fWarned = true;
        }
    }
}
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
bool static DisconnectTip(CValidationState& state, const Consensus::Params& consensusParams)
{
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    CBlock block;
    if (!ReadBlockFromDisk(block, pindexDelete, consensusParams))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        // Scope the cache so its Flush happens before the disk flush below.
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    }
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;
    // Resurrect mempool transactions from the disconnected block.
    std::vector<uint256> vHashUpdate;
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        // ignore validation errors in resurrected transactions
        list<CTransaction> removed;
        CValidationState stateDummy;
        if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
            mempool.remove(tx, removed, true);
        } else if (mempool.exists(tx.GetHash())) {
            vHashUpdate.push_back(tx.GetHash());
        }
    }
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in this
    // block that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(vHashUpdate);
    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        SyncWithWallets(tx, NULL);
    }
    return true;
}
// Cumulative per-phase timings (microseconds) for ConnectTip, reported via
// the "bench" log category.
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
 * corresponding to pindexNew, to bypass loading it again from disk.
 */
bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock)
{
    assert(pindexNew->pprev == chainActive.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    CBlock block;
    if (!pblock) {
        if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pblock = &block;
    }
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint("bench", "  - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
    {
        CCoinsViewCache view(pcoinsTip);
        bool rv = ConnectBlock(*pblock, state, pindexNew, view);
        // Notify listeners of the validation verdict before acting on failure.
        GetMainSignals().BlockChecked(*pblock, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
        }
        mapBlockSource.erase(pindexNew->GetBlockHash());
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        LogPrint("bench", "  - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
        assert(view.Flush());
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint("bench", "  - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint("bench", "  - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
    list<CTransaction> txConflicted;
    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
    // Update chainActive & related variables.
    UpdateTip(pindexNew);
    // Tell wallet about transactions that went from mempool
    // to conflicted:
    BOOST_FOREACH(const CTransaction &tx, txConflicted) {
        SyncWithWallets(tx, NULL);
    }
    // ... and about transactions that got confirmed:
    BOOST_FOREACH(const CTransaction &tx, pblock->vtx) {
        SyncWithWallets(tx, pblock);
    }

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint("bench", "  - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
    LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
    return true;
}
/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 */
static CBlockIndex* FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = NULL;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return NULL;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        // Only accept the candidate once its whole non-active ancestry checked out.
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    // value_comp orders by work; erase every candidate strictly worse than the tip.
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
 */
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock)
{
    AssertLockHeld(cs_main);
    bool fInvalidFound = false;
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams.GetConsensus()))
            return false;
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        // Re-check lock-time validity and re-limit mempool size after a reorg.
        mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
        LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
    }
    mempool.check(pcoinsTip);

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
2614 * Make the best chain active, in multiple steps. The result is either failure
2615 * or an activated best chain. pblock is either NULL or a pointer to a block
2616 * that is already loaded (to avoid loading it again from disk).
2618 bool ActivateBestChain(CValidationState
&state
, const CChainParams
& chainparams
, const CBlock
*pblock
) {
2619 CBlockIndex
*pindexMostWork
= NULL
;
2621 boost::this_thread::interruption_point();
2623 CBlockIndex
*pindexNewTip
= NULL
;
2624 const CBlockIndex
*pindexFork
;
2625 bool fInitialDownload
;
2628 CBlockIndex
*pindexOldTip
= chainActive
.Tip();
2629 pindexMostWork
= FindMostWorkChain();
2631 // Whether we have anything to do at all.
2632 if (pindexMostWork
== NULL
|| pindexMostWork
== chainActive
.Tip())
2635 if (!ActivateBestChainStep(state
, chainparams
, pindexMostWork
, pblock
&& pblock
->GetHash() == pindexMostWork
->GetBlockHash() ? pblock
: NULL
))
2638 pindexNewTip
= chainActive
.Tip();
2639 pindexFork
= chainActive
.FindFork(pindexOldTip
);
2640 fInitialDownload
= IsInitialBlockDownload();
2642 // When we reach this point, we switched to a new tip (stored in pindexNewTip).
2644 // Notifications/callbacks that can run without cs_main
2645 // Always notify the UI if a new block tip was connected
2646 if (pindexFork
!= pindexNewTip
) {
2647 uiInterface
.NotifyBlockTip(fInitialDownload
, pindexNewTip
);
2649 if (!fInitialDownload
) {
2650 // Find the hashes of all blocks that weren't previously in the best chain.
2651 std::vector
<uint256
> vHashes
;
2652 CBlockIndex
*pindexToAnnounce
= pindexNewTip
;
2653 while (pindexToAnnounce
!= pindexFork
) {
2654 vHashes
.push_back(pindexToAnnounce
->GetBlockHash());
2655 pindexToAnnounce
= pindexToAnnounce
->pprev
;
2656 if (vHashes
.size() == MAX_BLOCKS_TO_ANNOUNCE
) {
2657 // Limit announcements in case of a huge reorganization.
2658 // Rely on the peer's synchronization mechanism in that case.
2662 // Relay inventory, but don't relay old inventory during initial block download.
2663 int nBlockEstimate
= 0;
2664 if (fCheckpointsEnabled
)
2665 nBlockEstimate
= Checkpoints::GetTotalBlocksEstimate(chainparams
.Checkpoints());
2668 BOOST_FOREACH(CNode
* pnode
, vNodes
) {
2669 if (chainActive
.Height() > (pnode
->nStartingHeight
!= -1 ? pnode
->nStartingHeight
- 2000 : nBlockEstimate
)) {
2670 BOOST_REVERSE_FOREACH(const uint256
& hash
, vHashes
) {
2671 pnode
->PushBlockHash(hash
);
2676 // Notify external listeners about the new tip.
2677 if (!vHashes
.empty()) {
2678 GetMainSignals().UpdatedBlockTip(pindexNewTip
);
2682 } while(pindexMostWork
!= chainActive
.Tip());
2683 CheckBlockIndex(chainparams
.GetConsensus());
2685 // Write changes periodically to disk, after relay.
2686 if (!FlushStateToDisk(state
, FLUSH_STATE_PERIODIC
)) {
2693 bool InvalidateBlock(CValidationState
& state
, const Consensus::Params
& consensusParams
, CBlockIndex
*pindex
)
2695 AssertLockHeld(cs_main
);
2697 // Mark the block itself as invalid.
2698 pindex
->nStatus
|= BLOCK_FAILED_VALID
;
2699 setDirtyBlockIndex
.insert(pindex
);
2700 setBlockIndexCandidates
.erase(pindex
);
2702 while (chainActive
.Contains(pindex
)) {
2703 CBlockIndex
*pindexWalk
= chainActive
.Tip();
2704 pindexWalk
->nStatus
|= BLOCK_FAILED_CHILD
;
2705 setDirtyBlockIndex
.insert(pindexWalk
);
2706 setBlockIndexCandidates
.erase(pindexWalk
);
2707 // ActivateBestChain considers blocks already in chainActive
2708 // unconditionally valid already, so force disconnect away from it.
2709 if (!DisconnectTip(state
, consensusParams
)) {
2710 mempool
.removeForReorg(pcoinsTip
, chainActive
.Tip()->nHeight
+ 1, STANDARD_LOCKTIME_VERIFY_FLAGS
);
2715 LimitMempoolSize(mempool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
2717 // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
2719 BlockMap::iterator it
= mapBlockIndex
.begin();
2720 while (it
!= mapBlockIndex
.end()) {
2721 if (it
->second
->IsValid(BLOCK_VALID_TRANSACTIONS
) && it
->second
->nChainTx
&& !setBlockIndexCandidates
.value_comp()(it
->second
, chainActive
.Tip())) {
2722 setBlockIndexCandidates
.insert(it
->second
);
2727 InvalidChainFound(pindex
);
2728 mempool
.removeForReorg(pcoinsTip
, chainActive
.Tip()->nHeight
+ 1, STANDARD_LOCKTIME_VERIFY_FLAGS
);
2732 bool ReconsiderBlock(CValidationState
& state
, CBlockIndex
*pindex
) {
2733 AssertLockHeld(cs_main
);
2735 int nHeight
= pindex
->nHeight
;
2737 // Remove the invalidity flag from this block and all its descendants.
2738 BlockMap::iterator it
= mapBlockIndex
.begin();
2739 while (it
!= mapBlockIndex
.end()) {
2740 if (!it
->second
->IsValid() && it
->second
->GetAncestor(nHeight
) == pindex
) {
2741 it
->second
->nStatus
&= ~BLOCK_FAILED_MASK
;
2742 setDirtyBlockIndex
.insert(it
->second
);
2743 if (it
->second
->IsValid(BLOCK_VALID_TRANSACTIONS
) && it
->second
->nChainTx
&& setBlockIndexCandidates
.value_comp()(chainActive
.Tip(), it
->second
)) {
2744 setBlockIndexCandidates
.insert(it
->second
);
2746 if (it
->second
== pindexBestInvalid
) {
2747 // Reset invalid block marker if it was pointing to one of those.
2748 pindexBestInvalid
= NULL
;
2754 // Remove the invalidity flag from all ancestors too.
2755 while (pindex
!= NULL
) {
2756 if (pindex
->nStatus
& BLOCK_FAILED_MASK
) {
2757 pindex
->nStatus
&= ~BLOCK_FAILED_MASK
;
2758 setDirtyBlockIndex
.insert(pindex
);
2760 pindex
= pindex
->pprev
;
2765 CBlockIndex
* AddToBlockIndex(const CBlockHeader
& block
)
2767 // Check for duplicate
2768 uint256 hash
= block
.GetHash();
2769 BlockMap::iterator it
= mapBlockIndex
.find(hash
);
2770 if (it
!= mapBlockIndex
.end())
2773 // Construct new block index object
2774 CBlockIndex
* pindexNew
= new CBlockIndex(block
);
2776 // We assign the sequence id to blocks only when the full data is available,
2777 // to avoid miners withholding blocks but broadcasting headers, to get a
2778 // competitive advantage.
2779 pindexNew
->nSequenceId
= 0;
2780 BlockMap::iterator mi
= mapBlockIndex
.insert(make_pair(hash
, pindexNew
)).first
;
2781 pindexNew
->phashBlock
= &((*mi
).first
);
2782 BlockMap::iterator miPrev
= mapBlockIndex
.find(block
.hashPrevBlock
);
2783 if (miPrev
!= mapBlockIndex
.end())
2785 pindexNew
->pprev
= (*miPrev
).second
;
2786 pindexNew
->nHeight
= pindexNew
->pprev
->nHeight
+ 1;
2787 pindexNew
->BuildSkip();
2789 pindexNew
->nChainWork
= (pindexNew
->pprev
? pindexNew
->pprev
->nChainWork
: 0) + GetBlockProof(*pindexNew
);
2790 pindexNew
->RaiseValidity(BLOCK_VALID_TREE
);
2791 if (pindexBestHeader
== NULL
|| pindexBestHeader
->nChainWork
< pindexNew
->nChainWork
)
2792 pindexBestHeader
= pindexNew
;
2794 setDirtyBlockIndex
.insert(pindexNew
);
2799 /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
2800 bool ReceivedBlockTransactions(const CBlock
&block
, CValidationState
& state
, CBlockIndex
*pindexNew
, const CDiskBlockPos
& pos
)
2802 pindexNew
->nTx
= block
.vtx
.size();
2803 pindexNew
->nChainTx
= 0;
2804 pindexNew
->nFile
= pos
.nFile
;
2805 pindexNew
->nDataPos
= pos
.nPos
;
2806 pindexNew
->nUndoPos
= 0;
2807 pindexNew
->nStatus
|= BLOCK_HAVE_DATA
;
2808 pindexNew
->RaiseValidity(BLOCK_VALID_TRANSACTIONS
);
2809 setDirtyBlockIndex
.insert(pindexNew
);
2811 if (pindexNew
->pprev
== NULL
|| pindexNew
->pprev
->nChainTx
) {
2812 // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
2813 deque
<CBlockIndex
*> queue
;
2814 queue
.push_back(pindexNew
);
2816 // Recursively process any descendant blocks that now may be eligible to be connected.
2817 while (!queue
.empty()) {
2818 CBlockIndex
*pindex
= queue
.front();
2820 pindex
->nChainTx
= (pindex
->pprev
? pindex
->pprev
->nChainTx
: 0) + pindex
->nTx
;
2822 LOCK(cs_nBlockSequenceId
);
2823 pindex
->nSequenceId
= nBlockSequenceId
++;
2825 if (chainActive
.Tip() == NULL
|| !setBlockIndexCandidates
.value_comp()(pindex
, chainActive
.Tip())) {
2826 setBlockIndexCandidates
.insert(pindex
);
2828 std::pair
<std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
, std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
> range
= mapBlocksUnlinked
.equal_range(pindex
);
2829 while (range
.first
!= range
.second
) {
2830 std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator it
= range
.first
;
2831 queue
.push_back(it
->second
);
2833 mapBlocksUnlinked
.erase(it
);
2837 if (pindexNew
->pprev
&& pindexNew
->pprev
->IsValid(BLOCK_VALID_TREE
)) {
2838 mapBlocksUnlinked
.insert(std::make_pair(pindexNew
->pprev
, pindexNew
));
2845 bool FindBlockPos(CValidationState
&state
, CDiskBlockPos
&pos
, unsigned int nAddSize
, unsigned int nHeight
, uint64_t nTime
, bool fKnown
= false)
2847 LOCK(cs_LastBlockFile
);
2849 unsigned int nFile
= fKnown
? pos
.nFile
: nLastBlockFile
;
2850 if (vinfoBlockFile
.size() <= nFile
) {
2851 vinfoBlockFile
.resize(nFile
+ 1);
2855 while (vinfoBlockFile
[nFile
].nSize
+ nAddSize
>= MAX_BLOCKFILE_SIZE
) {
2857 if (vinfoBlockFile
.size() <= nFile
) {
2858 vinfoBlockFile
.resize(nFile
+ 1);
2862 pos
.nPos
= vinfoBlockFile
[nFile
].nSize
;
2865 if ((int)nFile
!= nLastBlockFile
) {
2867 LogPrintf("Leaving block file %i: %s\n", nLastBlockFile
, vinfoBlockFile
[nLastBlockFile
].ToString());
2869 FlushBlockFile(!fKnown
);
2870 nLastBlockFile
= nFile
;
2873 vinfoBlockFile
[nFile
].AddBlock(nHeight
, nTime
);
2875 vinfoBlockFile
[nFile
].nSize
= std::max(pos
.nPos
+ nAddSize
, vinfoBlockFile
[nFile
].nSize
);
2877 vinfoBlockFile
[nFile
].nSize
+= nAddSize
;
2880 unsigned int nOldChunks
= (pos
.nPos
+ BLOCKFILE_CHUNK_SIZE
- 1) / BLOCKFILE_CHUNK_SIZE
;
2881 unsigned int nNewChunks
= (vinfoBlockFile
[nFile
].nSize
+ BLOCKFILE_CHUNK_SIZE
- 1) / BLOCKFILE_CHUNK_SIZE
;
2882 if (nNewChunks
> nOldChunks
) {
2884 fCheckForPruning
= true;
2885 if (CheckDiskSpace(nNewChunks
* BLOCKFILE_CHUNK_SIZE
- pos
.nPos
)) {
2886 FILE *file
= OpenBlockFile(pos
);
2888 LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks
* BLOCKFILE_CHUNK_SIZE
, pos
.nFile
);
2889 AllocateFileRange(file
, pos
.nPos
, nNewChunks
* BLOCKFILE_CHUNK_SIZE
- pos
.nPos
);
2894 return state
.Error("out of disk space");
2898 setDirtyFileInfo
.insert(nFile
);
2902 bool FindUndoPos(CValidationState
&state
, int nFile
, CDiskBlockPos
&pos
, unsigned int nAddSize
)
2906 LOCK(cs_LastBlockFile
);
2908 unsigned int nNewSize
;
2909 pos
.nPos
= vinfoBlockFile
[nFile
].nUndoSize
;
2910 nNewSize
= vinfoBlockFile
[nFile
].nUndoSize
+= nAddSize
;
2911 setDirtyFileInfo
.insert(nFile
);
2913 unsigned int nOldChunks
= (pos
.nPos
+ UNDOFILE_CHUNK_SIZE
- 1) / UNDOFILE_CHUNK_SIZE
;
2914 unsigned int nNewChunks
= (nNewSize
+ UNDOFILE_CHUNK_SIZE
- 1) / UNDOFILE_CHUNK_SIZE
;
2915 if (nNewChunks
> nOldChunks
) {
2917 fCheckForPruning
= true;
2918 if (CheckDiskSpace(nNewChunks
* UNDOFILE_CHUNK_SIZE
- pos
.nPos
)) {
2919 FILE *file
= OpenUndoFile(pos
);
2921 LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks
* UNDOFILE_CHUNK_SIZE
, pos
.nFile
);
2922 AllocateFileRange(file
, pos
.nPos
, nNewChunks
* UNDOFILE_CHUNK_SIZE
- pos
.nPos
);
2927 return state
.Error("out of disk space");
2933 bool CheckBlockHeader(const CBlockHeader
& block
, CValidationState
& state
, bool fCheckPOW
)
2935 // Check proof of work matches claimed amount
2936 if (fCheckPOW
&& !CheckProofOfWork(block
.GetHash(), block
.nBits
, Params().GetConsensus()))
2937 return state
.DoS(50, error("CheckBlockHeader(): proof of work failed"),
2938 REJECT_INVALID
, "high-hash");
2941 if (block
.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
2942 return state
.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),
2943 REJECT_INVALID
, "time-too-new");
2948 bool CheckBlock(const CBlock
& block
, CValidationState
& state
, bool fCheckPOW
, bool fCheckMerkleRoot
)
2950 // These are checks that are independent of context.
2955 // Check that the header is valid (particularly PoW). This is mostly
2956 // redundant with the call in AcceptBlockHeader.
2957 if (!CheckBlockHeader(block
, state
, fCheckPOW
))
2960 // Check the merkle root.
2961 if (fCheckMerkleRoot
) {
2963 uint256 hashMerkleRoot2
= BlockMerkleRoot(block
, &mutated
);
2964 if (block
.hashMerkleRoot
!= hashMerkleRoot2
)
2965 return state
.DoS(100, error("CheckBlock(): hashMerkleRoot mismatch"),
2966 REJECT_INVALID
, "bad-txnmrklroot", true);
2968 // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
2969 // of transactions in a block without affecting the merkle root of a block,
2970 // while still invalidating it.
2972 return state
.DoS(100, error("CheckBlock(): duplicate transaction"),
2973 REJECT_INVALID
, "bad-txns-duplicate", true);
2976 // All potential-corruption validation must be done before we do any
2977 // transaction validation, as otherwise we may mark the header as invalid
2978 // because we receive the wrong transactions for it.
2981 if (block
.vtx
.empty() || block
.vtx
.size() > MAX_BLOCK_SIZE
|| ::GetSerializeSize(block
, SER_NETWORK
, PROTOCOL_VERSION
) > MAX_BLOCK_SIZE
)
2982 return state
.DoS(100, error("CheckBlock(): size limits failed"),
2983 REJECT_INVALID
, "bad-blk-length");
2985 // First transaction must be coinbase, the rest must not be
2986 if (block
.vtx
.empty() || !block
.vtx
[0].IsCoinBase())
2987 return state
.DoS(100, error("CheckBlock(): first tx is not coinbase"),
2988 REJECT_INVALID
, "bad-cb-missing");
2989 for (unsigned int i
= 1; i
< block
.vtx
.size(); i
++)
2990 if (block
.vtx
[i
].IsCoinBase())
2991 return state
.DoS(100, error("CheckBlock(): more than one coinbase"),
2992 REJECT_INVALID
, "bad-cb-multiple");
2994 // Check transactions
2995 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
)
2996 if (!CheckTransaction(tx
, state
))
2997 return error("CheckBlock(): CheckTransaction of %s failed with %s",
2998 tx
.GetHash().ToString(),
2999 FormatStateMessage(state
));
3001 unsigned int nSigOps
= 0;
3002 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
)
3004 nSigOps
+= GetLegacySigOpCount(tx
);
3006 if (nSigOps
> MAX_BLOCK_SIGOPS
)
3007 return state
.DoS(100, error("CheckBlock(): out-of-bounds SigOpCount"),
3008 REJECT_INVALID
, "bad-blk-sigops", true);
3010 if (fCheckPOW
&& fCheckMerkleRoot
)
3011 block
.fChecked
= true;
3016 static bool CheckIndexAgainstCheckpoint(const CBlockIndex
* pindexPrev
, CValidationState
& state
, const CChainParams
& chainparams
, const uint256
& hash
)
3018 if (*pindexPrev
->phashBlock
== chainparams
.GetConsensus().hashGenesisBlock
)
3021 int nHeight
= pindexPrev
->nHeight
+1;
3022 // Don't accept any forks from the main chain prior to last checkpoint
3023 CBlockIndex
* pcheckpoint
= Checkpoints::GetLastCheckpoint(chainparams
.Checkpoints());
3024 if (pcheckpoint
&& nHeight
< pcheckpoint
->nHeight
)
3025 return state
.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__
, nHeight
));
3030 bool ContextualCheckBlockHeader(const CBlockHeader
& block
, CValidationState
& state
, CBlockIndex
* const pindexPrev
)
3032 const Consensus::Params
& consensusParams
= Params().GetConsensus();
3033 // Check proof of work
3034 if (block
.nBits
!= GetNextWorkRequired(pindexPrev
, &block
, consensusParams
))
3035 return state
.DoS(100, error("%s: incorrect proof of work", __func__
),
3036 REJECT_INVALID
, "bad-diffbits");
3038 // Check timestamp against prev
3039 if (block
.GetBlockTime() <= pindexPrev
->GetMedianTimePast())
3040 return state
.Invalid(error("%s: block's timestamp is too early", __func__
),
3041 REJECT_INVALID
, "time-too-old");
3043 // Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded:
3044 if (block
.nVersion
< 2 && IsSuperMajority(2, pindexPrev
, consensusParams
.nMajorityRejectBlockOutdated
, consensusParams
))
3045 return state
.Invalid(error("%s: rejected nVersion=1 block", __func__
),
3046 REJECT_OBSOLETE
, "bad-version");
3048 // Reject block.nVersion=2 blocks when 95% (75% on testnet) of the network has upgraded:
3049 if (block
.nVersion
< 3 && IsSuperMajority(3, pindexPrev
, consensusParams
.nMajorityRejectBlockOutdated
, consensusParams
))
3050 return state
.Invalid(error("%s: rejected nVersion=2 block", __func__
),
3051 REJECT_OBSOLETE
, "bad-version");
3053 // Reject block.nVersion=3 blocks when 95% (75% on testnet) of the network has upgraded:
3054 if (block
.nVersion
< 4 && IsSuperMajority(4, pindexPrev
, consensusParams
.nMajorityRejectBlockOutdated
, consensusParams
))
3055 return state
.Invalid(error("%s : rejected nVersion=3 block", __func__
),
3056 REJECT_OBSOLETE
, "bad-version");
3061 bool ContextualCheckBlock(const CBlock
& block
, CValidationState
& state
, CBlockIndex
* const pindexPrev
)
3063 const int nHeight
= pindexPrev
== NULL
? 0 : pindexPrev
->nHeight
+ 1;
3064 const Consensus::Params
& consensusParams
= Params().GetConsensus();
3066 // Check that all transactions are finalized
3067 BOOST_FOREACH(const CTransaction
& tx
, block
.vtx
) {
3068 int nLockTimeFlags
= 0;
3069 int64_t nLockTimeCutoff
= (nLockTimeFlags
& LOCKTIME_MEDIAN_TIME_PAST
)
3070 ? pindexPrev
->GetMedianTimePast()
3071 : block
.GetBlockTime();
3072 if (!IsFinalTx(tx
, nHeight
, nLockTimeCutoff
)) {
3073 return state
.DoS(10, error("%s: contains a non-final transaction", __func__
), REJECT_INVALID
, "bad-txns-nonfinal");
3077 // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
3078 // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
3079 if (block
.nVersion
>= 2 && IsSuperMajority(2, pindexPrev
, consensusParams
.nMajorityEnforceBlockUpgrade
, consensusParams
))
3081 CScript expect
= CScript() << nHeight
;
3082 if (block
.vtx
[0].vin
[0].scriptSig
.size() < expect
.size() ||
3083 !std::equal(expect
.begin(), expect
.end(), block
.vtx
[0].vin
[0].scriptSig
.begin())) {
3084 return state
.DoS(100, error("%s: block height mismatch in coinbase", __func__
), REJECT_INVALID
, "bad-cb-height");
3091 static bool AcceptBlockHeader(const CBlockHeader
& block
, CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
** ppindex
=NULL
)
3093 AssertLockHeld(cs_main
);
3094 // Check for duplicate
3095 uint256 hash
= block
.GetHash();
3096 BlockMap::iterator miSelf
= mapBlockIndex
.find(hash
);
3097 CBlockIndex
*pindex
= NULL
;
3098 if (hash
!= chainparams
.GetConsensus().hashGenesisBlock
) {
3100 if (miSelf
!= mapBlockIndex
.end()) {
3101 // Block header is already known.
3102 pindex
= miSelf
->second
;
3105 if (pindex
->nStatus
& BLOCK_FAILED_MASK
)
3106 return state
.Invalid(error("%s: block is marked invalid", __func__
), 0, "duplicate");
3110 if (!CheckBlockHeader(block
, state
))
3113 // Get prev block index
3114 CBlockIndex
* pindexPrev
= NULL
;
3115 BlockMap::iterator mi
= mapBlockIndex
.find(block
.hashPrevBlock
);
3116 if (mi
== mapBlockIndex
.end())
3117 return state
.DoS(10, error("%s: prev block not found", __func__
), 0, "bad-prevblk");
3118 pindexPrev
= (*mi
).second
;
3119 if (pindexPrev
->nStatus
& BLOCK_FAILED_MASK
)
3120 return state
.DoS(100, error("%s: prev block invalid", __func__
), REJECT_INVALID
, "bad-prevblk");
3123 if (fCheckpointsEnabled
&& !CheckIndexAgainstCheckpoint(pindexPrev
, state
, chainparams
, hash
))
3124 return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__
, state
.GetRejectReason().c_str());
3126 if (!ContextualCheckBlockHeader(block
, state
, pindexPrev
))
3130 pindex
= AddToBlockIndex(block
);
3138 /** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
3139 static bool AcceptBlock(const CBlock
& block
, CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
** ppindex
, bool fRequested
, CDiskBlockPos
* dbp
)
3141 AssertLockHeld(cs_main
);
3143 CBlockIndex
*&pindex
= *ppindex
;
3145 if (!AcceptBlockHeader(block
, state
, chainparams
, &pindex
))
3148 // Try to process all requested blocks that we don't have, but only
3149 // process an unrequested block if it's new and has enough work to
3150 // advance our tip, and isn't too many blocks ahead.
3151 bool fAlreadyHave
= pindex
->nStatus
& BLOCK_HAVE_DATA
;
3152 bool fHasMoreWork
= (chainActive
.Tip() ? pindex
->nChainWork
> chainActive
.Tip()->nChainWork
: true);
3153 // Blocks that are too out-of-order needlessly limit the effectiveness of
3154 // pruning, because pruning will not delete block files that contain any
3155 // blocks which are too close in height to the tip. Apply this test
3156 // regardless of whether pruning is enabled; it should generally be safe to
3157 // not process unrequested blocks.
3158 bool fTooFarAhead
= (pindex
->nHeight
> int(chainActive
.Height() + MIN_BLOCKS_TO_KEEP
));
3160 // TODO: deal better with return value and error conditions for duplicate
3161 // and unrequested blocks.
3162 if (fAlreadyHave
) return true;
3163 if (!fRequested
) { // If we didn't ask for it:
3164 if (pindex
->nTx
!= 0) return true; // This is a previously-processed block that was pruned
3165 if (!fHasMoreWork
) return true; // Don't process less-work chains
3166 if (fTooFarAhead
) return true; // Block height is too high
3169 if ((!CheckBlock(block
, state
)) || !ContextualCheckBlock(block
, state
, pindex
->pprev
)) {
3170 if (state
.IsInvalid() && !state
.CorruptionPossible()) {
3171 pindex
->nStatus
|= BLOCK_FAILED_VALID
;
3172 setDirtyBlockIndex
.insert(pindex
);
3177 int nHeight
= pindex
->nHeight
;
3179 // Write block to history file
3181 unsigned int nBlockSize
= ::GetSerializeSize(block
, SER_DISK
, CLIENT_VERSION
);
3182 CDiskBlockPos blockPos
;
3185 if (!FindBlockPos(state
, blockPos
, nBlockSize
+8, nHeight
, block
.GetBlockTime(), dbp
!= NULL
))
3186 return error("AcceptBlock(): FindBlockPos failed");
3188 if (!WriteBlockToDisk(block
, blockPos
, chainparams
.MessageStart()))
3189 AbortNode(state
, "Failed to write block");
3190 if (!ReceivedBlockTransactions(block
, state
, pindex
, blockPos
))
3191 return error("AcceptBlock(): ReceivedBlockTransactions failed");
3192 } catch (const std::runtime_error
& e
) {
3193 return AbortNode(state
, std::string("System error: ") + e
.what());
3196 if (fCheckForPruning
)
3197 FlushStateToDisk(state
, FLUSH_STATE_NONE
); // we just allocated more disk space for block files
3202 static bool IsSuperMajority(int minVersion
, const CBlockIndex
* pstart
, unsigned nRequired
, const Consensus::Params
& consensusParams
)
3204 unsigned int nFound
= 0;
3205 for (int i
= 0; i
< consensusParams
.nMajorityWindow
&& nFound
< nRequired
&& pstart
!= NULL
; i
++)
3207 if (pstart
->nVersion
>= minVersion
)
3209 pstart
= pstart
->pprev
;
3211 return (nFound
>= nRequired
);
3215 bool ProcessNewBlock(CValidationState
& state
, const CChainParams
& chainparams
, const CNode
* pfrom
, const CBlock
* pblock
, bool fForceProcessing
, CDiskBlockPos
* dbp
)
3217 // Preliminary checks
3218 bool checked
= CheckBlock(*pblock
, state
);
3222 bool fRequested
= MarkBlockAsReceived(pblock
->GetHash());
3223 fRequested
|= fForceProcessing
;
3225 return error("%s: CheckBlock FAILED", __func__
);
3229 CBlockIndex
*pindex
= NULL
;
3230 bool ret
= AcceptBlock(*pblock
, state
, chainparams
, &pindex
, fRequested
, dbp
);
3231 if (pindex
&& pfrom
) {
3232 mapBlockSource
[pindex
->GetBlockHash()] = pfrom
->GetId();
3234 CheckBlockIndex(chainparams
.GetConsensus());
3236 return error("%s: AcceptBlock FAILED", __func__
);
3239 if (!ActivateBestChain(state
, chainparams
, pblock
))
3240 return error("%s: ActivateBestChain failed", __func__
);
3245 bool TestBlockValidity(CValidationState
& state
, const CChainParams
& chainparams
, const CBlock
& block
, CBlockIndex
* pindexPrev
, bool fCheckPOW
, bool fCheckMerkleRoot
)
3247 AssertLockHeld(cs_main
);
3248 assert(pindexPrev
&& pindexPrev
== chainActive
.Tip());
3249 if (fCheckpointsEnabled
&& !CheckIndexAgainstCheckpoint(pindexPrev
, state
, chainparams
, block
.GetHash()))
3250 return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__
, state
.GetRejectReason().c_str());
3252 CCoinsViewCache
viewNew(pcoinsTip
);
3253 CBlockIndex
indexDummy(block
);
3254 indexDummy
.pprev
= pindexPrev
;
3255 indexDummy
.nHeight
= pindexPrev
->nHeight
+ 1;
3257 // NOTE: CheckBlockHeader is called by CheckBlock
3258 if (!ContextualCheckBlockHeader(block
, state
, pindexPrev
))
3260 if (!CheckBlock(block
, state
, fCheckPOW
, fCheckMerkleRoot
))
3262 if (!ContextualCheckBlock(block
, state
, pindexPrev
))
3264 if (!ConnectBlock(block
, state
, &indexDummy
, viewNew
, true))
3266 assert(state
.IsValid());
3272 * BLOCK PRUNING CODE
3275 /* Calculate the amount of disk space the block & undo files currently use */
3276 uint64_t CalculateCurrentUsage()
3278 uint64_t retval
= 0;
3279 BOOST_FOREACH(const CBlockFileInfo
&file
, vinfoBlockFile
) {
3280 retval
+= file
.nSize
+ file
.nUndoSize
;
3285 /* Prune a block file (modify associated database entries)*/
3286 void PruneOneBlockFile(const int fileNumber
)
3288 for (BlockMap::iterator it
= mapBlockIndex
.begin(); it
!= mapBlockIndex
.end(); ++it
) {
3289 CBlockIndex
* pindex
= it
->second
;
3290 if (pindex
->nFile
== fileNumber
) {
3291 pindex
->nStatus
&= ~BLOCK_HAVE_DATA
;
3292 pindex
->nStatus
&= ~BLOCK_HAVE_UNDO
;
3294 pindex
->nDataPos
= 0;
3295 pindex
->nUndoPos
= 0;
3296 setDirtyBlockIndex
.insert(pindex
);
3298 // Prune from mapBlocksUnlinked -- any block we prune would have
3299 // to be downloaded again in order to consider its chain, at which
3300 // point it would be considered as a candidate for
3301 // mapBlocksUnlinked or setBlockIndexCandidates.
3302 std::pair
<std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
, std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator
> range
= mapBlocksUnlinked
.equal_range(pindex
->pprev
);
3303 while (range
.first
!= range
.second
) {
3304 std::multimap
<CBlockIndex
*, CBlockIndex
*>::iterator it
= range
.first
;
3306 if (it
->second
== pindex
) {
3307 mapBlocksUnlinked
.erase(it
);
3313 vinfoBlockFile
[fileNumber
].SetNull();
3314 setDirtyFileInfo
.insert(fileNumber
);
3318 void UnlinkPrunedFiles(std::set
<int>& setFilesToPrune
)
3320 for (set
<int>::iterator it
= setFilesToPrune
.begin(); it
!= setFilesToPrune
.end(); ++it
) {
3321 CDiskBlockPos
pos(*it
, 0);
3322 boost::filesystem::remove(GetBlockPosFilename(pos
, "blk"));
3323 boost::filesystem::remove(GetBlockPosFilename(pos
, "rev"));
3324 LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__
, *it
);
3328 /* Calculate the block/rev files that should be deleted to remain under target*/
3329 void FindFilesToPrune(std::set
<int>& setFilesToPrune
, uint64_t nPruneAfterHeight
)
3331 LOCK2(cs_main
, cs_LastBlockFile
);
3332 if (chainActive
.Tip() == NULL
|| nPruneTarget
== 0) {
3335 if ((uint64_t)chainActive
.Tip()->nHeight
<= nPruneAfterHeight
) {
3339 unsigned int nLastBlockWeCanPrune
= chainActive
.Tip()->nHeight
- MIN_BLOCKS_TO_KEEP
;
3340 uint64_t nCurrentUsage
= CalculateCurrentUsage();
3341 // We don't check to prune until after we've allocated new space for files
3342 // So we should leave a buffer under our target to account for another allocation
3343 // before the next pruning.
3344 uint64_t nBuffer
= BLOCKFILE_CHUNK_SIZE
+ UNDOFILE_CHUNK_SIZE
;
3345 uint64_t nBytesToPrune
;
3348 if (nCurrentUsage
+ nBuffer
>= nPruneTarget
) {
3349 for (int fileNumber
= 0; fileNumber
< nLastBlockFile
; fileNumber
++) {
3350 nBytesToPrune
= vinfoBlockFile
[fileNumber
].nSize
+ vinfoBlockFile
[fileNumber
].nUndoSize
;
3352 if (vinfoBlockFile
[fileNumber
].nSize
== 0)
3355 if (nCurrentUsage
+ nBuffer
< nPruneTarget
) // are we below our target?
3358 // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
3359 if (vinfoBlockFile
[fileNumber
].nHeightLast
> nLastBlockWeCanPrune
)
3362 PruneOneBlockFile(fileNumber
);
3363 // Queue up the files for removal
3364 setFilesToPrune
.insert(fileNumber
);
3365 nCurrentUsage
-= nBytesToPrune
;
3370 LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
3371 nPruneTarget
/1024/1024, nCurrentUsage
/1024/1024,
3372 ((int64_t)nPruneTarget
- (int64_t)nCurrentUsage
)/1024/1024,
3373 nLastBlockWeCanPrune
, count
);
3376 bool CheckDiskSpace(uint64_t nAdditionalBytes
)
3378 uint64_t nFreeBytesAvailable
= boost::filesystem::space(GetDataDir()).available
;
3380 // Check for nMinDiskSpace bytes (currently 50MB)
3381 if (nFreeBytesAvailable
< nMinDiskSpace
+ nAdditionalBytes
)
3382 return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
3387 FILE* OpenDiskFile(const CDiskBlockPos
&pos
, const char *prefix
, bool fReadOnly
)
3391 boost::filesystem::path path
= GetBlockPosFilename(pos
, prefix
);
3392 boost::filesystem::create_directories(path
.parent_path());
3393 FILE* file
= fopen(path
.string().c_str(), "rb+");
3394 if (!file
&& !fReadOnly
)
3395 file
= fopen(path
.string().c_str(), "wb+");
3397 LogPrintf("Unable to open file %s\n", path
.string());
3401 if (fseek(file
, pos
.nPos
, SEEK_SET
)) {
3402 LogPrintf("Unable to seek to position %u of %s\n", pos
.nPos
, path
.string());
3410 FILE* OpenBlockFile(const CDiskBlockPos
&pos
, bool fReadOnly
) {
3411 return OpenDiskFile(pos
, "blk", fReadOnly
);
3414 FILE* OpenUndoFile(const CDiskBlockPos
&pos
, bool fReadOnly
) {
3415 return OpenDiskFile(pos
, "rev", fReadOnly
);
3418 boost::filesystem::path
GetBlockPosFilename(const CDiskBlockPos
&pos
, const char *prefix
)
3420 return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix
, pos
.nFile
);
3423 CBlockIndex
* InsertBlockIndex(uint256 hash
)
3429 BlockMap::iterator mi
= mapBlockIndex
.find(hash
);
3430 if (mi
!= mapBlockIndex
.end())
3431 return (*mi
).second
;
3434 CBlockIndex
* pindexNew
= new CBlockIndex();
3436 throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
3437 mi
= mapBlockIndex
.insert(make_pair(hash
, pindexNew
)).first
;
3438 pindexNew
->phashBlock
= &((*mi
).first
);
3443 bool static LoadBlockIndexDB()
3445 const CChainParams
& chainparams
= Params();
3446 if (!pblocktree
->LoadBlockIndexGuts())
3449 boost::this_thread::interruption_point();
3451 // Calculate nChainWork
3452 vector
<pair
<int, CBlockIndex
*> > vSortedByHeight
;
3453 vSortedByHeight
.reserve(mapBlockIndex
.size());
3454 BOOST_FOREACH(const PAIRTYPE(uint256
, CBlockIndex
*)& item
, mapBlockIndex
)
3456 CBlockIndex
* pindex
= item
.second
;
3457 vSortedByHeight
.push_back(make_pair(pindex
->nHeight
, pindex
));
3459 sort(vSortedByHeight
.begin(), vSortedByHeight
.end());
3460 BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex
*)& item
, vSortedByHeight
)
3462 CBlockIndex
* pindex
= item
.second
;
3463 pindex
->nChainWork
= (pindex
->pprev
? pindex
->pprev
->nChainWork
: 0) + GetBlockProof(*pindex
);
3464 // We can link the chain of blocks for which we've received transactions at some point.
3465 // Pruned nodes may have deleted the block.
3466 if (pindex
->nTx
> 0) {
3467 if (pindex
->pprev
) {
3468 if (pindex
->pprev
->nChainTx
) {
3469 pindex
->nChainTx
= pindex
->pprev
->nChainTx
+ pindex
->nTx
;
3471 pindex
->nChainTx
= 0;
3472 mapBlocksUnlinked
.insert(std::make_pair(pindex
->pprev
, pindex
));
3475 pindex
->nChainTx
= pindex
->nTx
;
3478 if (pindex
->IsValid(BLOCK_VALID_TRANSACTIONS
) && (pindex
->nChainTx
|| pindex
->pprev
== NULL
))
3479 setBlockIndexCandidates
.insert(pindex
);
3480 if (pindex
->nStatus
& BLOCK_FAILED_MASK
&& (!pindexBestInvalid
|| pindex
->nChainWork
> pindexBestInvalid
->nChainWork
))
3481 pindexBestInvalid
= pindex
;
3483 pindex
->BuildSkip();
3484 if (pindex
->IsValid(BLOCK_VALID_TREE
) && (pindexBestHeader
== NULL
|| CBlockIndexWorkComparator()(pindexBestHeader
, pindex
)))
3485 pindexBestHeader
= pindex
;
3488 // Load block file info
3489 pblocktree
->ReadLastBlockFile(nLastBlockFile
);
3490 vinfoBlockFile
.resize(nLastBlockFile
+ 1);
3491 LogPrintf("%s: last block file = %i\n", __func__
, nLastBlockFile
);
3492 for (int nFile
= 0; nFile
<= nLastBlockFile
; nFile
++) {
3493 pblocktree
->ReadBlockFileInfo(nFile
, vinfoBlockFile
[nFile
]);
3495 LogPrintf("%s: last block file info: %s\n", __func__
, vinfoBlockFile
[nLastBlockFile
].ToString());
3496 for (int nFile
= nLastBlockFile
+ 1; true; nFile
++) {
3497 CBlockFileInfo info
;
3498 if (pblocktree
->ReadBlockFileInfo(nFile
, info
)) {
3499 vinfoBlockFile
.push_back(info
);
3505 // Check presence of blk files
3506 LogPrintf("Checking all blk files are present...\n");
3507 set
<int> setBlkDataFiles
;
3508 BOOST_FOREACH(const PAIRTYPE(uint256
, CBlockIndex
*)& item
, mapBlockIndex
)
3510 CBlockIndex
* pindex
= item
.second
;
3511 if (pindex
->nStatus
& BLOCK_HAVE_DATA
) {
3512 setBlkDataFiles
.insert(pindex
->nFile
);
3515 for (std::set
<int>::iterator it
= setBlkDataFiles
.begin(); it
!= setBlkDataFiles
.end(); it
++)
3517 CDiskBlockPos
pos(*it
, 0);
3518 if (CAutoFile(OpenBlockFile(pos
, true), SER_DISK
, CLIENT_VERSION
).IsNull()) {
3523 // Check whether we have ever pruned block & undo files
3524 pblocktree
->ReadFlag("prunedblockfiles", fHavePruned
);
3526 LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
3528 // Check whether we need to continue reindexing
3529 bool fReindexing
= false;
3530 pblocktree
->ReadReindexing(fReindexing
);
3531 fReindex
|= fReindexing
;
3533 // Check whether we have a transaction index
3534 pblocktree
->ReadFlag("txindex", fTxIndex
);
3535 LogPrintf("%s: transaction index %s\n", __func__
, fTxIndex
? "enabled" : "disabled");
3537 // Load pointer to end of best chain
3538 BlockMap::iterator it
= mapBlockIndex
.find(pcoinsTip
->GetBestBlock());
3539 if (it
== mapBlockIndex
.end())
3541 chainActive
.SetTip(it
->second
);
3543 PruneBlockIndexCandidates();
3545 LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__
,
3546 chainActive
.Tip()->GetBlockHash().ToString(), chainActive
.Height(),
3547 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive
.Tip()->GetBlockTime()),
3548 Checkpoints::GuessVerificationProgress(chainparams
.Checkpoints(), chainActive
.Tip()));
3553 CVerifyDB::CVerifyDB()
3555 uiInterface
.ShowProgress(_("Verifying blocks..."), 0);
3558 CVerifyDB::~CVerifyDB()
3560 uiInterface
.ShowProgress("", 100);
3563 bool CVerifyDB::VerifyDB(const CChainParams
& chainparams
, CCoinsView
*coinsview
, int nCheckLevel
, int nCheckDepth
)
3566 if (chainActive
.Tip() == NULL
|| chainActive
.Tip()->pprev
== NULL
)
3569 // Verify blocks in the best chain
3570 if (nCheckDepth
<= 0)
3571 nCheckDepth
= 1000000000; // suffices until the year 19000
3572 if (nCheckDepth
> chainActive
.Height())
3573 nCheckDepth
= chainActive
.Height();
3574 nCheckLevel
= std::max(0, std::min(4, nCheckLevel
));
3575 LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth
, nCheckLevel
);
3576 CCoinsViewCache
coins(coinsview
);
3577 CBlockIndex
* pindexState
= chainActive
.Tip();
3578 CBlockIndex
* pindexFailure
= NULL
;
3579 int nGoodTransactions
= 0;
3580 CValidationState state
;
3581 for (CBlockIndex
* pindex
= chainActive
.Tip(); pindex
&& pindex
->pprev
; pindex
= pindex
->pprev
)
3583 boost::this_thread::interruption_point();
3584 uiInterface
.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive
.Height() - pindex
->nHeight
)) / (double)nCheckDepth
* (nCheckLevel
>= 4 ? 50 : 100)))));
3585 if (pindex
->nHeight
< chainActive
.Height()-nCheckDepth
)
3588 // check level 0: read from disk
3589 if (!ReadBlockFromDisk(block
, pindex
, chainparams
.GetConsensus()))
3590 return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3591 // check level 1: verify block validity
3592 if (nCheckLevel
>= 1 && !CheckBlock(block
, state
))
3593 return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3594 // check level 2: verify undo validity
3595 if (nCheckLevel
>= 2 && pindex
) {
3597 CDiskBlockPos pos
= pindex
->GetUndoPos();
3598 if (!pos
.IsNull()) {
3599 if (!UndoReadFromDisk(undo
, pos
, pindex
->pprev
->GetBlockHash()))
3600 return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3603 // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
3604 if (nCheckLevel
>= 3 && pindex
== pindexState
&& (coins
.DynamicMemoryUsage() + pcoinsTip
->DynamicMemoryUsage()) <= nCoinCacheUsage
) {
3606 if (!DisconnectBlock(block
, state
, pindex
, coins
, &fClean
))
3607 return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3608 pindexState
= pindex
->pprev
;
3610 nGoodTransactions
= 0;
3611 pindexFailure
= pindex
;
3613 nGoodTransactions
+= block
.vtx
.size();
3615 if (ShutdownRequested())
3619 return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive
.Height() - pindexFailure
->nHeight
+ 1, nGoodTransactions
);
3621 // check level 4: try reconnecting blocks
3622 if (nCheckLevel
>= 4) {
3623 CBlockIndex
*pindex
= pindexState
;
3624 while (pindex
!= chainActive
.Tip()) {
3625 boost::this_thread::interruption_point();
3626 uiInterface
.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive
.Height() - pindex
->nHeight
)) / (double)nCheckDepth
* 50))));
3627 pindex
= chainActive
.Next(pindex
);
3629 if (!ReadBlockFromDisk(block
, pindex
, chainparams
.GetConsensus()))
3630 return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3631 if (!ConnectBlock(block
, state
, pindex
, coins
))
3632 return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
3636 LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive
.Height() - pindexState
->nHeight
, nGoodTransactions
);
3641 void UnloadBlockIndex()
3644 setBlockIndexCandidates
.clear();
3645 chainActive
.SetTip(NULL
);
3646 pindexBestInvalid
= NULL
;
3647 pindexBestHeader
= NULL
;
3649 mapOrphanTransactions
.clear();
3650 mapOrphanTransactionsByPrev
.clear();
3652 mapBlocksUnlinked
.clear();
3653 vinfoBlockFile
.clear();
3655 nBlockSequenceId
= 1;
3656 mapBlockSource
.clear();
3657 mapBlocksInFlight
.clear();
3658 nQueuedValidatedHeaders
= 0;
3659 nPreferredDownload
= 0;
3660 setDirtyBlockIndex
.clear();
3661 setDirtyFileInfo
.clear();
3662 mapNodeState
.clear();
3663 recentRejects
.reset(NULL
);
3665 BOOST_FOREACH(BlockMap::value_type
& entry
, mapBlockIndex
) {
3666 delete entry
.second
;
3668 mapBlockIndex
.clear();
3669 fHavePruned
= false;
3672 bool LoadBlockIndex()
3674 // Load block index from databases
3675 if (!fReindex
&& !LoadBlockIndexDB())
3680 bool InitBlockIndex(const CChainParams
& chainparams
)
3684 // Initialize global variables that cannot be constructed at startup.
3685 recentRejects
.reset(new CRollingBloomFilter(120000, 0.000001));
3687 // Check whether we're already initialized
3688 if (chainActive
.Genesis() != NULL
)
3691 // Use the provided setting for -txindex in the new database
3692 fTxIndex
= GetBoolArg("-txindex", DEFAULT_TXINDEX
);
3693 pblocktree
->WriteFlag("txindex", fTxIndex
);
3694 LogPrintf("Initializing databases...\n");
3696 // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
3699 CBlock
&block
= const_cast<CBlock
&>(chainparams
.GenesisBlock());
3700 // Start new block file
3701 unsigned int nBlockSize
= ::GetSerializeSize(block
, SER_DISK
, CLIENT_VERSION
);
3702 CDiskBlockPos blockPos
;
3703 CValidationState state
;
3704 if (!FindBlockPos(state
, blockPos
, nBlockSize
+8, 0, block
.GetBlockTime()))
3705 return error("LoadBlockIndex(): FindBlockPos failed");
3706 if (!WriteBlockToDisk(block
, blockPos
, chainparams
.MessageStart()))
3707 return error("LoadBlockIndex(): writing genesis block to disk failed");
3708 CBlockIndex
*pindex
= AddToBlockIndex(block
);
3709 if (!ReceivedBlockTransactions(block
, state
, pindex
, blockPos
))
3710 return error("LoadBlockIndex(): genesis block not accepted");
3711 if (!ActivateBestChain(state
, chainparams
, &block
))
3712 return error("LoadBlockIndex(): genesis block cannot be activated");
3713 // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
3714 return FlushStateToDisk(state
, FLUSH_STATE_ALWAYS
);
3715 } catch (const std::runtime_error
& e
) {
3716 return error("LoadBlockIndex(): failed to initialize block database: %s", e
.what());
3723 bool LoadExternalBlockFile(const CChainParams
& chainparams
, FILE* fileIn
, CDiskBlockPos
*dbp
)
3725 // Map of disk positions for blocks with unknown parent (only used for reindex)
3726 static std::multimap
<uint256
, CDiskBlockPos
> mapBlocksUnknownParent
;
3727 int64_t nStart
= GetTimeMillis();
3731 // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
3732 CBufferedFile
blkdat(fileIn
, 2*MAX_BLOCK_SIZE
, MAX_BLOCK_SIZE
+8, SER_DISK
, CLIENT_VERSION
);
3733 uint64_t nRewind
= blkdat
.GetPos();
3734 while (!blkdat
.eof()) {
3735 boost::this_thread::interruption_point();
3737 blkdat
.SetPos(nRewind
);
3738 nRewind
++; // start one byte further next time, in case of failure
3739 blkdat
.SetLimit(); // remove former limit
3740 unsigned int nSize
= 0;
3743 unsigned char buf
[MESSAGE_START_SIZE
];
3744 blkdat
.FindByte(chainparams
.MessageStart()[0]);
3745 nRewind
= blkdat
.GetPos()+1;
3746 blkdat
>> FLATDATA(buf
);
3747 if (memcmp(buf
, chainparams
.MessageStart(), MESSAGE_START_SIZE
))
3751 if (nSize
< 80 || nSize
> MAX_BLOCK_SIZE
)
3753 } catch (const std::exception
&) {
3754 // no valid block header found; don't complain
3759 uint64_t nBlockPos
= blkdat
.GetPos();
3761 dbp
->nPos
= nBlockPos
;
3762 blkdat
.SetLimit(nBlockPos
+ nSize
);
3763 blkdat
.SetPos(nBlockPos
);
3766 nRewind
= blkdat
.GetPos();
3768 // detect out of order blocks, and store them for later
3769 uint256 hash
= block
.GetHash();
3770 if (hash
!= chainparams
.GetConsensus().hashGenesisBlock
&& mapBlockIndex
.find(block
.hashPrevBlock
) == mapBlockIndex
.end()) {
3771 LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__
, hash
.ToString(),
3772 block
.hashPrevBlock
.ToString());
3774 mapBlocksUnknownParent
.insert(std::make_pair(block
.hashPrevBlock
, *dbp
));
3778 // process in case the block isn't known yet
3779 if (mapBlockIndex
.count(hash
) == 0 || (mapBlockIndex
[hash
]->nStatus
& BLOCK_HAVE_DATA
) == 0) {
3780 CValidationState state
;
3781 if (ProcessNewBlock(state
, chainparams
, NULL
, &block
, true, dbp
))
3783 if (state
.IsError())
3785 } else if (hash
!= chainparams
.GetConsensus().hashGenesisBlock
&& mapBlockIndex
[hash
]->nHeight
% 1000 == 0) {
3786 LogPrintf("Block Import: already had block %s at height %d\n", hash
.ToString(), mapBlockIndex
[hash
]->nHeight
);
3789 // Recursively process earlier encountered successors of this block
3790 deque
<uint256
> queue
;
3791 queue
.push_back(hash
);
3792 while (!queue
.empty()) {
3793 uint256 head
= queue
.front();
3795 std::pair
<std::multimap
<uint256
, CDiskBlockPos
>::iterator
, std::multimap
<uint256
, CDiskBlockPos
>::iterator
> range
= mapBlocksUnknownParent
.equal_range(head
);
3796 while (range
.first
!= range
.second
) {
3797 std::multimap
<uint256
, CDiskBlockPos
>::iterator it
= range
.first
;
3798 if (ReadBlockFromDisk(block
, it
->second
, chainparams
.GetConsensus()))
3800 LogPrintf("%s: Processing out of order child %s of %s\n", __func__
, block
.GetHash().ToString(),
3802 CValidationState dummy
;
3803 if (ProcessNewBlock(dummy
, chainparams
, NULL
, &block
, true, &it
->second
))
3806 queue
.push_back(block
.GetHash());
3810 mapBlocksUnknownParent
.erase(it
);
3813 } catch (const std::exception
& e
) {
3814 LogPrintf("%s: Deserialize or I/O error - %s\n", __func__
, e
.what());
3817 } catch (const std::runtime_error
& e
) {
3818 AbortNode(std::string("System error: ") + e
.what());
3821 LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded
, GetTimeMillis() - nStart
);
3825 void static CheckBlockIndex(const Consensus::Params
& consensusParams
)
3827 if (!fCheckBlockIndex
) {
3833 // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
3834 // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
3835 // iterating the block tree require that chainActive has been initialized.)
3836 if (chainActive
.Height() < 0) {
3837 assert(mapBlockIndex
.size() <= 1);
3841 // Build forward-pointing map of the entire block tree.
3842 std::multimap
<CBlockIndex
*,CBlockIndex
*> forward
;
3843 for (BlockMap::iterator it
= mapBlockIndex
.begin(); it
!= mapBlockIndex
.end(); it
++) {
3844 forward
.insert(std::make_pair(it
->second
->pprev
, it
->second
));
3847 assert(forward
.size() == mapBlockIndex
.size());
3849 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> rangeGenesis
= forward
.equal_range(NULL
);
3850 CBlockIndex
*pindex
= rangeGenesis
.first
->second
;
3851 rangeGenesis
.first
++;
3852 assert(rangeGenesis
.first
== rangeGenesis
.second
); // There is only one index entry with parent NULL.
3854 // Iterate over the entire block tree, using depth-first search.
3855 // Along the way, remember whether there are blocks on the path from genesis
3856 // block being explored which are the first to have certain properties.
3859 CBlockIndex
* pindexFirstInvalid
= NULL
; // Oldest ancestor of pindex which is invalid.
3860 CBlockIndex
* pindexFirstMissing
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
3861 CBlockIndex
* pindexFirstNeverProcessed
= NULL
; // Oldest ancestor of pindex for which nTx == 0.
3862 CBlockIndex
* pindexFirstNotTreeValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
3863 CBlockIndex
* pindexFirstNotTransactionsValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
3864 CBlockIndex
* pindexFirstNotChainValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
3865 CBlockIndex
* pindexFirstNotScriptsValid
= NULL
; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
3866 while (pindex
!= NULL
) {
3868 if (pindexFirstInvalid
== NULL
&& pindex
->nStatus
& BLOCK_FAILED_VALID
) pindexFirstInvalid
= pindex
;
3869 if (pindexFirstMissing
== NULL
&& !(pindex
->nStatus
& BLOCK_HAVE_DATA
)) pindexFirstMissing
= pindex
;
3870 if (pindexFirstNeverProcessed
== NULL
&& pindex
->nTx
== 0) pindexFirstNeverProcessed
= pindex
;
3871 if (pindex
->pprev
!= NULL
&& pindexFirstNotTreeValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_TREE
) pindexFirstNotTreeValid
= pindex
;
3872 if (pindex
->pprev
!= NULL
&& pindexFirstNotTransactionsValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_TRANSACTIONS
) pindexFirstNotTransactionsValid
= pindex
;
3873 if (pindex
->pprev
!= NULL
&& pindexFirstNotChainValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_CHAIN
) pindexFirstNotChainValid
= pindex
;
3874 if (pindex
->pprev
!= NULL
&& pindexFirstNotScriptsValid
== NULL
&& (pindex
->nStatus
& BLOCK_VALID_MASK
) < BLOCK_VALID_SCRIPTS
) pindexFirstNotScriptsValid
= pindex
;
3876 // Begin: actual consistency checks.
3877 if (pindex
->pprev
== NULL
) {
3878 // Genesis block checks.
3879 assert(pindex
->GetBlockHash() == consensusParams
.hashGenesisBlock
); // Genesis block's hash must match.
3880 assert(pindex
== chainActive
.Genesis()); // The current active chain's genesis block must be this block.
3882 if (pindex
->nChainTx
== 0) assert(pindex
->nSequenceId
== 0); // nSequenceId can't be set for blocks that aren't linked
3883 // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
3884 // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
3886 // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
3887 assert(!(pindex
->nStatus
& BLOCK_HAVE_DATA
) == (pindex
->nTx
== 0));
3888 assert(pindexFirstMissing
== pindexFirstNeverProcessed
);
3890 // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
3891 if (pindex
->nStatus
& BLOCK_HAVE_DATA
) assert(pindex
->nTx
> 0);
3893 if (pindex
->nStatus
& BLOCK_HAVE_UNDO
) assert(pindex
->nStatus
& BLOCK_HAVE_DATA
);
3894 assert(((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_TRANSACTIONS
) == (pindex
->nTx
> 0)); // This is pruning-independent.
3895 // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
3896 assert((pindexFirstNeverProcessed
!= NULL
) == (pindex
->nChainTx
== 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
3897 assert((pindexFirstNotTransactionsValid
!= NULL
) == (pindex
->nChainTx
== 0));
3898 assert(pindex
->nHeight
== nHeight
); // nHeight must be consistent.
3899 assert(pindex
->pprev
== NULL
|| pindex
->nChainWork
>= pindex
->pprev
->nChainWork
); // For every block except the genesis block, the chainwork must be larger than the parent's.
3900 assert(nHeight
< 2 || (pindex
->pskip
&& (pindex
->pskip
->nHeight
< nHeight
))); // The pskip pointer must point back for all but the first 2 blocks.
3901 assert(pindexFirstNotTreeValid
== NULL
); // All mapBlockIndex entries must at least be TREE valid
3902 if ((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_TREE
) assert(pindexFirstNotTreeValid
== NULL
); // TREE valid implies all parents are TREE valid
3903 if ((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_CHAIN
) assert(pindexFirstNotChainValid
== NULL
); // CHAIN valid implies all parents are CHAIN valid
3904 if ((pindex
->nStatus
& BLOCK_VALID_MASK
) >= BLOCK_VALID_SCRIPTS
) assert(pindexFirstNotScriptsValid
== NULL
); // SCRIPTS valid implies all parents are SCRIPTS valid
3905 if (pindexFirstInvalid
== NULL
) {
3906 // Checks for not-invalid blocks.
3907 assert((pindex
->nStatus
& BLOCK_FAILED_MASK
) == 0); // The failed mask cannot be set for blocks without invalid parents.
3909 if (!CBlockIndexWorkComparator()(pindex
, chainActive
.Tip()) && pindexFirstNeverProcessed
== NULL
) {
3910 if (pindexFirstInvalid
== NULL
) {
3911 // If this block sorts at least as good as the current tip and
3912 // is valid and we have all data for its parents, it must be in
3913 // setBlockIndexCandidates. chainActive.Tip() must also be there
3914 // even if some data has been pruned.
3915 if (pindexFirstMissing
== NULL
|| pindex
== chainActive
.Tip()) {
3916 assert(setBlockIndexCandidates
.count(pindex
));
3918 // If some parent is missing, then it could be that this block was in
3919 // setBlockIndexCandidates but had to be removed because of the missing data.
3920 // In this case it must be in mapBlocksUnlinked -- see test below.
3922 } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
3923 assert(setBlockIndexCandidates
.count(pindex
) == 0);
3925 // Check whether this block is in mapBlocksUnlinked.
3926 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> rangeUnlinked
= mapBlocksUnlinked
.equal_range(pindex
->pprev
);
3927 bool foundInUnlinked
= false;
3928 while (rangeUnlinked
.first
!= rangeUnlinked
.second
) {
3929 assert(rangeUnlinked
.first
->first
== pindex
->pprev
);
3930 if (rangeUnlinked
.first
->second
== pindex
) {
3931 foundInUnlinked
= true;
3934 rangeUnlinked
.first
++;
3936 if (pindex
->pprev
&& (pindex
->nStatus
& BLOCK_HAVE_DATA
) && pindexFirstNeverProcessed
!= NULL
&& pindexFirstInvalid
== NULL
) {
3937 // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
3938 assert(foundInUnlinked
);
3940 if (!(pindex
->nStatus
& BLOCK_HAVE_DATA
)) assert(!foundInUnlinked
); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
3941 if (pindexFirstMissing
== NULL
) assert(!foundInUnlinked
); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
3942 if (pindex
->pprev
&& (pindex
->nStatus
& BLOCK_HAVE_DATA
) && pindexFirstNeverProcessed
== NULL
&& pindexFirstMissing
!= NULL
) {
3943 // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
3944 assert(fHavePruned
); // We must have pruned.
3945 // This block may have entered mapBlocksUnlinked if:
3946 // - it has a descendant that at some point had more work than the
3948 // - we tried switching to that descendant but were missing
3949 // data for some intermediate block between chainActive and the
3951 // So if this block is itself better than chainActive.Tip() and it wasn't in
3952 // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
3953 if (!CBlockIndexWorkComparator()(pindex
, chainActive
.Tip()) && setBlockIndexCandidates
.count(pindex
) == 0) {
3954 if (pindexFirstInvalid
== NULL
) {
3955 assert(foundInUnlinked
);
3959 // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
3960 // End: actual consistency checks.
3962 // Try descending into the first subnode.
3963 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> range
= forward
.equal_range(pindex
);
3964 if (range
.first
!= range
.second
) {
3965 // A subnode was found.
3966 pindex
= range
.first
->second
;
3970 // This is a leaf node.
3971 // Move upwards until we reach a node of which we have not yet visited the last child.
3973 // We are going to either move to a parent or a sibling of pindex.
3974 // If pindex was the first with a certain property, unset the corresponding variable.
3975 if (pindex
== pindexFirstInvalid
) pindexFirstInvalid
= NULL
;
3976 if (pindex
== pindexFirstMissing
) pindexFirstMissing
= NULL
;
3977 if (pindex
== pindexFirstNeverProcessed
) pindexFirstNeverProcessed
= NULL
;
3978 if (pindex
== pindexFirstNotTreeValid
) pindexFirstNotTreeValid
= NULL
;
3979 if (pindex
== pindexFirstNotTransactionsValid
) pindexFirstNotTransactionsValid
= NULL
;
3980 if (pindex
== pindexFirstNotChainValid
) pindexFirstNotChainValid
= NULL
;
3981 if (pindex
== pindexFirstNotScriptsValid
) pindexFirstNotScriptsValid
= NULL
;
3983 CBlockIndex
* pindexPar
= pindex
->pprev
;
3984 // Find which child we just visited.
3985 std::pair
<std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
,std::multimap
<CBlockIndex
*,CBlockIndex
*>::iterator
> rangePar
= forward
.equal_range(pindexPar
);
3986 while (rangePar
.first
->second
!= pindex
) {
3987 assert(rangePar
.first
!= rangePar
.second
); // Our parent must have at least the node we're coming from as child.
3990 // Proceed to the next one.
3992 if (rangePar
.first
!= rangePar
.second
) {
3993 // Move to the sibling.
3994 pindex
= rangePar
.first
->second
;
4005 // Check that we actually traversed the entire map.
4006 assert(nNodes
== forward
.size());
4009 //////////////////////////////////////////////////////////////////////////////
4014 std::string
GetWarnings(const std::string
& strFor
)
4017 string strStatusBar
;
4021 if (!CLIENT_VERSION_IS_RELEASE
) {
4022 strStatusBar
= "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
4023 strGUI
= _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
4026 if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE
))
4027 strStatusBar
= strRPC
= strGUI
= "testsafemode enabled";
4029 // Misc warnings like out of disk space and clock is wrong
4030 if (strMiscWarning
!= "")
4033 strStatusBar
= strGUI
= strMiscWarning
;
4036 if (fLargeWorkForkFound
)
4039 strStatusBar
= strRPC
= "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
4040 strGUI
= _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
4042 else if (fLargeWorkInvalidChainFound
)
4045 strStatusBar
= strRPC
= "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
4046 strGUI
= _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
4052 BOOST_FOREACH(PAIRTYPE(const uint256
, CAlert
)& item
, mapAlerts
)
4054 const CAlert
& alert
= item
.second
;
4055 if (alert
.AppliesToMe() && alert
.nPriority
> nPriority
)
4057 nPriority
= alert
.nPriority
;
4058 strStatusBar
= strGUI
= alert
.strStatusBar
;
4063 if (strFor
== "gui")
4065 else if (strFor
== "statusbar")
4066 return strStatusBar
;
4067 else if (strFor
== "rpc")
4069 assert(!"GetWarnings(): invalid parameter");
4080 //////////////////////////////////////////////////////////////////////////////
4086 bool static AlreadyHave(const CInv
& inv
) EXCLUSIVE_LOCKS_REQUIRED(cs_main
)
4092 assert(recentRejects
);
4093 if (chainActive
.Tip()->GetBlockHash() != hashRecentRejectsChainTip
)
4095 // If the chain tip has changed previously rejected transactions
4096 // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
4097 // or a double-spend. Reset the rejects filter and give those
4098 // txs a second chance.
4099 hashRecentRejectsChainTip
= chainActive
.Tip()->GetBlockHash();
4100 recentRejects
->reset();
4103 return recentRejects
->contains(inv
.hash
) ||
4104 mempool
.exists(inv
.hash
) ||
4105 mapOrphanTransactions
.count(inv
.hash
) ||
4106 pcoinsTip
->HaveCoins(inv
.hash
);
4109 return mapBlockIndex
.count(inv
.hash
);
4111 // Don't know what it is, just say we already got one
4115 void static ProcessGetData(CNode
* pfrom
, const Consensus::Params
& consensusParams
)
4117 std::deque
<CInv
>::iterator it
= pfrom
->vRecvGetData
.begin();
4119 vector
<CInv
> vNotFound
;
4123 while (it
!= pfrom
->vRecvGetData
.end()) {
4124 // Don't bother if send buffer is too full to respond anyway
4125 if (pfrom
->nSendSize
>= SendBufferSize())
4128 const CInv
&inv
= *it
;
4130 boost::this_thread::interruption_point();
4133 if (inv
.type
== MSG_BLOCK
|| inv
.type
== MSG_FILTERED_BLOCK
)
4136 BlockMap::iterator mi
= mapBlockIndex
.find(inv
.hash
);
4137 if (mi
!= mapBlockIndex
.end())
4139 if (chainActive
.Contains(mi
->second
)) {
4142 static const int nOneMonth
= 30 * 24 * 60 * 60;
4143 // To prevent fingerprinting attacks, only send blocks outside of the active
4144 // chain if they are valid, and no more than a month older (both in time, and in
4145 // best equivalent proof of work) than the best header chain we know about.
4146 send
= mi
->second
->IsValid(BLOCK_VALID_SCRIPTS
) && (pindexBestHeader
!= NULL
) &&
4147 (pindexBestHeader
->GetBlockTime() - mi
->second
->GetBlockTime() < nOneMonth
) &&
4148 (GetBlockProofEquivalentTime(*pindexBestHeader
, *mi
->second
, *pindexBestHeader
, consensusParams
) < nOneMonth
);
4150 LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__
, pfrom
->GetId());
4154 // disconnect node in case we have reached the outbound limit for serving historical blocks
4155 // never disconnect whitelisted nodes
4156 static const int nOneWeek
= 7 * 24 * 60 * 60; // assume > 1 week = historical
4157 if (send
&& CNode::OutboundTargetReached(true) && ( ((pindexBestHeader
!= NULL
) && (pindexBestHeader
->GetBlockTime() - mi
->second
->GetBlockTime() > nOneWeek
)) || inv
.type
== MSG_FILTERED_BLOCK
) && !pfrom
->fWhitelisted
)
4159 LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom
->GetId());
4162 pfrom
->fDisconnect
= true;
4165 // Pruned nodes may have deleted the block, so check whether
4166 // it's available before trying to send.
4167 if (send
&& (mi
->second
->nStatus
& BLOCK_HAVE_DATA
))
4169 // Send block from disk
4171 if (!ReadBlockFromDisk(block
, (*mi
).second
, consensusParams
))
4172 assert(!"cannot load block from disk");
4173 if (inv
.type
== MSG_BLOCK
)
4174 pfrom
->PushMessage("block", block
);
4175 else // MSG_FILTERED_BLOCK)
4177 LOCK(pfrom
->cs_filter
);
4180 CMerkleBlock
merkleBlock(block
, *pfrom
->pfilter
);
4181 pfrom
->PushMessage("merkleblock", merkleBlock
);
4182 // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
4183 // This avoids hurting performance by pointlessly requiring a round-trip
4184 // Note that there is currently no way for a node to request any single transactions we didn't send here -
4185 // they must either disconnect and retry or request the full block.
4186 // Thus, the protocol spec specified allows for us to provide duplicate txn here,
4187 // however we MUST always provide at least what the remote peer needs
4188 typedef std::pair
<unsigned int, uint256
> PairType
;
4189 BOOST_FOREACH(PairType
& pair
, merkleBlock
.vMatchedTxn
)
4190 if (!pfrom
->setInventoryKnown
.count(CInv(MSG_TX
, pair
.second
)))
4191 pfrom
->PushMessage("tx", block
.vtx
[pair
.first
]);
4197 // Trigger the peer node to send a getblocks request for the next batch of inventory
4198 if (inv
.hash
== pfrom
->hashContinue
)
4200 // Bypass PushInventory, this must send even if redundant,
4201 // and we want it right after the last block so they don't
4202 // wait for other stuff first.
4204 vInv
.push_back(CInv(MSG_BLOCK
, chainActive
.Tip()->GetBlockHash()));
4205 pfrom
->PushMessage("inv", vInv
);
4206 pfrom
->hashContinue
.SetNull();
4210 else if (inv
.IsKnownType())
4212 // Send stream from relay memory
4213 bool pushed
= false;
4216 map
<CInv
, CDataStream
>::iterator mi
= mapRelay
.find(inv
);
4217 if (mi
!= mapRelay
.end()) {
4218 pfrom
->PushMessage(inv
.GetCommand(), (*mi
).second
);
4222 if (!pushed
&& inv
.type
== MSG_TX
) {
4224 if (mempool
.lookup(inv
.hash
, tx
)) {
4225 CDataStream
ss(SER_NETWORK
, PROTOCOL_VERSION
);
4228 pfrom
->PushMessage("tx", ss
);
4233 vNotFound
.push_back(inv
);
4237 // Track requests for our stuff.
4238 GetMainSignals().Inventory(inv
.hash
);
4240 if (inv
.type
== MSG_BLOCK
|| inv
.type
== MSG_FILTERED_BLOCK
)
4245 pfrom
->vRecvGetData
.erase(pfrom
->vRecvGetData
.begin(), it
);
4247 if (!vNotFound
.empty()) {
4248 // Let the peer know that we didn't find what it asked for, so it doesn't
4249 // have to wait around forever. Currently only SPV clients actually care
4250 // about this message: it's needed when they are recursively walking the
4251 // dependencies of relevant unconfirmed transactions. SPV clients want to
4252 // do that because they want to know about (and store and rebroadcast and
4253 // risk analyze) the dependencies of transactions relevant to them, without
4254 // having to download the entire memory pool.
4255 pfrom
->PushMessage("notfound", vNotFound
);
4259 bool static ProcessMessage(CNode
* pfrom
, string strCommand
, CDataStream
& vRecv
, int64_t nTimeReceived
)
4261 const CChainParams
& chainparams
= Params();
4262 RandAddSeedPerfmon();
4263 LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand
), vRecv
.size(), pfrom
->id
);
4264 if (mapArgs
.count("-dropmessagestest") && GetRand(atoi(mapArgs
["-dropmessagestest"])) == 0)
4266 LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
4271 if (!(nLocalServices
& NODE_BLOOM
) &&
4272 (strCommand
== "filterload" ||
4273 strCommand
== "filteradd" ||
4274 strCommand
== "filterclear"))
4276 if (pfrom
->nVersion
>= NO_BLOOM_VERSION
) {
4277 Misbehaving(pfrom
->GetId(), 100);
4279 } else if (GetBoolArg("-enforcenodebloom", false)) {
4280 pfrom
->fDisconnect
= true;
4286 if (strCommand
== "version")
4288 // Each connection can only send one version message
4289 if (pfrom
->nVersion
!= 0)
4291 pfrom
->PushMessage("reject", strCommand
, REJECT_DUPLICATE
, string("Duplicate version message"));
4292 Misbehaving(pfrom
->GetId(), 1);
4299 uint64_t nNonce
= 1;
4300 vRecv
>> pfrom
->nVersion
>> pfrom
->nServices
>> nTime
>> addrMe
;
4301 if (pfrom
->nVersion
< MIN_PEER_PROTO_VERSION
)
4303 // disconnect from peers older than this proto version
4304 LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom
->id
, pfrom
->nVersion
);
4305 pfrom
->PushMessage("reject", strCommand
, REJECT_OBSOLETE
,
4306 strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION
));
4307 pfrom
->fDisconnect
= true;
4311 if (pfrom
->nVersion
== 10300)
4312 pfrom
->nVersion
= 300;
4314 vRecv
>> addrFrom
>> nNonce
;
4315 if (!vRecv
.empty()) {
4316 vRecv
>> LIMITED_STRING(pfrom
->strSubVer
, MAX_SUBVERSION_LENGTH
);
4317 pfrom
->cleanSubVer
= SanitizeString(pfrom
->strSubVer
);
4320 vRecv
>> pfrom
->nStartingHeight
;
4322 vRecv
>> pfrom
->fRelayTxes
; // set to true after we get the first filter* message
4324 pfrom
->fRelayTxes
= true;
4326 // Disconnect if we connected to ourself
4327 if (nNonce
== nLocalHostNonce
&& nNonce
> 1)
4329 LogPrintf("connected to self at %s, disconnecting\n", pfrom
->addr
.ToString());
4330 pfrom
->fDisconnect
= true;
4334 pfrom
->addrLocal
= addrMe
;
4335 if (pfrom
->fInbound
&& addrMe
.IsRoutable())
4340 // Be shy and don't send version until we hear
4341 if (pfrom
->fInbound
)
4342 pfrom
->PushVersion();
4344 pfrom
->fClient
= !(pfrom
->nServices
& NODE_NETWORK
);
4346 // Potentially mark this peer as a preferred download peer.
4347 UpdatePreferredDownload(pfrom
, State(pfrom
->GetId()));
4350 pfrom
->PushMessage("verack");
4351 pfrom
->ssSend
.SetVersion(min(pfrom
->nVersion
, PROTOCOL_VERSION
));
4353 if (!pfrom
->fInbound
)
4355 // Advertise our address
4356 if (fListen
&& !IsInitialBlockDownload())
4358 CAddress addr
= GetLocalAddress(&pfrom
->addr
);
4359 if (addr
.IsRoutable())
4361 LogPrintf("ProcessMessages: advertizing address %s\n", addr
.ToString());
4362 pfrom
->PushAddress(addr
);
4363 } else if (IsPeerAddrLocalGood(pfrom
)) {
4364 addr
.SetIP(pfrom
->addrLocal
);
4365 LogPrintf("ProcessMessages: advertizing address %s\n", addr
.ToString());
4366 pfrom
->PushAddress(addr
);
4370 // Get recent addresses
4371 if (pfrom
->fOneShot
|| pfrom
->nVersion
>= CADDR_TIME_VERSION
|| addrman
.size() < 1000)
4373 pfrom
->PushMessage("getaddr");
4374 pfrom
->fGetAddr
= true;
4376 addrman
.Good(pfrom
->addr
);
4378 if (((CNetAddr
)pfrom
->addr
) == (CNetAddr
)addrFrom
)
4380 addrman
.Add(addrFrom
, addrFrom
);
4381 addrman
.Good(addrFrom
);
4388 BOOST_FOREACH(PAIRTYPE(const uint256
, CAlert
)& item
, mapAlerts
)
4389 item
.second
.RelayTo(pfrom
);
4392 pfrom
->fSuccessfullyConnected
= true;
4396 remoteAddr
= ", peeraddr=" + pfrom
->addr
.ToString();
4398 LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
4399 pfrom
->cleanSubVer
, pfrom
->nVersion
,
4400 pfrom
->nStartingHeight
, addrMe
.ToString(), pfrom
->id
,
4403 int64_t nTimeOffset
= nTime
- GetTime();
4404 pfrom
->nTimeOffset
= nTimeOffset
;
4405 AddTimeData(pfrom
->addr
, nTimeOffset
);
4409 else if (pfrom
->nVersion
== 0)
4411 // Must have a version message before anything else
4412 Misbehaving(pfrom
->GetId(), 1);
4417 else if (strCommand
== "verack")
4419 pfrom
->SetRecvVersion(min(pfrom
->nVersion
, PROTOCOL_VERSION
));
4421 // Mark this node as currently connected, so we update its timestamp later.
4422 if (pfrom
->fNetworkNode
) {
4424 State(pfrom
->GetId())->fCurrentlyConnected
= true;
4427 if (pfrom
->nVersion
>= SENDHEADERS_VERSION
) {
4428 // Tell our peer we prefer to receive headers rather than inv's
4429 // We send this to non-NODE NETWORK peers as well, because even
4430 // non-NODE NETWORK peers can announce blocks (such as pruning
4432 pfrom
->PushMessage("sendheaders");
4437 else if (strCommand
== "addr")
4439 vector
<CAddress
> vAddr
;
4442 // Don't want addr from older versions unless seeding
4443 if (pfrom
->nVersion
< CADDR_TIME_VERSION
&& addrman
.size() > 1000)
4445 if (vAddr
.size() > 1000)
4447 Misbehaving(pfrom
->GetId(), 20);
4448 return error("message addr size() = %u", vAddr
.size());
4451 // Store the new addresses
4452 vector
<CAddress
> vAddrOk
;
4453 int64_t nNow
= GetAdjustedTime();
4454 int64_t nSince
= nNow
- 10 * 60;
4455 BOOST_FOREACH(CAddress
& addr
, vAddr
)
4457 boost::this_thread::interruption_point();
4459 if (addr
.nTime
<= 100000000 || addr
.nTime
> nNow
+ 10 * 60)
4460 addr
.nTime
= nNow
- 5 * 24 * 60 * 60;
4461 pfrom
->AddAddressKnown(addr
);
4462 bool fReachable
= IsReachable(addr
);
4463 if (addr
.nTime
> nSince
&& !pfrom
->fGetAddr
&& vAddr
.size() <= 10 && addr
.IsRoutable())
4465 // Relay to a limited number of other nodes
4468 // Use deterministic randomness to send to the same nodes for 24 hours
4469 // at a time so the addrKnowns of the chosen nodes prevent repeats
4470 static uint256 hashSalt
;
4471 if (hashSalt
.IsNull())
4472 hashSalt
= GetRandHash();
4473 uint64_t hashAddr
= addr
.GetHash();
4474 uint256 hashRand
= ArithToUint256(UintToArith256(hashSalt
) ^ (hashAddr
<<32) ^ ((GetTime()+hashAddr
)/(24*60*60)));
4475 hashRand
= Hash(BEGIN(hashRand
), END(hashRand
));
4476 multimap
<uint256
, CNode
*> mapMix
;
4477 BOOST_FOREACH(CNode
* pnode
, vNodes
)
4479 if (pnode
->nVersion
< CADDR_TIME_VERSION
)
4481 unsigned int nPointer
;
4482 memcpy(&nPointer
, &pnode
, sizeof(nPointer
));
4483 uint256 hashKey
= ArithToUint256(UintToArith256(hashRand
) ^ nPointer
);
4484 hashKey
= Hash(BEGIN(hashKey
), END(hashKey
));
4485 mapMix
.insert(make_pair(hashKey
, pnode
));
4487 int nRelayNodes
= fReachable
? 2 : 1; // limited relaying of addresses outside our network(s)
4488 for (multimap
<uint256
, CNode
*>::iterator mi
= mapMix
.begin(); mi
!= mapMix
.end() && nRelayNodes
-- > 0; ++mi
)
4489 ((*mi
).second
)->PushAddress(addr
);
4492 // Do not store addresses outside our network
4494 vAddrOk
.push_back(addr
);
4496 addrman
.Add(vAddrOk
, pfrom
->addr
, 2 * 60 * 60);
4497 if (vAddr
.size() < 1000)
4498 pfrom
->fGetAddr
= false;
4499 if (pfrom
->fOneShot
)
4500 pfrom
->fDisconnect
= true;
4503 else if (strCommand
== "sendheaders")
4506 State(pfrom
->GetId())->fPreferHeaders
= true;
4510 else if (strCommand
== "inv")
4514 if (vInv
.size() > MAX_INV_SZ
)
4516 Misbehaving(pfrom
->GetId(), 20);
4517 return error("message inv size() = %u", vInv
.size());
4520 bool fBlocksOnly
= GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY
);
4522 // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistalwaysrelay is true
4523 if (pfrom
->fWhitelisted
&& GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY
))
4524 fBlocksOnly
= false;
4528 std::vector
<CInv
> vToFetch
;
4530 for (unsigned int nInv
= 0; nInv
< vInv
.size(); nInv
++)
4532 const CInv
&inv
= vInv
[nInv
];
4534 boost::this_thread::interruption_point();
4535 pfrom
->AddInventoryKnown(inv
);
4537 bool fAlreadyHave
= AlreadyHave(inv
);
4538 LogPrint("net", "got inv: %s %s peer=%d\n", inv
.ToString(), fAlreadyHave
? "have" : "new", pfrom
->id
);
4540 if (inv
.type
== MSG_BLOCK
) {
4541 UpdateBlockAvailability(pfrom
->GetId(), inv
.hash
);
4542 if (!fAlreadyHave
&& !fImporting
&& !fReindex
&& !mapBlocksInFlight
.count(inv
.hash
)) {
4543 // First request the headers preceding the announced block. In the normal fully-synced
4544 // case where a new block is announced that succeeds the current tip (no reorganization),
4545 // there are no such headers.
4546 // Secondly, and only when we are close to being synced, we request the announced block directly,
4547 // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
4548 // time the block arrives, the header chain leading up to it is already validated. Not
4549 // doing this will result in the received block being rejected as an orphan in case it is
4550 // not a direct successor.
4551 pfrom
->PushMessage("getheaders", chainActive
.GetLocator(pindexBestHeader
), inv
.hash
);
4552 CNodeState
*nodestate
= State(pfrom
->GetId());
4553 if (CanDirectFetch(chainparams
.GetConsensus()) &&
4554 nodestate
->nBlocksInFlight
< MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
4555 vToFetch
.push_back(inv
);
4556 // Mark block as in flight already, even though the actual "getdata" message only goes out
4557 // later (within the same cs_main lock, though).
4558 MarkBlockAsInFlight(pfrom
->GetId(), inv
.hash
, chainparams
.GetConsensus());
4560 LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader
->nHeight
, inv
.hash
.ToString(), pfrom
->id
);
4566 LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv
.hash
.ToString(), pfrom
->id
);
4567 else if (!fAlreadyHave
&& !fImporting
&& !fReindex
)
4571 // Track requests for our stuff
4572 GetMainSignals().Inventory(inv
.hash
);
4574 if (pfrom
->nSendSize
> (SendBufferSize() * 2)) {
4575 Misbehaving(pfrom
->GetId(), 50);
4576 return error("send buffer size() = %u", pfrom
->nSendSize
);
4580 if (!vToFetch
.empty())
4581 pfrom
->PushMessage("getdata", vToFetch
);
4585 else if (strCommand
== "getdata")
4589 if (vInv
.size() > MAX_INV_SZ
)
4591 Misbehaving(pfrom
->GetId(), 20);
4592 return error("message getdata size() = %u", vInv
.size());
4595 if (fDebug
|| (vInv
.size() != 1))
4596 LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv
.size(), pfrom
->id
);
4598 if ((fDebug
&& vInv
.size() > 0) || (vInv
.size() == 1))
4599 LogPrint("net", "received getdata for: %s peer=%d\n", vInv
[0].ToString(), pfrom
->id
);
4601 pfrom
->vRecvGetData
.insert(pfrom
->vRecvGetData
.end(), vInv
.begin(), vInv
.end());
4602 ProcessGetData(pfrom
, chainparams
.GetConsensus());
4606 else if (strCommand
== "getblocks")
4608 CBlockLocator locator
;
4610 vRecv
>> locator
>> hashStop
;
4614 // Find the last block the caller has in the main chain
4615 CBlockIndex
* pindex
= FindForkInGlobalIndex(chainActive
, locator
);
4617 // Send the rest of the chain
4619 pindex
= chainActive
.Next(pindex
);
4621 LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex
? pindex
->nHeight
: -1), hashStop
.IsNull() ? "end" : hashStop
.ToString(), nLimit
, pfrom
->id
);
4622 for (; pindex
; pindex
= chainActive
.Next(pindex
))
4624 if (pindex
->GetBlockHash() == hashStop
)
4626 LogPrint("net", " getblocks stopping at %d %s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
4629 // If pruning, don't inv blocks unless we have on disk and are likely to still have
4630 // for some reasonable time window (1 hour) that block relay might require.
4631 const int nPrunedBlocksLikelyToHave
= MIN_BLOCKS_TO_KEEP
- 3600 / chainparams
.GetConsensus().nPowTargetSpacing
;
4632 if (fPruneMode
&& (!(pindex
->nStatus
& BLOCK_HAVE_DATA
) || pindex
->nHeight
<= chainActive
.Tip()->nHeight
- nPrunedBlocksLikelyToHave
))
4634 LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
4637 pfrom
->PushInventory(CInv(MSG_BLOCK
, pindex
->GetBlockHash()));
4640 // When this block is requested, we'll send an inv that'll
4641 // trigger the peer to getblocks the next batch of inventory.
4642 LogPrint("net", " getblocks stopping at limit %d %s\n", pindex
->nHeight
, pindex
->GetBlockHash().ToString());
4643 pfrom
->hashContinue
= pindex
->GetBlockHash();
4650 else if (strCommand
== "getheaders")
4652 CBlockLocator locator
;
4654 vRecv
>> locator
>> hashStop
;
4657 if (IsInitialBlockDownload() && !pfrom
->fWhitelisted
) {
4658 LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom
->id
);
4662 CNodeState
*nodestate
= State(pfrom
->GetId());
4663 CBlockIndex
* pindex
= NULL
;
4664 if (locator
.IsNull())
4666 // If locator is null, return the hashStop block
4667 BlockMap::iterator mi
= mapBlockIndex
.find(hashStop
);
4668 if (mi
== mapBlockIndex
.end())
4670 pindex
= (*mi
).second
;
4674 // Find the last block the caller has in the main chain
4675 pindex
= FindForkInGlobalIndex(chainActive
, locator
);
4677 pindex
= chainActive
.Next(pindex
);
4680 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
4681 vector
<CBlock
> vHeaders
;
4682 int nLimit
= MAX_HEADERS_RESULTS
;
4683 LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex
? pindex
->nHeight
: -1), hashStop
.ToString(), pfrom
->id
);
4684 for (; pindex
; pindex
= chainActive
.Next(pindex
))
4686 vHeaders
.push_back(pindex
->GetBlockHeader());
4687 if (--nLimit
<= 0 || pindex
->GetBlockHash() == hashStop
)
4690 // pindex can be NULL either if we sent chainActive.Tip() OR
4691 // if our peer has chainActive.Tip() (and thus we are sending an empty
4692 // headers message). In both cases it's safe to update
4693 // pindexBestHeaderSent to be our tip.
4694 nodestate
->pindexBestHeaderSent
= pindex
? pindex
: chainActive
.Tip();
4695 pfrom
->PushMessage("headers", vHeaders
);
4699 else if (strCommand
== "tx")
4701 // Stop processing the transaction early if
4702 // We are in blocks only mode and peer is either not whitelisted or whitelistalwaysrelay is off
4703 if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY
) && (!pfrom
->fWhitelisted
|| !GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY
)))
4705 LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom
->id
);
4709 vector
<uint256
> vWorkQueue
;
4710 vector
<uint256
> vEraseQueue
;
4714 CInv
inv(MSG_TX
, tx
.GetHash());
4715 pfrom
->AddInventoryKnown(inv
);
4719 bool fMissingInputs
= false;
4720 CValidationState state
;
4722 pfrom
->setAskFor
.erase(inv
.hash
);
4723 mapAlreadyAskedFor
.erase(inv
);
4725 if (!AlreadyHave(inv
) && AcceptToMemoryPool(mempool
, state
, tx
, true, &fMissingInputs
))
4727 mempool
.check(pcoinsTip
);
4728 RelayTransaction(tx
);
4729 vWorkQueue
.push_back(inv
.hash
);
4731 LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
4733 tx
.GetHash().ToString(),
4734 mempool
.size(), mempool
.DynamicMemoryUsage() / 1000);
4736 // Recursively process any orphan transactions that depended on this one
4737 set
<NodeId
> setMisbehaving
;
4738 for (unsigned int i
= 0; i
< vWorkQueue
.size(); i
++)
4740 map
<uint256
, set
<uint256
> >::iterator itByPrev
= mapOrphanTransactionsByPrev
.find(vWorkQueue
[i
]);
4741 if (itByPrev
== mapOrphanTransactionsByPrev
.end())
4743 for (set
<uint256
>::iterator mi
= itByPrev
->second
.begin();
4744 mi
!= itByPrev
->second
.end();
4747 const uint256
& orphanHash
= *mi
;
4748 const CTransaction
& orphanTx
= mapOrphanTransactions
[orphanHash
].tx
;
4749 NodeId fromPeer
= mapOrphanTransactions
[orphanHash
].fromPeer
;
4750 bool fMissingInputs2
= false;
4751 // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
4752 // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
4753 // anyone relaying LegitTxX banned)
4754 CValidationState stateDummy
;
4757 if (setMisbehaving
.count(fromPeer
))
4759 if (AcceptToMemoryPool(mempool
, stateDummy
, orphanTx
, true, &fMissingInputs2
))
4761 LogPrint("mempool", " accepted orphan tx %s\n", orphanHash
.ToString());
4762 RelayTransaction(orphanTx
);
4763 vWorkQueue
.push_back(orphanHash
);
4764 vEraseQueue
.push_back(orphanHash
);
4766 else if (!fMissingInputs2
)
4769 if (stateDummy
.IsInvalid(nDos
) && nDos
> 0)
4771 // Punish peer that gave us an invalid orphan tx
4772 Misbehaving(fromPeer
, nDos
);
4773 setMisbehaving
.insert(fromPeer
);
4774 LogPrint("mempool", " invalid orphan tx %s\n", orphanHash
.ToString());
4776 // Has inputs but not accepted to mempool
4777 // Probably non-standard or insufficient fee/priority
4778 LogPrint("mempool", " removed orphan tx %s\n", orphanHash
.ToString());
4779 vEraseQueue
.push_back(orphanHash
);
4780 assert(recentRejects
);
4781 recentRejects
->insert(orphanHash
);
4783 mempool
.check(pcoinsTip
);
4787 BOOST_FOREACH(uint256 hash
, vEraseQueue
)
4788 EraseOrphanTx(hash
);
4790 else if (fMissingInputs
)
4792 AddOrphanTx(tx
, pfrom
->GetId());
4794 // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
4795 unsigned int nMaxOrphanTx
= (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS
));
4796 unsigned int nEvicted
= LimitOrphanTxSize(nMaxOrphanTx
);
4798 LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted
);
4800 assert(recentRejects
);
4801 recentRejects
->insert(tx
.GetHash());
4803 if (pfrom
->fWhitelisted
&& GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY
)) {
4804 // Always relay transactions received from whitelisted peers, even
4805 // if they were already in the mempool or rejected from it due
4806 // to policy, allowing the node to function as a gateway for
4807 // nodes hidden behind it.
4809 // Never relay transactions that we would assign a non-zero DoS
4810 // score for, as we expect peers to do the same with us in that
4813 if (!state
.IsInvalid(nDoS
) || nDoS
== 0) {
4814 LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx
.GetHash().ToString(), pfrom
->id
);
4815 RelayTransaction(tx
);
4817 LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx
.GetHash().ToString(), pfrom
->id
, FormatStateMessage(state
));
4822 if (state
.IsInvalid(nDoS
))
4824 LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx
.GetHash().ToString(),
4826 FormatStateMessage(state
));
4827 if (state
.GetRejectCode() < REJECT_INTERNAL
) // Never send AcceptToMemoryPool's internal codes over P2P
4828 pfrom
->PushMessage("reject", strCommand
, state
.GetRejectCode(),
4829 state
.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH
), inv
.hash
);
4831 Misbehaving(pfrom
->GetId(), nDoS
);
4833 FlushStateToDisk(state
, FLUSH_STATE_PERIODIC
);
4837 else if (strCommand
== "headers" && !fImporting
&& !fReindex
) // Ignore headers received while importing
4839 std::vector
<CBlockHeader
> headers
;
4841 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
4842 unsigned int nCount
= ReadCompactSize(vRecv
);
4843 if (nCount
> MAX_HEADERS_RESULTS
) {
4844 Misbehaving(pfrom
->GetId(), 20);
4845 return error("headers message size = %u", nCount
);
4847 headers
.resize(nCount
);
4848 for (unsigned int n
= 0; n
< nCount
; n
++) {
4849 vRecv
>> headers
[n
];
4850 ReadCompactSize(vRecv
); // ignore tx count; assume it is 0.
4856 // Nothing interesting. Stop asking this peers for more headers.
4860 CBlockIndex
*pindexLast
= NULL
;
4861 BOOST_FOREACH(const CBlockHeader
& header
, headers
) {
4862 CValidationState state
;
4863 if (pindexLast
!= NULL
&& header
.hashPrevBlock
!= pindexLast
->GetBlockHash()) {
4864 Misbehaving(pfrom
->GetId(), 20);
4865 return error("non-continuous headers sequence");
4867 if (!AcceptBlockHeader(header
, state
, chainparams
, &pindexLast
)) {
4869 if (state
.IsInvalid(nDoS
)) {
4871 Misbehaving(pfrom
->GetId(), nDoS
);
4872 return error("invalid header received");
4878 UpdateBlockAvailability(pfrom
->GetId(), pindexLast
->GetBlockHash());
4880 if (nCount
== MAX_HEADERS_RESULTS
&& pindexLast
) {
4881 // Headers message had its maximum size; the peer may have more headers.
4882 // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
4883 // from there instead.
4884 LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast
->nHeight
, pfrom
->id
, pfrom
->nStartingHeight
);
4885 pfrom
->PushMessage("getheaders", chainActive
.GetLocator(pindexLast
), uint256());
4888 bool fCanDirectFetch
= CanDirectFetch(chainparams
.GetConsensus());
4889 CNodeState
*nodestate
= State(pfrom
->GetId());
4890 // If this set of headers is valid and ends in a block with at least as
4891 // much work as our tip, download as much as possible.
4892 if (fCanDirectFetch
&& pindexLast
->IsValid(BLOCK_VALID_TREE
) && chainActive
.Tip()->nChainWork
<= pindexLast
->nChainWork
) {
4893 vector
<CBlockIndex
*> vToFetch
;
4894 CBlockIndex
*pindexWalk
= pindexLast
;
4895 // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
4896 while (pindexWalk
&& !chainActive
.Contains(pindexWalk
) && vToFetch
.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
4897 if (!(pindexWalk
->nStatus
& BLOCK_HAVE_DATA
) &&
4898 !mapBlocksInFlight
.count(pindexWalk
->GetBlockHash())) {
4899 // We don't have this block, and it's not yet in flight.
4900 vToFetch
.push_back(pindexWalk
);
4902 pindexWalk
= pindexWalk
->pprev
;
4904 // If pindexWalk still isn't on our main chain, we're looking at a
4905 // very large reorg at a time we think we're close to caught up to
4906 // the main chain -- this shouldn't really happen. Bail out on the
4907 // direct fetch and rely on parallel download instead.
4908 if (!chainActive
.Contains(pindexWalk
)) {
4909 LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
4910 pindexLast
->GetBlockHash().ToString(),
4911 pindexLast
->nHeight
);
4913 vector
<CInv
> vGetData
;
4914 // Download as much as possible, from earliest to latest.
4915 BOOST_REVERSE_FOREACH(CBlockIndex
*pindex
, vToFetch
) {
4916 if (nodestate
->nBlocksInFlight
>= MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
4917 // Can't download any more from this peer
4920 vGetData
.push_back(CInv(MSG_BLOCK
, pindex
->GetBlockHash()));
4921 MarkBlockAsInFlight(pfrom
->GetId(), pindex
->GetBlockHash(), chainparams
.GetConsensus(), pindex
);
4922 LogPrint("net", "Requesting block %s from peer=%d\n",
4923 pindex
->GetBlockHash().ToString(), pfrom
->id
);
4925 if (vGetData
.size() > 1) {
4926 LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
4927 pindexLast
->GetBlockHash().ToString(), pindexLast
->nHeight
);
4929 if (vGetData
.size() > 0) {
4930 pfrom
->PushMessage("getdata", vGetData
);
4935 CheckBlockIndex(chainparams
.GetConsensus());
4938 else if (strCommand
== "block" && !fImporting
&& !fReindex
) // Ignore blocks received while importing
4943 CInv
inv(MSG_BLOCK
, block
.GetHash());
4944 LogPrint("net", "received block %s peer=%d\n", inv
.hash
.ToString(), pfrom
->id
);
4946 pfrom
->AddInventoryKnown(inv
);
4948 CValidationState state
;
4949 // Process all blocks from whitelisted peers, even if not requested,
4950 // unless we're still syncing with the network.
4951 // Such an unrequested block may still be processed, subject to the
4952 // conditions in AcceptBlock().
4953 bool forceProcessing
= pfrom
->fWhitelisted
&& !IsInitialBlockDownload();
4954 ProcessNewBlock(state
, chainparams
, pfrom
, &block
, forceProcessing
, NULL
);
4956 if (state
.IsInvalid(nDoS
)) {
4957 assert (state
.GetRejectCode() < REJECT_INTERNAL
); // Blocks are never rejected with internal reject codes
4958 pfrom
->PushMessage("reject", strCommand
, state
.GetRejectCode(),
4959 state
.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH
), inv
.hash
);
4962 Misbehaving(pfrom
->GetId(), nDoS
);
4969 // This asymmetric behavior for inbound and outbound connections was introduced
4970 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
4971 // to users' AddrMan and later request them by sending getaddr messages.
4972 // Making nodes which are behind NAT and can only make outgoing connections ignore
4973 // the getaddr message mitigates the attack.
4974 else if ((strCommand
== "getaddr") && (pfrom
->fInbound
))
4976 pfrom
->vAddrToSend
.clear();
4977 vector
<CAddress
> vAddr
= addrman
.GetAddr();
4978 BOOST_FOREACH(const CAddress
&addr
, vAddr
)
4979 pfrom
->PushAddress(addr
);
4983 else if (strCommand
== "mempool")
4985 LOCK2(cs_main
, pfrom
->cs_filter
);
4987 std::vector
<uint256
> vtxid
;
4988 mempool
.queryHashes(vtxid
);
4990 BOOST_FOREACH(uint256
& hash
, vtxid
) {
4991 CInv
inv(MSG_TX
, hash
);
4993 bool fInMemPool
= mempool
.lookup(hash
, tx
);
4994 if (!fInMemPool
) continue; // another thread removed since queryHashes, maybe...
4995 if ((pfrom
->pfilter
&& pfrom
->pfilter
->IsRelevantAndUpdate(tx
)) ||
4997 vInv
.push_back(inv
);
4998 if (vInv
.size() == MAX_INV_SZ
) {
4999 pfrom
->PushMessage("inv", vInv
);
5003 if (vInv
.size() > 0)
5004 pfrom
->PushMessage("inv", vInv
);
5008 else if (strCommand
== "ping")
5010 if (pfrom
->nVersion
> BIP0031_VERSION
)
5014 // Echo the message back with the nonce. This allows for two useful features:
5016 // 1) A remote node can quickly check if the connection is operational
5017 // 2) Remote nodes can measure the latency of the network thread. If this node
5018 // is overloaded it won't respond to pings quickly and the remote node can
5019 // avoid sending us more work, like chain download requests.
5021 // The nonce stops the remote getting confused between different pings: without
5022 // it, if the remote node sends a ping once per second and this node takes 5
5023 // seconds to respond to each, the 5th ping the remote sends would appear to
5024 // return very quickly.
5025 pfrom
->PushMessage("pong", nonce
);
5030 else if (strCommand
== "pong")
5032 int64_t pingUsecEnd
= nTimeReceived
;
5034 size_t nAvail
= vRecv
.in_avail();
5035 bool bPingFinished
= false;
5036 std::string sProblem
;
5038 if (nAvail
>= sizeof(nonce
)) {
5041 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
5042 if (pfrom
->nPingNonceSent
!= 0) {
5043 if (nonce
== pfrom
->nPingNonceSent
) {
5044 // Matching pong received, this ping is no longer outstanding
5045 bPingFinished
= true;
5046 int64_t pingUsecTime
= pingUsecEnd
- pfrom
->nPingUsecStart
;
5047 if (pingUsecTime
> 0) {
5048 // Successful ping time measurement, replace previous
5049 pfrom
->nPingUsecTime
= pingUsecTime
;
5050 pfrom
->nMinPingUsecTime
= std::min(pfrom
->nMinPingUsecTime
, pingUsecTime
);
5052 // This should never happen
5053 sProblem
= "Timing mishap";
5056 // Nonce mismatches are normal when pings are overlapping
5057 sProblem
= "Nonce mismatch";
5059 // This is most likely a bug in another implementation somewhere; cancel this ping
5060 bPingFinished
= true;
5061 sProblem
= "Nonce zero";
5065 sProblem
= "Unsolicited pong without ping";
5068 // This is most likely a bug in another implementation somewhere; cancel this ping
5069 bPingFinished
= true;
5070 sProblem
= "Short payload";
5073 if (!(sProblem
.empty())) {
5074 LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
5077 pfrom
->nPingNonceSent
,
5081 if (bPingFinished
) {
5082 pfrom
->nPingNonceSent
= 0;
5087 else if (fAlerts
&& strCommand
== "alert")
5092 uint256 alertHash
= alert
.GetHash();
5093 if (pfrom
->setKnown
.count(alertHash
) == 0)
5095 if (alert
.ProcessAlert(chainparams
.AlertKey()))
5098 pfrom
->setKnown
.insert(alertHash
);
5101 BOOST_FOREACH(CNode
* pnode
, vNodes
)
5102 alert
.RelayTo(pnode
);
5106 // Small DoS penalty so peers that send us lots of
5107 // duplicate/expired/invalid-signature/whatever alerts
5108 // eventually get banned.
5109 // This isn't a Misbehaving(100) (immediate ban) because the
5110 // peer might be an older or different implementation with
5111 // a different signature key, etc.
5112 Misbehaving(pfrom
->GetId(), 10);
5118 else if (strCommand
== "filterload")
5120 CBloomFilter filter
;
5123 if (!filter
.IsWithinSizeConstraints())
5124 // There is no excuse for sending a too-large filter
5125 Misbehaving(pfrom
->GetId(), 100);
5128 LOCK(pfrom
->cs_filter
);
5129 delete pfrom
->pfilter
;
5130 pfrom
->pfilter
= new CBloomFilter(filter
);
5131 pfrom
->pfilter
->UpdateEmptyFull();
5133 pfrom
->fRelayTxes
= true;
5137 else if (strCommand
== "filteradd")
5139 vector
<unsigned char> vData
;
5142 // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
5143 // and thus, the maximum size any matched object can have) in a filteradd message
5144 if (vData
.size() > MAX_SCRIPT_ELEMENT_SIZE
)
5146 Misbehaving(pfrom
->GetId(), 100);
5148 LOCK(pfrom
->cs_filter
);
5150 pfrom
->pfilter
->insert(vData
);
5152 Misbehaving(pfrom
->GetId(), 100);
5157 else if (strCommand
== "filterclear")
5159 LOCK(pfrom
->cs_filter
);
5160 delete pfrom
->pfilter
;
5161 pfrom
->pfilter
= new CBloomFilter();
5162 pfrom
->fRelayTxes
= true;
5166 else if (strCommand
== "reject")
5170 string strMsg
; unsigned char ccode
; string strReason
;
5171 vRecv
>> LIMITED_STRING(strMsg
, CMessageHeader::COMMAND_SIZE
) >> ccode
>> LIMITED_STRING(strReason
, MAX_REJECT_MESSAGE_LENGTH
);
5174 ss
<< strMsg
<< " code " << itostr(ccode
) << ": " << strReason
;
5176 if (strMsg
== "block" || strMsg
== "tx")
5180 ss
<< ": hash " << hash
.ToString();
5182 LogPrint("net", "Reject %s\n", SanitizeString(ss
.str()));
5183 } catch (const std::ios_base::failure
&) {
5184 // Avoid feedback loops by preventing reject messages from triggering a new reject message.
5185 LogPrint("net", "Unparseable reject message received\n");
5192 // Ignore unknown commands for extensibility
5193 LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand
), pfrom
->id
);
5201 // requires LOCK(cs_vRecvMsg)
5202 bool ProcessMessages(CNode
* pfrom
)
5204 const CChainParams
& chainparams
= Params();
5206 // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
5210 // (4) message start
5218 if (!pfrom
->vRecvGetData
.empty())
5219 ProcessGetData(pfrom
, chainparams
.GetConsensus());
5221 // this maintains the order of responses
5222 if (!pfrom
->vRecvGetData
.empty()) return fOk
;
5224 std::deque
<CNetMessage
>::iterator it
= pfrom
->vRecvMsg
.begin();
5225 while (!pfrom
->fDisconnect
&& it
!= pfrom
->vRecvMsg
.end()) {
5226 // Don't bother if send buffer is too full to respond anyway
5227 if (pfrom
->nSendSize
>= SendBufferSize())
5231 CNetMessage
& msg
= *it
;
5234 // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
5235 // msg.hdr.nMessageSize, msg.vRecv.size(),
5236 // msg.complete() ? "Y" : "N");
5238 // end, if an incomplete message is found
5239 if (!msg
.complete())
5242 // at this point, any failure means we can delete the current message
5245 // Scan for message start
5246 if (memcmp(msg
.hdr
.pchMessageStart
, chainparams
.MessageStart(), MESSAGE_START_SIZE
) != 0) {
5247 LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg
.hdr
.GetCommand()), pfrom
->id
);
5253 CMessageHeader
& hdr
= msg
.hdr
;
5254 if (!hdr
.IsValid(chainparams
.MessageStart()))
5256 LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr
.GetCommand()), pfrom
->id
);
5259 string strCommand
= hdr
.GetCommand();
5262 unsigned int nMessageSize
= hdr
.nMessageSize
;
5265 CDataStream
& vRecv
= msg
.vRecv
;
5266 uint256 hash
= Hash(vRecv
.begin(), vRecv
.begin() + nMessageSize
);
5267 unsigned int nChecksum
= ReadLE32((unsigned char*)&hash
);
5268 if (nChecksum
!= hdr
.nChecksum
)
5270 LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__
,
5271 SanitizeString(strCommand
), nMessageSize
, nChecksum
, hdr
.nChecksum
);
5279 fRet
= ProcessMessage(pfrom
, strCommand
, vRecv
, msg
.nTime
);
5280 boost::this_thread::interruption_point();
5282 catch (const std::ios_base::failure
& e
)
5284 pfrom
->PushMessage("reject", strCommand
, REJECT_MALFORMED
, string("error parsing message"));
5285 if (strstr(e
.what(), "end of data"))
5287 // Allow exceptions from under-length message on vRecv
5288 LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__
, SanitizeString(strCommand
), nMessageSize
, e
.what());
5290 else if (strstr(e
.what(), "size too large"))
5292 // Allow exceptions from over-long size
5293 LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__
, SanitizeString(strCommand
), nMessageSize
, e
.what());
5297 PrintExceptionContinue(&e
, "ProcessMessages()");
5300 catch (const boost::thread_interrupted
&) {
5303 catch (const std::exception
& e
) {
5304 PrintExceptionContinue(&e
, "ProcessMessages()");
5306 PrintExceptionContinue(NULL
, "ProcessMessages()");
5310 LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__
, SanitizeString(strCommand
), nMessageSize
, pfrom
->id
);
5315 // In case the connection got shut down, its receive buffer was wiped
5316 if (!pfrom
->fDisconnect
)
5317 pfrom
->vRecvMsg
.erase(pfrom
->vRecvMsg
.begin(), it
);
5323 bool SendMessages(CNode
* pto
, bool fSendTrickle
)
5325 const Consensus::Params
& consensusParams
= Params().GetConsensus();
5327 // Don't send anything until we get its version message
5328 if (pto
->nVersion
== 0)
5334 bool pingSend
= false;
5335 if (pto
->fPingQueued
) {
5336 // RPC ping request by user
5339 if (pto
->nPingNonceSent
== 0 && pto
->nPingUsecStart
+ PING_INTERVAL
* 1000000 < GetTimeMicros()) {
5340 // Ping automatically sent as a latency probe & keepalive.
5345 while (nonce
== 0) {
5346 GetRandBytes((unsigned char*)&nonce
, sizeof(nonce
));
5348 pto
->fPingQueued
= false;
5349 pto
->nPingUsecStart
= GetTimeMicros();
5350 if (pto
->nVersion
> BIP0031_VERSION
) {
5351 pto
->nPingNonceSent
= nonce
;
5352 pto
->PushMessage("ping", nonce
);
5354 // Peer is too old to support ping command with nonce, pong will never arrive.
5355 pto
->nPingNonceSent
= 0;
5356 pto
->PushMessage("ping");
5360 TRY_LOCK(cs_main
, lockMain
); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
5364 // Address refresh broadcast
5365 static int64_t nLastRebroadcast
;
5366 if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast
> 24 * 60 * 60))
5369 BOOST_FOREACH(CNode
* pnode
, vNodes
)
5371 // Periodically clear addrKnown to allow refresh broadcasts
5372 if (nLastRebroadcast
)
5373 pnode
->addrKnown
.reset();
5375 // Rebroadcast our address
5376 AdvertizeLocal(pnode
);
5378 if (!vNodes
.empty())
5379 nLastRebroadcast
= GetTime();
5387 vector
<CAddress
> vAddr
;
5388 vAddr
.reserve(pto
->vAddrToSend
.size());
5389 BOOST_FOREACH(const CAddress
& addr
, pto
->vAddrToSend
)
5391 if (!pto
->addrKnown
.contains(addr
.GetKey()))
5393 pto
->addrKnown
.insert(addr
.GetKey());
5394 vAddr
.push_back(addr
);
5395 // receiver rejects addr messages larger than 1000
5396 if (vAddr
.size() >= 1000)
5398 pto
->PushMessage("addr", vAddr
);
5403 pto
->vAddrToSend
.clear();
5405 pto
->PushMessage("addr", vAddr
);
5408 CNodeState
&state
= *State(pto
->GetId());
5409 if (state
.fShouldBan
) {
5410 if (pto
->fWhitelisted
)
5411 LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto
->addr
.ToString());
5413 pto
->fDisconnect
= true;
5414 if (pto
->addr
.IsLocal())
5415 LogPrintf("Warning: not banning local peer %s!\n", pto
->addr
.ToString());
5418 CNode::Ban(pto
->addr
, BanReasonNodeMisbehaving
);
5421 state
.fShouldBan
= false;
5424 BOOST_FOREACH(const CBlockReject
& reject
, state
.rejects
)
5425 pto
->PushMessage("reject", (string
)"block", reject
.chRejectCode
, reject
.strRejectReason
, reject
.hashBlock
);
5426 state
.rejects
.clear();
5429 if (pindexBestHeader
== NULL
)
5430 pindexBestHeader
= chainActive
.Tip();
5431 bool fFetch
= state
.fPreferredDownload
|| (nPreferredDownload
== 0 && !pto
->fClient
&& !pto
->fOneShot
); // Download if this is a nice peer, or we have no nice peers and this one might do.
5432 if (!state
.fSyncStarted
&& !pto
->fClient
&& !fImporting
&& !fReindex
) {
5433 // Only actively request headers from a single peer, unless we're close to today.
5434 if ((nSyncStarted
== 0 && fFetch
) || pindexBestHeader
->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
5435 state
.fSyncStarted
= true;
5437 const CBlockIndex
*pindexStart
= pindexBestHeader
;
5438 /* If possible, start at the block preceding the currently
5439 best known header. This ensures that we always get a
5440 non-empty list of headers back as long as the peer
5441 is up-to-date. With a non-empty response, we can initialise
5442 the peer's known best block. This wouldn't be possible
5443 if we requested starting at pindexBestHeader and
5444 got back an empty response. */
5445 if (pindexStart
->pprev
)
5446 pindexStart
= pindexStart
->pprev
;
5447 LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart
->nHeight
, pto
->id
, pto
->nStartingHeight
);
5448 pto
->PushMessage("getheaders", chainActive
.GetLocator(pindexStart
), uint256());
5452 // Resend wallet transactions that haven't gotten in a block yet
5453 // Except during reindex, importing and IBD, when old wallet
5454 // transactions become unconfirmed and spams other nodes.
5455 if (!fReindex
&& !fImporting
&& !IsInitialBlockDownload())
5457 GetMainSignals().Broadcast(nTimeBestReceived
);
5461 // Try sending block announcements via headers
5464 // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
5465 // list of block hashes we're relaying, and our peer wants
5466 // headers announcements, then find the first header
5467 // not yet known to our peer but would connect, and send.
5468 // If no header would connect, or if we have too many
5469 // blocks, or if the peer doesn't want headers, just
5470 // add all to the inv queue.
5471 LOCK(pto
->cs_inventory
);
5472 vector
<CBlock
> vHeaders
;
5473 bool fRevertToInv
= (!state
.fPreferHeaders
|| pto
->vBlockHashesToAnnounce
.size() > MAX_BLOCKS_TO_ANNOUNCE
);
5474 CBlockIndex
*pBestIndex
= NULL
; // last header queued for delivery
5475 ProcessBlockAvailability(pto
->id
); // ensure pindexBestKnownBlock is up-to-date
5477 if (!fRevertToInv
) {
5478 bool fFoundStartingHeader
= false;
5479 // Try to find first header that our peer doesn't have, and
5480 // then send all headers past that one. If we come across any
5481 // headers that aren't on chainActive, give up.
5482 BOOST_FOREACH(const uint256
&hash
, pto
->vBlockHashesToAnnounce
) {
5483 BlockMap::iterator mi
= mapBlockIndex
.find(hash
);
5484 assert(mi
!= mapBlockIndex
.end());
5485 CBlockIndex
*pindex
= mi
->second
;
5486 if (chainActive
[pindex
->nHeight
] != pindex
) {
5487 // Bail out if we reorged away from this block
5488 fRevertToInv
= true;
5491 assert(pBestIndex
== NULL
|| pindex
->pprev
== pBestIndex
);
5492 pBestIndex
= pindex
;
5493 if (fFoundStartingHeader
) {
5494 // add this to the headers message
5495 vHeaders
.push_back(pindex
->GetBlockHeader());
5496 } else if (PeerHasHeader(&state
, pindex
)) {
5497 continue; // keep looking for the first new block
5498 } else if (pindex
->pprev
== NULL
|| PeerHasHeader(&state
, pindex
->pprev
)) {
5499 // Peer doesn't have this header but they do have the prior one.
5500 // Start sending headers.
5501 fFoundStartingHeader
= true;
5502 vHeaders
.push_back(pindex
->GetBlockHeader());
5504 // Peer doesn't have this header or the prior one -- nothing will
5505 // connect, so bail out.
5506 fRevertToInv
= true;
5512 // If falling back to using an inv, just try to inv the tip.
5513 // The last entry in vBlockHashesToAnnounce was our tip at some point
5515 if (!pto
->vBlockHashesToAnnounce
.empty()) {
5516 const uint256
&hashToAnnounce
= pto
->vBlockHashesToAnnounce
.back();
5517 BlockMap::iterator mi
= mapBlockIndex
.find(hashToAnnounce
);
5518 assert(mi
!= mapBlockIndex
.end());
5519 CBlockIndex
*pindex
= mi
->second
;
5521 // Warn if we're announcing a block that is not on the main chain.
5522 // This should be very rare and could be optimized out.
5523 // Just log for now.
5524 if (chainActive
[pindex
->nHeight
] != pindex
) {
5525 LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
5526 hashToAnnounce
.ToString(), chainActive
.Tip()->GetBlockHash().ToString());
5529 // If the peer announced this block to us, don't inv it back.
5530 // (Since block announcements may not be via inv's, we can't solely rely on
5531 // setInventoryKnown to track this.)
5532 if (!PeerHasHeader(&state
, pindex
)) {
5533 pto
->PushInventory(CInv(MSG_BLOCK
, hashToAnnounce
));
5534 LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__
,
5535 pto
->id
, hashToAnnounce
.ToString());
5538 } else if (!vHeaders
.empty()) {
5539 if (vHeaders
.size() > 1) {
5540 LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__
,
5542 vHeaders
.front().GetHash().ToString(),
5543 vHeaders
.back().GetHash().ToString(), pto
->id
);
5545 LogPrint("net", "%s: sending header %s to peer=%d\n", __func__
,
5546 vHeaders
.front().GetHash().ToString(), pto
->id
);
5548 pto
->PushMessage("headers", vHeaders
);
5549 state
.pindexBestHeaderSent
= pBestIndex
;
5551 pto
->vBlockHashesToAnnounce
.clear();
5555 // Message: inventory
5558 vector
<CInv
> vInvWait
;
5560 LOCK(pto
->cs_inventory
);
5561 vInv
.reserve(pto
->vInventoryToSend
.size());
5562 vInvWait
.reserve(pto
->vInventoryToSend
.size());
5563 BOOST_FOREACH(const CInv
& inv
, pto
->vInventoryToSend
)
5565 if (pto
->setInventoryKnown
.count(inv
))
5568 // trickle out tx inv to protect privacy
5569 if (inv
.type
== MSG_TX
&& !fSendTrickle
)
5571 // 1/4 of tx invs blast to all immediately
5572 static uint256 hashSalt
;
5573 if (hashSalt
.IsNull())
5574 hashSalt
= GetRandHash();
5575 uint256 hashRand
= ArithToUint256(UintToArith256(inv
.hash
) ^ UintToArith256(hashSalt
));
5576 hashRand
= Hash(BEGIN(hashRand
), END(hashRand
));
5577 bool fTrickleWait
= ((UintToArith256(hashRand
) & 3) != 0);
5581 vInvWait
.push_back(inv
);
5586 // returns true if wasn't already contained in the set
5587 if (pto
->setInventoryKnown
.insert(inv
).second
)
5589 vInv
.push_back(inv
);
5590 if (vInv
.size() >= 1000)
5592 pto
->PushMessage("inv", vInv
);
5597 pto
->vInventoryToSend
= vInvWait
;
5600 pto
->PushMessage("inv", vInv
);
5602 // Detect whether we're stalling
5603 int64_t nNow
= GetTimeMicros();
5604 if (!pto
->fDisconnect
&& state
.nStallingSince
&& state
.nStallingSince
< nNow
- 1000000 * BLOCK_STALLING_TIMEOUT
) {
5605 // Stalling only triggers when the block download window cannot move. During normal steady state,
5606 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
5607 // should only happen during initial block download.
5608 LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto
->id
);
5609 pto
->fDisconnect
= true;
5611 // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
5612 // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
5613 // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
5614 // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
5615 // to unreasonably increase our timeout.
5616 // We also compare the block download timeout originally calculated against the time at which we'd disconnect
5617 // if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
5618 // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
5619 // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
5620 // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
5621 if (!pto
->fDisconnect
&& state
.vBlocksInFlight
.size() > 0) {
5622 QueuedBlock
&queuedBlock
= state
.vBlocksInFlight
.front();
5623 int64_t nTimeoutIfRequestedNow
= GetBlockTimeout(nNow
, nQueuedValidatedHeaders
- state
.nBlocksInFlightValidHeaders
, consensusParams
);
5624 if (queuedBlock
.nTimeDisconnect
> nTimeoutIfRequestedNow
) {
5625 LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto
->id
, queuedBlock
.hash
.ToString(), queuedBlock
.nTimeDisconnect
, nTimeoutIfRequestedNow
);
5626 queuedBlock
.nTimeDisconnect
= nTimeoutIfRequestedNow
;
5628 if (queuedBlock
.nTimeDisconnect
< nNow
) {
5629 LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock
.hash
.ToString(), pto
->id
);
5630 pto
->fDisconnect
= true;
5635 // Message: getdata (blocks)
5637 vector
<CInv
> vGetData
;
5638 if (!pto
->fDisconnect
&& !pto
->fClient
&& (fFetch
|| !IsInitialBlockDownload()) && state
.nBlocksInFlight
< MAX_BLOCKS_IN_TRANSIT_PER_PEER
) {
5639 vector
<CBlockIndex
*> vToDownload
;
5640 NodeId staller
= -1;
5641 FindNextBlocksToDownload(pto
->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER
- state
.nBlocksInFlight
, vToDownload
, staller
);
5642 BOOST_FOREACH(CBlockIndex
*pindex
, vToDownload
) {
5643 vGetData
.push_back(CInv(MSG_BLOCK
, pindex
->GetBlockHash()));
5644 MarkBlockAsInFlight(pto
->GetId(), pindex
->GetBlockHash(), consensusParams
, pindex
);
5645 LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex
->GetBlockHash().ToString(),
5646 pindex
->nHeight
, pto
->id
);
5648 if (state
.nBlocksInFlight
== 0 && staller
!= -1) {
5649 if (State(staller
)->nStallingSince
== 0) {
5650 State(staller
)->nStallingSince
= nNow
;
5651 LogPrint("net", "Stall started peer=%d\n", staller
);
5657 // Message: getdata (non-blocks)
5659 while (!pto
->fDisconnect
&& !pto
->mapAskFor
.empty() && (*pto
->mapAskFor
.begin()).first
<= nNow
)
5661 const CInv
& inv
= (*pto
->mapAskFor
.begin()).second
;
5662 if (!AlreadyHave(inv
))
5665 LogPrint("net", "Requesting %s peer=%d\n", inv
.ToString(), pto
->id
);
5666 vGetData
.push_back(inv
);
5667 if (vGetData
.size() >= 1000)
5669 pto
->PushMessage("getdata", vGetData
);
5673 //If we're not going to ask, don't expect a response.
5674 pto
->setAskFor
.erase(inv
.hash
);
5676 pto
->mapAskFor
.erase(pto
->mapAskFor
.begin());
5678 if (!vGetData
.empty())
5679 pto
->PushMessage("getdata", vGetData
);
5685 std::string
CBlockFileInfo::ToString() const {
5686 return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks
, nSize
, nHeightFirst
, nHeightLast
, DateTimeStrFormat("%Y-%m-%d", nTimeFirst
), DateTimeStrFormat("%Y-%m-%d", nTimeLast
));
5697 BlockMap::iterator it1
= mapBlockIndex
.begin();
5698 for (; it1
!= mapBlockIndex
.end(); it1
++)
5699 delete (*it1
).second
;
5700 mapBlockIndex
.clear();
5702 // orphan transactions
5703 mapOrphanTransactions
.clear();
5704 mapOrphanTransactionsByPrev
.clear();
5706 } instance_of_cmaincleanup
;