// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "arith_uint256.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "merkleblock.h"
#include "policy/policy.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "versionbits.h"

#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/math/distributions/poisson.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif
CCriticalSection cs_main;

BlockMap mapBlockIndex;
CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
unsigned int nBytesPerSigOp = DEFAULT_BYTES_PER_SIGOP;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
bool fAlerts = DEFAULT_ALERTS;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;

CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;

CTxMemPool mempool(::minRelayTxFee);
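// Note: most of the defaults above are placeholders only; init.cpp overwrites them from the
// command-line/config options (e.g. -txindex, -prune, -minrelaytxfee) during startup.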
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
 * Returns true if there are nRequired or more blocks of minVersion or above
 * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
 */
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
static void CheckBlockIndex(const Consensus::Params& consensusParams);
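// For reference, the mainnet parameters used with this check (e.g. for BIP34/66/65) are a window
// of 1000 blocks, with 750 required to start enforcing a rule and 950 to reject outdated blocks.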
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const string strMessageMagic = "Bitcoin Signed Message:\n";
struct CBlockIndexWorkComparator
{
    bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};
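// With this ordering the "best" candidate (most work, then earliest received) compares as the
// largest element, so callers pick it up via setBlockIndexCandidates.rbegin().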
CBlockIndex *pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
 * missing the data for the block.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;

/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 * Pruned nodes may have entries where B is missing data.
 */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
 *  block/undo files that should be deleted.  Set on startup
 *  or if we allocate more file space when we're in prune mode
 */
bool fCheckForPruning = false;

/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
uint32_t nBlockSequenceId = 1;
/**
 * Sources of received blocks, saved to be able to send them reject
 * messages or ban them when processing happens afterwards. Protected by
 * cs_main.
 */
map<uint256, NodeId> mapBlockSource;

/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset. Protected by
 * cs_main.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 *
 * Memory used: 1.3 MB
 */
boost::scoped_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
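// The filter itself is allocated elsewhere in this file as CRollingBloomFilter(120000, 0.000001),
// matching the sizing discussion above.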
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
    uint256 hash;
    CBlockIndex *pindex;     //! Optional.
    int64_t nTime;           //! Time of "getdata" request in microseconds.
    bool fValidatedHeaders;  //! Whether this block has validated headers at the time of request.
    int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer)
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
/** Number of blocks in flight with validated headers. */
int nQueuedValidatedHeaders = 0;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
set<int> setDirtyFileInfo;
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//
struct CBlockReject {
    unsigned char chRejectCode;
    string strRejectReason;
    uint256 hashBlock;
};
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! Accumulated misbehaviour score for this peer.
    int nMisbehavior;
    //! Whether this peer should be disconnected and banned (unless whitelisted).
    bool fShouldBan;
    //! String name of this peer (debugging/logging purposes).
    std::string name;
    //! List of asynchronously-determined block rejections to notify this peer about.
    std::vector<CBlockReject> rejects;
    //! The best known block we know this peer has announced.
    CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    CBlockIndex *pindexBestHeaderSent;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    list<QueuedBlock> vBlocksInFlight;
    int nBlocksInFlight;
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;

    CNodeState() {
        fCurrentlyConnected = false;
        nMisbehavior = 0;
        fShouldBan = false;
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = NULL;
        pindexBestHeaderSent = NULL;
        fSyncStarted = false;
        nStallingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
    }
};
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;
CNodeState *State(NodeId pnode) {
    map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return NULL;
    return &it->second;
}

int GetHeight()
{
    LOCK(cs_main);
    return chainActive.Height();
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;

    nPreferredDownload += state->fPreferredDownload;
}
// Returns time at which to timeout block request (nTime in microseconds)
int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams)
{
    return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}
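// With the 10-minute mainnet target spacing this evaluates to a 20-minute base timeout plus
// 5 additional minutes for every block that was already queued with validated headers.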
void InitializeNode(NodeId nodeid, const CNode *pnode) {
    LOCK(cs_main);
    CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
    state.name = pnode->addrName;
    state.address = pnode->addr;
}
void FinalizeNode(NodeId nodeid) {
    LOCK(cs_main);
    CNodeState *state = State(nodeid);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
        AddressCurrentlyConnected(state->address);
    }

    BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
        nQueuedValidatedHeaders -= entry.fValidatedHeaders;
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;

    mapNodeState.erase(nodeid);
}
// Returns a bool indicating whether we requested this block.
bool MarkBlockAsReceived(const uint256& hash) {
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    int64_t nNow = GetTimeMicros();
    QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
    nQueuedValidatedHeaders += newentry.fValidatedHeaders;
    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
    mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    if (!state->hashLastUnknownBlock.IsNull()) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
    return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
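// i.e. direct fetching is allowed only when the tip is less than 20 target spacings old
// (about 3 hours 20 minutes on mainnet).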
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}
/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
    if (pa->nHeight > pb->nHeight) {
        pa = pa->GetAncestor(pb->nHeight);
    } else if (pb->nHeight > pa->nHeight) {
        pb = pb->GetAncestor(pa->nHeight);
    }

    while (pa != pb && pa && pb) {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}
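// The download scheduler below never requests more than BLOCK_DOWNLOAD_WINDOW (1024) blocks
// beyond the last block linked with a peer; the extra +1 on nMaxHeight exists only so we can
// detect which peer is stalling the window.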
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == NULL) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<CBlockIndex*> vToFetch;
    CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    LOCK(cs_main);
    CNodeState *state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.GetHeight.connect(&GetHeight);
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.GetHeight.disconnect(&GetHeight);
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    // Find the first block the caller has in the main chain
    BOOST_FOREACH(const uint256& hash, locator.vHave) {
        BlockMap::iterator mi = mapBlockIndex.find(hash);
        if (mi != mapBlockIndex.end())
        {
            CBlockIndex* pindex = (*mi).second;
            if (chain.Contains(pindex))
                return pindex;
        }
    }
    return chain.Genesis();
}
CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 10,000 orphans, each of which is at most 5,000 bytes big is
    // at most 500 megabytes of orphans:
    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
    if (sz > 5000)
    {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    mapOrphanTransactions[hash].tx = tx;
    mapOrphanTransactions[hash].fromPeer = peer;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
        mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);

    LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return;
    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
    {
        map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(hash);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
}
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            EraseOrphanTx(maybeErase->second.tx.GetHash());
            ++nErased;
        }
    }
    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    unsigned int nEvicted = 0;
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
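// The limit passed in comes from -maxorphantx; eviction picks a pseudo-random orphan so an
// attacker cannot predict which entries will survive.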
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
    if (tx.nLockTime == 0)
        return true;
    if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
        return true;
    BOOST_FOREACH(const CTxIn& txin, tx.vin) {
        if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
            return false;
    }
    return true;
}
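// LOCKTIME_THRESHOLD is 500000000: nLockTime values below it are interpreted as block heights,
// values at or above it as UNIX timestamps.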
bool CheckFinalTx(const CTransaction &tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // BIP113 will require that time-locked transactions have nLockTime set to
    // less than the median time of the previous block they're contained in.
    // When the next block is created its previous block will be the current
    // chain tip, so we use that to calculate the median time passed to
    // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                             ? chainActive.Tip()->GetMedianTimePast()
                             : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
 * Calculates the block height and previous block's median time past at
 * which the transaction will be considered final in the context of BIP 68.
 * Also removes from the vector of input heights any entries which did not
 * correspond to sequence locked inputs as they do not affect the calculation.
 */
static std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    assert(prevHeights->size() == tx.vin.size());

    // Will be set to the equivalent height- and time-based nLockTime
    // values that would be necessary to satisfy all relative lock-
    // time constraints given our view of block chain history.
    // The semantics of nLockTime are the last invalid height/time, so
    // use -1 to have the effect of any height or time being valid.
    int nMinHeight = -1;
    int64_t nMinTime = -1;

    // tx.nVersion is signed integer so requires cast to unsigned otherwise
    // we would be doing a signed comparison and half the range of nVersion
    // wouldn't support BIP 68.
    bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
                      && flags & LOCKTIME_VERIFY_SEQUENCE;

    // Do not enforce sequence numbers as a relative lock time
    // unless we have been instructed to
    if (!fEnforceBIP68) {
        return std::make_pair(nMinHeight, nMinTime);
    }

    for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
        const CTxIn& txin = tx.vin[txinIndex];

        // Sequence numbers with the most significant bit set are not
        // treated as relative lock-times, nor are they given any
        // consensus-enforced meaning at this point.
        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
            // The height of this input is not relevant for sequence locks
            (*prevHeights)[txinIndex] = 0;
            continue;
        }

        int nCoinHeight = (*prevHeights)[txinIndex];

        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
            int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
            // NOTE: Subtract 1 to maintain nLockTime semantics
            // BIP 68 relative lock times have the semantics of calculating
            // the first block or time at which the transaction would be
            // valid. When calculating the effective block time or height
            // for the entire transaction, we switch to using the
            // semantics of nLockTime which is the last invalid block
            // time or height. Thus we subtract 1 from the calculated
            // time or height.

            // Time-based relative lock-times are measured from the
            // smallest allowed timestamp of the block containing the
            // txout being spent, which is the median time past of the
            // block prior.
            nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
        } else {
            nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
        }
    }

    return std::make_pair(nMinHeight, nMinTime);
}
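// Example: an input with nSequence = (SEQUENCE_LOCKTIME_TYPE_FLAG | 10) requires 10 * 512 = 5120
// seconds of median-time-past to elapse after the prevout's block, while nSequence = 10 with the
// type flag clear requires 10 blocks (SEQUENCE_LOCKTIME_GRANULARITY is 9, i.e. 512-second units).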
static bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
{
    assert(block.pprev);
    int64_t nBlockTime = block.pprev->GetMedianTimePast();
    if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
        return false;

    return true;
}
bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
}
bool TestLockPointValidity(const LockPoints* lp)
{
    AssertLockHeld(cs_main);
    assert(lp);
    // If there are relative lock times then the maxInputBlock will be set
    // If there are no relative lock times, the LockPoints don't depend on the chain
    if (lp->maxInputBlock) {
        // Check whether chainActive is an extension of the block at which the LockPoints
        // calculation was valid. If not LockPoints are no longer valid
        if (!chainActive.Contains(lp->maxInputBlock)) {
            return false;
        }
    }

    // LockPoints still valid
    return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);

    CBlockIndex* tip = chainActive.Tip();
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than chainActive.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // pcoinsTip contains the UTXO set for chainActive.Tip()
        CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            CCoins coins;
            if (!viewMemPool.GetCoins(txin.prevout.hash, coins)) {
                return error("%s: Missing input", __func__);
            }
            if (coins.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coins.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            BOOST_FOREACH(int height, prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
    unsigned int nSigOps = 0;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
    {
        nSigOps += txin.scriptSig.GetSigOpCount(false);
    }
    BOOST_FOREACH(const CTxOut& txout, tx.vout)
    {
        nSigOps += txout.scriptPubKey.GetSigOpCount(false);
    }
    return nSigOps;
}
unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
{
    if (tx.IsCoinBase())
        return 0;

    unsigned int nSigOps = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        if (prevout.scriptPubKey.IsPayToScriptHash())
            nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
    }
    return nSigOps;
}
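// GetLegacySigOpCount() is the cheap upper bound used by the original block rules; this P2SH
// variant additionally counts sigops inside redeemed scripts and therefore needs the spent
// outputs (the CCoinsViewCache) to be available.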
bool CheckTransaction(const CTransaction& tx, CValidationState &state)
{
    // Basic checks that don't depend on any context
    if (tx.vin.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
    if (tx.vout.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
    // Size limits
    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");

    // Check for negative or overflow output values
    CAmount nValueOut = 0;
    BOOST_FOREACH(const CTxOut& txout, tx.vout)
    {
        if (txout.nValue < 0)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
        if (txout.nValue > MAX_MONEY)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
        nValueOut += txout.nValue;
        if (!MoneyRange(nValueOut))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
    }

    // Check for duplicate inputs
    set<COutPoint> vInOutPoints;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
    {
        if (vInOutPoints.count(txin.prevout))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
        vInOutPoints.insert(txin.prevout);
    }

    if (tx.IsCoinBase())
    {
        if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
    }
    else
    {
        BOOST_FOREACH(const CTxIn& txin, tx.vin)
            if (txin.prevout.IsNull())
                return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
    }

    return true;
}
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
    int expired = pool.Expire(GetTime() - age);
    if (expired != 0)
        LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);

    std::vector<uint256> vNoSpendsRemaining;
    pool.TrimToSize(limit, &vNoSpendsRemaining);
    BOOST_FOREACH(const uint256& removed, vNoSpendsRemaining)
        pcoinsTip->Uncache(removed);
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
    return strprintf("%s%s (code %i)",
        state.GetRejectReason(),
        state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
        state.GetRejectCode());
}
bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree,
                              bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee,
                              std::vector<uint256>& vHashTxnToUncache)
{
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    if (!CheckTransaction(tx, state))
        return false; // state filled in by CheckTransaction

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "coinbase");

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    string reason;
    if (fRequireStandard && !IsStandardTx(tx, reason))
        return state.DoS(0, false, REJECT_NONSTANDARD, reason);

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
        return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");

    // is it already in the memory pool?
    if (pool.exists(hash))
        return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");

    // Check for conflicts with in-memory transactions
    set<uint256> setConflicts;
    {
    LOCK(pool.cs); // protect pool.mapNextTx
    BOOST_FOREACH(const CTxIn &txin, tx.vin)
    {
        if (pool.mapNextTx.count(txin.prevout))
        {
            const CTransaction *ptxConflicting = pool.mapNextTx[txin.prevout].ptx;
            if (!setConflicts.count(ptxConflicting->GetHash()))
            {
                // Allow opt-out of transaction replacement by setting
                // nSequence >= maxint-1 on all inputs.
                //
                // maxint-1 is picked to still allow use of nLockTime by
                // non-replacable transactions. All inputs rather than just one
                // is for the sake of multi-party protocols, where we don't
                // want a single party to be able to disable replacement.
                //
                // The opt-out ignores descendants as anyone relying on
                // first-seen mempool behavior should be checking all
                // unconfirmed ancestors anyway; doing otherwise is hopelessly
                // insecure.
                bool fReplacementOptOut = true;
                if (fEnableReplacement)
                {
                    BOOST_FOREACH(const CTxIn &txin, ptxConflicting->vin)
                    {
                        if (txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
                        {
                            fReplacementOptOut = false;
                            break;
                        }
                    }
                }
                if (fReplacementOptOut)
                    return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");

                setConflicts.insert(ptxConflicting->GetHash());
            }
        }
    }
    }
    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        CAmount nValueIn = 0;
        LockPoints lp;
        {
        LOCK(pool.cs);
        CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
        view.SetBackend(viewMemPool);

        // do we already have it?
        bool fHadTxInCache = pcoinsTip->HaveCoinsInCache(hash);
        if (view.HaveCoins(hash)) {
            if (!fHadTxInCache)
                vHashTxnToUncache.push_back(hash);
            return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known");
        }

        // do all inputs exist?
        // Note that this does not check for the presence of actual outputs (see the next check for that),
        // and only helps with filling in pfMissingInputs (to determine missing vs spent).
        BOOST_FOREACH(const CTxIn txin, tx.vin) {
            if (!pcoinsTip->HaveCoinsInCache(txin.prevout.hash))
                vHashTxnToUncache.push_back(txin.prevout.hash);
            if (!view.HaveCoins(txin.prevout.hash)) {
                if (pfMissingInputs)
                    *pfMissingInputs = true;
                return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
            }
        }

        // are the actual inputs available?
        if (!view.HaveInputs(tx))
            return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent");

        // Bring the best block into scope
        view.GetBestBlock();

        nValueIn = view.GetValueIn(tx);

        // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
        view.SetBackend(dummy);

        // Only accept BIP68 sequence locked transactions that can be mined in the next
        // block; we don't want our mempool filled up with transactions that can't
        // be mined yet.
        // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
        // CoinsViewCache instead of create its own
        if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
            return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
        }

        // Check for non-standard pay-to-script-hash in inputs
        if (fRequireStandard && !AreInputsStandard(tx, view))
            return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");

        unsigned int nSigOps = GetLegacySigOpCount(tx);
        nSigOps += GetP2SHSigOpCount(tx, view);

        CAmount nValueOut = tx.GetValueOut();
        CAmount nFees = nValueIn-nValueOut;
        // nModifiedFees includes any fee deltas from PrioritiseTransaction
        CAmount nModifiedFees = nFees;
        double nPriorityDummy = 0;
        pool.ApplyDeltas(hash, nPriorityDummy, nModifiedFees);

        CAmount inChainInputValue;
        double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue);

        // Keep track of transactions that spend a coinbase, which we re-scan
        // during reorgs to ensure COINBASE_MATURITY is still met.
        bool fSpendsCoinbase = false;
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            const CCoins *coins = view.AccessCoins(txin.prevout.hash);
            if (coins->IsCoinBase()) {
                fSpendsCoinbase = true;
                break;
            }
        }

        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOps, lp);
        unsigned int nSize = entry.GetTxSize();

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if ((nSigOps > MAX_STANDARD_TX_SIGOPS) || (nBytesPerSigOp && nSigOps > nSize / nBytesPerSigOp))
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
                strprintf("%d", nSigOps));
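        // At the defaults used here (roughly 4000 standard sigops per transaction and 20 bytes
        // per sigop) this rejects transactions whose sigop count is disproportionate to their size.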
        CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
        if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
        } else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
            // Require that free transactions have sufficient priority to be mined in the next block.
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
        }

        // Continuously rate-limit free (really, very-low-fee) transactions
        // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
        // be annoying or make others' transactions take longer to confirm.
        if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize))
        {
            static CCriticalSection csFreeLimiter;
            static double dFreeCount;
            static int64_t nLastTime;
            int64_t nNow = GetTime();

            LOCK(csFreeLimiter);

            // Use an exponentially decaying ~10-minute window:
            dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
            nLastTime = nNow;
            // -limitfreerelay unit is thousand-bytes-per-minute
            // At default rate it would take over a month to fill 1GB
            if (dFreeCount + nSize >= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000)
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction");
            LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
            dFreeCount += nSize;
        }

        if (nAbsurdFee && nFees > nAbsurdFee)
            return state.Invalid(false,
                REJECT_HIGHFEE, "absurdly-high-fee",
                strprintf("%d > %d", nFees, nAbsurdFee));
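        // The pow(1.0 - 1.0/600.0, dt) factor above decays the free-relay counter with a
        // ~600-second time constant, so the limiter measures a trailing ~10-minute window
        // rather than a hard per-minute quota.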
        // Calculate in-mempool ancestors, up to a limit.
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
        }

        // A transaction that spends outputs that would be replaced by it is invalid. Now
        // that we have the set of all ancestors we can detect this
        // pathological case by making sure setConflicts and setAncestors don't
        // intersect.
        BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors)
        {
            const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
            if (setConflicts.count(hashAncestor))
            {
                return state.DoS(10, false,
                                 REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
                                 strprintf("%s spends conflicting transaction %s",
                                           hash.ToString(),
                                           hashAncestor.ToString()));
            }
        }
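        // The default package limits here are 25 ancestors / 25 descendants and roughly 101 kB of
        // serialized size for either chain, adjustable via the -limitancestor*/-limitdescendant* options.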
        // Check if it's economically rational to mine this transaction rather
        // than the ones it replaces.
        CAmount nConflictingFees = 0;
        size_t nConflictingSize = 0;
        uint64_t nConflictingCount = 0;
        CTxMemPool::setEntries allConflicting;

        // If we don't hold the lock allConflicting might be incomplete; the
        // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
        // mempool consistency for us.
        LOCK(pool.cs);
        if (setConflicts.size())
        {
            CFeeRate newFeeRate(nModifiedFees, nSize);
            set<uint256> setConflictsParents;
            const int maxDescendantsToVisit = 100;
            CTxMemPool::setEntries setIterConflicting;
            BOOST_FOREACH(const uint256 &hashConflicting, setConflicts)
            {
                CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
                if (mi == pool.mapTx.end())
                    continue;

                // Save these to avoid repeated lookups
                setIterConflicting.insert(mi);

                // Don't allow the replacement to reduce the feerate of the
                // mempool.
                //
                // We usually don't want to accept replacements with lower
                // feerates than what they replaced as that would lower the
                // feerate of the next block. Requiring that the feerate always
                // be increased is also an easy-to-reason about way to prevent
                // DoS attacks via replacements.
                //
                // The mining code doesn't (currently) take children into
                // account (CPFP) so we only consider the feerates of
                // transactions being directly replaced, not their indirect
                // descendants. While that does mean high feerate children are
                // ignored when deciding whether or not to replace, we do
                // require the replacement to pay more overall fees too,
                // mitigating most cases.
                CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
                if (newFeeRate <= oldFeeRate)
                {
                    return state.DoS(0, false,
                                     REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                     strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                                               hash.ToString(),
                                               newFeeRate.ToString(),
                                               oldFeeRate.ToString()));
                }

                BOOST_FOREACH(const CTxIn &txin, mi->GetTx().vin)
                {
                    setConflictsParents.insert(txin.prevout.hash);
                }

                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                BOOST_FOREACH(CTxMemPool::txiter it, setIterConflicting) {
                    pool.CalculateDescendants(it, allConflicting);
                }
                BOOST_FOREACH(CTxMemPool::txiter it, allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.DoS(0, false,
                                 REJECT_NONSTANDARD, "too many potential replacements", false,
                                 strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                                           hash.ToString(),
                                           nConflictingCount,
                                           maxDescendantsToVisit));
            }

            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
                        return state.DoS(0, false,
                                         REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
                                         strprintf("replacement %s adds unconfirmed input, idx %d",
                                                   hash.ToString(), j));
                }
            }

            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                                           hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }

            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::minRelayTxFee.GetFee(nSize))
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                                           hash.ToString(),
                                           FormatMoney(nDeltaFees),
                                           FormatMoney(::minRelayTxFee.GetFee(nSize))));
            }
        }
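        // Together with the opt-in signalling check earlier, these conditions implement the BIP125
        // replacement policy: no new unconfirmed inputs, a higher feerate than the directly
        // conflicting transactions, more absolute fee than everything evicted, and enough extra
        // fee to pay for the replacement's own relay bandwidth.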
        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        if (!CheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true))
            return false; // state filled in by CheckInputs

        // Check again against just the consensus-critical mandatory script
        // verification flags, in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks, however allowing such transactions into the mempool
        // can be exploited as a DoS attack.
        if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true))
        {
            return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
                __func__, hash.ToString(), FormatStateMessage(state));
        }

        // Remove conflicting transactions from the mempool
        BOOST_FOREACH(const CTxMemPool::txiter it, allConflicting)
        {
            LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
                    it->GetTx().GetHash().ToString(),
                    hash.ToString(),
                    FormatMoney(nModifiedFees - nConflictingFees),
                    (int)nSize - (int)nConflictingSize);
        }
        pool.RemoveStaged(allConflicting, false);

        // Store transaction in memory
        pool.addUnchecked(hash, entry, setAncestors, !IsInitialBlockDownload());

        // trim mempool and check if tx was trimmed
        if (!fOverrideMempoolLimit) {
            LimitMempoolSize(pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
            if (!pool.exists(hash))
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
        }
    }

    SyncWithWallets(tx, NULL, NULL);

    return true;
}
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                        bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    std::vector<uint256> vHashTxToUncache;
    bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
    if (!res) {
        BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache)
            pcoinsTip->Uncache(hashTx);
    }
    return res;
}
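// Any coins the worker pulled into the cache for a transaction that was ultimately rejected are
// uncached here so failed submissions cannot bloat pcoinsTip.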
/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
    CBlockIndex *pindexSlow = NULL;

    LOCK(cs_main);

    if (mempool.lookup(hash, txOut))
        return true;

    if (fTxIndex) {
        CDiskTxPos postx;
        if (pblocktree->ReadTxIndex(hash, postx)) {
            CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
            if (file.IsNull())
                return error("%s: OpenBlockFile failed", __func__);
            CBlockHeader header;
            try {
                file >> header;
                fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                file >> txOut;
            } catch (const std::exception& e) {
                return error("%s: Deserialize or I/O error - %s", __func__, e.what());
            }
            hashBlock = header.GetHash();
            if (txOut.GetHash() != hash)
                return error("%s: txid mismatch", __func__);
            return true;
        }
    }

    if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
        int nHeight = -1;
        {
            const CCoinsViewCache &view = *pcoinsTip;
            const CCoins* coins = view.AccessCoins(hash);
            if (coins)
                nHeight = coins->nHeight;
        }
        if (nHeight > 0)
            pindexSlow = chainActive[nHeight];
    }

    if (pindexSlow) {
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
            BOOST_FOREACH(const CTransaction &tx, block.vtx) {
                if (tx.GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk: OpenBlockFile failed");

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(block);
    fileout << FLATDATA(messageStart) << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk: ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
        return false;
    if (block.GetHash() != pindex->GetBlockHash())
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                pindex->ToString(), pindex->GetBlockPos().ToString());
    return true;
}
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
    int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
    // Force block reward to zero when right shift is undefined.
    if (halvings >= 64)
        return 0;

    CAmount nSubsidy = 50 * COIN;
    // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
    nSubsidy >>= halvings;
    return nSubsidy;
}
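// e.g. heights 0-209999 pay 50 BTC, 210000-419999 pay 25 BTC, 420000-629999 pay 12.5 BTC, and so
// on; after 64 halvings the right shift would be undefined, hence the explicit zero above.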
bool IsInitialBlockDownload()
{
    const CChainParams& chainParams = Params();
    LOCK(cs_main);
    if (fImporting || fReindex)
        return true;
    if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
        return true;
    static bool lockIBDState = false;
    if (lockIBDState)
        return false;
    bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
            pindexBestHeader->GetBlockTime() < GetTime() - nMaxTipAge);
    if (!state)
        lockIBDState = true;
    return state;
}
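// 24 * 6 blocks is roughly one day's worth of blocks at the 10-minute target, and nMaxTipAge
// defaults to 24 hours, so a node leaves IBD once its chain is nearly caught up to a recent
// best header; lockIBDState makes the transition one-way until restart.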
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before the last checkpoint)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = NULL;

    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        if (!fLargeWorkForkFound && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            CAlert::Notify(warning, true);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            fLargeWorkForkFound = true;
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            fLargeWorkInvalidChainFound = true;
        }
    }
    else
    {
        fLargeWorkForkFound = false;
        fLargeWorkInvalidChainFound = false;
    }
}
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    while (pfork && pfork != plonger)
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
        pfork = pfork->pprev;

    // The condition we warn the user about is a fork of at least 7 blocks
    // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours,
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both).
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 72)
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;

    CheckForkWarningConditions();
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
    CNodeState *state = State(pnode);
    state->nMisbehavior += howmuch;
    int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
    if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
        LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
        state->fShouldBan = true;
        LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
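// Example (assuming the default -banscore of 100): a peer at 60 points that
// misbehaves by another 50 crosses the threshold exactly once (60 < 100 <= 110),
// so fShouldBan is set on that call and not again on later increments.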
void static InvalidChainFound(CBlockIndex* pindexNew)
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
        pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
        log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
        pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
        tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
    CheckForkWarningConditions();
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    if (state.IsInvalid(nDoS)) {
        std::map<uint256, NodeId>::iterator it = mapBlockSource.find(pindex->GetBlockHash());
        if (it != mapBlockSource.end() && State(it->second)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()};
            State(it->second)->rejects.push_back(reject);
            Misbehaving(it->second, nDoS);
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, CTxUndo &txundo, int nHeight)
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
            unsigned nPos = txin.prevout.n;
            if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
            // mark an outpoint spent, and construct undo information
            txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
            if (coins->vout.size() == 0) {
                CTxInUndo& undo = txundo.vprevout.back();
                undo.nHeight = coins->nHeight;
                undo.fCoinBase = coins->fCoinBase;
                undo.nVersion = coins->nVersion;

    inputs.ModifyNewCoins(tx.GetHash(), tx.IsCoinBase())->FromTx(tx, nHeight);

void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight)
    UpdateCoins(tx, state, inputs, txundo, nHeight);
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, cacheStore), &error)) {
int GetSpendHeight(const CCoinsViewCache& inputs)
    CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
    return pindexPrev->nHeight + 1;
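// Note: when the view is backed by the current chain tip, this is
// chainActive.Height() + 1, i.e. the height at which the spending transaction
// would be confirmed, which is what the maturity and other height-based checks
// below compare against.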
namespace Consensus {
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
    // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
    // for an attacker to attempt to split the network.
    if (!inputs.HaveInputs(tx))
        return state.Invalid(false, 0, "", "Inputs unavailable");

    CAmount nValueIn = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
        const COutPoint &prevout = tx.vin[i].prevout;
        const CCoins *coins = inputs.AccessCoins(prevout.hash);

        // If prev is coinbase, check that it's matured
        if (coins->IsCoinBase()) {
            if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
                return state.Invalid(false,
                    REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
                    strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
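            // Example: with COINBASE_MATURITY = 100, a coinbase created at height
            // 400,000 only becomes spendable in a block at height 400,100 or later,
            // i.e. 100 blocks after the one that created it.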
        // Check for negative or overflow input values
        nValueIn += coins->vout[prevout.n].nValue;
        if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");

    if (nValueIn < tx.GetValueOut())
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
            strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));

    // Tally transaction fees
    CAmount nTxFee = nValueIn - tx.GetValueOut();
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
    if (!MoneyRange(nFees))
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
} // namespace Consensus
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks)
    if (!tx.IsCoinBase())
        if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks before the
        // last block chain checkpoint. Assuming the checkpoints are valid this
        // is safe because block merkle hashes are still computed and checked,
        // and any change will be caught at the next checkpoint. Of course, if
        // the checkpoint is for a chain that's invalid due to false scriptSigs
        // this optimisation would allow an invalid chain to be accepted.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);

                CScriptCheck check(*coins, tx, i, flags, cacheStore);
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(*coins, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore);
                        return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said, during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after a soft-fork
                    // super-majority vote has passed.
                    return state.DoS(100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(blockundo);
    fileout << FLATDATA(messageStart) << nSize;

    long fileOutPos = ftell(fileout.Get());
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();
bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenBlockFile failed", __func__);

    uint256 hashChecksum;
        filein >> blockundo;
        filein >> hashChecksum;
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());

    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    if (hashChecksum != hasher.GetHash())
        return error("%s: Checksum mismatch", __func__);
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
    strMiscWarning = strMessage;
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);

bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
 * Apply the undo operation of a CTxInUndo to the given chain state.
 * @param undo The undo object.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return True on success.
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
    CCoinsModifier coins = view.ModifyCoins(out.hash);
    if (undo.nHeight != 0) {
        // undo data contains height: this is the last output of the prevout tx being spent
        if (!coins->IsPruned())
            fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
        coins->fCoinBase = undo.fCoinBase;
        coins->nHeight = undo.nHeight;
        coins->nVersion = undo.nVersion;
        if (coins->IsPruned())
            fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
    if (coins->IsAvailable(out.n))
        fClean = fClean && error("%s: undo data overwriting existing output", __func__);
    if (coins->vout.size() < out.n+1)
        coins->vout.resize(out.n+1);
    coins->vout[out.n] = undo.txout;
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
        return error("DisconnectBlock(): no undo data available");
    if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock(): failure reading undo data");

    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock(): block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = block.vtx[i];
        uint256 hash = tx.GetHash();

        // Check that all outputs are available and match the outputs in the block itself
        CCoinsModifier outs = view.ModifyCoins(hash);
        outs->ClearUnspendable();

        CCoins outsBlock(tx, pindex->nHeight);
        // The CCoins serialization does not serialize negative numbers.
        // No network rules currently depend on the version here, so an inconsistency is harmless
        // but it must be corrected before txout nversion ever influences a network rule.
        if (outsBlock.nVersion < 0)
            outs->nVersion = outsBlock.nVersion;
        if (*outs != outsBlock)
            fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");

        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock(): transaction and undo data inconsistent");
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                if (!ApplyTxInUndo(undo, view, out))

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());
void static FlushBlockFile(bool fFinalize = false)
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE *fileOld = OpenBlockFile(posOld);
        TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);

    fileOld = OpenUndoFile(posOld);
        TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);

static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
// Called periodically asynchronously; alerts if it smells like
// we're being fed a bad chain (blocks being generated much
// too slowly or too quickly).
void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
                    int64_t nPowTargetSpacing)
    if (bestHeader == NULL || initialDownloadCheck()) return;

    static int64_t lastAlertTime = 0;
    int64_t now = GetAdjustedTime();
    if (lastAlertTime > now-60*60*24) return; // Alert at most once per day

    const int SPAN_HOURS=4;
    const int SPAN_SECONDS=SPAN_HOURS*60*60;
    int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;

    boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);

    std::string strWarning;
    int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;

    const CBlockIndex* i = bestHeader;
    while (i->GetBlockTime() >= startTime) {
    if (i == NULL) return; // Ran out of chain, we must not be fully sync'ed

    // How likely is it to find that many by chance?
    double p = boost::math::pdf(poisson, nBlocks);

    LogPrint("partitioncheck", "%s: Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
    LogPrint("partitioncheck", "%s: likelihood: %g\n", __func__, p);

    // Aim for one false-positive about every fifty years of normal running:
    const int FIFTY_YEARS = 50*365*24*60*60;
    double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);

    if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
        // Many fewer blocks than expected: alert!
        strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
    else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
        // Many more blocks than expected: alert!
        strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
    if (!strWarning.empty())
        strMiscWarning = strWarning;
        CAlert::Notify(strWarning, true);
        lastAlertTime = now;
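// Worked numbers for mainnet (nPowTargetSpacing = 600 seconds): BLOCKS_EXPECTED
// = 4*60*60/600 = 24, and alertThreshold = 1 / (FIFTY_YEARS / SPAN_SECONDS)
// = 14400 / 1576800000 ~= 9.1e-6, so an alert only fires when the number of
// blocks observed in the 4-hour window is extremely unlikely under the
// Poisson(24) model.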
// Protected by cs_main
static VersionBitsCache versionbitscache;

int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
    int32_t nVersion = VERSIONBITS_TOP_BITS;

    for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
        ThresholdState state = VersionBitsState(pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
        if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
            nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
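// Example: VERSIONBITS_TOP_BITS is 0x20000000, so a block that signals only the
// deployment assigned to bit 0 (e.g. CSV on mainnet) while that deployment is
// STARTED or LOCKED_IN carries nVersion = 0x20000001.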
 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
    WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}

    int64_t BeginTime(const Consensus::Params& params) const { return 0; }
    int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }

    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
        return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;

// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck)
    const CChainParams& chainparams = Params();
    AssertLockHeld(cs_main);

    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    if (!CheckBlock(block, state, !fJustCheck, !fJustCheck))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        view.SetBestBlock(pindex->GetBlockHash());

    bool fScriptChecks = true;
    if (fCheckpointsEnabled) {
        CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
        if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
            // This block is an ancestor of a checkpoint: disable script checks
            fScriptChecks = false;

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint("bench", "    - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
                          !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                            (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
    CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
    // Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));

    if (fEnforceBIP30) {
        BOOST_FOREACH(const CTransaction& tx, block.vtx) {
            const CCoins* coins = view.AccessCoins(tx.GetHash());
            if (coins && !coins->IsPruned())
                return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                                 REJECT_INVALID, "bad-txns-BIP30");
    // BIP16 didn't become active until Apr 1 2012
    int64_t nBIP16SwitchTime = 1333238400;
    bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);

    unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

    // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks,
    // when 75% of the network has upgraded:
    if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_DERSIG;

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) for block.nVersion=4
    // blocks, when 75% of the network has upgraded:
    if (block.nVersion >= 4 && IsSuperMajority(4, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;

    // Start enforcing CHECKSEQUENCEVERIFY using versionbits logic.
    if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
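    // At this point flags has accumulated every script rule that applies to this block.
    // For example, a recent mainnet block past the BIP66/BIP65 supermajorities with CSV
    // active is validated with SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG |
    // SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY | SCRIPT_VERIFY_CHECKSEQUENCEVERIFY.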
    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint("bench", "    - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);

    CBlockUndo blockundo;

    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    std::vector<int> prevheights;
    int nLockTimeFlags = 0;
    unsigned int nSigOps = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    for (unsigned int i = 0; i < block.vtx.size(); i++)
        const CTransaction &tx = block.vtx[i];

        nInputs += tx.vin.size();
        nSigOps += GetLegacySigOpCount(tx);
        if (nSigOps > MAX_BLOCK_SIGOPS)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        if (!tx.IsCoinBase())
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
                                 REJECT_INVALID, "bad-txns-inputs-missingorspent");

            // Check that transaction is BIP68 final
            // BIP68 lock checks (as opposed to nLockTime checks) must
            // be in ConnectBlock because they require the UTXO set
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;

            if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
                                 REJECT_INVALID, "bad-txns-nonfinal");

            if (fStrictPayToScriptHash)
                // Add in sigops done by pay-to-script-hash inputs;
                // this is to prevent a "rogue miner" from creating
                // an incredibly-expensive-to-validate block.
                nSigOps += GetP2SHSigOpCount(tx, view);
                if (nSigOps > MAX_BLOCK_SIGOPS)
                    return state.DoS(100, error("ConnectBlock(): too many sigops"),
                                     REJECT_INVALID, "bad-blk-sigops");

            nFees += view.GetValueIn(tx) - tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, nScriptCheckThreads ? &vChecks : NULL))
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                    tx.GetHash().ToString(), FormatStateMessage(state));
            control.Add(vChecks);

        blockundo.vtxundo.push_back(CTxUndo());
        UpdateCoins(tx, state, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);

    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint("bench", "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
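    // Worked example for the subsidy check that follows: at a height where
    // GetBlockSubsidy() returns 25 * COIN and the block's transactions pay a total
    // of 0.5 BTC in fees, blockReward is 25.5 BTC, and a coinbase whose outputs
    // claim more than that makes the block invalid ("bad-cb-amount").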
    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0].GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0].GetValueOut(), blockReward),
                         REJECT_INVALID, "bad-cb-amount");

    if (!control.Wait())
        return state.DoS(100, false);
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint("bench", "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
        if (pindex->GetUndoPos().IsNull()) {
            if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock(): FindUndoPos failed");
            if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
                return AbortNode(state, "Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);

    if (!pblocktree->WriteTxIndex(vPos))
        return AbortNode(state, "Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint("bench", "    - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
    static uint256 hashPrevBestCoinBase;
    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
    hashPrevBestCoinBase = block.vtx[0].GetHash();

    int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
    LogPrint("bench", "    - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
enum FlushStateMode {
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with
 * if they're too large, if it's been a while since the last write,
 * or always and in all cases if we're in prune mode and are deleting files.
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
    const CChainParams& chainparams = Params();
    LOCK2(cs_main, cs_LastBlockFile);
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
    bool fFlushForPrune = false;
    if (fPruneMode && fCheckForPruning && !fReindex) {
        FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
        fCheckForPruning = false;
        if (!setFilesToPrune.empty()) {
            fFlushForPrune = true;
                pblocktree->WriteFlag("prunedblockfiles", true);

    int64_t nNow = GetTimeMicros();
    // Avoid writing/flushing immediately after startup.
    if (nLastWrite == 0) {
    if (nLastFlush == 0) {
    if (nLastSetChain == 0) {
        nLastSetChain = nNow;

    size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
    // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
    // The cache is over the limit, we have to write now.
    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
    // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
    bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
    // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
    bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
    // Combine all conditions that result in a full cache flush.
    bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
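    // Illustration (assuming the defaults DATABASE_WRITE_INTERVAL = 1 hour and
    // DATABASE_FLUSH_INTERVAL = 24 hours): a FLUSH_STATE_PERIODIC call more than an
    // hour after the last write rewrites the dirty block index entries, while a full
    // chainstate flush additionally happens when the coins cache outgrows
    // nCoinCacheUsage, when roughly a day has passed since the last flush, when
    // pruning needs to delete files, or when the caller asks for FLUSH_STATE_ALWAYS.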
    // Write blocks and block index to disk.
    if (fDoFullFlush || fPeriodicWrite) {
        // Depend on nMinDiskSpace to ensure we can write block index
        if (!CheckDiskSpace(0))
            return state.Error("out of disk space");
        // First make sure all block and undo data is flushed to disk.
        // Then update all block file information (which may refer to block and undo files).
            std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
            vFiles.reserve(setDirtyFileInfo.size());
            for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
                setDirtyFileInfo.erase(it++);
            std::vector<const CBlockIndex*> vBlocks;
            vBlocks.reserve(setDirtyBlockIndex.size());
            for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                vBlocks.push_back(*it);
                setDirtyBlockIndex.erase(it++);
            if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                return AbortNode(state, "Files to write to block index database");
        // Finally remove any pruned files
            UnlinkPrunedFiles(setFilesToPrune);

    // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        // Typical CCoins structures on disk are around 128 bytes in size.
        // Pushing a new one to the database can cause it to be written
        // twice (once in the log, and once in the tables). This is already
        // an overestimation, as most will delete an existing entry or
        // overwrite one. Still, use a conservative safety factor of 2.
        if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
            return state.Error("out of disk space");
        // Flush the chainstate (which may refer to block index entries).
        if (!pcoinsTip->Flush())
            return AbortNode(state, "Failed to write to coin database");

    if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().SetBestChain(chainActive.GetLocator());
        nLastSetChain = nNow;
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
void FlushStateToDisk() {
    CValidationState state;
    FlushStateToDisk(state, FLUSH_STATE_ALWAYS);

void PruneAndFlush() {
    CValidationState state;
    fCheckForPruning = true;
    FlushStateToDisk(state, FLUSH_STATE_NONE);
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew) {
    const CChainParams& chainParams = Params();
    chainActive.SetTip(pindexNew);

    nTimeBestReceived = GetTime();
    mempool.AddTransactionsUpdated(1);

    LogPrintf("%s: new best=%s height=%d bits=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nBits,
        log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());

    cvBlockChange.notify_all();

    // Check the version of the last 100 blocks to see if we need to upgrade:
    static bool fWarned = false;
    if (!IsInitialBlockDownload())
        const CBlockIndex* pindex = chainActive.Tip();
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
                if (state == THRESHOLD_ACTIVE) {
                    strMiscWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                        CAlert::Notify(strMiscWarning, true);
                    LogPrintf("%s: unknown new rules are about to activate (versionbit %i)\n", __func__, bit);
        for (int i = 0; i < 100 && pindex != NULL; i++)
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
            pindex = pindex->pprev;
        LogPrintf("%s: %d of last 100 blocks have unexpected version\n", __func__, nUpgraded);
        if (nUpgraded > 100/2)
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
                CAlert::Notify(strMiscWarning, true);
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
bool static DisconnectTip(CValidationState& state, const Consensus::Params& consensusParams)
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    if (!ReadBlockFromDisk(block, pindexDelete, consensusParams))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
    // Resurrect mempool transactions from the disconnected block.
    std::vector<uint256> vHashUpdate;
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        // ignore validation errors in resurrected transactions
        list<CTransaction> removed;
        CValidationState stateDummy;
        if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
            mempool.removeRecursive(tx, removed);
        } else if (mempool.exists(tx.GetHash())) {
            vHashUpdate.push_back(tx.GetHash());
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in this
    // block that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(vHashUpdate);
    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        SyncWithWallets(tx, pindexDelete->pprev, NULL);
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
 * corresponding to pindexNew, to bypass loading it again from disk.
bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock)
    assert(pindexNew->pprev == chainActive.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
        if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    LogPrint("bench", "  - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
        CCoinsViewCache view(pcoinsTip);
        bool rv = ConnectBlock(*pblock, state, pindexNew, view);
        GetMainSignals().BlockChecked(*pblock, state);
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
        mapBlockSource.erase(pindexNew->GetBlockHash());
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        LogPrint("bench", "  - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
        assert(view.Flush());
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint("bench", "  - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint("bench", "  - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
    list<CTransaction> txConflicted;
    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
    // Update chainActive & related variables.
    UpdateTip(pindexNew);
    // Tell wallet about transactions that went from mempool
    BOOST_FOREACH(const CTransaction &tx, txConflicted) {
        SyncWithWallets(tx, pindexNew, NULL);
    // ... and about transactions that got confirmed:
    BOOST_FOREACH(const CTransaction &tx, pblock->vtx) {
        SyncWithWallets(tx, pindexNew, pblock);

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint("bench", "  - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
    LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
static CBlockIndex* FindMostWorkChain() {
        CBlockIndex *pindexNew = NULL;

        // Find the best candidate header.
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
            pindexTest = pindexTest->pprev;
        if (!fInvalidAncestor)
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        setBlockIndexCandidates.erase(it++);
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock)
    AssertLockHeld(cs_main);
    bool fInvalidFound = false;
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams.GetConsensus()))
        fBlocksDisconnected = true;

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        nHeight = nTargetHeight;

        // Connect new blocks.
        BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    // A system error occurred (disk space, database error, ...).
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.

    if (fBlocksDisconnected) {
        mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
        LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
    mempool.check(pcoinsTip);

    // Callbacks/notifications for a new best chain.
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
        CheckForkWarningConditions();
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
    CBlockIndex *pindexMostWork = NULL;
        boost::this_thread::interruption_point();
        CBlockIndex *pindexNewTip = NULL;
        const CBlockIndex *pindexFork;
        bool fInitialDownload;
            CBlockIndex *pindexOldTip = chainActive.Tip();
            pindexMostWork = FindMostWorkChain();

            // Whether we have anything to do at all.
            if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())

            if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))

            pindexNewTip = chainActive.Tip();
            pindexFork = chainActive.FindFork(pindexOldTip);
            fInitialDownload = IsInitialBlockDownload();
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main
        // Always notify the UI if a new block tip was connected
        if (pindexFork != pindexNewTip) {
            uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);

            if (!fInitialDownload) {
                // Find the hashes of all blocks that weren't previously in the best chain.
                std::vector<uint256> vHashes;
                CBlockIndex *pindexToAnnounce = pindexNewTip;
                while (pindexToAnnounce != pindexFork) {
                    vHashes.push_back(pindexToAnnounce->GetBlockHash());
                    pindexToAnnounce = pindexToAnnounce->pprev;
                    if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                        // Limit announcements in case of a huge reorganization.
                        // Rely on the peer's synchronization mechanism in that case.
                // Relay inventory, but don't relay old inventory during initial block download.
                int nBlockEstimate = 0;
                if (fCheckpointsEnabled)
                    nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints());
                    BOOST_FOREACH(CNode* pnode, vNodes) {
                        if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) {
                            BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
                                pnode->PushBlockHash(hash);
                // Notify external listeners about the new tip.
                if (!vHashes.empty()) {
                    GetMainSignals().UpdatedBlockTip(pindexNewTip);
    } while(pindexMostWork != chainActive.Tip());
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
bool InvalidateBlock(CValidationState& state, const Consensus::Params& consensusParams, CBlockIndex *pindex)
    AssertLockHeld(cs_main);

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);

    while (chainActive.Contains(pindex)) {
        CBlockIndex *pindexWalk = chainActive.Tip();
        pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(pindexWalk);
        setBlockIndexCandidates.erase(pindexWalk);
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state, consensusParams)) {
            mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);

    LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);

    InvalidChainFound(pindex);
    mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = NULL;

    // Remove the invalidity flag from all ancestors too.
    while (pindex != NULL) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
        pindex = pindex->pprev;
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }

    return true;
}
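// Illustrative note (not part of the original code): the queue above performs a
// breadth-first walk over descendants that were parked in mapBlocksUnlinked. For a
// hypothetical chain A <- B <- C where C's data arrived before B's, C sits in
// mapBlocksUnlinked keyed by B; once B's transactions arrive, the loop pops B,
// links C (C.nChainTx = B.nChainTx + C.nTx), and erases the unlinked entry.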
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    if (!fKnown) {
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return state.Error("out of disk space");
        }
    }

    setDirtyFileInfo.insert(nFile);
    return true;
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    unsigned int nNewSize;
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);

    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }

    return true;
}
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW)
{
    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus()))
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");

    // Check timestamp
    if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
        return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");

    return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW).  This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.

    // Size limits
    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i].IsCoinBase())
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");

    // Check transactions
    BOOST_FOREACH(const CTransaction& tx, block.vtx)
        if (!CheckTransaction(tx, state))
            return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage()));

    unsigned int nSigOps = 0;
    BOOST_FOREACH(const CTransaction& tx, block.vtx)
    {
        nSigOps += GetLegacySigOpCount(tx);
    }
    if (nSigOps > MAX_BLOCK_SIGOPS)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");

    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
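// Usage sketch (an assumption about callers, not code from this file): callers that only
// need the structural checks can relax the flags, e.g.
//   CValidationState state;
//   bool ok = CheckBlock(block, state, /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false);
// Because fChecked is only set when both flags are true, such partial checks never mark
// the block as fully checked.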
static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
{
    if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
        return true;

    int nHeight = pindexPrev->nHeight + 1;
    // Don't accept any forks from the main chain prior to last checkpoint
    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
    if (pcheckpoint && nHeight < pcheckpoint->nHeight)
        return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));

    return true;
}
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
    const Consensus::Params& consensusParams = Params().GetConsensus();
    // Check proof of work
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");

    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    for (int32_t version = 2; version < 5; ++version) // check for version 2, 3 and 4 upgrades
        if (block.nVersion < version && IsSuperMajority(version, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
            return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(v%d)", version - 1),
                                 strprintf("rejected nVersion=%d block", version - 1));

    return true;
}
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
    const Consensus::Params& consensusParams = Params().GetConsensus();

    // Check that all transactions are finalized
    BOOST_FOREACH(const CTransaction& tx, block.vtx) {
        int nLockTimeFlags = 0;
        int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                                  ? pindexPrev->GetMedianTimePast()
                                  : block.GetBlockTime();
        if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
        }
    }

    // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
    // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
    if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams))
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
        }
    }

    return true;
}
static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = NULL;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {

        if (miSelf != mapBlockIndex.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK)
                return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
            return true;
        }

        if (!CheckBlockHeader(block, state))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));

        // Get prev block index
        CBlockIndex* pindexPrev = NULL;
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");

        if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
            return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

        if (!ContextualCheckBlockHeader(block, state, pindexPrev))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
    }
    if (pindex == NULL)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
{
    AssertLockHeld(cs_main);

    CBlockIndex *&pindex = *ppindex;

    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip.  Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) {  // If we didn't ask for it:
        if (pindex->nTx != 0) return true;  // This is a previously-processed block that was pruned
        if (!fHasMoreWork) return true;     // Don't process less-work chains
        if (fTooFarAhead) return true;      // Block height is too high
    }

    if ((!CheckBlock(block, state)) || !ContextualCheckBlock(block, state, pindex->pprev)) {
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, FormatStateMessage(state));
    }

    int nHeight = pindex->nHeight;

    // Write block to history file
    try {
        unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
        CDiskBlockPos blockPos;
        if (dbp != NULL)
            blockPos = *dbp;
        if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
            return error("AcceptBlock(): FindBlockPos failed");
        if (dbp == NULL)
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                AbortNode(state, "Failed to write block");
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
            return error("AcceptBlock(): ReceivedBlockTransactions failed");
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    if (fCheckForPruning)
        FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files

    return true;
}
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams)
{
    unsigned int nFound = 0;
    for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++)
    {
        if (pstart->nVersion >= minVersion)
            ++nFound;
        pstart = pstart->pprev;
    }
    return (nFound >= nRequired);
}
bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, CDiskBlockPos* dbp)
{
    {
        LOCK(cs_main);
        bool fRequested = MarkBlockAsReceived(pblock->GetHash());
        fRequested |= fForceProcessing;

        // Store to disk
        CBlockIndex *pindex = NULL;
        bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp);
        if (pindex && pfrom) {
            mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
        }
        CheckBlockIndex(chainparams.GetConsensus());
        if (!ret)
            return error("%s: AcceptBlock FAILED", __func__);
    }

    if (!ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed", __func__);

    return true;
}
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == chainActive.Tip());
    if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
        return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

    CCoinsViewCache viewNew(pcoinsTip);
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, pindexPrev))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
    if (!CheckBlock(block, state, fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ContextualCheckBlock(block, state, pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ConnectBlock(block, state, &indexDummy, viewNew, true))
        return false;
    assert(state.IsValid());

    return true;
}
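// Usage sketch (an assumption about the mining path, not code from this file): block
// template creation typically validates a candidate against the current tip without
// writing anything to disk, along the lines of
//   CValidationState state;
//   if (!TestBlockValidity(state, chainparams, *pblock, chainActive.Tip(), false, false))
//       throw std::runtime_error("TestBlockValidity failed");
// The dummy CBlockIndex and the throwaway CCoinsViewCache above are what keep the call
// side-effect free.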
/**
 * BLOCK PRUNING CODE
 */

/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
    uint64_t retval = 0;
    BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}
/* Prune a block file (modify associated database entries) */
void PruneOneBlockFile(const int fileNumber)
{
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
        CBlockIndex* pindex = it->second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first;
                range.first++;
                if (it->second == pindex) {
                    mapBlocksUnlinked.erase(it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
{
    for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        CDiskBlockPos pos(*it, 0);
        boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
        boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
        LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
    }
}
/* Calculate the block/rev files that should be deleted to remain under target */
void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == NULL || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;

            if (nCurrentUsage + nBuffer < nPruneTarget)  // are we below our target?
                break;

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
             nLastBlockWeCanPrune, count);
}
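// Worked example (illustrative numbers only): with nPruneTarget = 2 GiB, a current usage
// of 2.5 GiB and nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE, the loop keeps
// deleting the oldest prunable blk/rev pairs until nCurrentUsage + nBuffer drops below
// the 2 GiB target, while always skipping files whose highest block is within
// MIN_BLOCKS_TO_KEEP of the tip.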
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
    uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
        return AbortNode("Disk space is low!", _("Error: Disk space is low!"));

    return true;
}
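// Example (illustrative): a chunk pre-allocation request of 16 MiB succeeds only if the
// data directory reports at least nMinDiskSpace (about 50 MB) plus those 16 MiB free;
// otherwise the node aborts with "Disk space is low!".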
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return NULL;
    boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
    boost::filesystem::create_directories(path.parent_path());
    FILE* file = fopen(path.string().c_str(), "rb+");
    if (!file && !fReadOnly)
        file = fopen(path.string().c_str(), "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return NULL;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return NULL;
        }
    }
    return file;
}

FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}

FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}

boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
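// Example (illustrative): for a CDiskBlockPos with nFile = 123, OpenBlockFile() maps to
// "<datadir>/blocks/blk00123.dat" and OpenUndoFile() to "<datadir>/blocks/rev00123.dat",
// per the "%s%05u.dat" format above.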
CBlockIndex * InsertBlockIndex(uint256 hash)
{
    if (hash.IsNull())
        return NULL;

    // Return existing
    BlockMap::iterator mi = mapBlockIndex.find(hash);
    if (mi != mapBlockIndex.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    if (!pindexNew)
        throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
    mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}
bool static LoadBlockIndexDB()
{
    const CChainParams& chainparams = Params();
    if (!pblocktree->LoadBlockIndexGuts())
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    set<int> setBlkDataFiles;
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return true;
    chainActive.SetTip(it->second);

    PruneBlockIndexCandidates();

    LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));

    return true;
}
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}

CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100);
}

bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = NULL;
    int nGoodTransactions = 0;
    CValidationState state;
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
        if (pindex->nHeight < chainActive.Height()-nCheckDepth)
            break;
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            bool fClean = true;
            if (!DisconnectBlock(block, state, pindex, coins, &fClean))
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            pindexState = pindex->pprev;
            if (!fClean) {
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!ConnectBlock(block, state, pindex, coins))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}
void UnloadBlockIndex()
{
    LOCK(cs_main);
    setBlockIndexCandidates.clear();
    chainActive.SetTip(NULL);
    pindexBestInvalid = NULL;
    pindexBestHeader = NULL;
    mempool.clear();
    mapOrphanTransactions.clear();
    mapOrphanTransactionsByPrev.clear();
    nSyncStarted = 0;
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    nBlockSequenceId = 1;
    mapBlockSource.clear();
    mapBlocksInFlight.clear();
    nQueuedValidatedHeaders = 0;
    nPreferredDownload = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    mapNodeState.clear();
    recentRejects.reset(NULL);
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }

    BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;
}
bool LoadBlockIndex()
{
    // Load block index from databases
    if (!fReindex && !LoadBlockIndexDB())
        return false;
    return true;
}
bool InitBlockIndex(const CChainParams& chainparams)
{
    LOCK(cs_main);

    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

    // Check whether we're already initialized
    if (chainActive.Genesis() != NULL)
        return true;

    // Use the provided setting for -txindex in the new database
    fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
    pblocktree->WriteFlag("txindex", fTxIndex);
    LogPrintf("Initializing databases...\n");

    // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
    if (!fReindex) {
        try {
            CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
            // Start new block file
            unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
            CDiskBlockPos blockPos;
            CValidationState state;
            if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
                return error("LoadBlockIndex(): FindBlockPos failed");
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                return error("LoadBlockIndex(): writing genesis block to disk failed");
            CBlockIndex *pindex = AddToBlockIndex(block);
            if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
                return error("LoadBlockIndex(): genesis block not accepted");
            if (!ActivateBestChain(state, chainparams, &block))
                return error("LoadBlockIndex(): genesis block cannot be activated");
            // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
            return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
        } catch (const std::runtime_error& e) {
            return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
        }
    }

    return true;
}
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, chainparams.MessageStart(), MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                CBlock block;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                             block.hashPrevBlock.ToString());
                    if (dbp)
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    CValidationState state;
                    if (ProcessNewBlock(state, chainparams, NULL, &block, true, dbp))
                        nLoaded++;
                    if (state.IsError())
                        break;
                } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                }

                // Recursively process earlier encountered successors of this block
                deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        CBlock block;
                        if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
                        {
                            LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
                                      head.ToString());
                            CValidationState dummy;
                            if (ProcessNewBlock(dummy, chainparams, NULL, &block, true, &it->second))
                            {
                                nLoaded++;
                                queue.push_back(block.GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}
void static CheckBlockIndex(const Consensus::Params& consensusParams)
{
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain.  (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        forward.insert(std::make_pair(it->second->pprev, it->second));
    }

    assert(forward.size() == mapBlockIndex.size());

    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != NULL) {
        nNodes++;
        if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == NULL) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0);  // nSequenceId can't be set for blocks that aren't linked
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
        assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == NULL) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates.  chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            //    tip.
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == NULL) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
            if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}
//////////////////////////////////////////////////////////////////////////////

std::string GetWarnings(const std::string& strFor)
{
    int nPriority = 0;
    string strStatusBar;
    string strRPC;
    string strGUI;

    if (!CLIENT_VERSION_IS_RELEASE) {
        strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
        strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
    }

    if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
        strStatusBar = strRPC = strGUI = "testsafemode enabled";

    // Misc warnings like out of disk space and clock is wrong
    if (strMiscWarning != "")
    {
        strStatusBar = strGUI = strMiscWarning;
    }

    if (fLargeWorkForkFound)
    {
        strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
        strGUI = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
    }
    else if (fLargeWorkInvalidChainFound)
    {
        strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
        strGUI = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
    }

    // Alerts
    {
        LOCK(cs_mapAlerts);
        BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
        {
            const CAlert& alert = item.second;
            if (alert.AppliesToMe() && alert.nPriority > nPriority)
            {
                nPriority = alert.nPriority;
                strStatusBar = strGUI = alert.strStatusBar;
            }
        }
    }

    if (strFor == "gui")
        return strGUI;
    else if (strFor == "statusbar")
        return strStatusBar;
    else if (strFor == "rpc")
        return strRPC;
    assert(!"GetWarnings(): invalid parameter");
    return "error";
}
//////////////////////////////////////////////////////////////////////////////

bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    switch (inv.type)
    {
    case MSG_TX:
        {
            assert(recentRejects);
            if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
            {
                // If the chain tip has changed previously rejected transactions
                // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
                // or a double-spend. Reset the rejects filter and give those
                // txs a second chance.
                hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
                recentRejects->reset();
            }

            return recentRejects->contains(inv.hash) ||
                   mempool.exists(inv.hash) ||
                   mapOrphanTransactions.count(inv.hash) ||
                   pcoinsTip->HaveCoins(inv.hash);
        }
    case MSG_BLOCK:
        return mapBlockIndex.count(inv.hash);
    }
    // Don't know what it is, just say we already got one
    return true;
}
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams)
{
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();

    vector<CInv> vNotFound;

    LOCK(cs_main);

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())
            break;

        const CInv &inv = *it;
        {
            boost::this_thread::interruption_point();
            it++;

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
            {
                bool send = false;
                BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
                if (mi != mapBlockIndex.end())
                {
                    if (chainActive.Contains(mi->second)) {
                        send = true;
                    } else {
                        static const int nOneMonth = 30 * 24 * 60 * 60;
                        // To prevent fingerprinting attacks, only send blocks outside of the active
                        // chain if they are valid, and no more than a month older (both in time, and in
                        // best equivalent proof of work) than the best header chain we know about.
                        send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
                               (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
                               (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
                        if (!send) {
                            LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
                        }
                    }
                }
                // disconnect node in case we have reached the outbound limit for serving historical blocks
                // never disconnect whitelisted nodes
                static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
                if (send && CNode::OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
                {
                    LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());

                    // disconnect node
                    pfrom->fDisconnect = true;
                    send = false;
                }
                // Pruned nodes may have deleted the block, so check whether
                // it's available before trying to send.
                if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
                {
                    // Send block from disk
                    CBlock block;
                    if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
                        assert(!"cannot load block from disk");
                    if (inv.type == MSG_BLOCK)
                        pfrom->PushMessage(NetMsgType::BLOCK, block);
                    else // MSG_FILTERED_BLOCK)
                    {
                        LOCK(pfrom->cs_filter);
                        if (pfrom->pfilter)
                        {
                            CMerkleBlock merkleBlock(block, *pfrom->pfilter);
                            pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
                            // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                            // This avoids hurting performance by pointlessly requiring a round-trip
                            // Note that there is currently no way for a node to request any single transactions we didn't send here -
                            // they must either disconnect and retry or request the full block.
                            // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                            // however we MUST always provide at least what the remote peer needs
                            typedef std::pair<unsigned int, uint256> PairType;
                            BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
                                pfrom->PushMessage(NetMsgType::TX, block.vtx[pair.first]);
                        }
                    }

                    // Trigger the peer node to send a getblocks request for the next batch of inventory
                    if (inv.hash == pfrom->hashContinue)
                    {
                        // Bypass PushInventory, this must send even if redundant,
                        // and we want it right after the last block so they don't
                        // wait for other stuff first.
                        vector<CInv> vInv;
                        vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                        pfrom->PushMessage(NetMsgType::INV, vInv);
                        pfrom->hashContinue.SetNull();
                    }
                }
            }
            else if (inv.IsKnownType())
            {
                // Send stream from relay memory
                bool pushed = false;
                {
                    LOCK(cs_mapRelay);
                    map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
                    if (mi != mapRelay.end()) {
                        pfrom->PushMessage(inv.GetCommand(), (*mi).second);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_TX) {
                    CTransaction tx;
                    if (mempool.lookup(inv.hash, tx)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << tx;
                        pfrom->PushMessage(NetMsgType::TX, ss);
                        pushed = true;
                    }
                }
                if (!pushed) {
                    vNotFound.push_back(inv);
                }
            }

            // Track requests for our stuff.
            GetMainSignals().Inventory(inv.hash);

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
                break;
        }
    }

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound);
    }
}
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived)
    const CChainParams& chainparams = Params();
    RandAddSeedPerfmon();
    LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
    if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
        LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");

    if (!(nLocalServices & NODE_BLOOM) &&
              (strCommand == NetMsgType::FILTERLOAD ||
               strCommand == NetMsgType::FILTERADD ||
               strCommand == NetMsgType::FILTERCLEAR))
        if (pfrom->nVersion >= NO_BLOOM_VERSION) {
            Misbehaving(pfrom->GetId(), 100);
        } else if (GetBoolArg("-enforcenodebloom", DEFAULT_ENFORCENODEBLOOM)) {
            pfrom->fDisconnect = true;

    if (strCommand == NetMsgType::VERSION)
        // Each connection can only send one version message
        if (pfrom->nVersion != 0)
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
            Misbehaving(pfrom->GetId(), 1);

        uint64_t nNonce = 1;
        vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
        if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
            // disconnect from peers older than this proto version
            LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                               strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion == 10300)
            pfrom->nVersion = 300;
        vRecv >> addrFrom >> nNonce;
        if (!vRecv.empty()) {
            vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
            pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
        vRecv >> pfrom->nStartingHeight;
        vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
        pfrom->fRelayTxes = true;

        // Disconnect if we connected to ourself
        if (nNonce == nLocalHostNonce && nNonce > 1)
            LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
            pfrom->fDisconnect = true;

        pfrom->addrLocal = addrMe;
        if (pfrom->fInbound && addrMe.IsRoutable())

        // Be shy and don't send version until we hear
        if (pfrom->fInbound)
            pfrom->PushVersion();

        pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);

        // Potentially mark this peer as a preferred download peer.
        UpdatePreferredDownload(pfrom, State(pfrom->GetId()));

        pfrom->PushMessage(NetMsgType::VERACK);
        pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        if (!pfrom->fInbound)
            // Advertise our address
            if (fListen && !IsInitialBlockDownload())
                CAddress addr = GetLocalAddress(&pfrom->addr);
                if (addr.IsRoutable())
                    LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr);
                } else if (IsPeerAddrLocalGood(pfrom)) {
                    addr.SetIP(pfrom->addrLocal);
                    LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr);

            // Get recent addresses
            if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
                pfrom->PushMessage(NetMsgType::GETADDR);
                pfrom->fGetAddr = true;
            addrman.Good(pfrom->addr);
            if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
                addrman.Add(addrFrom, addrFrom);
                addrman.Good(addrFrom);

        BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
            item.second.RelayTo(pfrom);

        pfrom->fSuccessfullyConnected = true;

        remoteAddr = ", peeraddr=" + pfrom->addr.ToString();

        LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
                  pfrom->cleanSubVer, pfrom->nVersion,
                  pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,

        int64_t nTimeOffset = nTime - GetTime();
        pfrom->nTimeOffset = nTimeOffset;
        AddTimeData(pfrom->addr, nTimeOffset);
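    // Until the initial "version" message above has been processed, pfrom->nVersion
    // stays 0, so any other command from the peer is penalized below. Once "verack"
    // arrives the handshake is complete: the peer is marked as currently connected
    // so its timestamp gets refreshed, and peers at or above SENDHEADERS_VERSION are
    // told that we prefer headers announcements.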
    else if (pfrom->nVersion == 0)
        // Must have a version message before anything else
        Misbehaving(pfrom->GetId(), 1);

    else if (strCommand == NetMsgType::VERACK)
        pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        // Mark this node as currently connected, so we update its timestamp later.
        if (pfrom->fNetworkNode) {
            State(pfrom->GetId())->fCurrentlyConnected = true;

        if (pfrom->nVersion >= SENDHEADERS_VERSION) {
            // Tell our peer we prefer to receive headers rather than inv's
            // We send this to non-NODE NETWORK peers as well, because even
            // non-NODE NETWORK peers can announce blocks (such as pruning nodes)
            pfrom->PushMessage(NetMsgType::SENDHEADERS);
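    // "addr" messages carry up to 1000 peer addresses. Oversized messages cost the
    // sender Misbehaving(20); stale or bogus timestamps are clamped, and fresh,
    // routable addresses from small announcements are echoed to one or two other
    // peers chosen via a salted hash so the relay targets stay stable for 24 hours.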
    else if (strCommand == NetMsgType::ADDR)
        vector<CAddress> vAddr;

        // Don't want addr from older versions unless seeding
        if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
        if (vAddr.size() > 1000)
            Misbehaving(pfrom->GetId(), 20);
            return error("message addr size() = %u", vAddr.size());

        // Store the new addresses
        vector<CAddress> vAddrOk;
        int64_t nNow = GetAdjustedTime();
        int64_t nSince = nNow - 10 * 60;
        BOOST_FOREACH(CAddress& addr, vAddr)
            boost::this_thread::interruption_point();

            if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                addr.nTime = nNow - 5 * 24 * 60 * 60;
            pfrom->AddAddressKnown(addr);
            bool fReachable = IsReachable(addr);
            if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
                // Relay to a limited number of other nodes
                // Use deterministic randomness to send to the same nodes for 24 hours
                // at a time so the addrKnowns of the chosen nodes prevent repeats
                static uint256 hashSalt;
                if (hashSalt.IsNull())
                    hashSalt = GetRandHash();
                uint64_t hashAddr = addr.GetHash();
                uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60)));
                hashRand = Hash(BEGIN(hashRand), END(hashRand));
                multimap<uint256, CNode*> mapMix;
                BOOST_FOREACH(CNode* pnode, vNodes)
                    if (pnode->nVersion < CADDR_TIME_VERSION)
                    unsigned int nPointer;
                    memcpy(&nPointer, &pnode, sizeof(nPointer));
                    uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer);
                    hashKey = Hash(BEGIN(hashKey), END(hashKey));
                    mapMix.insert(make_pair(hashKey, pnode));
                int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
                for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
                    ((*mi).second)->PushAddress(addr);
            // Do not store addresses outside our network
            vAddrOk.push_back(addr);
        addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
        if (vAddr.size() < 1000)
            pfrom->fGetAddr = false;
        if (pfrom->fOneShot)
            pfrom->fDisconnect = true;
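    // "sendheaders" (BIP 130): the peer is asking to have new blocks announced with
    // a "headers" message rather than an "inv"; we just record the preference on
    // its CNodeState.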
    else if (strCommand == NetMsgType::SENDHEADERS)
        State(pfrom->GetId())->fPreferHeaders = true;
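    // "inv" announcements are capped at MAX_INV_SZ entries. Block invs update our
    // view of the peer's tip and trigger a getheaders (plus a direct block fetch
    // when we are close to caught up); transaction invs are ignored in -blocksonly
    // mode unless the peer is whitelisted with -whitelistrelay.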
    else if (strCommand == NetMsgType::INV)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message inv size() = %u", vInv.size());

        bool fBlocksOnly = GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);

        // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
        if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
            fBlocksOnly = false;

        std::vector<CInv> vToFetch;

        for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
            const CInv &inv = vInv[nInv];

            boost::this_thread::interruption_point();
            pfrom->AddInventoryKnown(inv);

            bool fAlreadyHave = AlreadyHave(inv);
            LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);

            if (inv.type == MSG_BLOCK) {
                UpdateBlockAvailability(pfrom->GetId(), inv.hash);
                if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
                    // First request the headers preceding the announced block. In the normal fully-synced
                    // case where a new block is announced that succeeds the current tip (no reorganization),
                    // there are no such headers.
                    // Secondly, and only when we are close to being synced, we request the announced block directly,
                    // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
                    // time the block arrives, the header chain leading up to it is already validated. Not
                    // doing this will result in the received block being rejected as an orphan in case it is
                    // not a direct successor.
                    pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
                    CNodeState *nodestate = State(pfrom->GetId());
                    if (CanDirectFetch(chainparams.GetConsensus()) &&
                        nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                        vToFetch.push_back(inv);
                        // Mark block as in flight already, even though the actual "getdata" message only goes out
                        // later (within the same cs_main lock, though).
                        MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
                    LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
                    LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
                else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())

            // Track requests for our stuff
            GetMainSignals().Inventory(inv.hash);

            if (pfrom->nSendSize > (SendBufferSize() * 2)) {
                Misbehaving(pfrom->GetId(), 50);
                return error("send buffer size() = %u", pfrom->nSendSize);

        if (!vToFetch.empty())
            pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
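    // "getdata" simply queues the requested inventory onto vRecvGetData; the actual
    // blocks and transactions are served by ProcessGetData above.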
    else if (strCommand == NetMsgType::GETDATA)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message getdata size() = %u", vInv.size());

        if (fDebug || (vInv.size() != 1))
            LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);

        if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
            LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);

        pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
        ProcessGetData(pfrom, chainparams.GetConsensus());
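    // "getblocks": locate the fork point between the peer's locator and our active
    // chain, then inv the blocks that follow it (stopping at hashStop, at the batch
    // limit, or at blocks a pruned node may no longer have on disk).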
    else if (strCommand == NetMsgType::GETBLOCKS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        // Find the last block the caller has in the main chain
        CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);

        // Send the rest of the chain
        pindex = chainActive.Next(pindex);
        LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            if (pindex->GetBlockHash() == hashStop)
                LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            // If pruning, don't inv blocks unless we have on disk and are likely to still have
            // for some reasonable time window (1 hour) that block relay might require.
            const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
                LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
            // When this block is requested, we'll send an inv that'll
            // trigger the peer to getblocks the next batch of inventory.
            LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            pfrom->hashContinue = pindex->GetBlockHash();
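    // "getheaders" is ignored from non-whitelisted peers during initial block
    // download. Otherwise we reply with up to MAX_HEADERS_RESULTS headers starting
    // just past the fork point (or with the single hashStop block when the locator
    // is empty).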
    else if (strCommand == NetMsgType::GETHEADERS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
            LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);

        CNodeState *nodestate = State(pfrom->GetId());
        CBlockIndex* pindex = NULL;
        if (locator.IsNull())
            // If locator is null, return the hashStop block
            BlockMap::iterator mi = mapBlockIndex.find(hashStop);
            if (mi == mapBlockIndex.end())
            pindex = (*mi).second;
            // Find the last block the caller has in the main chain
            pindex = FindForkInGlobalIndex(chainActive, locator);
            pindex = chainActive.Next(pindex);

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        vector<CBlock> vHeaders;
        int nLimit = MAX_HEADERS_RESULTS;
        LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            vHeaders.push_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
        // pindex can be NULL either if we sent chainActive.Tip() OR
        // if our peer has chainActive.Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
        pfrom->PushMessage(NetMsgType::HEADERS, vHeaders);
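    // "tx": try to accept the transaction into the mempool and, on success, relay
    // it and walk mapOrphanTransactionsByPrev to resolve any orphans that were
    // waiting on it. Rejected transactions are remembered in recentRejects, and
    // peers supplying DoS-worthy ones are penalized.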
    else if (strCommand == NetMsgType::TX)
        // Stop processing the transaction early if
        // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
        if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY) && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
            LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);

        vector<uint256> vWorkQueue;
        vector<uint256> vEraseQueue;

        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);

        bool fMissingInputs = false;
        CValidationState state;

        pfrom->setAskFor.erase(inv.hash);
        mapAlreadyAskedFor.erase(inv);

        if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs))
            mempool.check(pcoinsTip);
            RelayTransaction(tx);
            vWorkQueue.push_back(inv.hash);

            LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
                tx.GetHash().ToString(),
                mempool.size(), mempool.DynamicMemoryUsage() / 1000);

            // Recursively process any orphan transactions that depended on this one
            set<NodeId> setMisbehaving;
            for (unsigned int i = 0; i < vWorkQueue.size(); i++)
                map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]);
                if (itByPrev == mapOrphanTransactionsByPrev.end())
                for (set<uint256>::iterator mi = itByPrev->second.begin();
                     mi != itByPrev->second.end();
                    const uint256& orphanHash = *mi;
                    const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx;
                    NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer;
                    bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
                    // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
                    // anyone relaying LegitTxX banned)
                    CValidationState stateDummy;

                    if (setMisbehaving.count(fromPeer))
                    if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2))
                        LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
                        RelayTransaction(orphanTx);
                        vWorkQueue.push_back(orphanHash);
                        vEraseQueue.push_back(orphanHash);
                    else if (!fMissingInputs2)
                        if (stateDummy.IsInvalid(nDos) && nDos > 0)
                            // Punish peer that gave us an invalid orphan tx
                            Misbehaving(fromPeer, nDos);
                            setMisbehaving.insert(fromPeer);
                            LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
                        // Has inputs but not accepted to mempool
                        // Probably non-standard or insufficient fee/priority
                        LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
                        vEraseQueue.push_back(orphanHash);
                        assert(recentRejects);
                        recentRejects->insert(orphanHash);
                    mempool.check(pcoinsTip);

            BOOST_FOREACH(uint256 hash, vEraseQueue)
                EraseOrphanTx(hash);

        else if (fMissingInputs)
            AddOrphanTx(tx, pfrom->GetId());

            // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
            unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
            unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);

        assert(recentRejects);
        recentRejects->insert(tx.GetHash());

        if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
            // Always relay transactions received from whitelisted peers, even
            // if they were already in the mempool or rejected from it due
            // to policy, allowing the node to function as a gateway for
            // nodes hidden behind it.
            // Never relay transactions that we would assign a non-zero DoS
            // score for, as we expect peers to do the same with us in that case.
            if (!state.IsInvalid(nDoS) || nDoS == 0) {
                LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
                RelayTransaction(tx);
                LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));

        if (state.IsInvalid(nDoS))
            LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
                FormatStateMessage(state));
            if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
                pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                   state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
                Misbehaving(pfrom->GetId(), nDoS);
        FlushStateToDisk(state, FLUSH_STATE_PERIODIC);
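    // "headers": accept at most MAX_HEADERS_RESULTS announced headers, require that
    // they connect to each other, and extend our header tree with each one. A
    // full-sized batch triggers a follow-up getheaders, and when the announced
    // chain has at least as much work as our tip we request the missing blocks
    // directly.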
    else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
        std::vector<CBlockHeader> headers;

        // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
        unsigned int nCount = ReadCompactSize(vRecv);
        if (nCount > MAX_HEADERS_RESULTS) {
            Misbehaving(pfrom->GetId(), 20);
            return error("headers message size = %u", nCount);
        headers.resize(nCount);
        for (unsigned int n = 0; n < nCount; n++) {
            vRecv >> headers[n];
            ReadCompactSize(vRecv); // ignore tx count; assume it is 0.

            // Nothing interesting. Stop asking this peer for more headers.

        CBlockIndex *pindexLast = NULL;
        BOOST_FOREACH(const CBlockHeader& header, headers) {
            CValidationState state;
            if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
                Misbehaving(pfrom->GetId(), 20);
                return error("non-continuous headers sequence");
            if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {
                if (state.IsInvalid(nDoS)) {
                    Misbehaving(pfrom->GetId(), nDoS);
                    return error("invalid header received");

        UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());

        if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
            // Headers message had its maximum size; the peer may have more headers.
            // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
            // from there instead.
            LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
            pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());

        bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
        CNodeState *nodestate = State(pfrom->GetId());
        // If this set of headers is valid and ends in a block with at least as
        // much work as our tip, download as much as possible.
        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
            vector<CBlockIndex*> vToFetch;
            CBlockIndex *pindexWalk = pindexLast;
            // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
            while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                        !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
                    // We don't have this block, and it's not yet in flight.
                    vToFetch.push_back(pindexWalk);
                pindexWalk = pindexWalk->pprev;
            // If pindexWalk still isn't on our main chain, we're looking at a
            // very large reorg at a time we think we're close to caught up to
            // the main chain -- this shouldn't really happen. Bail out on the
            // direct fetch and rely on parallel download instead.
            if (!chainActive.Contains(pindexWalk)) {
                LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
                        pindexLast->GetBlockHash().ToString(),
                        pindexLast->nHeight);
            vector<CInv> vGetData;
            // Download as much as possible, from earliest to latest.
            BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
                if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    // Can't download any more from this peer
                vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
                LogPrint("net", "Requesting block %s from peer=%d\n",
                        pindex->GetBlockHash().ToString(), pfrom->id);
            if (vGetData.size() > 1) {
                LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
                        pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
            if (vGetData.size() > 0) {
                pfrom->PushMessage(NetMsgType::GETDATA, vGetData);

        CheckBlockIndex(chainparams.GetConsensus());
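    // "block": hand the block to ProcessNewBlock; unrequested blocks from
    // whitelisted peers are force-processed outside of initial block download, and
    // invalid blocks earn the sender a reject message and, where warranted, a DoS
    // penalty.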
    else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
        CInv inv(MSG_BLOCK, block.GetHash());
        LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);

        pfrom->AddInventoryKnown(inv);

        CValidationState state;
        // Process all blocks from whitelisted peers, even if not requested,
        // unless we're still syncing with the network.
        // Such an unrequested block may still be processed, subject to the
        // conditions in AcceptBlock().
        bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
        ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL);
        if (state.IsInvalid(nDoS)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                               state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
                Misbehaving(pfrom->GetId(), nDoS);
    else if (strCommand == NetMsgType::GETADDR)
        // This asymmetric behavior for inbound and outbound connections was introduced
        // to prevent a fingerprinting attack: an attacker can send specific fake addresses
        // to users' AddrMan and later request them by sending getaddr messages.
        // Making nodes which are behind NAT and can only make outgoing connections ignore
        // the getaddr message mitigates the attack.
        if (!pfrom->fInbound) {
            LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);

        pfrom->vAddrToSend.clear();
        vector<CAddress> vAddr = addrman.GetAddr();
        BOOST_FOREACH(const CAddress &addr, vAddr)
            pfrom->PushAddress(addr);
    else if (strCommand == NetMsgType::MEMPOOL)
        if (CNode::OutboundTargetReached(false) && !pfrom->fWhitelisted)
            LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;

        LOCK2(cs_main, pfrom->cs_filter);

        std::vector<uint256> vtxid;
        mempool.queryHashes(vtxid);
        BOOST_FOREACH(uint256& hash, vtxid) {
            CInv inv(MSG_TX, hash);
            if (pfrom->pfilter) {
                bool fInMemPool = mempool.lookup(hash, tx);
                if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
                if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue;
            vInv.push_back(inv);
            if (vInv.size() == MAX_INV_SZ) {
                pfrom->PushMessage(NetMsgType::INV, vInv);
        if (vInv.size() > 0)
            pfrom->PushMessage(NetMsgType::INV, vInv);
    else if (strCommand == NetMsgType::PING)
        if (pfrom->nVersion > BIP0031_VERSION)
            // Echo the message back with the nonce. This allows for two useful features:
            // 1) A remote node can quickly check if the connection is operational
            // 2) Remote nodes can measure the latency of the network thread. If this node
            //    is overloaded it won't respond to pings quickly and the remote node can
            //    avoid sending us more work, like chain download requests.
            // The nonce stops the remote getting confused between different pings: without
            // it, if the remote node sends a ping once per second and this node takes 5
            // seconds to respond to each, the 5th ping the remote sends would appear to
            // return very quickly.
            pfrom->PushMessage(NetMsgType::PONG, nonce);
    else if (strCommand == NetMsgType::PONG)
        int64_t pingUsecEnd = nTimeReceived;
        size_t nAvail = vRecv.in_avail();
        bool bPingFinished = false;
        std::string sProblem;

        if (nAvail >= sizeof(nonce)) {
            // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
            if (pfrom->nPingNonceSent != 0) {
                if (nonce == pfrom->nPingNonceSent) {
                    // Matching pong received, this ping is no longer outstanding
                    bPingFinished = true;
                    int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                    if (pingUsecTime > 0) {
                        // Successful ping time measurement, replace previous
                        pfrom->nPingUsecTime = pingUsecTime;
                        pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
                        // This should never happen
                        sProblem = "Timing mishap";
                    // Nonce mismatches are normal when pings are overlapping
                    sProblem = "Nonce mismatch";
                    // This is most likely a bug in another implementation somewhere; cancel this ping
                    bPingFinished = true;
                    sProblem = "Nonce zero";
                sProblem = "Unsolicited pong without ping";
            // This is most likely a bug in another implementation somewhere; cancel this ping
            bPingFinished = true;
            sProblem = "Short payload";

        if (!(sProblem.empty())) {
            LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                pfrom->nPingNonceSent,
        if (bPingFinished) {
            pfrom->nPingNonceSent = 0;
    else if (fAlerts && strCommand == NetMsgType::ALERT)
        uint256 alertHash = alert.GetHash();
        if (pfrom->setKnown.count(alertHash) == 0)
            if (alert.ProcessAlert(chainparams.AlertKey()))
                pfrom->setKnown.insert(alertHash);
                BOOST_FOREACH(CNode* pnode, vNodes)
                    alert.RelayTo(pnode);
                // Small DoS penalty so peers that send us lots of
                // duplicate/expired/invalid-signature/whatever alerts
                // eventually get banned.
                // This isn't a Misbehaving(100) (immediate ban) because the
                // peer might be an older or different implementation with
                // a different signature key, etc.
                Misbehaving(pfrom->GetId(), 10);
    else if (strCommand == NetMsgType::FILTERLOAD)
        CBloomFilter filter;

        if (!filter.IsWithinSizeConstraints())
            // There is no excuse for sending a too-large filter
            Misbehaving(pfrom->GetId(), 100);

        LOCK(pfrom->cs_filter);
        delete pfrom->pfilter;
        pfrom->pfilter = new CBloomFilter(filter);
        pfrom->pfilter->UpdateEmptyFull();
        pfrom->fRelayTxes = true;
    else if (strCommand == NetMsgType::FILTERADD)
        vector<unsigned char> vData;

        // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
        // and thus, the maximum size any matched object can have) in a filteradd message
        if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
            Misbehaving(pfrom->GetId(), 100);
        LOCK(pfrom->cs_filter);
        pfrom->pfilter->insert(vData);
        Misbehaving(pfrom->GetId(), 100);
    else if (strCommand == NetMsgType::FILTERCLEAR)
        LOCK(pfrom->cs_filter);
        delete pfrom->pfilter;
        pfrom->pfilter = new CBloomFilter();
        pfrom->fRelayTxes = true;
    else if (strCommand == NetMsgType::REJECT)
        string strMsg; unsigned char ccode; string strReason;
        vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);

        ss << strMsg << " code " << itostr(ccode) << ": " << strReason;

        if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
            ss << ": hash " << hash.ToString();
        LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
        } catch (const std::ios_base::failure&) {
            // Avoid feedback loops by preventing reject messages from triggering a new reject message.
            LogPrint("net", "Unparseable reject message received\n");
    // Ignore unknown commands for extensibility
    LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
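
// ProcessMessages drains a peer's receive queue: for each complete message it
// verifies the network magic, the header and the payload checksum, then hands
// the command to ProcessMessage above. Malformed payloads are answered with a
// "reject" and otherwise logged rather than crashing the node.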
// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom)
    const CChainParams& chainparams = Params();
    //LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());

    //  (4) message start

    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom, chainparams.GetConsensus());

    // this maintains the order of responses
    if (!pfrom->vRecvGetData.empty()) return fOk;

    std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
    while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())

        CNetMessage& msg = *it;

        //LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
        //          msg.hdr.nMessageSize, msg.vRecv.size(),
        //          msg.complete() ? "Y" : "N");

        // end, if an incomplete message is found
        if (!msg.complete())

        // at this point, any failure means we can delete the current message

        // Scan for message start
        if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), MESSAGE_START_SIZE) != 0) {
            LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);

        CMessageHeader& hdr = msg.hdr;
        if (!hdr.IsValid(chainparams.MessageStart()))
            LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
        string strCommand = hdr.GetCommand();

        unsigned int nMessageSize = hdr.nMessageSize;

        CDataStream& vRecv = msg.vRecv;
        uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
        unsigned int nChecksum = ReadLE32((unsigned char*)&hash);
        if (nChecksum != hdr.nChecksum)
            LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__,
               SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum);

        fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime);
        boost::this_thread::interruption_point();

        catch (const std::ios_base::failure& e)
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
            if (strstr(e.what(), "end of data"))
                // Allow exceptions from under-length message on vRecv
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            else if (strstr(e.what(), "size too large"))
                // Allow exceptions from over-long size
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
                PrintExceptionContinue(&e, "ProcessMessages()");
        catch (const boost::thread_interrupted&) {
        catch (const std::exception& e) {
            PrintExceptionContinue(&e, "ProcessMessages()");
            PrintExceptionContinue(NULL, "ProcessMessages()");

        LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);

    // In case the connection got shut down, its receive buffer was wiped
    if (!pfrom->fDisconnect)
        pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
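
// SendMessages is the per-peer send-side counterpart: it schedules pings,
// periodic addr broadcasts, ban/reject notifications, the initial getheaders
// sync, block announcements (as headers when the peer prefers them, otherwise
// as invs), trickled transaction invs, stall detection, and the next getdata
// requests.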
bool SendMessages(CNode* pto)
    const Consensus::Params& consensusParams = Params().GetConsensus();

    // Don't send anything until we get its version message
    if (pto->nVersion == 0)

    bool pingSend = false;
    if (pto->fPingQueued) {
        // RPC ping request by user
    if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
        // Ping automatically sent as a latency probe & keepalive.
    while (nonce == 0) {
        GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
    pto->fPingQueued = false;
    pto->nPingUsecStart = GetTimeMicros();
    if (pto->nVersion > BIP0031_VERSION) {
        pto->nPingNonceSent = nonce;
        pto->PushMessage(NetMsgType::PING, nonce);
        // Peer is too old to support ping command with nonce, pong will never arrive.
        pto->nPingNonceSent = 0;
        pto->PushMessage(NetMsgType::PING);

    TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()

    // Address refresh broadcast
    int64_t nNow = GetTimeMicros();
    if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
        AdvertiseLocal(pto);
        pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);

    if (pto->nNextAddrSend < nNow) {
        pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
        vector<CAddress> vAddr;
        vAddr.reserve(pto->vAddrToSend.size());
        BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
            if (!pto->addrKnown.contains(addr.GetKey()))
                pto->addrKnown.insert(addr.GetKey());
                vAddr.push_back(addr);
                // receiver rejects addr messages larger than 1000
                if (vAddr.size() >= 1000)
                    pto->PushMessage(NetMsgType::ADDR, vAddr);
        pto->vAddrToSend.clear();
            pto->PushMessage(NetMsgType::ADDR, vAddr);

    CNodeState &state = *State(pto->GetId());
    if (state.fShouldBan) {
        if (pto->fWhitelisted)
            LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
            pto->fDisconnect = true;
            if (pto->addr.IsLocal())
                LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
                CNode::Ban(pto->addr, BanReasonNodeMisbehaving);
        state.fShouldBan = false;

    BOOST_FOREACH(const CBlockReject& reject, state.rejects)
        pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
    state.rejects.clear();

    if (pindexBestHeader == NULL)
        pindexBestHeader = chainActive.Tip();
    bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
    if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
        // Only actively request headers from a single peer, unless we're close to today.
        if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
            state.fSyncStarted = true;
            const CBlockIndex *pindexStart = pindexBestHeader;
            /* If possible, start at the block preceding the currently
               best known header. This ensures that we always get a
               non-empty list of headers back as long as the peer
               is up-to-date. With a non-empty response, we can initialise
               the peer's known best block. This wouldn't be possible
               if we requested starting at pindexBestHeader and
               got back an empty response. */
            if (pindexStart->pprev)
                pindexStart = pindexStart->pprev;
            LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
            pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());

    // Resend wallet transactions that haven't gotten in a block yet
    // Except during reindex, importing and IBD, when old wallet
    // transactions become unconfirmed and spam other nodes.
    if (!fReindex && !fImporting && !IsInitialBlockDownload())
        GetMainSignals().Broadcast(nTimeBestReceived);
    // Try sending block announcements via headers
        // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
        // list of block hashes we're relaying, and our peer wants
        // headers announcements, then find the first header
        // not yet known to our peer but would connect, and send.
        // If no header would connect, or if we have too many
        // blocks, or if the peer doesn't want headers, just
        // add all to the inv queue.
        LOCK(pto->cs_inventory);
        vector<CBlock> vHeaders;
        bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
        CBlockIndex *pBestIndex = NULL; // last header queued for delivery
        ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date

        if (!fRevertToInv) {
            bool fFoundStartingHeader = false;
            // Try to find first header that our peer doesn't have, and
            // then send all headers past that one. If we come across any
            // headers that aren't on chainActive, give up.
            BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
                BlockMap::iterator mi = mapBlockIndex.find(hash);
                assert(mi != mapBlockIndex.end());
                CBlockIndex *pindex = mi->second;
                if (chainActive[pindex->nHeight] != pindex) {
                    // Bail out if we reorged away from this block
                    fRevertToInv = true;
                assert(pBestIndex == NULL || pindex->pprev == pBestIndex);
                pBestIndex = pindex;
                if (fFoundStartingHeader) {
                    // add this to the headers message
                    vHeaders.push_back(pindex->GetBlockHeader());
                } else if (PeerHasHeader(&state, pindex)) {
                    continue; // keep looking for the first new block
                } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
                    // Peer doesn't have this header but they do have the prior one.
                    // Start sending headers.
                    fFoundStartingHeader = true;
                    vHeaders.push_back(pindex->GetBlockHeader());
                    // Peer doesn't have this header or the prior one -- nothing will
                    // connect, so bail out.
                    fRevertToInv = true;

            // If falling back to using an inv, just try to inv the tip.
            // The last entry in vBlockHashesToAnnounce was our tip at some point
            if (!pto->vBlockHashesToAnnounce.empty()) {
                const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
                BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
                assert(mi != mapBlockIndex.end());
                CBlockIndex *pindex = mi->second;

                // Warn if we're announcing a block that is not on the main chain.
                // This should be very rare and could be optimized out.
                // Just log for now.
                if (chainActive[pindex->nHeight] != pindex) {
                    LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
                        hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());

                // If the peer announced this block to us, don't inv it back.
                // (Since block announcements may not be via inv's, we can't solely rely on
                // setInventoryKnown to track this.)
                if (!PeerHasHeader(&state, pindex)) {
                    pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                    LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
                        pto->id, hashToAnnounce.ToString());
        } else if (!vHeaders.empty()) {
            if (vHeaders.size() > 1) {
                LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                    vHeaders.front().GetHash().ToString(),
                    vHeaders.back().GetHash().ToString(), pto->id);
                LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
                    vHeaders.front().GetHash().ToString(), pto->id);
            pto->PushMessage(NetMsgType::HEADERS, vHeaders);
            state.pindexBestHeaderSent = pBestIndex;
        pto->vBlockHashesToAnnounce.clear();

        // Message: inventory
        vector<CInv> vInvWait;
        bool fSendTrickle = pto->fWhitelisted;
        if (pto->nNextInvSend < nNow) {
            fSendTrickle = true;
            pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
        LOCK(pto->cs_inventory);
        vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
        vInvWait.reserve(pto->vInventoryToSend.size());
        BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
            if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))

            // trickle out tx inv to protect privacy
            if (inv.type == MSG_TX && !fSendTrickle)
                // 1/4 of tx invs blast to all immediately
                static uint256 hashSalt;
                if (hashSalt.IsNull())
                    hashSalt = GetRandHash();
                uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
                hashRand = Hash(BEGIN(hashRand), END(hashRand));
                bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
                    vInvWait.push_back(inv);

            pto->filterInventoryKnown.insert(inv.hash);

            vInv.push_back(inv);
            if (vInv.size() >= 1000)
                pto->PushMessage(NetMsgType::INV, vInv);
        pto->vInventoryToSend = vInvWait;

        pto->PushMessage(NetMsgType::INV, vInv);
    // Detect whether we're stalling
    nNow = GetTimeMicros();
    if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
        // Stalling only triggers when the block download window cannot move. During normal steady state,
        // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
        // should only happen during initial block download.
        LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
        pto->fDisconnect = true;
    // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
    // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
    // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
    // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
    // to unreasonably increase our timeout.
    // We also compare the block download timeout originally calculated against the time at which we'd disconnect
    // if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
    // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
    // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
    // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
    if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
        QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
        int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
        if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
            LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
            queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
        if (queuedBlock.nTimeDisconnect < nNow) {
            LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
            pto->fDisconnect = true;

    // Message: getdata (blocks)
    vector<CInv> vGetData;
    if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
        vector<CBlockIndex*> vToDownload;
        NodeId staller = -1;
        FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
        BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
            vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
            MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
            LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                pindex->nHeight, pto->id);
        if (state.nBlocksInFlight == 0 && staller != -1) {
            if (State(staller)->nStallingSince == 0) {
                State(staller)->nStallingSince = nNow;
                LogPrint("net", "Stall started peer=%d\n", staller);

    // Message: getdata (non-blocks)
    while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
        const CInv& inv = (*pto->mapAskFor.begin()).second;
        if (!AlreadyHave(inv))
            LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
            vGetData.push_back(inv);
            if (vGetData.size() >= 1000)
                pto->PushMessage(NetMsgType::GETDATA, vGetData);
            // If we're not going to ask, don't expect a response.
            pto->setAskFor.erase(inv.hash);
        pto->mapAskFor.erase(pto->mapAskFor.begin());
    if (!vGetData.empty())
        pto->PushMessage(NetMsgType::GETDATA, vGetData);
std::string CBlockFileInfo::ToString() const {
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
        BlockMap::iterator it1 = mapBlockIndex.begin();
        for (; it1 != mapBlockIndex.end(); it1++)
            delete (*it1).second;
        mapBlockIndex.clear();

        // orphan transactions
        mapOrphanTransactions.clear();
        mapOrphanTransactionsByPrev.clear();
} instance_of_cmaincleanup;