// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "arith_uint256.h"
#include "blockencodings.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "merkleblock.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "versionbits.h"

#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/math/distributions/poisson.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif
CCriticalSection cs_main;

BlockMap mapBlockIndex;
CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;

CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;

CTxMemPool mempool(::minRelayTxFee);
FeeFilterRounder filterRounder(::minRelayTxFee);
struct IteratorComparator
{
    template<typename I>
    bool operator()(const I& a, const I& b)
    {
        return &(*a) < &(*b);
    }
};

map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
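// mapOrphanTransactionsByPrev indexes each orphan by the outpoints it spends, so that when a
// missing parent finally arrives we can quickly locate every orphan waiting on one of its outputs.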
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

static void CheckBlockIndex(const Consensus::Params& consensusParams);

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const string strMessageMagic = "Bitcoin Signed Message:\n";

static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
struct CBlockIndexWorkComparator
{
    bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};
CBlockIndex *pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
 * missing the data for the block.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 * Pruned nodes may have entries where B is missing data.
 */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
 *  block/undo files that should be deleted. Set on startup
 *  or if we allocate more file space when we're in prune mode
 */
bool fCheckForPruning = false;

/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
int32_t nBlockSequenceId = 1;
/** Decreasing counter (used by subsequent preciousblock calls). */
int32_t nBlockReverseSequenceId = -1;
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;

/**
 * Sources of received blocks, saved to be able to send them reject
 * messages or ban them when processing happens afterwards. Protected by
 * cs_main.
 */
map<uint256, NodeId> mapBlockSource;

/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset. Protected by
 * cs_main.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60-byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 *
 * Memory used: 1.3 MB
 */
std::unique_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
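// Back-of-the-envelope check of the figure above: an ordinary Bloom filter sized for 120,000
// elements at a one-in-a-million false-positive rate needs about -n*ln(p)/ln(2)^2 ~= 3.4 Mbit
// (~0.4 MB); the rolling variant keeps several generations of that data, which is roughly
// where the 1.3 MB figure comes from.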
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
    uint256 hash;
    CBlockIndex* pindex;                                     //!< Optional.
    bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
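// Keyed by block hash; the value records which peer the request went to and where the
// corresponding QueuedBlock entry lives in that peer's vBlocksInFlight list.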
/** Stack of nodes which we have set to announce using compact blocks */
list<NodeId> lNodesAnnouncingHeaderAndIDs;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
set<int> setDirtyFileInfo;

/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;

/** Relay map, protected by cs_main. */
typedef std::map<uint256, std::shared_ptr<const CTransaction>> MapRelay;

/** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main. */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//

struct CBlockReject {
    unsigned char chRejectCode;
    string strRejectReason;
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * of by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    const CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! Accumulated misbehaviour score for this peer.
    int nMisbehavior;
    //! Whether this peer should be disconnected and banned (unless whitelisted).
    bool fShouldBan;
    //! String name of this peer (debugging/logging purposes).
    const std::string name;
    //! List of asynchronously-determined block rejections to notify this peer about.
    std::vector<CBlockReject> rejects;
    //! The best known block we know this peer has announced.
    CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    CBlockIndex *pindexBestHeaderSent;
    //! Length of current-streak of unconnecting headers announcements
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
    int64_t nDownloadingSince;
    int nBlocksInFlight;
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;
    //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
    bool fPreferHeaderAndIDs;
    /**
     * Whether this peer will send us cmpctblocks if we request them.
     * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
     * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
     */
    bool fProvidesHeaderAndIDs;
    //! Whether this peer can give us witnesses
    bool fHaveWitness;
    //! Whether this peer wants witnesses in cmpctblocks/blocktxns
    bool fWantsCmpctWitness;
    /**
     * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
     * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
     */
    bool fSupportsDesiredCmpctVersion;

    CNodeState() {
        fCurrentlyConnected = false;
        nMisbehavior = 0;
        fShouldBan = false;
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = NULL;
        pindexBestHeaderSent = NULL;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fHaveWitness = false;
        fWantsCmpctWitness = false;
        fSupportsDesiredCmpctVersion = false;
    }
};
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;

CNodeState *State(NodeId pnode) {
    map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return NULL;
    return &it->second;
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;

    nPreferredDownload += state->fPreferredDownload;
}
void InitializeNode(NodeId nodeid, const CNode *pnode) {
    CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
    state.name = pnode->addrName;
    state.address = pnode->addr;
}
void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
    fUpdateConnectionTime = false;
    CNodeState *state = State(nodeid);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
        fUpdateConnectionTime = true;
    }

    BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
    }
}
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
bool MarkBlockAsReceived(const uint256& hash) {
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Short-circuit most stuff in case it's from the same node
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        *pit = &itInFlight->second.second;
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTimeMicros();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    if (!state->hashLastUnknownBlock.IsNull()) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) {
    if (!nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == pfrom->GetId()) {
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
                return;
            }
        }
        bool fAnnounceUsingCMPCTBLOCK = false;
        uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
                pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        fAnnounceUsingCMPCTBLOCK = true;
        pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
    }
}
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
    return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
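// In other words, direct fetching is only allowed when our tip is within 20 target spacings
// of the current (adjusted) time -- about 200 minutes on mainnet, assuming the usual
// 10-minute nPowTargetSpacing.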
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}
/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
    if (pa->nHeight > pb->nHeight) {
        pa = pa->GetAncestor(pb->nHeight);
    } else if (pb->nHeight > pa->nHeight) {
        pb = pb->GetAncestor(pa->nHeight);
    }

    while (pa != pb && pa && pb) {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == NULL) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<CBlockIndex*> vToFetch;
    CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    CNodeState *state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}

void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    // Find the first block the caller has in the main chain
    BOOST_FOREACH(const uint256& hash, locator.vHave) {
        BlockMap::iterator mi = mapBlockIndex.find(hash);
        if (mi != mapBlockIndex.end())
        {
            CBlockIndex* pindex = (*mi).second;
            if (chain.Contains(pindex))
                return pindex;
            if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                return chain.Tip();
            }
        }
    }
    return chain.Genesis();
}

CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//

bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 99,999 bytes big is
    // at most 10 megabytes of orphans and somewhat more for the by-prev index (in the worst case):
    unsigned int sz = GetTransactionWeight(tx);
    if (sz >= MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
    BOOST_FOREACH(const CTxIn& txin, tx.vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

    LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
    return 1;
}
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
        }
    }
    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
        while (iter != mapOrphanTransactions.end())
        {
            map<uint256, COrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
    }
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
    if (tx.nLockTime == 0)
        return true;
    if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
        return true;
    for (const auto& txin : tx.vin) {
        if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
            return false;
    }
    return true;
}
bool CheckFinalTx(const CTransaction &tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // BIP113 will require that time-locked transactions have nLockTime set to
    // less than the median time of the previous block they're contained in.
    // When the next block is created its previous block will be the current
    // chain tip, so we use that to calculate the median time passed to
    // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                             ? chainActive.Tip()->GetMedianTimePast()
                             : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
 * Calculates the block height and previous block's median time past at
 * which the transaction will be considered final in the context of BIP 68.
 * Also removes from the vector of input heights any entries which did not
 * correspond to sequence locked inputs as they do not affect the calculation.
 */
static std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    assert(prevHeights->size() == tx.vin.size());

    // Will be set to the equivalent height- and time-based nLockTime
    // values that would be necessary to satisfy all relative lock-
    // time constraints given our view of block chain history.
    // The semantics of nLockTime are the last invalid height/time, so
    // use -1 to have the effect of any height or time being valid.
    int nMinHeight = -1;
    int64_t nMinTime = -1;

    // tx.nVersion is signed integer so requires cast to unsigned otherwise
    // we would be doing a signed comparison and half the range of nVersion
    // wouldn't support BIP 68.
    bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
                      && flags & LOCKTIME_VERIFY_SEQUENCE;

    // Do not enforce sequence numbers as a relative lock time
    // unless we have been instructed to
    if (!fEnforceBIP68) {
        return std::make_pair(nMinHeight, nMinTime);
    }

    for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
        const CTxIn& txin = tx.vin[txinIndex];

        // Sequence numbers with the most significant bit set are not
        // treated as relative lock-times, nor are they given any
        // consensus-enforced meaning at this point.
        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
            // The height of this input is not relevant for sequence locks
            (*prevHeights)[txinIndex] = 0;
            continue;
        }

        int nCoinHeight = (*prevHeights)[txinIndex];

        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
            int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
            // NOTE: Subtract 1 to maintain nLockTime semantics
            // BIP 68 relative lock times have the semantics of calculating
            // the first block or time at which the transaction would be
            // valid. When calculating the effective block time or height
            // for the entire transaction, we switch to using the
            // semantics of nLockTime which is the last invalid block
            // time or height. Thus we subtract 1 from the calculated
            // time or height.

            // Time-based relative lock-times are measured from the
            // smallest allowed timestamp of the block containing the
            // txout being spent, which is the median time past of the
            // block prior.
            nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
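            // Illustrative example: with the type flag set and a masked sequence value of 6,
            // the lock is 6 << SEQUENCE_LOCKTIME_GRANULARITY = 6 * 512 = 3072 seconds measured
            // from that median time past; the subtraction of 1 converts it to the
            // last-invalid-time form used by nLockTime.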
        } else {
            nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
        }
    }

    return std::make_pair(nMinHeight, nMinTime);
}
static bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
{
    assert(block.pprev);
    int64_t nBlockTime = block.pprev->GetMedianTimePast();
    if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
        return false;
    return true;
}

bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
}
bool TestLockPointValidity(const LockPoints* lp)
{
    AssertLockHeld(cs_main);
    assert(lp);
    // If there are relative lock times then the maxInputBlock will be set
    // If there are no relative lock times, the LockPoints don't depend on the chain
    if (lp->maxInputBlock) {
        // Check whether chainActive is an extension of the block at which the LockPoints
        // calculation was valid. If not LockPoints are no longer valid
        if (!chainActive.Contains(lp->maxInputBlock)) {
            return false;
        }
    }

    // LockPoints still valid
    return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);

    CBlockIndex* tip = chainActive.Tip();
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than chainActive.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    } else {
        // pcoinsTip contains the UTXO set for chainActive.Tip()
        CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            CCoins coins;
            if (!viewMemPool.GetCoins(txin.prevout.hash, coins)) {
                return error("%s: Missing input", __func__);
            }
            if (coins.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coins.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            BOOST_FOREACH(int height, prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
    unsigned int nSigOps = 0;
    for (const auto& txin : tx.vin)
    {
        nSigOps += txin.scriptSig.GetSigOpCount(false);
    }
    for (const auto& txout : tx.vout)
    {
        nSigOps += txout.scriptPubKey.GetSigOpCount(false);
    }
    return nSigOps;
}
unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
{
    if (tx.IsCoinBase())
        return 0;

    unsigned int nSigOps = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        if (prevout.scriptPubKey.IsPayToScriptHash())
            nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
    }
    return nSigOps;
}
int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
{
    int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;

    if (tx.IsCoinBase())
        return nSigOps;

    if (flags & SCRIPT_VERIFY_P2SH) {
        nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
    }

    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, i < tx.wit.vtxinwit.size() ? &tx.wit.vtxinwit[i].scriptWitness : NULL, flags);
    }
    return nSigOps;
}
bool CheckTransaction(const CTransaction& tx, CValidationState &state)
{
    // Basic checks that don't depend on any context
    if (tx.vin.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
    if (tx.vout.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
    // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");

    // Check for negative or overflow output values
    CAmount nValueOut = 0;
    for (const auto& txout : tx.vout)
    {
        if (txout.nValue < 0)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
        if (txout.nValue > MAX_MONEY)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
        nValueOut += txout.nValue;
        if (!MoneyRange(nValueOut))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
    }

    // Check for duplicate inputs
    set<COutPoint> vInOutPoints;
    for (const auto& txin : tx.vin)
    {
        if (vInOutPoints.count(txin.prevout))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
        vInOutPoints.insert(txin.prevout);
    }

    if (tx.IsCoinBase())
    {
        if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
    }
    else
    {
        for (const auto& txin : tx.vin)
            if (txin.prevout.IsNull())
                return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
    }

    return true;
}
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
    int expired = pool.Expire(GetTime() - age);
    if (expired != 0)
        LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);

    std::vector<uint256> vNoSpendsRemaining;
    pool.TrimToSize(limit, &vNoSpendsRemaining);
    BOOST_FOREACH(const uint256& removed, vNoSpendsRemaining)
        pcoinsTip->Uncache(removed);
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
    return strprintf("%s%s (code %i)",
        state.GetRejectReason(),
        state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
        state.GetRejectCode());
}
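// Example (illustrative): a transaction rejected with REJECT_INVALID (0x10) and reason
// "bad-txns-vout-negative" formats as "bad-txns-vout-negative (code 16)"; any debug message
// is appended after a comma before the code.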
bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree,
                              bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee,
                              std::vector<uint256>& vHashTxnToUncache)
{
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    if (!CheckTransaction(tx, state))
        return false; // state filled in by CheckTransaction

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "coinbase");

    // Don't relay version 2 transactions until CSV is active, and we can be
    // sure that such transactions will be mined (unless we're on
    // -testnet/-regtest).
    const CChainParams& chainparams = Params();
    if (fRequireStandard && tx.nVersion >= 2 && VersionBitsTipState(chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV) != THRESHOLD_ACTIVE) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "premature-version2-tx");
    }

    // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
    bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus());
    if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !witnessEnabled) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
    }

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    string reason;
    if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
        return state.DoS(0, false, REJECT_NONSTANDARD, reason);

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
        return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");

    // is it already in the memory pool?
    if (pool.exists(hash))
        return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");

    // Check for conflicts with in-memory transactions
    set<uint256> setConflicts;
    {
        LOCK(pool.cs); // protect pool.mapNextTx
        BOOST_FOREACH(const CTxIn &txin, tx.vin)
        {
            auto itConflicting = pool.mapNextTx.find(txin.prevout);
            if (itConflicting != pool.mapNextTx.end())
            {
                const CTransaction *ptxConflicting = itConflicting->second;
                if (!setConflicts.count(ptxConflicting->GetHash()))
                {
                    // Allow opt-out of transaction replacement by setting
                    // nSequence >= maxint-1 on all inputs.
                    //
                    // maxint-1 is picked to still allow use of nLockTime by
                    // non-replaceable transactions. All inputs rather than just one
                    // is for the sake of multi-party protocols, where we don't
                    // want a single party to be able to disable replacement.
                    //
                    // The opt-out ignores descendants as anyone relying on
                    // first-seen mempool behavior should be checking all
                    // unconfirmed ancestors anyway; doing otherwise is hopelessly
                    // insecure.
                    bool fReplacementOptOut = true;
                    if (fEnableReplacement)
                    {
                        BOOST_FOREACH(const CTxIn &_txin, ptxConflicting->vin)
                        {
                            if (_txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
                            {
                                fReplacementOptOut = false;
                                break;
                            }
                        }
                    }
                    if (fReplacementOptOut)
                        return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");

                    setConflicts.insert(ptxConflicting->GetHash());
                }
            }
        }
    }

    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        CAmount nValueIn = 0;
        LockPoints lp;
        {
            LOCK(pool.cs);
            CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
            view.SetBackend(viewMemPool);

            // do we already have it?
            bool fHadTxInCache = pcoinsTip->HaveCoinsInCache(hash);
            if (view.HaveCoins(hash)) {
                if (!fHadTxInCache)
                    vHashTxnToUncache.push_back(hash);
                return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known");
            }

            // do all inputs exist?
            // Note that this does not check for the presence of actual outputs (see the next check for that),
            // and only helps with filling in pfMissingInputs (to determine missing vs spent).
            BOOST_FOREACH(const CTxIn txin, tx.vin) {
                if (!pcoinsTip->HaveCoinsInCache(txin.prevout.hash))
                    vHashTxnToUncache.push_back(txin.prevout.hash);
                if (!view.HaveCoins(txin.prevout.hash)) {
                    if (pfMissingInputs)
                        *pfMissingInputs = true;
                    return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
                }
            }

            // are the actual inputs available?
            if (!view.HaveInputs(tx))
                return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent");

            // Bring the best block into scope
            view.GetBestBlock();

            nValueIn = view.GetValueIn(tx);

            // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
            view.SetBackend(dummy);

            // Only accept BIP68 sequence locked transactions that can be mined in the next
            // block; we don't want our mempool filled up with transactions that can't
            // be mined yet.
            // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
            // CoinsViewCache instead of create its own
            if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
                return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
        }

        // Check for non-standard pay-to-script-hash in inputs
        if (fRequireStandard && !AreInputsStandard(tx, view))
            return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");

        // Check for non-standard witness in P2WSH
        if (!tx.wit.IsNull() && fRequireStandard && !IsWitnessStandard(tx, view))
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);

        int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);

        CAmount nValueOut = tx.GetValueOut();
        CAmount nFees = nValueIn-nValueOut;
        // nModifiedFees includes any fee deltas from PrioritiseTransaction
        CAmount nModifiedFees = nFees;
        double nPriorityDummy = 0;
        pool.ApplyDeltas(hash, nPriorityDummy, nModifiedFees);

        CAmount inChainInputValue;
        double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue);

        // Keep track of transactions that spend a coinbase, which we re-scan
        // during reorgs to ensure COINBASE_MATURITY is still met.
        bool fSpendsCoinbase = false;
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            const CCoins *coins = view.AccessCoins(txin.prevout.hash);
            if (coins->IsCoinBase()) {
                fSpendsCoinbase = true;
                break;
            }
        }

        CTxMemPoolEntry entry(tx, nFees, nAcceptTime, dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
        unsigned int nSize = entry.GetTxSize();

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
                strprintf("%d", nSigOpsCost));

        CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
        if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
        } else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
            // Require that free transactions have sufficient priority to be mined in the next block.
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
        }

        // Continuously rate-limit free (really, very-low-fee) transactions
        // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
        // be annoying or make others' transactions take longer to confirm.
        if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize))
        {
            static CCriticalSection csFreeLimiter;
            static double dFreeCount;
            static int64_t nLastTime;
            int64_t nNow = GetTime();

            LOCK(csFreeLimiter);

            // Use an exponentially decaying ~10-minute window:
            dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
            nLastTime = nNow;
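            // For example, if 300 seconds have passed since the previous free transaction, the
            // accumulated byte count is first scaled by (1 - 1/600)^300 ~= 0.61, giving the
            // exponentially decaying ~10-minute window described above.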
            // -limitfreerelay unit is thousand-bytes-per-minute
            // At default rate it would take over a month to fill 1GB
            if (dFreeCount + nSize >= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000)
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction");
            LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
            dFreeCount += nSize;
        }

        if (nAbsurdFee && nFees > nAbsurdFee)
            return state.Invalid(false,
                REJECT_HIGHFEE, "absurdly-high-fee",
                strprintf("%d > %d", nFees, nAbsurdFee));

        // Calculate in-mempool ancestors, up to a limit.
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
        }

        // A transaction that spends outputs that would be replaced by it is invalid. Now
        // that we have the set of all ancestors we can detect this
        // pathological case by making sure setConflicts and setAncestors don't
        // intersect.
        BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors)
        {
            const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
            if (setConflicts.count(hashAncestor))
            {
                return state.DoS(10, false,
                                 REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
                                 strprintf("%s spends conflicting transaction %s",
                                           hash.ToString(),
                                           hashAncestor.ToString()));
            }
        }

        // Check if it's economically rational to mine this transaction rather
        // than the ones it replaces.
        CAmount nConflictingFees = 0;
        size_t nConflictingSize = 0;
        uint64_t nConflictingCount = 0;
        CTxMemPool::setEntries allConflicting;

        // If we don't hold the lock allConflicting might be incomplete; the
        // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
        // mempool consistency for us.
        LOCK(pool.cs);
        if (setConflicts.size())
        {
            CFeeRate newFeeRate(nModifiedFees, nSize);
            set<uint256> setConflictsParents;
            const int maxDescendantsToVisit = 100;
            CTxMemPool::setEntries setIterConflicting;
            BOOST_FOREACH(const uint256 &hashConflicting, setConflicts)
            {
                CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
                if (mi == pool.mapTx.end())
                    continue;

                // Save these to avoid repeated lookups
                setIterConflicting.insert(mi);

                // Don't allow the replacement to reduce the feerate of the
                // mempool.
                //
                // We usually don't want to accept replacements with lower
                // feerates than what they replaced as that would lower the
                // feerate of the next block. Requiring that the feerate always
                // be increased is also an easy-to-reason about way to prevent
                // DoS attacks via replacements.
                //
                // The mining code doesn't (currently) take children into
                // account (CPFP) so we only consider the feerates of
                // transactions being directly replaced, not their indirect
                // descendants. While that does mean high feerate children are
                // ignored when deciding whether or not to replace, we do
                // require the replacement to pay more overall fees too,
                // mitigating most cases.
                CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
                if (newFeeRate <= oldFeeRate)
                {
                    return state.DoS(0, false,
                            REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                            strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                                  hash.ToString(),
                                  newFeeRate.ToString(),
                                  oldFeeRate.ToString()));
                }

                BOOST_FOREACH(const CTxIn &txin, mi->GetTx().vin)
                {
                    setConflictsParents.insert(txin.prevout.hash);
                }

                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                BOOST_FOREACH(CTxMemPool::txiter it, setIterConflicting) {
                    pool.CalculateDescendants(it, allConflicting);
                }
                BOOST_FOREACH(CTxMemPool::txiter it, allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.DoS(0, false,
                        REJECT_NONSTANDARD, "too many potential replacements", false,
                        strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                            hash.ToString(),
                            nConflictingCount,
                            maxDescendantsToVisit));
            }

            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
                        return state.DoS(0, false,
                                         REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
                                         strprintf("replacement %s adds unconfirmed input, idx %d",
                                                  hash.ToString(), j));
                }
            }

            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                                          hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }

            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::minRelayTxFee.GetFee(nSize))
            {
                return state.DoS(0, false,
                        REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                        strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                              hash.ToString(),
                              FormatMoney(nDeltaFees),
                              FormatMoney(::minRelayTxFee.GetFee(nSize))));
            }
        }

        unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
        if (!Params().RequireStandard()) {
            scriptVerifyFlags = GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
        }

        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        PrecomputedTransactionData txdata(tx);
        if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, txdata)) {
            // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
            // need to turn both off, and compare against just turning off CLEANSTACK
            // to see if the failure is specifically due to witness validation.
            if (tx.wit.IsNull() && CheckInputs(tx, state, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, txdata) &&
                !CheckInputs(tx, state, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, txdata)) {
                // Only the witness is missing, so the transaction itself may be fine.
                state.SetCorruptionPossible();
            }
            return false;
        }

        // Check again against just the consensus-critical mandatory script
        // verification flags, in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks, however allowing such transactions into the mempool
        // can be exploited as a DoS attack.
        if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata))
            return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
                __func__, hash.ToString(), FormatStateMessage(state));

        // Remove conflicting transactions from the mempool
        BOOST_FOREACH(const CTxMemPool::txiter it, allConflicting)
        {
            LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
                    it->GetTx().GetHash().ToString(),
                    hash.ToString(),
                    FormatMoney(nModifiedFees - nConflictingFees),
                    (int)nSize - (int)nConflictingSize);
        }
        pool.RemoveStaged(allConflicting, false);

        // Store transaction in memory
        pool.addUnchecked(hash, entry, setAncestors, !IsInitialBlockDownload());

        // trim mempool and check if tx was trimmed
        if (!fOverrideMempoolLimit) {
            LimitMempoolSize(pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
            if (!pool.exists(hash))
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
        }
    }

    GetMainSignals().SyncTransaction(tx, NULL, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);

    return true;
}
bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                                bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    std::vector<uint256> vHashTxToUncache;
    bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
    if (!res) {
        BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache)
            pcoinsTip->Uncache(hashTx);
    }
    return res;
}

bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                        bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    return AcceptToMemoryPoolWithTime(pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee);
}
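// Usage note (illustrative sketch only; the real call sites live in the net
// processing and RPC code, and this assumes the default arguments declared in
// the header): a caller typically does something like
//     CValidationState state;
//     bool fMissingInputs = false;
//     if (!AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) { ... }
// and then inspects state (and fMissingInputs, for possible orphans) to decide
// whether to reject, store as orphan, or relay the transaction.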
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
    CBlockIndex *pindexSlow = NULL;

    LOCK(cs_main);

    std::shared_ptr<const CTransaction> ptx = mempool.get(hash);
    if (ptx)
    {
        txOut = *ptx;
        return true;
    }

    if (fTxIndex) {
        CDiskTxPos postx;
        if (pblocktree->ReadTxIndex(hash, postx)) {
            CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
            if (file.IsNull())
                return error("%s: OpenBlockFile failed", __func__);
            CBlockHeader header;
            try {
                file >> header;
                fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                file >> txOut;
            } catch (const std::exception& e) {
                return error("%s: Deserialize or I/O error - %s", __func__, e.what());
            }
            hashBlock = header.GetHash();
            if (txOut.GetHash() != hash)
                return error("%s: txid mismatch", __func__);
            return true;
        }
    }

    if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
        int nHeight = -1;
        {
            const CCoinsViewCache& view = *pcoinsTip;
            const CCoins* coins = view.AccessCoins(hash);
            if (coins)
                nHeight = coins->nHeight;
        }
        if (nHeight > 0)
            pindexSlow = chainActive[nHeight];
    }

    if (pindexSlow) {
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
            BOOST_FOREACH(const CTransaction &tx, block.vtx) {
                if (tx.GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//

bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk: OpenBlockFile failed");

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(block);
    fileout << FLATDATA(messageStart) << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk: ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
        return false;
    if (block.GetHash() != pindex->GetBlockHash())
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                pindex->ToString(), pindex->GetBlockPos().ToString());
    return true;
}
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
    int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
    // Force block reward to zero when right shift is undefined.
    if (halvings >= 64)
        return 0;

    CAmount nSubsidy = 50 * COIN;
    // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
    nSubsidy >>= halvings;
    return nSubsidy;
}
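// Worked example (mainnet parameters, nSubsidyHalvingInterval = 210,000):
// at height 420,000 halvings = 2, so nSubsidy = (50 * COIN) >> 2 = 12.5 BTC;
// at height 100,000 halvings = 0 and the full 50 BTC subsidy still applies.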
bool IsInitialBlockDownload()
{
    const CChainParams& chainParams = Params();

    // Once this function has returned false, it must remain false.
    static std::atomic<bool> latchToFalse{false};
    // Optimization: pre-test latch before taking the lock.
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (chainActive.Tip() == NULL)
        return true;
    if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
        return true;
    if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    latchToFalse.store(true, std::memory_order_relaxed);
    return false;
}
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;

static void AlertNotify(const std::string& strMessage)
{
    uiInterface.NotifyAlertChanged();
    std::string strCmd = GetArg("-alertnotify", "");
    if (strCmd.empty()) return;

    // Alert text should be plain ascii coming from a trusted source, but to
    // be safe we first strip anything not in safeChars, then add single quotes around
    // the whole string before passing it to the shell:
    std::string singleQuote("'");
    std::string safeStatus = SanitizeString(strMessage);
    safeStatus = singleQuote+safeStatus+singleQuote;
    boost::replace_all(strCmd, "%s", safeStatus);

    boost::thread t(runCommand, strCmd); // thread runs free
}
void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial sync)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = NULL;

    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        if (!fLargeWorkForkFound && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            AlertNotify(warning);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            fLargeWorkForkFound = true;
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            fLargeWorkInvalidChainFound = true;
        }
    }
    else
    {
        fLargeWorkForkFound = false;
        fLargeWorkInvalidChainFound = false;
    }
}
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    while (pfork && pfork != plonger)
    {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }

    // We define a condition where we should warn the user about as a fork of at least 7 blocks
    // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 72)
    {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }

    CheckForkWarningConditions();
}
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
    if (howmuch == 0)
        return;

    CNodeState *state = State(pnode);
    if (state == NULL)
        return;

    state->nMisbehavior += howmuch;
    int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
    if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
    {
        LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
        state->fShouldBan = true;
    } else
        LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
      log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
      pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    assert(tip);
    LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
      tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
    CheckForkWarningConditions();
}

void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
            unsigned nPos = txin.prevout.n;

            if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
                assert(false);
            // mark an outpoint spent, and construct undo information
            txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
            coins->Spend(nPos);
            if (coins->vout.size() == 0) {
                CTxInUndo& undo = txundo.vprevout.back();
                undo.nHeight = coins->nHeight;
                undo.fCoinBase = coins->fCoinBase;
                undo.nVersion = coins->nVersion;
            }
        }
    }
    // add outputs
    inputs.ModifyNewCoins(tx.GetHash(), tx.IsCoinBase())->FromTx(tx, nHeight);
}

void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
    CTxUndo txundo;
    UpdateCoins(tx, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    const CScriptWitness *witness = (nIn < ptxTo->wit.vtxinwit.size()) ? &ptxTo->wit.vtxinwit[nIn].scriptWitness : NULL;
    if (!VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error)) {
        return false;
    }
    return true;
}

int GetSpendHeight(const CCoinsViewCache& inputs)
{
    LOCK(cs_main);
    CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
    return pindexPrev->nHeight + 1;
}
namespace Consensus {
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
{
        // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
        // for an attacker to attempt to split the network.
        if (!inputs.HaveInputs(tx))
            return state.Invalid(false, 0, "", "Inputs unavailable");

        CAmount nValueIn = 0;
        CAmount nFees = 0;
        for (unsigned int i = 0; i < tx.vin.size(); i++)
        {
            const COutPoint &prevout = tx.vin[i].prevout;
            const CCoins *coins = inputs.AccessCoins(prevout.hash);
            assert(coins);

            // If prev is coinbase, check that it's matured
            if (coins->IsCoinBase()) {
                if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
                    return state.Invalid(false,
                        REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
                        strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
            }

            // Check for negative or overflow input values
            nValueIn += coins->vout[prevout.n].nValue;
            if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
                return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");
        }

        if (nValueIn < tx.GetValueOut())
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
                strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));

        // Tally transaction fees
        CAmount nTxFee = nValueIn - tx.GetValueOut();
        if (nTxFee < 0)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
        nFees += nTxFee;
        if (!MoneyRange(nFees))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");

        return true;
}
}// namespace Consensus
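// Illustrative note (hypothetical amounts): for a transaction whose inputs are
// worth nValueIn = 1.00 BTC and whose outputs total 0.999 BTC, CheckTxInputs
// above accepts it with an implied fee nTxFee = 0.001 BTC; if the outputs
// instead summed to 1.01 BTC, the nValueIn < tx.GetValueOut() check rejects it
// as "bad-txns-in-belowout" before any fee is tallied.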
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
    if (!tx.IsCoinBase())
    {
        if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
            return false;

        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks before the
        // last block chain checkpoint. Assuming the checkpoints are valid this
        // is safe because block merkle hashes are still computed and checked,
        // and any change will be caught at the next checkpoint. Of course, if
        // the checkpoint is for a chain that's invalid due to false scriptSigs
        // this optimization would allow an invalid chain to be accepted.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);
                assert(coins);

                // Verify signature
                CScriptCheck check(*coins, tx, i, flags, cacheStore, &txdata);
                if (pvChecks) {
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(*coins, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, &txdata);
                        if (check2())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after soft-fork
                    // super-majority signaling has occurred.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
        }
    }

    return true;
}
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(blockundo);
    fileout << FLATDATA(messageStart) << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}

bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
{
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Read block
    uint256 hashChecksum;
    try {
        filein >> blockundo;
        filein >> hashChecksum;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    if (hashChecksum != hasher.GetHash())
        return error("%s: Checksum mismatch", __func__);

    return true;
}
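// Note on the checksum above: both the writer and the reader feed the parent
// block hash followed by the serialized undo data into a CHashWriter, so the
// stored checksum commits to which block the undo data belongs to as well as
// to its contents. Undo data attached to the wrong block hash therefore fails
// UndoReadFromDisk even if the bytes themselves are intact.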
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
    strMiscWarning = strMessage;
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}

bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
/**
 * Apply the undo operation of a CTxInUndo to the given chain state.
 * @param undo The undo object.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return True on success.
 */
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    CCoinsModifier coins = view.ModifyCoins(out.hash);
    if (undo.nHeight != 0) {
        // undo data contains height: this is the last output of the prevout tx being spent
        if (!coins->IsPruned())
            fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
        coins->Clear();
        coins->fCoinBase = undo.fCoinBase;
        coins->nHeight = undo.nHeight;
        coins->nVersion = undo.nVersion;
    } else {
        if (coins->IsPruned())
            fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
    }
    if (coins->IsAvailable(out.n))
        fClean = fClean && error("%s: undo data overwriting existing output", __func__);
    if (coins->vout.size() < out.n+1)
        coins->vout.resize(out.n+1);
    coins->vout[out.n] = undo.txout;

    return fClean;
}
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    if (pfClean)
        *pfClean = false;

    bool fClean = true;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock(): no undo data available");
    if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock(): failure reading undo data");

    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock(): block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = block.vtx[i];
        uint256 hash = tx.GetHash();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        {
            CCoinsModifier outs = view.ModifyCoins(hash);
            outs->ClearUnspendable();

            CCoins outsBlock(tx, pindex->nHeight);
            // The CCoins serialization does not serialize negative numbers.
            // No network rules currently depend on the version here, so an inconsistency is harmless
            // but it must be corrected before txout nversion ever influences a network rule.
            if (outsBlock.nVersion < 0)
                outs->nVersion = outsBlock.nVersion;
            if (*outs != outsBlock)
                fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");

            // remove outputs
            outs->Clear();
        }

        // restore inputs
        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock(): transaction and undo data inconsistent");
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                if (!ApplyTxInUndo(undo, view, out))
                    fClean = false;
            }
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    if (pfClean) {
        *pfClean = fClean;
        return true;
    }

    return fClean;
}
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE *fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }

    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }
}

bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);

static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
}
// Protected by cs_main
VersionBitsCache versionbitscache;

int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    int32_t nVersion = VERSIONBITS_TOP_BITS;

    for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
        ThresholdState state = VersionBitsState(pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
        if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
            nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
        }
    }

    return nVersion;
}
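// Illustrative note: VERSIONBITS_TOP_BITS sets the top three version bits to 001,
// so a block signalling only deployment bit 0 (e.g. CSV on mainnet) would carry
// nVersion = 0x20000001, and one also signalling bit 1 (e.g. segwit) would carry
// 0x20000003. Which bit maps to which deployment comes from the chain's consensus
// parameters, not from this function.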
/**
 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
 */
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
    int bit;

public:
    WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}

    int64_t BeginTime(const Consensus::Params& params) const { return 0; }
    int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }

    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
    {
        return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
    }
};

// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];

static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                  CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
{
    AssertLockHeld(cs_main);

    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck)
            view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    bool fScriptChecks = true;
    if (fCheckpointsEnabled) {
        CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
        if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
            // This block is an ancestor of a checkpoint: disable script checks
            fScriptChecks = false;
        }
    }

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint("bench", " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
                          !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                            (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
    CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
    //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));

    if (fEnforceBIP30) {
        BOOST_FOREACH(const CTransaction& tx, block.vtx) {
            const CCoins* coins = view.AccessCoins(tx.GetHash());
            if (coins && !coins->IsPruned())
                return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                                 REJECT_INVALID, "bad-txns-BIP30");
        }
    }

    // BIP16 didn't become active until Apr 1 2012
    int64_t nBIP16SwitchTime = 1333238400;
    bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);

    unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

    // Start enforcing the DERSIG (BIP66) rule
    if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
    if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    // Start enforcing WITNESS rules using versionbits logic.
    if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_WITNESS;
        flags |= SCRIPT_VERIFY_NULLDUMMY;
    }

    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
    CBlockUndo blockundo;

    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    std::vector<uint256> vOrphanErase;
    std::vector<int> prevheights;
    CAmount nFees = 0;
    int nInputs = 0;
    int64_t nSigOpsCost = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    std::vector<PrecomputedTransactionData> txdata;
    txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = block.vtx[i];

        nInputs += tx.vin.size();

        if (!tx.IsCoinBase())
        {
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
                                 REJECT_INVALID, "bad-txns-inputs-missingorspent");

            // Check that transaction is BIP68 final
            // BIP68 lock checks (as opposed to nLockTime checks) must
            // be in ConnectBlock because they require the UTXO set
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;
            }

            // Which orphan pool entries must we evict?
            for (size_t j = 0; j < tx.vin.size(); j++) {
                auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
                if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
                for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
                    const CTransaction& orphanTx = (*mi)->second.tx;
                    const uint256& orphanHash = orphanTx.GetHash();
                    vOrphanErase.push_back(orphanHash);
                }
            }

            if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
                                 REJECT_INVALID, "bad-txns-nonfinal");
            }
        }

        // GetTransactionSigOpCost counts 3 types of sigops:
        // * legacy (always)
        // * p2sh (when P2SH enabled in flags and excludes coinbase)
        // * witness (when witness enabled in flags and excludes coinbase)
        nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
        if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        txdata.emplace_back(tx);
        if (!tx.IsCoinBase())
        {
            nFees += view.GetValueIn(tx)-tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                    tx.GetHash().ToString(), FormatStateMessage(state));
            control.Add(vChecks);
        }

        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }
    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);

    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0].GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0].GetValueOut(), blockReward),
                         REJECT_INVALID, "bad-cb-amount");

    if (!control.Wait())
        return state.DoS(100, false);
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
    {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos _pos;
            if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock(): FindUndoPos failed");
            if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
                return AbortNode(state, "Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = _pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return AbortNode(state, "Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
    static uint256 hashPrevBestCoinBase;
    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
    hashPrevBestCoinBase = block.vtx[0].GetHash();

    // Erase orphan transactions included or precluded by this block
    if (vOrphanErase.size()) {
        int nErased = 0;
        BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
            nErased += EraseOrphanTx(orphanHash);
        }
        LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
    }

    int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
    LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);

    return true;
}
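// Summary of the flow above, for readers skimming ConnectBlock: the block is
// re-checked contextually (CheckBlock, BIP30/BIP34 handling, script-flag
// selection), each transaction's inputs are validated and applied to the coins
// view while undo data is accumulated, the coinbase is bounded by fees plus
// subsidy, queued script checks are awaited, and only then are undo data, the
// optional transaction index, and the new best-block marker written out.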
enum FlushStateMode {
    FLUSH_STATE_NONE,
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
    FLUSH_STATE_ALWAYS
};

/**
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with
 * if they're too large, if it's been a while since the last write,
 * or always and in all cases if we're in prune mode and are deleting files.
 */
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
    const CChainParams& chainparams = Params();
    LOCK2(cs_main, cs_LastBlockFile);
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
    bool fFlushForPrune = false;
    try {
    if (fPruneMode && fCheckForPruning && !fReindex) {
        FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
        fCheckForPruning = false;
        if (!setFilesToPrune.empty()) {
            fFlushForPrune = true;
            if (!fHavePruned) {
                pblocktree->WriteFlag("prunedblockfiles", true);
                fHavePruned = true;
            }
        }
    }
    int64_t nNow = GetTimeMicros();
    // Avoid writing/flushing immediately after startup.
    if (nLastWrite == 0) {
        nLastWrite = nNow;
    }
    if (nLastFlush == 0) {
        nLastFlush = nNow;
    }
    if (nLastSetChain == 0) {
        nLastSetChain = nNow;
    }
    size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
    // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
    // The cache is over the limit, we have to write now.
    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
    // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
    bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
    // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
    bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
    // Combine all conditions that result in a full cache flush.
    bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
    // Write blocks and block index to disk.
    if (fDoFullFlush || fPeriodicWrite) {
        // Depend on nMinDiskSpace to ensure we can write block index
        if (!CheckDiskSpace(0))
            return state.Error("out of disk space");
        // First make sure all block and undo data is flushed to disk.
        FlushBlockFile();
        // Then update all block file information (which may refer to block and undo files).
        {
            std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
            vFiles.reserve(setDirtyFileInfo.size());
            for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
                setDirtyFileInfo.erase(it++);
            }
            std::vector<const CBlockIndex*> vBlocks;
            vBlocks.reserve(setDirtyBlockIndex.size());
            for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                vBlocks.push_back(*it);
                setDirtyBlockIndex.erase(it++);
            }
            if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                return AbortNode(state, "Files to write to block index database");
            }
        }
        // Finally remove any pruned files
        if (fFlushForPrune)
            UnlinkPrunedFiles(setFilesToPrune);
        nLastWrite = nNow;
    }
    // Flush best chain related state. This can only be done if the blocks / block index write was also done.
    if (fDoFullFlush) {
        // Typical CCoins structures on disk are around 128 bytes in size.
        // Pushing a new one to the database can cause it to be written
        // twice (once in the log, and once in the tables). This is already
        // an overestimation, as most will delete an existing entry or
        // overwrite one. Still, use a conservative safety factor of 2.
        if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
            return state.Error("out of disk space");
        // Flush the chainstate (which may refer to block index entries).
        if (!pcoinsTip->Flush())
            return AbortNode(state, "Failed to write to coin database");
        nLastFlush = nNow;
    }
    if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().SetBestChain(chainActive.GetLocator());
        nLastSetChain = nNow;
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
void FlushStateToDisk() {
    CValidationState state;
    FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}

void PruneAndFlush() {
    CValidationState state;
    fCheckForPruning = true;
    FlushStateToDisk(state, FLUSH_STATE_NONE);
}
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
    chainActive.SetTip(pindexNew);

    // New best block
    nTimeBestReceived = GetTime();
    mempool.AddTransactionsUpdated(1);

    cvBlockChange.notify_all();

    static bool fWarned = false;
    std::vector<std::string> warningMessages;
    if (!IsInitialBlockDownload())
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = chainActive.Tip();
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
                if (state == THRESHOLD_ACTIVE) {
                    strMiscWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                    if (!fWarned) {
                        AlertNotify(strMiscWarning);
                        fWarned = true;
                    }
                } else {
                    warningMessages.push_back(strprintf("unknown new rules are about to activate (versionbit %i)", bit));
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != NULL; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            warningMessages.push_back(strprintf("%d of last 100 blocks have unexpected version", nUpgraded));
        if (nUpgraded > 100/2)
        {
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
            if (!fWarned) {
                AlertNotify(strMiscWarning);
                fWarned = true;
            }
        }
    }
    LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utx)", __func__,
      chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion,
      log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
      Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
    if (!warningMessages.empty())
        LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", "));
    LogPrintf("\n");
}
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
bool static DisconnectTip(CValidationState& state, const CChainParams& chainparams, bool fBare = false)
{
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    CBlock block;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    }
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;

    if (!fBare) {
        // Resurrect mempool transactions from the disconnected block.
        std::vector<uint256> vHashUpdate;
        BOOST_FOREACH(const CTransaction &tx, block.vtx) {
            // ignore validation errors in resurrected transactions
            CValidationState stateDummy;
            if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
                mempool.removeRecursive(tx);
            } else if (mempool.exists(tx.GetHash())) {
                vHashUpdate.push_back(tx.GetHash());
            }
        }
        // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
        // no in-mempool children, which is generally not true when adding
        // previously-confirmed transactions back to the mempool.
        // UpdateTransactionsFromBlock finds descendants of any transactions in this
        // block that were added back and cleans up the mempool state.
        mempool.UpdateTransactionsFromBlock(vHashUpdate);
    }

    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev, chainparams);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        GetMainSignals().SyncTransaction(tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
    }
    return true;
}
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;

/**
 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
 * corresponding to pindexNew, to bypass loading it again from disk.
 */
bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock, std::vector<std::shared_ptr<const CTransaction>> &txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>> &txChanged)
{
    assert(pindexNew->pprev == chainActive.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    CBlock block;
    if (!pblock) {
        if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pblock = &block;
    }
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
    {
        CCoinsViewCache view(pcoinsTip);
        bool rv = ConnectBlock(*pblock, state, pindexNew, view, chainparams);
        GetMainSignals().BlockChecked(*pblock, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
        }
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
        assert(view.Flush());
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, &txConflicted, !IsInitialBlockDownload());
    // Update chainActive & related variables.
    UpdateTip(pindexNew, chainparams);

    for(unsigned int i=0; i < pblock->vtx.size(); i++)
        txChanged.emplace_back(pblock->vtx[i], pindexNew, i);

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
    LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
    return true;
}
/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 */
static CBlockIndex* FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = NULL;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return NULL;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
 */
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock, bool& fInvalidFound, std::vector<std::shared_ptr<const CTransaction>>& txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>>& txChanged)
{
    AssertLockHeld(cs_main);
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams))
            return false;
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL, txConflicted, txChanged)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
        LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
    }
    mempool.check(pcoinsTip);

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
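// In short, one ActivateBestChainStep pass first unwinds the active chain back to its fork
// point with pindexMostWork, then connects at most 32 candidate blocks toward it before
// returning, so that cs_main can be released periodically during long reorgs or initial
// block download rather than being held for the whole switch-over.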
static void NotifyHeaderTip() {
    bool fNotify = false;
    bool fInitialBlockDownload = false;
    static CBlockIndex* pindexHeaderOld = NULL;
    CBlockIndex* pindexHeader = NULL;
    {
        LOCK(cs_main);
        pindexHeader = pindexBestHeader;

        if (pindexHeader != pindexHeaderOld) {
            fNotify = true;
            fInitialBlockDownload = IsInitialBlockDownload();
            pindexHeaderOld = pindexHeader;
        }
    }
    // Send block tip changed notifications without cs_main
    if (fNotify) {
        uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
    }
}
/**
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 */
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
    CBlockIndex *pindexMostWork = NULL;
    CBlockIndex *pindexNewTip = NULL;
    std::vector<std::tuple<CTransaction,CBlockIndex*,int>> txChanged;
    if (pblock)
        txChanged.reserve(pblock->vtx.size());
    do {
        boost::this_thread::interruption_point();
        if (ShutdownRequested())
            break;

        const CBlockIndex *pindexFork;
        std::vector<std::shared_ptr<const CTransaction>> txConflicted;
        bool fInitialDownload;
        {
            LOCK(cs_main);
            CBlockIndex *pindexOldTip = chainActive.Tip();
            if (pindexMostWork == NULL) {
                pindexMostWork = FindMostWorkChain();
            }

            // Whether we have anything to do at all.
            if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
                return true;

            bool fInvalidFound = false;
            if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL, fInvalidFound, txConflicted, txChanged))
                return false;

            if (fInvalidFound) {
                // Wipe cache, we may need another branch now.
                pindexMostWork = NULL;
            }
            pindexNewTip = chainActive.Tip();
            pindexFork = chainActive.FindFork(pindexOldTip);
            fInitialDownload = IsInitialBlockDownload();
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main

        // throw all transactions though the signal-interface
        // while _not_ holding the cs_main lock
        for(std::shared_ptr<const CTransaction> tx : txConflicted)
        {
            GetMainSignals().SyncTransaction(*tx, pindexNewTip, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
        }
        // ... and about transactions that got confirmed:
        for(unsigned int i = 0; i < txChanged.size(); i++)
            GetMainSignals().SyncTransaction(std::get<0>(txChanged[i]), std::get<1>(txChanged[i]), std::get<2>(txChanged[i]));

        // Notify external listeners about the new tip.
        GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

        // Always notify the UI if a new block tip was connected
        if (pindexFork != pindexNewTip) {
            uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
        }
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
        return false;
    }

    return true;
}
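// A minimal usage sketch (using only names that appear above): after a block has been
// stored via AcceptBlock, the caller drives the tip forward with
//
//     CValidationState state;
//     if (!ActivateBestChain(state, Params(), pblock))
//         return error("failed to activate best chain");
//
// The do/while loop keeps calling ActivateBestChainStep until chainActive.Tip() equals the
// most-work candidate, and the wallet/UI notifications are deliberately fired outside cs_main.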
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = chainActive.Tip()->nChainWork;
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    return ActivateBestChain(state, params);
}
bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
    AssertLockHeld(cs_main);

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);

    while (chainActive.Contains(pindex)) {
        CBlockIndex *pindexWalk = chainActive.Tip();
        pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(pindexWalk);
        setBlockIndexCandidates.erase(pindexWalk);
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state, chainparams)) {
            mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
            return false;
        }
    }

    LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    // add it again.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);
        }
        it++;
    }

    InvalidChainFound(pindex);
    mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
    return true;
}
bool ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = NULL;
            }
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != NULL) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
        }
        pindex = pindex->pprev;
    }
    return true;
}
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, Params().GetConsensus())) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }

    return true;
}
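// Together with AddToBlockIndex this implements a simple breadth-first "unlinking" scheme:
// a block whose parent does not yet have BLOCK_VALID_TRANSACTIONS is parked in
// mapBlocksUnlinked, and once the parent's data arrives the queue above walks every parked
// descendant, fills in nChainTx, and promotes eligible entries into setBlockIndexCandidates.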
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    if (!fKnown) {
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return state.Error("out of disk space");
        }
    }

    setDirtyFileInfo.insert(nFile);
    return true;
}
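// Example of the chunk arithmetic above: if the current write position sits just below a
// chunk boundary and the new block pushes the file size past it, nNewChunks exceeds
// nOldChunks by one, and the file is pre-allocated up to nNewChunks * BLOCKFILE_CHUNK_SIZE
// so that subsequent appends within that chunk never need to grow the file on disk.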
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    unsigned int nNewSize;
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);

    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }

    return true;
}
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW)
{
    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");

    return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.
    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits
    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i].IsCoinBase())
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");

    // Check transactions
    for (const auto& tx : block.vtx)
        if (!CheckTransaction(tx, state))
            return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage()));

    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");

    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
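// block.fChecked acts as a per-block memo: it is only set when both fCheckPOW and
// fCheckMerkleRoot were requested, so a later full CheckBlock call can return immediately,
// while partial checks (e.g. fCheckPOW=false from TestBlockValidity) never populate the cache.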
static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
{
    if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
        return true;

    int nHeight = pindexPrev->nHeight+1;
    // Don't accept any forks from the main chain prior to last checkpoint
    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
    if (pcheckpoint && nHeight < pcheckpoint->nHeight)
        return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));

    return true;
}

bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE);
}
// Compute at which vout of the block's coinbase transaction the witness
// commitment occurs, or -1 if not found.
static int GetWitnessCommitmentIndex(const CBlock& block)
{
    int commitpos = -1;
    for (size_t o = 0; o < block.vtx[0].vout.size(); o++) {
        if (block.vtx[0].vout[o].scriptPubKey.size() >= 38 && block.vtx[0].vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0].vout[o].scriptPubKey[1] == 0x24 && block.vtx[0].vout[o].scriptPubKey[2] == 0xaa && block.vtx[0].vout[o].scriptPubKey[3] == 0x21 && block.vtx[0].vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0].vout[o].scriptPubKey[5] == 0xed) {
            commitpos = o;
        }
    }
    return commitpos;
}
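// A matching commitment output therefore carries a scriptPubKey of at least 38 bytes:
//   OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed, <32-byte commitment hash>
// and when the coinbase has several such outputs the loop above keeps the last (highest
// index) one, which is the output the consensus check in ContextualCheckBlock uses.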
void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    int commitpos = GetWitnessCommitmentIndex(block);
    static const std::vector<unsigned char> nonce(32, 0x00);
    if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0].wit.IsEmpty()) {
        block.vtx[0].wit.vtxinwit.resize(1);
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.resize(1);
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0] = nonce;
    }
}
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    bool fHaveWitness = false;
    for (size_t t = 1; t < block.vtx.size(); t++) {
        if (!block.vtx[t].wit.IsNull()) {
            fHaveWitness = true;
            break;
        }
    }
    std::vector<unsigned char> ret(32, 0x00);
    if (fHaveWitness && IsWitnessEnabled(pindexPrev, consensusParams)) {
        if (commitpos == -1) {
            uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL);
            CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin());
            CTxOut out;
            out.nValue = 0;
            out.scriptPubKey.resize(38);
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            const_cast<std::vector<CTxOut>*>(&block.vtx[0].vout)->push_back(out);
            block.vtx[0].UpdateHash();
        }
    }
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
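// The value embedded at bytes [6..37] above is SHA256^2(witness merkle root || witness nonce),
// computed here with the all-zero 32-byte nonce that UpdateUncommittedBlockStructures also
// installs into the coinbase scriptWitness, so it matches the consensus check performed in
// ContextualCheckBlock.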
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
    // Check proof of work
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");

    // Check timestamp
    if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
        return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");

    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    // check for version 2, 3 and 4 upgrades
    if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
       (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
       (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
        return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
                             strprintf("rejected nVersion=0x%08x block", block.nVersion));

    return true;
}
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;

    // Start enforcing BIP113 (Median Time Past) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
        }
    }
    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != -1) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0].wit.vtxinwit.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__));
            }
            CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
            if (memcmp(hashWitness.begin(), &block.vtx[0].vout[commitpos].scriptPubKey[6], 32)) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
        for (size_t i = 0; i < block.vtx.size(); i++) {
            if (!block.vtx[i].wit.IsNull()) {
                return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness nonce and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = NULL;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {

        if (miSelf != mapBlockIndex.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK)
                return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
            return true;
        }

        if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));

        // Get prev block index
        CBlockIndex* pindexPrev = NULL;
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");

        if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
            return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

        if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
    }
    if (pindex == NULL)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);

    CBlockIndex *pindexDummy = NULL;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) {  // If we didn't ask for it:
        if (pindex->nTx != 0) return true;  // This is a previously-processed block that was pruned
        if (!fHasMoreWork) return true;     // Don't process less-work chains
        if (fTooFarAhead) return true;      // Block height is too high
    }
    if (fNewBlock) *fNewBlock = true;

    if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, FormatStateMessage(state));
    }

    int nHeight = pindex->nHeight;

    // Write block to history file
    try {
        unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
        CDiskBlockPos blockPos;
        if (dbp != NULL)
            blockPos = *dbp;
        if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
            return error("AcceptBlock(): FindBlockPos failed");
        if (dbp == NULL)
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                AbortNode(state, "Failed to write block");
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
            return error("AcceptBlock(): ReceivedBlockTransactions failed");
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    if (fCheckForPruning)
        FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files

    return true;
}
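// Note that AcceptBlock only stores a block; the unrequested-block policy above
// (fAlreadyHave / fHasMoreWork / fTooFarAhead) decides whether the data is persisted at all,
// while actually switching the active chain to it is handled separately by ActivateBestChain
// in ProcessNewBlock below.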
bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
{
    {
        LOCK(cs_main);
        bool fRequested = MarkBlockAsReceived(pblock->GetHash());
        fRequested |= fForceProcessing;

        // Store to disk
        CBlockIndex *pindex = NULL;
        bool fNewBlock = false;
        bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock);
        if (pindex && pfrom) {
            mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
            if (fNewBlock) pfrom->nLastBlockTime = GetTime();
        }
        CheckBlockIndex(chainparams.GetConsensus());
        if (!ret)
            return error("%s: AcceptBlock FAILED", __func__);
    }

    NotifyHeaderTip();

    if (!ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed", __func__);

    return true;
}
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == chainActive.Tip());
    if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
        return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

    CCoinsViewCache viewNew(pcoinsTip);
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
        return false;
    assert(state.IsValid());

    return true;
}
/**
 * BLOCK PRUNING CODE
 */

/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
    uint64_t retval = 0;
    BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
        CBlockIndex* pindex = it->second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    mapBlocksUnlinked.erase(_it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
{
    for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        CDiskBlockPos pos(*it, 0);
        boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
        boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
        LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
    }
}
/* Calculate the block/rev files that should be deleted to remain under target*/
void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == NULL || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;

            if (nCurrentUsage + nBuffer < nPruneTarget)  // are we below our target?
                break;

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
             nLastBlockWeCanPrune, count);
}
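// The nBuffer term keeps one spare blk chunk plus one spare rev chunk of headroom below
// nPruneTarget, so that the pre-allocation done by FindBlockPos/FindUndoPos after this pass
// cannot push total usage back over the configured prune target before the next pruning run.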
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
    uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
        return AbortNode("Disk space is low!", _("Error: Disk space is low!"));

    return true;
}
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return NULL;
    boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
    boost::filesystem::create_directories(path.parent_path());
    FILE* file = fopen(path.string().c_str(), "rb+");
    if (!file && !fReadOnly)
        file = fopen(path.string().c_str(), "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return NULL;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return NULL;
        }
    }
    return file;
}

FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}

FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}

boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
CBlockIndex * InsertBlockIndex(uint256 hash)
{
    if (hash.IsNull())
        return NULL;

    // Return existing
    BlockMap::iterator mi = mapBlockIndex.find(hash);
    if (mi != mapBlockIndex.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    if (!pindexNew)
        throw runtime_error(std::string(__func__) + ": new CBlockIndex failed");
    mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}
bool static LoadBlockIndexDB(const CChainParams& chainparams)
{
    if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex))
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    set<int> setBlkDataFiles;
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return true;
    chainActive.SetTip(it->second);

    PruneBlockIndexCandidates();

    LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));

    return true;
}
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}

CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100);
}
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = NULL;
    int nGoodTransactions = 0;
    CValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]...");
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone);
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone);
        if (pindex->nHeight < chainActive.Height()-nCheckDepth)
            break;
        if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, only go back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            bool fClean = true;
            if (!DisconnectBlock(block, state, pindex, coins, &fClean))
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            pindexState = pindex->pprev;
            if (!fClean) {
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!ConnectBlock(block, state, pindex, coins, chainparams))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}
bool RewindBlockIndex(const CChainParams& params)
{
    LOCK(cs_main);

    int nHeight = 1;
    while (nHeight <= chainActive.Height()) {
        if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
            break;
        }
        nHeight++;
    }

    // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
    CValidationState state;
    CBlockIndex* pindex = chainActive.Tip();
    while (chainActive.Height() >= nHeight) {
        if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, don't try rewinding past the HAVE_DATA point;
            // since older blocks can't be served anyway, there's
            // no need to walk further, and trying to DisconnectTip()
            // will fail (and require a needless reindex/redownload
            // of the blockchain).
            break;
        }
        if (!DisconnectTip(state, params, true)) {
            return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
        }
        // Occasionally flush state to disk.
        if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
            return false;
    }

    // Reduce validity flag and have-data flags.
    // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
    // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        CBlockIndex* pindexIter = it->second;

        // Note: If we encounter an insufficiently validated block that
        // is on chainActive, it must be because we are a pruning node, and
        // this block or some successor doesn't HAVE_DATA, so we were unable to
        // rewind all the way. Blocks remaining on chainActive at this point
        // must not have their validity reduced.
        if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
            // Reduce validity
            pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
            // Remove have-data flags.
            pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
            // Remove storage location.
            pindexIter->nFile = 0;
            pindexIter->nDataPos = 0;
            pindexIter->nUndoPos = 0;
            // Remove various other things
            pindexIter->nTx = 0;
            pindexIter->nChainTx = 0;
            pindexIter->nSequenceId = 0;
            // Make sure it gets written.
            setDirtyBlockIndex.insert(pindexIter);
            // Update indexes
            setBlockIndexCandidates.erase(pindexIter);
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
            while (ret.first != ret.second) {
                if (ret.first->second == pindexIter) {
                    mapBlocksUnlinked.erase(ret.first++);
                } else {
                    ++ret.first;
                }
            }
        } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
            setBlockIndexCandidates.insert(pindexIter);
        }
    }

    PruneBlockIndexCandidates();

    CheckBlockIndex(params.GetConsensus());

    if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
        return false;
    }

    return true;
}
void UnloadBlockIndex()
{
    LOCK(cs_main);
    setBlockIndexCandidates.clear();
    chainActive.SetTip(NULL);
    pindexBestInvalid = NULL;
    pindexBestHeader = NULL;
    mempool.clear();
    mapOrphanTransactions.clear();
    mapOrphanTransactionsByPrev.clear();
    nSyncStarted = 0;
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    nBlockSequenceId = 1;
    mapBlockSource.clear();
    mapBlocksInFlight.clear();
    nPreferredDownload = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    mapNodeState.clear();
    recentRejects.reset(NULL);
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }

    BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;
}
bool LoadBlockIndex(const CChainParams& chainparams)
{
    // Load block index from databases
    if (!fReindex && !LoadBlockIndexDB(chainparams))
        return false;
    return true;
}

bool InitBlockIndex(const CChainParams& chainparams)
{
    LOCK(cs_main);

    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

    // Check whether we're already initialized
    if (chainActive.Genesis() != NULL)
        return true;

    // Use the provided setting for -txindex in the new database
    fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
    pblocktree->WriteFlag("txindex", fTxIndex);
    LogPrintf("Initializing databases...\n");

    // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
    if (!fReindex) {
        try {
            CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
            // Start new block file
            unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
            CDiskBlockPos blockPos;
            CValidationState state;
            if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
                return error("LoadBlockIndex(): FindBlockPos failed");
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                return error("LoadBlockIndex(): writing genesis block to disk failed");
            CBlockIndex *pindex = AddToBlockIndex(block);
            if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
                return error("LoadBlockIndex(): genesis block not accepted");
            // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
            return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
        } catch (const std::runtime_error& e) {
            return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
        }
    }

    return true;
}
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                CBlock block;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                            block.hashPrevBlock.ToString());
                    if (dbp)
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    LOCK(cs_main);
                    CValidationState state;
                    if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
                        nLoaded++;
                    if (state.IsError())
                        break;
                } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrint("reindex", "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    CValidationState state;
                    if (!ActivateBestChain(state, chainparams)) {
                        break;
                    }
                }

                // Recursively process earlier encountered successors of this block
                deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
                        {
                            LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
                                    head.ToString());
                            LOCK(cs_main);
                            CValidationState dummy;
                            if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
                            {
                                nLoaded++;
                                queue.push_back(block.GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}
void static CheckBlockIndex(const Consensus::Params& consensusParams)
    if (!fCheckBlockIndex) {

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        forward.insert(std::make_pair(it->second->pprev, it->second));

    assert(forward.size() == mapBlockIndex.size());

    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.
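    // The loop below walks `forward` as a tree rooted at the genesis entry found above:
    // descend into the first child whenever forward.equal_range(pindex) is non-empty,
    // otherwise climb back up via pprev and continue with the next sibling from the
    // parent's range. Each index entry is visited exactly once, so the whole check is
    // linear in mapBlockIndex.size() (see the final assert on nNodes at the end).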
    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != NULL) {
        if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == NULL) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
        if (pindex->nChainTx == 0) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
        assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == NULL) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
            rangeUnlinked.first++;
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == NULL) {
                    assert(foundInUnlinked);
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        // We are going to either move to a parent or a sibling of pindex.
        // If pindex was the first with a certain property, unset the corresponding variable.
        if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
        if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
        if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
        if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
        if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
        if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
        if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
        CBlockIndex* pindexPar = pindex->pprev;
        // Find which child we just visited.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
        while (rangePar.first->second != pindex) {
            assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
        // Proceed to the next one.
        if (rangePar.first != rangePar.second) {
            // Move to the sibling.
            pindex = rangePar.first->second;

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
std::string GetWarnings(const std::string& strFor)
    string strStatusBar;
    const string uiAlertSeperator = "<hr />";

    if (!CLIENT_VERSION_IS_RELEASE) {
        strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
        strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");

    if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
        strStatusBar = strRPC = strGUI = "testsafemode enabled";

    // Misc warnings like out of disk space and clock is wrong
    if (strMiscWarning != "")
        strStatusBar = strMiscWarning;
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + strMiscWarning;

    if (fLargeWorkForkFound)
        strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
    else if (fLargeWorkInvalidChainFound)
        strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");

    if (strFor == "gui")
    else if (strFor == "statusbar")
        return strStatusBar;
    else if (strFor == "rpc")
    assert(!"GetWarnings(): invalid parameter");
//////////////////////////////////////////////////////////////////////////////

// blockchain -> download logic notification

void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
    const int nNewHeight = pindexNew->nHeight;
    connman->SetBestHeight(nNewHeight);

    if (!fInitialDownload) {
        // Find the hashes of all blocks that weren't previously in the best chain.
        std::vector<uint256> vHashes;
        const CBlockIndex *pindexToAnnounce = pindexNew;
        while (pindexToAnnounce != pindexFork) {
            vHashes.push_back(pindexToAnnounce->GetBlockHash());
            pindexToAnnounce = pindexToAnnounce->pprev;
            if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                // Limit announcements in case of a huge reorganization.
                // Rely on the peer's synchronization mechanism in that case.
        // Relay inventory, but don't relay old inventory during initial block download.
        connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
            if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
                BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
                    pnode->PushBlockHash(hash);

void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
    const uint256 hash(block.GetHash());
    std::map<uint256, NodeId>::iterator it = mapBlockSource.find(hash);

    if (state.IsInvalid(nDoS)) {
        if (it != mapBlockSource.end() && State(it->second)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
            State(it->second)->rejects.push_back(reject);
                Misbehaving(it->second, nDoS);

    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
//////////////////////////////////////////////////////////////////////////////

bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
    case MSG_WITNESS_TX:
        assert(recentRejects);
        if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
            // If the chain tip has changed previously rejected transactions
            // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
            // or a double-spend. Reset the rejects filter and give those
            // txs a second chance.
            hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
            recentRejects->reset();

        // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude
        // requesting or processing some txs which have already been included in a block
        return recentRejects->contains(inv.hash) ||
               mempool.exists(inv.hash) ||
               mapOrphanTransactions.count(inv.hash) ||
               pcoinsTip->HaveCoinsInCache(inv.hash);
    case MSG_WITNESS_BLOCK:
        return mapBlockIndex.count(inv.hash);
    // Don't know what it is, just say we already got one
static void RelayTransaction(const CTransaction& tx, CConnman& connman)
    CInv inv(MSG_TX, tx.GetHash());
    connman.ForEachNode([&inv](CNode* pnode)
        pnode->PushInventory(inv);
static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
    unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the addrKnowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
    FastRandomContext insecure_rand;

    std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
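    // How the selection below works (sketch; hash values are illustrative only): for each
    // candidate peer, hashKey = SipHash(hasher, pnode->id). The `best` array keeps the
    // nRelayNodes largest hashKey values seen so far in descending order (a tiny insertion
    // sort over one or two slots), so a given address deterministically maps to the same
    // one or two peers for roughly a 24-hour window. E.g. with peers A, B, C hashing to
    // 0x9..., 0x3..., 0xC..., the address would be pushed to C and A when nRelayNodes == 2.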
    assert(nRelayNodes <= best.size());

    auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
        if (pnode->nVersion >= CADDR_TIME_VERSION) {
            uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, pnode);

    auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
        for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
            best[i].second->PushAddress(addr, insecure_rand);

    connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman)
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();

    vector<CInv> vNotFound;

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= nMaxSendBufferSize)

        const CInv &inv = *it;
            boost::this_thread::interruption_point();

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
                BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
                if (mi != mapBlockIndex.end())
                    if (chainActive.Contains(mi->second)) {
                        static const int nOneMonth = 30 * 24 * 60 * 60;
                        // To prevent fingerprinting attacks, only send blocks outside of the active
                        // chain if they are valid, and no more than a month older (both in time, and in
                        // best equivalent proof of work) than the best header chain we know about.
                        send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
                            (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
                            (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
                            LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
                // disconnect node in case we have reached the outbound limit for serving historical blocks
                // never disconnect whitelisted nodes
                static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
                if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
                    LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
                    pfrom->fDisconnect = true;
                // Pruned nodes may have deleted the block, so check whether
                // it's available before trying to send.
                if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
                    // Send block from disk
                    if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
                        assert(!"cannot load block from disk");
                    if (inv.type == MSG_BLOCK)
                        pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
                    else if (inv.type == MSG_WITNESS_BLOCK)
                        pfrom->PushMessage(NetMsgType::BLOCK, block);
                    else if (inv.type == MSG_FILTERED_BLOCK)
                        bool sendMerkleBlock = false;
                        CMerkleBlock merkleBlock;
                            LOCK(pfrom->cs_filter);
                            if (pfrom->pfilter) {
                                sendMerkleBlock = true;
                                merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
                        if (sendMerkleBlock) {
                            pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
                            // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                            // This avoids hurting performance by pointlessly requiring a round-trip
                            // Note that there is currently no way for a node to request any single transactions we didn't send here -
                            // they must either disconnect and retry or request the full block.
                            // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                            // however we MUST always provide at least what the remote peer needs
                            typedef std::pair<unsigned int, uint256> PairType;
                            BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
                                pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
                    else if (inv.type == MSG_CMPCT_BLOCK)
                        // If a peer is asking for old blocks, we're almost guaranteed
                        // they wont have a useful mempool to match against a compact block,
                        // and we don't feel like constructing the object for them, so
                        // instead we respond with the full, non-compact block.
                        bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                        if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                            CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
                            pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                            pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);

                    // Trigger the peer node to send a getblocks request for the next batch of inventory
                    if (inv.hash == pfrom->hashContinue)
                        // Bypass PushInventory, this must send even if redundant,
                        // and we want it right after the last block so they don't
                        // wait for other stuff first.
                        vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                        pfrom->PushMessage(NetMsgType::INV, vInv);
                        pfrom->hashContinue.SetNull();
            else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
                // Send stream from relay memory
                auto mi = mapRelay.find(inv.hash);
                if (mi != mapRelay.end()) {
                    pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
                } else if (pfrom->timeLastMempoolReq) {
                    auto txinfo = mempool.info(inv.hash);
                    // To protect privacy, do not answer getdata using the mempool when
                    // that TX couldn't have been INVed in reply to a MEMPOOL request.
                    if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                        pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
                    vNotFound.push_back(inv);

            // Track requests for our stuff.
            GetMainSignals().Inventory(inv.hash);

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound);
uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
    uint32_t nFetchFlags = 0;
    if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
        nFetchFlags |= MSG_WITNESS_FLAG;
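// Note: callers OR the returned flags into the inv types they request (e.g. inv.type |= nFetchFlags,
// giving MSG_TX | MSG_WITNESS_FLAG), so when both we and the peer advertise NODE_WITNESS the getdata
// asks for the witness serialization, and requests without the flag fetch the witness-stripped form.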
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman)
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();

    LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
    if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
        LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");

    if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
              (strCommand == NetMsgType::FILTERLOAD ||
               strCommand == NetMsgType::FILTERADD ||
               strCommand == NetMsgType::FILTERCLEAR))
        if (pfrom->nVersion >= NO_BLOOM_VERSION) {
            Misbehaving(pfrom->GetId(), 100);
            pfrom->fDisconnect = true;

    if (strCommand == NetMsgType::VERSION)
        // Feeler connections exist only to verify if address is online.
        if (pfrom->fFeeler) {
            assert(pfrom->fInbound == false);
            pfrom->fDisconnect = true;

        // Each connection can only send one version message
        if (pfrom->nVersion != 0)
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
            Misbehaving(pfrom->GetId(), 1);

        uint64_t nNonce = 1;
        uint64_t nServiceInt;
        vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
        pfrom->nServices = ServiceFlags(nServiceInt);
        if (!pfrom->fInbound)
            connman.SetServices(pfrom->addr, pfrom->nServices);
        if (pfrom->nServicesExpected & ~pfrom->nServices)
            LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
                               strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
            // disconnect from peers older than this proto version
            LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                               strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion == 10300)
            pfrom->nVersion = 300;
            vRecv >> addrFrom >> nNonce;
        if (!vRecv.empty()) {
            vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
            pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
        if (!vRecv.empty()) {
            vRecv >> pfrom->nStartingHeight;
            LOCK(pfrom->cs_filter);
            vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
            pfrom->fRelayTxes = true;

        // Disconnect if we connected to ourself
        if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
            LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
            pfrom->fDisconnect = true;

        pfrom->addrLocal = addrMe;
        if (pfrom->fInbound && addrMe.IsRoutable())

        // Be shy and don't send version until we hear
        if (pfrom->fInbound)
            pfrom->PushVersion();

        pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);

        if((pfrom->nServices & NODE_WITNESS))
            State(pfrom->GetId())->fHaveWitness = true;

        // Potentially mark this peer as a preferred download peer.
            UpdatePreferredDownload(pfrom, State(pfrom->GetId()));

        pfrom->PushMessage(NetMsgType::VERACK);
        pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        if (!pfrom->fInbound)
            // Advertise our address
            if (fListen && !IsInitialBlockDownload())
                CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
                FastRandomContext insecure_rand;
                if (addr.IsRoutable())
                    LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr, insecure_rand);
                } else if (IsPeerAddrLocalGood(pfrom)) {
                    addr.SetIP(pfrom->addrLocal);
                    LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr, insecure_rand);

            // Get recent addresses
            if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
                pfrom->PushMessage(NetMsgType::GETADDR);
                pfrom->fGetAddr = true;
            connman.MarkAddressGood(pfrom->addr);

        pfrom->fSuccessfullyConnected = true;

            remoteAddr = ", peeraddr=" + pfrom->addr.ToString();

        LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
                  pfrom->cleanSubVer, pfrom->nVersion,
                  pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
                  remoteAddr);

        int64_t nTimeOffset = nTime - GetTime();
        pfrom->nTimeOffset = nTimeOffset;
        AddTimeData(pfrom->addr, nTimeOffset);

    else if (pfrom->nVersion == 0)
        // Must have a version message before anything else
        Misbehaving(pfrom->GetId(), 1);
    else if (strCommand == NetMsgType::VERACK)
        pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        // Mark this node as currently connected, so we update its timestamp later.
        if (pfrom->fNetworkNode) {
            State(pfrom->GetId())->fCurrentlyConnected = true;

        if (pfrom->nVersion >= SENDHEADERS_VERSION) {
            // Tell our peer we prefer to receive headers rather than inv's
            // We send this to non-NODE NETWORK peers as well, because even
            // non-NODE NETWORK peers can announce blocks (such as pruning
            pfrom->PushMessage(NetMsgType::SENDHEADERS);
        if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
            // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
            // However, we do not request new block announcements using
            // cmpctblock messages.
            // We send this to non-NODE NETWORK peers as well, because
            // they may wish to request compact blocks from us
            bool fAnnounceUsingCMPCTBLOCK = false;
            uint64_t nCMPCTBLOCKVersion = 2;
            if (pfrom->GetLocalServices() & NODE_WITNESS)
                pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
            nCMPCTBLOCKVersion = 1;
            pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
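            // Note (based on the SENDCMPCT receive logic further below): the first sendcmpct a
            // peer accepts locks in whether it wants the witness format (fWantsCmpctWitness),
            // and version 2 is only accepted by peers that are themselves NODE_WITNESS.
            // Announcing version 2 before version 1 therefore lets segwit-capable peers settle
            // on version 2 while peers that ignore it still fall back to version 1.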
    else if (strCommand == NetMsgType::ADDR)
        vector<CAddress> vAddr;

        // Don't want addr from older versions unless seeding
        if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
        if (vAddr.size() > 1000)
            Misbehaving(pfrom->GetId(), 20);
            return error("message addr size() = %u", vAddr.size());

        // Store the new addresses
        vector<CAddress> vAddrOk;
        int64_t nNow = GetAdjustedTime();
        int64_t nSince = nNow - 10 * 60;
        BOOST_FOREACH(CAddress& addr, vAddr)
            boost::this_thread::interruption_point();

            if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)

            if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                addr.nTime = nNow - 5 * 24 * 60 * 60;
            pfrom->AddAddressKnown(addr);
            bool fReachable = IsReachable(addr);
            if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
                // Relay to a limited number of other nodes
                RelayAddress(addr, fReachable, connman);
            // Do not store addresses outside our network
                vAddrOk.push_back(addr);
        connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
        if (vAddr.size() < 1000)
            pfrom->fGetAddr = false;
        if (pfrom->fOneShot)
            pfrom->fDisconnect = true;

    else if (strCommand == NetMsgType::SENDHEADERS)
        State(pfrom->GetId())->fPreferHeaders = true;

    else if (strCommand == NetMsgType::SENDCMPCT)
        bool fAnnounceUsingCMPCTBLOCK = false;
        uint64_t nCMPCTBLOCKVersion = 0;
        vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
        if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
            // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
            if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
                State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
                State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
            if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
                State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
            if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
                if (pfrom->GetLocalServices() & NODE_WITNESS)
                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
    else if (strCommand == NetMsgType::INV)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message inv size() = %u", vInv.size());

        bool fBlocksOnly = !fRelayTxes;

        // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
        if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
            fBlocksOnly = false;

        uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());

        std::vector<CInv> vToFetch;

        for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
            CInv &inv = vInv[nInv];

            boost::this_thread::interruption_point();

            bool fAlreadyHave = AlreadyHave(inv);
            LogPrint("net", "got inv: %s  %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);

            if (inv.type == MSG_TX) {
                inv.type |= nFetchFlags;

            if (inv.type == MSG_BLOCK) {
                UpdateBlockAvailability(pfrom->GetId(), inv.hash);
                if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
                    // First request the headers preceding the announced block. In the normal fully-synced
                    // case where a new block is announced that succeeds the current tip (no reorganization),
                    // there are no such headers.
                    // Secondly, and only when we are close to being synced, we request the announced block directly,
                    // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
                    // time the block arrives, the header chain leading up to it is already validated. Not
                    // doing this will result in the received block being rejected as an orphan in case it is
                    // not a direct successor.
                    pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
                    CNodeState *nodestate = State(pfrom->GetId());
                    if (CanDirectFetch(chainparams.GetConsensus()) &&
                        nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
                        (!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                        inv.type |= nFetchFlags;
                        if (nodestate->fSupportsDesiredCmpctVersion)
                            vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash));
                            vToFetch.push_back(inv);
                        // Mark block as in flight already, even though the actual "getdata" message only goes out
                        // later (within the same cs_main lock, though).
                        MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
                    LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);

                pfrom->AddInventoryKnown(inv);
                    LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
                else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())

            // Track requests for our stuff
            GetMainSignals().Inventory(inv.hash);

            if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) {
                Misbehaving(pfrom->GetId(), 50);
                return error("send buffer size() = %u", pfrom->nSendSize);

        if (!vToFetch.empty())
            pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
    else if (strCommand == NetMsgType::GETDATA)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message getdata size() = %u", vInv.size());

        if (fDebug || (vInv.size() != 1))
            LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);

        if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
            LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);

        pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
    else if (strCommand == NetMsgType::GETBLOCKS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        // Find the last block the caller has in the main chain
        CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);

        // Send the rest of the chain
            pindex = chainActive.Next(pindex);
        LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            if (pindex->GetBlockHash() == hashStop)
                LogPrint("net", "  getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            // If pruning, don't inv blocks unless we have on disk and are likely to still have
            // for some reasonable time window (1 hour) that block relay might require.
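            // At the default 10-minute target spacing this window works out to
            // 3600 / nPowTargetSpacing == 6 blocks, i.e. a pruned node stops inv'ing once it
            // reaches blocks more than MIN_BLOCKS_TO_KEEP - 6 below its tip, so whatever it
            // advertises should still be on disk by the time the peer actually requests it.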
            const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
                LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                // When this block is requested, we'll send an inv that'll
                // trigger the peer to getblocks the next batch of inventory.
                LogPrint("net", "  getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                pfrom->hashContinue = pindex->GetBlockHash();
    else if (strCommand == NetMsgType::GETBLOCKTXN)
        BlockTransactionsRequest req;

        BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
        if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
            LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);

        if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
            LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);

        assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));

        BlockTransactions resp(req);
        for (size_t i = 0; i < req.indexes.size(); i++) {
            if (req.indexes[i] >= block.vtx.size()) {
                Misbehaving(pfrom->GetId(), 100);
                LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
            resp.txn[i] = block.vtx[req.indexes[i]];
        pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
    else if (strCommand == NetMsgType::GETHEADERS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
            LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);

        CNodeState *nodestate = State(pfrom->GetId());
        CBlockIndex* pindex = NULL;
        if (locator.IsNull())
            // If locator is null, return the hashStop block
            BlockMap::iterator mi = mapBlockIndex.find(hashStop);
            if (mi == mapBlockIndex.end())
            pindex = (*mi).second;
            // Find the last block the caller has in the main chain
            pindex = FindForkInGlobalIndex(chainActive, locator);
                pindex = chainActive.Next(pindex);

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        vector<CBlock> vHeaders;
        int nLimit = MAX_HEADERS_RESULTS;
        LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            vHeaders.push_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
        // pindex can be NULL either if we sent chainActive.Tip() OR
        // if our peer has chainActive.Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
        pfrom->PushMessage(NetMsgType::HEADERS, vHeaders);
    else if (strCommand == NetMsgType::TX)
        // Stop processing the transaction early if
        // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
        if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
            LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);

        deque<COutPoint> vWorkQueue;
        vector<uint256> vEraseQueue;

        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);

        bool fMissingInputs = false;
        CValidationState state;

        pfrom->setAskFor.erase(inv.hash);
        mapAlreadyAskedFor.erase(inv.hash);

        if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
            mempool.check(pcoinsTip);
            RelayTransaction(tx, connman);
            for (unsigned int i = 0; i < tx.vout.size(); i++) {
                vWorkQueue.emplace_back(inv.hash, i);

            pfrom->nLastTXTime = GetTime();

            LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
                tx.GetHash().ToString(),
                mempool.size(), mempool.DynamicMemoryUsage() / 1000);

            // Recursively process any orphan transactions that depended on this one
            set<NodeId> setMisbehaving;
            while (!vWorkQueue.empty()) {
                auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
                vWorkQueue.pop_front();
                if (itByPrev == mapOrphanTransactionsByPrev.end())
                for (auto mi = itByPrev->second.begin();
                     mi != itByPrev->second.end();
                    const CTransaction& orphanTx = (*mi)->second.tx;
                    const uint256& orphanHash = orphanTx.GetHash();
                    NodeId fromPeer = (*mi)->second.fromPeer;
                    bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
                    // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
                    // anyone relaying LegitTxX banned)
                    CValidationState stateDummy;

                    if (setMisbehaving.count(fromPeer))
                    if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
                        LogPrint("mempool", "   accepted orphan tx %s\n", orphanHash.ToString());
                        RelayTransaction(orphanTx, connman);
                        for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
                            vWorkQueue.emplace_back(orphanHash, i);
                        vEraseQueue.push_back(orphanHash);
                    else if (!fMissingInputs2)
                        if (stateDummy.IsInvalid(nDos) && nDos > 0)
                            // Punish peer that gave us an invalid orphan tx
                            Misbehaving(fromPeer, nDos);
                            setMisbehaving.insert(fromPeer);
                            LogPrint("mempool", "   invalid orphan tx %s\n", orphanHash.ToString());
                        // Has inputs but not accepted to mempool
                        // Probably non-standard or insufficient fee/priority
                        LogPrint("mempool", "   removed orphan tx %s\n", orphanHash.ToString());
                        vEraseQueue.push_back(orphanHash);
                        if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) {
                            // Do not use rejection cache for witness transactions or
                            // witness-stripped transactions, as they can have been malleated.
                            // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                            assert(recentRejects);
                            recentRejects->insert(orphanHash);
                    mempool.check(pcoinsTip);

            BOOST_FOREACH(uint256 hash, vEraseQueue)
                EraseOrphanTx(hash);
        else if (fMissingInputs)
            bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
            BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                if (recentRejects->contains(txin.prevout.hash)) {
                    fRejectedParents = true;
            if (!fRejectedParents) {
                BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                    CInv _inv(MSG_TX, txin.prevout.hash);
                    pfrom->AddInventoryKnown(_inv);
                    if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
                AddOrphanTx(tx, pfrom->GetId());

                // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
                unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                    LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
                LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
            if (tx.wit.IsNull() && !state.CorruptionPossible()) {
                // Do not use rejection cache for witness transactions or
                // witness-stripped transactions, as they can have been malleated.
                // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                assert(recentRejects);
                recentRejects->insert(tx.GetHash());

            if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
                // Always relay transactions received from whitelisted peers, even
                // if they were already in the mempool or rejected from it due
                // to policy, allowing the node to function as a gateway for
                // nodes hidden behind it.
                //
                // Never relay transactions that we would assign a non-zero DoS
                // score for, as we expect peers to do the same with us in that
                if (!state.IsInvalid(nDoS) || nDoS == 0) {
                    LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
                    RelayTransaction(tx, connman);
                    LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));

        if (state.IsInvalid(nDoS))
            LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
                pfrom->id,
                FormatStateMessage(state));
            if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
                pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                   state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
                Misbehaving(pfrom->GetId(), nDoS);
        FlushStateToDisk(state, FLUSH_STATE_PERIODIC);
    else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
        CBlockHeaderAndShortTxIDs cmpctblock;
        vRecv >> cmpctblock;

        if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
            // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
            if (!IsInitialBlockDownload())
                pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());

        CBlockIndex *pindex = NULL;
        CValidationState state;
        if (!AcceptBlockHeader(cmpctblock.header, state, chainparams, &pindex)) {
            if (state.IsInvalid(nDoS)) {
                    Misbehaving(pfrom->GetId(), nDoS);
                LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id);

        // If AcceptBlockHeader returned true, it set pindex
        UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());

        std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
        bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();

        if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here

        if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
                pindex->nTx != 0) { // We had this block at some point, but pruned it
            if (fAlreadyInFlight) {
                // We requested this block for some reason, but our mempool will probably be useless
                // so we just grab the block via normal getdata
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                pfrom->PushMessage(NetMsgType::GETDATA, vInv);

        // If we're not close to tip yet, give up and let parallel block fetch work its magic
        if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))

        CNodeState *nodestate = State(pfrom->GetId());

        if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
            // Don't bother trying to process compact blocks from v1 peers
            // after segwit activates.

        // We want to be a bit conservative just to be extra careful about DoS
        // possibilities in compact block processing...
        if (pindex->nHeight <= chainActive.Height() + 2) {
            if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
                list<QueuedBlock>::iterator *queuedBlockIt = NULL;
                if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) {
                    if (!(*queuedBlockIt)->partialBlock)
                        (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
                        // The block was already in flight using compact blocks from the same peer
                        LogPrint("net", "Peer sent us compact block we were already syncing!\n");

                PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
                ReadStatus status = partialBlock.InitData(cmpctblock);
                if (status == READ_STATUS_INVALID) {
                    MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
                    Misbehaving(pfrom->GetId(), 100);
                    LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
                } else if (status == READ_STATUS_FAILED) {
                    // Duplicate txindexes, the block is now in-flight, so just request it
                    std::vector<CInv> vInv(1);
                    vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                    pfrom->PushMessage(NetMsgType::GETDATA, vInv);

                if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                    // with this block! Let's get them to announce using compact blocks in the future.
                    MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
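                    // Note: the condition above means this only fires when the compact block was
                    // not already in flight, it is the only block currently in flight at all, and
                    // its parent is CHAIN-valid, i.e. pfrom beat every other peer while we were
                    // well synced. That is the cue for asking pfrom to announce future blocks via
                    // cmpctblock (the BIP 152 high-bandwidth mode) instead of headers/inv.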
                BlockTransactionsRequest req;
                for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                    if (!partialBlock.IsTxAvailable(i))
                        req.indexes.push_back(i);
                if (req.indexes.empty()) {
                    // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
                    BlockTransactions txn;
                    txn.blockhash = cmpctblock.header.GetHash();
                    CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
                    return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
                    req.blockhash = pindex->GetBlockHash();
                    pfrom->PushMessage(NetMsgType::GETBLOCKTXN, req);
            if (fAlreadyInFlight) {
                // We requested this block, but its far into the future, so our
                // mempool will probably be useless - request the block normally
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                pfrom->PushMessage(NetMsgType::GETDATA, vInv);
                // If this was an announce-cmpctblock, we want the same treatment as a header message
                // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
                std::vector<CBlock> headers;
                headers.push_back(cmpctblock.header);
                CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
                vHeadersMsg << headers;
                return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);

        CheckBlockIndex(chainparams.GetConsensus());
    else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
        BlockTransactions resp;

        bool fBlockRead = false;
            map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
            if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
                    it->second.first != pfrom->GetId()) {
                LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);

            PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
            ReadStatus status = partialBlock.FillBlock(block, resp.txn);
            if (status == READ_STATUS_INVALID) {
                MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
                Misbehaving(pfrom->GetId(), 100);
                LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
            } else if (status == READ_STATUS_FAILED) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
                pfrom->PushMessage(NetMsgType::GETDATA, invs);
        } // Don't hold cs_main when we call into ProcessNewBlock
            CValidationState state;
            ProcessNewBlock(state, chainparams, pfrom, &block, false, NULL);
            if (state.IsInvalid(nDoS)) {
                assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
                pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                   state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
                    Misbehaving(pfrom->GetId(), nDoS);
    else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
    {
        std::vector<CBlockHeader> headers;

        // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
        unsigned int nCount = ReadCompactSize(vRecv);
        if (nCount > MAX_HEADERS_RESULTS) {
            LOCK(cs_main);
            Misbehaving(pfrom->GetId(), 20);
            return error("headers message size = %u", nCount);
        }
        headers.resize(nCount);
        for (unsigned int n = 0; n < nCount; n++) {
            vRecv >> headers[n];
            ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
        }

        {
            LOCK(cs_main);

            if (nCount == 0) {
                // Nothing interesting. Stop asking this peer for more headers.
                return true;
            }

            CNodeState *nodestate = State(pfrom->GetId());

            // If this looks like it could be a block announcement (nCount <
            // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
            // don't connect:
            // - Send a getheaders message in response to try to connect the chain.
            // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
            //   don't connect before giving DoS points
            // - Once a headers message is received that is valid and does connect,
            //   nUnconnectingHeaders gets reset back to 0.
            if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
                nodestate->nUnconnectingHeaders++;
                pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
                LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                        headers[0].GetHash().ToString(),
                        headers[0].hashPrevBlock.ToString(),
                        pindexBestHeader->nHeight,
                        pfrom->id, nodestate->nUnconnectingHeaders);
                // Set hashLastUnknownBlock for this peer, so that if we
                // eventually get the headers - even from a different peer -
                // we can use this peer to download.
                UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());

                if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
                    Misbehaving(pfrom->GetId(), 20);
                }
                return true;
            }

            CBlockIndex *pindexLast = NULL;
            BOOST_FOREACH(const CBlockHeader& header, headers) {
                CValidationState state;
                if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
                    Misbehaving(pfrom->GetId(), 20);
                    return error("non-continuous headers sequence");
                }
                if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {
                    int nDoS;
                    if (state.IsInvalid(nDoS)) {
                        if (nDoS > 0)
                            Misbehaving(pfrom->GetId(), nDoS);
                        return error("invalid header received");
                    }
                }
            }

            if (nodestate->nUnconnectingHeaders > 0) {
                LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
            }
            nodestate->nUnconnectingHeaders = 0;

            UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());

            if (nCount == MAX_HEADERS_RESULTS) {
                // Headers message had its maximum size; the peer may have more headers.
                // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
                // from there instead.
                LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
                pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
            }

            bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
            // If this set of headers is valid and ends in a block with at least as
            // much work as our tip, download as much as possible.
            if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
                vector<CBlockIndex*> vToFetch;
                CBlockIndex *pindexWalk = pindexLast;
                // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
                while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                            !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                            (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                        // We don't have this block, and it's not yet in flight.
                        vToFetch.push_back(pindexWalk);
                    }
                    pindexWalk = pindexWalk->pprev;
                }
                // If pindexWalk still isn't on our main chain, we're looking at a
                // very large reorg at a time we think we're close to caught up to
                // the main chain -- this shouldn't really happen. Bail out on the
                // direct fetch and rely on parallel download instead.
                if (!chainActive.Contains(pindexWalk)) {
                    LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
                            pindexLast->GetBlockHash().ToString(),
                            pindexLast->nHeight);
                } else {
                    vector<CInv> vGetData;
                    // Download as much as possible, from earliest to latest.
                    BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
                        if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                            // Can't download any more from this peer
                            break;
                        }
                        uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
                        vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                        MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
                        LogPrint("net", "Requesting block %s from peer=%d\n",
                                pindex->GetBlockHash().ToString(), pfrom->id);
                    }
                    if (vGetData.size() > 1) {
                        LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
                                pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                    }
                    if (vGetData.size() > 0) {
                        if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                            // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                            // with this block! Let's get them to announce using compact blocks in the future.
                            MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
                            // In any case, we want to download using a compact block, not a regular one
                            vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                        }
                        pfrom->PushMessage(NetMsgType::GETDATA, vGetData);
                    }
                }
            }

            CheckBlockIndex(chainparams.GetConsensus());
        }
    }

    else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
    {
        CBlock block;
        vRecv >> block;

        LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id);

        CValidationState state;
        // Process all blocks from whitelisted peers, even if not requested,
        // unless we're still syncing with the network.
        // Such an unrequested block may still be processed, subject to the
        // conditions in AcceptBlock().
        bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
        ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL);
        int nDoS;
        if (state.IsInvalid(nDoS)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                               state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
            if (nDoS > 0) {
                LOCK(cs_main);
                Misbehaving(pfrom->GetId(), nDoS);
            }
        }
    }

    else if (strCommand == NetMsgType::GETADDR)
    {
        // This asymmetric behavior for inbound and outbound connections was introduced
        // to prevent a fingerprinting attack: an attacker can send specific fake addresses
        // to users' AddrMan and later request them by sending getaddr messages.
        // Making nodes which are behind NAT and can only make outgoing connections ignore
        // the getaddr message mitigates the attack.
        if (!pfrom->fInbound) {
            LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
            return true;
        }

        // Only send one GetAddr response per connection to reduce resource waste
        // and discourage addr stamping of INV announcements.
        if (pfrom->fSentAddr) {
            LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
            return true;
        }
        pfrom->fSentAddr = true;

        pfrom->vAddrToSend.clear();
        vector<CAddress> vAddr = connman.GetAddresses();
        FastRandomContext insecure_rand;
        BOOST_FOREACH(const CAddress &addr, vAddr)
            pfrom->PushAddress(addr, insecure_rand);
    }

    else if (strCommand == NetMsgType::MEMPOOL)
    {
        if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
        {
            LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;
            return true;
        }

        if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
        {
            LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;
            return true;
        }

        LOCK(pfrom->cs_inventory);
        pfrom->fSendMempool = true;
    }

    else if (strCommand == NetMsgType::PING)
    {
        if (pfrom->nVersion > BIP0031_VERSION)
        {
            uint64_t nonce = 0;
            vRecv >> nonce;
            // Echo the message back with the nonce. This allows for two useful features:
            //
            // 1) A remote node can quickly check if the connection is operational
            // 2) Remote nodes can measure the latency of the network thread. If this node
            //    is overloaded it won't respond to pings quickly and the remote node can
            //    avoid sending us more work, like chain download requests.
            //
            // The nonce stops the remote getting confused between different pings: without
            // it, if the remote node sends a ping once per second and this node takes 5
            // seconds to respond to each, the 5th ping the remote sends would appear to
            // return very quickly.
            pfrom->PushMessage(NetMsgType::PONG, nonce);
        }
    }

    else if (strCommand == NetMsgType::PONG)
    {
        int64_t pingUsecEnd = nTimeReceived;
        uint64_t nonce = 0;
        size_t nAvail = vRecv.in_avail();
        bool bPingFinished = false;
        std::string sProblem;

        if (nAvail >= sizeof(nonce)) {
            vRecv >> nonce;

            // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
            if (pfrom->nPingNonceSent != 0) {
                if (nonce == pfrom->nPingNonceSent) {
                    // Matching pong received, this ping is no longer outstanding
                    bPingFinished = true;
                    int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                    if (pingUsecTime > 0) {
                        // Successful ping time measurement, replace previous
                        pfrom->nPingUsecTime = pingUsecTime;
                        pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
                    } else {
                        // This should never happen
                        sProblem = "Timing mishap";
                    }
                } else {
                    // Nonce mismatches are normal when pings are overlapping
                    sProblem = "Nonce mismatch";
                    if (nonce == 0) {
                        // This is most likely a bug in another implementation somewhere; cancel this ping
                        bPingFinished = true;
                        sProblem = "Nonce zero";
                    }
                }
            } else {
                sProblem = "Unsolicited pong without ping";
            }
        } else {
            // This is most likely a bug in another implementation somewhere; cancel this ping
            bPingFinished = true;
            sProblem = "Short payload";
        }

        if (!(sProblem.empty())) {
            LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                pfrom->id,
                sProblem,
                pfrom->nPingNonceSent,
                nonce,
                nAvail);
        }
        if (bPingFinished) {
            pfrom->nPingNonceSent = 0;
        }
    }

    else if (strCommand == NetMsgType::FILTERLOAD)
    {
        CBloomFilter filter;
        vRecv >> filter;

        if (!filter.IsWithinSizeConstraints())
        {
            // There is no excuse for sending a too-large filter
            LOCK(cs_main);
            Misbehaving(pfrom->GetId(), 100);
        }
        else
        {
            LOCK(pfrom->cs_filter);
            delete pfrom->pfilter;
            pfrom->pfilter = new CBloomFilter(filter);
            pfrom->pfilter->UpdateEmptyFull();
            pfrom->fRelayTxes = true;
        }
    }

    else if (strCommand == NetMsgType::FILTERADD)
    {
        vector<unsigned char> vData;
        vRecv >> vData;

        // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
        // and thus, the maximum size any matched object can have) in a filteradd message
        bool bad = false;
        if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
            bad = true;
        } else {
            LOCK(pfrom->cs_filter);
            if (pfrom->pfilter) {
                pfrom->pfilter->insert(vData);
            } else {
                bad = true;
            }
        }
        if (bad) {
            LOCK(cs_main);
            Misbehaving(pfrom->GetId(), 100);
        }
    }

    else if (strCommand == NetMsgType::FILTERCLEAR)
    {
        LOCK(pfrom->cs_filter);
        delete pfrom->pfilter;
        pfrom->pfilter = new CBloomFilter();
        pfrom->fRelayTxes = true;
    }

    else if (strCommand == NetMsgType::REJECT)
    {
        if (fDebug) {
            try {
                string strMsg; unsigned char ccode; string strReason;
                vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);

                ostringstream ss;
                ss << strMsg << " code " << itostr(ccode) << ": " << strReason;

                if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
                {
                    uint256 hash;
                    vRecv >> hash;
                    ss << ": hash " << hash.ToString();
                }
                LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
            } catch (const std::ios_base::failure&) {
                // Avoid feedback loops by preventing reject messages from triggering a new reject message.
                LogPrint("net", "Unparseable reject message received\n");
            }
        }
    }

    else if (strCommand == NetMsgType::FEEFILTER) {
        CAmount newFeeFilter = 0;
        vRecv >> newFeeFilter;
        if (MoneyRange(newFeeFilter)) {
            {
                LOCK(pfrom->cs_feeFilter);
                pfrom->minFeeFilter = newFeeFilter;
            }
            LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
        }
    }

    else if (strCommand == NetMsgType::NOTFOUND) {
        // We do not care about the NOTFOUND message, but logging an Unknown Command
        // message would be undesirable as we transmit it ourselves.
    }

    else {
        // Ignore unknown commands for extensibility
        LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
    }

    return true;
}

// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom, CConnman& connman)
{
    const CChainParams& chainparams = Params();
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
    // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());

    //
    // Message format
    //  (4) message start
    //  (12) command
    //  (4) size
    //  (4) checksum
    //  (x) data
    //
    bool fOk = true;

    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman);

    // this maintains the order of responses
    if (!pfrom->vRecvGetData.empty()) return fOk;

    std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
    while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= nMaxSendBufferSize)
            break;

        // get next message
        CNetMessage& msg = *it;

        // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
        //           msg.hdr.nMessageSize, msg.vRecv.size(),
        //           msg.complete() ? "Y" : "N");

        // end, if an incomplete message is found
        if (!msg.complete())
            break;

        // at this point, any failure means we can delete the current message
        it++;

        // Scan for message start
        if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
            LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
            fOk = false;
            break;
        }

        // Read header
        CMessageHeader& hdr = msg.hdr;
        if (!hdr.IsValid(chainparams.MessageStart()))
        {
            LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
            continue;
        }
        string strCommand = hdr.GetCommand();

        // Message size
        unsigned int nMessageSize = hdr.nMessageSize;

        // Checksum
        CDataStream& vRecv = msg.vRecv;
        uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
        if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
        {
            LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
               SanitizeString(strCommand), nMessageSize,
               HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
               HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
            continue;
        }

        // Process message
        bool fRet = false;
        try
        {
            fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman);
            boost::this_thread::interruption_point();
        }
        catch (const std::ios_base::failure& e)
        {
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
            if (strstr(e.what(), "end of data"))
            {
                // Allow exceptions from under-length message on vRecv
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else if (strstr(e.what(), "size too large"))
            {
                // Allow exceptions from over-long size
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
            {
                // Allow exceptions from non-canonical encoding
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else
            {
                PrintExceptionContinue(&e, "ProcessMessages()");
            }
        }
        catch (const boost::thread_interrupted&) {
            throw;
        }
        catch (const std::exception& e) {
            PrintExceptionContinue(&e, "ProcessMessages()");
        } catch (...) {
            PrintExceptionContinue(NULL, "ProcessMessages()");
        }

        if (!fRet)
            LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);

        break;
    }

    // In case the connection got shut down, its receive buffer was wiped
    if (!pfrom->fDisconnect)
        pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);

    return fOk;
}

class CompareInvMempoolOrder
{
    CTxMemPool *mp;
public:
    CompareInvMempoolOrder(CTxMemPool *_mempool)
    {
        mp = _mempool;
    }

    bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
    {
        /* As std::make_heap produces a max-heap, we want the entries with the
         * fewest ancestors/highest fee to sort later. */
        return mp->CompareDepthAndScore(*b, *a);
    }
};
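
// Illustrative note (editorial sketch, not upstream code): SendMessages() below builds a
// heap of setInventoryTxToSend iterators with this comparator and repeatedly pop_heap()s
// it, so the transaction with the fewest in-mempool ancestors and the best fee rate comes
// out first, and parents tend to be announced before their children. A minimal sketch of
// that pattern, assuming a populated CTxMemPool named `pool` and a std::set<uint256>
// named `toSend` (both hypothetical names):
//
//     std::vector<std::set<uint256>::iterator> heap;
//     for (auto it = toSend.begin(); it != toSend.end(); ++it)
//         heap.push_back(it);
//     CompareInvMempoolOrder cmp(&pool);
//     std::make_heap(heap.begin(), heap.end(), cmp);
//     std::pop_heap(heap.begin(), heap.end(), cmp); // best candidate is now at heap.back()
//     uint256 next = *heap.back();
//     heap.pop_back();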

bool SendMessages(CNode* pto, CConnman& connman)
{
    const Consensus::Params& consensusParams = Params().GetConsensus();
    {
        // Don't send anything until we get its version message
        if (pto->nVersion == 0)
            return true;

        //
        // Message: ping
        //
        bool pingSend = false;
        if (pto->fPingQueued) {
            // RPC ping request by user
            pingSend = true;
        }
        if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
            // Ping automatically sent as a latency probe & keepalive.
            pingSend = true;
        }
        if (pingSend && !pto->fDisconnect) {
            uint64_t nonce = 0;
            while (nonce == 0) {
                GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
            }
            pto->fPingQueued = false;
            pto->nPingUsecStart = GetTimeMicros();
            if (pto->nVersion > BIP0031_VERSION) {
                pto->nPingNonceSent = nonce;
                pto->PushMessage(NetMsgType::PING, nonce);
            } else {
                // Peer is too old to support ping command with nonce, pong will never arrive.
                pto->nPingNonceSent = 0;
                pto->PushMessage(NetMsgType::PING);
            }
        }

        TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
        if (!lockMain)
            return true;

        // Address refresh broadcast
        int64_t nNow = GetTimeMicros();
        if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
            AdvertiseLocal(pto);
            pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
        }

        //
        // Message: addr
        //
        if (pto->nNextAddrSend < nNow) {
            pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
            vector<CAddress> vAddr;
            vAddr.reserve(pto->vAddrToSend.size());
            BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
            {
                if (!pto->addrKnown.contains(addr.GetKey()))
                {
                    pto->addrKnown.insert(addr.GetKey());
                    vAddr.push_back(addr);
                    // receiver rejects addr messages larger than 1000
                    if (vAddr.size() >= 1000)
                    {
                        pto->PushMessage(NetMsgType::ADDR, vAddr);
                        vAddr.clear();
                    }
                }
            }
            pto->vAddrToSend.clear();
            if (!vAddr.empty())
                pto->PushMessage(NetMsgType::ADDR, vAddr);
            // we only send the big addr message once
            if (pto->vAddrToSend.capacity() > 40)
                pto->vAddrToSend.shrink_to_fit();
        }

        CNodeState &state = *State(pto->GetId());
        if (state.fShouldBan) {
            if (pto->fWhitelisted)
                LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
            else {
                pto->fDisconnect = true;
                if (pto->addr.IsLocal())
                    LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
                else
                {
                    connman.Ban(pto->addr, BanReasonNodeMisbehaving);
                }
            }
            state.fShouldBan = false;
        }

        BOOST_FOREACH(const CBlockReject& reject, state.rejects)
            pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
        state.rejects.clear();

        // Start block sync
        if (pindexBestHeader == NULL)
            pindexBestHeader = chainActive.Tip();
        bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
        if (!state.fSyncStarted && !pto->fClient && !pto->fDisconnect && !fImporting && !fReindex) {
            // Only actively request headers from a single peer, unless we're close to today.
            if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
                state.fSyncStarted = true;
                nSyncStarted++;
                const CBlockIndex *pindexStart = pindexBestHeader;
                /* If possible, start at the block preceding the currently
                   best known header. This ensures that we always get a
                   non-empty list of headers back as long as the peer
                   is up-to-date. With a non-empty response, we can initialise
                   the peer's known best block. This wouldn't be possible
                   if we requested starting at pindexBestHeader and
                   got back an empty response. */
                if (pindexStart->pprev)
                    pindexStart = pindexStart->pprev;
                LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
                pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
            }
        }

        // Resend wallet transactions that haven't gotten in a block yet
        // Except during reindex, importing and IBD, when old wallet
        // transactions become unconfirmed and spam other nodes.
        if (!fReindex && !fImporting && !IsInitialBlockDownload())
        {
            GetMainSignals().Broadcast(nTimeBestReceived, &connman);
        }

        //
        // Try sending block announcements via headers
        //
        {
            // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
            // list of block hashes we're relaying, and our peer wants
            // headers announcements, then find the first header
            // not yet known to our peer but would connect, and send.
            // If no header would connect, or if we have too many
            // blocks, or if the peer doesn't want headers, just
            // add all to the inv queue.
            LOCK(pto->cs_inventory);
            vector<CBlock> vHeaders;
            bool fRevertToInv = ((!state.fPreferHeaders &&
                                 (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
                                pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
            CBlockIndex *pBestIndex = NULL; // last header queued for delivery
            ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date

            if (!fRevertToInv) {
                bool fFoundStartingHeader = false;
                // Try to find first header that our peer doesn't have, and
                // then send all headers past that one. If we come across any
                // headers that aren't on chainActive, give up.
                BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
                    BlockMap::iterator mi = mapBlockIndex.find(hash);
                    assert(mi != mapBlockIndex.end());
                    CBlockIndex *pindex = mi->second;
                    if (chainActive[pindex->nHeight] != pindex) {
                        // Bail out if we reorged away from this block
                        fRevertToInv = true;
                        break;
                    }
                    if (pBestIndex != NULL && pindex->pprev != pBestIndex) {
                        // This means that the list of blocks to announce don't
                        // connect to each other.
                        // This shouldn't really be possible to hit during
                        // regular operation (because reorgs should take us to
                        // a chain that has some block not on the prior chain,
                        // which should be caught by the prior check), but one
                        // way this could happen is by using invalidateblock /
                        // reconsiderblock repeatedly on the tip, causing it to
                        // be added multiple times to vBlockHashesToAnnounce.
                        // Robustly deal with this rare situation by reverting
                        // to an inv.
                        fRevertToInv = true;
                        break;
                    }
                    pBestIndex = pindex;
                    if (fFoundStartingHeader) {
                        // add this to the headers message
                        vHeaders.push_back(pindex->GetBlockHeader());
                    } else if (PeerHasHeader(&state, pindex)) {
                        continue; // keep looking for the first new block
                    } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
                        // Peer doesn't have this header but they do have the prior one.
                        // Start sending headers.
                        fFoundStartingHeader = true;
                        vHeaders.push_back(pindex->GetBlockHeader());
                    } else {
                        // Peer doesn't have this header or the prior one -- nothing will
                        // connect, so bail out.
                        fRevertToInv = true;
                        break;
                    }
                }
            }
            if (!fRevertToInv && !vHeaders.empty()) {
                if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                    // We only send up to 1 block as header-and-ids, as otherwise
                    // probably means we're doing an initial-ish-sync or they're slow
                    LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
                            vHeaders.front().GetHash().ToString(), pto->id);
                    //TODO: Shouldn't need to reload block from disk, but requires refactor
                    CBlock block;
                    assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
                    CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
                    pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                    state.pindexBestHeaderSent = pBestIndex;
                } else if (state.fPreferHeaders) {
                    if (vHeaders.size() > 1) {
                        LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                                vHeaders.size(),
                                vHeaders.front().GetHash().ToString(),
                                vHeaders.back().GetHash().ToString(), pto->id);
                    } else {
                        LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
                                vHeaders.front().GetHash().ToString(), pto->id);
                    }
                    pto->PushMessage(NetMsgType::HEADERS, vHeaders);
                    state.pindexBestHeaderSent = pBestIndex;
                } else
                    fRevertToInv = true;
            }
            if (fRevertToInv) {
                // If falling back to using an inv, just try to inv the tip.
                // The last entry in vBlockHashesToAnnounce was our tip at some point
                // in the past.
                if (!pto->vBlockHashesToAnnounce.empty()) {
                    const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
                    BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
                    assert(mi != mapBlockIndex.end());
                    CBlockIndex *pindex = mi->second;

                    // Warn if we're announcing a block that is not on the main chain.
                    // This should be very rare and could be optimized out.
                    // Just log for now.
                    if (chainActive[pindex->nHeight] != pindex) {
                        LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
                                hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
                    }

                    // If the peer's chain has this block, don't inv it back.
                    if (!PeerHasHeader(&state, pindex)) {
                        pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                        LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
                                pto->id, hashToAnnounce.ToString());
                    }
                }
            }
            pto->vBlockHashesToAnnounce.clear();
        }

        //
        // Message: inventory
        //
        vector<CInv> vInv;
        {
            LOCK(pto->cs_inventory);
            vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));

            // Add blocks
            BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
                vInv.push_back(CInv(MSG_BLOCK, hash));
                if (vInv.size() == MAX_INV_SZ) {
                    pto->PushMessage(NetMsgType::INV, vInv);
                    vInv.clear();
                }
            }
            pto->vInventoryBlockToSend.clear();

            // Check whether periodic sends should happen
            bool fSendTrickle = pto->fWhitelisted;
            if (pto->nNextInvSend < nNow) {
                fSendTrickle = true;
                // Use half the delay for outbound peers, as there is less privacy concern for them.
                pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
            }

            // Time to send but the peer has requested we not relay transactions.
            if (fSendTrickle) {
                LOCK(pto->cs_filter);
                if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
            }

            // Respond to BIP35 mempool requests
            if (fSendTrickle && pto->fSendMempool) {
                auto vtxinfo = mempool.infoAll();
                pto->fSendMempool = false;
                CAmount filterrate = 0;
                {
                    LOCK(pto->cs_feeFilter);
                    filterrate = pto->minFeeFilter;
                }

                LOCK(pto->cs_filter);

                for (const auto& txinfo : vtxinfo) {
                    const uint256& hash = txinfo.tx->GetHash();
                    CInv inv(MSG_TX, hash);
                    pto->setInventoryTxToSend.erase(hash);
                    if (filterrate) {
                        if (txinfo.feeRate.GetFeePerK() < filterrate)
                            continue;
                    }
                    if (pto->pfilter) {
                        if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                    }
                    pto->filterInventoryKnown.insert(hash);
                    vInv.push_back(inv);
                    if (vInv.size() == MAX_INV_SZ) {
                        pto->PushMessage(NetMsgType::INV, vInv);
                        vInv.clear();
                    }
                }
                pto->timeLastMempoolReq = GetTime();
            }

            // Determine transactions to relay
            if (fSendTrickle) {
                // Produce a vector with all candidates for sending
                vector<std::set<uint256>::iterator> vInvTx;
                vInvTx.reserve(pto->setInventoryTxToSend.size());
                for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
                    vInvTx.push_back(it);
                }
                CAmount filterrate = 0;
                {
                    LOCK(pto->cs_feeFilter);
                    filterrate = pto->minFeeFilter;
                }
                // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
                // A heap is used so that not all items need sorting if only a few are being sent.
                CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
                std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                // No reason to drain out at many times the network's capacity,
                // especially since we have many peers and some will draw much shorter delays.
                unsigned int nRelayedTransactions = 0;
                LOCK(pto->cs_filter);
                while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
                    // Fetch the top element from the heap
                    std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                    std::set<uint256>::iterator it = vInvTx.back();
                    vInvTx.pop_back();
                    uint256 hash = *it;
                    // Remove it from the to-be-sent set
                    pto->setInventoryTxToSend.erase(it);
                    // Check if not in the filter already
                    if (pto->filterInventoryKnown.contains(hash)) {
                        continue;
                    }
                    // Not in the mempool anymore? don't bother sending it.
                    auto txinfo = mempool.info(hash);
                    if (!txinfo.tx) {
                        continue;
                    }
                    if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
                        continue;
                    }
                    if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                    // Send
                    vInv.push_back(CInv(MSG_TX, hash));
                    nRelayedTransactions++;
                    {
                        // Expire old relay messages
                        while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
                        {
                            mapRelay.erase(vRelayExpiration.front().second);
                            vRelayExpiration.pop_front();
                        }

                        auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
                        if (ret.second) {
                            vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
                        }
                    }
                    if (vInv.size() == MAX_INV_SZ) {
                        pto->PushMessage(NetMsgType::INV, vInv);
                        vInv.clear();
                    }
                    pto->filterInventoryKnown.insert(hash);
                }
            }
        }
        if (!vInv.empty())
            pto->PushMessage(NetMsgType::INV, vInv);

        // Detect whether we're stalling
        nNow = GetTimeMicros();
        if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
            // Stalling only triggers when the block download window cannot move. During normal steady state,
            // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
            // should only happen during initial block download.
            LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
            pto->fDisconnect = true;
        }
        // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
        // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
        // We compensate for other peers to prevent killing off peers due to our own downstream link
        // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
        // to unreasonably increase our timeout.
        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
            int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
            if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
                LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
                pto->fDisconnect = true;
            }
        }

        //
        // Message: getdata (blocks)
        //
        vector<CInv> vGetData;
        if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            vector<CBlockIndex*> vToDownload;
            NodeId staller = -1;
            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
            BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
                uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
                vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
                LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                    pindex->nHeight, pto->id);
            }
            if (state.nBlocksInFlight == 0 && staller != -1) {
                if (State(staller)->nStallingSince == 0) {
                    State(staller)->nStallingSince = nNow;
                    LogPrint("net", "Stall started peer=%d\n", staller);
                }
            }
        }

        //
        // Message: getdata (non-blocks)
        //
        while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
        {
            const CInv& inv = (*pto->mapAskFor.begin()).second;
            if (!AlreadyHave(inv))
            {
                LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
                vGetData.push_back(inv);
                if (vGetData.size() >= 1000)
                {
                    pto->PushMessage(NetMsgType::GETDATA, vGetData);
                    vGetData.clear();
                }
            } else {
                //If we're not going to ask, don't expect a response.
                pto->setAskFor.erase(inv.hash);
            }
            pto->mapAskFor.erase(pto->mapAskFor.begin());
        }
        if (!vGetData.empty())
            pto->PushMessage(NetMsgType::GETDATA, vGetData);

        //
        // Message: feefilter
        //
        // We don't want white listed peers to filter txs to us if we have -whitelistforcerelay
        if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
            !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
            CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
            int64_t timeNow = GetTimeMicros();
            if (timeNow > pto->nextSendTimeFeeFilter) {
                CAmount filterToSend = filterRounder.round(currentFilter);
                if (filterToSend != pto->lastSentFeeFilter) {
                    pto->PushMessage(NetMsgType::FEEFILTER, filterToSend);
                    pto->lastSentFeeFilter = filterToSend;
                }
                pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
            }
            // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
            // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
            else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
                     (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
                pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
            }
        }
    }
    return true;
}

std::string CBlockFileInfo::ToString() const {
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}

ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}

int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
}
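
// Illustrative use of the two helpers above (editorial sketch of caller-side code, not part
// of this file; the enum values named are assumptions drawn from consensus/params.h and
// versionbits.h of the same codebase):
//
//     ThresholdState st = VersionBitsTipState(Params().GetConsensus(), Consensus::DEPLOYMENT_CSV);
//     if (st == THRESHOLD_ACTIVE) {
//         // the deployment is enforced as of the current tip
//     }
//     int since = VersionBitsTipStateSinceHeight(Params().GetConsensus(), Consensus::DEPLOYMENT_CSV);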

static const uint64_t MEMPOOL_DUMP_VERSION = 1;

bool LoadMempool(void)
{
    int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fopen((GetDataDir() / "mempool.dat").string().c_str(), "r");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    int64_t count = 0;
    int64_t failed = 0;
    int64_t skipped = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        double prioritydummy = 0;
        while (num--) {
            CTransaction tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                mempool.PrioritiseTransaction(tx.GetHash(), tx.GetHash().ToString(), prioritydummy, amountdelta);
            }
            CValidationState state;
            if (nTime + nExpiryTimeout > nNow) {
                LOCK(cs_main);
                AcceptToMemoryPoolWithTime(mempool, state, tx, true, NULL, nTime);
                if (state.IsValid()) {
                    ++count;
                } else {
                    ++failed;
                }
            } else {
                ++skipped;
            }
        }
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped);
    return true;
}
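
// Sketch of the mempool.dat layout implied by LoadMempool() above and DumpMempool() below
// (an editorial summary of the reconstructed serialization code, not an authoritative
// format specification):
//   - uint64_t  version   must equal MEMPOOL_DUMP_VERSION
//   - uint64_t  count     number of transaction records that follow
//   - count x { CTransaction tx; int64_t nTime; int64_t nFeeDelta; }
//   - std::map<uint256, CAmount> mapDeltas   remaining fee deltas for txs not in the dump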

void DumpMempool(void)
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;

    {
        LOCK(mempool.cs);
        for (const auto &i : mempool.mapDeltas) {
            mapDeltas[i.first] = i.second.first;
        }
        vinfo = mempool.infoAll();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE* filestr = fopen((GetDataDir() / "mempool.dat.new").string().c_str(), "w");
        if (!filestr) {
            return;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << (int64_t)i.nTime;
            file << (int64_t)i.nFeeDelta;
            mapDeltas.erase(i.tx->GetHash());
        }

        file << mapDeltas;
        FileCommit(file.Get());
        RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
    }
}

class CMainCleanup
{
public:
    CMainCleanup() {}
    ~CMainCleanup() {
        // block headers
        BlockMap::iterator it1 = mapBlockIndex.begin();
        for (; it1 != mapBlockIndex.end(); it1++)
            delete (*it1).second;
        mapBlockIndex.clear();

        // orphan transactions
        mapOrphanTransactions.clear();
        mapOrphanTransactionsByPrev.clear();
    }
} instance_of_cmaincleanup;