// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "arith_uint256.h"
#include "blockencodings.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "merkleblock.h"
#include "netmessagemaker.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "versionbits.h"

#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/math/distributions/poisson.hpp>
#include <boost/thread.hpp>

#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif
CCriticalSection cs_main;

BlockMap mapBlockIndex;
CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;

CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;

CTxMemPool mempool(::minRelayTxFee);
FeeFilterRounder filterRounder(::minRelayTxFee);
struct IteratorComparator
{
    template<typename I>
    bool operator()(const I& a, const I& b)
    {
        return &(*a) < &(*b);
    }
};

map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

static void CheckBlockIndex(const Consensus::Params& consensusParams);

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const string strMessageMagic = "Bitcoin Signed Message:\n";

static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
struct CBlockIndexWorkComparator
{
    bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};
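// Illustrative note (not part of the original file): because "more work"
// compares as false-first, a std::set ordered by CBlockIndexWorkComparator
// keeps the most-work (and, among ties, earliest-received) candidate at the
// *end* of the set, so code choosing the next tip can do something like:
//
//     std::set<CBlockIndex*, CBlockIndexWorkComparator> candidates;
//     // ... insert fully-validated candidates ...
//     CBlockIndex* pindexBest = candidates.empty() ? NULL : *candidates.rbegin();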
CBlockIndex *pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
 * missing the data for the block.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 * Pruned nodes may have entries where B is missing data.
 */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
 *  block/undo files that should be deleted. Set on startup
 *  or if we allocate more file space when we're in prune mode
 */
bool fCheckForPruning = false;

/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
int32_t nBlockSequenceId = 1;
/** Decreasing counter (used by subsequent preciousblock calls). */
int32_t nBlockReverseSequenceId = -1;
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;

/**
 * Sources of received blocks, saved to be able to send them reject
 * messages or ban them when processing happens afterwards. Protected by
 * cs_main.
 * Set mapBlockSource[hash].second to false if the node should not be
 * punished if the block is invalid.
 */
map<uint256, std::pair<NodeId, bool>> mapBlockSource;

/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset. Protected by
 * cs_main.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60-byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 *
 * Memory used: 1.3 MB
 */
std::unique_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
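// Illustrative sketch (not part of the original file): the figures in the
// comment above correspond to the filter's construction parameters, roughly
//
//     recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
//
// i.e. ~2 minutes of headroom at a worst-case ~1000 rejected txs/sec
// (120,000 entries) with a one-in-a-million false positive rate, costing
// about 1.3 MB of memory; the filter is reset whenever the chain tip changes.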
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
    uint256 hash;
    CBlockIndex* pindex;                                     //!< Optional.
    bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;

/** Stack of nodes which we have set to announce using compact blocks */
list<NodeId> lNodesAnnouncingHeaderAndIDs;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
set<int> setDirtyFileInfo;

/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;

/** Relay map, protected by cs_main. */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay;
/** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main. */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;

//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//
//////////////////////////////////////////////////////////////////////////////
struct CBlockReject {
    unsigned char chRejectCode;
    string strRejectReason;
    uint256 hashBlock;
};

/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    const CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! Accumulated misbehaviour score for this peer.
    int nMisbehavior;
    //! Whether this peer should be disconnected and banned (unless whitelisted).
    bool fShouldBan;
    //! String name of this peer (debugging/logging purposes).
    const std::string name;
    //! List of asynchronously-determined block rejections to notify this peer about.
    std::vector<CBlockReject> rejects;
    //! The best known block we know this peer has announced.
    CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    CBlockIndex *pindexBestHeaderSent;
    //! Length of current-streak of unconnecting headers announcements
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
    int64_t nDownloadingSince;
    int nBlocksInFlight;
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;
    //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
    bool fPreferHeaderAndIDs;
    /**
     * Whether this peer will send us cmpctblocks if we request them.
     * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
     * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
     */
    bool fProvidesHeaderAndIDs;
    //! Whether this peer can give us witnesses
    bool fHaveWitness;
    //! Whether this peer wants witnesses in cmpctblocks/blocktxns
    bool fWantsCmpctWitness;
    /**
     * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
     * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
     */
    bool fSupportsDesiredCmpctVersion;

    CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
        fCurrentlyConnected = false;
        nMisbehavior = 0;
        fShouldBan = false;
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = NULL;
        pindexBestHeaderSent = NULL;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fHaveWitness = false;
        fWantsCmpctWitness = false;
        fSupportsDesiredCmpctVersion = false;
    }
};

/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;
CNodeState *State(NodeId pnode) {
    map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return NULL;
    return &it->second;
}

void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;

    nPreferredDownload += state->fPreferredDownload;
}
void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
{
    ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
    uint64_t nonce = pnode->GetLocalNonce();
    int nNodeStartingHeight = pnode->GetMyStartingHeight();
    NodeId nodeid = pnode->GetId();
    CAddress addr = pnode->addr;

    CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
            nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));

    if (fLogIPs)
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
    else
        LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
}
void InitializeNode(CNode *pnode, CConnman& connman) {
    CAddress addr = pnode->addr;
    std::string addrName = pnode->addrName;
    NodeId nodeid = pnode->GetId();

    mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));

    PushNodeVersion(pnode, connman, GetTime());
}

void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
    fUpdateConnectionTime = false;
    CNodeState *state = State(nodeid);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
        fUpdateConnectionTime = true;
    }

    BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
    }
}
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
bool MarkBlockAsReceived(const uint256& hash) {
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Short-circuit most stuff in case it's from the same node
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        *pit = &itInFlight->second.second;
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTimeMicros();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    if (!state->hashLastUnknownBlock.IsNull()) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock.SetNull();
        }
    }
}

/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) {
    if (!nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == pfrom->GetId()) {
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
                return;
            }
        }
        bool fAnnounceUsingCMPCTBLOCK = false;
        uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
                connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        fAnnounceUsingCMPCTBLOCK = true;
        connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
    }
}
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
    return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}

bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}
/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
    if (pa->nHeight > pb->nHeight) {
        pa = pa->GetAncestor(pb->nHeight);
    } else if (pb->nHeight > pa->nHeight) {
        pb = pb->GetAncestor(pa->nHeight);
    }

    while (pa != pb && pa && pb) {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == NULL) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<CBlockIndex*> vToFetch;
    CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
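// Illustrative note (not part of the original file): with BLOCK_DOWNLOAD_WINDOW
// at its usual value of 1024, a peer whose last common block is at height H is
// only asked for blocks up to height H + 1024. The extra "+ 1" folded into
// nMaxHeight exists solely so the loop can observe that one more block *would*
// have been fetchable and record the peer currently occupying the window as
// the staller, e.g.:
//
//     int nWindowEnd = H + 1024;                                   // last height we may request
//     int nMaxHeight = std::min(peerBestHeight, nWindowEnd + 1);   // the +1 detects stalling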
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    CNodeState *state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}

void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    // Find the first block the caller has in the main chain
    BOOST_FOREACH(const uint256& hash, locator.vHave) {
        BlockMap::iterator mi = mapBlockIndex.find(hash);
        if (mi != mapBlockIndex.end())
        {
            CBlockIndex* pindex = (*mi).second;
            if (chain.Contains(pindex))
                return pindex;
            if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                return chain.Tip();
            }
        }
    }
    return chain.Genesis();
}

CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;

enum FlushStateMode {
    FLUSH_STATE_NONE,
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
    FLUSH_STATE_ALWAYS
};

// See definition for documentation
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode);

//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
//////////////////////////////////////////////////////////////////////////////

bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 99,999 bytes big is
    // at most 10 megabytes of orphans and somewhat more by prev index (in the worst case):
    unsigned int sz = GetTransactionWeight(tx);
    if (sz >= MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
    assert(ret.second);
    BOOST_FOREACH(const CTxIn& txin, tx.vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

    LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
    return 1;
}
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
        }
    }
    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
        while (iter != mapOrphanTransactions.end())
        {
            map<uint256, COrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
    }
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
    if (tx.nLockTime == 0)
        return true;
    if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
        return true;
    for (const auto& txin : tx.vin) {
        if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
            return false;
    }
    return true;
}
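// Illustrative note (not part of the original file): LOCKTIME_THRESHOLD is
// 500000000, so an nLockTime below that value is read as a block height and a
// value at or above it as a UNIX timestamp. For example (hypothetical values):
//
//     nLockTime = 450000      -> final once nBlockHeight > 450000
//     nLockTime = 1700000000  -> final once nBlockTime   > 1700000000
//
// and a transaction whose inputs all use SEQUENCE_FINAL is final regardless of
// nLockTime.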
bool CheckFinalTx(const CTransaction &tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // BIP113 will require that time-locked transactions have nLockTime set to
    // less than the median time of the previous block they're contained in.
    // When the next block is created its previous block will be the current
    // chain tip, so we use that to calculate the median time passed to
    // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                             ? chainActive.Tip()->GetMedianTimePast()
                             : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
 * Calculates the block height and previous block's median time past at
 * which the transaction will be considered final in the context of BIP 68.
 * Also removes from the vector of input heights any entries which did not
 * correspond to sequence locked inputs as they do not affect the calculation.
 */
static std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    assert(prevHeights->size() == tx.vin.size());

    // Will be set to the equivalent height- and time-based nLockTime
    // values that would be necessary to satisfy all relative lock-
    // time constraints given our view of block chain history.
    // The semantics of nLockTime are the last invalid height/time, so
    // use -1 to have the effect of any height or time being valid.
    int nMinHeight = -1;
    int64_t nMinTime = -1;

    // tx.nVersion is signed integer so requires cast to unsigned otherwise
    // we would be doing a signed comparison and half the range of nVersion
    // wouldn't support BIP 68.
    bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
                      && flags & LOCKTIME_VERIFY_SEQUENCE;

    // Do not enforce sequence numbers as a relative lock time
    // unless we have been instructed to
    if (!fEnforceBIP68) {
        return std::make_pair(nMinHeight, nMinTime);
    }

    for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
        const CTxIn& txin = tx.vin[txinIndex];

        // Sequence numbers with the most significant bit set are not
        // treated as relative lock-times, nor are they given any
        // consensus-enforced meaning at this point.
        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
            // The height of this input is not relevant for sequence locks
            (*prevHeights)[txinIndex] = 0;
            continue;
        }

        int nCoinHeight = (*prevHeights)[txinIndex];

        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
            int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
            // NOTE: Subtract 1 to maintain nLockTime semantics
            // BIP 68 relative lock times have the semantics of calculating
            // the first block or time at which the transaction would be
            // valid. When calculating the effective block time or height
            // for the entire transaction, we switch to using the
            // semantics of nLockTime which is the last invalid block
            // time or height. Thus we subtract 1 from the calculated
            // time or height.

            // Time-based relative lock-times are measured from the
            // smallest allowed timestamp of the block containing the
            // txout being spent, which is the median time past of the
            // block prior.
            nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
        } else {
            nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
        }
    }

    return std::make_pair(nMinHeight, nMinTime);
}
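// Illustrative worked example (not part of the original file): for a
// time-based relative lock, SEQUENCE_LOCKTIME_MASK is 0x0000ffff and
// SEQUENCE_LOCKTIME_GRANULARITY is 9, so an input carrying
//
//     txin.nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 10;
//
// demands that 10 << 9 = 5120 seconds (~85 minutes) elapse after the median
// time past of the block preceding the one containing the spent output; the
// trailing "- 1" converts that into nLockTime-style "last invalid time"
// semantics, as described in the comments above.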
static bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
{
    assert(block.pprev);
    int64_t nBlockTime = block.pprev->GetMedianTimePast();
    if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
        return false;

    return true;
}

bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
}
bool TestLockPointValidity(const LockPoints* lp)
{
    AssertLockHeld(cs_main);
    assert(lp);
    // If there are relative lock times then the maxInputBlock will be set
    // If there are no relative lock times, the LockPoints don't depend on the chain
    if (lp->maxInputBlock) {
        // Check whether chainActive is an extension of the block at which the LockPoints
        // calculation was valid. If not LockPoints are no longer valid
        if (!chainActive.Contains(lp->maxInputBlock)) {
            return false;
        }
    }

    // LockPoints still valid
    return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);

    CBlockIndex* tip = chainActive.Tip();
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than chainActive.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // pcoinsTip contains the UTXO set for chainActive.Tip()
        CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            CCoins coins;
            if (!viewMemPool.GetCoins(txin.prevout.hash, coins)) {
                return error("%s: Missing input", __func__);
            }
            if (coins.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transactions confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coins.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid.
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1, we know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints' validity.
            int maxInputHeight = 0;
            BOOST_FOREACH(int height, prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
    unsigned int nSigOps = 0;
    for (const auto& txin : tx.vin)
    {
        nSigOps += txin.scriptSig.GetSigOpCount(false);
    }
    for (const auto& txout : tx.vout)
    {
        nSigOps += txout.scriptPubKey.GetSigOpCount(false);
    }
    return nSigOps;
}

unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
{
    if (tx.IsCoinBase())
        return 0;

    unsigned int nSigOps = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        if (prevout.scriptPubKey.IsPayToScriptHash())
            nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
    }
    return nSigOps;
}

int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
{
    int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;

    if (tx.IsCoinBase())
        return nSigOps;

    if (flags & SCRIPT_VERIFY_P2SH) {
        nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
    }

    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, i < tx.wit.vtxinwit.size() ? &tx.wit.vtxinwit[i].scriptWitness : NULL, flags);
    }
    return nSigOps;
}
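// Illustrative note (not part of the original file): legacy and P2SH sigops
// are multiplied by WITNESS_SCALE_FACTOR (4) so that they are expressed in the
// same units as witness sigops, mirroring how block weight scales non-witness
// bytes. For example, a transaction with 2 legacy sigops and 3 P2WPKH inputs
// would cost roughly 2 * 4 + 3 * 1 = 11 toward the block-wide sigop cost limit
// under this accounting.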
bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fCheckDuplicateInputs)
{
    // Basic checks that don't depend on any context
    if (tx.vin.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
    if (tx.vout.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
    // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");

    // Check for negative or overflow output values
    CAmount nValueOut = 0;
    for (const auto& txout : tx.vout)
    {
        if (txout.nValue < 0)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
        if (txout.nValue > MAX_MONEY)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
        nValueOut += txout.nValue;
        if (!MoneyRange(nValueOut))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
    }

    // Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock
    if (fCheckDuplicateInputs) {
        set<COutPoint> vInOutPoints;
        for (const auto& txin : tx.vin)
        {
            if (!vInOutPoints.insert(txin.prevout).second)
                return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
        }
    }

    if (tx.IsCoinBase())
    {
        if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
    }
    else
    {
        for (const auto& txin : tx.vin)
            if (txin.prevout.IsNull())
                return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
    }

    return true;
}
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
    int expired = pool.Expire(GetTime() - age);
    if (expired != 0)
        LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);

    std::vector<uint256> vNoSpendsRemaining;
    pool.TrimToSize(limit, &vNoSpendsRemaining);
    BOOST_FOREACH(const uint256& removed, vNoSpendsRemaining)
        pcoinsTip->Uncache(removed);
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
    return strprintf("%s%s (code %i)",
        state.GetRejectReason(),
        state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
        state.GetRejectCode());
}
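// Illustrative usage (not part of the original file): callers typically fold
// this into a log line or an error() return, e.g.
//
//     CValidationState state;
//     if (!AcceptToMemoryPool(mempool, state, tx, true, NULL))
//         LogPrint("mempool", "tx rejected: %s\n", FormatStateMessage(state));
//
// which might produce something like "mempool min fee not met, 150 < 2000 (code 66)".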
bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree,
                              bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee,
                              std::vector<uint256>& vHashTxnToUncache)
{
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    if (!CheckTransaction(tx, state))
        return false; // state filled in by CheckTransaction

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "coinbase");

    // Don't relay version 2 transactions until CSV is active, and we can be
    // sure that such transactions will be mined (unless we're on
    // -testnet/-regtest).
    const CChainParams& chainparams = Params();
    if (fRequireStandard && tx.nVersion >= 2 && VersionBitsTipState(chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV) != THRESHOLD_ACTIVE) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "premature-version2-tx");
    }

    // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
    bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus());
    if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !witnessEnabled) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
    }

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    string reason;
    if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
        return state.DoS(0, false, REJECT_NONSTANDARD, reason);

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
        return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");

    // is it already in the memory pool?
    if (pool.exists(hash))
        return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");

    // Check for conflicts with in-memory transactions
    set<uint256> setConflicts;
    {
    LOCK(pool.cs); // protect pool.mapNextTx
    BOOST_FOREACH(const CTxIn &txin, tx.vin)
    {
        auto itConflicting = pool.mapNextTx.find(txin.prevout);
        if (itConflicting != pool.mapNextTx.end())
        {
            const CTransaction *ptxConflicting = itConflicting->second;
            if (!setConflicts.count(ptxConflicting->GetHash()))
            {
                // Allow opt-out of transaction replacement by setting
                // nSequence >= maxint-1 on all inputs.
                //
                // maxint-1 is picked to still allow use of nLockTime by
                // non-replaceable transactions. All inputs rather than just one
                // is for the sake of multi-party protocols, where we don't
                // want a single party to be able to disable replacement.
                //
                // The opt-out ignores descendants as anyone relying on
                // first-seen mempool behavior should be checking all
                // unconfirmed ancestors anyway; doing otherwise is hopelessly
                // insecure.
                bool fReplacementOptOut = true;
                if (fEnableReplacement)
                {
                    BOOST_FOREACH(const CTxIn &_txin, ptxConflicting->vin)
                    {
                        if (_txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
                        {
                            fReplacementOptOut = false;
                            break;
                        }
                    }
                }
                if (fReplacementOptOut)
                    return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");

                setConflicts.insert(ptxConflicting->GetHash());
            }
        }
    }
    }
    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        CAmount nValueIn = 0;
        LockPoints lp;
        {
        LOCK(pool.cs);
        CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
        view.SetBackend(viewMemPool);

        // do we already have it?
        bool fHadTxInCache = pcoinsTip->HaveCoinsInCache(hash);
        if (view.HaveCoins(hash)) {
            if (!fHadTxInCache)
                vHashTxnToUncache.push_back(hash);
            return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known");
        }

        // do all inputs exist?
        // Note that this does not check for the presence of actual outputs (see the next check for that),
        // and only helps with filling in pfMissingInputs (to determine missing vs spent).
        BOOST_FOREACH(const CTxIn txin, tx.vin) {
            if (!pcoinsTip->HaveCoinsInCache(txin.prevout.hash))
                vHashTxnToUncache.push_back(txin.prevout.hash);
            if (!view.HaveCoins(txin.prevout.hash)) {
                if (pfMissingInputs)
                    *pfMissingInputs = true;
                return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
            }
        }

        // are the actual inputs available?
        if (!view.HaveInputs(tx))
            return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent");

        // Bring the best block into scope
        view.GetBestBlock();

        nValueIn = view.GetValueIn(tx);

        // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
        view.SetBackend(dummy);

        // Only accept BIP68 sequence locked transactions that can be mined in the next
        // block; we don't want our mempool filled up with transactions that can't
        // be mined yet.
        // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
        // CoinsViewCache instead of creating its own
        if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
            return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
        }

        // Check for non-standard pay-to-script-hash in inputs
        if (fRequireStandard && !AreInputsStandard(tx, view))
            return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");

        // Check for non-standard witness in P2WSH
        if (!tx.wit.IsNull() && fRequireStandard && !IsWitnessStandard(tx, view))
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);

        int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);

        CAmount nValueOut = tx.GetValueOut();
        CAmount nFees = nValueIn-nValueOut;
        // nModifiedFees includes any fee deltas from PrioritiseTransaction
        CAmount nModifiedFees = nFees;
        double nPriorityDummy = 0;
        pool.ApplyDeltas(hash, nPriorityDummy, nModifiedFees);

        CAmount inChainInputValue;
        double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue);

        // Keep track of transactions that spend a coinbase, which we re-scan
        // during reorgs to ensure COINBASE_MATURITY is still met.
        bool fSpendsCoinbase = false;
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            const CCoins *coins = view.AccessCoins(txin.prevout.hash);
            if (coins->IsCoinBase()) {
                fSpendsCoinbase = true;
                break;
            }
        }

        CTxMemPoolEntry entry(tx, nFees, nAcceptTime, dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
        unsigned int nSize = entry.GetTxSize();

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
                strprintf("%d", nSigOpsCost));

        CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
        if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
        } else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
            // Require that free transactions have sufficient priority to be mined in the next block.
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
        }

        // Continuously rate-limit free (really, very-low-fee) transactions
        // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
        // be annoying or make others' transactions take longer to confirm.
        if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize))
        {
            static CCriticalSection csFreeLimiter;
            static double dFreeCount;
            static int64_t nLastTime;
            int64_t nNow = GetTime();

            LOCK(csFreeLimiter);

            // Use an exponentially decaying ~10-minute window:
            dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
            nLastTime = nNow;
            // -limitfreerelay unit is thousand-bytes-per-minute
            // At default rate it would take over a month to fill 1GB
            if (dFreeCount + nSize >= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000)
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction");
            LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
            dFreeCount += nSize;
        }
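        // Illustrative worked example (not part of the original file): the decay
        // factor (1 - 1/600) per second gives a ~10 minute time constant, so after
        // t idle seconds the counter keeps (1 - 1/600)^t of its value -- roughly 37%
        // after 600 seconds and ~5% after 30 minutes:
        //
        //     double dExample = 10000.0;
        //     dExample *= pow(1.0 - 1.0/600.0, 600.0);   // ~3676 "bytes" of usage remain
        //
        // With -limitfreerelay at 15 (its historical default), the cap works out to
        // 15 * 10 * 1000 = 150,000 bytes of free transactions per window.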
        if (nAbsurdFee && nFees > nAbsurdFee)
            return state.Invalid(false,
                REJECT_HIGHFEE, "absurdly-high-fee",
                strprintf("%d > %d", nFees, nAbsurdFee));

        // Calculate in-mempool ancestors, up to a limit.
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
        }

        // A transaction that spends outputs that would be replaced by it is invalid. Now
        // that we have the set of all ancestors we can detect this
        // pathological case by making sure setConflicts and setAncestors don't
        // intersect.
        BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors)
        {
            const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
            if (setConflicts.count(hashAncestor))
            {
                return state.DoS(10, false,
                                 REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
                                 strprintf("%s spends conflicting transaction %s",
                                           hash.ToString(),
                                           hashAncestor.ToString()));
            }
        }

        // Check if it's economically rational to mine this transaction rather
        // than the ones it replaces.
        CAmount nConflictingFees = 0;
        size_t nConflictingSize = 0;
        uint64_t nConflictingCount = 0;
        CTxMemPool::setEntries allConflicting;

        // If we don't hold the lock allConflicting might be incomplete; the
        // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
        // mempool consistency for us.
        LOCK(pool.cs);
        if (setConflicts.size())
        {
            CFeeRate newFeeRate(nModifiedFees, nSize);
            set<uint256> setConflictsParents;
            const int maxDescendantsToVisit = 100;
            CTxMemPool::setEntries setIterConflicting;
            BOOST_FOREACH(const uint256 &hashConflicting, setConflicts)
            {
                CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
                if (mi == pool.mapTx.end())
                    continue;

                // Save these to avoid repeated lookups
                setIterConflicting.insert(mi);

                // Don't allow the replacement to reduce the feerate of the
                // mempool.
                //
                // We usually don't want to accept replacements with lower
                // feerates than what they replaced as that would lower the
                // feerate of the next block. Requiring that the feerate always
                // be increased is also an easy-to-reason about way to prevent
                // DoS attacks via replacements.
                //
                // The mining code doesn't (currently) take children into
                // account (CPFP) so we only consider the feerates of
                // transactions being directly replaced, not their indirect
                // descendants. While that does mean high feerate children are
                // ignored when deciding whether or not to replace, we do
                // require the replacement to pay more overall fees too,
                // mitigating most cases.
                CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
                if (newFeeRate <= oldFeeRate)
                {
                    return state.DoS(0, false,
                                     REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                     strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                                               hash.ToString(),
                                               newFeeRate.ToString(),
                                               oldFeeRate.ToString()));
                }

                BOOST_FOREACH(const CTxIn &txin, mi->GetTx().vin)
                {
                    setConflictsParents.insert(txin.prevout.hash);
                }

                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                BOOST_FOREACH(CTxMemPool::txiter it, setIterConflicting) {
                    pool.CalculateDescendants(it, allConflicting);
                }
                BOOST_FOREACH(CTxMemPool::txiter it, allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.DoS(0, false,
                                 REJECT_NONSTANDARD, "too many potential replacements", false,
                                 strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                                           hash.ToString(),
                                           nConflictingCount,
                                           maxDescendantsToVisit));
            }

            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
                        return state.DoS(0, false,
                                         REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
                                         strprintf("replacement %s adds unconfirmed input, idx %d",
                                                   hash.ToString(), j));
                }
            }

            // The replacement must pay greater fees than the transactions it
            // replaces - if it did not, the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                                           hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }

            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::minRelayTxFee.GetFee(nSize))
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                                           hash.ToString(),
                                           FormatMoney(nDeltaFees),
                                           FormatMoney(::minRelayTxFee.GetFee(nSize))));
            }
        }
1552 unsigned int scriptVerifyFlags
= STANDARD_SCRIPT_VERIFY_FLAGS
;
1553 if (!Params().RequireStandard()) {
1554 scriptVerifyFlags
= GetArg("-promiscuousmempoolflags", scriptVerifyFlags
);
1557 // Check against previous transactions
1558 // This is done last to help prevent CPU exhaustion denial-of-service attacks.
1559 PrecomputedTransactionData
txdata(tx
);
1560 if (!CheckInputs(tx
, state
, view
, true, scriptVerifyFlags
, true, txdata
)) {
1561 // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
1562 // need to turn both off, and compare against just turning off CLEANSTACK
1563 // to see if the failure is specifically due to witness validation.
1564 if (tx
.wit
.IsNull() && CheckInputs(tx
, state
, view
, true, scriptVerifyFlags
& ~(SCRIPT_VERIFY_WITNESS
| SCRIPT_VERIFY_CLEANSTACK
), true, txdata
) &&
1565 !CheckInputs(tx
, state
, view
, true, scriptVerifyFlags
& ~SCRIPT_VERIFY_CLEANSTACK
, true, txdata
)) {
1566 // Only the witness is missing, so the transaction itself may be fine.
1567 state
.SetCorruptionPossible();
1572 // Check again against just the consensus-critical mandatory script
1573 // verification flags, in case of bugs in the standard flags that cause
1574 // transactions to pass as valid when they're actually invalid. For
1575 // instance the STRICTENC flag was incorrectly allowing certain
1576 // CHECKSIG NOT scripts to pass, even though they were invalid.
1578 // There is a similar check in CreateNewBlock() to prevent creating
1579 // invalid blocks, however allowing such transactions into the mempool
1580 // can be exploited as a DoS attack.
1581 if (!CheckInputs(tx
, state
, view
, true, MANDATORY_SCRIPT_VERIFY_FLAGS
, true, txdata
))
1583 return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
1584 __func__
, hash
.ToString(), FormatStateMessage(state
));
1587 // Remove conflicting transactions from the mempool
1588 BOOST_FOREACH(const CTxMemPool::txiter it
, allConflicting
)
1590 LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
1591 it
->GetTx().GetHash().ToString(),
1593 FormatMoney(nModifiedFees
- nConflictingFees
),
1594 (int)nSize
- (int)nConflictingSize
);
1596 pool
.RemoveStaged(allConflicting
, false);
1598 // Store transaction in memory
1599 pool
.addUnchecked(hash
, entry
, setAncestors
, !IsInitialBlockDownload());
1601 // trim mempool and check if tx was trimmed
1602 if (!fOverrideMempoolLimit
) {
1603 LimitMempoolSize(pool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
1604 if (!pool
.exists(hash
))
1605 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "mempool full");
1609 GetMainSignals().SyncTransaction(tx
, NULL
, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK
);
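// Illustrative sketch (not part of the validation code above): the replacement
// rules enforced above reduce, for the fee portion, to two integer comparisons.
// The helper below is hypothetical and only demonstrates that arithmetic; the
// real checks additionally compare feerates and bound the descendant count.
#if 0
static bool ExampleReplacementPaysEnough(CAmount nModifiedFees, size_t nSize,
                                         CAmount nConflictingFees, const CFeeRate& minRelayFee)
{
    // Rule: the replacement must pay at least the fees of everything it evicts...
    if (nModifiedFees < nConflictingFees)
        return false;
    // ...plus enough extra to pay for relaying its own bytes.
    CAmount nDeltaFees = nModifiedFees - nConflictingFees;
    return nDeltaFees >= minRelayFee.GetFee(nSize);
}
#endif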
bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                                bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    std::vector<uint256> vHashTxToUncache;
    bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
    if (!res) {
        BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache)
            pcoinsTip->Uncache(hashTx);
    }
    // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
    CValidationState stateDummy;
    FlushStateToDisk(stateDummy, FLUSH_STATE_PERIODIC);
    return res;
}
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                        bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    return AcceptToMemoryPoolWithTime(pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee);
}
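// Illustrative sketch (hypothetical call site, not one of the real callers):
// a typical relay-path submission passes fLimitFree=true, leaves the override
// and absurd-fee knobs off, and inspects the CValidationState on rejection.
#if 0
static bool ExampleSubmitToMempool(const CTransaction& tx)
{
    CValidationState state;
    bool fMissingInputs = false;
    if (!AcceptToMemoryPool(mempool, state, tx, true /* fLimitFree */,
                            &fMissingInputs, false /* fOverrideMempoolLimit */, 0 /* nAbsurdFee */)) {
        LogPrintf("tx %s rejected: %s (missing inputs: %d)\n",
                  tx.GetHash().ToString(), FormatStateMessage(state), fMissingInputs);
        return false;
    }
    return true;
}
#endif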
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
    CBlockIndex *pindexSlow = NULL;

    LOCK(cs_main);

    CTransactionRef ptx = mempool.get(hash);
    if (ptx)
    {
        txOut = *ptx;
        return true;
    }

    if (fTxIndex) {
        CDiskTxPos postx;
        if (pblocktree->ReadTxIndex(hash, postx)) {
            CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
            if (file.IsNull())
                return error("%s: OpenBlockFile failed", __func__);
            CBlockHeader header;
            try {
                file >> header;
                fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                file >> txOut;
            } catch (const std::exception& e) {
                return error("%s: Deserialize or I/O error - %s", __func__, e.what());
            }
            hashBlock = header.GetHash();
            if (txOut.GetHash() != hash)
                return error("%s: txid mismatch", __func__);
            return true;
        }
    }

    if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
        int nHeight = -1;
        {
            const CCoinsViewCache& view = *pcoinsTip;
            const CCoins* coins = view.AccessCoins(hash);
            if (coins)
                nHeight = coins->nHeight;
        }
        if (nHeight > 0)
            pindexSlow = chainActive[nHeight];
    }

    if (pindexSlow) {
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
            for (const auto& tx : block.vtx) {
                if (tx->GetHash() == hash) {
                    txOut = *tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
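// Illustrative sketch (hypothetical caller): GetTransaction() first consults
// the mempool, then the optional -txindex, and only falls back to scanning a
// block from disk when fAllowSlow is set and the coins database still
// references the txid.
#if 0
static void ExampleLookupTx(const uint256& txid)
{
    CTransaction tx;
    uint256 hashBlock;
    if (GetTransaction(txid, tx, Params().GetConsensus(), hashBlock, true /* fAllowSlow */)) {
        LogPrintf("found %s%s\n", txid.ToString(),
                  hashBlock.IsNull() ? std::string(" (unconfirmed)") : " in block " + hashBlock.ToString());
    }
}
#endif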
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//

bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk: OpenBlockFile failed");

    // Write index header
    unsigned int nSize = GetSerializeSize(fileout, block);
    fileout << FLATDATA(messageStart) << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk: ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
        return false;
    if (block.GetHash() != pindex->GetBlockHash())
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                     pindex->ToString(), pindex->GetBlockPos().ToString());
    return true;
}
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
    int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
    // Force block reward to zero when right shift is undefined.
    if (halvings >= 64)
        return 0;

    CAmount nSubsidy = 50 * COIN;
    // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
    nSubsidy >>= halvings;
    return nSubsidy;
}
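// Illustrative sketch (not consensus code): the subsidy schedule above is a
// plain integer shift. Assuming a 210,000-block halving interval, height 0
// pays 50 * COIN, height 210,000 pays 25 * COIN, and from the 64th halving on
// the shift would be undefined, hence the early return of 0 above.
#if 0
static CAmount ExampleSubsidyAt(int nHeight, int nSubsidyHalvingInterval /* e.g. 210000 */)
{
    int halvings = nHeight / nSubsidyHalvingInterval;
    if (halvings >= 64) return 0;      // right shift would be undefined
    CAmount nSubsidy = 50 * COIN;
    return nSubsidy >> halvings;       // 50, 25, 12.5, ... BTC in satoshis
}
#endif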
bool IsInitialBlockDownload()
{
    const CChainParams& chainParams = Params();

    // Once this function has returned false, it must remain false.
    static std::atomic<bool> latchToFalse{false};
    // Optimization: pre-test latch before taking the lock.
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (chainActive.Tip() == NULL)
        return true;
    if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
        return true;
    if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    latchToFalse.store(true, std::memory_order_relaxed);
    return false;
}
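// Illustrative sketch: the latch above is the usual double-checked pattern --
// a relaxed atomic read on the fast path, re-checked under cs_main before the
// more expensive tip inspections, and flipped exactly once, never back. A
// hypothetical reduced version of the same idiom:
#if 0
static bool ExampleLatchedCheck(std::atomic<bool>& latch, CCriticalSection& cs, bool (*fStillInInitialState)())
{
    if (latch.load(std::memory_order_relaxed)) return false; // fast path, no lock
    LOCK(cs);
    if (latch.load(std::memory_order_relaxed)) return false; // re-check under lock
    if (fStillInInitialState()) return true;                 // not done yet
    latch.store(true, std::memory_order_relaxed);            // latch: stays false forever
    return false;
}
#endif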
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;

static void AlertNotify(const std::string& strMessage)
{
    uiInterface.NotifyAlertChanged();
    std::string strCmd = GetArg("-alertnotify", "");
    if (strCmd.empty()) return;

    // Alert text should be plain ascii coming from a trusted source, but to
    // be safe we first strip anything not in safeChars, then add single quotes around
    // the whole string before passing it to the shell:
    std::string singleQuote("'");
    std::string safeStatus = SanitizeString(strMessage);
    safeStatus = singleQuote+safeStatus+singleQuote;
    boost::replace_all(strCmd, "%s", safeStatus);

    boost::thread t(runCommand, strCmd); // thread runs free
}
void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial sync)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = NULL;

    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        if (!fLargeWorkForkFound && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            AlertNotify(warning);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            fLargeWorkForkFound = true;
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            fLargeWorkInvalidChainFound = true;
        }
    }
    else
    {
        fLargeWorkForkFound = false;
        fLargeWorkInvalidChainFound = false;
    }
}
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    while (pfork && pfork != plonger)
    {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }

    // We define a condition where we should warn the user about as a fork of at least 7 blocks
    // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 72)
    {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }

    CheckForkWarningConditions();
}
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
    if (howmuch == 0)
        return;

    CNodeState *state = State(pnode);
    if (state == NULL)
        return;

    state->nMisbehavior += howmuch;
    int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
    if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
    {
        LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
        state->fShouldBan = true;
    } else
        LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
}
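// Illustrative sketch (hypothetical call site): callers hold cs_main and add a
// score per offence; crossing -banscore (DEFAULT_BANSCORE_THRESHOLD by default)
// flips fShouldBan exactly once thanks to the "- howmuch < banscore" test above.
#if 0
static void ExamplePenalizePeer(NodeId nodeid)
{
    LOCK(cs_main);
    Misbehaving(nodeid, 20); // e.g. five such offences cross a banscore of 100
}
#endif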
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("%s: invalid block=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
      log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
      pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    assert (tip);
    LogPrintf("%s:  current best=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
      tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
    CheckForkWarningConditions();
}

void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
            unsigned nPos = txin.prevout.n;

            if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
                assert(false);
            // mark an outpoint spent, and construct undo information
            txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
            coins->Spend(nPos);
            if (coins->vout.size() == 0) {
                CTxInUndo& undo = txundo.vprevout.back();
                undo.nHeight = coins->nHeight;
                undo.fCoinBase = coins->fCoinBase;
                undo.nVersion = coins->nVersion;
            }
        }
    }
    // add outputs
    inputs.ModifyNewCoins(tx.GetHash(), tx.IsCoinBase())->FromTx(tx, nHeight);
}

void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
    CTxUndo txundo;
    UpdateCoins(tx, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    const CScriptWitness *witness = (nIn < ptxTo->wit.vtxinwit.size()) ? &ptxTo->wit.vtxinwit[nIn].scriptWitness : NULL;
    if (!VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error)) {
        return false;
    }
    return true;
}

int GetSpendHeight(const CCoinsViewCache& inputs)
{
    LOCK(cs_main);
    CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
    return pindexPrev->nHeight + 1;
}
namespace Consensus {
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
{
    // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
    // for an attacker to attempt to split the network.
    if (!inputs.HaveInputs(tx))
        return state.Invalid(false, 0, "", "Inputs unavailable");

    CAmount nValueIn = 0;
    CAmount nFees = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const COutPoint &prevout = tx.vin[i].prevout;
        const CCoins *coins = inputs.AccessCoins(prevout.hash);
        assert(coins);

        // If prev is coinbase, check that it's matured
        if (coins->IsCoinBase()) {
            if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
                return state.Invalid(false,
                    REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
                    strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
        }

        // Check for negative or overflow input values
        nValueIn += coins->vout[prevout.n].nValue;
        if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");
    }

    if (nValueIn < tx.GetValueOut())
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
            strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));

    // Tally transaction fees
    CAmount nTxFee = nValueIn - tx.GetValueOut();
    if (nTxFee < 0)
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
    nFees += nTxFee;
    if (!MoneyRange(nFees))
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
    return true;
}
}// namespace Consensus
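// Illustrative sketch: the value checks above are plain 64-bit accounting,
// bounded by MoneyRange() per input and on the running sum. A hypothetical
// standalone version of the same arithmetic on raw CAmount values:
#if 0
static bool ExampleFeeFromValues(const std::vector<CAmount>& vInputValues, CAmount nValueOut, CAmount& nFeeOut)
{
    CAmount nValueIn = 0;
    for (CAmount v : vInputValues) {
        nValueIn += v;
        if (!MoneyRange(v) || !MoneyRange(nValueIn)) return false; // negative or overflow
    }
    if (nValueIn < nValueOut) return false; // corresponds to "bad-txns-in-belowout"
    nFeeOut = nValueIn - nValueOut;         // never negative at this point
    return MoneyRange(nFeeOut);
}
#endif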
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
    if (!tx.IsCoinBase())
    {
        if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
            return false;

        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks before the
        // last block chain checkpoint. Assuming the checkpoints are valid this
        // is safe because block merkle hashes are still computed and checked,
        // and any change will be caught at the next checkpoint. Of course, if
        // the checkpoint is for a chain that's invalid due to false scriptSigs
        // this optimization would allow an invalid chain to be accepted.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);
                assert(coins);

                // Verify signature
                CScriptCheck check(*coins, tx, i, flags, cacheStore, &txdata);
                if (pvChecks) {
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(*coins, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, &txdata);
                        if (check2())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after soft-fork
                    // super-majority signaling has occurred.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
        }
    }

    return true;
}
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Write index header
    unsigned int nSize = GetSerializeSize(fileout, blockundo);
    fileout << FLATDATA(messageStart) << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}
bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
{
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Read block
    uint256 hashChecksum;
    try {
        filein >> blockundo;
        filein >> hashChecksum;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    if (hashChecksum != hasher.GetHash())
        return error("%s: Checksum mismatch", __func__);

    return true;
}
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
    strMiscWarning = strMessage;
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}

bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
/**
 * Apply the undo operation of a CTxInUndo to the given chain state.
 * @param undo The undo object.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return True on success.
 */
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    CCoinsModifier coins = view.ModifyCoins(out.hash);
    if (undo.nHeight != 0) {
        // undo data contains height: this is the last output of the prevout tx being spent
        if (!coins->IsPruned())
            fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
        coins->Clear();
        coins->fCoinBase = undo.fCoinBase;
        coins->nHeight = undo.nHeight;
        coins->nVersion = undo.nVersion;
    } else {
        if (coins->IsPruned())
            fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
    }
    if (coins->IsAvailable(out.n))
        fClean = fClean && error("%s: undo data overwriting existing output", __func__);
    if (coins->vout.size() < out.n+1)
        coins->vout.resize(out.n+1);
    coins->vout[out.n] = undo.txout;

    return fClean;
}
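// Illustrative note: undo entries come in two shapes. Only the entry that
// restores the last remaining output of a prevout transaction carries the full
// metadata (nHeight, fCoinBase, nVersion); earlier entries store just the
// txout, because the surviving CCoins record still holds that metadata. A
// hypothetical check of the convention used above:
#if 0
static bool ExampleUndoRestoresWholeTx(const CTxInUndo& undo)
{
    // nHeight != 0 marks "this entry recreates the whole prevout transaction".
    return undo.nHeight != 0;
}
#endif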
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    if (pfClean)
        *pfClean = false;

    bool fClean = true;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock(): no undo data available");
    if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock(): failure reading undo data");

    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock(): block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = *(block.vtx[i]);
        uint256 hash = tx.GetHash();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        {
            CCoinsModifier outs = view.ModifyCoins(hash);
            outs->ClearUnspendable();

            CCoins outsBlock(tx, pindex->nHeight);
            // The CCoins serialization does not serialize negative numbers.
            // No network rules currently depend on the version here, so an inconsistency is harmless
            // but it must be corrected before txout nversion ever influences a network rule.
            if (outsBlock.nVersion < 0)
                outs->nVersion = outsBlock.nVersion;
            if (*outs != outsBlock)
                fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");

            // remove outputs
            outs->Clear();
        }

        // restore inputs
        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock(): transaction and undo data inconsistent");
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                if (!ApplyTxInUndo(undo, view, out))
                    fClean = false;
            }
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    if (pfClean) {
        *pfClean = fClean;
        return true;
    }

    return fClean;
}
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE *fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }

    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }
}

bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);

static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
}
// Protected by cs_main
VersionBitsCache versionbitscache;

int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    int32_t nVersion = VERSIONBITS_TOP_BITS;

    for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
        ThresholdState state = VersionBitsState(pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
        if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
            nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
        }
    }

    return nVersion;
}
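// Illustrative sketch: the version assembled above is the BIP9 prefix
// VERSIONBITS_TOP_BITS with one bit set per deployment that is STARTED or
// LOCKED_IN. A hypothetical helper showing how a single deployment bit in a
// block version would be tested:
#if 0
static bool ExampleSignalsBit(int32_t nVersion, int bit)
{
    return ((nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && // BIP9 version prefix
           (((nVersion >> bit) & 1) != 0);                                // deployment bit set
}
#endif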
/**
 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
 */
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
    int bit;

public:
    WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}

    int64_t BeginTime(const Consensus::Params& params) const { return 0; }
    int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }

    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
    {
        return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
    }
};

// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];

static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                  CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
{
    AssertLockHeld(cs_main);

    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck)
            view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    bool fScriptChecks = true;
    if (fCheckpointsEnabled) {
        CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
        if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
            // This block is an ancestor of a checkpoint: disable script checks
            fScriptChecks = false;
        }
    }

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint("bench", "    - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
                          !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                            (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
    CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
    // Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));

    if (fEnforceBIP30) {
        for (const auto& tx : block.vtx) {
            const CCoins* coins = view.AccessCoins(tx->GetHash());
            if (coins && !coins->IsPruned())
                return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                                 REJECT_INVALID, "bad-txns-BIP30");
        }
    }

    // BIP16 didn't become active until Apr 1 2012
    int64_t nBIP16SwitchTime = 1333238400;
    bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);

    unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

    // Start enforcing the DERSIG (BIP66) rule
    if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
    if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    // Start enforcing WITNESS rules using versionbits logic.
    if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_WITNESS;
        flags |= SCRIPT_VERIFY_NULLDUMMY;
    }

    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint("bench", "    - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);

    CBlockUndo blockundo;

    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    std::vector<int> prevheights;
    CAmount nFees = 0;
    int nInputs = 0;
    int64_t nSigOpsCost = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    std::vector<PrecomputedTransactionData> txdata;
    txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = *(block.vtx[i]);

        nInputs += tx.vin.size();

        if (!tx.IsCoinBase())
        {
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
                                 REJECT_INVALID, "bad-txns-inputs-missingorspent");

            // Check that transaction is BIP68 final
            // BIP68 lock checks (as opposed to nLockTime checks) must
            // be in ConnectBlock because they require the UTXO set
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;
            }

            if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
                                 REJECT_INVALID, "bad-txns-nonfinal");
            }
        }

        // GetTransactionSigOpCost counts 3 types of sigops:
        // * legacy (always)
        // * p2sh (when P2SH enabled in flags and excludes coinbase)
        // * witness (when witness enabled in flags and excludes coinbase)
        nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
        if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        txdata.emplace_back(tx);
        if (!tx.IsCoinBase())
        {
            nFees += view.GetValueIn(tx)-tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                    tx.GetHash().ToString(), FormatStateMessage(state));
            control.Add(vChecks);
        }

        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }
    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint("bench", "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);

    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0]->GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0]->GetValueOut(), blockReward),
                         REJECT_INVALID, "bad-cb-amount");

    if (!control.Wait())
        return state.DoS(100, false);
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint("bench", "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);

    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
    {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos _pos;
            if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock(): FindUndoPos failed");
            if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
                return AbortNode(state, "Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = _pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return AbortNode(state, "Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint("bench", "    - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
    static uint256 hashPrevBestCoinBase;
    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
    hashPrevBestCoinBase = block.vtx[0]->GetHash();

    int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
    LogPrint("bench", "    - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);

    return true;
}
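// Illustrative sketch: ConnectBlock() builds the script verification flags
// incrementally from the softfork activation state of the block being
// connected. A hypothetical reduced version of that composition (the booleans
// are assumptions supplied by the caller, not real activation checks):
#if 0
static unsigned int ExampleBlockScriptFlags(bool fP2SH, bool fDERSIG, bool fCLTV, bool fCSV, bool fWitness)
{
    unsigned int flags = fP2SH ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;       // BIP16
    if (fDERSIG)  flags |= SCRIPT_VERIFY_DERSIG;                                 // BIP66
    if (fCLTV)    flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;                    // BIP65
    if (fCSV)     flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;                    // BIP112
    if (fWitness) flags |= SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_NULLDUMMY;      // segwit
    return flags;
}
#endif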
/**
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with
 * if they're too large, if it's been a while since the last write,
 * or always and in all cases if we're in prune mode and are deleting files.
 */
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
    const CChainParams& chainparams = Params();
    LOCK2(cs_main, cs_LastBlockFile);
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
    bool fFlushForPrune = false;
    try {
    if (fPruneMode && fCheckForPruning && !fReindex) {
        FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
        fCheckForPruning = false;
        if (!setFilesToPrune.empty()) {
            fFlushForPrune = true;
            if (!fHavePruned) {
                pblocktree->WriteFlag("prunedblockfiles", true);
                fHavePruned = true;
            }
        }
    }
    int64_t nNow = GetTimeMicros();
    // Avoid writing/flushing immediately after startup.
    if (nLastWrite == 0) {
        nLastWrite = nNow;
    }
    if (nLastFlush == 0) {
        nLastFlush = nNow;
    }
    if (nLastSetChain == 0) {
        nLastSetChain = nNow;
    }
    size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
    // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
    bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
    // The cache is over the limit, we have to write now.
    bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
    // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
    bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
    // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
    bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
    // Combine all conditions that result in a full cache flush.
    bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
    // Write blocks and block index to disk.
    if (fDoFullFlush || fPeriodicWrite) {
        // Depend on nMinDiskSpace to ensure we can write block index
        if (!CheckDiskSpace(0))
            return state.Error("out of disk space");
        // First make sure all block and undo data is flushed to disk.
        FlushBlockFile();
        // Then update all block file information (which may refer to block and undo files).
        {
            std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
            vFiles.reserve(setDirtyFileInfo.size());
            for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
                setDirtyFileInfo.erase(it++);
            }
            std::vector<const CBlockIndex*> vBlocks;
            vBlocks.reserve(setDirtyBlockIndex.size());
            for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                vBlocks.push_back(*it);
                setDirtyBlockIndex.erase(it++);
            }
            if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                return AbortNode(state, "Files to write to block index database");
            }
        }
        // Finally remove any pruned files
        if (fFlushForPrune)
            UnlinkPrunedFiles(setFilesToPrune);
        nLastWrite = nNow;
    }
    // Flush best chain related state. This can only be done if the blocks / block index write was also done.
    if (fDoFullFlush) {
        // Typical CCoins structures on disk are around 128 bytes in size.
        // Pushing a new one to the database can cause it to be written
        // twice (once in the log, and once in the tables). This is already
        // an overestimation, as most will delete an existing entry or
        // overwrite one. Still, use a conservative safety factor of 2.
        if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
            return state.Error("out of disk space");
        // Flush the chainstate (which may refer to block index entries).
        if (!pcoinsTip->Flush())
            return AbortNode(state, "Failed to write to coin database");
        nLastFlush = nNow;
    }
    if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().SetBestChain(chainActive.GetLocator());
        nLastSetChain = nNow;
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}

void FlushStateToDisk() {
    CValidationState state;
    FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}

void PruneAndFlush() {
    CValidationState state;
    fCheckForPruning = true;
    FlushStateToDisk(state, FLUSH_STATE_NONE);
}
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
    chainActive.SetTip(pindexNew);

    // New best block
    mempool.AddTransactionsUpdated(1);

    cvBlockChange.notify_all();

    static bool fWarned = false;
    std::vector<std::string> warningMessages;
    if (!IsInitialBlockDownload())
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = chainActive.Tip();
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
                if (state == THRESHOLD_ACTIVE) {
                    strMiscWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                    if (!fWarned) {
                        AlertNotify(strMiscWarning);
                        fWarned = true;
                    }
                } else {
                    warningMessages.push_back(strprintf("unknown new rules are about to activate (versionbit %i)", bit));
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != NULL; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            warningMessages.push_back(strprintf("%d of last 100 blocks have unexpected version", nUpgraded));
        if (nUpgraded > 100/2)
        {
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
            if (!fWarned) {
                AlertNotify(strMiscWarning);
                fWarned = true;
            }
        }
    }
    LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utx)", __func__,
      chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion,
      log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
      Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
    if (!warningMessages.empty())
        LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", "));
    LogPrintf("\n");
}
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
bool static DisconnectTip(CValidationState& state, const CChainParams& chainparams, bool fBare = false)
{
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    CBlock block;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    }
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;

    if (!fBare) {
        // Resurrect mempool transactions from the disconnected block.
        std::vector<uint256> vHashUpdate;
        for (const auto& it : block.vtx) {
            const CTransaction& tx = *it;
            // ignore validation errors in resurrected transactions
            CValidationState stateDummy;
            if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
                mempool.removeRecursive(tx);
            } else if (mempool.exists(tx.GetHash())) {
                vHashUpdate.push_back(tx.GetHash());
            }
        }
        // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
        // no in-mempool children, which is generally not true when adding
        // previously-confirmed transactions back to the mempool.
        // UpdateTransactionsFromBlock finds descendants of any transactions in this
        // block that were added back and cleans up the mempool state.
        mempool.UpdateTransactionsFromBlock(vHashUpdate);
    }

    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev, chainparams);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    for (const auto& tx : block.vtx) {
        GetMainSignals().SyncTransaction(*tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
    }
    return true;
}
= 0;
2819 static int64_t nTimeConnectTotal
= 0;
2820 static int64_t nTimeFlush
= 0;
2821 static int64_t nTimeChainState
= 0;
2822 static int64_t nTimePostConnect
= 0;
2825 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
2826 * corresponding to pindexNew, to bypass loading it again from disk.
2828 bool static ConnectTip(CValidationState
& state
, const CChainParams
& chainparams
, CBlockIndex
* pindexNew
, const CBlock
* pblock
, std::vector
<CTransactionRef
> &txConflicted
, std::vector
<std::tuple
<CTransactionRef
,CBlockIndex
*,int>> &txChanged
)
2830 assert(pindexNew
->pprev
== chainActive
.Tip());
2831 // Read block from disk.
2832 int64_t nTime1
= GetTimeMicros();
2835 if (!ReadBlockFromDisk(block
, pindexNew
, chainparams
.GetConsensus()))
2836 return AbortNode(state
, "Failed to read block");
2839 // Apply the block atomically to the chain state.
2840 int64_t nTime2
= GetTimeMicros(); nTimeReadFromDisk
+= nTime2
- nTime1
;
2842 LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2
- nTime1
) * 0.001, nTimeReadFromDisk
* 0.000001);
2844 CCoinsViewCache
view(pcoinsTip
);
2845 bool rv
= ConnectBlock(*pblock
, state
, pindexNew
, view
, chainparams
);
2846 GetMainSignals().BlockChecked(*pblock
, state
);
2848 if (state
.IsInvalid())
2849 InvalidBlockFound(pindexNew
, state
);
2850 return error("ConnectTip(): ConnectBlock %s failed", pindexNew
->GetBlockHash().ToString());
2852 nTime3
= GetTimeMicros(); nTimeConnectTotal
+= nTime3
- nTime2
;
2853 LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3
- nTime2
) * 0.001, nTimeConnectTotal
* 0.000001);
2854 assert(view
.Flush());
2856 int64_t nTime4
= GetTimeMicros(); nTimeFlush
+= nTime4
- nTime3
;
2857 LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4
- nTime3
) * 0.001, nTimeFlush
* 0.000001);
2858 // Write the chain state to disk, if necessary.
2859 if (!FlushStateToDisk(state
, FLUSH_STATE_IF_NEEDED
))
2861 int64_t nTime5
= GetTimeMicros(); nTimeChainState
+= nTime5
- nTime4
;
2862 LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5
- nTime4
) * 0.001, nTimeChainState
* 0.000001);
2863 // Remove conflicting transactions from the mempool.;
2864 mempool
.removeForBlock(pblock
->vtx
, pindexNew
->nHeight
, &txConflicted
, !IsInitialBlockDownload());
2865 // Update chainActive & related variables.
2866 UpdateTip(pindexNew
, chainparams
);
2868 for (unsigned int i
=0; i
< pblock
->vtx
.size(); i
++)
2869 txChanged
.emplace_back(pblock
->vtx
[i
], pindexNew
, i
);
2871 int64_t nTime6
= GetTimeMicros(); nTimePostConnect
+= nTime6
- nTime5
; nTimeTotal
+= nTime6
- nTime1
;
2872 LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6
- nTime5
) * 0.001, nTimePostConnect
* 0.000001);
2873 LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6
- nTime1
) * 0.001, nTimeTotal
* 0.000001);
/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 */
static CBlockIndex* FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = NULL;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return NULL;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
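// Illustrative sketch: setBlockIndexCandidates is ordered by
// CBlockIndexWorkComparator, so value_comp()(a, b) above means "a sorts below
// b" (less chain work, or a later nSequenceId at equal work). Pruning
// everything that sorts below the tip keeps only the tip and candidates that
// could still beat it. A hypothetical standalone comparison on raw work alone:
#if 0
static bool ExampleWorseThanTip(const arith_uint256& candidateWork, const arith_uint256& tipWork)
{
    return candidateWork < tipWork; // cannot improve on the current best chain
}
#endif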
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
 */
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock, bool& fInvalidFound, std::vector<CTransactionRef>& txConflicted, std::vector<std::tuple<CTransactionRef,CBlockIndex*,int>>& txChanged)
{
    AssertLockHeld(cs_main);
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams))
            return false;
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL, txConflicted, txChanged)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
        LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
    }
    mempool.check(pcoinsTip);

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
static void NotifyHeaderTip() {
    bool fNotify = false;
    bool fInitialBlockDownload = false;
    static CBlockIndex* pindexHeaderOld = NULL;
    CBlockIndex* pindexHeader = NULL;
    {
        LOCK(cs_main);
        pindexHeader = pindexBestHeader;

        if (pindexHeader != pindexHeaderOld) {
            fNotify = true;
            fInitialBlockDownload = IsInitialBlockDownload();
            pindexHeaderOld = pindexHeader;
        }
    }
    // Send block tip changed notifications without cs_main
    if (fNotify) {
        uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
    }
}
/**
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 */
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
    CBlockIndex *pindexMostWork = NULL;
    CBlockIndex *pindexNewTip = NULL;
    std::vector<std::tuple<CTransactionRef,CBlockIndex*,int>> txChanged;
    if (pblock)
        txChanged.reserve(pblock->vtx.size());
    do {
        boost::this_thread::interruption_point();
        if (ShutdownRequested())
            break;

        const CBlockIndex *pindexFork;
        std::vector<CTransactionRef> txConflicted;
        bool fInitialDownload;
        {
            LOCK(cs_main);
            CBlockIndex *pindexOldTip = chainActive.Tip();
            if (pindexMostWork == NULL) {
                pindexMostWork = FindMostWorkChain();
            }

            // Whether we have anything to do at all.
            if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
                return true;

            bool fInvalidFound = false;
            if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL, fInvalidFound, txConflicted, txChanged))
                return false;

            if (fInvalidFound) {
                // Wipe cache, we may need another branch now.
                pindexMostWork = NULL;
            }
            pindexNewTip = chainActive.Tip();
            pindexFork = chainActive.FindFork(pindexOldTip);
            fInitialDownload = IsInitialBlockDownload();
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main

        // throw all transactions through the signal-interface
        // while _not_ holding the cs_main lock
        for (const auto& tx : txConflicted)
            GetMainSignals().SyncTransaction(*tx, pindexNewTip, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
        // ... and about transactions that got confirmed:
        for (unsigned int i = 0; i < txChanged.size(); i++)
            GetMainSignals().SyncTransaction(*std::get<0>(txChanged[i]), std::get<1>(txChanged[i]), std::get<2>(txChanged[i]));

        // Notify external listeners about the new tip.
        GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

        // Always notify the UI if a new block tip was connected
        if (pindexFork != pindexNewTip) {
            uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
        }
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
        return false;
    }

    return true;
}
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = chainActive.Tip()->nChainWork;
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    return ActivateBestChain(state, params);
}
bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
    AssertLockHeld(cs_main);

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);

    while (chainActive.Contains(pindex)) {
        CBlockIndex *pindexWalk = chainActive.Tip();
        pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(pindexWalk);
        setBlockIndexCandidates.erase(pindexWalk);
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state, chainparams)) {
            mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
            return false;
        }
    }

    LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    // add it again.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);
        }
        it++;
    }

    InvalidChainFound(pindex);
    mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
    uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
    return true;
}
bool ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = NULL;
            }
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != NULL) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
        }
        pindex = pindex->pprev;
    }
    return true;
}
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);
    return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, Params().GetConsensus())) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }

    return true;
}
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    if (!fKnown) {
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return state.Error("out of disk space");
        }
    }

    setDirtyFileInfo.insert(nFile);
    return true;
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    unsigned int nNewSize;
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);

    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }

    return true;
}
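
// Note: FindBlockPos and FindUndoPos above both grow their files in whole
// BLOCKFILE_CHUNK_SIZE / UNDOFILE_CHUNK_SIZE increments via AllocateFileRange,
// so CheckDiskSpace only needs to be consulted when a new chunk is required
// rather than on every block or undo record written.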
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW)
{
    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");

    return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.
    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits
    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");

    // Check transactions
    for (const auto& tx : block.vtx)
        if (!CheckTransaction(*tx, state, false))
            return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));

    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
        nSigOps += GetLegacySigOpCount(*tx);
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");

    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
{
    if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
        return true;

    int nHeight = pindexPrev->nHeight+1;
    // Don't accept any forks from the main chain prior to last checkpoint
    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
    if (pcheckpoint && nHeight < pcheckpoint->nHeight)
        return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));

    return true;
}
bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE);
}
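
// Note: VersionBitsState implements the BIP9 deployment state machine;
// THRESHOLD_ACTIVE means the segwit deployment has locked in and its rules
// apply to blocks built on top of pindexPrev.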
// Compute at which vout of the block's coinbase transaction the witness
// commitment occurs, or -1 if not found.
static int GetWitnessCommitmentIndex(const CBlock& block)
{
    int commitpos = -1;
    for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
        if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
            commitpos = o;
        }
    }
    return commitpos;
}
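
// Note on the byte pattern above: scriptPubKey[0] == OP_RETURN, [1] == 0x24 is
// the push opcode for the 36 bytes that follow, and bytes [2..5] == {0xaa, 0x21,
// 0xa9, 0xed} are the BIP141 commitment header; the remaining 32 bytes carry the
// witness commitment hash.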
void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    int commitpos = GetWitnessCommitmentIndex(block);
    static const std::vector<unsigned char> nonce(32, 0x00);
    if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0]->wit.IsEmpty()) {
        CMutableTransaction tx(*block.vtx[0]);
        tx.wit.vtxinwit.resize(1);
        tx.wit.vtxinwit[0].scriptWitness.stack.resize(1);
        tx.wit.vtxinwit[0].scriptWitness.stack[0] = nonce;
        block.vtx[0] = MakeTransactionRef(std::move(tx));
    }
}
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    std::vector<unsigned char> ret(32, 0x00);
    if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) {
        if (commitpos == -1) {
            uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL);
            CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin());
            CTxOut out;
            out.nValue = 0;
            out.scriptPubKey.resize(38);
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            const_cast<std::vector<CTxOut>*>(&block.vtx[0]->vout)->push_back(out);
            block.vtx[0]->UpdateHash();
        }
    }
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
    // Check proof of work
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");

    // Check timestamp
    if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
        return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");

    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    // check for version 2, 3 and 4 upgrades
    if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
       (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
       (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
            return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
                                 strprintf("rejected nVersion=0x%08x block", block.nVersion));

    return true;
}
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;

    // Start enforcing BIP113 (Median Time Past) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != -1) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->wit.vtxinwit.size() != 1 || block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__));
            }
            CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
        for (size_t i = 0; i < block.vtx.size(); i++) {
            if (!block.vtx[i]->wit.IsNull()) {
                return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness nonce and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
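
// Note: block weight per BIP141 is (3 * base size + total size), so witness
// bytes count at a quarter of the rate of non-witness bytes toward MAX_BLOCK_WEIGHT.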
static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = NULL;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {

        if (miSelf != mapBlockIndex.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK)
                return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
            return true;
        }

        if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));

        // Get prev block index
        CBlockIndex* pindexPrev = NULL;
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");

        if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
            return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

        if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
    }
    if (pindex == NULL)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    CheckBlockIndex(chainparams.GetConsensus());

    return true;
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);

    CBlockIndex *pindexDummy = NULL;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: Decouple this function from the block download logic by removing fRequested
    // This requires some new chain datastructure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) {  // If we didn't ask for it:
        if (pindex->nTx != 0) return true;  // This is a previously-processed block that was pruned
        if (!fHasMoreWork) return true;     // Don't process less-work chains
        if (fTooFarAhead) return true;      // Block height is too high
    }
    if (fNewBlock) *fNewBlock = true;

    if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, FormatStateMessage(state));
    }

    int nHeight = pindex->nHeight;

    // Write block to history file
    try {
        unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
        CDiskBlockPos blockPos;
        if (dbp != NULL)
            blockPos = *dbp;
        if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
            return error("AcceptBlock(): FindBlockPos failed");
        if (dbp == NULL)
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                AbortNode(state, "Failed to write block");
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
            return error("AcceptBlock(): ReceivedBlockTransactions failed");
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    if (fCheckForPruning)
        FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files

    return true;
}
bool ProcessNewBlock(const CChainParams& chainparams, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool *fNewBlock)
{
    {
        LOCK(cs_main);

        CBlockIndex *pindex = NULL;
        if (fNewBlock) *fNewBlock = false;
        CValidationState state;
        bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, dbp, fNewBlock);
        CheckBlockIndex(chainparams.GetConsensus());
        if (!ret) {
            GetMainSignals().BlockChecked(*pblock, state);
            return error("%s: AcceptBlock FAILED", __func__);
        }
    }

    NotifyHeaderTip();

    CValidationState state; // Only used to report errors, not invalidity - ignore it
    if (!ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed", __func__);

    return true;
}
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == chainActive.Tip());
    if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
        return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

    CCoinsViewCache viewNew(pcoinsTip);
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
        return false;
    assert(state.IsValid());

    return true;
}
/**
 * BLOCK PRUNING CODE
 */

/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
    uint64_t retval = 0;
    BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
        CBlockIndex* pindex = it->second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    mapBlocksUnlinked.erase(_it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
{
    for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        CDiskBlockPos pos(*it, 0);
        boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
        boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
        LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
    }
}
/* Calculate the block/rev files that should be deleted to remain under target*/
void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == NULL || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;

            if (nCurrentUsage + nBuffer < nPruneTarget)  // are we below our target?
                break;

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
           nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
           ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
           nLastBlockWeCanPrune, count);
}
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
    uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
        return AbortNode("Disk space is low!", _("Error: Disk space is low!"));

    return true;
}
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return NULL;
    boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
    boost::filesystem::create_directories(path.parent_path());
    FILE* file = fopen(path.string().c_str(), "rb+");
    if (!file && !fReadOnly)
        file = fopen(path.string().c_str(), "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return NULL;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return NULL;
        }
    }
    return file;
}

FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}

FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}

boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
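
// Note: with the format string above, block data lives in files named
// blk00000.dat, blk00001.dat, ... and the matching undo data in rev00000.dat,
// rev00001.dat, ... inside the "blocks" directory of the datadir.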
CBlockIndex * InsertBlockIndex(uint256 hash)
{
    if (hash.IsNull())
        return NULL;

    // Return existing
    BlockMap::iterator mi = mapBlockIndex.find(hash);
    if (mi != mapBlockIndex.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    if (!pindexNew)
        throw runtime_error(std::string(__func__) + ": new CBlockIndex failed");
    mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}
bool static LoadBlockIndexDB(const CChainParams& chainparams)
{
    if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex))
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    set<int> setBlkDataFiles;
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return true;
    chainActive.SetTip(it->second);

    PruneBlockIndexCandidates();

    LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));

    return true;
}
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}

CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100);
}
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = NULL;
    int nGoodTransactions = 0;
    CValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]...");
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone);
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone);
        if (pindex->nHeight < chainActive.Height()-nCheckDepth)
            break;
        if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, only go back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            bool fClean = true;
            if (!DisconnectBlock(block, state, pindex, coins, &fClean))
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            pindexState = pindex->pprev;
            if (!fClean) {
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!ConnectBlock(block, state, pindex, coins, chainparams))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}
bool RewindBlockIndex(const CChainParams& params)
{
    LOCK(cs_main);

    int nHeight = 1;
    while (nHeight <= chainActive.Height()) {
        if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
            break;
        }
        nHeight++;
    }

    // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
    CValidationState state;
    CBlockIndex* pindex = chainActive.Tip();
    while (chainActive.Height() >= nHeight) {
        if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, don't try rewinding past the HAVE_DATA point;
            // since older blocks can't be served anyway, there's
            // no need to walk further, and trying to DisconnectTip()
            // will fail (and require a needless reindex/redownload
            // of the blockchain).
            break;
        }
        if (!DisconnectTip(state, params, true)) {
            return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
        }
        // Occasionally flush state to disk.
        if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
            return false;
    }

    // Reduce validity flag and have-data flags.
    // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
    // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        CBlockIndex* pindexIter = it->second;

        // Note: If we encounter an insufficiently validated block that
        // is on chainActive, it must be because we are a pruning node, and
        // this block or some successor doesn't HAVE_DATA, so we were unable to
        // rewind all the way. Blocks remaining on chainActive at this point
        // must not have their validity reduced.
        if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
            // Reduce validity
            pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
            // Remove have-data flags.
            pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
            // Remove storage location.
            pindexIter->nFile = 0;
            pindexIter->nDataPos = 0;
            pindexIter->nUndoPos = 0;
            // Remove various other things
            pindexIter->nTx = 0;
            pindexIter->nChainTx = 0;
            pindexIter->nSequenceId = 0;
            // Make sure it gets written.
            setDirtyBlockIndex.insert(pindexIter);
            setBlockIndexCandidates.erase(pindexIter);
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
            while (ret.first != ret.second) {
                if (ret.first->second == pindexIter) {
                    mapBlocksUnlinked.erase(ret.first++);
                } else {
                    ++ret.first;
                }
            }
        } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
            setBlockIndexCandidates.insert(pindexIter);
        }
    }

    PruneBlockIndexCandidates();

    CheckBlockIndex(params.GetConsensus());

    if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
        return false;
    }

    return true;
}
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
void UnloadBlockIndex()
{
    LOCK(cs_main);
    setBlockIndexCandidates.clear();
    chainActive.SetTip(NULL);
    pindexBestInvalid = NULL;
    pindexBestHeader = NULL;
    mapOrphanTransactions.clear();
    mapOrphanTransactionsByPrev.clear();
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nBlockSequenceId = 1;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }

    BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;
}
bool LoadBlockIndex(const CChainParams& chainparams)
{
    // Load block index from databases
    if (!fReindex && !LoadBlockIndexDB(chainparams))
        return false;
    return true;
}
bool InitBlockIndex(const CChainParams& chainparams)
{
    LOCK(cs_main);

    // Check whether we're already initialized
    if (chainActive.Genesis() != NULL)
        return true;

    // Use the provided setting for -txindex in the new database
    fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
    pblocktree->WriteFlag("txindex", fTxIndex);
    LogPrintf("Initializing databases...\n");

    // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
    if (!fReindex) {
        try {
            CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
            // Start new block file
            unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
            CDiskBlockPos blockPos;
            CValidationState state;
            if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
                return error("LoadBlockIndex(): FindBlockPos failed");
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                return error("LoadBlockIndex(): writing genesis block to disk failed");
            CBlockIndex *pindex = AddToBlockIndex(block);
            if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
                return error("LoadBlockIndex(): genesis block not accepted");
            // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
            return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
        } catch (const std::runtime_error& e) {
            return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
        }
    }

    return true;
}
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
            } catch (const std::exception&) {
                // no valid block header found; don't complain

                uint64_t nBlockPos = blkdat.GetPos();
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                            block.hashPrevBlock.ToString());
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    CValidationState state;
                    if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
                    if (state.IsError())
                } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrint("reindex", "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);

                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    CValidationState state;
                    if (!ActivateBestChain(state, chainparams)) {

                // Recursively process earlier encountered successors of this block
                deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
                            LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
                            CValidationState dummy;
                            if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
                                queue.push_back(block.GetHash());
                        mapBlocksUnknownParent.erase(it);
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
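/** Sanity checks for the block index (enabled with -checkblockindex). Walks the
 *  entire block tree depth-first, tracking the oldest ancestor that is missing data
 *  or a validity level, and asserts the invariants documented inline below. */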
void static CheckBlockIndex(const Consensus::Params& consensusParams)
    if (!fCheckBlockIndex) {

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        forward.insert(std::make_pair(it->second->pprev, it->second));

    assert(forward.size() == mapBlockIndex.size());

    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != NULL) {
        if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == NULL) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
        if (pindex->nChainTx == 0) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
        assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == NULL) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
            rangeUnlinked.first++;
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == NULL) {
                    assert(foundInUnlinked);
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
            if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
            // Proceed to the next one.
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
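/** Assemble the node's current warning string for the requested sink
 *  ("gui", "statusbar" or "rpc"). */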
std::string GetWarnings(const std::string& strFor)
    string strStatusBar;
    const string uiAlertSeperator = "<hr />";

    if (!CLIENT_VERSION_IS_RELEASE) {
        strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
        strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");

    if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
        strStatusBar = strRPC = strGUI = "testsafemode enabled";

    // Misc warnings like out of disk space and clock is wrong
    if (strMiscWarning != "")
        strStatusBar = strMiscWarning;
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + strMiscWarning;

    if (fLargeWorkForkFound)
        strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
    else if (fLargeWorkInvalidChainFound)
        strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");

    if (strFor == "gui")
    else if (strFor == "statusbar")
        return strStatusBar;
    else if (strFor == "rpc")
    assert(!"GetWarnings(): invalid parameter");
//////////////////////////////////////////////////////////////////////////////
// blockchain -> download logic notification

PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
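/** Notification for transactions included in a connected block: any orphan-pool
 *  entries spending the same outpoints are queued for eviction, since they are now
 *  either included or conflicted. Transactions not in a block are ignored here. */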
void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int nPosInBlock) {
    if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK)

    std::vector<uint256> vOrphanErase;
    // Which orphan pool entries must we evict?
    for (size_t j = 0; j < tx.vin.size(); j++) {
        auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
        if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
        for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
            const CTransaction& orphanTx = (*mi)->second.tx;
            const uint256& orphanHash = orphanTx.GetHash();
            vOrphanErase.push_back(orphanHash);

    // Erase orphan transactions included or precluded by this block
    if (vOrphanErase.size()) {
        BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
            nErased += EraseOrphanTx(orphanHash);
        LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
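/** Notification of a new chain tip: record the new height with the connection
 *  manager and, outside of initial block download, queue the hashes of the newly
 *  connected blocks (up to MAX_BLOCKS_TO_ANNOUNCE) for announcement to peers whose
 *  starting height suggests they may not have them yet. */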
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
    const int nNewHeight = pindexNew->nHeight;
    connman->SetBestHeight(nNewHeight);

    if (!fInitialDownload) {
        // Find the hashes of all blocks that weren't previously in the best chain.
        std::vector<uint256> vHashes;
        const CBlockIndex *pindexToAnnounce = pindexNew;
        while (pindexToAnnounce != pindexFork) {
            vHashes.push_back(pindexToAnnounce->GetBlockHash());
            pindexToAnnounce = pindexToAnnounce->pprev;
            if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                // Limit announcements in case of a huge reorganization.
                // Rely on the peer's synchronization mechanism in that case.
        // Relay inventory, but don't relay old inventory during initial block download.
        connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
            if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
                BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
                    pnode->PushBlockHash(hash);

    nTimeBestReceived = GetTime();
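/** Notification that a block has been checked: if the block came from a specific peer
 *  (tracked in mapBlockSource) and validation failed with a DoS score, queue a reject
 *  message for that peer and penalize it where appropriate. */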
void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
    const uint256 hash(block.GetHash());
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    if (state.IsInvalid(nDoS)) {
        if (it != mapBlockSource.end() && State(it->second.first)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
            State(it->second.first)->rejects.push_back(reject);
            if (nDoS > 0 && it->second.second)
                Misbehaving(it->second.first, nDoS);
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
//////////////////////////////////////////////////////////////////////////////

bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
    case MSG_WITNESS_TX:
        assert(recentRejects);
        if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
            // If the chain tip has changed, previously rejected transactions
            // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
            // or a double-spend. Reset the rejects filter and give those
            // txs a second chance.
            hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
            recentRejects->reset();

        // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude
        // requesting or processing some txs which have already been included in a block
        return recentRejects->contains(inv.hash) ||
               mempool.exists(inv.hash) ||
               mapOrphanTransactions.count(inv.hash) ||
               pcoinsTip->HaveCoinsInCache(inv.hash);
    case MSG_WITNESS_BLOCK:
        return mapBlockIndex.count(inv.hash);
    // Don't know what it is, just say we already got one
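/** Queue a transaction inventory announcement to every connected peer. */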
static void RelayTransaction(const CTransaction& tx, CConnman& connman)
    CInv inv(MSG_TX, tx.GetHash());
    connman.ForEachNode([&inv](CNode* pnode)
        pnode->PushInventory(inv);
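/** Relay an address to a small, deterministically chosen subset of peers (two peers
 *  if the address is reachable from our network, otherwise one). The choice is keyed
 *  by a SipHash of the address and the current 24-hour period, so the same peers are
 *  reused within that window and their addrKnown filters suppress duplicates. */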
static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
    unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the addrKnowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
    FastRandomContext insecure_rand;

    std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
        if (pnode->nVersion >= CADDR_TIME_VERSION) {
            uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, pnode);

    auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
        for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
            best[i].second->PushAddress(addr, insecure_rand);

    connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
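/** Serve queued getdata requests: blocks (full, filtered, compact or witness) are read
 *  from disk subject to the fingerprinting and historical-bandwidth limits below, and
 *  transactions are answered from the relay map or, within privacy limits, the mempool.
 *  Anything we cannot supply is reported back in a single notfound message. */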
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman)
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
    vector<CInv> vNotFound;
    CNetMsgMaker msgMaker(pfrom->GetSendVersion());

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= nMaxSendBufferSize)

        const CInv &inv = *it;
            boost::this_thread::interruption_point();

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
                BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
                if (mi != mapBlockIndex.end())
                    if (chainActive.Contains(mi->second)) {
                        static const int nOneMonth = 30 * 24 * 60 * 60;
                        // To prevent fingerprinting attacks, only send blocks outside of the active
                        // chain if they are valid, and no more than a month older (both in time, and in
                        // best equivalent proof of work) than the best header chain we know about.
                        send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
                            (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
                            (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
                            LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
                // disconnect node in case we have reached the outbound limit for serving historical blocks
                // never disconnect whitelisted nodes
                static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
                if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
                    LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
                    pfrom->fDisconnect = true;
                // Pruned nodes may have deleted the block, so check whether
                // it's available before trying to send.
                if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
                    // Send block from disk
                    if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
                        assert(!"cannot load block from disk");
                    if (inv.type == MSG_BLOCK)
                        connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block));
                    else if (inv.type == MSG_WITNESS_BLOCK)
                        connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, block));
                    else if (inv.type == MSG_FILTERED_BLOCK)
                        bool sendMerkleBlock = false;
                        CMerkleBlock merkleBlock;
                            LOCK(pfrom->cs_filter);
                            if (pfrom->pfilter) {
                                sendMerkleBlock = true;
                                merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
                        if (sendMerkleBlock) {
                            connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                            // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                            // This avoids hurting performance by pointlessly requiring a round-trip
                            // Note that there is currently no way for a node to request any single transactions we didn't send here -
                            // they must either disconnect and retry or request the full block.
                            // Thus, the protocol spec allows us to provide duplicate txn here,
                            // however we MUST always provide at least what the remote peer needs
                            typedef std::pair<unsigned int, uint256> PairType;
                            BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
                                connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *block.vtx[pair.first]));
                    else if (inv.type == MSG_CMPCT_BLOCK)
                        // If a peer is asking for old blocks, we're almost guaranteed
                        // they won't have a useful mempool to match against a compact block,
                        // and we don't feel like constructing the object for them, so
                        // instead we respond with the full, non-compact block.
                        bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                        int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
                        if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                            CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
                            connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                            connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, block));

                    // Trigger the peer node to send a getblocks request for the next batch of inventory
                    if (inv.hash == pfrom->hashContinue)
                        // Bypass PushInventory, this must send even if redundant,
                        // and we want it right after the last block so they don't
                        // wait for other stuff first.
                        vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                        connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
                        pfrom->hashContinue.SetNull();
            else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
                // Send stream from relay memory
                auto mi = mapRelay.find(inv.hash);
                int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
                if (mi != mapRelay.end()) {
                    connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
                } else if (pfrom->timeLastMempoolReq) {
                    auto txinfo = mempool.info(inv.hash);
                    // To protect privacy, do not answer getdata using the mempool when
                    // that TX couldn't have been INVed in reply to a MEMPOOL request.
                    if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                        connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                    vNotFound.push_back(inv);

            // Track requests for our stuff.
            GetMainSignals().Inventory(inv.hash);

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
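/** Compute the inv-type flags to use when requesting data from this peer; adds
 *  MSG_WITNESS_FLAG when both sides support witnesses. */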
uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
    uint32_t nFetchFlags = 0;
    if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
        nFetchFlags |= MSG_WITNESS_FLAG;
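/** Process a single incoming network message from pfrom; returns false (via error(...))
 *  when the message is malformed enough to abort processing. Handlers for the individual
 *  protocol messages (version, verack, addr, inv, getdata, ...) follow below. */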
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman)
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();

    LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
    if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
        LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");

    if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
              (strCommand == NetMsgType::FILTERLOAD ||
               strCommand == NetMsgType::FILTERADD))
        if (pfrom->nVersion >= NO_BLOOM_VERSION) {
            Misbehaving(pfrom->GetId(), 100);
            pfrom->fDisconnect = true;

    if (strCommand == NetMsgType::VERSION)
        // Each connection can only send one version message
        if (pfrom->nVersion != 0)
            connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message")));
            Misbehaving(pfrom->GetId(), 1);

        uint64_t nNonce = 1;
        uint64_t nServiceInt;
        vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
        pfrom->nServices = ServiceFlags(nServiceInt);
        if (!pfrom->fInbound)
            connman.SetServices(pfrom->addr, pfrom->nServices);
        if (pfrom->nServicesExpected & ~pfrom->nServices)
            LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
            connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
                               strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
            // disconnect from peers older than this proto version
            LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
            connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                               strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion == 10300)
            pfrom->nVersion = 300;
        vRecv >> addrFrom >> nNonce;
        if (!vRecv.empty()) {
            vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
            pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
        if (!vRecv.empty()) {
            vRecv >> pfrom->nStartingHeight;
            LOCK(pfrom->cs_filter);
                vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
                pfrom->fRelayTxes = true;

        // Disconnect if we connected to ourself
        if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
            LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
            pfrom->fDisconnect = true;

        pfrom->addrLocal = addrMe;
        if (pfrom->fInbound && addrMe.IsRoutable())

        // Be shy and don't send version until we hear
        if (pfrom->fInbound)
            PushNodeVersion(pfrom, connman, GetAdjustedTime());

        pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);

        if((pfrom->nServices & NODE_WITNESS))
            State(pfrom->GetId())->fHaveWitness = true;

        // Potentially mark this peer as a preferred download peer.
            UpdatePreferredDownload(pfrom, State(pfrom->GetId()));

        connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
        int nSendVersion = std::min(pfrom->nVersion, PROTOCOL_VERSION);
        pfrom->SetSendVersion(nSendVersion);

        if (!pfrom->fInbound)
            // Advertise our address
            if (fListen && !IsInitialBlockDownload())
                CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
                FastRandomContext insecure_rand;
                if (addr.IsRoutable())
                    LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr, insecure_rand);
                } else if (IsPeerAddrLocalGood(pfrom)) {
                    addr.SetIP(pfrom->addrLocal);
                    LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr, insecure_rand);

            // Get recent addresses
            if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
                connman.PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
                pfrom->fGetAddr = true;
            connman.MarkAddressGood(pfrom->addr);

        pfrom->fSuccessfullyConnected = true;

            remoteAddr = ", peeraddr=" + pfrom->addr.ToString();

        LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
                  pfrom->cleanSubVer, pfrom->nVersion,
                  pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,

        int64_t nTimeOffset = nTime - GetTime();
        pfrom->nTimeOffset = nTimeOffset;
        AddTimeData(pfrom->addr, nTimeOffset);

        // Feeler connections exist only to verify if address is online.
        if (pfrom->fFeeler) {
            assert(pfrom->fInbound == false);
            pfrom->fDisconnect = true;

    else if (pfrom->nVersion == 0)
        // Must have a version message before anything else
        Misbehaving(pfrom->GetId(), 1);

    // At this point, the outgoing message serialization version can't change.
    CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    if (strCommand == NetMsgType::VERACK)
        pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        if (!pfrom->fInbound) {
            // Mark this node as currently connected, so we update its timestamp later.
            State(pfrom->GetId())->fCurrentlyConnected = true;

        if (pfrom->nVersion >= SENDHEADERS_VERSION) {
            // Tell our peer we prefer to receive headers rather than inv's
            // We send this to non-NODE NETWORK peers as well, because even
            // non-NODE NETWORK peers can announce blocks (such as pruning
            connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
        if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
            // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
            // However, we do not request new block announcements using
            // cmpctblock messages.
            // We send this to non-NODE NETWORK peers as well, because
            // they may wish to request compact blocks from us
            bool fAnnounceUsingCMPCTBLOCK = false;
            uint64_t nCMPCTBLOCKVersion = 2;
            if (pfrom->GetLocalServices() & NODE_WITNESS)
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
            nCMPCTBLOCKVersion = 1;
            connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
    else if (strCommand == NetMsgType::ADDR)
        vector<CAddress> vAddr;

        // Don't want addr from older versions unless seeding
        if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
        if (vAddr.size() > 1000)
            Misbehaving(pfrom->GetId(), 20);
            return error("message addr size() = %u", vAddr.size());

        // Store the new addresses
        vector<CAddress> vAddrOk;
        int64_t nNow = GetAdjustedTime();
        int64_t nSince = nNow - 10 * 60;
        BOOST_FOREACH(CAddress& addr, vAddr)
            boost::this_thread::interruption_point();

            if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
            if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                addr.nTime = nNow - 5 * 24 * 60 * 60;
            pfrom->AddAddressKnown(addr);
            bool fReachable = IsReachable(addr);
            if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
                // Relay to a limited number of other nodes
                RelayAddress(addr, fReachable, connman);
            // Do not store addresses outside our network
                vAddrOk.push_back(addr);
        connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
        if (vAddr.size() < 1000)
            pfrom->fGetAddr = false;
        if (pfrom->fOneShot)
            pfrom->fDisconnect = true;
    else if (strCommand == NetMsgType::SENDHEADERS)
        State(pfrom->GetId())->fPreferHeaders = true;

    else if (strCommand == NetMsgType::SENDCMPCT)
        bool fAnnounceUsingCMPCTBLOCK = false;
        uint64_t nCMPCTBLOCKVersion = 0;
        vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
        if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
            // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
            if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
                State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
                State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
            if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
                State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
            if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
                if (pfrom->GetLocalServices() & NODE_WITNESS)
                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
    else if (strCommand == NetMsgType::INV)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message inv size() = %u", vInv.size());

        bool fBlocksOnly = !fRelayTxes;

        // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
        if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
            fBlocksOnly = false;

        uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());

        std::vector<CInv> vToFetch;

        for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
            CInv &inv = vInv[nInv];

            boost::this_thread::interruption_point();

            bool fAlreadyHave = AlreadyHave(inv);
            LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);

            if (inv.type == MSG_TX) {
                inv.type |= nFetchFlags;

            if (inv.type == MSG_BLOCK) {
                UpdateBlockAvailability(pfrom->GetId(), inv.hash);
                if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
                    // We used to request the full block here, but since headers-announcements are now the
                    // primary method of announcement on the network, and since, in the case that a node
                    // fell back to inv we probably have a reorg which we should get the headers for first,
                    // we now only provide a getheaders response here. When we receive the headers, we will
                    // then ask for the blocks we need.
                    connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
                    LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
                pfrom->AddInventoryKnown(inv);
                    LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
                else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())

            // Track requests for our stuff
            GetMainSignals().Inventory(inv.hash);

            if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) {
                Misbehaving(pfrom->GetId(), 50);
                return error("send buffer size() = %u", pfrom->nSendSize);

        if (!vToFetch.empty())
            connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vToFetch));
    else if (strCommand == NetMsgType::GETDATA)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message getdata size() = %u", vInv.size());

        if (fDebug || (vInv.size() != 1))
            LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);

        if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
            LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);

        pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
    else if (strCommand == NetMsgType::GETBLOCKS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        // Find the last block the caller has in the main chain
        CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);

        // Send the rest of the chain
            pindex = chainActive.Next(pindex);
        LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            if (pindex->GetBlockHash() == hashStop)
                LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            // If pruning, don't inv blocks unless we have on disk and are likely to still have
            // for some reasonable time window (1 hour) that block relay might require.
            const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
                LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                // When this block is requested, we'll send an inv that'll
                // trigger the peer to getblocks the next batch of inventory.
                LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                pfrom->hashContinue = pindex->GetBlockHash();
    else if (strCommand == NetMsgType::GETBLOCKTXN)
        BlockTransactionsRequest req;

        BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
        if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
            LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);

        if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
            // If an older block is requested (should never happen in practice,
            // but can happen in tests) send a block response instead of a
            // blocktxn response. Sending a full block response instead of a
            // small blocktxn response is preferable in the case where a peer
            // might maliciously send lots of getblocktxn requests to trigger
            // expensive disk reads, because it will require the peer to
            // actually receive all the data read from disk over the network.
            LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
            inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
            inv.hash = req.blockhash;
            pfrom->vRecvGetData.push_back(inv);
            ProcessGetData(pfrom, chainparams.GetConsensus(), connman);

        assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));

        BlockTransactions resp(req);
        for (size_t i = 0; i < req.indexes.size(); i++) {
            if (req.indexes[i] >= block.vtx.size()) {
                Misbehaving(pfrom->GetId(), 100);
                LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
            resp.txn[i] = block.vtx[req.indexes[i]];
        int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
        connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
    else if (strCommand == NetMsgType::GETHEADERS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
            LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);

        CNodeState *nodestate = State(pfrom->GetId());
        CBlockIndex* pindex = NULL;
        if (locator.IsNull())
            // If locator is null, return the hashStop block
            BlockMap::iterator mi = mapBlockIndex.find(hashStop);
            if (mi == mapBlockIndex.end())
            pindex = (*mi).second;
            // Find the last block the caller has in the main chain
            pindex = FindForkInGlobalIndex(chainActive, locator);
                pindex = chainActive.Next(pindex);

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        vector<CBlock> vHeaders;
        int nLimit = MAX_HEADERS_RESULTS;
        LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            vHeaders.push_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
        // pindex can be NULL either if we sent chainActive.Tip() OR
        // if our peer has chainActive.Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
        connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
    else if (strCommand == NetMsgType::TX)
        // Stop processing the transaction early if
        // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
        if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
            LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);

        deque<COutPoint> vWorkQueue;
        vector<uint256> vEraseQueue;

        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);

        bool fMissingInputs = false;
        CValidationState state;

        pfrom->setAskFor.erase(inv.hash);
        mapAlreadyAskedFor.erase(inv.hash);

        if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
            mempool.check(pcoinsTip);
            RelayTransaction(tx, connman);
            for (unsigned int i = 0; i < tx.vout.size(); i++) {
                vWorkQueue.emplace_back(inv.hash, i);

            pfrom->nLastTXTime = GetTime();

            LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
                tx.GetHash().ToString(),
                mempool.size(), mempool.DynamicMemoryUsage() / 1000);

            // Recursively process any orphan transactions that depended on this one
            set<NodeId> setMisbehaving;
            while (!vWorkQueue.empty()) {
                auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
                vWorkQueue.pop_front();
                if (itByPrev == mapOrphanTransactionsByPrev.end())
                for (auto mi = itByPrev->second.begin();
                     mi != itByPrev->second.end();
                    const CTransaction& orphanTx = (*mi)->second.tx;
                    const uint256& orphanHash = orphanTx.GetHash();
                    NodeId fromPeer = (*mi)->second.fromPeer;
                    bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
                    // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
                    // anyone relaying LegitTxX banned)
                    CValidationState stateDummy;

                    if (setMisbehaving.count(fromPeer))
                    if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
                        LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
                        RelayTransaction(orphanTx, connman);
                        for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
                            vWorkQueue.emplace_back(orphanHash, i);
                        vEraseQueue.push_back(orphanHash);
                    else if (!fMissingInputs2)
                        if (stateDummy.IsInvalid(nDos) && nDos > 0)
                            // Punish peer that gave us an invalid orphan tx
                            Misbehaving(fromPeer, nDos);
                            setMisbehaving.insert(fromPeer);
                            LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
                        // Has inputs but not accepted to mempool
                        // Probably non-standard or insufficient fee/priority
                        LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
                        vEraseQueue.push_back(orphanHash);
                        if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) {
                            // Do not use rejection cache for witness transactions or
                            // witness-stripped transactions, as they can have been malleated.
                            // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                            assert(recentRejects);
                            recentRejects->insert(orphanHash);
                    mempool.check(pcoinsTip);

            BOOST_FOREACH(uint256 hash, vEraseQueue)
                EraseOrphanTx(hash);
        else if (fMissingInputs)
            bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
            BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                if (recentRejects->contains(txin.prevout.hash)) {
                    fRejectedParents = true;
            if (!fRejectedParents) {
                uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());
                BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                    CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
                    pfrom->AddInventoryKnown(_inv);
                    if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
                AddOrphanTx(tx, pfrom->GetId());

                // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
                unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                    LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
                LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
            if (tx.wit.IsNull() && !state.CorruptionPossible()) {
                // Do not use rejection cache for witness transactions or
                // witness-stripped transactions, as they can have been malleated.
                // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                assert(recentRejects);
                recentRejects->insert(tx.GetHash());

            if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
                // Always relay transactions received from whitelisted peers, even
                // if they were already in the mempool or rejected from it due
                // to policy, allowing the node to function as a gateway for
                // nodes hidden behind it.
                // Never relay transactions that we would assign a non-zero DoS
                // score for, as we expect peers to do the same with us in that
                if (!state.IsInvalid(nDoS) || nDoS == 0) {
                    LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
                    RelayTransaction(tx, connman);
                    LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));

        if (state.IsInvalid(nDoS))
            LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
                FormatStateMessage(state));
            if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                   state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
                Misbehaving(pfrom->GetId(), nDoS);
5752 else if (strCommand
== NetMsgType::CMPCTBLOCK
&& !fImporting
&& !fReindex
) // Ignore blocks received while importing
5754 CBlockHeaderAndShortTxIDs cmpctblock
;
5755 vRecv
>> cmpctblock
;
5759 if (mapBlockIndex
.find(cmpctblock
.header
.hashPrevBlock
) == mapBlockIndex
.end()) {
5760 // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
5761 if (!IsInitialBlockDownload())
5762 connman
.PushMessage(pfrom
, msgMaker
.Make(NetMsgType::GETHEADERS
, chainActive
.GetLocator(pindexBestHeader
), uint256()));
5766 CBlockIndex
*pindex
= NULL
;
5767 CValidationState state
;
5768 if (!AcceptBlockHeader(cmpctblock
.header
, state
, chainparams
, &pindex
)) {
5770 if (state
.IsInvalid(nDoS
)) {
5772 Misbehaving(pfrom
->GetId(), nDoS
);
5773 LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom
->id
);
5778 // If AcceptBlockHeader returned true, it set pindex
5780 UpdateBlockAvailability(pfrom
->GetId(), pindex
->GetBlockHash());
5782 std::map
<uint256
, pair
<NodeId
, list
<QueuedBlock
>::iterator
> >::iterator blockInFlightIt
= mapBlocksInFlight
.find(pindex
->GetBlockHash());
5783 bool fAlreadyInFlight
= blockInFlightIt
!= mapBlocksInFlight
.end();
5785 if (pindex
->nStatus
& BLOCK_HAVE_DATA
) // Nothing to do here
5788 if (pindex
->nChainWork
<= chainActive
.Tip()->nChainWork
|| // We know something better
5789 pindex
->nTx
!= 0) { // We had this block at some point, but pruned it
5790 if (fAlreadyInFlight
) {
5791 // We requested this block for some reason, but our mempool will probably be useless
5792 // so we just grab the block via normal getdata
5793 std::vector
<CInv
> vInv(1);
5794 vInv
[0] = CInv(MSG_BLOCK
| GetFetchFlags(pfrom
, pindex
->pprev
, chainparams
.GetConsensus()), cmpctblock
.header
.GetHash());
5795 connman
.PushMessage(pfrom
, msgMaker
.Make(NetMsgType::GETDATA
, vInv
));
5800 // If we're not close to tip yet, give up and let parallel block fetch work its magic
5801 if (!fAlreadyInFlight
&& !CanDirectFetch(chainparams
.GetConsensus()))
5804 CNodeState
*nodestate
        CNodeState *nodestate = State(pfrom->GetId());

        if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
            // Don't bother trying to process compact blocks from v1 peers
            // after segwit activates.
            return true;
        }

        // We want to be a bit conservative just to be extra careful about DoS
        // possibilities in compact block processing...
        if (pindex->nHeight <= chainActive.Height() + 2) {
            if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                    (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
                list<QueuedBlock>::iterator *queuedBlockIt = NULL;
                if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) {
                    if (!(*queuedBlockIt)->partialBlock)
                        (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
                    else {
                        // The block was already in flight using compact blocks from the same peer
                        LogPrint("net", "Peer sent us compact block we were already syncing!\n");
                        return true;
                    }
                }

                PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
                ReadStatus status = partialBlock.InitData(cmpctblock);
                if (status == READ_STATUS_INVALID) {
                    MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
                    Misbehaving(pfrom->GetId(), 100);
                    LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
                    return true;
                } else if (status == READ_STATUS_FAILED) {
                    // Duplicate txindexes, the block is now in-flight, so just request it
                    std::vector<CInv> vInv(1);
                    vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                    connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                    return true;
                }

                if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                    // with this block! Let's get them to announce using compact blocks in the future.
                    MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
                }

                BlockTransactionsRequest req;
                for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                    if (!partialBlock.IsTxAvailable(i))
                        req.indexes.push_back(i);
                }
                if (req.indexes.empty()) {
                    // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
                    BlockTransactions txn;
                    txn.blockhash = cmpctblock.header.GetHash();
                    CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
                    blockTxnMsg << txn;
                    return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
                } else {
                    req.blockhash = pindex->GetBlockHash();
                    connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
                }
            }
        } else {
            if (fAlreadyInFlight) {
                // We requested this block, but it's far into the future, so our
                // mempool will probably be useless - request the block normally
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                return true;
            } else {
                // If this was an announce-cmpctblock, we want the same treatment as a header message
                // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
                std::vector<CBlock> headers;
                headers.push_back(cmpctblock.header);
                CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
                vHeadersMsg << headers;
                return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);
            }
        }
    }
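    // Note on the compact block (BIP 152) paths above: when the announced block is
    // within two blocks of our tip we try to reconstruct it from the mempool and
    // request only the missing transactions via GETBLOCKTXN; otherwise we either
    // fall back to a plain GETDATA (if the block was already in flight) or treat
    // the announcement like a headers message.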
    else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
    {
        BlockTransactions resp;
        vRecv >> resp;

        CBlock block;
        bool fBlockRead = false;
        {
            LOCK(cs_main);

            map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
            if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
                    it->second.first != pfrom->GetId()) {
                LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
                return true;
            }

            PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
            ReadStatus status = partialBlock.FillBlock(block, resp.txn);
            if (status == READ_STATUS_INVALID) {
                MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
                Misbehaving(pfrom->GetId(), 100);
                LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
                return true;
            } else if (status == READ_STATUS_FAILED) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
            } else {
                // Block is either okay, or possibly we received
                // READ_STATUS_CHECKBLOCK_FAILED.
                // Note that CheckBlock can only fail for one of a few reasons:
                // 1. bad-proof-of-work (impossible here, because we've already
                //    accepted the header)
                // 2. merkleroot doesn't match the transactions given (already
                //    caught in FillBlock with READ_STATUS_FAILED, so
                //    impossible here)
                // 3. the block is otherwise invalid (eg invalid coinbase,
                //    block is too big, too many legacy sigops, etc).
                // So if CheckBlock failed, #3 is the only possibility.
                // Under BIP 152, we don't DoS-ban unless proof of work is
                // invalid (we don't require all the stateless checks to have
                // been run). This is handled below, so just treat this as
                // though the block was successfully read, and rely on the
                // handling in ProcessNewBlock to ensure the block index is
                // updated, reject messages go out, etc.
                MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
                fBlockRead = true;
                // mapBlockSource is only used for sending reject messages and DoS scores,
                // so the race between here and cs_main in ProcessNewBlock is fine.
                // BIP 152 permits peers to relay compact blocks after validating
                // the header only; we should not punish peers if the block turns
                // out to be invalid.
                mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
            }
        } // Don't hold cs_main when we call into ProcessNewBlock
        if (fBlockRead) {
            bool fNewBlock = false;
            // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
            // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
            ProcessNewBlock(chainparams, &block, true, NULL, &fNewBlock);
            if (fNewBlock)
                pfrom->nLastBlockTime = GetTime();
        }
    }
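    // Note: cs_main is intentionally released before ProcessNewBlock() above, and
    // mapBlockSource is populated first so that any reject/DoS handling for the
    // reconstructed block can still be attributed to the peer that supplied it.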
    else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
    {
        std::vector<CBlockHeader> headers;

        // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
        unsigned int nCount = ReadCompactSize(vRecv);
        if (nCount > MAX_HEADERS_RESULTS) {
            LOCK(cs_main);
            Misbehaving(pfrom->GetId(), 20);
            return error("headers message size = %u", nCount);
        }
        headers.resize(nCount);
        for (unsigned int n = 0; n < nCount; n++) {
            vRecv >> headers[n];
            ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
        }

        if (nCount == 0) {
            // Nothing interesting. Stop asking this peer for more headers.
            return true;
        }

        {
            LOCK(cs_main);
            CNodeState *nodestate = State(pfrom->GetId());

            // If this looks like it could be a block announcement (nCount <
            // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
            // don't connect:
            // - Send a getheaders message in response to try to connect the chain.
            // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
            //   don't connect before giving DoS points
            // - Once a headers message is received that is valid and does connect,
            //   nUnconnectingHeaders gets reset back to 0.
            if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
                nodestate->nUnconnectingHeaders++;
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
                LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                        headers[0].GetHash().ToString(),
                        headers[0].hashPrevBlock.ToString(),
                        pindexBestHeader->nHeight,
                        pfrom->id, nodestate->nUnconnectingHeaders);
                // Set hashLastUnknownBlock for this peer, so that if we
                // eventually get the headers - even from a different peer -
                // we can use this peer to download.
                UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());

                if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
                    Misbehaving(pfrom->GetId(), 20);
                }
                return true;
            }

            CBlockIndex *pindexLast = NULL;
            BOOST_FOREACH(const CBlockHeader& header, headers) {
                CValidationState state;
                if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
                    Misbehaving(pfrom->GetId(), 20);
                    return error("non-continuous headers sequence");
                }
                if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {
                    int nDoS;
                    if (state.IsInvalid(nDoS)) {
                        if (nDoS > 0)
                            Misbehaving(pfrom->GetId(), nDoS);
                        return error("invalid header received");
                    }
                }
            }

            if (nodestate->nUnconnectingHeaders > 0) {
                LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
            }
            nodestate->nUnconnectingHeaders = 0;

            UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());

            if (nCount == MAX_HEADERS_RESULTS) {
                // Headers message had its maximum size; the peer may have more headers.
                // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
                // from there instead.
                LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
                connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
            }

            bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
            // If this set of headers is valid and ends in a block with at least as
            // much work as our tip, download as much as possible.
            if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
                vector<CBlockIndex*> vToFetch;
                CBlockIndex *pindexWalk = pindexLast;
                // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
                while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                            !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                            (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                        // We don't have this block, and it's not yet in flight.
                        vToFetch.push_back(pindexWalk);
                    }
                    pindexWalk = pindexWalk->pprev;
                }
                // If pindexWalk still isn't on our main chain, we're looking at a
                // very large reorg at a time we think we're close to caught up to
                // the main chain -- this shouldn't really happen. Bail out on the
                // direct fetch and rely on parallel download instead.
                if (!chainActive.Contains(pindexWalk)) {
                    LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
                            pindexLast->GetBlockHash().ToString(),
                            pindexLast->nHeight);
                } else {
                    vector<CInv> vGetData;
                    // Download as much as possible, from earliest to latest.
                    BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
                        if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                            // Can't download any more from this peer
                            break;
                        }
                        uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
                        vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                        MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
                        LogPrint("net", "Requesting block %s from peer=%d\n",
                                pindex->GetBlockHash().ToString(), pfrom->id);
                    }
                    if (vGetData.size() > 1) {
                        LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
                                pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                    }
                    if (vGetData.size() > 0) {
                        if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                            // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                            // with this block! Let's get them to announce using compact blocks in the future.
                            MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
                            // In any case, we want to download using a compact block, not a regular one
                            vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                        }
                        connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                    }
                }
            }
        }
    }
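    // Note: the direct fetch above is capped at MAX_BLOCKS_IN_TRANSIT_PER_PEER and is
    // skipped entirely when the announced chain would imply a large reorg; in that
    // case the regular parallel block download logic in SendMessages() takes over.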
    else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
    {
        CBlock block;
        vRecv >> block;

        LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id);

        // Process all blocks from whitelisted peers, even if not requested,
        // unless we're still syncing with the network.
        // Such an unrequested block may still be processed, subject to the
        // conditions in AcceptBlock().
        bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
        const uint256 hash(block.GetHash());
        {
            LOCK(cs_main);
            // Also always process if we requested the block explicitly, as we may
            // need it even though it is not a candidate for a new best tip.
            forceProcessing |= MarkBlockAsReceived(hash);
            // mapBlockSource is only used for sending reject messages and DoS scores,
            // so the race between here and cs_main in ProcessNewBlock is fine.
            mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
        }
        bool fNewBlock = false;
        ProcessNewBlock(chainparams, &block, forceProcessing, NULL, &fNewBlock);
        if (fNewBlock)
            pfrom->nLastBlockTime = GetTime();
    }
    else if (strCommand == NetMsgType::GETADDR)
    {
        // This asymmetric behavior for inbound and outbound connections was introduced
        // to prevent a fingerprinting attack: an attacker can send specific fake addresses
        // to users' AddrMan and later request them by sending getaddr messages.
        // Making nodes which are behind NAT and can only make outgoing connections ignore
        // the getaddr message mitigates the attack.
        if (!pfrom->fInbound) {
            LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
            return true;
        }

        // Only send one GetAddr response per connection to reduce resource waste
        // and discourage addr stamping of INV announcements.
        if (pfrom->fSentAddr) {
            LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
            return true;
        }
        pfrom->fSentAddr = true;

        pfrom->vAddrToSend.clear();
        vector<CAddress> vAddr = connman.GetAddresses();
        FastRandomContext insecure_rand;
        BOOST_FOREACH(const CAddress &addr, vAddr)
            pfrom->PushAddress(addr, insecure_rand);
    }
    else if (strCommand == NetMsgType::MEMPOOL)
    {
        if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
        {
            LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;
            return true;
        }

        if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
        {
            LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;
            return true;
        }

        LOCK(pfrom->cs_inventory);
        pfrom->fSendMempool = true;
    }
    else if (strCommand == NetMsgType::PING)
    {
        if (pfrom->nVersion > BIP0031_VERSION)
        {
            uint64_t nonce = 0;
            vRecv >> nonce;
            // Echo the message back with the nonce. This allows for two useful features:
            //
            // 1) A remote node can quickly check if the connection is operational
            // 2) Remote nodes can measure the latency of the network thread. If this node
            //    is overloaded it won't respond to pings quickly and the remote node can
            //    avoid sending us more work, like chain download requests.
            //
            // The nonce stops the remote getting confused between different pings: without
            // it, if the remote node sends a ping once per second and this node takes 5
            // seconds to respond to each, the 5th ping the remote sends would appear to
            // return very quickly.
            connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
        }
    }
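    // Example: if we previously sent PING with nonce 0x1234 and now receive PONG 0x1234,
    // the handler below records the round-trip time in nPingUsecTime and updates
    // nMinPingUsecTime; a mismatched or zero nonce is merely logged as a problem.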
    else if (strCommand == NetMsgType::PONG)
    {
        int64_t pingUsecEnd = nTimeReceived;
        uint64_t nonce = 0;
        size_t nAvail = vRecv.in_avail();
        bool bPingFinished = false;
        std::string sProblem;

        if (nAvail >= sizeof(nonce)) {
            vRecv >> nonce;

            // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
            if (pfrom->nPingNonceSent != 0) {
                if (nonce == pfrom->nPingNonceSent) {
                    // Matching pong received, this ping is no longer outstanding
                    bPingFinished = true;
                    int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                    if (pingUsecTime > 0) {
                        // Successful ping time measurement, replace previous
                        pfrom->nPingUsecTime = pingUsecTime;
                        pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
                    } else {
                        // This should never happen
                        sProblem = "Timing mishap";
                    }
                } else {
                    // Nonce mismatches are normal when pings are overlapping
                    sProblem = "Nonce mismatch";
                    if (nonce == 0) {
                        // This is most likely a bug in another implementation somewhere; cancel this ping
                        bPingFinished = true;
                        sProblem = "Nonce zero";
                    }
                }
            } else {
                sProblem = "Unsolicited pong without ping";
            }
        } else {
            // This is most likely a bug in another implementation somewhere; cancel this ping
            bPingFinished = true;
            sProblem = "Short payload";
        }

        if (!(sProblem.empty())) {
            LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                pfrom->id,
                sProblem,
                pfrom->nPingNonceSent,
                nonce,
                nAvail);
        }
        if (bPingFinished) {
            pfrom->nPingNonceSent = 0;
        }
    }
    else if (strCommand == NetMsgType::FILTERLOAD)
    {
        CBloomFilter filter;
        vRecv >> filter;

        if (!filter.IsWithinSizeConstraints())
        {
            // There is no excuse for sending a too-large filter
            LOCK(cs_main);
            Misbehaving(pfrom->GetId(), 100);
        }
        else
        {
            LOCK(pfrom->cs_filter);
            delete pfrom->pfilter;
            pfrom->pfilter = new CBloomFilter(filter);
            pfrom->pfilter->UpdateEmptyFull();
            pfrom->fRelayTxes = true;
        }
    }
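    // Note: FILTERLOAD/FILTERADD/FILTERCLEAR implement BIP 37 bloom filtering; an
    // oversized filter (or oversized filteradd element) is treated as misbehaviour,
    // while a valid filter replaces any existing pfilter and re-enables tx relay.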
    else if (strCommand == NetMsgType::FILTERADD)
    {
        vector<unsigned char> vData;
        vRecv >> vData;

        // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
        // and thus, the maximum size any matched object can have) in a filteradd message
        bool bad = false;
        if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
            bad = true;
        } else {
            LOCK(pfrom->cs_filter);
            if (pfrom->pfilter) {
                pfrom->pfilter->insert(vData);
            } else {
                bad = true;
            }
        }
        if (bad) {
            LOCK(cs_main);
            Misbehaving(pfrom->GetId(), 100);
        }
    }
    else if (strCommand == NetMsgType::FILTERCLEAR)
    {
        LOCK(pfrom->cs_filter);
        if (pfrom->GetLocalServices() & NODE_BLOOM) {
            delete pfrom->pfilter;
            pfrom->pfilter = new CBloomFilter();
        }
        pfrom->fRelayTxes = true;
    }
    else if (strCommand == NetMsgType::REJECT)
    {
        try {
            string strMsg; unsigned char ccode; string strReason;
            vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);

            ostringstream ss;
            ss << strMsg << " code " << itostr(ccode) << ": " << strReason;

            if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
            {
                uint256 hash;
                vRecv >> hash;
                ss << ": hash " << hash.ToString();
            }
            LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
        } catch (const std::ios_base::failure&) {
            // Avoid feedback loops by preventing reject messages from triggering a new reject message.
            LogPrint("net", "Unparseable reject message received\n");
        }
    }
    else if (strCommand == NetMsgType::FEEFILTER) {
        CAmount newFeeFilter = 0;
        vRecv >> newFeeFilter;
        if (MoneyRange(newFeeFilter)) {
            {
                LOCK(pfrom->cs_feeFilter);
                pfrom->minFeeFilter = newFeeFilter;
            }
            LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
        }
    }
    else if (strCommand == NetMsgType::NOTFOUND) {
        // We do not care about the NOTFOUND message, but logging an Unknown Command
        // message would be undesirable as we transmit it ourselves.
    }

    else {
        // Ignore unknown commands for extensibility
        LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
    }

    return true;
}
// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom, CConnman& connman)
{
    const CChainParams& chainparams = Params();
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
    // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());

    //
    // Message format
    //  (4) message start
    //  (12) command
    //  (4) size
    //  (4) checksum
    //  (x) data
    //
    bool fOk = true;

    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman);

    // this maintains the order of responses
    if (!pfrom->vRecvGetData.empty()) return fOk;

    std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
    while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= nMaxSendBufferSize)
            break;

        // get next message
        CNetMessage& msg = *it;

        // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
        //           msg.hdr.nMessageSize, msg.vRecv.size(),
        //           msg.complete() ? "Y" : "N");

        // end, if an incomplete message is found
        if (!msg.complete())
            break;

        // at this point, any failure means we can delete the current message
        it++;

        // Scan for message start
        if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
            LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
            fOk = false;
            break;
        }

        // Read header
        CMessageHeader& hdr = msg.hdr;
        if (!hdr.IsValid(chainparams.MessageStart()))
        {
            LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
            continue;
        }
        string strCommand = hdr.GetCommand();

        // Message size
        unsigned int nMessageSize = hdr.nMessageSize;

        // Checksum
        CDataStream& vRecv = msg.vRecv;
        const uint256& hash = msg.GetMessageHash();
        if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
        {
            LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
               SanitizeString(strCommand), nMessageSize,
               HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
               HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
            continue;
        }

        // Process message
        bool fRet = false;
        try
        {
            fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman);
            boost::this_thread::interruption_point();
        }
        catch (const std::ios_base::failure& e)
        {
            connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message")));
            if (strstr(e.what(), "end of data"))
            {
                // Allow exceptions from under-length message on vRecv
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else if (strstr(e.what(), "size too large"))
            {
                // Allow exceptions from over-long size
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
            {
                // Allow exceptions from non-canonical encoding
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else
            {
                PrintExceptionContinue(&e, "ProcessMessages()");
            }
        }
        catch (const boost::thread_interrupted&) {
            throw;
        }
        catch (const std::exception& e) {
            PrintExceptionContinue(&e, "ProcessMessages()");
        } catch (...) {
            PrintExceptionContinue(NULL, "ProcessMessages()");
        }

        if (!fRet)
            LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);

        break;
    }

    // In case the connection got shut down, its receive buffer was wiped
    if (!pfrom->fDisconnect)
        pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);

    return fOk;
}
class CompareInvMempoolOrder
{
    CTxMemPool *mp;
public:
    CompareInvMempoolOrder(CTxMemPool *_mempool)
    {
        mp = _mempool;
    }

    bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
    {
        /* As std::make_heap produces a max-heap, we want the entries with the
         * fewest ancestors/highest fee to sort later. */
        return mp->CompareDepthAndScore(*b, *a);
    }
};
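// Usage sketch (illustrative only): the comparator above is intended for the
// standard heap algorithms over iterators into a peer's setInventoryTxToSend, e.g.
//   CompareInvMempoolOrder cmp(&mempool);
//   std::make_heap(vInvTx.begin(), vInvTx.end(), cmp);
//   std::pop_heap(vInvTx.begin(), vInvTx.end(), cmp); // best candidate now at vInvTx.back()
// SendMessages() below does exactly this when choosing which transactions to INV.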
bool SendMessages(CNode* pto, CConnman& connman)
{
    const Consensus::Params& consensusParams = Params().GetConsensus();

    // Don't send anything until we get its version message
    if (pto->nVersion == 0 || pto->fDisconnect)
        return true;

    // If we get here, the outgoing message serialization version is set and can't change.
    CNetMsgMaker msgMaker(pto->GetSendVersion());

    //
    // Message: ping
    //
    bool pingSend = false;
    if (pto->fPingQueued) {
        // RPC ping request by user
        pingSend = true;
    }
    if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
        // Ping automatically sent as a latency probe & keepalive.
        pingSend = true;
    }
    if (pingSend) {
        uint64_t nonce = 0;
        while (nonce == 0) {
            GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
        }
        pto->fPingQueued = false;
        pto->nPingUsecStart = GetTimeMicros();
        if (pto->nVersion > BIP0031_VERSION) {
            pto->nPingNonceSent = nonce;
            connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
        } else {
            // Peer is too old to support ping command with nonce, pong will never arrive.
            pto->nPingNonceSent = 0;
            connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
        }
    }
    TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
    if (!lockMain)
        return true;

    CNodeState &state = *State(pto->GetId());

    BOOST_FOREACH(const CBlockReject& reject, state.rejects)
        connman.PushMessage(pto, msgMaker.Make(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
    state.rejects.clear();

    if (state.fShouldBan) {
        state.fShouldBan = false;
        if (pto->fWhitelisted)
            LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
        else {
            pto->fDisconnect = true;
            if (pto->addr.IsLocal())
                LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
            else
                connman.Ban(pto->addr, BanReasonNodeMisbehaving);
        }
    }
    // Address refresh broadcast
    int64_t nNow = GetTimeMicros();
    if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
        AdvertiseLocal(pto);
        pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
    }

    //
    // Message: addr
    //
    if (pto->nNextAddrSend < nNow) {
        pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
        vector<CAddress> vAddr;
        vAddr.reserve(pto->vAddrToSend.size());
        BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
        {
            if (!pto->addrKnown.contains(addr.GetKey()))
            {
                pto->addrKnown.insert(addr.GetKey());
                vAddr.push_back(addr);
                // receiver rejects addr messages larger than 1000
                if (vAddr.size() >= 1000)
                {
                    connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
                    vAddr.clear();
                }
            }
        }
        pto->vAddrToSend.clear();
        if (!vAddr.empty())
            connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
        // we only send the big addr message once
        if (pto->vAddrToSend.capacity() > 40)
            pto->vAddrToSend.shrink_to_fit();
    }
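    // Note: address broadcasts are scheduled with PoissonNextSend(), which draws a
    // randomized (roughly exponentially distributed) delay around the given average
    // interval; the jitter is intended to make it harder to infer network topology
    // from the timing of addr relays.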
    // Start block sync
    if (pindexBestHeader == NULL)
        pindexBestHeader = chainActive.Tip();
    bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
    if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
        // Only actively request headers from a single peer, unless we're close to today.
        if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
            state.fSyncStarted = true;
            nSyncStarted++;
            const CBlockIndex *pindexStart = pindexBestHeader;
            /* If possible, start at the block preceding the currently
               best known header. This ensures that we always get a
               non-empty list of headers back as long as the peer
               is up-to-date. With a non-empty response, we can initialise
               the peer's known best block. This wouldn't be possible
               if we requested starting at pindexBestHeader and
               got back an empty response. */
            if (pindexStart->pprev)
                pindexStart = pindexStart->pprev;
            LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
            connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
        }
    }
    // Resend wallet transactions that haven't gotten in a block yet
    // Except during reindex, importing and IBD, when old wallet
    // transactions become unconfirmed and spam other nodes.
    if (!fReindex && !fImporting && !IsInitialBlockDownload())
    {
        GetMainSignals().Broadcast(nTimeBestReceived, &connman);
    }
    //
    // Try sending block announcements via headers
    //
    {
        // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
        // list of block hashes we're relaying, and our peer wants
        // headers announcements, then find the first header
        // not yet known to our peer but would connect, and send.
        // If no header would connect, or if we have too many
        // blocks, or if the peer doesn't want headers, just
        // add all to the inv queue.
        LOCK(pto->cs_inventory);
        vector<CBlock> vHeaders;
        bool fRevertToInv = ((!state.fPreferHeaders &&
                             (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
                             pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
        CBlockIndex *pBestIndex = NULL; // last header queued for delivery
        ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date

        if (!fRevertToInv) {
            bool fFoundStartingHeader = false;
            // Try to find first header that our peer doesn't have, and
            // then send all headers past that one. If we come across any
            // headers that aren't on chainActive, give up.
            BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
                BlockMap::iterator mi = mapBlockIndex.find(hash);
                assert(mi != mapBlockIndex.end());
                CBlockIndex *pindex = mi->second;
                if (chainActive[pindex->nHeight] != pindex) {
                    // Bail out if we reorged away from this block
                    fRevertToInv = true;
                    break;
                }
                if (pBestIndex != NULL && pindex->pprev != pBestIndex) {
                    // This means that the list of blocks to announce don't
                    // connect to each other.
                    // This shouldn't really be possible to hit during
                    // regular operation (because reorgs should take us to
                    // a chain that has some block not on the prior chain,
                    // which should be caught by the prior check), but one
                    // way this could happen is by using invalidateblock /
                    // reconsiderblock repeatedly on the tip, causing it to
                    // be added multiple times to vBlockHashesToAnnounce.
                    // Robustly deal with this rare situation by reverting
                    // to an inv.
                    fRevertToInv = true;
                    break;
                }
                pBestIndex = pindex;
                if (fFoundStartingHeader) {
                    // add this to the headers message
                    vHeaders.push_back(pindex->GetBlockHeader());
                } else if (PeerHasHeader(&state, pindex)) {
                    continue; // keep looking for the first new block
                } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
                    // Peer doesn't have this header but they do have the prior one.
                    // Start sending headers.
                    fFoundStartingHeader = true;
                    vHeaders.push_back(pindex->GetBlockHeader());
                } else {
                    // Peer doesn't have this header or the prior one -- nothing will
                    // connect, so bail out.
                    fRevertToInv = true;
                    break;
                }
            }
        }
        if (!fRevertToInv && !vHeaders.empty()) {
            if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                // We only send up to 1 block as header-and-ids, as otherwise
                // probably means we're doing an initial-ish-sync or they're slow
                LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
                        vHeaders.front().GetHash().ToString(), pto->id);
                //TODO: Shouldn't need to reload block from disk, but requires refactor
                CBlock block;
                assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
                CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
                int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
                connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                state.pindexBestHeaderSent = pBestIndex;
            } else if (state.fPreferHeaders) {
                if (vHeaders.size() > 1) {
                    LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                            vHeaders.size(),
                            vHeaders.front().GetHash().ToString(),
                            vHeaders.back().GetHash().ToString(), pto->id);
                } else {
                    LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
                            vHeaders.front().GetHash().ToString(), pto->id);
                }
                connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                state.pindexBestHeaderSent = pBestIndex;
            } else
                fRevertToInv = true;
        }
        if (fRevertToInv) {
            // If falling back to using an inv, just try to inv the tip.
            // The last entry in vBlockHashesToAnnounce was our tip at some point
            // in the past.
            if (!pto->vBlockHashesToAnnounce.empty()) {
                const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
                BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
                assert(mi != mapBlockIndex.end());
                CBlockIndex *pindex = mi->second;

                // Warn if we're announcing a block that is not on the main chain.
                // This should be very rare and could be optimized out.
                // Just log for now.
                if (chainActive[pindex->nHeight] != pindex) {
                    LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
                        hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
                }

                // If the peer's chain has this block, don't inv it back.
                if (!PeerHasHeader(&state, pindex)) {
                    pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                    LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
                        pto->id, hashToAnnounce.ToString());
                }
            }
        }
        pto->vBlockHashesToAnnounce.clear();
    }
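    // Note: the logic above prefers, in order, a single CMPCTBLOCK (when the peer
    // asked for header-and-ids and exactly one new header connects), a HEADERS
    // message, and finally a plain INV of the tip whenever the headers path is not
    // applicable.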
    //
    // Message: inventory
    //
    vector<CInv> vInv;
    {
        LOCK(pto->cs_inventory);
        vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));

        // Add blocks
        BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
            vInv.push_back(CInv(MSG_BLOCK, hash));
            if (vInv.size() == MAX_INV_SZ) {
                connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                vInv.clear();
            }
        }
        pto->vInventoryBlockToSend.clear();
        // Check whether periodic sends should happen
        bool fSendTrickle = pto->fWhitelisted;
        if (pto->nNextInvSend < nNow) {
            fSendTrickle = true;
            // Use half the delay for outbound peers, as there is less privacy concern for them.
            pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
        }

        // Time to send but the peer has requested we not relay transactions.
        if (fSendTrickle) {
            LOCK(pto->cs_filter);
            if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
        }
        // Respond to BIP35 mempool requests
        if (fSendTrickle && pto->fSendMempool) {
            auto vtxinfo = mempool.infoAll();
            pto->fSendMempool = false;
            CAmount filterrate = 0;
            {
                LOCK(pto->cs_feeFilter);
                filterrate = pto->minFeeFilter;
            }

            LOCK(pto->cs_filter);

            for (const auto& txinfo : vtxinfo) {
                const uint256& hash = txinfo.tx->GetHash();
                CInv inv(MSG_TX, hash);
                pto->setInventoryTxToSend.erase(hash);
                if (filterrate) {
                    if (txinfo.feeRate.GetFeePerK() < filterrate)
                        continue;
                }
                if (pto->pfilter) {
                    if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                }
                pto->filterInventoryKnown.insert(hash);
                vInv.push_back(inv);
                if (vInv.size() == MAX_INV_SZ) {
                    connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                    vInv.clear();
                }
            }
            pto->timeLastMempoolReq = GetTime();
        }
        // Determine transactions to relay
        if (fSendTrickle) {
            // Produce a vector with all candidates for sending
            vector<std::set<uint256>::iterator> vInvTx;
            vInvTx.reserve(pto->setInventoryTxToSend.size());
            for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
                vInvTx.push_back(it);
            }
            CAmount filterrate = 0;
            {
                LOCK(pto->cs_feeFilter);
                filterrate = pto->minFeeFilter;
            }
            // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
            // A heap is used so that not all items need sorting if only a few are being sent.
            CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
            std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
            // No reason to drain out at many times the network's capacity,
            // especially since we have many peers and some will draw much shorter delays.
            unsigned int nRelayedTransactions = 0;
            LOCK(pto->cs_filter);
            while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
                // Fetch the top element from the heap
                std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                std::set<uint256>::iterator it = vInvTx.back();
                vInvTx.pop_back();
                uint256 hash = *it;
                // Remove it from the to-be-sent set
                pto->setInventoryTxToSend.erase(it);
                // Check if not in the filter already
                if (pto->filterInventoryKnown.contains(hash)) {
                    continue;
                }
                // Not in the mempool anymore? don't bother sending it.
                auto txinfo = mempool.info(hash);
                if (!txinfo.tx) {
                    continue;
                }
                if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
                    continue;
                }
                if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                // Send
                vInv.push_back(CInv(MSG_TX, hash));
                nRelayedTransactions++;
                {
                    // Expire old relay messages
                    while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
                    {
                        mapRelay.erase(vRelayExpiration.front().second);
                        vRelayExpiration.pop_front();
                    }

                    auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
                    if (ret.second) {
                        vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
                    }
                }
                if (vInv.size() == MAX_INV_SZ) {
                    connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                    vInv.clear();
                }
                pto->filterInventoryKnown.insert(hash);
            }
        }
    }
    if (!vInv.empty())
        connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
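    // Note: transactions relayed above are also remembered in mapRelay, with an
    // expiry of 15 minutes (nNow + 15 * 60 * 1000000 microseconds), so follow-up
    // GETDATA requests can still be served shortly after a transaction leaves the
    // mempool.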
    // Detect whether we're stalling
    nNow = GetTimeMicros();
    if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
        // Stalling only triggers when the block download window cannot move. During normal steady state,
        // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
        // should only happen during initial block download.
        LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
        pto->fDisconnect = true;
    }
    // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
    // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
    // We compensate for other peers to prevent killing off peers due to our own downstream link
    // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
    // to unreasonably increase our timeout.
    if (state.vBlocksInFlight.size() > 0) {
        QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
        int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
        if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
            LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
            pto->fDisconnect = true;
        }
    }
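    // Worked example (assuming mainnet's 10-minute block interval): with no other
    // peers downloading validated blocks, the timeout above amounts to roughly two
    // block intervals (~20 minutes), and each additional downloading peer adds about
    // half a block interval, matching the "2 + 0.5 * N" rule described above.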
    //
    // Message: getdata (blocks)
    //
    vector<CInv> vGetData;
    if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
        vector<CBlockIndex*> vToDownload;
        NodeId staller = -1;
        FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
        BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
            uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
            vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
            MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
            LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                pindex->nHeight, pto->id);
        }
        if (state.nBlocksInFlight == 0 && staller != -1) {
            if (State(staller)->nStallingSince == 0) {
                State(staller)->nStallingSince = nNow;
                LogPrint("net", "Stall started peer=%d\n", staller);
            }
        }
    }
    //
    // Message: getdata (non-blocks)
    //
    while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
    {
        const CInv& inv = (*pto->mapAskFor.begin()).second;
        if (!AlreadyHave(inv))
        {
            if (fDebug)
                LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
            vGetData.push_back(inv);
            if (vGetData.size() >= 1000)
            {
                connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                vGetData.clear();
            }
        } else {
            //If we're not going to ask, don't expect a response.
            pto->setAskFor.erase(inv.hash);
        }
        pto->mapAskFor.erase(pto->mapAskFor.begin());
    }
    if (!vGetData.empty())
        connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
    //
    // Message: feefilter
    //
    // We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
    if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
        !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
        CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
        int64_t timeNow = GetTimeMicros();
        if (timeNow > pto->nextSendTimeFeeFilter) {
            CAmount filterToSend = filterRounder.round(currentFilter);
            if (filterToSend != pto->lastSentFeeFilter) {
                connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
                pto->lastSentFeeFilter = filterToSend;
            }
            pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
        }
        // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
        // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
        else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
            (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
            pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
        }
    }
    return true;
}
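// Note: the early-rebroadcast branch above only fires when the filter has moved by
// more than about a third in either direction (currentFilter below 3/4 or above 4/3
// of the last value sent), so small mempool fee fluctuations do not by themselves
// generate extra FEEFILTER traffic.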
std::string CBlockFileInfo::ToString() const {
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}

int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
}
static const uint64_t MEMPOOL_DUMP_VERSION = 1;

bool LoadMempool(void)
{
    int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fopen((GetDataDir() / "mempool.dat").string().c_str(), "r");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    int64_t count = 0;
    int64_t failed = 0;
    int64_t skipped = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        double prioritydummy = 0;
        while (num--) {
            CTransaction tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                mempool.PrioritiseTransaction(tx.GetHash(), tx.GetHash().ToString(), prioritydummy, amountdelta);
            }
            CValidationState state;
            if (nTime + nExpiryTimeout > nNow) {
                LOCK(cs_main);
                AcceptToMemoryPoolWithTime(mempool, state, tx, true, NULL, nTime);
                if (state.IsValid()) {
                    ++count;
                } else {
                    ++failed;
                }
            } else {
                ++skipped;
            }
        }
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped);
    return true;
}
void DumpMempool(void)
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;

    {
        LOCK(mempool.cs);
        for (const auto &i : mempool.mapDeltas) {
            mapDeltas[i.first] = i.second.first;
        }
        vinfo = mempool.infoAll();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE* filestr = fopen((GetDataDir() / "mempool.dat.new").string().c_str(), "w");
        if (!filestr) {
            return;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << (int64_t)i.nTime;
            file << (int64_t)i.nFeeDelta;
            mapDeltas.erase(i.tx->GetHash());
        }

        file << mapDeltas;
        FileCommit(file.Get());
        file.fclose();
        RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
    }
}
class CMainCleanup
{
public:
    CMainCleanup() {}
    ~CMainCleanup() {
        // block headers
        BlockMap::iterator it1 = mapBlockIndex.begin();
        for (; it1 != mapBlockIndex.end(); it1++)
            delete (*it1).second;
        mapBlockIndex.clear();

        // orphan transactions
        mapOrphanTransactions.clear();
        mapOrphanTransactionsByPrev.clear();
    }
} instance_of_cmaincleanup;