// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "arith_uint256.h"
#include "blockencodings.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "merkleblock.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "versionbits.h"

#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/math/distributions/poisson.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif
CCriticalSection cs_main;

BlockMap mapBlockIndex;
CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;

CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;

CTxMemPool mempool(::minRelayTxFee);
FeeFilterRounder filterRounder(::minRelayTxFee);
struct IteratorComparator
{
    template<typename I>
    bool operator()(const I& a, const I& b)
    {
        return &(*a) < &(*b);
    }
};
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

static void CheckBlockIndex(const Consensus::Params& consensusParams);
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const string strMessageMagic = "Bitcoin Signed Message:\n";

static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
struct CBlockIndexWorkComparator
{
    bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};
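// As a rough illustration of this ordering: in a std::set keyed by
// CBlockIndexWorkComparator the lowest-work entries compare "less", so the
// most-work candidate is reached from rbegin(). Hypothetical sketch:
//
//     std::set<CBlockIndex*, CBlockIndexWorkComparator> candidates;
//     candidates.insert(&a); // a.nChainWork = 10
//     candidates.insert(&b); // b.nChainWork = 25
//     CBlockIndex* best = *candidates.rbegin(); // &b, the most-work entry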
CBlockIndex *pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
 * missing the data for the block.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;

/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 *  Pruned nodes may have entries where B is missing data.
 */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
 *  block/undo files that should be deleted. Set on startup
 *  or if we allocate more file space when we're in prune mode.
 */
bool fCheckForPruning = false;
/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
int32_t nBlockSequenceId = 1;
/** Decreasing counter (used by subsequent preciousblock calls). */
int32_t nBlockReverseSequenceId = -1;
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
/**
 * Sources of received blocks, saved to be able to send them reject
 * messages or ban them when processing happens afterwards. Protected by
 * cs_main.
 */
map<uint256, NodeId> mapBlockSource;
/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset. Protected by
 * cs_main.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60 byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 *
 * Memory used: 1.3 MB
 */
std::unique_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
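// Rough sizing arithmetic behind the figures above: 1000 rejected tx/sec over a
// two-minute window is 1000 * 120 = 120,000 elements, held at a one-in-a-million
// false positive rate. A sketch of the (re)initialization this implies, assuming
// CRollingBloomFilter takes (nElements, fpRate):
//
//     recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));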
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
    uint256 hash;
    CBlockIndex* pindex;                                     //!< Optional.
    bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
/** Stack of nodes which we have set to announce using compact blocks */
list<NodeId> lNodesAnnouncingHeaderAndIDs;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
set<int> setDirtyFileInfo;

/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;

/** Relay map, protected by cs_main. */
typedef std::map<uint256, std::shared_ptr<const CTransaction>> MapRelay;

/** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main. */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//

struct CBlockReject {
    unsigned char chRejectCode;
    string strRejectReason;
};
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * of by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! Accumulated misbehaviour score for this peer.
    int nMisbehavior;
    //! Whether this peer should be disconnected and banned (unless whitelisted).
    bool fShouldBan;
    //! String name of this peer (debugging/logging purposes).
    std::string name;
    //! List of asynchronously-determined block rejections to notify this peer about.
    std::vector<CBlockReject> rejects;
    //! The best known block we know this peer has announced.
    CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    CBlockIndex *pindexBestHeaderSent;
    //! Length of current-streak of unconnecting headers announcements
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
    int64_t nDownloadingSince;
    int nBlocksInFlight;
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;
    //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
    bool fPreferHeaderAndIDs;
    /**
     * Whether this peer will send us cmpctblocks if we request them.
     * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
     * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
     */
    bool fProvidesHeaderAndIDs;
    //! Whether this peer can give us witnesses
    bool fHaveWitness;
    //! Whether this peer wants witnesses in cmpctblocks/blocktxns
    bool fWantsCmpctWitness;
    /**
     * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
     * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
     */
    bool fSupportsDesiredCmpctVersion;

    CNodeState() {
        fCurrentlyConnected = false;
        nMisbehavior = 0;
        fShouldBan = false;
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = NULL;
        pindexBestHeaderSent = NULL;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fHaveWitness = false;
        fWantsCmpctWitness = false;
        fSupportsDesiredCmpctVersion = false;
    }
};
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;

CNodeState *State(NodeId pnode) {
    map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return NULL;
    return &it->second;
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;

    nPreferredDownload += state->fPreferredDownload;
}
void InitializeNode(NodeId nodeid, const CNode *pnode) {
    LOCK(cs_main);
    CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
    state.name = pnode->addrName;
    state.address = pnode->addr;
}
void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
    fUpdateConnectionTime = false;
    LOCK(cs_main);
    CNodeState *state = State(nodeid);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
        fUpdateConnectionTime = true;
    }

    BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
    }
}
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
bool MarkBlockAsReceived(const uint256& hash) {
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Short-circuit most stuff in case it's from the same node
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        *pit = &itInFlight->second.second;
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTimeMicros();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    if (!state->hashLastUnknownBlock.IsNull()) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) {
    if (!nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == pfrom->GetId()) {
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
                return;
            }
        }
        bool fAnnounceUsingCMPCTBLOCK = false;
        uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
                pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
                return true;
            });
            if (found)
                lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        fAnnounceUsingCMPCTBLOCK = true;
        pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
    }
}
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
    return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
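// Rough numbers for the check above: with mainnet's nPowTargetSpacing of 600
// seconds, 20 * 600 = 12,000 seconds, so direct fetching is allowed only while
// the tip is less than about 3 hours 20 minutes behind (adjusted) current time.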
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}
/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
    if (pa->nHeight > pb->nHeight) {
        pa = pa->GetAncestor(pb->nHeight);
    } else if (pb->nHeight > pa->nHeight) {
        pb = pb->GetAncestor(pa->nHeight);
    }

    while (pa != pb && pa && pb) {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == NULL) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<CBlockIndex*> vToFetch;
    CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
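// Rough illustration of the window logic above: for a peer whose last common
// block is at height H, only heights in (H, H + BLOCK_DOWNLOAD_WINDOW] are
// fetched, and the single block probed at H + BLOCK_DOWNLOAD_WINDOW + 1 exists
// purely to detect which peer is stalling the window. Assuming a window
// constant of 1024 (defined elsewhere in this file) and H = 400000, requests
// stay within heights 400001..401024 and height 401025 acts as the stall probe.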
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    LOCK(cs_main);
    CNodeState *state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}

void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    // Find the first block the caller has in the main chain
    BOOST_FOREACH(const uint256& hash, locator.vHave) {
        BlockMap::iterator mi = mapBlockIndex.find(hash);
        if (mi != mapBlockIndex.end())
        {
            CBlockIndex* pindex = (*mi).second;
            if (chain.Contains(pindex))
                return pindex;
            if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                return chain.Tip();
            }
        }
    }
    return chain.Genesis();
}

CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 99,999 bytes big is
    // at most 10 megabytes of orphans and somewhat more by prev index (in the worst case):
    unsigned int sz = GetTransactionWeight(tx);
    if (sz >= MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
    BOOST_FOREACH(const CTxIn& txin, tx.vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

    LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
    return 1;
}
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
        }
    }
    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
        while (iter != mapOrphanTransactions.end())
        {
            map<uint256, COrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
    }
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
    if (tx.nLockTime == 0)
        return true;
    if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
        return true;
    for (const auto& txin : tx.vin) {
        if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
            return false;
    }
    return true;
}
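// Rough illustration: nLockTime below LOCKTIME_THRESHOLD (500000000) is read as
// a block height, at or above it as a UNIX timestamp. A transaction with
// nLockTime = 450000 becomes final once nBlockHeight exceeds 450000; one with
// nLockTime = 1500000000 becomes final once nBlockTime exceeds that timestamp.
// Either way, any input not set to SEQUENCE_FINAL keeps the locktime in force.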
bool CheckFinalTx(const CTransaction &tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // BIP113 will require that time-locked transactions have nLockTime set to
    // less than the median time of the previous block they're contained in.
    // When the next block is created its previous block will be the current
    // chain tip, so we use that to calculate the median time passed to
    // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                             ? chainActive.Tip()->GetMedianTimePast()
                             : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
 * Calculates the block height and previous block's median time past at
 * which the transaction will be considered final in the context of BIP 68.
 * Also removes from the vector of input heights any entries which did not
 * correspond to sequence locked inputs as they do not affect the calculation.
 */
static std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    assert(prevHeights->size() == tx.vin.size());

    // Will be set to the equivalent height- and time-based nLockTime
    // values that would be necessary to satisfy all relative lock-
    // time constraints given our view of block chain history.
    // The semantics of nLockTime are the last invalid height/time, so
    // use -1 to have the effect of any height or time being valid.
    int nMinHeight = -1;
    int64_t nMinTime = -1;

    // tx.nVersion is signed integer so requires cast to unsigned otherwise
    // we would be doing a signed comparison and half the range of nVersion
    // wouldn't support BIP 68.
    bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
                      && flags & LOCKTIME_VERIFY_SEQUENCE;

    // Do not enforce sequence numbers as a relative lock time
    // unless we have been instructed to
    if (!fEnforceBIP68) {
        return std::make_pair(nMinHeight, nMinTime);
    }

    for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
        const CTxIn& txin = tx.vin[txinIndex];

        // Sequence numbers with the most significant bit set are not
        // treated as relative lock-times, nor are they given any
        // consensus-enforced meaning at this point.
        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
            // The height of this input is not relevant for sequence locks
            (*prevHeights)[txinIndex] = 0;
            continue;
        }

        int nCoinHeight = (*prevHeights)[txinIndex];

        if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
            int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
            // NOTE: Subtract 1 to maintain nLockTime semantics
            // BIP 68 relative lock times have the semantics of calculating
            // the first block or time at which the transaction would be
            // valid. When calculating the effective block time or height
            // for the entire transaction, we switch to using the
            // semantics of nLockTime which is the last invalid block
            // time or height. Thus we subtract 1 from the calculated
            // time or height.

            // Time-based relative lock-times are measured from the
            // smallest allowed timestamp of the block containing the
            // txout being spent, which is the median time past of the
            // block prior.
            nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
        } else {
            nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
        }
    }

    return std::make_pair(nMinHeight, nMinTime);
}
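// Worked example for the calculation above, assuming the usual values of
// CTxIn::SEQUENCE_LOCKTIME_MASK (0x0000ffff) and
// CTxIn::SEQUENCE_LOCKTIME_GRANULARITY (9):
//  - Height-based: nSequence = 100 with the spent coin confirmed at height 5000
//    gives nMinHeight = 5000 + 100 - 1 = 5099, i.e. the spend is valid in
//    blocks of height 5100 or higher.
//  - Time-based: a masked value of 10 with SEQUENCE_LOCKTIME_TYPE_FLAG set adds
//    10 << 9 = 5120 seconds to the median time past of the block before the one
//    containing the coin (minus 1 for nLockTime semantics).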
static bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
{
    int64_t nBlockTime = block.pprev->GetMedianTimePast();
    if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
        return false;

    return true;
}
bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
    return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
}
bool TestLockPointValidity(const LockPoints* lp)
{
    AssertLockHeld(cs_main);

    // If there are relative lock times then the maxInputBlock will be set
    // If there are no relative lock times, the LockPoints don't depend on the chain
    if (lp->maxInputBlock) {
        // Check whether chainActive is an extension of the block at which the LockPoints
        // calculation was valid. If not LockPoints are no longer valid
        if (!chainActive.Contains(lp->maxInputBlock)) {
            return false;
        }
    }

    // LockPoints still valid
    return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);

    CBlockIndex* tip = chainActive.Tip();
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than chainActive.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // pcoinsTip contains the UTXO set for chainActive.Tip()
        CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            CCoins coins;
            if (!viewMemPool.GetCoins(txin.prevout.hash, coins)) {
                return error("%s: Missing input", __func__);
            }
            if (coins.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coins.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            BOOST_FOREACH(int height, prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
    unsigned int nSigOps = 0;
    for (const auto& txin : tx.vin)
    {
        nSigOps += txin.scriptSig.GetSigOpCount(false);
    }
    for (const auto& txout : tx.vout)
    {
        nSigOps += txout.scriptPubKey.GetSigOpCount(false);
    }
    return nSigOps;
}
unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
{
    if (tx.IsCoinBase())
        return 0;

    unsigned int nSigOps = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        if (prevout.scriptPubKey.IsPayToScriptHash())
            nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
    }
    return nSigOps;
}
int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
{
    int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;

    if (tx.IsCoinBase())
        return nSigOps;

    if (flags & SCRIPT_VERIFY_P2SH) {
        nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
    }

    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
        nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, i < tx.wit.vtxinwit.size() ? &tx.wit.vtxinwit[i].scriptWitness : NULL, flags);
    }
    return nSigOps;
}
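// Rough illustration of the cost model above: legacy and P2SH sigops are scaled
// by WITNESS_SCALE_FACTOR (4) while witness sigops count once, so a hypothetical
// transaction with 2 legacy, 1 P2SH and 3 witness sigops costs 2*4 + 1*4 + 3 = 15.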
bool CheckTransaction(const CTransaction& tx, CValidationState &state)
{
    // Basic checks that don't depend on any context
    if (tx.vin.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
    if (tx.vout.empty())
        return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
    // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");

    // Check for negative or overflow output values
    CAmount nValueOut = 0;
    for (const auto& txout : tx.vout)
    {
        if (txout.nValue < 0)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
        if (txout.nValue > MAX_MONEY)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
        nValueOut += txout.nValue;
        if (!MoneyRange(nValueOut))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
    }

    // Check for duplicate inputs
    set<COutPoint> vInOutPoints;
    for (const auto& txin : tx.vin)
    {
        if (vInOutPoints.count(txin.prevout))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
        vInOutPoints.insert(txin.prevout);
    }

    if (tx.IsCoinBase())
    {
        if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
    }
    else
    {
        for (const auto& txin : tx.vin)
            if (txin.prevout.IsNull())
                return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
    }

    return true;
}
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
    int expired = pool.Expire(GetTime() - age);
    if (expired != 0)
        LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);

    std::vector<uint256> vNoSpendsRemaining;
    pool.TrimToSize(limit, &vNoSpendsRemaining);
    BOOST_FOREACH(const uint256& removed, vNoSpendsRemaining)
        pcoinsTip->Uncache(removed);
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
    return strprintf("%s%s (code %i)",
        state.GetRejectReason(),
        state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
        state.GetRejectCode());
}
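// Example output: a state rejected with REJECT_INVALID (16), reason
// "bad-txns-vin-empty" and no debug message formats as
// "bad-txns-vin-empty (code 16)".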
bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree,
                              bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee,
                              std::vector<uint256>& vHashTxnToUncache)
{
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    if (!CheckTransaction(tx, state))
        return false; // state filled in by CheckTransaction

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "coinbase");

    // Don't relay version 2 transactions until CSV is active, and we can be
    // sure that such transactions will be mined (unless we're on
    // -testnet/-regtest).
    const CChainParams& chainparams = Params();
    if (fRequireStandard && tx.nVersion >= 2 && VersionBitsTipState(chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV) != THRESHOLD_ACTIVE) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "premature-version2-tx");
    }

    // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
    bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus());
    if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !witnessEnabled) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
    }

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    string reason;
    if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
        return state.DoS(0, false, REJECT_NONSTANDARD, reason);

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
        return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");

    // is it already in the memory pool?
    if (pool.exists(hash))
        return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");

    // Check for conflicts with in-memory transactions
    set<uint256> setConflicts;
    {
    LOCK(pool.cs); // protect pool.mapNextTx
    BOOST_FOREACH(const CTxIn &txin, tx.vin)
    {
        auto itConflicting = pool.mapNextTx.find(txin.prevout);
        if (itConflicting != pool.mapNextTx.end())
        {
            const CTransaction *ptxConflicting = itConflicting->second;
            if (!setConflicts.count(ptxConflicting->GetHash()))
            {
                // Allow opt-out of transaction replacement by setting
                // nSequence >= maxint-1 on all inputs.
                //
                // maxint-1 is picked to still allow use of nLockTime by
                // non-replaceable transactions. All inputs rather than just one
                // is for the sake of multi-party protocols, where we don't
                // want a single party to be able to disable replacement.
                //
                // The opt-out ignores descendants as anyone relying on
                // first-seen mempool behavior should be checking all
                // unconfirmed ancestors anyway; doing otherwise is hopelessly
                // insecure.
                bool fReplacementOptOut = true;
                if (fEnableReplacement)
                {
                    BOOST_FOREACH(const CTxIn &_txin, ptxConflicting->vin)
                    {
                        if (_txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
                        {
                            fReplacementOptOut = false;
                            break;
                        }
                    }
                }
                if (fReplacementOptOut)
                    return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");

                setConflicts.insert(ptxConflicting->GetHash());
            }
        }
    }
    }
    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        CAmount nValueIn = 0;
        LockPoints lp;
        {
        LOCK(pool.cs);
        CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
        view.SetBackend(viewMemPool);

        // do we already have it?
        bool fHadTxInCache = pcoinsTip->HaveCoinsInCache(hash);
        if (view.HaveCoins(hash)) {
            if (!fHadTxInCache)
                vHashTxnToUncache.push_back(hash);
            return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known");
        }

        // do all inputs exist?
        // Note that this does not check for the presence of actual outputs (see the next check for that),
        // and only helps with filling in pfMissingInputs (to determine missing vs spent).
        BOOST_FOREACH(const CTxIn txin, tx.vin) {
            if (!pcoinsTip->HaveCoinsInCache(txin.prevout.hash))
                vHashTxnToUncache.push_back(txin.prevout.hash);
            if (!view.HaveCoins(txin.prevout.hash)) {
                if (pfMissingInputs)
                    *pfMissingInputs = true;
                return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
            }
        }

        // are the actual inputs available?
        if (!view.HaveInputs(tx))
            return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent");

        // Bring the best block into scope
        view.GetBestBlock();

        nValueIn = view.GetValueIn(tx);

        // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
        view.SetBackend(dummy);

        // Only accept BIP68 sequence locked transactions that can be mined in the next
        // block; we don't want our mempool filled up with transactions that can't
        // be mined yet.
        // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
        // CoinsViewCache instead of create its own
        if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
            return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
        }
        // Check for non-standard pay-to-script-hash in inputs
        if (fRequireStandard && !AreInputsStandard(tx, view))
            return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");

        // Check for non-standard witness in P2WSH
        if (!tx.wit.IsNull() && fRequireStandard && !IsWitnessStandard(tx, view))
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);

        int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);

        CAmount nValueOut = tx.GetValueOut();
        CAmount nFees = nValueIn-nValueOut;
        // nModifiedFees includes any fee deltas from PrioritiseTransaction
        CAmount nModifiedFees = nFees;
        double nPriorityDummy = 0;
        pool.ApplyDeltas(hash, nPriorityDummy, nModifiedFees);

        CAmount inChainInputValue;
        double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue);

        // Keep track of transactions that spend a coinbase, which we re-scan
        // during reorgs to ensure COINBASE_MATURITY is still met.
        bool fSpendsCoinbase = false;
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            const CCoins *coins = view.AccessCoins(txin.prevout.hash);
            if (coins->IsCoinBase()) {
                fSpendsCoinbase = true;
                break;
            }
        }

        CTxMemPoolEntry entry(tx, nFees, nAcceptTime, dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
        unsigned int nSize = entry.GetTxSize();

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
                strprintf("%d", nSigOpsCost));

        CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
        if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
        } else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
            // Require that free transactions have sufficient priority to be mined in the next block.
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
        }

        // Continuously rate-limit free (really, very-low-fee) transactions
        // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
        // be annoying or make others' transactions take longer to confirm.
        if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize))
        {
            static CCriticalSection csFreeLimiter;
            static double dFreeCount;
            static int64_t nLastTime;
            int64_t nNow = GetTime();

            LOCK(csFreeLimiter);

            // Use an exponentially decaying ~10-minute window:
            dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
            nLastTime = nNow;
            // -limitfreerelay unit is thousand-bytes-per-minute
            // At default rate it would take over a month to fill 1GB
            if (dFreeCount + nSize >= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000)
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction");
            LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
            dFreeCount += nSize;
        }
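        // Rough arithmetic for the limiter above: after dt seconds the counter
        // is multiplied by (1 - 1/600)^dt, so roughly 600 seconds reduces it to
        // about e^-1 (37%) of its previous value; assuming the default
        // -limitfreerelay of 15, the cap works out to 15 * 10 * 1000 = 150,000
        // bytes within that window.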
        if (nAbsurdFee && nFees > nAbsurdFee)
            return state.Invalid(false,
                REJECT_HIGHFEE, "absurdly-high-fee",
                strprintf("%d > %d", nFees, nAbsurdFee));

        // Calculate in-mempool ancestors, up to a limit.
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
        }

        // A transaction that spends outputs that would be replaced by it is invalid. Now
        // that we have the set of all ancestors we can detect this
        // pathological case by making sure setConflicts and setAncestors don't
        // overlap.
        BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors)
        {
            const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
            if (setConflicts.count(hashAncestor))
            {
                return state.DoS(10, false,
                                 REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
                                 strprintf("%s spends conflicting transaction %s",
                                           hash.ToString(),
                                           hashAncestor.ToString()));
            }
        }
        // Check if it's economically rational to mine this transaction rather
        // than the ones it replaces.
        CAmount nConflictingFees = 0;
        size_t nConflictingSize = 0;
        uint64_t nConflictingCount = 0;
        CTxMemPool::setEntries allConflicting;

        // If we don't hold the lock allConflicting might be incomplete; the
        // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
        // mempool consistency for us.
        LOCK(pool.cs);
        if (setConflicts.size())
        {
            CFeeRate newFeeRate(nModifiedFees, nSize);
            set<uint256> setConflictsParents;
            const int maxDescendantsToVisit = 100;
            CTxMemPool::setEntries setIterConflicting;
            BOOST_FOREACH(const uint256 &hashConflicting, setConflicts)
            {
                CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
                if (mi == pool.mapTx.end())
                    continue;

                // Save these to avoid repeated lookups
                setIterConflicting.insert(mi);

                // Don't allow the replacement to reduce the feerate of the
                // mempool.
                //
                // We usually don't want to accept replacements with lower
                // feerates than what they replaced as that would lower the
                // feerate of the next block. Requiring that the feerate always
                // be increased is also an easy-to-reason about way to prevent
                // DoS attacks via replacements.
                //
                // The mining code doesn't (currently) take children into
                // account (CPFP) so we only consider the feerates of
                // transactions being directly replaced, not their indirect
                // descendants. While that does mean high feerate children are
                // ignored when deciding whether or not to replace, we do
                // require the replacement to pay more overall fees too,
                // mitigating most cases.
                CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
                if (newFeeRate <= oldFeeRate)
                {
                    return state.DoS(0, false,
                                     REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                     strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                                               hash.ToString(),
                                               newFeeRate.ToString(),
                                               oldFeeRate.ToString()));
                }

                BOOST_FOREACH(const CTxIn &txin, mi->GetTx().vin)
                {
                    setConflictsParents.insert(txin.prevout.hash);
                }

                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                BOOST_FOREACH(CTxMemPool::txiter it, setIterConflicting) {
                    pool.CalculateDescendants(it, allConflicting);
                }
                BOOST_FOREACH(CTxMemPool::txiter it, allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.DoS(0, false,
                                 REJECT_NONSTANDARD, "too many potential replacements", false,
                                 strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                                           hash.ToString(),
                                           nConflictingCount,
                                           maxDescendantsToVisit));
            }

            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
                        return state.DoS(0, false,
                                         REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
                                         strprintf("replacement %s adds unconfirmed input, idx %d",
                                                   hash.ToString(), j));
                }
            }

            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                                           hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }

            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::minRelayTxFee.GetFee(nSize))
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                                           hash.ToString(),
                                           FormatMoney(nDeltaFees),
                                           FormatMoney(::minRelayTxFee.GetFee(nSize))));
            }
        }
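        // Worked example of the replacement rules above: if the conflicting
        // transactions pay 50,000 satoshis in total and the 1,000-byte
        // replacement faces a minimum relay fee of 1,000 satoshis/kB, it must
        // pay at least 50,000 + 1,000 = 51,000 satoshis in (modified) fees,
        // while also beating each conflict's individual feerate.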
1513 unsigned int scriptVerifyFlags
= STANDARD_SCRIPT_VERIFY_FLAGS
;
1514 if (!Params().RequireStandard()) {
1515 scriptVerifyFlags
= GetArg("-promiscuousmempoolflags", scriptVerifyFlags
);
1518 // Check against previous transactions
1519 // This is done last to help prevent CPU exhaustion denial-of-service attacks.
1520 PrecomputedTransactionData
txdata(tx
);
1521 if (!CheckInputs(tx
, state
, view
, true, scriptVerifyFlags
, true, txdata
)) {
1522 // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
1523 // need to turn both off, and compare against just turning off CLEANSTACK
1524 // to see if the failure is specifically due to witness validation.
1525 if (tx
.wit
.IsNull() && CheckInputs(tx
, state
, view
, true, scriptVerifyFlags
& ~(SCRIPT_VERIFY_WITNESS
| SCRIPT_VERIFY_CLEANSTACK
), true, txdata
) &&
1526 !CheckInputs(tx
, state
, view
, true, scriptVerifyFlags
& ~SCRIPT_VERIFY_CLEANSTACK
, true, txdata
)) {
1527 // Only the witness is missing, so the transaction itself may be fine.
1528 state
.SetCorruptionPossible();
1533 // Check again against just the consensus-critical mandatory script
1534 // verification flags, in case of bugs in the standard flags that cause
1535 // transactions to pass as valid when they're actually invalid. For
1536 // instance the STRICTENC flag was incorrectly allowing certain
1537 // CHECKSIG NOT scripts to pass, even though they were invalid.
1539 // There is a similar check in CreateNewBlock() to prevent creating
1540 // invalid blocks, however allowing such transactions into the mempool
1541 // can be exploited as a DoS attack.
1542 if (!CheckInputs(tx
, state
, view
, true, MANDATORY_SCRIPT_VERIFY_FLAGS
, true, txdata
))
1544 return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
1545 __func__
, hash
.ToString(), FormatStateMessage(state
));
1548 // Remove conflicting transactions from the mempool
1549 BOOST_FOREACH(const CTxMemPool::txiter it
, allConflicting
)
1551 LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
1552 it
->GetTx().GetHash().ToString(),
1554 FormatMoney(nModifiedFees
- nConflictingFees
),
1555 (int)nSize
- (int)nConflictingSize
);
1557 pool
.RemoveStaged(allConflicting
, false);
1559 // Store transaction in memory
1560 pool
.addUnchecked(hash
, entry
, setAncestors
, !IsInitialBlockDownload());
1562 // trim mempool and check if tx was trimmed
1563 if (!fOverrideMempoolLimit
) {
1564 LimitMempoolSize(pool
, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE
) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY
) * 60 * 60);
1565 if (!pool
.exists(hash
))
1566 return state
.DoS(0, false, REJECT_INSUFFICIENTFEE
, "mempool full");
1570 GetMainSignals().SyncTransaction(tx
, NULL
, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK
);
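// Editor's note (not part of the original file): the two fee conditions the
// replacement logic above enforces can be summarized, using the local
// variables from that code, roughly as:
//
//   1) nModifiedFees >= nConflictingFees
//        // the replacement pays at least as much as everything it evicts
//   2) nModifiedFees - nConflictingFees >= ::minRelayTxFee.GetFee(nSize)
//        // and additionally pays for its own relay bandwidth
//
// This is a sketch of intent, not a restatement of every rule in the code.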
bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                                bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    std::vector<uint256> vHashTxToUncache;
    bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
    if (!res) {
        BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache)
            pcoinsTip->Uncache(hashTx);
    }
    return res;
}
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
                        bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
    return AcceptToMemoryPoolWithTime(pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee);
}
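// Editor's sketch (not in the original source): a minimal, hypothetical caller
// of the wrapper above, assuming a CTransaction `tx` obtained elsewhere. It
// simply forwards to AcceptToMemoryPoolWithTime with the current time, as the
// wrapper itself does.
//
//   CValidationState state;
//   bool fMissingInputs = false;
//   if (!AcceptToMemoryPool(mempool, state, tx, true /*fLimitFree*/,
//                           &fMissingInputs, false /*fOverrideMempoolLimit*/,
//                           0 /*nAbsurdFee*/))
//       LogPrintf("tx %s rejected: %s\n", tx.GetHash().ToString(), FormatStateMessage(state));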
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
    CBlockIndex *pindexSlow = NULL;

    LOCK(cs_main);

    std::shared_ptr<const CTransaction> ptx = mempool.get(hash);
    if (ptx)
    {
        txOut = *ptx;
        return true;
    }

    if (fTxIndex) {
        CDiskTxPos postx;
        if (pblocktree->ReadTxIndex(hash, postx)) {
            CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
            if (file.IsNull())
                return error("%s: OpenBlockFile failed", __func__);
            CBlockHeader header;
            try {
                file >> header;
                fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                file >> txOut;
            } catch (const std::exception& e) {
                return error("%s: Deserialize or I/O error - %s", __func__, e.what());
            }
            hashBlock = header.GetHash();
            if (txOut.GetHash() != hash)
                return error("%s: txid mismatch", __func__);
            return true;
        }
    }

    if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
        int nHeight = -1;
        {
            const CCoinsViewCache& view = *pcoinsTip;
            const CCoins* coins = view.AccessCoins(hash);
            if (coins)
                nHeight = coins->nHeight;
        }
        if (nHeight > 0)
            pindexSlow = chainActive[nHeight];
    }

    if (pindexSlow) {
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
            BOOST_FOREACH(const CTransaction &tx, block.vtx) {
                if (tx.GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//

bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk: OpenBlockFile failed");

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(block);
    fileout << FLATDATA(messageStart) << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk: ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
        return false;
    if (block.GetHash() != pindex->GetBlockHash())
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                     pindex->ToString(), pindex->GetBlockPos().ToString());
    return true;
}
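// Editor's sketch (not in the original): reading the current tip back from
// disk with the index-based overload above; chainActive and Params() are the
// globals already used throughout this file.
//
//   CBlock tipBlock;
//   if (ReadBlockFromDisk(tipBlock, chainActive.Tip(), Params().GetConsensus()))
//       LogPrintf("tip block has %u transactions\n", (unsigned)tipBlock.vtx.size());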
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
    int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
    // Force block reward to zero when right shift is undefined.
    if (halvings >= 64)
        return 0;

    CAmount nSubsidy = 50 * COIN;
    // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
    nSubsidy >>= halvings;
    return nSubsidy;
}
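// Editor's note (illustrative, not part of the original file): assuming the
// mainnet halving interval of 210,000 blocks, the function above yields
//
//   GetBlockSubsidy(0,      params) == 50 * COIN    (halvings = 0)
//   GetBlockSubsidy(210000, params) == 25 * COIN    (halvings = 1)
//   GetBlockSubsidy(420000, params) == 25 * COIN / 2
//
// and returns 0 once halvings reaches 64, where the right shift would be
// undefined behaviour.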
bool IsInitialBlockDownload()
{
    const CChainParams& chainParams = Params();

    // Once this function has returned false, it must remain false.
    static std::atomic<bool> latchToFalse{false};
    // Optimization: pre-test latch before taking the lock.
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
        return true;
    bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
                  std::max(chainActive.Tip()->GetBlockTime(), pindexBestHeader->GetBlockTime()) < GetTime() - nMaxTipAge);
    if (!state)
        latchToFalse.store(true, std::memory_order_relaxed);
    return state;
}
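// Editor's note (not from the original source): because of the latch above,
// IsInitialBlockDownload() can flip from true to false at most once per
// process lifetime, and later calls are served by the cheap relaxed atomic
// load. Callers in this file, such as the mempool admission path and
// UpdateTip(), rely on that to make the check inexpensive on the hot path.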
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;

static void AlertNotify(const std::string& strMessage)
{
    uiInterface.NotifyAlertChanged();
    std::string strCmd = GetArg("-alertnotify", "");
    if (strCmd.empty()) return;

    // Alert text should be plain ascii coming from a trusted source, but to
    // be safe we first strip anything not in safeChars, then add single quotes around
    // the whole string before passing it to the shell:
    std::string singleQuote("'");
    std::string safeStatus = SanitizeString(strMessage);
    safeStatus = singleQuote+safeStatus+singleQuote;
    boost::replace_all(strCmd, "%s", safeStatus);

    boost::thread t(runCommand, strCmd); // thread runs free
}
void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before the last checkpoint)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = NULL;

    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        if (!fLargeWorkForkFound && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            AlertNotify(warning);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                      pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                      pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            fLargeWorkForkFound = true;
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            fLargeWorkInvalidChainFound = true;
        }
    }
    else
    {
        fLargeWorkForkFound = false;
        fLargeWorkInvalidChainFound = false;
    }
}
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    while (pfork && pfork != plonger)
    {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }

    // We define a warning-worthy condition as either a fork of at least 7 blocks
    // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours,
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both).
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 72)
    {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }

    CheckForkWarningConditions();
}
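// Editor's note (not part of the original file): in the condition above a new
// fork only replaces the stored "best fork" when it (a) has a higher tip than
// any previously stored fork tip, (b) carries more than seven blocks' worth of
// proof of work beyond the fork point, and (c) forked within the last 72
// blocks of our current tip; CheckForkWarningConditions() then decides whether
// a user-visible warning is raised.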
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
    if (howmuch == 0)
        return;

    CNodeState *state = State(pnode);
    if (state == NULL)
        return;

    state->nMisbehavior += howmuch;
    int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
    if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
    {
        LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
        state->fShouldBan = true;
    } else
        LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("%s: invalid block=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
              pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
              log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
              pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    LogPrintf("%s:  current best=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
              tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
              DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
    CheckForkWarningConditions();
}
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
            unsigned nPos = txin.prevout.n;

            if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
                assert(false);
            // mark an outpoint spent, and construct undo information
            txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
            coins->Spend(nPos);
            if (coins->vout.size() == 0) {
                CTxInUndo& undo = txundo.vprevout.back();
                undo.nHeight = coins->nHeight;
                undo.fCoinBase = coins->fCoinBase;
                undo.nVersion = coins->nVersion;
            }
        }
    }
    // add outputs
    inputs.ModifyNewCoins(tx.GetHash(), tx.IsCoinBase())->FromTx(tx, nHeight);
}

void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
    CTxUndo txundo;
    UpdateCoins(tx, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    const CScriptWitness *witness = (nIn < ptxTo->wit.vtxinwit.size()) ? &ptxTo->wit.vtxinwit[nIn].scriptWitness : NULL;
    if (!VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error)) {
        return false;
    }
    return true;
}
int GetSpendHeight(const CCoinsViewCache& inputs)
{
    LOCK(cs_main);
    CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
    return pindexPrev->nHeight + 1;
}
namespace Consensus {
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
{
        // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
        // for an attacker to attempt to split the network.
        if (!inputs.HaveInputs(tx))
            return state.Invalid(false, 0, "", "Inputs unavailable");

        CAmount nValueIn = 0;
        CAmount nFees = 0;
        for (unsigned int i = 0; i < tx.vin.size(); i++)
        {
            const COutPoint &prevout = tx.vin[i].prevout;
            const CCoins *coins = inputs.AccessCoins(prevout.hash);
            assert(coins);

            // If prev is coinbase, check that it's matured
            if (coins->IsCoinBase()) {
                if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
                    return state.Invalid(false,
                        REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
                        strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
            }

            // Check for negative or overflow input values
            nValueIn += coins->vout[prevout.n].nValue;
            if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
                return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");
        }

        if (nValueIn < tx.GetValueOut())
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
                strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));

        // Tally transaction fees
        CAmount nTxFee = nValueIn - tx.GetValueOut();
        if (nTxFee < 0)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
        nFees += nTxFee;
        if (!MoneyRange(nFees))
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");

        return true;
}
}// namespace Consensus
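// Editor's note (illustrative numbers, not from the original source): for a
// transaction spending a single 1 BTC output and creating 0.9 BTC of outputs,
// the checks above evaluate roughly as
//
//   nValueIn = 1.0 BTC,  tx.GetValueOut() = 0.9 BTC
//   nTxFee   = nValueIn - tx.GetValueOut() = 0.1 BTC   // must be >= 0 and within MoneyRange
//
// and the spend is rejected outright if the input is a coinbase with fewer
// than COINBASE_MATURITY confirmations at nSpendHeight.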
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
    if (!tx.IsCoinBase())
    {
        if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
            return false;

        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks before the
        // last block chain checkpoint. Assuming the checkpoints are valid this
        // is safe because block merkle hashes are still computed and checked,
        // and any change will be caught at the next checkpoint. Of course, if
        // the checkpoint is for a chain that's invalid due to false scriptSigs
        // this optimization would allow an invalid chain to be accepted.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);
                assert(coins);

                // Verify signature
                CScriptCheck check(*coins, tx, i, flags, cacheStore, &txdata);
                if (pvChecks) {
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(*coins, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, &txdata);
                        if (check2())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said, during an upgrade careful thought should be given
                    // to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after soft-fork
                    // super-majority signaling has occurred.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
        }
    }

    return true;
}
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(blockundo);
    fileout << FLATDATA(messageStart) << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}
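// Editor's note (not in the original): the checksum written above is the hash
// of (hashBlock || blockundo). UndoReadFromDisk below recomputes the same
// digest over what it just deserialized and rejects the undo record on
// mismatch, so both payload corruption and undo data attached to the wrong
// block are detected.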
bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
{
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Read block
    uint256 hashChecksum;
    try {
        filein >> blockundo;
        filein >> hashChecksum;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    if (hashChecksum != hasher.GetHash())
        return error("%s: Checksum mismatch", __func__);

    return true;
}
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
    strMiscWarning = strMessage;
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}

bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
/**
 * Apply the undo operation of a CTxInUndo to the given chain state.
 * @param undo The undo object.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return True on success.
 */
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    CCoinsModifier coins = view.ModifyCoins(out.hash);
    if (undo.nHeight != 0) {
        // undo data contains height: this is the last output of the prevout tx being spent
        if (!coins->IsPruned())
            fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
        coins->Clear();
        coins->fCoinBase = undo.fCoinBase;
        coins->nHeight = undo.nHeight;
        coins->nVersion = undo.nVersion;
    } else {
        if (coins->IsPruned())
            fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
    }
    if (coins->IsAvailable(out.n))
        fClean = fClean && error("%s: undo data overwriting existing output", __func__);
    if (coins->vout.size() < out.n+1)
        coins->vout.resize(out.n+1);
    coins->vout[out.n] = undo.txout;

    return fClean;
}
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    if (pfClean)
        *pfClean = false;

    bool fClean = true;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock(): no undo data available");
    if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock(): failure reading undo data");

    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock(): block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = block.vtx[i];
        uint256 hash = tx.GetHash();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        {
            CCoinsModifier outs = view.ModifyCoins(hash);
            outs->ClearUnspendable();

            CCoins outsBlock(tx, pindex->nHeight);
            // The CCoins serialization does not serialize negative numbers.
            // No network rules currently depend on the version here, so an inconsistency is harmless
            // but it must be corrected before txout nversion ever influences a network rule.
            if (outsBlock.nVersion < 0)
                outs->nVersion = outsBlock.nVersion;
            if (*outs != outsBlock)
                fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");

            // remove outputs
            outs->Clear();
        }

        // restore inputs
        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock(): transaction and undo data inconsistent");
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                if (!ApplyTxInUndo(undo, view, out))
                    fClean = false;
            }
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE *fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }

    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);

static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
}

// Protected by cs_main
VersionBitsCache versionbitscache;
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    int32_t nVersion = VERSIONBITS_TOP_BITS;

    for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
        ThresholdState state = VersionBitsState(pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
        if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
            nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
        }
    }

    return nVersion;
}
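// Editor's sketch (not part of the original file): with VERSIONBITS_TOP_BITS
// equal to 0x20000000, a block signalling only a deployment assigned bit 1
// would carry
//
//   nVersion = 0x20000000 | (1 << 1) == 0x20000002
//
// which is exactly what the loop above ORs together for every deployment that
// is currently STARTED or LOCKED_IN.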
/**
 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
 */
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
    int bit;

public:
    WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}

    int64_t BeginTime(const Consensus::Params& params) const { return 0; }
    int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }

    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
    {
        return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
    }
};

// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                  CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
{
    AssertLockHeld(cs_main);

    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck)
            view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    bool fScriptChecks = true;
    if (fCheckpointsEnabled) {
        CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
        if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
            // This block is an ancestor of a checkpoint: disable script checks
            fScriptChecks = false;
        }
    }

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint("bench", "    - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
                          !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                            (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
    CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
    // Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));

    if (fEnforceBIP30) {
        BOOST_FOREACH(const CTransaction& tx, block.vtx) {
            const CCoins* coins = view.AccessCoins(tx.GetHash());
            if (coins && !coins->IsPruned())
                return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                                 REJECT_INVALID, "bad-txns-BIP30");
        }
    }
    // BIP16 didn't become active until Apr 1 2012
    int64_t nBIP16SwitchTime = 1333238400;
    bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);

    unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

    // Start enforcing the DERSIG (BIP66) rule
    if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
    if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    // Start enforcing WITNESS rules using versionbits logic.
    if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
        flags |= SCRIPT_VERIFY_WITNESS;
        flags |= SCRIPT_VERIFY_NULLDUMMY;
    }

    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint("bench", "    - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
    CBlockUndo blockundo;

    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    std::vector<uint256> vOrphanErase;
    std::vector<int> prevheights;
    CAmount nFees = 0;
    int nInputs = 0;
    int64_t nSigOpsCost = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    std::vector<PrecomputedTransactionData> txdata;
    txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = block.vtx[i];

        nInputs += tx.vin.size();

        if (!tx.IsCoinBase())
        {
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
                                 REJECT_INVALID, "bad-txns-inputs-missingorspent");

            // Check that transaction is BIP68 final
            // BIP68 lock checks (as opposed to nLockTime checks) must
            // be in ConnectBlock because they require the UTXO set
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;
            }

            // Which orphan pool entries must we evict?
            for (size_t j = 0; j < tx.vin.size(); j++) {
                auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
                if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
                for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
                    const CTransaction& orphanTx = (*mi)->second.tx;
                    const uint256& orphanHash = orphanTx.GetHash();
                    vOrphanErase.push_back(orphanHash);
                }
            }

            if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
                                 REJECT_INVALID, "bad-txns-nonfinal");
            }
        }

        // GetTransactionSigOpCost counts 3 types of sigops:
        // * legacy (always)
        // * p2sh (when P2SH enabled in flags and excludes coinbase)
        // * witness (when witness enabled in flags and excludes coinbase)
        nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
        if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        txdata.emplace_back(tx);
        if (!tx.IsCoinBase())
        {
            nFees += view.GetValueIn(tx)-tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                             tx.GetHash().ToString(), FormatStateMessage(state));
            control.Add(vChecks);
        }

        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }
    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint("bench", "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);

    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0].GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0].GetValueOut(), blockReward),
                         REJECT_INVALID, "bad-cb-amount");

    if (!control.Wait())
        return state.DoS(100, false);
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint("bench", "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);

    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
    {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos _pos;
            if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock(): FindUndoPos failed");
            if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
                return AbortNode(state, "Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = _pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return AbortNode(state, "Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint("bench", "    - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
    static uint256 hashPrevBestCoinBase;
    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
    hashPrevBestCoinBase = block.vtx[0].GetHash();

    // Erase orphan transactions included or precluded by this block
    if (vOrphanErase.size()) {
        int nErased = 0;
        BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
            nErased += EraseOrphanTx(orphanHash);
        }
        LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
    }

    int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
    LogPrint("bench", "    - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
enum FlushStateMode {
    FLUSH_STATE_NONE,
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
    FLUSH_STATE_ALWAYS
};

/**
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with
 * if they're too large, if it's been a while since the last write,
 * or always and in all cases if we're in prune mode and are deleting files.
 */
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
    const CChainParams& chainparams = Params();
    LOCK2(cs_main, cs_LastBlockFile);
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
    bool fFlushForPrune = false;
    try {
        if (fPruneMode && fCheckForPruning && !fReindex) {
            FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
            fCheckForPruning = false;
            if (!setFilesToPrune.empty()) {
                fFlushForPrune = true;
                pblocktree->WriteFlag("prunedblockfiles", true);
            }
        }
        int64_t nNow = GetTimeMicros();
        // Avoid writing/flushing immediately after startup.
        if (nLastWrite == 0) {
            nLastWrite = nNow;
        }
        if (nLastFlush == 0) {
            nLastFlush = nNow;
        }
        if (nLastSetChain == 0) {
            nLastSetChain = nNow;
        }
        size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
        // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
        // Combine all conditions that result in a full cache flush.
        bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Depend on nMinDiskSpace to ensure we can write block index
            if (!CheckDiskSpace(0))
                return state.Error("out of disk space");
            // First make sure all block and undo data is flushed to disk.
            FlushBlockFile();
            // Then update all block file information (which may refer to block and undo files).
            {
                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
                vFiles.reserve(setDirtyFileInfo.size());
                for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                    vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
                    setDirtyFileInfo.erase(it++);
                }
                std::vector<const CBlockIndex*> vBlocks;
                vBlocks.reserve(setDirtyBlockIndex.size());
                for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                    vBlocks.push_back(*it);
                    setDirtyBlockIndex.erase(it++);
                }
                if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                    return AbortNode(state, "Files to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune)
                UnlinkPrunedFiles(setFilesToPrune);
            nLastWrite = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush) {
            // Typical CCoins structures on disk are around 128 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
                return state.Error("out of disk space");
            // Flush the chainstate (which may refer to block index entries).
            if (!pcoinsTip->Flush())
                return AbortNode(state, "Failed to write to coin database");
            nLastFlush = nNow;
        }
        if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
            // Update best block in wallet (so we can detect restored wallets).
            GetMainSignals().SetBestChain(chainActive.GetLocator());
            nLastSetChain = nNow;
        }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
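// Editor's note (not in the original): a full flush above fires when any of
// the following holds -- mode == FLUSH_STATE_ALWAYS, the coins cache is near
// or over nCoinCacheUsage (fCacheLarge / fCacheCritical), the periodic flush
// interval has elapsed (fPeriodicFlush), or block files are being pruned
// (fFlushForPrune); a lighter block-index write also happens on the shorter
// fPeriodicWrite interval.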
void FlushStateToDisk() {
    CValidationState state;
    FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}

void PruneAndFlush() {
    CValidationState state;
    fCheckForPruning = true;
    FlushStateToDisk(state, FLUSH_STATE_NONE);
}
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
    chainActive.SetTip(pindexNew);

    // New best block
    nTimeBestReceived = GetTime();
    mempool.AddTransactionsUpdated(1);

    cvBlockChange.notify_all();

    static bool fWarned = false;
    std::vector<std::string> warningMessages;
    if (!IsInitialBlockDownload())
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = chainActive.Tip();
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
                if (state == THRESHOLD_ACTIVE) {
                    strMiscWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                    if (!fWarned) {
                        AlertNotify(strMiscWarning);
                        fWarned = true;
                    }
                } else {
                    warningMessages.push_back(strprintf("unknown new rules are about to activate (versionbit %i)", bit));
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != NULL; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            warningMessages.push_back(strprintf("%d of last 100 blocks have unexpected version", nUpgraded));
        if (nUpgraded > 100/2)
        {
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
            if (!fWarned) {
                AlertNotify(strMiscWarning);
                fWarned = true;
            }
        }
    }
    LogPrintf("%s: new best=%s  height=%d  version=0x%08x  log2_work=%.8g  tx=%lu  date='%s' progress=%f  cache=%.1fMiB(%utx)", __func__,
              chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nVersion,
              log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
              DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
              Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
    if (!warningMessages.empty())
        LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", "));
    LogPrintf("\n");
}
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
bool static DisconnectTip(CValidationState& state, const CChainParams& chainparams, bool fBare = false)
{
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    CBlock block;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    }
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;

    if (!fBare) {
        // Resurrect mempool transactions from the disconnected block.
        std::vector<uint256> vHashUpdate;
        BOOST_FOREACH(const CTransaction &tx, block.vtx) {
            // ignore validation errors in resurrected transactions
            CValidationState stateDummy;
            if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
                mempool.removeRecursive(tx);
            } else if (mempool.exists(tx.GetHash())) {
                vHashUpdate.push_back(tx.GetHash());
            }
        }
        // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
        // no in-mempool children, which is generally not true when adding
        // previously-confirmed transactions back to the mempool.
        // UpdateTransactionsFromBlock finds descendants of any transactions in this
        // block that were added back and cleans up the mempool state.
        mempool.UpdateTransactionsFromBlock(vHashUpdate);
    }

    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev, chainparams);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        GetMainSignals().SyncTransaction(tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
    }
    return true;
}
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;

/**
 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
 * corresponding to pindexNew, to bypass loading it again from disk.
 */
bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock, std::vector<std::shared_ptr<const CTransaction>> &txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>> &txChanged)
{
    assert(pindexNew->pprev == chainActive.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    CBlock block;
    if (!pblock) {
        if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pblock = &block;
    }
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint("bench", "  - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
    {
        CCoinsViewCache view(pcoinsTip);
        bool rv = ConnectBlock(*pblock, state, pindexNew, view, chainparams);
        GetMainSignals().BlockChecked(*pblock, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
        }
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        LogPrint("bench", "  - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
        assert(view.Flush());
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint("bench", "  - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint("bench", "  - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, &txConflicted, !IsInitialBlockDownload());
    // Update chainActive & related variables.
    UpdateTip(pindexNew, chainparams);

    for(unsigned int i=0; i < pblock->vtx.size(); i++)
        txChanged.emplace_back(pblock->vtx[i], pindexNew, i);

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint("bench", "  - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
    LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
    return true;
}
/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 */
static CBlockIndex* FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = NULL;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return NULL;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
 */
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock, bool& fInvalidFound, std::vector<std::shared_ptr<const CTransaction>>& txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>>& txChanged)
{
    AssertLockHeld(cs_main);
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams))
            return false;
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL, txConflicted, txChanged)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
        LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
    }
    mempool.check(pcoinsTip);

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
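
/** Check whether the best known header (pindexBestHeader) has changed since the
 *  last call and, if so, notify the UI without holding cs_main. */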
static void NotifyHeaderTip() {
    bool fNotify = false;
    bool fInitialBlockDownload = false;
    static CBlockIndex* pindexHeaderOld = NULL;
    CBlockIndex* pindexHeader = NULL;
    {
        LOCK(cs_main);
        pindexHeader = pindexBestHeader;

        if (pindexHeader != pindexHeaderOld) {
            fNotify = true;
            fInitialBlockDownload = IsInitialBlockDownload();
            pindexHeaderOld = pindexHeader;
        }
    }
    // Send block tip changed notifications without cs_main
    if (fNotify) {
        uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
    }
}
/**
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 */
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
    CBlockIndex *pindexMostWork = NULL;
    CBlockIndex *pindexNewTip = NULL;
    std::vector<std::tuple<CTransaction,CBlockIndex*,int>> txChanged;
    if (pblock)
        txChanged.reserve(pblock->vtx.size());
    do {
        boost::this_thread::interruption_point();
        if (ShutdownRequested())
            break;

        const CBlockIndex *pindexFork;
        std::vector<std::shared_ptr<const CTransaction>> txConflicted;
        bool fInitialDownload;
        {
            LOCK(cs_main);
            CBlockIndex *pindexOldTip = chainActive.Tip();
            if (pindexMostWork == NULL) {
                pindexMostWork = FindMostWorkChain();
            }

            // Whether we have anything to do at all.
            if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
                return true;

            bool fInvalidFound = false;
            if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL, fInvalidFound, txConflicted, txChanged))
                return false;

            if (fInvalidFound) {
                // Wipe cache, we may need another branch now.
                pindexMostWork = NULL;
            }
            pindexNewTip = chainActive.Tip();
            pindexFork = chainActive.FindFork(pindexOldTip);
            fInitialDownload = IsInitialBlockDownload();
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main

        // throw all transactions though the signal-interface
        // while _not_ holding the cs_main lock
        for(std::shared_ptr<const CTransaction> tx : txConflicted)
        {
            GetMainSignals().SyncTransaction(*tx, pindexNewTip, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
        }
        // ... and about transactions that got confirmed:
        for(unsigned int i = 0; i < txChanged.size(); i++)
            GetMainSignals().SyncTransaction(std::get<0>(txChanged[i]), std::get<1>(txChanged[i]), std::get<2>(txChanged[i]));

        // Notify external listeners about the new tip.
        GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

        // Always notify the UI if a new block tip was connected
        if (pindexFork != pindexNewTip) {
            uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
        }
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
        return false;
    }

    return true;
}
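
/** Mark a block as precious: give it an earlier (lower) sequence id than other
 *  tips with equal work so it is preferred when selecting the active chain. */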
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = chainActive.Tip()->nChainWork;
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    return ActivateBestChain(state, params);
}
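
/** Mark a block as invalid, flag its descendants on the active chain as failed
 *  children, and disconnect the active chain back to before the block. */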
bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
    AssertLockHeld(cs_main);

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);

    while (chainActive.Contains(pindex)) {
        CBlockIndex *pindexWalk = chainActive.Tip();
        pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(pindexWalk);
        setBlockIndexCandidates.erase(pindexWalk);
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state, chainparams)) {
            mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
            return false;
        }
    }

    LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    // add it again.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);
        }
        it++;
    }

    InvalidChainFound(pindex);
    mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
    return true;
}
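
/** Remove BLOCK_FAILED_* flags from a block, its descendants and its ancestors,
 *  so the branch can be reconsidered as a chain tip candidate. */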
bool ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = NULL;
            }
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != NULL) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
        }
        pindex = pindex->pprev;
    }
    return true;
}
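
/** Create (or return an existing) block index entry for a header, wiring up its
 *  parent, height, chain work and best-header tracking. */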
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, Params().GetConsensus())) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }

    return true;
}
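
/** Find a position in the block (blk) files to store nAddSize bytes, switching
 *  to a new file and pre-allocating chunks as needed. */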
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    if (!fKnown) {
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return state.Error("out of disk space");
        }
    }

    setDirtyFileInfo.insert(nFile);
    return true;
}
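
/** Find a position in the undo (rev) file belonging to nFile for nAddSize bytes
 *  of undo data, pre-allocating chunks as needed. */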
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    unsigned int nNewSize;
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);

    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }

    return true;
}
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW)
{
    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");

    return true;
}
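
/** Context-free block checks: header (PoW), merkle root and malleability, size
 *  limits, coinbase placement, per-transaction validity and legacy sigop count. */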
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i].IsCoinBase())
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");

    // Check transactions
    for (const auto& tx : block.vtx)
        if (!CheckTransaction(tx, state))
            return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage()));

    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");

    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
{
    if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
        return true;

    int nHeight = pindexPrev->nHeight + 1;
    // Don't accept any forks from the main chain prior to last checkpoint
    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
    if (pcheckpoint && nHeight < pcheckpoint->nHeight)
        return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));

    return true;
}
bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE);
}
// Compute at which vout of the block's coinbase transaction the witness
// commitment occurs, or -1 if not found.
static int GetWitnessCommitmentIndex(const CBlock& block)
{
    int commitpos = -1;
    for (size_t o = 0; o < block.vtx[0].vout.size(); o++) {
        if (block.vtx[0].vout[o].scriptPubKey.size() >= 38 && block.vtx[0].vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0].vout[o].scriptPubKey[1] == 0x24 && block.vtx[0].vout[o].scriptPubKey[2] == 0xaa && block.vtx[0].vout[o].scriptPubKey[3] == 0x21 && block.vtx[0].vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0].vout[o].scriptPubKey[5] == 0xed) {
            commitpos = o;
        }
    }
    return commitpos;
}
void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    int commitpos = GetWitnessCommitmentIndex(block);
    static const std::vector<unsigned char> nonce(32, 0x00);
    if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0].wit.IsEmpty()) {
        block.vtx[0].wit.vtxinwit.resize(1);
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.resize(1);
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0] = nonce;
    }
}
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    bool fHaveWitness = false;
    for (size_t t = 1; t < block.vtx.size(); t++) {
        if (!block.vtx[t].wit.IsNull()) {
            fHaveWitness = true;
            break;
        }
    }
    std::vector<unsigned char> ret(32, 0x00);
    if (fHaveWitness && IsWitnessEnabled(pindexPrev, consensusParams)) {
        if (commitpos == -1) {
            uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL);
            CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin());
            CTxOut out;
            out.nValue = 0;
            out.scriptPubKey.resize(38);
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            const_cast<std::vector<CTxOut>*>(&block.vtx[0].vout)->push_back(out);
            block.vtx[0].UpdateHash();
        }
    }
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
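
/** Contextual header checks against the previous block: required work,
 *  timestamps, and rejection of outdated block versions. */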
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
    // Check proof of work
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");

    if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
        return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");

    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    // check for version 2, 3 and 4 upgrades
    if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
       (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
       (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
            return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
                                 strprintf("rejected nVersion=0x%08x block", block.nVersion));

    return true;
}
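
/** Contextual block checks against the previous block: transaction finality,
 *  BIP34 coinbase height, witness commitment and block weight. */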
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;

    // Start enforcing BIP113 (Median Time Past) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != -1) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0].wit.vtxinwit.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__));
            }
            CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
            if (memcmp(hashWitness.begin(), &block.vtx[0].vout[commitpos].scriptPubKey[6], 32)) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
        for (size_t i = 0; i < block.vtx.size(); i++) {
            if (!block.vtx[i].wit.IsNull()) {
                return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness nonce and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
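
/** Validate a block header and add it to the block index. On success, sets
 *  *ppindex to the corresponding CBlockIndex entry. Requires cs_main. */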
static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = NULL;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {

        if (miSelf != mapBlockIndex.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK)
                return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
            return true;
        }

        if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));

        // Get prev block index
        CBlockIndex* pindexPrev = NULL;
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");

        if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
            return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

        if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
    }
    if (pindex == NULL)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    CheckBlockIndex(chainparams.GetConsensus());

    return true;
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);

    CBlockIndex *pindexDummy = NULL;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) { // If we didn't ask for it:
        if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
        if (!fHasMoreWork) return true;    // Don't process less-work chains
        if (fTooFarAhead) return true;     // Block height is too high
    }
    if (fNewBlock) *fNewBlock = true;

    if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, FormatStateMessage(state));
    }

    int nHeight = pindex->nHeight;

    // Write block to history file
    try {
        unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
        CDiskBlockPos blockPos;
        if (dbp != NULL)
            blockPos = *dbp;
        if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
            return error("AcceptBlock(): FindBlockPos failed");
        if (dbp == NULL)
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                AbortNode(state, "Failed to write block");
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
            return error("AcceptBlock(): ReceivedBlockTransactions failed");
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    if (fCheckForPruning)
        FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files

    return true;
}
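
/** Process a new block received from pfrom (or from a local source when pfrom is
 *  NULL): store it to disk and try to activate the best chain. */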
bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
{
    {
        LOCK(cs_main);
        bool fRequested = MarkBlockAsReceived(pblock->GetHash());
        fRequested |= fForceProcessing;

        CBlockIndex *pindex = NULL;
        bool fNewBlock = false;
        bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock);
        if (pindex && pfrom) {
            mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
            if (fNewBlock) pfrom->nLastBlockTime = GetTime();
        }
        CheckBlockIndex(chainparams.GetConsensus());
        if (!ret)
            return error("%s: AcceptBlock FAILED", __func__);
    }

    if (!ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed", __func__);

    return true;
}
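
/** Check whether a proposed block built on pindexPrev (the current tip) would be
 *  valid, without modifying the chain state. */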
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == chainActive.Tip());
    if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
        return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());

    CCoinsViewCache viewNew(pcoinsTip);
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
        return false;
    assert(state.IsValid());

    return true;
}
/**
 * BLOCK PRUNING CODE
 */

/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
    uint64_t retval = 0;
    BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
        CBlockIndex* pindex = it->second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    mapBlocksUnlinked.erase(_it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
{
    for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        CDiskBlockPos pos(*it, 0);
        boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
        boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
        LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
    }
}
/* Calculate the block/rev files that should be deleted to remain under target*/
void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == NULL || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;

            if (nCurrentUsage + nBuffer < nPruneTarget)  // are we below our target?
                break;

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
             nLastBlockWeCanPrune, count);
}
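
/** Abort the node with an error if fewer than nMinDiskSpace + nAdditionalBytes
 *  bytes are available on the data directory's filesystem. */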
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
    uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
        return AbortNode("Disk space is low!", _("Error: Disk space is low!"));

    return true;
}
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return NULL;
    boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
    boost::filesystem::create_directories(path.parent_path());
    FILE* file = fopen(path.string().c_str(), "rb+");
    if (!file && !fReadOnly)
        file = fopen(path.string().c_str(), "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return NULL;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return NULL;
        }
    }
    return file;
}

FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}

FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}

boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
CBlockIndex* InsertBlockIndex(uint256 hash)
{
    if (hash.IsNull())
        return NULL;

    // Return existing
    BlockMap::iterator mi = mapBlockIndex.find(hash);
    if (mi != mapBlockIndex.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    if (!pindexNew)
        throw runtime_error(std::string(__func__) + ": new CBlockIndex failed");
    mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}
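
/** Load the block index from the on-disk block tree database, rebuild the
 *  in-memory chain state, and read block-file and flag metadata. */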
bool static LoadBlockIndexDB()
{
    const CChainParams& chainparams = Params();
    if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex))
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    set<int> setBlkDataFiles;
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return true;
    chainActive.SetTip(it->second);

    PruneBlockIndexCandidates();

    LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));

    return true;
}
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}

CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100);
}

bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = NULL;
    int nGoodTransactions = 0;
    CValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]...");
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone);
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone);
        if (pindex->nHeight < chainActive.Height()-nCheckDepth)
            break;
        if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, only go back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            bool fClean = true;
            if (!DisconnectBlock(block, state, pindex, coins, &fClean))
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            pindexState = pindex->pprev;
            if (!fClean) {
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!ConnectBlock(block, state, pindex, coins, chainparams))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}
bool RewindBlockIndex(const CChainParams& params)
{
    LOCK(cs_main);

    int nHeight = 1;
    while (nHeight <= chainActive.Height()) {
        if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
            break;
        }
        nHeight++;
    }

    // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
    CValidationState state;
    CBlockIndex* pindex = chainActive.Tip();
    while (chainActive.Height() >= nHeight) {
        if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, don't try rewinding past the HAVE_DATA point;
            // since older blocks can't be served anyway, there's
            // no need to walk further, and trying to DisconnectTip()
            // will fail (and require a needless reindex/redownload
            // of the blockchain).
            break;
        }
        if (!DisconnectTip(state, params, true)) {
            return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
        }
        // Occasionally flush state to disk.
        if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
            return false;
    }

    // Reduce validity flag and have-data flags.
    // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
    // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        CBlockIndex* pindexIter = it->second;

        // Note: If we encounter an insufficiently validated block that
        // is on chainActive, it must be because we are a pruning node, and
        // this block or some successor doesn't HAVE_DATA, so we were unable to
        // rewind all the way. Blocks remaining on chainActive at this point
        // must not have their validity reduced.
        if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
            // Reduce validity
            pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
            // Remove have-data flags.
            pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
            // Remove storage location.
            pindexIter->nFile = 0;
            pindexIter->nDataPos = 0;
            pindexIter->nUndoPos = 0;
            // Remove various other things
            pindexIter->nTx = 0;
            pindexIter->nChainTx = 0;
            pindexIter->nSequenceId = 0;
            // Make sure it gets written.
            setDirtyBlockIndex.insert(pindexIter);
            // Update indexes
            setBlockIndexCandidates.erase(pindexIter);
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
            while (ret.first != ret.second) {
                if (ret.first->second == pindexIter) {
                    mapBlocksUnlinked.erase(ret.first++);
                } else {
                    ++ret.first;
                }
            }
        } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
            setBlockIndexCandidates.insert(pindexIter);
        }
    }

    PruneBlockIndexCandidates();

    CheckBlockIndex(params.GetConsensus());

    if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
        return false;
    }

    return true;
}
void UnloadBlockIndex()
{
    LOCK(cs_main);
    setBlockIndexCandidates.clear();
    chainActive.SetTip(NULL);
    pindexBestInvalid = NULL;
    pindexBestHeader = NULL;
    mempool.clear();
    mapOrphanTransactions.clear();
    mapOrphanTransactionsByPrev.clear();
    nSyncStarted = 0;
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    nBlockSequenceId = 1;
    mapBlockSource.clear();
    mapBlocksInFlight.clear();
    nPreferredDownload = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    mapNodeState.clear();
    recentRejects.reset(NULL);
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }

    BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;
}
bool LoadBlockIndex()
{
    // Load block index from databases
    if (!fReindex && !LoadBlockIndexDB())
        return false;
    return true;
}

bool InitBlockIndex(const CChainParams& chainparams)
{
    LOCK(cs_main);

    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

    // Check whether we're already initialized
    if (chainActive.Genesis() != NULL)
        return true;

    // Use the provided setting for -txindex in the new database
    fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
    pblocktree->WriteFlag("txindex", fTxIndex);
    LogPrintf("Initializing databases...\n");

    // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
    if (!fReindex) {
        try {
            CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
            // Start new block file
            unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
            CDiskBlockPos blockPos;
            CValidationState state;
            if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
                return error("LoadBlockIndex(): FindBlockPos failed");
            if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
                return error("LoadBlockIndex(): writing genesis block to disk failed");
            CBlockIndex *pindex = AddToBlockIndex(block);
            if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
                return error("LoadBlockIndex(): genesis block not accepted");
            // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
            return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
        } catch (const std::runtime_error& e) {
            return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
        }
    }

    return true;
}
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                    continue;
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                CBlock block;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                            block.hashPrevBlock.ToString());
                    if (dbp)
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    LOCK(cs_main);
                    CValidationState state;
                    if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
                        nLoaded++;
                    if (state.IsError())
                        break;
                } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrint("reindex", "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    CValidationState state;
                    if (!ActivateBestChain(state, chainparams)) {
                        break;
                    }
                }

                // Recursively process earlier encountered successors of this block
                deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        CBlock block;
                        if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
                        {
                            LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
                                    head.ToString());
                            LOCK(cs_main);
                            CValidationState dummy;
                            if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
                            {
                                nLoaded++;
                                queue.push_back(block.GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}
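
/** Exhaustive consistency check of mapBlockIndex and the structures derived from
 *  it (setBlockIndexCandidates, mapBlocksUnlinked); only runs when fCheckBlockIndex is set. */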
void static CheckBlockIndex(const Consensus::Params& consensusParams)
    if (!fCheckBlockIndex) {

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        forward.insert(std::make_pair(it->second->pprev, it->second));

    assert(forward.size() == mapBlockIndex.size());

    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != NULL) {
        if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == NULL) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.

        if (pindex->nChainTx == 0) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
        assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == NULL) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
            rangeUnlinked.first++;
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == NULL) {
                    assert(foundInUnlinked);
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
            if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
            // Proceed to the next one.
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
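
/**
 * GetWarnings: assemble the current warning string for the requested target
 * ("gui", "statusbar" or "rpc"), combining the pre-release build warning,
 * -testsafemode, strMiscWarning and the large-fork / invalid-chain flags.
 */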
std::string GetWarnings(const std::string& strFor)
    string strStatusBar;
    const string uiAlertSeperator = "<hr />";

    if (!CLIENT_VERSION_IS_RELEASE) {
        strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
        strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");

    if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
        strStatusBar = strRPC = strGUI = "testsafemode enabled";

    // Misc warnings like out of disk space and clock is wrong
    if (strMiscWarning != "")
        strStatusBar = strMiscWarning;
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + strMiscWarning;

    if (fLargeWorkForkFound)
        strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
    else if (fLargeWorkInvalidChainFound)
        strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
        strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");

    if (strFor == "gui")
    else if (strFor == "statusbar")
        return strStatusBar;
    else if (strFor == "rpc")
    assert(!"GetWarnings(): invalid parameter");
//////////////////////////////////////////////////////////////////////////////

// blockchain -> download logic notification

void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
    const int nNewHeight = pindexNew->nHeight;
    connman->SetBestHeight(nNewHeight);

    if (!fInitialDownload) {
        // Find the hashes of all blocks that weren't previously in the best chain.
        std::vector<uint256> vHashes;
        const CBlockIndex *pindexToAnnounce = pindexNew;
        while (pindexToAnnounce != pindexFork) {
            vHashes.push_back(pindexToAnnounce->GetBlockHash());
            pindexToAnnounce = pindexToAnnounce->pprev;
            if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                // Limit announcements in case of a huge reorganization.
                // Rely on the peer's synchronization mechanism in that case.

        // Relay inventory, but don't relay old inventory during initial block download.
        connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
            if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
                BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
                    pnode->PushBlockHash(hash);

void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
    const uint256 hash(block.GetHash());
    std::map<uint256, NodeId>::iterator it = mapBlockSource.find(hash);

    if (state.IsInvalid(nDoS)) {
        if (it != mapBlockSource.end() && State(it->second)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
            State(it->second)->rejects.push_back(reject);
                Misbehaving(it->second, nDoS);
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
//////////////////////////////////////////////////////////////////////////////

bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
    case MSG_WITNESS_TX:
        assert(recentRejects);
        if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
            // If the chain tip has changed previously rejected transactions
            // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
            // or a double-spend. Reset the rejects filter and give those
            // txs a second chance.
            hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
            recentRejects->reset();

        // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude
        // requesting or processing some txs which have already been included in a block
        return recentRejects->contains(inv.hash) ||
               mempool.exists(inv.hash) ||
               mapOrphanTransactions.count(inv.hash) ||
               pcoinsTip->HaveCoinsInCache(inv.hash);
    case MSG_WITNESS_BLOCK:
        return mapBlockIndex.count(inv.hash);
    // Don't know what it is, just say we already got one
static void RelayTransaction(const CTransaction& tx, CConnman& connman)
    CInv inv(MSG_TX, tx.GetHash());
    connman.ForEachNode([&inv](CNode* pnode)
        pnode->PushInventory(inv);

static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
    int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the addrKnowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    std::multimap<uint64_t, CNode*> mapMix;
    const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
    FastRandomContext insecure_rand;

    auto sortfunc = [&mapMix, &hasher](CNode* pnode) {
        if (pnode->nVersion >= CADDR_TIME_VERSION) {
            uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
            mapMix.emplace(hashKey, pnode);

    auto pushfunc = [&addr, &mapMix, &nRelayNodes, &insecure_rand] {
        for (auto mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
            mi->second->PushAddress(addr, insecure_rand);

    connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman)
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();

    vector<CInv> vNotFound;

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= nMaxSendBufferSize)

        const CInv &inv = *it;

        boost::this_thread::interruption_point();

        if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
            BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
            if (mi != mapBlockIndex.end())
                if (chainActive.Contains(mi->second)) {
                    static const int nOneMonth = 30 * 24 * 60 * 60;
                    // To prevent fingerprinting attacks, only send blocks outside of the active
                    // chain if they are valid, and no more than a month older (both in time, and in
                    // best equivalent proof of work) than the best header chain we know about.
                    send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
                        (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
                        (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
                        LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
            // disconnect node in case we have reached the outbound limit for serving historical blocks
            // never disconnect whitelisted nodes
            static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
            if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
                LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
                pfrom->fDisconnect = true;
            // Pruned nodes may have deleted the block, so check whether
            // it's available before trying to send.
            if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
                // Send block from disk
                if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
                    assert(!"cannot load block from disk");
                if (inv.type == MSG_BLOCK)
                    pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
                else if (inv.type == MSG_WITNESS_BLOCK)
                    pfrom->PushMessage(NetMsgType::BLOCK, block);
                else if (inv.type == MSG_FILTERED_BLOCK)
                    bool sendMerkleBlock = false;
                    CMerkleBlock merkleBlock;
                        LOCK(pfrom->cs_filter);
                        if (pfrom->pfilter) {
                            sendMerkleBlock = true;
                            merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
                    if (sendMerkleBlock) {
                        pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
                        // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                        // This avoids hurting performance by pointlessly requiring a round-trip
                        // Note that there is currently no way for a node to request any single transactions we didn't send here -
                        // they must either disconnect and retry or request the full block.
                        // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                        // however we MUST always provide at least what the remote peer needs
                        typedef std::pair<unsigned int, uint256> PairType;
                        BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
                            pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
                else if (inv.type == MSG_CMPCT_BLOCK)
                    // If a peer is asking for old blocks, we're almost guaranteed
                    // they wont have a useful mempool to match against a compact block,
                    // and we don't feel like constructing the object for them, so
                    // instead we respond with the full, non-compact block.
                    bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                    if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                        CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
                        pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                        pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);

                // Trigger the peer node to send a getblocks request for the next batch of inventory
                if (inv.hash == pfrom->hashContinue)
                    // Bypass PushInventory, this must send even if redundant,
                    // and we want it right after the last block so they don't
                    // wait for other stuff first.
                    vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                    pfrom->PushMessage(NetMsgType::INV, vInv);
                    pfrom->hashContinue.SetNull();
        else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
            // Send stream from relay memory
            auto mi = mapRelay.find(inv.hash);
            if (mi != mapRelay.end()) {
                pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
            } else if (pfrom->timeLastMempoolReq) {
                auto txinfo = mempool.info(inv.hash);
                // To protect privacy, do not answer getdata using the mempool when
                // that TX couldn't have been INVed in reply to a MEMPOOL request.
                if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                    pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
                vNotFound.push_back(inv);

        // Track requests for our stuff.
        GetMainSignals().Inventory(inv.hash);

        if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound);
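
// GetFetchFlags: add MSG_WITNESS_FLAG to our getdata requests when both we and the
// peer are witness-capable, so blocks and transactions are fetched with witness data.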
uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
    uint32_t nFetchFlags = 0;
    if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
        nFetchFlags |= MSG_WITNESS_FLAG;
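
// ProcessMessage: main dispatcher for a single received P2P message. Each
// strCommand branch below handles one protocol message type; misbehaving
// peers are penalized via Misbehaving() and may be disconnected.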
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman)
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();

    LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
    if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
        LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");

    if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
        (strCommand == NetMsgType::FILTERLOAD ||
         strCommand == NetMsgType::FILTERADD ||
         strCommand == NetMsgType::FILTERCLEAR))
        if (pfrom->nVersion >= NO_BLOOM_VERSION) {
            Misbehaving(pfrom->GetId(), 100);
            pfrom->fDisconnect = true;

    if (strCommand == NetMsgType::VERSION)
        // Feeler connections exist only to verify if address is online.
        if (pfrom->fFeeler) {
            assert(pfrom->fInbound == false);
            pfrom->fDisconnect = true;

        // Each connection can only send one version message
        if (pfrom->nVersion != 0)
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
            Misbehaving(pfrom->GetId(), 1);

        uint64_t nNonce = 1;
        uint64_t nServiceInt;
        vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
        pfrom->nServices = ServiceFlags(nServiceInt);
        if (!pfrom->fInbound)
            connman.SetServices(pfrom->addr, pfrom->nServices);
        if (pfrom->nServicesExpected & ~pfrom->nServices)
            LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
                               strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
            // disconnect from peers older than this proto version
            LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                               strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
            pfrom->fDisconnect = true;

        if (pfrom->nVersion == 10300)
            pfrom->nVersion = 300;
        vRecv >> addrFrom >> nNonce;
        if (!vRecv.empty()) {
            vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
            pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
        if (!vRecv.empty()) {
            vRecv >> pfrom->nStartingHeight;
            LOCK(pfrom->cs_filter);
            vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
            pfrom->fRelayTxes = true;

        // Disconnect if we connected to ourself
        if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
            LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
            pfrom->fDisconnect = true;

        pfrom->addrLocal = addrMe;
        if (pfrom->fInbound && addrMe.IsRoutable())

        // Be shy and don't send version until we hear
        if (pfrom->fInbound)
            pfrom->PushVersion();

        pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);

        if((pfrom->nServices & NODE_WITNESS))
            State(pfrom->GetId())->fHaveWitness = true;

        // Potentially mark this peer as a preferred download peer.
        UpdatePreferredDownload(pfrom, State(pfrom->GetId()));

        pfrom->PushMessage(NetMsgType::VERACK);
        pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        if (!pfrom->fInbound)
            // Advertise our address
            if (fListen && !IsInitialBlockDownload())
                CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
                FastRandomContext insecure_rand;
                if (addr.IsRoutable())
                    LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr, insecure_rand);
                } else if (IsPeerAddrLocalGood(pfrom)) {
                    addr.SetIP(pfrom->addrLocal);
                    LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
                    pfrom->PushAddress(addr, insecure_rand);

            // Get recent addresses
            if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
                pfrom->PushMessage(NetMsgType::GETADDR);
                pfrom->fGetAddr = true;
            connman.MarkAddressGood(pfrom->addr);

        pfrom->fSuccessfullyConnected = true;

        remoteAddr = ", peeraddr=" + pfrom->addr.ToString();

        LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
                  pfrom->cleanSubVer, pfrom->nVersion,
                  pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,

        int64_t nTimeOffset = nTime - GetTime();
        pfrom->nTimeOffset = nTimeOffset;
        AddTimeData(pfrom->addr, nTimeOffset);

    else if (pfrom->nVersion == 0)
        // Must have a version message before anything else
        Misbehaving(pfrom->GetId(), 1);
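
    // VERACK: the peer acknowledged our version. Lock in the negotiated protocol
    // version, then advertise our relay preferences: SENDHEADERS for header
    // announcements and SENDCMPCT (version 2 first if witness-capable, then
    // version 1) for compact block relay.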
    else if (strCommand == NetMsgType::VERACK)
        pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        // Mark this node as currently connected, so we update its timestamp later.
        if (pfrom->fNetworkNode) {
            State(pfrom->GetId())->fCurrentlyConnected = true;

        if (pfrom->nVersion >= SENDHEADERS_VERSION) {
            // Tell our peer we prefer to receive headers rather than inv's
            // We send this to non-NODE NETWORK peers as well, because even
            // non-NODE NETWORK peers can announce blocks (such as pruning
            pfrom->PushMessage(NetMsgType::SENDHEADERS);
        if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
            // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
            // However, we do not request new block announcements using
            // cmpctblock messages.
            // We send this to non-NODE NETWORK peers as well, because
            // they may wish to request compact blocks from us
            bool fAnnounceUsingCMPCTBLOCK = false;
            uint64_t nCMPCTBLOCKVersion = 2;
            if (pfrom->GetLocalServices() & NODE_WITNESS)
                pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
            nCMPCTBLOCKVersion = 1;
            pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
    else if (strCommand == NetMsgType::ADDR)
        vector<CAddress> vAddr;

        // Don't want addr from older versions unless seeding
        if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
        if (vAddr.size() > 1000)
            Misbehaving(pfrom->GetId(), 20);
            return error("message addr size() = %u", vAddr.size());

        // Store the new addresses
        vector<CAddress> vAddrOk;
        int64_t nNow = GetAdjustedTime();
        int64_t nSince = nNow - 10 * 60;
        BOOST_FOREACH(CAddress& addr, vAddr)
            boost::this_thread::interruption_point();

            if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
            if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                addr.nTime = nNow - 5 * 24 * 60 * 60;
            pfrom->AddAddressKnown(addr);
            bool fReachable = IsReachable(addr);
            if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
                // Relay to a limited number of other nodes
                RelayAddress(addr, fReachable, connman);
            // Do not store addresses outside our network
                vAddrOk.push_back(addr);
        connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
        if (vAddr.size() < 1000)
            pfrom->fGetAddr = false;
        if (pfrom->fOneShot)
            pfrom->fDisconnect = true;
    else if (strCommand == NetMsgType::SENDHEADERS)
        State(pfrom->GetId())->fPreferHeaders = true;

    else if (strCommand == NetMsgType::SENDCMPCT)
        bool fAnnounceUsingCMPCTBLOCK = false;
        uint64_t nCMPCTBLOCKVersion = 0;
        vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
        if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
            // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
            if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
                State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
                State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
            if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
                State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
            if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
                if (pfrom->GetLocalServices() & NODE_WITNESS)
                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
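
    // INV: inventory announcements. Transactions are fetched unless we already have
    // them (or we are in blocks-only mode); announced blocks trigger a getheaders for
    // the missing header chain and, when close to synced, a direct block or compact
    // block fetch that is marked in-flight immediately.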
    else if (strCommand == NetMsgType::INV)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message inv size() = %u", vInv.size());

        bool fBlocksOnly = !fRelayTxes;

        // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
        if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
            fBlocksOnly = false;

        uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());

        std::vector<CInv> vToFetch;

        for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
            CInv &inv = vInv[nInv];

            boost::this_thread::interruption_point();

            bool fAlreadyHave = AlreadyHave(inv);
            LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);

            if (inv.type == MSG_TX) {
                inv.type |= nFetchFlags;

            if (inv.type == MSG_BLOCK) {
                UpdateBlockAvailability(pfrom->GetId(), inv.hash);
                if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
                    // First request the headers preceding the announced block. In the normal fully-synced
                    // case where a new block is announced that succeeds the current tip (no reorganization),
                    // there are no such headers.
                    // Secondly, and only when we are close to being synced, we request the announced block directly,
                    // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
                    // time the block arrives, the header chain leading up to it is already validated. Not
                    // doing this will result in the received block being rejected as an orphan in case it is
                    // not a direct successor.
                    pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
                    CNodeState *nodestate = State(pfrom->GetId());
                    if (CanDirectFetch(chainparams.GetConsensus()) &&
                        nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
                        (!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                        inv.type |= nFetchFlags;
                        if (nodestate->fSupportsDesiredCmpctVersion)
                            vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash));
                            vToFetch.push_back(inv);
                        // Mark block as in flight already, even though the actual "getdata" message only goes out
                        // later (within the same cs_main lock, though).
                        MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
                    LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);

                pfrom->AddInventoryKnown(inv);
                    LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
                else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())

            // Track requests for our stuff
            GetMainSignals().Inventory(inv.hash);

            if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) {
                Misbehaving(pfrom->GetId(), 50);
                return error("send buffer size() = %u", pfrom->nSendSize);

        if (!vToFetch.empty())
            pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
    else if (strCommand == NetMsgType::GETDATA)
        if (vInv.size() > MAX_INV_SZ)
            Misbehaving(pfrom->GetId(), 20);
            return error("message getdata size() = %u", vInv.size());

        if (fDebug || (vInv.size() != 1))
            LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);

        if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
            LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);

        pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
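
    // GETBLOCKS: block-inventory request. Locate the fork point of the peer's locator
    // on our active chain and inv up to the next batch of block hashes, stopping early
    // at hashStop, at pruned or too-old blocks, or at the batch limit (in which case
    // hashContinue is set so the next request resumes from there).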
    else if (strCommand == NetMsgType::GETBLOCKS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        // Find the last block the caller has in the main chain
        CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);

        // Send the rest of the chain
            pindex = chainActive.Next(pindex);
        LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            if (pindex->GetBlockHash() == hashStop)
                LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            // If pruning, don't inv blocks unless we have on disk and are likely to still have
            // for some reasonable time window (1 hour) that block relay might require.
            const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
                LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                // When this block is requested, we'll send an inv that'll
                // trigger the peer to getblocks the next batch of inventory.
                LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                pfrom->hashContinue = pindex->GetBlockHash();
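
    // GETBLOCKTXN: a compact-block peer asks for the transactions it was missing.
    // Only served for sufficiently recent blocks we still have on disk; the requested
    // indexes are read back from the block and returned in a BLOCKTXN message.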
    else if (strCommand == NetMsgType::GETBLOCKTXN)
        BlockTransactionsRequest req;

        BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
        if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
            LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);

        if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
            LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);

        assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));

        BlockTransactions resp(req);
        for (size_t i = 0; i < req.indexes.size(); i++) {
            if (req.indexes[i] >= block.vtx.size()) {
                Misbehaving(pfrom->GetId(), 100);
                LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
            resp.txn[i] = block.vtx[req.indexes[i]];
        pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
    else if (strCommand == NetMsgType::GETHEADERS)
        CBlockLocator locator;
        vRecv >> locator >> hashStop;

        if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
            LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);

        CNodeState *nodestate = State(pfrom->GetId());
        CBlockIndex* pindex = NULL;
        if (locator.IsNull())
            // If locator is null, return the hashStop block
            BlockMap::iterator mi = mapBlockIndex.find(hashStop);
            if (mi == mapBlockIndex.end())
            pindex = (*mi).second;
            // Find the last block the caller has in the main chain
            pindex = FindForkInGlobalIndex(chainActive, locator);
                pindex = chainActive.Next(pindex);

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        vector<CBlock> vHeaders;
        int nLimit = MAX_HEADERS_RESULTS;
        LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
        for (; pindex; pindex = chainActive.Next(pindex))
            vHeaders.push_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
        // pindex can be NULL either if we sent chainActive.Tip() OR
        // if our peer has chainActive.Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
        pfrom->PushMessage(NetMsgType::HEADERS, vHeaders);
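
    // TX: a relayed transaction. Try mempool acceptance; on success relay it and retry
    // any orphans that spent its outputs, on missing inputs store it as an orphan
    // (bounded by -maxorphantx), otherwise remember the rejection and, for DoS-scored
    // failures, penalize the sending peer.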
    else if (strCommand == NetMsgType::TX)
        // Stop processing the transaction early if
        // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
        if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
            LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);

        deque<COutPoint> vWorkQueue;
        vector<uint256> vEraseQueue;

        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);

        bool fMissingInputs = false;
        CValidationState state;

        pfrom->setAskFor.erase(inv.hash);
        mapAlreadyAskedFor.erase(inv.hash);

        if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
            mempool.check(pcoinsTip);
            RelayTransaction(tx, connman);
            for (unsigned int i = 0; i < tx.vout.size(); i++) {
                vWorkQueue.emplace_back(inv.hash, i);

            pfrom->nLastTXTime = GetTime();

            LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
                tx.GetHash().ToString(),
                mempool.size(), mempool.DynamicMemoryUsage() / 1000);

            // Recursively process any orphan transactions that depended on this one
            set<NodeId> setMisbehaving;
            while (!vWorkQueue.empty()) {
                auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
                vWorkQueue.pop_front();
                if (itByPrev == mapOrphanTransactionsByPrev.end())
                for (auto mi = itByPrev->second.begin();
                     mi != itByPrev->second.end();
                    const CTransaction& orphanTx = (*mi)->second.tx;
                    const uint256& orphanHash = orphanTx.GetHash();
                    NodeId fromPeer = (*mi)->second.fromPeer;
                    bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
                    // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
                    // anyone relaying LegitTxX banned)
                    CValidationState stateDummy;

                    if (setMisbehaving.count(fromPeer))
                    if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
                        LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
                        RelayTransaction(orphanTx, connman);
                        for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
                            vWorkQueue.emplace_back(orphanHash, i);
                        vEraseQueue.push_back(orphanHash);
                    else if (!fMissingInputs2)
                        if (stateDummy.IsInvalid(nDos) && nDos > 0)
                            // Punish peer that gave us an invalid orphan tx
                            Misbehaving(fromPeer, nDos);
                            setMisbehaving.insert(fromPeer);
                            LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
                        // Has inputs but not accepted to mempool
                        // Probably non-standard or insufficient fee/priority
                        LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
                        vEraseQueue.push_back(orphanHash);
                        if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) {
                            // Do not use rejection cache for witness transactions or
                            // witness-stripped transactions, as they can have been malleated.
                            // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                            assert(recentRejects);
                            recentRejects->insert(orphanHash);
                    mempool.check(pcoinsTip);

            BOOST_FOREACH(uint256 hash, vEraseQueue)
                EraseOrphanTx(hash);
        else if (fMissingInputs)
            bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
            BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                if (recentRejects->contains(txin.prevout.hash)) {
                    fRejectedParents = true;
            if (!fRejectedParents) {
                BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                    CInv _inv(MSG_TX, txin.prevout.hash);
                    pfrom->AddInventoryKnown(_inv);
                    if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
                AddOrphanTx(tx, pfrom->GetId());

                // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
                unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                    LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
                LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
            if (tx.wit.IsNull() && !state.CorruptionPossible()) {
                // Do not use rejection cache for witness transactions or
                // witness-stripped transactions, as they can have been malleated.
                // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                assert(recentRejects);
                recentRejects->insert(tx.GetHash());

            if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
                // Always relay transactions received from whitelisted peers, even
                // if they were already in the mempool or rejected from it due
                // to policy, allowing the node to function as a gateway for
                // nodes hidden behind it.
                //
                // Never relay transactions that we would assign a non-zero DoS
                // score for, as we expect peers to do the same with us in that
                if (!state.IsInvalid(nDoS) || nDoS == 0) {
                    LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
                    RelayTransaction(tx, connman);
                    LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));

        if (state.IsInvalid(nDoS))
            LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
                FormatStateMessage(state));
            if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
                pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                   state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
                Misbehaving(pfrom->GetId(), nDoS);
        FlushStateToDisk(state, FLUSH_STATE_PERIODIC);
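
    // CMPCTBLOCK: a compact block announcement. Accept the header first, then either
    // reconstruct the block from our mempool plus the peer's short ids (requesting any
    // missing transactions via GETBLOCKTXN), fall back to a normal getdata, or treat it
    // like a plain headers announcement when we are too far from the tip.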
    else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
        CBlockHeaderAndShortTxIDs cmpctblock;
        vRecv >> cmpctblock;

        if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
            // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
            if (!IsInitialBlockDownload())
                pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());

        CBlockIndex *pindex = NULL;
        CValidationState state;
        if (!AcceptBlockHeader(cmpctblock.header, state, chainparams, &pindex)) {
            if (state.IsInvalid(nDoS)) {
                Misbehaving(pfrom->GetId(), nDoS);
                LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id);

        // If AcceptBlockHeader returned true, it set pindex
        UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());

        std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
        bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();

        if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here

        if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
                pindex->nTx != 0) { // We had this block at some point, but pruned it
            if (fAlreadyInFlight) {
                // We requested this block for some reason, but our mempool will probably be useless
                // so we just grab the block via normal getdata
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                pfrom->PushMessage(NetMsgType::GETDATA, vInv);

        // If we're not close to tip yet, give up and let parallel block fetch work its magic
        if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))

        CNodeState *nodestate = State(pfrom->GetId());

        if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
            // Don't bother trying to process compact blocks from v1 peers
            // after segwit activates.

        // We want to be a bit conservative just to be extra careful about DoS
        // possibilities in compact block processing...
        if (pindex->nHeight <= chainActive.Height() + 2) {
            if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                    (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
                list<QueuedBlock>::iterator *queuedBlockIt = NULL;
                if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) {
                    if (!(*queuedBlockIt)->partialBlock)
                        (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
                        // The block was already in flight using compact blocks from the same peer
                        LogPrint("net", "Peer sent us compact block we were already syncing!\n");

                PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
                ReadStatus status = partialBlock.InitData(cmpctblock);
                if (status == READ_STATUS_INVALID) {
                    MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
                    Misbehaving(pfrom->GetId(), 100);
                    LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
                } else if (status == READ_STATUS_FAILED) {
                    // Duplicate txindexes, the block is now in-flight, so just request it
                    std::vector<CInv> vInv(1);
                    vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                    pfrom->PushMessage(NetMsgType::GETDATA, vInv);

                if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                    // with this block! Let's get them to announce using compact blocks in the future.
                    MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);

                BlockTransactionsRequest req;
                for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                    if (!partialBlock.IsTxAvailable(i))
                        req.indexes.push_back(i);
                if (req.indexes.empty()) {
                    // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
                    BlockTransactions txn;
                    txn.blockhash = cmpctblock.header.GetHash();
                    CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
                    return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
                    req.blockhash = pindex->GetBlockHash();
                    pfrom->PushMessage(NetMsgType::GETBLOCKTXN, req);
            if (fAlreadyInFlight) {
                // We requested this block, but its far into the future, so our
                // mempool will probably be useless - request the block normally
                std::vector<CInv> vInv(1);
                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                pfrom->PushMessage(NetMsgType::GETDATA, vInv);
                // If this was an announce-cmpctblock, we want the same treatment as a header message
                // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
                std::vector<CBlock> headers;
                headers.push_back(cmpctblock.header);
                CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
                vHeadersMsg << headers;
                return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);

        CheckBlockIndex(chainparams.GetConsensus());
    else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
        BlockTransactions resp;

        bool fBlockRead = false;

        map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
        if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
                it->second.first != pfrom->GetId()) {
            LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);

        PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
        ReadStatus status = partialBlock.FillBlock(block, resp.txn);
        if (status == READ_STATUS_INVALID) {
            MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
            Misbehaving(pfrom->GetId(), 100);
            LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
        } else if (status == READ_STATUS_FAILED) {
            // Might have collided, fall back to getdata now :(
            std::vector<CInv> invs;
            invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
            pfrom->PushMessage(NetMsgType::GETDATA, invs);
        } // Don't hold cs_main when we call into ProcessNewBlock

        CValidationState state;
        ProcessNewBlock(state, chainparams, pfrom, &block, false, NULL);
        if (state.IsInvalid(nDoS)) {
            assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                               state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
                Misbehaving(pfrom->GetId(), nDoS);
5882 else if (strCommand
== NetMsgType::HEADERS
&& !fImporting
&& !fReindex
) // Ignore headers received while importing
5884 std::vector
<CBlockHeader
> headers
;
5886 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
5887 unsigned int nCount
= ReadCompactSize(vRecv
);
5888 if (nCount
> MAX_HEADERS_RESULTS
) {
5890 Misbehaving(pfrom
->GetId(), 20);
5891 return error("headers message size = %u", nCount
);
5893 headers
.resize(nCount
);
5894 for (unsigned int n
= 0; n
< nCount
; n
++) {
5895 vRecv
>> headers
[n
];
5896 ReadCompactSize(vRecv
); // ignore tx count; assume it is 0.
5903 // Nothing interesting. Stop asking this peers for more headers.
5907 CNodeState
*nodestate
= State(pfrom
->GetId());
5909 // If this looks like it could be a block announcement (nCount <
5910 // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
5912 // - Send a getheaders message in response to try to connect the chain.
5913 // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
5914 // don't connect before giving DoS points
5915 // - Once a headers message is received that is valid and does connect,
5916 // nUnconnectingHeaders gets reset back to 0.
        if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
            nodestate->nUnconnectingHeaders++;
            pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
            LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                    headers[0].GetHash().ToString(),
                    headers[0].hashPrevBlock.ToString(),
                    pindexBestHeader->nHeight,
                    pfrom->id, nodestate->nUnconnectingHeaders);
            // Set hashLastUnknownBlock for this peer, so that if we
            // eventually get the headers - even from a different peer -
            // we can use this peer to download.
            UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());

            if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
                Misbehaving(pfrom->GetId(), 20);
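            // Added note (not part of the original source): each full batch of
            // MAX_UNCONNECTING_HEADERS unconnecting headers messages costs the peer 20
            // misbehaviour points, so a peer that keeps sending headers we cannot connect
            // is eventually disconnected/banned once it crosses the -banscore threshold,
            // while an honest peer whose chain finally connects has the counter reset below.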
        CBlockIndex *pindexLast = NULL;
        BOOST_FOREACH(const CBlockHeader& header, headers) {
            CValidationState state;
            if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
                Misbehaving(pfrom->GetId(), 20);
                return error("non-continuous headers sequence");

            if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {

                if (state.IsInvalid(nDoS)) {

                    Misbehaving(pfrom->GetId(), nDoS);
                    return error("invalid header received");
        if (nodestate->nUnconnectingHeaders > 0) {
            LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);

        nodestate->nUnconnectingHeaders = 0;

        UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());

        if (nCount == MAX_HEADERS_RESULTS) {
            // Headers message had its maximum size; the peer may have more headers.
            // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
            // from there instead.
            LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
            pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
        bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
        // If this set of headers is valid and ends in a block with at least as
        // much work as our tip, download as much as possible.
        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
            vector<CBlockIndex*> vToFetch;
            CBlockIndex *pindexWalk = pindexLast;
            // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
            while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                        !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                        (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                    // We don't have this block, and it's not yet in flight.
                    vToFetch.push_back(pindexWalk);

                pindexWalk = pindexWalk->pprev;

            // If pindexWalk still isn't on our main chain, we're looking at a
            // very large reorg at a time we think we're close to caught up to
            // the main chain -- this shouldn't really happen. Bail out on the
            // direct fetch and rely on parallel download instead.
            if (!chainActive.Contains(pindexWalk)) {
                LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
                        pindexLast->GetBlockHash().ToString(),
                        pindexLast->nHeight);
                vector<CInv> vGetData;
                // Download as much as possible, from earliest to latest.
                BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
                    if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                        // Can't download any more from this peer

                    uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
                    vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                    MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
                    LogPrint("net", "Requesting block %s from peer=%d\n",
                            pindex->GetBlockHash().ToString(), pfrom->id);

                if (vGetData.size() > 1) {
                    LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
                            pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);

                if (vGetData.size() > 0) {
                    if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                        // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                        // with this block! Let's get them to announce using compact blocks in the future.
                        MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
                        // In any case, we want to download using a compact block, not a regular one
                        vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);

                    pfrom->PushMessage(NetMsgType::GETDATA, vGetData);

        CheckBlockIndex(chainparams.GetConsensus());
    else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing

        LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id);

        CValidationState state;
        // Process all blocks from whitelisted peers, even if not requested,
        // unless we're still syncing with the network.
        // Such an unrequested block may still be processed, subject to the
        // conditions in AcceptBlock().
        bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
        ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL);

        if (state.IsInvalid(nDoS)) {
            assert(state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
            pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                               state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());

            Misbehaving(pfrom->GetId(), nDoS);
    else if (strCommand == NetMsgType::GETADDR)

        // This asymmetric behavior for inbound and outbound connections was introduced
        // to prevent a fingerprinting attack: an attacker can send specific fake addresses
        // to users' AddrMan and later request them by sending getaddr messages.
        // Making nodes which are behind NAT and can only make outgoing connections ignore
        // the getaddr message mitigates the attack.
        if (!pfrom->fInbound) {
            LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);

        // Only send one GetAddr response per connection to reduce resource waste
        // and discourage addr stamping of INV announcements.
        if (pfrom->fSentAddr) {
            LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);

        pfrom->fSentAddr = true;

        pfrom->vAddrToSend.clear();
        vector<CAddress> vAddr = connman.GetAddresses();
        FastRandomContext insecure_rand;
        BOOST_FOREACH(const CAddress &addr, vAddr)
            pfrom->PushAddress(addr, insecure_rand);
    else if (strCommand == NetMsgType::MEMPOOL)

        if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)

            LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;

        if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)

            LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
            pfrom->fDisconnect = true;

        LOCK(pfrom->cs_inventory);
        pfrom->fSendMempool = true;
    else if (strCommand == NetMsgType::PING)

        if (pfrom->nVersion > BIP0031_VERSION)

            // Echo the message back with the nonce. This allows for two useful features:
            //
            // 1) A remote node can quickly check if the connection is operational
            // 2) Remote nodes can measure the latency of the network thread. If this node
            //    is overloaded it won't respond to pings quickly and the remote node can
            //    avoid sending us more work, like chain download requests.
            //
            // The nonce stops the remote getting confused between different pings: without
            // it, if the remote node sends a ping once per second and this node takes 5
            // seconds to respond to each, the 5th ping the remote sends would appear to
            // return very quickly.
            pfrom->PushMessage(NetMsgType::PONG, nonce);
    else if (strCommand == NetMsgType::PONG)

        int64_t pingUsecEnd = nTimeReceived;

        size_t nAvail = vRecv.in_avail();
        bool bPingFinished = false;
        std::string sProblem;

        if (nAvail >= sizeof(nonce)) {

            // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
            if (pfrom->nPingNonceSent != 0) {
                if (nonce == pfrom->nPingNonceSent) {
                    // Matching pong received, this ping is no longer outstanding
                    bPingFinished = true;
                    int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                    if (pingUsecTime > 0) {
                        // Successful ping time measurement, replace previous
                        pfrom->nPingUsecTime = pingUsecTime;
                        pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);

                        // This should never happen
                        sProblem = "Timing mishap";

                    // Nonce mismatches are normal when pings are overlapping
                    sProblem = "Nonce mismatch";

                    // This is most likely a bug in another implementation somewhere; cancel this ping
                    bPingFinished = true;
                    sProblem = "Nonce zero";

                sProblem = "Unsolicited pong without ping";

            // This is most likely a bug in another implementation somewhere; cancel this ping
            bPingFinished = true;
            sProblem = "Short payload";

        if (!(sProblem.empty())) {
            LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",

                pfrom->nPingNonceSent,

        if (bPingFinished) {
            pfrom->nPingNonceSent = 0;
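        // Added note (not part of the original source): nPingUsecTime holds the most
        // recent round-trip measurement, while nMinPingUsecTime keeps the lowest value
        // ever observed for this peer; elsewhere in the codebase the minimum is the more
        // useful signal, for example when deciding which inbound peers to protect from
        // eviction.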
    else if (strCommand == NetMsgType::FILTERLOAD)

        CBloomFilter filter;

        if (!filter.IsWithinSizeConstraints())

            // There is no excuse for sending a too-large filter

            Misbehaving(pfrom->GetId(), 100);

        LOCK(pfrom->cs_filter);
        delete pfrom->pfilter;
        pfrom->pfilter = new CBloomFilter(filter);
        pfrom->pfilter->UpdateEmptyFull();
        pfrom->fRelayTxes = true;
    else if (strCommand == NetMsgType::FILTERADD)

        vector<unsigned char> vData;

        // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
        // and thus, the maximum size any matched object can have) in a filteradd message

        if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {

            LOCK(pfrom->cs_filter);
            if (pfrom->pfilter) {
                pfrom->pfilter->insert(vData);

            Misbehaving(pfrom->GetId(), 100);
    else if (strCommand == NetMsgType::FILTERCLEAR)

        LOCK(pfrom->cs_filter);
        delete pfrom->pfilter;
        pfrom->pfilter = new CBloomFilter();
        pfrom->fRelayTxes = true;
    else if (strCommand == NetMsgType::REJECT)

        string strMsg; unsigned char ccode; string strReason;
        vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);

        ss << strMsg << " code " << itostr(ccode) << ": " << strReason;

        if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)

            ss << ": hash " << hash.ToString();

        LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
        } catch (const std::ios_base::failure&) {
            // Avoid feedback loops by preventing reject messages from triggering a new reject message.
            LogPrint("net", "Unparseable reject message received\n");
    else if (strCommand == NetMsgType::FEEFILTER) {
        CAmount newFeeFilter = 0;
        vRecv >> newFeeFilter;
        if (MoneyRange(newFeeFilter)) {

            LOCK(pfrom->cs_feeFilter);
            pfrom->minFeeFilter = newFeeFilter;

            LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);

    else if (strCommand == NetMsgType::NOTFOUND) {
        // We do not care about the NOTFOUND message, but logging an Unknown Command
        // message would be undesirable as we transmit it ourselves.

        // Ignore unknown commands for extensibility
        LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom, CConnman& connman)

    const CChainParams& chainparams = Params();
    unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();

    // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());

    //   (4) message start

    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom, chainparams.GetConsensus(), connman);

    // this maintains the order of responses
    if (!pfrom->vRecvGetData.empty()) return fOk;
    std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
    while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= nMaxSendBufferSize)

        CNetMessage& msg = *it;

        // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
        //            msg.hdr.nMessageSize, msg.vRecv.size(),
        //            msg.complete() ? "Y" : "N");

        // end, if an incomplete message is found
        if (!msg.complete())

        // at this point, any failure means we can delete the current message

        // Scan for message start
        if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
            LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
        CMessageHeader& hdr = msg.hdr;
        if (!hdr.IsValid(chainparams.MessageStart()))

            LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);

        string strCommand = hdr.GetCommand();

        unsigned int nMessageSize = hdr.nMessageSize;

        CDataStream& vRecv = msg.vRecv;
        uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
        if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)

            LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
                SanitizeString(strCommand), nMessageSize,
                HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
                HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
            fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman);
            boost::this_thread::interruption_point();

        catch (const std::ios_base::failure& e)

            pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
            if (strstr(e.what(), "end of data"))

                // Allow exceptions from under-length message on vRecv
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());

            else if (strstr(e.what(), "size too large"))

                // Allow exceptions from over-long size
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());

            else if (strstr(e.what(), "non-canonical ReadCompactSize()"))

                // Allow exceptions from non-canonical encoding
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());

                PrintExceptionContinue(&e, "ProcessMessages()");

        catch (const boost::thread_interrupted&) {

        catch (const std::exception& e) {
            PrintExceptionContinue(&e, "ProcessMessages()");

            PrintExceptionContinue(NULL, "ProcessMessages()");

            LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);

    // In case the connection got shut down, its receive buffer was wiped
    if (!pfrom->fDisconnect)
        pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
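    // Added note (not part of the original source): each P2P message processed above is
    // framed by a 24-byte header -- 4 bytes of network magic (pchMessageStart), a 12-byte
    // command name, a 4-byte payload length and a 4-byte checksum taken from the leading
    // bytes of the double-SHA256 of the payload -- which is what the MessageStart,
    // IsValid() and checksum comparisons verify before the payload is handed to
    // ProcessMessage().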
class CompareInvMempoolOrder

    CompareInvMempoolOrder(CTxMemPool *_mempool)

    bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)

        /* As std::make_heap produces a max-heap, we want the entries with the
         * fewest ancestors/highest fee to sort later. */
        return mp->CompareDepthAndScore(*b, *a);
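// Added note (not part of the original source): SendMessages() below builds a heap of
// set iterators with this comparator and pops at most INVENTORY_BROADCAST_MAX entries
// per trickle, so only the transactions actually announced need to be ordered rather
// than the whole setInventoryTxToSend; swapping a and b in the comparison makes the
// preferred entries (fewest unconfirmed ancestors, then highest score) come off the
// max-heap first.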
bool SendMessages(CNode* pto, CConnman& connman)

    const Consensus::Params& consensusParams = Params().GetConsensus();

    // Don't send anything until we get its version message
    if (pto->nVersion == 0)

    bool pingSend = false;
    if (pto->fPingQueued) {
        // RPC ping request by user

    if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
        // Ping automatically sent as a latency probe & keepalive.

    if (pingSend && !pto->fDisconnect) {

        while (nonce == 0) {
            GetRandBytes((unsigned char*)&nonce, sizeof(nonce));

        pto->fPingQueued = false;
        pto->nPingUsecStart = GetTimeMicros();
        if (pto->nVersion > BIP0031_VERSION) {
            pto->nPingNonceSent = nonce;
            pto->PushMessage(NetMsgType::PING, nonce);

            // Peer is too old to support ping command with nonce, pong will never arrive.
            pto->nPingNonceSent = 0;
            pto->PushMessage(NetMsgType::PING);
    TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()

    // Address refresh broadcast
    int64_t nNow = GetTimeMicros();
    if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
        AdvertiseLocal(pto);
        pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);

    if (pto->nNextAddrSend < nNow) {
        pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
        vector<CAddress> vAddr;
        vAddr.reserve(pto->vAddrToSend.size());
        BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)

            if (!pto->addrKnown.contains(addr.GetKey()))

                pto->addrKnown.insert(addr.GetKey());
                vAddr.push_back(addr);
                // receiver rejects addr messages larger than 1000
                if (vAddr.size() >= 1000)

                    pto->PushMessage(NetMsgType::ADDR, vAddr);

        pto->vAddrToSend.clear();

            pto->PushMessage(NetMsgType::ADDR, vAddr);
        // we only send the big addr message once
        if (pto->vAddrToSend.capacity() > 40)
            pto->vAddrToSend.shrink_to_fit();
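    // Added note (not part of the original source): PoissonNextSend() schedules the next
    // broadcast at an exponentially distributed delay around the given average interval,
    // so address (and, further down, inventory) announcements go out at unpredictable
    // times; this makes it harder for an observer to infer where a message originated
    // from its relay timing.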
    CNodeState &state = *State(pto->GetId());
    if (state.fShouldBan) {
        if (pto->fWhitelisted)
            LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());

        pto->fDisconnect = true;
        if (pto->addr.IsLocal())
            LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());

            connman.Ban(pto->addr, BanReasonNodeMisbehaving);

        state.fShouldBan = false;

    BOOST_FOREACH(const CBlockReject& reject, state.rejects)
        pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
    state.rejects.clear();
    if (pindexBestHeader == NULL)
        pindexBestHeader = chainActive.Tip();
    bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
    if (!state.fSyncStarted && !pto->fClient && !pto->fDisconnect && !fImporting && !fReindex) {
        // Only actively request headers from a single peer, unless we're close to today.
        if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
            state.fSyncStarted = true;

            const CBlockIndex *pindexStart = pindexBestHeader;
            /* If possible, start at the block preceding the currently
               best known header.  This ensures that we always get a
               non-empty list of headers back as long as the peer
               is up-to-date.  With a non-empty response, we can initialise
               the peer's known best block.  This wouldn't be possible
               if we requested starting at pindexBestHeader and
               got back an empty response.  */
            if (pindexStart->pprev)
                pindexStart = pindexStart->pprev;
            LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
            pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
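            // Added note (not part of the original source): the getheaders message carries
            // a block locator (a thinning list of hashes back toward genesis, built by
            // chainActive.GetLocator()) plus a zero stop hash, which asks the peer for as
            // many consecutive headers as it can provide, up to MAX_HEADERS_RESULTS (2000,
            // per the headers handler above), starting after the last locator hash it
            // recognizes.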
    // Resend wallet transactions that haven't gotten in a block yet
    // Except during reindex, importing and IBD, when old wallet
    // transactions become unconfirmed and spam other nodes.
    if (!fReindex && !fImporting && !IsInitialBlockDownload())

        GetMainSignals().Broadcast(nTimeBestReceived, &connman);
    // Try sending block announcements via headers

    // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
    // list of block hashes we're relaying, and our peer wants
    // headers announcements, then find the first header
    // not yet known to our peer but would connect, and send.
    // If no header would connect, or if we have too many
    // blocks, or if the peer doesn't want headers, just
    // add all to the inv queue.
    LOCK(pto->cs_inventory);
    vector<CBlock> vHeaders;
    bool fRevertToInv = ((!state.fPreferHeaders &&
                         (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
                        pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
    CBlockIndex *pBestIndex = NULL; // last header queued for delivery
    ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
    if (!fRevertToInv) {
        bool fFoundStartingHeader = false;
        // Try to find first header that our peer doesn't have, and
        // then send all headers past that one.  If we come across any
        // headers that aren't on chainActive, give up.
        BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
            BlockMap::iterator mi = mapBlockIndex.find(hash);
            assert(mi != mapBlockIndex.end());
            CBlockIndex *pindex = mi->second;
            if (chainActive[pindex->nHeight] != pindex) {
                // Bail out if we reorged away from this block
                fRevertToInv = true;

            if (pBestIndex != NULL && pindex->pprev != pBestIndex) {
                // This means that the list of blocks to announce doesn't
                // connect to each other.
                // This shouldn't really be possible to hit during
                // regular operation (because reorgs should take us to
                // a chain that has some block not on the prior chain,
                // which should be caught by the prior check), but one
                // way this could happen is by using invalidateblock /
                // reconsiderblock repeatedly on the tip, causing it to
                // be added multiple times to vBlockHashesToAnnounce.
                // Robustly deal with this rare situation by reverting
                // to an inv.
                fRevertToInv = true;

            pBestIndex = pindex;
            if (fFoundStartingHeader) {
                // add this to the headers message
                vHeaders.push_back(pindex->GetBlockHeader());
            } else if (PeerHasHeader(&state, pindex)) {
                continue; // keep looking for the first new block
            } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
                // Peer doesn't have this header but they do have the prior one.
                // Start sending headers.
                fFoundStartingHeader = true;
                vHeaders.push_back(pindex->GetBlockHeader());

                // Peer doesn't have this header or the prior one -- nothing will
                // connect, so bail out.
                fRevertToInv = true;
    if (!fRevertToInv && !vHeaders.empty()) {
        if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
            // We only send up to 1 block as header-and-ids, as otherwise
            // it probably means we're doing an initial-ish-sync or they're slow
            LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
                    vHeaders.front().GetHash().ToString(), pto->id);
            //TODO: Shouldn't need to reload block from disk, but requires refactor

            assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
            CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
            pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
            state.pindexBestHeaderSent = pBestIndex;
        } else if (state.fPreferHeaders) {
            if (vHeaders.size() > 1) {
                LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,

                        vHeaders.front().GetHash().ToString(),
                        vHeaders.back().GetHash().ToString(), pto->id);

                LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
                        vHeaders.front().GetHash().ToString(), pto->id);

            pto->PushMessage(NetMsgType::HEADERS, vHeaders);
            state.pindexBestHeaderSent = pBestIndex;

            fRevertToInv = true;
        // If falling back to using an inv, just try to inv the tip.
        // The last entry in vBlockHashesToAnnounce was our tip at some point

        if (!pto->vBlockHashesToAnnounce.empty()) {
            const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
            BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
            assert(mi != mapBlockIndex.end());
            CBlockIndex *pindex = mi->second;

            // Warn if we're announcing a block that is not on the main chain.
            // This should be very rare and could be optimized out.
            // Just log for now.
            if (chainActive[pindex->nHeight] != pindex) {
                LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
                    hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());

            // If the peer's chain has this block, don't inv it back.
            if (!PeerHasHeader(&state, pindex)) {
                pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
                    pto->id, hashToAnnounce.ToString());

        pto->vBlockHashesToAnnounce.clear();
    // Message: inventory

    LOCK(pto->cs_inventory);
    vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));

    BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
        vInv.push_back(CInv(MSG_BLOCK, hash));
        if (vInv.size() == MAX_INV_SZ) {
            pto->PushMessage(NetMsgType::INV, vInv);

    pto->vInventoryBlockToSend.clear();
    // Check whether periodic sends should happen
    bool fSendTrickle = pto->fWhitelisted;
    if (pto->nNextInvSend < nNow) {
        fSendTrickle = true;
        // Use half the delay for outbound peers, as there is less privacy concern for them.
        pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);

    // Time to send but the peer has requested we not relay transactions.

        LOCK(pto->cs_filter);
        if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
    // Respond to BIP35 mempool requests
    if (fSendTrickle && pto->fSendMempool) {
        auto vtxinfo = mempool.infoAll();
        pto->fSendMempool = false;
        CAmount filterrate = 0;

        LOCK(pto->cs_feeFilter);
        filterrate = pto->minFeeFilter;

        LOCK(pto->cs_filter);

        for (const auto& txinfo : vtxinfo) {
            const uint256& hash = txinfo.tx->GetHash();
            CInv inv(MSG_TX, hash);
            pto->setInventoryTxToSend.erase(hash);

            if (txinfo.feeRate.GetFeePerK() < filterrate)

            if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;

            pto->filterInventoryKnown.insert(hash);
            vInv.push_back(inv);
            if (vInv.size() == MAX_INV_SZ) {
                pto->PushMessage(NetMsgType::INV, vInv);

        pto->timeLastMempoolReq = GetTime();
    // Determine transactions to relay

    // Produce a vector with all candidates for sending
    vector<std::set<uint256>::iterator> vInvTx;
    vInvTx.reserve(pto->setInventoryTxToSend.size());
    for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
        vInvTx.push_back(it);

    CAmount filterrate = 0;

    LOCK(pto->cs_feeFilter);
    filterrate = pto->minFeeFilter;

    // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
    // A heap is used so that not all items need sorting if only a few are being sent.
    CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
    std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
    // No reason to drain out at many times the network's capacity,
    // especially since we have many peers and some will draw much shorter delays.
    unsigned int nRelayedTransactions = 0;
    LOCK(pto->cs_filter);
    while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
        // Fetch the top element from the heap
        std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
        std::set<uint256>::iterator it = vInvTx.back();

        // Remove it from the to-be-sent set
        pto->setInventoryTxToSend.erase(it);
        // Check if not in the filter already
        if (pto->filterInventoryKnown.contains(hash)) {

        // Not in the mempool anymore? don't bother sending it.
        auto txinfo = mempool.info(hash);

        if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {

        if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;

        vInv.push_back(CInv(MSG_TX, hash));
        nRelayedTransactions++;

        // Expire old relay messages
        while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)

            mapRelay.erase(vRelayExpiration.front().second);
            vRelayExpiration.pop_front();

        auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));

        vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));

        if (vInv.size() == MAX_INV_SZ) {
            pto->PushMessage(NetMsgType::INV, vInv);

        pto->filterInventoryKnown.insert(hash);

    pto->PushMessage(NetMsgType::INV, vInv);
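    // Added note (not part of the original source): transactions announced here are also
    // copied into mapRelay so that a getdata arriving later can still be answered; the
    // paired vRelayExpiration entry (nNow + 15 * 60 * 1000000 microseconds, i.e. 15
    // minutes) is what the expiry loop above uses to evict stale entries.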
    // Detect whether we're stalling
    nNow = GetTimeMicros();
    if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
        // Stalling only triggers when the block download window cannot move. During normal steady state,
        // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
        // should only happen during initial block download.
        LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
        pto->fDisconnect = true;

    // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
    // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
    // We compensate for other peers to prevent killing off peers due to our own downstream link
    // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
    // to unreasonably increase our timeout.
    if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
        QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
        int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
        if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
            LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
            pto->fDisconnect = true;
    // Message: getdata (blocks)

    vector<CInv> vGetData;
    if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
        vector<CBlockIndex*> vToDownload;
        NodeId staller = -1;
        FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
        BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
            uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
            vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
            MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
            LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                pindex->nHeight, pto->id);

        if (state.nBlocksInFlight == 0 && staller != -1) {
            if (State(staller)->nStallingSince == 0) {
                State(staller)->nStallingSince = nNow;
                LogPrint("net", "Stall started peer=%d\n", staller);
    // Message: getdata (non-blocks)

    while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)

        const CInv& inv = (*pto->mapAskFor.begin()).second;
        if (!AlreadyHave(inv))

            LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
            vGetData.push_back(inv);
            if (vGetData.size() >= 1000)

                pto->PushMessage(NetMsgType::GETDATA, vGetData);

            // If we're not going to ask, don't expect a response.
            pto->setAskFor.erase(inv.hash);

        pto->mapAskFor.erase(pto->mapAskFor.begin());

    if (!vGetData.empty())
        pto->PushMessage(NetMsgType::GETDATA, vGetData);
    // Message: feefilter

    // We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
    if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
        !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
        CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
        int64_t timeNow = GetTimeMicros();
        if (timeNow > pto->nextSendTimeFeeFilter) {
            CAmount filterToSend = filterRounder.round(currentFilter);
            if (filterToSend != pto->lastSentFeeFilter) {
                pto->PushMessage(NetMsgType::FEEFILTER, filterToSend);
                pto->lastSentFeeFilter = filterToSend;

            pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);

        // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
        // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
        else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
            (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
            pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
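            // Added note (not part of the original source): filterRounder.round() quantizes
            // the raw mempool minimum fee into a small set of fee buckets (with some
            // randomization) before it is announced, so the exact state of our mempool is
            // not leaked to peers while they still learn which low-feerate transactions we
            // will not accept.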
std::string CBlockFileInfo::ToString() const {
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));

ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)

    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);

int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)

    return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
static const uint64_t MEMPOOL_DUMP_VERSION = 1;

bool LoadMempool(void)

    int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fopen((GetDataDir() / "mempool.dat").string().c_str(), "r");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");

    int64_t skipped = 0;

    int64_t nNow = GetTime();

        if (version != MEMPOOL_DUMP_VERSION) {

        double prioritydummy = 0;

            CAmount amountdelta = nFeeDelta;

                mempool.PrioritiseTransaction(tx.GetHash(), tx.GetHash().ToString(), prioritydummy, amountdelta);

            CValidationState state;
            if (nTime + nExpiryTimeout > nNow) {

                AcceptToMemoryPoolWithTime(mempool, state, tx, true, NULL, nTime);
                if (state.IsValid()) {

        std::map<uint256, CAmount> mapDeltas;

        for (const auto& i : mapDeltas) {
            mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second);

    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());

    LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped);
void DumpMempool(void)

    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;

        for (const auto &i : mempool.mapDeltas) {
            mapDeltas[i.first] = i.second.first;

        vinfo = mempool.infoAll();

    int64_t mid = GetTimeMicros();

        FILE* filestr = fopen((GetDataDir() / "mempool.dat.new").string().c_str(), "w");

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {

            file << (int64_t)i.nTime;
            file << (int64_t)i.nFeeDelta;
            mapDeltas.erase(i.tx->GetHash());

        FileCommit(file.Get());

        RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
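// Added note (not part of the original source): as written here, mempool.dat begins with
// a version word (MEMPOOL_DUMP_VERSION) and an entry count, followed by one record per
// mempool entry (the transaction itself in the elided part of the loop, plus its nTime
// and nFeeDelta). The fee deltas not erased in the loop presumably follow at the end,
// since LoadMempool() above reads a mapDeltas map back and re-applies those deltas via
// PrioritiseTransaction().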
        BlockMap::iterator it1 = mapBlockIndex.begin();
        for (; it1 != mapBlockIndex.end(); it1++)
            delete (*it1).second;
        mapBlockIndex.clear();

        // orphan transactions
        mapOrphanTransactions.clear();
        mapOrphanTransactionsByPrev.clear();

} instance_of_cmaincleanup;