// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
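
// Compact block relay (BIP 152): builds the short-ID summary a node sends for a
// new block, and reconstructs blocks from such summaries on the receiving side.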

#include "blockencodings.h"
#include "consensus/consensus.h"
#include "consensus/validation.h"
#include "chainparams.h"
#include "hash.h"
#include "random.h"
#include "streams.h"
#include "txmempool.h"
#include "validation.h"
#include "util.h"

#include <unordered_map>
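
// Build the compact-block representation of a full block (BIP 152): draw a fresh
// random nonce, prefill only the coinbase, and encode every other transaction as
// a 6-byte short ID (keyed by wtxid or txid depending on fUseWTXID).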
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID) :
        nonce(GetRand(std::numeric_limits<uint64_t>::max())),
        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
    FillShortTxIDSelector();
    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
    prefilledtxn[0] = {0, block.vtx[0]};
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction& tx = *block.vtx[i];
        shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash());
    }
}
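
// Derive the two SipHash keys (shorttxidk0/k1) used by GetShortID from a single
// SHA256 of the serialized block header and nonce.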
void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << header << nonce;
    CSHA256 hasher;
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
    uint256 shorttxidhash;
    hasher.Finalize(shorttxidhash.begin());
    shorttxidk0 = shorttxidhash.GetUint64(0);
    shorttxidk1 = shorttxidhash.GetUint64(1);
}
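
// Compute the short ID for a transaction hash: SipHash-2-4 of the hash keyed with
// shorttxidk0/k1, truncated to the low 48 bits (SHORTTXIDS_LENGTH == 6 bytes).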
uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const uint256& txhash) const {
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
    return SipHashUint256(shorttxidk0, shorttxidk1, txhash) & 0xffffffffffffL;
}
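
// Initialize block reconstruction from a received cmpctblock: record the prefilled
// transactions, build a short-ID -> position map, then fill in whatever matches
// from the mempool and the caller-supplied extra_txn pool. Any slots still empty
// afterwards must be requested from the peer (getblocktxn in BIP 152).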
ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<std::pair<uint256, CTransactionRef>>& extra_txn) {
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
        return READ_STATUS_INVALID;
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
        return READ_STATUS_INVALID;

    assert(header.IsNull() && txn_available.empty());
    header = cmpctblock.header;
    txn_available.resize(cmpctblock.BlockTxCount());

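    // Prefilled indexes are differentially encoded: each prefilledtxn[i].index holds
    // the gap from the previous prefilled transaction's absolute position in the block.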
    int32_t lastprefilledindex = -1;
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
            return READ_STATUS_INVALID;

        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; // index is a uint16_t, so it can't overflow here
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
            return READ_STATUS_INVALID;
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
            // If we are inserting a tx at an index greater than our full list of shorttxids
            // plus the number of prefilled txn we've inserted, then we have txn for which we
            // have neither a prefilled txn nor a shorttxid!
            return READ_STATUS_INVALID;
        }
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
    }
    prefilled_count = cmpctblock.prefilledtxn.size();

    // Calculate map of txids -> positions and check mempool to see what we have (or don't).
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
    // READ_STATUS_FAILED.
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
    uint16_t index_offset = 0;
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
        while (txn_available[i + index_offset])
            index_offset++;
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
        // To determine the chance that the number of entries in a bucket exceeds N,
        // we use the fact that the number of elements in a single bucket is
        // binomially distributed (with n = the number of shorttxids S, and p =
        // 1 / the number of buckets), that in the worst case the number of buckets is
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
        // and that the chance for any bucket to exceed N elements is at most
        // buckets * (the chance that any given bucket is above N elements).
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S, p=1/S), N)).
        // If we assume blocks of up to 16000 transactions, allowing 12 elements per bucket
        // should only fail once per ~1 million block transfers (per peer and connection).
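        // (As a rough check: with S = 16000 a bucket's count is approximately
        // Poisson(1), whose tail above 12 is ~6e-11; times 16000 buckets that is
        // ~1e-6, matching the once-per-~million figure above.)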
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
            return READ_STATUS_FAILED;
    }
    // TODO: in the shortid-collision case, we should instead request both transactions
    // which collided. Falling back to full-block-request here is overkill.
    if (shorttxids.size() != cmpctblock.shorttxids.size())
        return READ_STATUS_FAILED; // Short ID collision

    std::vector<bool> have_txn(txn_available.size());
    {
        LOCK(pool->cs);
        const std::vector<std::pair<uint256, CTxMemPool::txiter>>& vTxHashes = pool->vTxHashes;
        for (size_t i = 0; i < vTxHashes.size(); i++) {
            uint64_t shortid = cmpctblock.GetShortID(vTxHashes[i].first);
            std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
            if (idit != shorttxids.end()) {
                if (!have_txn[idit->second]) {
                    txn_available[idit->second] = vTxHashes[i].second->GetSharedTx();
                    have_txn[idit->second] = true;
                    mempool_count++;
                } else {
                    // If we find two mempool txn that match the short id, just request it.
                    // This should be rare enough that the extra bandwidth doesn't matter,
                    // but eating a round-trip due to FillBlock failure would be annoying.
                    if (txn_available[idit->second]) {
                        txn_available[idit->second].reset();
                        mempool_count--;
                    }
                }
            }
            // Though ideally we'd continue scanning for the two-txn-match-shortid case,
            // the performance win of an early exit here is too good to pass up and worth
            // the extra risk.
            if (mempool_count == shorttxids.size())
                break;
        }
    }

    for (size_t i = 0; i < extra_txn.size(); i++) {
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = extra_txn[i].second;
                have_txn[idit->second] = true;
                mempool_count++;
                extra_count++;
            } else {
                // If we find two mempool/extra txn that match the short id, just
                // request it. This should be rare enough that the extra bandwidth
                // doesn't matter, but eating a round-trip due to FillBlock failure
                // would be annoying.
                // Note that we don't want duplication between extra_txn and mempool to
                // trigger this case, so we compare witness hashes first.
                if (txn_available[idit->second] &&
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i].second->GetWitnessHash()) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                    extra_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }

    LogPrint(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));

    return READ_STATUS_OK;
}
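
// Report whether the transaction at the given block position is already available
// (prefilled, or matched from the mempool/extra pool during InitData).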
bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
    assert(!header.IsNull());
    assert(index < txn_available.size());
    return txn_available[index] != nullptr;
}
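
// Assemble and check the full block: already-available transactions are moved into
// place and the remaining gaps are filled, in order, from vtx_missing. The internal
// state is cleared so FillBlock cannot run twice on the same download.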
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) {
    assert(!header.IsNull());
    uint256 hash = header.GetHash();
    block = header;
    block.vtx.resize(txn_available.size());

    size_t tx_missing_offset = 0;
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (!txn_available[i]) {
            if (vtx_missing.size() <= tx_missing_offset)
                return READ_STATUS_INVALID;
            block.vtx[i] = vtx_missing[tx_missing_offset++];
        } else
            block.vtx[i] = std::move(txn_available[i]);
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    CValidationState state;
    if (!CheckBlock(block, state, Params().GetConsensus())) {
        // TODO: We really want to just check the merkle tree manually here,
        // but that is expensive, and CheckBlock caches a block's
        // "checked-status" (in the CBlock?). CBlock should be able to
        // check its own merkle root and cache that check.
        if (state.CorruptionPossible())
            return READ_STATUS_FAILED; // Possible short ID collision
        return READ_STATUS_CHECKBLOCK_FAILED;
    }

    LogPrint(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
    if (vtx_missing.size() < 5) {
        for (const auto& tx : vtx_missing) {
            LogPrint(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
        }
    }

    return READ_STATUS_OK;
}