Dash Core Source Documentation (0.16.0.1)

Find detailed information regarding the Dash Core source code.

net_processing.cpp
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2016 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <arith_uint256.h>
10 #include <blockencodings.h>
11 #include <chainparams.h>
12 #include <consensus/validation.h>
13 #include <hash.h>
14 #include <init.h>
15 #include <validation.h>
16 #include <merkleblock.h>
17 #include <netmessagemaker.h>
18 #include <netbase.h>
19 #include <policy/fees.h>
20 #include <policy/policy.h>
21 #include <primitives/block.h>
22 #include <primitives/transaction.h>
23 #include <random.h>
24 #include <reverse_iterator.h>
25 #include <scheduler.h>
26 #include <tinyformat.h>
27 #include <txdb.h>
28 #include <txmempool.h>
29 #include <ui_interface.h>
30 #include <util.h>
31 #include <utilmoneystr.h>
32 #include <utilstrencodings.h>
33 
34 #include <memory>
35 
36 #include <spork.h>
37 #include <governance/governance.h>
41 #ifdef ENABLE_WALLET
43 #endif // ENABLE_WALLET
45 
46 #include <evo/deterministicmns.h>
47 #include <evo/mnauth.h>
48 #include <evo/simplifiedmns.h>
53 #include <llmq/quorums_init.h>
55 #include <llmq/quorums_signing.h>
57 
58 #if defined(NDEBUG)
59 # error "Dash Core cannot be compiled without assertions."
60 #endif
61 
63 static constexpr int32_t MAX_PEER_OBJECT_IN_FLIGHT = 100;
65 static constexpr int32_t MAX_PEER_OBJECT_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
67 static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}};
69 static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
71 static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{std::chrono::seconds{2}};
73 static constexpr int64_t TX_EXPIRY_INTERVAL_FACTOR = 10;
74 static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY,
75 "To preserve security, MAX_GETDATA_RANDOM_DELAY should not exceed INBOUND_PEER_DELAY");
77 static const unsigned int MAX_GETDATA_SZ = 1000;
78 
79 std::atomic<int64_t> nTimeBestReceived(0); // Used only to inform the wallet of when we last received a block
81 
82 struct IteratorComparator
83 {
84  template<typename I>
85  bool operator()(const I& a, const I& b) const
86  {
87  return &(*a) < &(*b);
88  }
89 };
90 
91 struct COrphanTx {
92  // When modifying, adapt the copy of this definition in tests/DoS_tests.
93  CTransactionRef tx;
94  NodeId fromPeer;
95  int64_t nTimeExpire;
96  size_t nTxSize;
97 };
99 std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
100 std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
102 void EraseOrphansFor(NodeId peer);
103 
104 static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
105 static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
106 
107 static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
108 
111 static const int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
112 
115 static const int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
116 
118 static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 60 * 60;
120 static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
124 static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
129 
130 // Internal stuff
131 namespace {
133  int nSyncStarted = 0;
134 
142  std::map<uint256, std::pair<NodeId, bool>> mapBlockSource;
143 
164  std::unique_ptr<CRollingBloomFilter> recentRejects;
165  uint256 hashRecentRejectsChainTip;
166 
168  struct QueuedBlock {
169  uint256 hash;
170  const CBlockIndex* pindex;
171  bool fValidatedHeaders;
172  std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
173  };
174  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight;
175 
177  std::list<NodeId> lNodesAnnouncingHeaderAndIDs;
178 
180  int nPreferredDownload = 0;
181 
183  int nPeersWithValidatedDownloads = 0;
184 
186  int g_outbound_peers_with_protect_from_disconnect = 0;
187 
189  std::atomic<int64_t> g_last_tip_update(0);
190 
192  typedef std::map<uint256, CTransactionRef> MapRelay;
193  MapRelay mapRelay;
195  std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
196 } // namespace
197 
198 namespace {
199 
200 struct CBlockReject {
201  unsigned char chRejectCode;
202  std::string strRejectReason;
203  uint256 hashBlock;
204 };
205 
212 struct CNodeState {
214  const CService address;
216  bool fCurrentlyConnected;
218  int nMisbehavior;
220  bool fShouldBan;
222  const std::string name;
224  std::vector<CBlockReject> rejects;
226  const CBlockIndex *pindexBestKnownBlock;
228  uint256 hashLastUnknownBlock;
230  const CBlockIndex *pindexLastCommonBlock;
232  const CBlockIndex *pindexBestHeaderSent;
234  int nUnconnectingHeaders;
236  bool fSyncStarted;
238  int64_t nHeadersSyncTimeout;
240  int64_t nStallingSince;
241  std::list<QueuedBlock> vBlocksInFlight;
243  int64_t nDownloadingSince;
244  int nBlocksInFlight;
245  int nBlocksInFlightValidHeaders;
247  bool fPreferredDownload;
249  bool fPreferHeaders;
251  bool fPreferHeaderAndIDs;
253  bool fProvidesHeaderAndIDs;
258  bool fSupportsDesiredCmpctVersion;
259 
274  struct ChainSyncTimeoutState {
276  int64_t m_timeout;
278  const CBlockIndex * m_work_header;
280  bool m_sent_getheaders;
282  bool m_protect;
283  };
284 
285  ChainSyncTimeoutState m_chain_sync;
286 
288  int64_t m_last_block_announcement;
289 
290  /*
291  * State associated with objects download.
292  *
293  * Tx download algorithm:
294  *
295  * When inv comes in, queue up (process_time, inv) inside the peer's
296  * CNodeState (m_object_process_time) as long as m_object_announced for the peer
297  * isn't too big (MAX_PEER_OBJECT_ANNOUNCEMENTS).
298  *
299  * The process_time for an object is set to nNow for outbound peers,
300  * nNow + 2 seconds for inbound peers. This is the time at which we'll
301  * consider trying to request the object from the peer in
302  * SendMessages(). The delay for inbound peers is to allow outbound peers
303  * a chance to announce before we request from inbound peers, to prevent
304  * an adversary from using inbound connections to blind us to an
305  * object (InvBlock).
306  *
307  * When we call SendMessages() for a given peer,
308  * we will loop over the objects in m_object_process_time, looking
309  * at the objects whose process_time <= nNow. We'll request each
310  * such object that we don't already have and that hasn't been
311  * requested from another peer recently, up until we hit the
312  * MAX_PEER_OBJECT_IN_FLIGHT limit for the peer. Then we'll update
313  * g_already_asked_for for each requested inv, storing the time of the
314  * GETDATA request. We use g_already_asked_for to coordinate object
315  * requests amongst our peers.
316  *
317  * For objects that we still need but we have already recently
318  * requested from some other peer, we'll reinsert (process_time, inv)
319  * back into the peer's m_object_process_time at the point in the future at
320  * which the most recent GETDATA request would time out (ie
321  * GetObjectInterval + the request time stored in g_already_asked_for).
322  * We add an additional delay for inbound peers, again to prefer
323  * attempting download from outbound peers first.
324  * We also add an extra small random delay up to 2 seconds
325  * to avoid biasing some peers over others. (e.g., due to fixed ordering
326  * of peer processing in ThreadMessageHandler).
327  *
328  * When we receive an object from a peer, we remove the inv from the
329  * peer's m_object_in_flight set and from their recently announced set
330  * (m_object_announced). We also clear g_already_asked_for for that entry, so
331  * that if somehow the object is not accepted but also not added to
332  * the reject filter, then we will eventually redownload from other
333  * peers.
334  */
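 /* Worked example (illustrative): an inbound peer announces a MSG_TX that another
  * peer was already asked for at time T. GetObjectInterval(MSG_TX) falls through to
  * GETDATA_TX_INTERVAL (60s), so the re-request is scheduled at roughly
  * T + 60s, plus GetObjectRandomDelay(MSG_TX) (up to MAX_GETDATA_RANDOM_DELAY = 2s)
  * and the 2s INBOUND_PEER_TX_DELAY, as computed by CalculateObjectGetDataTime() below.
  */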
335  struct ObjectDownloadState {
336  /* Track when to attempt download of announced objects (process
337  * time in micros -> inv)
338  */
339  std::multimap<std::chrono::microseconds, CInv> m_object_process_time;
340 
342  std::set<CInv> m_object_announced;
343 
345  std::map<CInv, std::chrono::microseconds> m_object_in_flight;
346 
348  std::chrono::microseconds m_check_expiry_timer{0};
349  };
350 
351  ObjectDownloadState m_object_download;
352 
353  CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
354  fCurrentlyConnected = false;
355  nMisbehavior = 0;
356  fShouldBan = false;
357  pindexBestKnownBlock = nullptr;
358  hashLastUnknownBlock.SetNull();
359  pindexLastCommonBlock = nullptr;
360  pindexBestHeaderSent = nullptr;
361  nUnconnectingHeaders = 0;
362  fSyncStarted = false;
363  nHeadersSyncTimeout = 0;
364  nStallingSince = 0;
365  nDownloadingSince = 0;
366  nBlocksInFlight = 0;
367  nBlocksInFlightValidHeaders = 0;
368  fPreferredDownload = false;
369  fPreferHeaders = false;
370  fPreferHeaderAndIDs = false;
371  fProvidesHeaderAndIDs = false;
372  fSupportsDesiredCmpctVersion = false;
373  m_chain_sync = { 0, nullptr, false, false };
374  m_last_block_announcement = 0;
375  }
376 };
377 
378 // Keeps track of the time (in microseconds) when each transaction was last requested
381 
383 std::map<NodeId, CNodeState> mapNodeState;
384 
385 // Requires cs_main.
386 CNodeState *State(NodeId pnode) {
387  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
388  if (it == mapNodeState.end())
389  return nullptr;
390  return &it->second;
391 }
392 
393 void UpdatePreferredDownload(CNode* node, CNodeState* state)
394 {
395  nPreferredDownload -= state->fPreferredDownload;
396 
397  // Whether this node should be marked as a preferred download node.
398  state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
399 
400  nPreferredDownload += state->fPreferredDownload;
401 }
402 
403 void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime)
404 {
405  const auto& params = Params();
406 
407  ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
408  uint64_t nonce = pnode->GetLocalNonce();
409  int nNodeStartingHeight = pnode->GetMyStartingHeight();
410  NodeId nodeid = pnode->GetId();
411  CAddress addr = pnode->addr;
412 
413  CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
414  CAddress addrMe = CAddress(CService(), nLocalNodeServices);
415 
416  uint256 mnauthChallenge;
417  GetRandBytes(mnauthChallenge.begin(), mnauthChallenge.size());
418  {
419  LOCK(pnode->cs_mnauth);
420  pnode->sentMNAuthChallenge = mnauthChallenge;
421  }
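 // Note: a masternode peer is expected to sign this challenge and return it in a
 // subsequent MNAUTH message (see evo/mnauth.h), proving control of its operator key.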
422 
423  int nProtocolVersion = PROTOCOL_VERSION;
424  if (params.NetworkIDString() != CBaseChainParams::MAIN && gArgs.IsArgSet("-pushversion")) {
425  nProtocolVersion = gArgs.GetArg("-pushversion", PROTOCOL_VERSION);
426  }
427 
428  connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, nProtocolVersion, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
429  nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes, mnauthChallenge, pnode->fMasternode));
430 
431  if (fLogIPs) {
432  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", nProtocolVersion, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
433  } else {
434  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", nProtocolVersion, nNodeStartingHeight, addrMe.ToString(), nodeid);
435  }
436 }
437 
438 // Requires cs_main.
439 // Returns a bool indicating whether we requested this block.
440 // Also used if a block was /not/ received and timed out or started with another peer
441 bool MarkBlockAsReceived(const uint256& hash) {
442  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
443  if (itInFlight != mapBlocksInFlight.end()) {
444  CNodeState *state = State(itInFlight->second.first);
445  assert(state != nullptr);
446  state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
447  if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
448  // Last validated block on the queue was received.
449  nPeersWithValidatedDownloads--;
450  }
451  if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
452  // First block on the queue was received, update the start download time for the next one
453  state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
454  }
455  state->vBlocksInFlight.erase(itInFlight->second.second);
456  state->nBlocksInFlight--;
457  state->nStallingSince = 0;
458  mapBlocksInFlight.erase(itInFlight);
459  return true;
460  }
461  return false;
462 }
463 
464 // Requires cs_main.
465 // returns false, still setting pit, if the block was already in flight from the same peer
466 // pit will only be valid as long as the same cs_main lock is being held
467 bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex *pindex = nullptr, std::list<QueuedBlock>::iterator **pit = nullptr) {
468  CNodeState *state = State(nodeid);
469  assert(state != nullptr);
470 
471  // Short-circuit most stuff in case it's from the same node
472  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
473  if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
474  if (pit) {
475  *pit = &itInFlight->second.second;
476  }
477  return false;
478  }
479 
480  // Make sure it's not listed somewhere already.
481  MarkBlockAsReceived(hash);
482 
483  std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
484  {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
485  state->nBlocksInFlight++;
486  state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
487  if (state->nBlocksInFlight == 1) {
488  // We're starting a block download (batch) from this peer.
489  state->nDownloadingSince = GetTimeMicros();
490  }
491  if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
492  nPeersWithValidatedDownloads++;
493  }
494  itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
495  if (pit)
496  *pit = &itInFlight->second.second;
497  return true;
498 }
499 
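// Check whether the last unknown block hash a peer has announced has since become
// known, and if so, update that peer's best known block.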
501 void ProcessBlockAvailability(NodeId nodeid) {
502  CNodeState *state = State(nodeid);
503  assert(state != nullptr);
504 
505  if (!state->hashLastUnknownBlock.IsNull()) {
506  BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
507  if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
508  if (state->pindexBestKnownBlock == nullptr || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
509  state->pindexBestKnownBlock = itOld->second;
510  state->hashLastUnknownBlock.SetNull();
511  }
512  }
513 }
514 
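// Update tracking information about which blocks a peer is assumed to have.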
516 void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
517  CNodeState *state = State(nodeid);
518  assert(state != nullptr);
519 
520  ProcessBlockAvailability(nodeid);
521 
522  BlockMap::iterator it = mapBlockIndex.find(hash);
523  if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
524  // An actually better block was announced.
525  if (state->pindexBestKnownBlock == nullptr || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
526  state->pindexBestKnownBlock = it->second;
527  } else {
528  // An unknown block was announced; just assume that the latest one is the best one.
529  state->hashLastUnknownBlock = hash;
530  }
531 }
532 
533 void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) {
535  CNodeState* nodestate = State(nodeid);
536  if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
537  // Never ask from peers who can't provide desired version.
538  return;
539  }
540  if (nodestate->fProvidesHeaderAndIDs) {
541  for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
542  if (*it == nodeid) {
543  lNodesAnnouncingHeaderAndIDs.erase(it);
544  lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
545  return;
546  }
547  }
548  connman->ForNode(nodeid, [connman](CNode* pfrom){
549  uint64_t nCMPCTBLOCKVersion = 1;
550  if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
551  // As per BIP152, we only get 3 of our peers to announce
552  // blocks using compact encodings.
553  connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
554  connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
555  return true;
556  });
557  lNodesAnnouncingHeaderAndIDs.pop_front();
558  }
559  connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
560  lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
561  return true;
562  });
563  }
564 }
565 
566 bool TipMayBeStale(const Consensus::Params &consensusParams)
567 {
569  if (g_last_tip_update == 0) {
570  g_last_tip_update = GetTime();
571  }
572  return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
573 }
574 
575 // Requires cs_main
576 bool CanDirectFetch(const Consensus::Params &consensusParams)
577 {
578  return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
579 }
580 
581 // Requires cs_main
582 bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
583 {
584  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
585  return true;
586  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
587  return true;
588  return false;
589 }
590 
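// Update pindexLastCommonBlock and add not-in-flight missing successors of it to
// vBlocks, until the vector has at most count entries.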
593 void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
594  if (count == 0)
595  return;
596 
597  vBlocks.reserve(vBlocks.size() + count);
598  CNodeState *state = State(nodeid);
599  assert(state != nullptr);
600 
601  // Make sure pindexBestKnownBlock is up to date, we'll need it.
602  ProcessBlockAvailability(nodeid);
603 
604  if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
605  // This peer has nothing interesting.
606  return;
607  }
608 
609  if (state->pindexLastCommonBlock == nullptr) {
610  // Bootstrap quickly by guessing a parent of our best tip is the forking point.
611  // Guessing wrong in either direction is not a problem.
612  state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
613  }
614 
615  // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
616  // of its current tip anymore. Go back enough to fix that.
617  state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
618  if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
619  return;
620 
621  std::vector<const CBlockIndex*> vToFetch;
622  const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
623  // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
624  // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
625  // download that next block if the window were 1 larger.
626  int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
627  int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
628  NodeId waitingfor = -1;
629  while (pindexWalk->nHeight < nMaxHeight) {
630  // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
631  // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
632  // as iterating over ~100 CBlockIndex* entries anyway.
633  int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
634  vToFetch.resize(nToFetch);
635  pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
636  vToFetch[nToFetch - 1] = pindexWalk;
637  for (unsigned int i = nToFetch - 1; i > 0; i--) {
638  vToFetch[i - 1] = vToFetch[i]->pprev;
639  }
640 
641  // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
642  // are not yet downloaded and not in flight to vBlocks. In the meantime, update
643  // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
644  // already part of our chain (and therefore we don't need it even if pruned).
645  for (const CBlockIndex* pindex : vToFetch) {
646  if (!pindex->IsValid(BLOCK_VALID_TREE)) {
647  // We consider the chain that this peer is on invalid.
648  return;
649  }
650  if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
651  if (pindex->nChainTx)
652  state->pindexLastCommonBlock = pindex;
653  } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
654  // The block is not already downloaded, and not yet in flight.
655  if (pindex->nHeight > nWindowEnd) {
656  // We reached the end of the window.
657  if (vBlocks.size() == 0 && waitingfor != nodeid) {
658  // We aren't able to fetch anything, but we would be if the download window was one larger.
659  nodeStaller = waitingfor;
660  }
661  return;
662  }
663  vBlocks.push_back(pindex);
664  if (vBlocks.size() == count) {
665  return;
666  }
667  } else if (waitingfor == -1) {
668  // This is the first already-in-flight block.
669  waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
670  }
671  }
672  }
673 }
674 } // namespace
675 
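// Forget that an announced object was requested: drop it from g_already_asked_for,
// remember when it was erased, and clear it from the peer's announced and in-flight
// sets (if a node state is given).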
676 void EraseObjectRequest(CNodeState* nodestate, const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
677 {
679  LogPrint(BCLog::NET, "%s -- inv=(%s)\n", __func__, inv.ToString());
680  g_already_asked_for.erase(inv.hash);
681  g_erased_object_requests.insert(std::make_pair(inv.hash, GetTime<std::chrono::microseconds>()));
682 
683  if (nodestate) {
684  nodestate->m_object_download.m_object_announced.erase(inv);
685  nodestate->m_object_download.m_object_in_flight.erase(inv);
686  }
687 }
688 
689 void EraseObjectRequest(NodeId nodeId, const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
690 {
692  auto* state = State(nodeId);
693  if (!state) {
694  return;
695  }
696  EraseObjectRequest(state, inv);
697 }
698 
699 std::chrono::microseconds GetObjectRequestTime(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
700 {
702  auto it = g_already_asked_for.find(hash);
703  if (it != g_already_asked_for.end()) {
704  return it->second;
705  }
706  return {};
707 }
708 
709 void UpdateObjectRequestTime(const uint256& hash, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
710 {
712  auto it = g_already_asked_for.find(hash);
713  if (it == g_already_asked_for.end()) {
714  g_already_asked_for.insert(std::make_pair(hash, request_time));
715  } else {
716  g_already_asked_for.update(it, request_time);
717  }
718 }
719 
720 std::chrono::microseconds GetObjectInterval(int invType)
721 {
722  // some messages need to be re-requested faster when the first announcing peer did not answer to GETDATA
723  switch(invType)
724  {
726  return std::chrono::seconds{15};
727  case MSG_CLSIG:
728  return std::chrono::seconds{5};
729  case MSG_ISLOCK:
730  return std::chrono::seconds{10};
731  default:
732  return GETDATA_TX_INTERVAL;
733  }
734 }
735 
736 std::chrono::microseconds GetObjectExpiryInterval(int invType)
737 {
739 }
740 
741 std::chrono::microseconds GetObjectRandomDelay(int invType)
742 {
743  if (invType == MSG_TX) {
745  }
746  return {};
747 }
748 
749 std::chrono::microseconds CalculateObjectGetDataTime(const CInv& inv, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
750 {
752  std::chrono::microseconds process_time;
753  const auto last_request_time = GetObjectRequestTime(inv.hash);
754  // First time requesting this tx
755  if (last_request_time.count() == 0) {
756  process_time = current_time;
757  } else {
758  // Randomize the delay to avoid biasing some peers over others (such as due to
759  // fixed ordering of peer processing in ThreadMessageHandler)
760  process_time = last_request_time + GetObjectInterval(inv.type) + GetObjectRandomDelay(inv.type);
761  }
762 
763  // We delay processing announcements from inbound peers
764  if (inv.type == MSG_TX && !fMasternodeMode && use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY;
765 
766  return process_time;
767 }
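// Example (illustrative): a MSG_CLSIG announced for the first time has no entry in
// g_already_asked_for, so process_time == current_time and it can be requested
// immediately; a second announcement made while that request is pending is scheduled
// at last_request_time + GetObjectInterval(MSG_CLSIG) (5s) + GetObjectRandomDelay(MSG_CLSIG).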
768 
769 void RequestObject(CNodeState* state, const CInv& inv, std::chrono::microseconds current_time, bool fForce = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
770 {
772  CNodeState::ObjectDownloadState& peer_download_state = state->m_object_download;
773  if (peer_download_state.m_object_announced.size() >= MAX_PEER_OBJECT_ANNOUNCEMENTS ||
774  peer_download_state.m_object_process_time.size() >= MAX_PEER_OBJECT_ANNOUNCEMENTS ||
775  peer_download_state.m_object_announced.count(inv)) {
776  // Too many queued announcements from this peer, or we already have
777  // this announcement
778  return;
779  }
780  peer_download_state.m_object_announced.insert(inv);
781 
782  // Calculate the time to try requesting this transaction. Use
783  // fPreferredDownload as a proxy for outbound peers.
784  std::chrono::microseconds process_time = CalculateObjectGetDataTime(inv, current_time, !state->fPreferredDownload);
785 
786  peer_download_state.m_object_process_time.emplace(process_time, inv);
787 
788  if (fForce) {
789  // make sure this object is actually requested ASAP
790  g_erased_object_requests.erase(inv.hash);
791  g_already_asked_for.erase(inv.hash);
792  }
793 
794  LogPrint(BCLog::NET, "%s -- inv=(%s), current_time=%d, process_time=%d, delta=%d\n", __func__, inv.ToString(), current_time.count(), process_time.count(), (process_time - current_time).count());
795 }
796 
797 void RequestObject(NodeId nodeId, const CInv& inv, std::chrono::microseconds current_time, bool fForce) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
798 {
800  auto* state = State(nodeId);
801  if (!state) {
802  return;
803  }
804  RequestObject(state, inv, current_time, fForce);
805 }
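// Usage sketch (illustrative, assuming the caller holds cs_main):
//   RequestObject(pfrom->GetId(), CInv(MSG_CLSIG, hash),
//                 GetTime<std::chrono::microseconds>(), /*fForce=*/false);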
806 
808 {
810  auto* state = State(nodeId);
811  if (!state) {
812  return 0;
813  }
814  return state->m_object_download.m_object_process_time.size();
815 }
816 
817 // This function is used for testing the stale tip eviction logic, see
818 // DoS_tests.cpp
819 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
820 {
821  LOCK(cs_main);
822  CNodeState *state = State(node);
823  if (state) state->m_last_block_announcement = time_in_seconds;
824 }
825 
826 // Returns true for outbound peers, excluding manual connections, feelers, and
827 // one-shots
829 {
830  return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot);
831 }
832 
833 void PeerLogicValidation::InitializeNode(CNode *pnode) {
834  CAddress addr = pnode->addr;
835  std::string addrName = pnode->GetAddrName();
836  NodeId nodeid = pnode->GetId();
837  {
838  LOCK(cs_main);
839  mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
840  }
841  if(!pnode->fInbound)
842  PushNodeVersion(pnode, connman, GetTime());
843 }
844 
845 void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
846  fUpdateConnectionTime = false;
847  LOCK(cs_main);
848  CNodeState *state = State(nodeid);
849  assert(state != nullptr);
850 
851  if (state->fSyncStarted)
852  nSyncStarted--;
853 
854  if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
855  fUpdateConnectionTime = true;
856  }
857 
858  for (const QueuedBlock& entry : state->vBlocksInFlight) {
859  mapBlocksInFlight.erase(entry.hash);
860  }
861  EraseOrphansFor(nodeid);
862  nPreferredDownload -= state->fPreferredDownload;
863  nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
864  assert(nPeersWithValidatedDownloads >= 0);
865  g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
866  assert(g_outbound_peers_with_protect_from_disconnect >= 0);
867 
868  mapNodeState.erase(nodeid);
869 
870  if (mapNodeState.empty()) {
871  // Do a consistency check after the last peer is removed.
872  assert(mapBlocksInFlight.empty());
873  assert(nPreferredDownload == 0);
874  assert(nPeersWithValidatedDownloads == 0);
875  assert(g_outbound_peers_with_protect_from_disconnect == 0);
876  }
877  LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
878 }
879 
880 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
881  LOCK(cs_main);
882  CNodeState *state = State(nodeid);
883  if (state == nullptr)
884  return false;
885  stats.nMisbehavior = state->nMisbehavior;
886  stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
887  stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
888  for (const QueuedBlock& queue : state->vBlocksInFlight) {
889  if (queue.pindex)
890  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
891  }
892  return true;
893 }
894 
896 //
897 // mapOrphanTransactions
898 //
899 
901 {
902  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
903  if (max_extra_txn <= 0)
904  return;
905  if (!vExtraTxnForCompact.size())
906  vExtraTxnForCompact.resize(max_extra_txn);
907  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetHash(), tx);
908  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
909 }
910 
911 bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
912 {
913  const uint256& hash = tx->GetHash();
914  if (mapOrphanTransactions.count(hash))
915  return false;
916 
917  // Ignore big transactions, to avoid a
918  // send-big-orphans memory exhaustion attack. If a peer has a legitimate
919  // large transaction with a missing parent then we assume
920  // it will rebroadcast it later, after the parent transaction(s)
921  // have been mined or received.
922  // 100 orphans, each of which is at most 99,999 bytes big, is
923  // at most 10 megabytes of orphans and somewhat more for the by-prev index (in the worst case):
925  if (sz > MAX_STANDARD_TX_SIZE)
926  {
927  LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
928  return false;
929  }
930 
931  auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, sz});
932  assert(ret.second);
933  for (const CTxIn& txin : tx->vin) {
934  mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
935  }
936 
938 
939  nMapOrphanTransactionsSize += sz;
940 
941  LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
942  mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
943  return true;
944 }
945 
946 int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
947 {
948  std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
949  if (it == mapOrphanTransactions.end())
950  return 0;
951  for (const CTxIn& txin : it->second.tx->vin)
952  {
953  auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
954  if (itPrev == mapOrphanTransactionsByPrev.end())
955  continue;
956  itPrev->second.erase(it);
957  if (itPrev->second.empty())
958  mapOrphanTransactionsByPrev.erase(itPrev);
959  }
960  assert(nMapOrphanTransactionsSize >= it->second.nTxSize);
961  nMapOrphanTransactionsSize -= it->second.nTxSize;
962  mapOrphanTransactions.erase(it);
963  return 1;
964 }
965 
966 void EraseOrphansFor(NodeId peer)
967 {
969  int nErased = 0;
970  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
971  while (iter != mapOrphanTransactions.end())
972  {
973  std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
974  if (maybeErase->second.fromPeer == peer)
975  {
976  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
977  }
978  }
979  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
980 }
981 
982 
983 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphansSize)
984 {
986 
987  unsigned int nEvicted = 0;
988  static int64_t nNextSweep;
989  int64_t nNow = GetTime();
990  if (nNextSweep <= nNow) {
991  // Sweep out expired orphan pool entries:
992  int nErased = 0;
993  int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
994  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
995  while (iter != mapOrphanTransactions.end())
996  {
997  std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
998  if (maybeErase->second.nTimeExpire <= nNow) {
999  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
1000  } else {
1001  nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
1002  }
1003  }
1004  // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
1005  nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
1006  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
1007  }
1008  while (!mapOrphanTransactions.empty() && nMapOrphanTransactionsSize > nMaxOrphansSize)
1009  {
1010  // Evict a random orphan:
1011  uint256 randomhash = GetRandHash();
1012  std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
1013  if (it == mapOrphanTransactions.end())
1014  it = mapOrphanTransactions.begin();
1015  EraseOrphanTx(it->first);
1016  ++nEvicted;
1017  }
1018  return nEvicted;
1019 }
1020 
1021 void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans);
1022 
1023 // Requires cs_main.
1024 void Misbehaving(NodeId pnode, int howmuch, const std::string& message)
1025 {
1026  if (howmuch == 0)
1027  return;
1028 
1029  CNodeState *state = State(pnode);
1030  if (state == nullptr)
1031  return;
1032 
1033  state->nMisbehavior += howmuch;
1034  int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
1035  std::string message_prefixed = message.empty() ? "" : (": " + message);
1036  if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
1037  {
1038  LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
1039  state->fShouldBan = true;
1040  } else
1041  LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
1042 }
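// Usage sketch (illustrative): penalize a misbehaving peer; once its score reaches
// the -banscore threshold (DEFAULT_BANSCORE_THRESHOLD), fShouldBan is set and the
// peer is disconnected/banned elsewhere:
//   Misbehaving(pfrom->GetId(), 100, "invalid-message");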
1043 
1044 // Requires cs_main.
1045 bool IsBanned(NodeId pnode)
1046 {
1047  CNodeState *state = State(pnode);
1048  if (state == nullptr)
1049  return false;
1050  if (state->fShouldBan) {
1051  return true;
1052  }
1053  return false;
1054 }
1055 
1056 
1057 
1058 
1059 
1060 
1062 //
1063 // blockchain -> download logic notification
1064 //
1065 
1066 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1067 // active chain if they are no more than a month older (both in time, and in
1068 // best equivalent proof of work) than the best header chain we know about and
1069 // we fully-validated them at some point.
1070 static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams)
1071 {
1073  if (chainActive.Contains(pindex)) return true;
1074  return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
1075  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
1076  (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
1077 }
1078 
1079 PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &scheduler) : connman(connmanIn), m_stale_tip_check_time(0) {
1080  // Initialize global variables that cannot be constructed at startup.
1081  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1082 
1083  const Consensus::Params& consensusParams = Params().GetConsensus();
1084  // Stale tip checking and peer eviction are on two different timers, but we
1085  // don't want them to get out of sync due to drift in the scheduler, so we
1086  // combine them in one function and schedule at the quicker (peer-eviction)
1087  // timer.
1088  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1090 }
1091 
1092 void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
1094 
1095  std::vector<uint256> vOrphanErase;
1096  std::set<uint256> orphanWorkSet;
1097 
1098  for (const CTransactionRef& ptx : pblock->vtx) {
1099  const CTransaction& tx = *ptx;
1100 
1101  // Which orphan pool entries should we reprocess and potentially try to accept into the mempool again?
1102  for (size_t i = 0; i < tx.vin.size(); i++) {
1103  auto itByPrev = mapOrphanTransactionsByPrev.find(COutPoint(tx.GetHash(), (uint32_t)i));
1104  if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1105  for (const auto& elem : itByPrev->second) {
1106  orphanWorkSet.insert(elem->first);
1107  }
1108  }
1109 
1110  // Which orphan pool entries must we evict?
1111  for (const auto& txin : tx.vin) {
1112  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1113  if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1114  for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
1115  const CTransaction& orphanTx = *(*mi)->second.tx;
1116  const uint256& orphanHash = orphanTx.GetHash();
1117  vOrphanErase.push_back(orphanHash);
1118  }
1119  }
1120  }
1121 
1122  // Erase orphan transactions included or precluded by this block
1123  if (vOrphanErase.size()) {
1124  int nErased = 0;
1125  for (uint256 &orphanHash : vOrphanErase) {
1126  nErased += EraseOrphanTx(orphanHash);
1127  }
1128  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
1129  }
1130 
1131  while (!orphanWorkSet.empty()) {
1132  LogPrint(BCLog::MEMPOOL, "Trying to process %d orphans\n", orphanWorkSet.size());
1133  ProcessOrphanTx(g_connman.get(), orphanWorkSet);
1134  }
1135 
1136  g_last_tip_update = GetTime();
1137 }
1138 
1139 // All of the following cache a recent block, and are protected by cs_most_recent_block
1140 static CCriticalSection cs_most_recent_block;
1141 static std::shared_ptr<const CBlock> most_recent_block;
1142 static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block;
1143 static uint256 most_recent_block_hash;
1144 
1145 void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
1146  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock);
1147  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1148 
1149  LOCK(cs_main);
1150 
1151  static int nHighestFastAnnounce = 0;
1152  if (pindex->nHeight <= nHighestFastAnnounce)
1153  return;
1154  nHighestFastAnnounce = pindex->nHeight;
1155 
1156  uint256 hashBlock(pblock->GetHash());
1157 
1158  {
1159  LOCK(cs_most_recent_block);
1160  most_recent_block_hash = hashBlock;
1161  most_recent_block = pblock;
1162  most_recent_compact_block = pcmpctblock;
1163  }
1164 
1165  connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, &hashBlock](CNode* pnode) {
1166  // TODO: Avoid the repeated-serialization here
1167  if (pnode->fDisconnect)
1168  return;
1169  ProcessBlockAvailability(pnode->GetId());
1170  CNodeState &state = *State(pnode->GetId());
1171  // If the peer has, or we announced to them the previous block already,
1172  // but we don't think they have this one, go ahead and announce it
1173  if (state.fPreferHeaderAndIDs &&
1174  !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
1175 
1176  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
1177  hashBlock.ToString(), pnode->GetId());
1178  connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1179  state.pindexBestHeaderSent = pindex;
1180  }
1181  });
1182 }
1183 
1184 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
1185  const int nNewHeight = pindexNew->nHeight;
1186  connman->SetBestHeight(nNewHeight);
1187 
1188  SetServiceFlagsIBDCache(!fInitialDownload);
1189  if (!fInitialDownload) {
1190  // Find the hashes of all blocks that weren't previously in the best chain.
1191  std::vector<uint256> vHashes;
1192  const CBlockIndex *pindexToAnnounce = pindexNew;
1193  while (pindexToAnnounce != pindexFork) {
1194  vHashes.push_back(pindexToAnnounce->GetBlockHash());
1195  pindexToAnnounce = pindexToAnnounce->pprev;
1196  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1197  // Limit announcements in case of a huge reorganization.
1198  // Rely on the peer's synchronization mechanism in that case.
1199  break;
1200  }
1201  }
1202  // Relay inventory, but don't relay old inventory during initial block download.
1203  connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
1204  if (pnode->fMasternode) return;
1205  if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
1206  for (const uint256& hash : reverse_iterate(vHashes)) {
1207  pnode->PushBlockHash(hash);
1208  }
1209  }
1210  });
1212  }
1213 
1215 }
1216 
1217 void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
1218  LOCK(cs_main);
1219 
1220  const uint256 hash(block.GetHash());
1221  std::map<uint256, std::pair<NodeId, bool> >::iterator it = mapBlockSource.find(hash);
1222 
1223  int nDoS = 0;
1224  if (state.IsInvalid(nDoS)) {
1225  // Don't send reject message with code 0 or an internal reject code.
1226  if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) {
1227  CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
1228  State(it->second.first)->rejects.push_back(reject);
1229  if (nDoS > 0 && it->second.second)
1230  Misbehaving(it->second.first, nDoS);
1231  }
1232  }
1233  // Check that:
1234  // 1. The block is valid
1235  // 2. We're not in initial block download
1236  // 3. This is currently the best block we're aware of. We haven't updated
1237  // the tip yet so we have no way to check this directly here. Instead we
1238  // just check that there are currently no other blocks in flight.
1239  else if (state.IsValid() &&
1240  !IsInitialBlockDownload() &&
1241  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1242  if (it != mapBlockSource.end()) {
1243  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
1244  }
1245  }
1246  if (it != mapBlockSource.end())
1247  mapBlockSource.erase(it);
1248 }
1249 
1251 //
1252 // Messages
1253 //
1254 
1255 
1256 bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1257 {
1258  switch (inv.type)
1259  {
1260  case MSG_TX:
1261  case MSG_DSTX:
1262  case MSG_LEGACY_TXLOCK_REQUEST: // we treat legacy IX messages as TX messages
1263  {
1264  assert(recentRejects);
1265  if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
1266  {
1267  // If the chain tip has changed, previously rejected transactions
1268  // might now be valid, e.g. due to a nLockTime'd tx becoming valid,
1269  // or a double-spend. Reset the rejects filter and give those
1270  // txs a second chance.
1271  hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
1272  recentRejects->reset();
1273  }
1274 
1275  {
1276  LOCK(g_cs_orphans);
1277  if (mapOrphanTransactions.count(inv.hash)) return true;
1278  }
1279 
1280  // When we receive an islock for a previously rejected transaction, we have to
1281  // drop the first-seen tx (which such a locked transaction was conflicting with)
1282  // and re-request the locked transaction (which did not make it into the mempool
1283  // previously due to txn-mempool-conflict rule). This means that we must ignore
1284  // recentRejects filter for such locked txes here.
1285  // We also ignore recentRejects filter for DSTX-es because a malicious peer might
1286  // relay a valid DSTX as a regular TX first which would skip all the specific checks
1287  // but would cause such tx to be rejected by ATMP due to 0 fee. Ignoring it here
1288  // should let the DSTX be propagated by an honest peer later. Note that a malicious
1289  // masternode would not be able to exploit this to spam the network with specially
1290  // crafted invalid DSTX-es and potentially cause high load cheaply, because
1291  // corresponding checks in ProcessMessage won't let it send DSTX-es too often.
1292  bool fIgnoreRecentRejects = llmq::quorumInstantSendManager->IsLocked(inv.hash) || inv.type == MSG_DSTX;
1293 
1294  return (!fIgnoreRecentRejects && recentRejects->contains(inv.hash)) ||
1295  (inv.type == MSG_DSTX && static_cast<bool>(CPrivateSend::GetDSTX(inv.hash))) ||
1296  mempool.exists(inv.hash) ||
1297  pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
1298  pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1)) ||
1299  (fTxIndex && pblocktree->HasTxIndex(inv.hash));
1300  }
1301 
1302  case MSG_BLOCK:
1303  return mapBlockIndex.count(inv.hash);
1304 
1305  /*
1306  Dash Related Inventory Messages
1307 
1308  --
1309 
1310  We shouldn't update the sync times for each of the messages when we already have it.
1311  We're going to be asking many nodes upfront for the full inventory list, so we'll get duplicates of these.
1312  We want to only update the time on new hits, so that we can time out appropriately if needed.
1313  */
1314 
1315  case MSG_SPORK:
1316  {
1317  CSporkMessage spork;
1318  return sporkManager.GetSporkByHash(inv.hash, spork);
1319  }
1320 
1321  case MSG_GOVERNANCE_OBJECT:
1323  return ! governance.ConfirmInventoryRequest(inv);
1324 
1327  case MSG_QUORUM_CONTRIB:
1328  case MSG_QUORUM_COMPLAINT:
1334  case MSG_CLSIG:
1335  return llmq::chainLocksHandler->AlreadyHave(inv);
1336  case MSG_ISLOCK:
1338  }
1339 
1340  // Don't know what it is, just say we already got one
1341  return true;
1342 }
1343 
1344 static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
1345 {
1346  unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
1347 
1348  // Relay to a limited number of other nodes
1349  // Use deterministic randomness to send to the same nodes for 24 hours
1350  // at a time so the addrKnowns of the chosen nodes prevent repeats
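 // (Illustrative note: the SipHash key mixes hashAddr with the day bucket
 // (GetTime() + hashAddr) / (24*60*60), so a given address maps to the same one or
 // two relay peers until that bucket rolls over.)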
1351  uint64_t hashAddr = addr.GetHash();
1352  const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
1353  FastRandomContext insecure_rand;
1354 
1355  std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
1356  assert(nRelayNodes <= best.size());
1357 
1358  auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
1359  if (pnode->nVersion >= CADDR_TIME_VERSION) {
1360  uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
1361  for (unsigned int i = 0; i < nRelayNodes; i++) {
1362  if (hashKey > best[i].first) {
1363  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
1364  best[i] = std::make_pair(hashKey, pnode);
1365  break;
1366  }
1367  }
1368  }
1369  };
1370 
1371  auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
1372  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
1373  best[i].second->PushAddress(addr, insecure_rand);
1374  }
1375  };
1376 
1377  connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
1378 }
1379 
1380 void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, const CInv& inv, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
1381 {
1382  bool send = false;
1383  std::shared_ptr<const CBlock> a_recent_block;
1384  std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
1385  const Consensus::Params& consensusParams = chainparams.GetConsensus();
1386  {
1387  LOCK(cs_most_recent_block);
1388  a_recent_block = most_recent_block;
1389  a_recent_compact_block = most_recent_compact_block;
1390  }
1391 
1392  bool need_activate_chain = false;
1393  {
1394  LOCK(cs_main);
1395  BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
1396  if (mi != mapBlockIndex.end())
1397  {
1398  if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
1399  mi->second->IsValid(BLOCK_VALID_TREE)) {
1400  // If we have the block and all of its parents, but have not yet validated it,
1401  // we might be in the middle of connecting it (ie in the unlock of cs_main
1402  // before ActivateBestChain but after AcceptBlock).
1403  // In this case, we need to run ActivateBestChain prior to checking the relay
1404  // conditions below.
1405  need_activate_chain = true;
1406  }
1407  }
1408  } // release cs_main before calling ActivateBestChain
1409  if (need_activate_chain) {
1410  CValidationState dummy;
1411  ActivateBestChain(dummy, Params(), a_recent_block);
1412  }
1413 
1414  LOCK(cs_main);
1415  BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
1416  if (mi != mapBlockIndex.end()) {
1417  send = BlockRequestAllowed(mi->second, consensusParams);
1418  if (!send) {
1419  LogPrint(BCLog::NET,"%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
1420  }
1421  }
1422  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1423  // disconnect node in case we have reached the outbound limit for serving historical blocks
1424  // never disconnect whitelisted nodes
1425  if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
1426  {
1427  LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
1428 
1429  //disconnect node
1430  pfrom->fDisconnect = true;
1431  send = false;
1432  }
1433  // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
1434  if (send && !pfrom->fWhitelisted && (
1435  (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
1436  )) {
1437  LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
1438 
1439  //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
1440  pfrom->fDisconnect = true;
1441  send = false;
1442  }
1443  // Pruned nodes may have deleted the block, so check whether
1444  // it's available before trying to send.
1445  if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
1446  {
1447  std::shared_ptr<const CBlock> pblock;
1448  if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
1449  pblock = a_recent_block;
1450  } else {
1451  // Send block from disk
1452  std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
1453  if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
1454  assert(!"cannot load block from disk");
1455  pblock = pblockRead;
1456  }
1457  if (pblock) {
1458  if (inv.type == MSG_BLOCK)
1459  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
1460  else if (inv.type == MSG_FILTERED_BLOCK) {
1461  bool sendMerkleBlock = false;
1462  CMerkleBlock merkleBlock;
1463  {
1464  LOCK(pfrom->cs_filter);
1465  if (pfrom->pfilter) {
1466  sendMerkleBlock = true;
1467  merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
1468  }
1469  }
1470  if (sendMerkleBlock) {
1471  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
1472  // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
1473  // This avoids hurting performance by pointlessly requiring a round-trip
1474  // Note that there is currently no way for a node to request any single transactions we didn't send here -
1475  // they must either disconnect and retry or request the full block.
1476  // Thus, the protocol spec as specified allows for us to provide duplicate txn here,
1477  // however we MUST always provide at least what the remote peer needs
1478  typedef std::pair<unsigned int, uint256> PairType;
1479  for (PairType &pair : merkleBlock.vMatchedTxn)
1480  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first]));
1481  }
1482  // else
1483  // no response
1484  } else if (inv.type == MSG_CMPCT_BLOCK) {
1485  // If a peer is asking for old blocks, we're almost guaranteed
1486  // they won't have a useful mempool to match against a compact block,
1487  // and we don't feel like constructing the object for them, so
1488  // instead we respond with the full, non-compact block.
1489  if (CanDirectFetch(consensusParams) &&
1490  mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
1491  if (a_recent_compact_block &&
1492  a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
1493  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
1494  } else {
1495  CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
1496  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
1497  }
1498  } else {
1499  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
1500  }
1501  }
1502  }
1503  // Trigger the peer node to send a getblocks request for the next batch of inventory
1504  if (inv.hash == pfrom->hashContinue)
1505  {
1506  // Bypass PushInventory, this must send even if redundant,
1507  // and we want it right after the last block so they don't
1508  // wait for other stuff first.
1509  std::vector<CInv> vInv;
1510  vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
1511  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
1512  pfrom->hashContinue.SetNull();
1513  }
1514  }
1515 }
1516 
1517 void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
1518 {
1520 
1521  std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
1522  std::vector<CInv> vNotFound;
1523  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1524  {
1525  LOCK(cs_main);
1526 
1527  while (it != pfrom->vRecvGetData.end() && it->IsKnownType()) {
1528  if (interruptMsgProc)
1529  return;
1530  // Don't bother if send buffer is too full to respond anyway
1531  if (pfrom->fPauseSend)
1532  break;
1533 
1534  const CInv &inv = *it;
1535  if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) {
1536  break;
1537  }
1538  it++;
1539 
1540  // Send stream from relay memory
1541  bool push = false;
1542  if (inv.type == MSG_TX || inv.type == MSG_DSTX) {
1543  CPrivateSendBroadcastTx dstx;
1544  if (inv.type == MSG_DSTX) {
1545  dstx = CPrivateSend::GetDSTX(inv.hash);
1546  }
1547  auto mi = mapRelay.find(inv.hash);
1548  if (mi != mapRelay.end()) {
1549  if (dstx) {
1550  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::DSTX, dstx));
1551  } else {
1552  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *mi->second));
1553  }
1554  push = true;
1555  } else if (pfrom->timeLastMempoolReq) {
1556  auto txinfo = mempool.info(inv.hash);
1557  // To protect privacy, do not answer getdata using the mempool when
1558  // that TX couldn't have been INVed in reply to a MEMPOOL request.
1559  if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
1560  if (dstx) {
1561  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::DSTX, dstx));
1562  } else {
1563  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::TX, *txinfo.tx));
1564  }
1565  push = true;
1566  }
1567  }
1568  }
1569 
1570  if (!push && inv.type == MSG_SPORK) {
1571  CSporkMessage spork;
1572  if (sporkManager.GetSporkByHash(inv.hash, spork)) {
1573  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SPORK, spork));
1574  push = true;
1575  }
1576  }
1577 
1578  if (!push && inv.type == MSG_GOVERNANCE_OBJECT) {
1579  LogPrint(BCLog::NET, "ProcessGetData -- MSG_GOVERNANCE_OBJECT: inv = %s\n", inv.ToString());
1580  CDataStream ss(SER_NETWORK, pfrom->GetSendVersion());
1581  bool topush = false;
1582  {
1583  if(governance.HaveObjectForHash(inv.hash)) {
1584  ss.reserve(1000);
1585  if(governance.SerializeObjectForHash(inv.hash, ss)) {
1586  topush = true;
1587  }
1588  }
1589  }
1590  LogPrint(BCLog::NET, "ProcessGetData -- MSG_GOVERNANCE_OBJECT: topush = %d, inv = %s\n", topush, inv.ToString());
1591  if(topush) {
1592  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCEOBJECT, ss));
1593  push = true;
1594  }
1595  }
1596 
1597  if (!push && inv.type == MSG_GOVERNANCE_OBJECT_VOTE) {
1598  CDataStream ss(SER_NETWORK, pfrom->GetSendVersion());
1599  bool topush = false;
1600  {
1601  if(governance.HaveVoteForHash(inv.hash)) {
1602  ss.reserve(1000);
1603  if(governance.SerializeVoteForHash(inv.hash, ss)) {
1604  topush = true;
1605  }
1606  }
1607  }
1608  if(topush) {
1609  LogPrint(BCLog::NET, "ProcessGetData -- pushing: inv = %s\n", inv.ToString());
1610  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MNGOVERNANCEOBJECTVOTE, ss));
1611  push = true;
1612  }
1613  }
1614 
1615  if (!push && (inv.type == MSG_QUORUM_FINAL_COMMITMENT)) {
1616  llmq::CFinalCommitment o;
1617  if (llmq::quorumBlockProcessor->GetMinableCommitmentByHash(inv.hash, o)) {
1618  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QFCOMMITMENT, o));
1619  push = true;
1620  }
1621  }
1622 
1623  if (!push && (inv.type == MSG_QUORUM_CONTRIB)) {
1624  llmq::CDKGContribution o;
1625  if (llmq::quorumDKGSessionManager->GetContribution(inv.hash, o)) {
1626  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QCONTRIB, o));
1627  push = true;
1628  }
1629  }
1630  if (!push && (inv.type == MSG_QUORUM_COMPLAINT)) {
1631  llmq::CDKGComplaint o;
1632  if (llmq::quorumDKGSessionManager->GetComplaint(inv.hash, o)) {
1633  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QCOMPLAINT, o));
1634  push = true;
1635  }
1636  }
1637  if (!push && (inv.type == MSG_QUORUM_JUSTIFICATION)) {
1638  llmq::CDKGJustification o;
1639  if (llmq::quorumDKGSessionManager->GetJustification(inv.hash, o)) {
1640  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QJUSTIFICATION, o));
1641  push = true;
1642  }
1643  }
1644  if (!push && (inv.type == MSG_QUORUM_PREMATURE_COMMITMENT)) {
1645  llmq::CDKGPrematureCommitment o;
1646  if (llmq::quorumDKGSessionManager->GetPrematureCommitment(inv.hash, o)) {
1647  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QPCOMMITMENT, o));
1648  push = true;
1649  }
1650  }
1651  if (!push && (inv.type == MSG_QUORUM_RECOVERED_SIG)) {
1652  llmq::CRecoveredSig o;
1653  if (llmq::quorumSigningManager->GetRecoveredSigForGetData(inv.hash, o)) {
1654  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QSIGREC, o));
1655  push = true;
1656  }
1657  }
1658 
1659  if (!push && (inv.type == MSG_CLSIG)) {
1660  llmq::CChainLockSig o;
1661  if (llmq::chainLocksHandler->GetChainLockByHash(inv.hash, o)) {
1662  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::CLSIG, o));
1663  push = true;
1664  }
1665  }
1666 
1667  if (!push && (inv.type == MSG_ISLOCK)) {
1668  llmq::CInstantSendLock o;
1669  if (llmq::quorumInstantSendManager->GetInstantSendLockByHash(inv.hash, o)) {
1670  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::ISLOCK, o));
1671  push = true;
1672  }
1673  }
1674 
1675  if (!push)
1676  vNotFound.push_back(inv);
1677  }
1678  } // release cs_main
1679 
1680  if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) {
1681  const CInv &inv = *it;
1682  if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) {
1683  it++;
1684  ProcessGetBlockData(pfrom, chainparams, inv, connman, interruptMsgProc);
1685  }
1686  }
1687 
1688  pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
1689 
1690  if (!vNotFound.empty()) {
1691  // Let the peer know that we didn't find what it asked for, so it doesn't
1692  // have to wait around forever.
1693  // SPV clients care about this message: it's needed when they are
1694  // recursively walking the dependencies of relevant unconfirmed
1695  // transactions. SPV clients want to do that because they want to know
1696  // about (and store and rebroadcast and risk analyze) the dependencies
1697  // of transactions relevant to them, without having to download the
1698  // entire memory pool.
1699  // Also, other nodes can use these messages to automatically request a
1700  // transaction from some other peer that announced it, and stop
1701  // waiting for us to respond.
1702  // In normal operation, we often send NOTFOUND messages for parents of
1703  // transactions that we relay; if a peer is missing a parent, they may
1704  // assume we have them and request the parents from us.
1705  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
1706  }
1707 }
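
The mempool-privacy rule enforced above (a transaction getdata is answered from the mempool only when the transaction could already have been announced in reply to the peer's last MEMPOOL request) can be shown in isolation. The following is a minimal, self-contained sketch with hypothetical TxInfoSketch/PeerSketch types, not the real CTxMemPool/CNode interfaces:

#include <cstdint>
#include <optional>

struct TxInfoSketch {
    int64_t nTime;                      // time the transaction entered our mempool
};

struct PeerSketch {
    int64_t timeLastMempoolReq = 0;     // last time this peer sent us a MEMPOOL request
};

// Answer a transaction getdata from the mempool only if the peer has issued a
// MEMPOOL request and the transaction was already in the mempool at that time;
// otherwise the peer could probe for transactions it was never told about.
bool MayAnswerFromMempool(const std::optional<TxInfoSketch>& txinfo, const PeerSketch& peer)
{
    return peer.timeLastMempoolReq != 0 &&
           txinfo.has_value() &&
           txinfo->nTime <= peer.timeLastMempoolReq;
}
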
1708 
1709 inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman* connman) {
1710  BlockTransactions resp(req);
1711  for (size_t i = 0; i < req.indexes.size(); i++) {
1712  if (req.indexes[i] >= block.vtx.size()) {
1713  LOCK(cs_main);
1714  Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId()));
1715  return;
1716  }
1717  resp.txn[i] = block.vtx[req.indexes[i]];
1718  }
1719  LOCK(cs_main);
1720  CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1721  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
1722 }
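
SendBlockTransactions() boils down to an index lookup guarded by a bounds check that penalizes the requester. A simplified, self-contained version of that core step, using plain std containers in place of CBlock/BlockTransactionsRequest (hypothetical names):

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

// Return the transactions referenced by the requested indexes, or nullopt if
// any index is out of bounds (the caller would then treat the request as
// misbehavior, as the real code does with Misbehaving(..., 100)).
std::optional<std::vector<std::string>>
SelectBlockTransactions(const std::vector<std::string>& vtx, const std::vector<size_t>& indexes)
{
    std::vector<std::string> out;
    out.reserve(indexes.size());
    for (size_t idx : indexes) {
        if (idx >= vtx.size()) return std::nullopt;  // out-of-bounds request
        out.push_back(vtx[idx]);
    }
    return out;
}
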
1723 
1724 bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid)
1725 {
1726  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1727  size_t nCount = headers.size();
1728 
1729  if (nCount == 0) {
1730  // Nothing interesting. Stop asking this peer for more headers.
1731  return true;
1732  }
1733 
1734  bool received_new_header = false;
1735  const CBlockIndex *pindexLast = nullptr;
1736  {
1737  LOCK(cs_main);
1738  CNodeState *nodestate = State(pfrom->GetId());
1739 
1740  // If this looks like it could be a block announcement (nCount <
1741  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
1742  // don't connect:
1743  // - Send a getheaders message in response to try to connect the chain.
1744  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
1745  // don't connect before giving DoS points
1746  // - Once a headers message is received that is valid and does connect,
1747  // nUnconnectingHeaders gets reset back to 0.
1748  if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
1749  nodestate->nUnconnectingHeaders++;
1750  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
1751  LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
1752  headers[0].GetHash().ToString(),
1753  headers[0].hashPrevBlock.ToString(),
1754  pindexBestHeader->nHeight,
1755  pfrom->GetId(), nodestate->nUnconnectingHeaders);
1756  // Set hashLastUnknownBlock for this peer, so that if we
1757  // eventually get the headers - even from a different peer -
1758  // we can use this peer to download.
1759  UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
1760 
1761  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
1762  Misbehaving(pfrom->GetId(), 20);
1763  }
1764  return true;
1765  }
1766 
1767  uint256 hashLastBlock;
1768  for (const CBlockHeader& header : headers) {
1769  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
1770  Misbehaving(pfrom->GetId(), 20, "non-continuous headers sequence");
1771  return false;
1772  }
1773  hashLastBlock = header.GetHash();
1774  }
1775 
1776  // If we don't have the last header, then they'll have given us
1777  // something new (if these headers are valid).
1778  if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) {
1779  received_new_header = true;
1780  }
1781  }
1782 
1783  CValidationState state;
1784  CBlockHeader first_invalid_header;
1785  if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) {
1786  int nDoS;
1787  if (state.IsInvalid(nDoS)) {
1788  LOCK(cs_main);
1789  if (nDoS > 0) {
1790  Misbehaving(pfrom->GetId(), nDoS, "invalid header received");
1791  } else {
1792  LogPrint(BCLog::NET, "peer=%d: invalid header received\n", pfrom->GetId());
1793  }
1794  if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) {
1795  // Goal: don't allow outbound peers to use up our outbound
1796  // connection slots if they are on incompatible chains.
1797  //
1798  // We ask the caller to set punish_duplicate_invalid appropriately based
1799  // on the peer and the method of header delivery (compact
1800  // blocks are allowed to be invalid in some circumstances,
1801  // under BIP 152).
1802  // Here, we try to detect the narrow situation that we have a
1803  // valid block header (ie it was valid at the time the header
1804  // was received, and hence stored in mapBlockIndex) but know the
1805  // block is invalid, and that a peer has announced that same
1806  // block as being on its active chain.
1807  // Disconnect the peer in such a situation.
1808  //
1809  // Note: if the header that is invalid was not accepted to our
1810  // mapBlockIndex at all, that may also be grounds for
1811  // disconnecting the peer, as the chain they are on is likely
1812  // to be incompatible. However, there is a circumstance where
1813  // that does not hold: if the header's timestamp is more than
1814  // 2 hours ahead of our current time. In that case, the header
1815  // may become valid in the future, and we don't want to
1816  // disconnect a peer merely for serving us one too-far-ahead
1817  // block header, to prevent an attacker from splitting the
1818  // network by mining a block right at the 2 hour boundary.
1819  //
1820  // TODO: update the DoS logic (or, rather, rewrite the
1821  // DoS-interface between validation and net_processing) so that
1822  // the interface is cleaner, and so that we disconnect on all the
1823  // reasons that a peer's headers chain is incompatible
1824  // with ours (eg block->nVersion softforks, MTP violations,
1825  // etc), and not just the duplicate-invalid case.
1826  pfrom->fDisconnect = true;
1827  }
1828  return false;
1829  }
1830  }
1831 
1832  {
1833  LOCK(cs_main);
1834  CNodeState *nodestate = State(pfrom->GetId());
1835  if (nodestate->nUnconnectingHeaders > 0) {
1836  LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
1837  }
1838  nodestate->nUnconnectingHeaders = 0;
1839 
1840  assert(pindexLast);
1841  UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
1842 
1843  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
1844  // because it is set in UpdateBlockAvailability. Some nullptr checks
1845  // are still present, however, as belt-and-suspenders.
1846 
1847  if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
1848  nodestate->m_last_block_announcement = GetTime();
1849  }
1850 
1851  if (nCount == MAX_HEADERS_RESULTS) {
1852  // Headers message had its maximum size; the peer may have more headers.
1853  // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
1854  // from there instead.
1855  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
1856  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
1857  }
1858 
1859  bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
1860  // If this set of headers is valid and ends in a block with at least as
1861  // much work as our tip, download as much as possible.
1862  if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
1863  std::vector<const CBlockIndex*> vToFetch;
1864  const CBlockIndex *pindexWalk = pindexLast;
1865  // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
1866  while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1867  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
1868  !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
1869  // We don't have this block, and it's not yet in flight.
1870  vToFetch.push_back(pindexWalk);
1871  }
1872  pindexWalk = pindexWalk->pprev;
1873  }
1874  // If pindexWalk still isn't on our main chain, we're looking at a
1875  // very large reorg at a time we think we're close to caught up to
1876  // the main chain -- this shouldn't really happen. Bail out on the
1877  // direct fetch and rely on parallel download instead.
1878  if (!chainActive.Contains(pindexWalk)) {
1879  LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
1880  pindexLast->GetBlockHash().ToString(),
1881  pindexLast->nHeight);
1882  } else {
1883  std::vector<CInv> vGetData;
1884  // Download as much as possible, from earliest to latest.
1885  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
1886  if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1887  // Can't download any more from this peer
1888  break;
1889  }
1890  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
1891  MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
1892  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1893  pindex->GetBlockHash().ToString(), pfrom->GetId());
1894  }
1895  if (vGetData.size() > 1) {
1896  LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
1897  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
1898  }
1899  if (vGetData.size() > 0) {
1900  if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
1901  // In any case, we want to download using a compact block, not a regular one
1902  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
1903  }
1904  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
1905  }
1906  }
1907  }
1908  // If we're in IBD, we want outbound peers that will serve us a useful
1909  // chain. Disconnect peers that are on chains with insufficient work.
1910  if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
1911  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
1912  // headers to fetch from this peer.
1913  if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
1914  // This peer has too little work on their headers chain to help
1915  // us sync -- disconnect if using an outbound slot (unless
1916  // whitelisted or addnode).
1917  // Note: We compare their tip to nMinimumChainWork (rather than
1918  // chainActive.Tip()) because we won't start block download
1919  // until we have a headers chain that has at least
1920  // nMinimumChainWork, even if a peer has a chain past our tip,
1921  // as an anti-DoS measure.
1922  if (IsOutboundDisconnectionCandidate(pfrom)) {
1923  LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
1924  pfrom->fDisconnect = true;
1925  }
1926  }
1927  }
1928 
1929  if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
1930  // If this is an outbound peer, check to see if we should protect
1931  // it from the bad/lagging chain logic.
1932  if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
1933  LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
1934  nodestate->m_chain_sync.m_protect = true;
1935  ++g_outbound_peers_with_protect_from_disconnect;
1936  }
1937  }
1938  }
1939 
1940  return true;
1941 }
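
The direct-fetch logic in ProcessHeadersMessage() walks backwards from the last announced header, collects blocks that are neither stored nor in flight, and requests them earliest-first, giving up if the walk never reaches the active chain (a large reorg). A simplified, self-contained model of that traversal with a hypothetical BlockNodeSketch type:

#include <algorithm>
#include <cstddef>
#include <vector>

struct BlockNodeSketch {
    const BlockNodeSketch* pprev = nullptr;  // parent block
    bool haveData = false;                   // block already stored locally
    bool inFlight = false;                   // already requested from some peer
    bool onActiveChain = false;              // part of our current best chain
};

// Walk back from `last` toward our active chain, collecting up to `maxPerPeer`
// blocks that still need downloading; return them earliest-first, or an empty
// list if the walk did not reach the active chain within the limit.
std::vector<const BlockNodeSketch*>
CollectDirectFetch(const BlockNodeSketch* last, size_t maxPerPeer)
{
    std::vector<const BlockNodeSketch*> toFetch;
    const BlockNodeSketch* walk = last;
    while (walk && !walk->onActiveChain && toFetch.size() <= maxPerPeer) {
        if (!walk->haveData && !walk->inFlight) toFetch.push_back(walk);
        walk = walk->pprev;
    }
    if (walk == nullptr || !walk->onActiveChain) return {};  // looks like a large reorg: bail out
    std::reverse(toFetch.begin(), toFetch.end());            // download earliest to latest
    return toFetch;
}
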
1942 
1943 void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
1944 {
1945  AssertLockHeld(cs_main);
1946  AssertLockHeld(g_cs_orphans);
1947  std::set<NodeId> setMisbehaving;
1948  bool done = false;
1949  while (!done && !orphan_work_set.empty()) {
1950  const uint256 orphanHash = *orphan_work_set.begin();
1951  orphan_work_set.erase(orphan_work_set.begin());
1952 
1953  auto orphan_it = mapOrphanTransactions.find(orphanHash);
1954  if (orphan_it == mapOrphanTransactions.end()) continue;
1955 
1956  const CTransactionRef porphanTx = orphan_it->second.tx;
1957  const CTransaction& orphanTx = *porphanTx;
1958  NodeId fromPeer = orphan_it->second.fromPeer;
1959  bool fMissingInputs2 = false;
1960  // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
1961  // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
1962  // anyone relaying LegitTxX banned)
1963  CValidationState stateDummy;
1964 
1965  if (setMisbehaving.count(fromPeer)) continue;
1966  if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, &fMissingInputs2 /* pfMissingInputs */,
1967  false /* bypass_limits */, 0 /* nAbsurdFee */)) {
1968  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
1969  connman->RelayTransaction(orphanTx);
1970  for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
1971  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
1972  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
1973  for (const auto& elem : it_by_prev->second) {
1974  orphan_work_set.insert(elem->first);
1975  }
1976  }
1977  }
1978  EraseOrphanTx(orphanHash);
1979  done = true;
1980  } else if (!fMissingInputs2) {
1981  int nDos = 0;
1982  if (stateDummy.IsInvalid(nDos) && nDos > 0) {
1983  // Punish peer that gave us an invalid orphan tx
1984  Misbehaving(fromPeer, nDos);
1985  setMisbehaving.insert(fromPeer);
1986  LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
1987  }
1988  // Has inputs but not accepted to mempool
1989  // Probably non-standard or insufficient fee
1990  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
1991  if (!stateDummy.CorruptionPossible()) {
1992  assert(recentRejects);
1993  recentRejects->insert(orphanHash);
1994  }
1995  EraseOrphanTx(orphanHash);
1996  done = true;
1997  }
1998  mempool.check(pcoinsTip.get());
1999  }
2000 }
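
ProcessOrphanTx() is driven by an index from previous outputs to the orphans spending them: whenever a transaction is accepted, every orphan that spends one of its outputs is queued for re-evaluation. A reduced sketch of that bookkeeping with hypothetical TxIdSketch/OutPointSketch aliases rather than the real mapOrphanTransactionsByPrev structure:

#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <utility>

using TxIdSketch = std::string;                           // stand-in for uint256
using OutPointSketch = std::pair<TxIdSketch, uint32_t>;   // (txid, output index)

// prevout -> set of orphan txids spending that prevout.
using OrphanIndexSketch = std::map<OutPointSketch, std::set<TxIdSketch>>;

// After `acceptedTxId` (with `nOutputs` outputs) enters the mempool, queue every
// orphan spending one of its outputs so it gets another chance at acceptance.
void QueueDependentOrphans(const OrphanIndexSketch& byPrev,
                           const TxIdSketch& acceptedTxId, uint32_t nOutputs,
                           std::set<TxIdSketch>& workSet)
{
    for (uint32_t i = 0; i < nOutputs; ++i) {
        auto it = byPrev.find({acceptedTxId, i});
        if (it == byPrev.end()) continue;
        workSet.insert(it->second.begin(), it->second.end());
    }
}
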
2001 
2002 bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
2003 {
2004  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
2005  if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
2006  {
2007  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2008  return true;
2009  }
2010 
2011  if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
2012  (strCommand == NetMsgType::FILTERLOAD ||
2013  strCommand == NetMsgType::FILTERADD))
2014  {
2015  if (pfrom->nVersion >= NO_BLOOM_VERSION) {
2016  LOCK(cs_main);
2017  Misbehaving(pfrom->GetId(), 100);
2018  return false;
2019  } else {
2020  pfrom->fDisconnect = true;
2021  return false;
2022  }
2023  }
2024 
2025  if (strCommand == NetMsgType::REJECT)
2026  {
2027  std::string strMsg; unsigned char ccode; std::string strReason;
2028  uint256 hash;
2029  try {
2030  vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
2031  if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) {
2032  vRecv >> hash;
2033  }
2034  } catch (const std::ios_base::failure&) {
2035  // Avoid feedback loops by preventing reject messages from triggering a new reject message.
2036  LogPrint(BCLog::NET, "Unparseable reject message received\n");
2037  }
2038 
2039  if (strMsg == NetMsgType::BLOCK) {
2040  // The node requested a block from us and then rejected it, which indicates that it's most likely running
2041  // on rules which are incompatible with ours. Better to ban it after some time, as it might otherwise keep
2042  // asking for the same block (if -addnode/-connect was used on the other side).
2043  LOCK(cs_main);
2044  Misbehaving(pfrom->GetId(), 1);
2045  }
2046 
2047  if (LogAcceptCategory(BCLog::NET)) {
2048  std::ostringstream ss;
2049  ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
2050 
2051  if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) {
2052  ss << ": hash " << hash.ToString();
2053  }
2054  LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
2055  }
2056  return true;
2057  }
2058 
2059  if (strCommand == NetMsgType::VERSION) {
2060  // Each connection can only send one version message
2061  if (pfrom->nVersion != 0)
2062  {
2063  if (g_enable_bip61) {
2064  connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message")));
2065  }
2066  LOCK(cs_main);
2067  Misbehaving(pfrom->GetId(), 1);
2068  return false;
2069  }
2070 
2071  int64_t nTime;
2072  CAddress addrMe;
2073  CAddress addrFrom;
2074  uint64_t nNonce = 1;
2075  uint64_t nServiceInt;
2076  ServiceFlags nServices;
2077  int nVersion;
2078  int nSendVersion;
2079  std::string strSubVer;
2080  std::string cleanSubVer;
2081  int nStartingHeight = -1;
2082  bool fRelay = true;
2083 
2084  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2085  nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
2086  nServices = ServiceFlags(nServiceInt);
2087  if (!pfrom->fInbound)
2088  {
2089  connman->SetServices(pfrom->addr, nServices);
2090  }
2091  if (!pfrom->fInbound && !pfrom->fFeeler && !pfrom->m_manual_connection && !HasAllDesirableServiceFlags(nServices))
2092  {
2093  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, GetDesirableServiceFlags(nServices));
2094  if (g_enable_bip61) {
2095  connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
2096  strprintf("Expected to offer services %08x", GetDesirableServiceFlags(nServices))));
2097  }
2098  pfrom->fDisconnect = true;
2099  return false;
2100  }
2101 
2102  if (nVersion < MIN_PEER_PROTO_VERSION)
2103  {
2104  // disconnect from peers older than this proto version
2105  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
2106  if (g_enable_bip61) {
2107  connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
2108  strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
2109  }
2110  pfrom->fDisconnect = true;
2111  return false;
2112  }
2113 
2114  if (nVersion == 10300)
2115  nVersion = 300;
2116  if (!vRecv.empty())
2117  vRecv >> addrFrom >> nNonce;
2118  if (!vRecv.empty()) {
2119  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2120  cleanSubVer = SanitizeString(strSubVer);
2121  }
2122  if (!vRecv.empty()) {
2123  vRecv >> nStartingHeight;
2124  }
2125  if (!vRecv.empty())
2126  vRecv >> fRelay;
2127  if (!vRecv.empty()) {
2128  LOCK(pfrom->cs_mnauth);
2129  vRecv >> pfrom->receivedMNAuthChallenge;
2130  }
2131  if (!vRecv.empty()) {
2132  bool fOtherMasternode = false;
2133  vRecv >> fOtherMasternode;
2134  if (pfrom->fInbound) {
2135  pfrom->fMasternode = fOtherMasternode;
2136  if (fOtherMasternode) {
2137  LogPrint(BCLog::NET_NETCONN, "peer=%d is an inbound masternode connection, not relaying anything to it\n", pfrom->GetId());
2138  if (!fMasternodeMode) {
2139  LogPrint(BCLog::NET_NETCONN, "but we're not a masternode, disconnecting\n");
2140  pfrom->fDisconnect = true;
2141  return true;
2142  }
2143  }
2144  }
2145  }
2146  // Disconnect if we connected to ourself
2147  if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce))
2148  {
2149  LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
2150  pfrom->fDisconnect = true;
2151  return true;
2152  }
2153 
2154  if (pfrom->fInbound && addrMe.IsRoutable())
2155  {
2156  SeenLocal(addrMe);
2157  }
2158 
2159  // Be shy and don't send version until we hear
2160  if (pfrom->fInbound)
2161  PushNodeVersion(pfrom, connman, GetAdjustedTime());
2162 
2163  if (Params().NetworkIDString() == CBaseChainParams::DEVNET) {
2164  if (strSubVer.find(strprintf("devnet=%s", gArgs.GetDevNetName())) == std::string::npos) {
2165  LOCK(cs_main);
2166  LogPrintf("connected to wrong devnet. Reported version is %s, expected devnet name is %s\n", strSubVer, gArgs.GetDevNetName());
2167  if (!pfrom->fInbound)
2168  Misbehaving(pfrom->GetId(), 100); // don't try to connect again
2169  else
2170  Misbehaving(pfrom->GetId(), 1); // whoever connected might just have made a mistake, don't ban them immediately
2171  pfrom->fDisconnect = true;
2172  return true;
2173  }
2174  }
2175 
2176  connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
2177 
2178  pfrom->nServices = nServices;
2179  pfrom->SetAddrLocal(addrMe);
2180  {
2181  LOCK(pfrom->cs_SubVer);
2182  pfrom->strSubVer = strSubVer;
2183  pfrom->cleanSubVer = cleanSubVer;
2184  }
2185  pfrom->nStartingHeight = nStartingHeight;
2186 
2187  // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients"
2188  pfrom->fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2189 
2190  // set nodes not capable of serving the complete blockchain history as "limited nodes"
2191  pfrom->m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2192 
2193  {
2194  LOCK(pfrom->cs_filter);
2195  pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message
2196  }
2197 
2198  // Change version
2199  pfrom->SetSendVersion(nSendVersion);
2200  pfrom->nVersion = nVersion;
2201 
2202  // Potentially mark this peer as a preferred download peer.
2203  {
2204  LOCK(cs_main);
2205  UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
2206  }
2207 
2208  if (!pfrom->fInbound)
2209  {
2210  // Advertise our address
2211  if (fListen && !IsInitialBlockDownload())
2212  {
2213  CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
2214  FastRandomContext insecure_rand;
2215  if (addr.IsRoutable())
2216  {
2217  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2218  pfrom->PushAddress(addr, insecure_rand);
2219  } else if (IsPeerAddrLocalGood(pfrom)) {
2220  addr.SetIP(addrMe);
2221  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2222  pfrom->PushAddress(addr, insecure_rand);
2223  }
2224  }
2225 
2226  // Get recent addresses
2227  if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000)
2228  {
2229  connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
2230  pfrom->fGetAddr = true;
2231  }
2232  connman->MarkAddressGood(pfrom->addr);
2233  }
2234 
2235  std::string remoteAddr;
2236  if (fLogIPs)
2237  remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
2238 
2239  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
2240  cleanSubVer, pfrom->nVersion,
2241  pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
2242  remoteAddr);
2243 
2244  int64_t nTimeOffset = nTime - GetTime();
2245  pfrom->nTimeOffset = nTimeOffset;
2246  AddTimeData(pfrom->addr, nTimeOffset);
2247 
2248  // Feeler connections exist only to verify if address is online.
2249  if (pfrom->fFeeler) {
2250  assert(pfrom->fInbound == false);
2251  pfrom->fDisconnect = true;
2252  }
2253  return true;
2254  }
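// A simplified model of the self-connection check performed above (hypothetical
// names, not the actual CConnman interface): every outbound connection records
// the random 64-bit nonce it sent in its own VERSION message; if an inbound
// VERSION arrives carrying one of those nonces, the "peer" is this node talking
// to itself and gets disconnected.
//
//     std::set<uint64_t> sentNonces;              // nonces from our own VERSION messages
//     bool CheckIncomingNonce(uint64_t nonce) {
//         return sentNonces.count(nonce) == 0;    // false => connected to self
//     }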
2255 
2256  if (pfrom->nVersion == 0) {
2257  // Must have a version message before anything else
2258  LOCK(cs_main);
2259  Misbehaving(pfrom->GetId(), 1);
2260  return false;
2261  }
2262 
2263  // At this point, the outgoing message serialization version can't change.
2264  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
2265 
2266  if (strCommand == NetMsgType::VERACK)
2267  {
2268  pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
2269 
2270  if (!pfrom->fInbound) {
2271  // Mark this node as currently connected, so we update its timestamp later.
2272  LOCK(cs_main);
2273  State(pfrom->GetId())->fCurrentlyConnected = true;
2274  LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s\n",
2275  pfrom->nVersion.load(), pfrom->nStartingHeight, pfrom->GetId(),
2276  (fLogIPs ? strprintf(", peeraddr=%s", pfrom->addr.ToString()) : ""));
2277  }
2278 
2279  if (pfrom->nVersion >= LLMQS_PROTO_VERSION && !pfrom->fMasternodeProbe) {
2280  CMNAuth::PushMNAUTH(pfrom, *connman);
2281  }
2282 
2283  if (pfrom->nVersion >= SENDHEADERS_VERSION) {
2284  // Tell our peer we prefer to receive headers rather than inv's
2285  // We send this to non-NODE NETWORK peers as well, because even
2286  // non-NODE NETWORK peers can announce blocks (such as pruning
2287  // nodes)
2288  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
2289  }
2290 
2291  if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION && !pfrom->fMasternode) {
2292  // Tell our peer we are willing to provide version-1 cmpctblocks
2293  // However, we do not request new block announcements using
2294  // cmpctblock messages.
2295  // We send this to non-NODE NETWORK peers as well, because
2296  // they may wish to request compact blocks from us
2297  bool fAnnounceUsingCMPCTBLOCK = false;
2298  uint64_t nCMPCTBLOCKVersion = 1;
2299  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2300  }
2301 
2302  if (pfrom->nVersion >= SENDDSQUEUE_PROTO_VERSION) {
2303  // Tell our peer that it should send us PrivateSend queue messages
2304  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDDSQUEUE, true));
2305  } else {
2306  // older nodes do not support SENDDSQUEUE and expect us to always send PrivateSend queue messages
2307  // TODO we can remove this compatibility code in 0.15.0
2308  pfrom->fSendDSQueue = true;
2309  }
2310 
2311  if (pfrom->nVersion >= LLMQS_PROTO_VERSION && !pfrom->fMasternode) {
2312  // Tell our peer that we're interested in plain LLMQ recovered signatures.
2313  // Otherwise the peer would only announce/send messages resulting from QRECSIG,
2314  // e.g. InstantSend locks or ChainLocks. SPV nodes should not send this message
2315  // as they are usually only interested in the higher level messages
2316  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QSENDRECSIGS, true));
2317  }
2318 
2319  if (gArgs.GetBoolArg("-watchquorums", llmq::DEFAULT_WATCH_QUORUMS) && !pfrom->fMasternode) {
2320  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::QWATCH));
2321  }
2322 
2323  pfrom->fSuccessfullyConnected = true;
2324  return true;
2325  }
2326 
2327  if (!pfrom->fSuccessfullyConnected) {
2328  // Must have a verack message before anything else
2329  LOCK(cs_main);
2330  Misbehaving(pfrom->GetId(), 1);
2331  return false;
2332  }
2333 
2334  if (pfrom->nTimeFirstMessageReceived == 0) {
2335  // First message after VERSION/VERACK
2336  pfrom->nTimeFirstMessageReceived = GetSystemTimeInSeconds();
2337  pfrom->fFirstMessageIsMNAUTH = strCommand == NetMsgType::MNAUTH;
2338  // Note: do not break the flow here
2339 
2340  if (pfrom->fMasternodeProbe && !pfrom->fFirstMessageIsMNAUTH) {
2341  LogPrint(BCLog::NET, "connection is a masternode probe but first received message is not MNAUTH, peer=%d\n", pfrom->GetId());
2342  pfrom->fDisconnect = true;
2343  return false;
2344  }
2345  }
2346 
2347  if (strCommand == NetMsgType::ADDR) {
2348  std::vector<CAddress> vAddr;
2349  vRecv >> vAddr;
2350 
2351  // Don't want addr from older versions unless seeding
2352  if (pfrom->nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000)
2353  return true;
2354  if (vAddr.size() > 1000)
2355  {
2356  LOCK(cs_main);
2357  Misbehaving(pfrom->GetId(), 20, strprintf("message addr size() = %u", vAddr.size()));
2358  return false;
2359  }
2360 
2361  // Store the new addresses
2362  std::vector<CAddress> vAddrOk;
2363  int64_t nNow = GetAdjustedTime();
2364  int64_t nSince = nNow - 10 * 60;
2365  for (CAddress& addr : vAddr)
2366  {
2367  if (interruptMsgProc)
2368  return true;
2369 
2370  // We only bother storing full nodes, though this may include
2371  // things which we would not make an outbound connection to, in
2372  // part because we may make feeler connections to them.
2373  if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
2374  continue;
2375 
2376  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
2377  addr.nTime = nNow - 5 * 24 * 60 * 60;
2378  pfrom->AddAddressKnown(addr);
2379  bool fReachable = IsReachable(addr);
2380  if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
2381  {
2382  RelayAddress(addr, fReachable, connman);
2383  }
2384  // Do not store addresses outside our network
2385  if (fReachable)
2386  vAddrOk.push_back(addr);
2387  }
2388  connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
2389  if (vAddr.size() < 1000)
2390  pfrom->fGetAddr = false;
2391  if (pfrom->fOneShot)
2392  pfrom->fDisconnect = true;
2393  return true;
2394  }
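// A reduced sketch of the timestamp sanitization applied to each relayed address
// above (hypothetical helper, plain int64_t seconds): timestamps that are clearly
// bogus (before ~1973 or more than 10 minutes in the future) are clamped to five
// days in the past so they neither dominate addrman nor get relayed as "fresh".
//
//     int64_t SanitizeAddrTime(int64_t nTime, int64_t nNow) {
//         if (nTime <= 100000000 || nTime > nNow + 10 * 60)
//             return nNow - 5 * 24 * 60 * 60;     // pretend it is 5 days old
//         return nTime;
//     }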
2395 
2396  if (strCommand == NetMsgType::SENDHEADERS) {
2397  LOCK(cs_main);
2398  State(pfrom->GetId())->fPreferHeaders = true;
2399  return true;
2400  }
2401 
2402  if (strCommand == NetMsgType::SENDCMPCT) {
2403  bool fAnnounceUsingCMPCTBLOCK = false;
2404  uint64_t nCMPCTBLOCKVersion = 1;
2405  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2406  if (nCMPCTBLOCKVersion == 1) {
2407  LOCK(cs_main);
2408  State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
2409  State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2410  State(pfrom->GetId())->fSupportsDesiredCmpctVersion = true;
2411  }
2412  return true;
2413  }
2414 
2415 
2416  if (strCommand == NetMsgType::SENDDSQUEUE)
2417  {
2418  bool b;
2419  vRecv >> b;
2420  pfrom->fSendDSQueue = b;
2421  return true;
2422  }
2423 
2424 
2425  if (strCommand == NetMsgType::QSENDRECSIGS) {
2426  bool b;
2427  vRecv >> b;
2428  pfrom->fSendRecSigs = b;
2429  return true;
2430  }
2431 
2432  if (strCommand == NetMsgType::INV) {
2433  std::vector<CInv> vInv;
2434  vRecv >> vInv;
2435  if (vInv.size() > MAX_INV_SZ)
2436  {
2437  LOCK(cs_main);
2438  Misbehaving(pfrom->GetId(), 20, strprintf("message inv size() = %u", vInv.size()));
2439  return false;
2440  }
2441 
2442  bool fBlocksOnly = !fRelayTxes;
2443 
2444  // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
2445  if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
2446  fBlocksOnly = false;
2447 
2448  LOCK(cs_main);
2449 
2450  const auto current_time = GetTime<std::chrono::microseconds>();
2451 
2452  for (CInv &inv : vInv)
2453  {
2454  if(!inv.IsKnownType()) {
2455  LogPrint(BCLog::NET, "got inv of unknown type %d: %s peer=%d\n", inv.type, inv.hash.ToString(), pfrom->GetId());
2456  continue;
2457  }
2458 
2459  if (interruptMsgProc)
2460  return true;
2461 
2462  bool fAlreadyHave = AlreadyHave(inv);
2463  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
2464 
2465  if (inv.type == MSG_BLOCK) {
2466  UpdateBlockAvailability(pfrom->GetId(), inv.hash);
2467 
2468  if (fAlreadyHave || fImporting || fReindex || mapBlocksInFlight.count(inv.hash)) {
2469  continue;
2470  }
2471 
2472  CNodeState *state = State(pfrom->GetId());
2473  if (!state) {
2474  continue;
2475  }
2476 
2477  // Download if this is a nice peer, or we have no nice peers and this one might do.
2478  bool fFetch = state->fPreferredDownload || (nPreferredDownload == 0 && !pfrom->fOneShot);
2479  // Only actively request headers from a single peer, unless we're close to end of initial download.
2480  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
2481  // Make sure to mark this peer as the one we are currently syncing with etc.
2482  state->fSyncStarted = true;
2483  state->nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(chainparams.GetConsensus().nPowTargetSpacing);
2484  nSyncStarted++;
2485  // We used to request the full block here, but since headers-announcements are now the
2486  // primary method of announcement on the network, and since, in the case that a node
2487  // fell back to inv we probably have a reorg which we should get the headers for first,
2488  // we now only provide a getheaders response here. When we receive the headers, we will
2489  // then ask for the blocks we need.
2490  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
2491  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
2492  }
2493  }
2494  else
2495  {
2496  static std::set<int> allowWhileInIBDObjs = {
2497  MSG_SPORK
2498  };
2499 
2500  pfrom->AddInventoryKnown(inv);
2501  if (fBlocksOnly) {
2502  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(),
2503  pfrom->GetId());
2504  } else if (!fAlreadyHave) {
2505  bool allowWhileInIBD = allowWhileInIBDObjs.count(inv.type);
2506  if (allowWhileInIBD || (!fImporting && !fReindex && !IsInitialBlockDownload())) {
2507  RequestObject(State(pfrom->GetId()), inv, current_time);
2508  }
2509  }
2510  }
2511  }
2512  return true;
2513  }
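// The MSG_BLOCK branch above never fetches the announced block directly; it asks
// for headers first and only starts a headers sync with a single peer at a time,
// unless our best header is already older than nMaxTipAge. In simplified
// pseudocode (hypothetical names):
//
//     bool startSync = (nSyncStarted == 0 && peerIsPreferred) ||
//                      bestHeaderTime > GetAdjustedTime() - nMaxTipAge;
//     if (startSync) Send(GETHEADERS, Locator(pindexBestHeader), inv.hash);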
2514 
2515  if (strCommand == NetMsgType::GETDATA) {
2516  std::vector<CInv> vInv;
2517  vRecv >> vInv;
2518  if (vInv.size() > MAX_INV_SZ)
2519  {
2520  LOCK(cs_main);
2521  Misbehaving(pfrom->GetId(), 20, strprintf("message getdata size() = %u", vInv.size()));
2522  return false;
2523  }
2524 
2525  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId());
2526 
2527  if (vInv.size() > 0) {
2528  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->GetId());
2529  }
2530 
2531  pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
2532  ProcessGetData(pfrom, chainparams, connman, interruptMsgProc);
2533  return true;
2534  }
2535 
2536  if (strCommand == NetMsgType::GETBLOCKS) {
2537  CBlockLocator locator;
2538  uint256 hashStop;
2539  vRecv >> locator >> hashStop;
2540 
2541  // We might have announced the currently-being-connected tip using a
2542  // compact block, which resulted in the peer sending a getblocks
2543  // request, which we would otherwise respond to without the new block.
2544  // To avoid this situation we simply verify that we are on our best
2545  // known chain now. This is super overkill, but we handle it better
2546  // for getheaders requests, and there are no known nodes which support
2547  // compact blocks but still use getblocks to request blocks.
2548  {
2549  std::shared_ptr<const CBlock> a_recent_block;
2550  {
2551  LOCK(cs_most_recent_block);
2552  a_recent_block = most_recent_block;
2553  }
2554  CValidationState dummy;
2555  ActivateBestChain(dummy, Params(), a_recent_block);
2556  }
2557 
2558  LOCK(cs_main);
2559 
2560  // Find the last block the caller has in the main chain
2561  const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
2562 
2563  // Send the rest of the chain
2564  if (pindex)
2565  pindex = chainActive.Next(pindex);
2566  int nLimit = 500;
2567  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
2568  for (; pindex; pindex = chainActive.Next(pindex))
2569  {
2570  if (pindex->GetBlockHash() == hashStop)
2571  {
2572  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2573  break;
2574  }
2575  // If pruning, don't inv blocks unless we have on disk and are likely to still have
2576  // for some reasonable time window (1 hour) that block relay might require.
2577  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
2578  if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
2579  {
2580  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2581  break;
2582  }
2583  if (!pfrom->fMasternode) {
2584  pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
2585  }
2586  if (--nLimit <= 0)
2587  {
2588  // When this block is requested, we'll send an inv that'll
2589  // trigger the peer to getblocks the next batch of inventory.
2590  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2591  pfrom->hashContinue = pindex->GetBlockHash();
2592  break;
2593  }
2594  }
2595  return true;
2596  }
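// The 500-inventory limit above works together with pfrom->hashContinue: when
// the limit is hit, the hash of the last inv'd block is remembered, and once the
// peer requests that block via getdata, ProcessGetBlockData (see the
// inv.hash == pfrom->hashContinue branch earlier in this file) pushes a single
// INV for our tip, prompting the peer to issue the next getblocks batch. Roughly:
//
//     if (--nLimit <= 0) { pfrom->hashContinue = lastHash; }              // here
//     if (inv.hash == pfrom->hashContinue) PushInv(MSG_BLOCK, tipHash);   // when served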
2597 
2598  if (strCommand == NetMsgType::GETBLOCKTXN) {
2599  BlockTransactionsRequest req;
2600  vRecv >> req;
2601 
2602  std::shared_ptr<const CBlock> recent_block;
2603  {
2604  LOCK(cs_most_recent_block);
2605  if (most_recent_block_hash == req.blockhash)
2606  recent_block = most_recent_block;
2607  // Unlock cs_most_recent_block to avoid cs_main lock inversion
2608  }
2609  if (recent_block) {
2610  SendBlockTransactions(*recent_block, req, pfrom, connman);
2611  return true;
2612  }
2613 
2614  LOCK(cs_main);
2615 
2616  BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
2617  if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
2618  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom->GetId());
2619  return true;
2620  }
2621 
2622  if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
2623  // If an older block is requested (should never happen in practice,
2624  // but can happen in tests) send a block response instead of a
2625  // blocktxn response. Sending a full block response instead of a
2626  // small blocktxn response is preferable in the case where a peer
2627  // might maliciously send lots of getblocktxn requests to trigger
2628  // expensive disk reads, because it will require the peer to
2629  // actually receive all the data read from disk over the network.
2630  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
2631  CInv inv;
2632  inv.type = MSG_BLOCK;
2633  inv.hash = req.blockhash;
2634  pfrom->vRecvGetData.push_back(inv);
2635  // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
2636  return true;
2637  }
2638 
2639  CBlock block;
2640  bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus());
2641  assert(ret);
2642 
2643  SendBlockTransactions(block, req, pfrom, connman);
2644  return true;
2645  }
2646 
2647  if (strCommand == NetMsgType::GETHEADERS) {
2648  CBlockLocator locator;
2649  uint256 hashStop;
2650  vRecv >> locator >> hashStop;
2651 
2652  LOCK(cs_main);
2653  if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
2654  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->GetId());
2655  return true;
2656  }
2657 
2658  CNodeState *nodestate = State(pfrom->GetId());
2659  const CBlockIndex* pindex = nullptr;
2660  if (locator.IsNull())
2661  {
2662  // If locator is null, return the hashStop block
2663  BlockMap::iterator mi = mapBlockIndex.find(hashStop);
2664  if (mi == mapBlockIndex.end())
2665  return true;
2666  pindex = (*mi).second;
2667 
2668  if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
2669  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom->GetId());
2670  return true;
2671  }
2672  }
2673  else
2674  {
2675  // Find the last block the caller has in the main chain
2676  pindex = FindForkInGlobalIndex(chainActive, locator);
2677  if (pindex)
2678  pindex = chainActive.Next(pindex);
2679  }
2680 
2681  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
2682  std::vector<CBlock> vHeaders;
2683  int nLimit = MAX_HEADERS_RESULTS;
2684  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
2685  for (; pindex; pindex = chainActive.Next(pindex))
2686  {
2687  vHeaders.push_back(pindex->GetBlockHeader());
2688  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
2689  break;
2690  }
2691  // pindex can be nullptr either if we sent chainActive.Tip() OR
2692  // if our peer has chainActive.Tip() (and thus we are sending an empty
2693  // headers message). In both cases it's safe to update
2694  // pindexBestHeaderSent to be our tip.
2695  //
2696  // It is important that we simply reset the BestHeaderSent value here,
2697  // and not max(BestHeaderSent, newHeaderSent). We might have announced
2698  // the currently-being-connected tip using a compact block, which
2699  // resulted in the peer sending a headers request, which we respond to
2700  // without the new block. By resetting the BestHeaderSent, we ensure we
2701  // will re-announce the new block via headers (or compact blocks again)
2702  // in the SendMessages logic.
2703  nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
2704  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
2705  return true;
2706  }
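// FindForkInGlobalIndex(), used above, scans the locator's hashes (dense near the
// peer's tip, exponentially sparser toward genesis) and returns the first entry
// that lies on our active chain; headers are then served from its successor, at
// most MAX_HEADERS_RESULTS at a time. A simplified sketch (hypothetical Lookup
// helper):
//
//     for (const uint256& h : locator.vHave) {
//         const CBlockIndex* idx = Lookup(h);
//         if (idx && chainActive.Contains(idx)) return idx;
//     }
//     return chainActive.Genesis();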
2707 
2708  if (strCommand == NetMsgType::TX || strCommand == NetMsgType::DSTX || strCommand == NetMsgType::LEGACYTXLOCKREQUEST) {
2709  // Stop processing the transaction early if
2710  // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
2711  if (!fRelayTxes && (!pfrom->fWhitelisted || !gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
2712  {
2713  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
2714  return true;
2715  }
2716 
2717  CTransactionRef ptx;
2718  CPrivateSendBroadcastTx dstx;
2719  int nInvType = MSG_TX;
2720 
2721  // Read data and assign inv type
2722  if(strCommand == NetMsgType::TX) {
2723  vRecv >> ptx;
2724  } else if(strCommand == NetMsgType::LEGACYTXLOCKREQUEST) {
2725  // we keep processing the legacy IX message here but revert to handling it as a regular TX
2726  vRecv >> ptx;
2727  } else if (strCommand == NetMsgType::DSTX) {
2728  vRecv >> dstx;
2729  ptx = dstx.tx;
2730  nInvType = MSG_DSTX;
2731  }
2732  const CTransaction& tx = *ptx;
2733 
2734  CInv inv(nInvType, tx.GetHash());
2735  pfrom->AddInventoryKnown(inv);
2736 
2737  // Process custom logic, no matter if tx will be accepted to mempool later or not
2738  if (nInvType == MSG_DSTX) {
2739  uint256 hashTx = tx.GetHash();
2740  if (!dstx.IsValidStructure()) {
2741  LogPrint(BCLog::PRIVATESEND, "DSTX -- Invalid DSTX structure: %s\n", hashTx.ToString());
2742  return false;
2743  }
2744  if(CPrivateSend::GetDSTX(hashTx)) {
2745  LogPrint(BCLog::PRIVATESEND, "DSTX -- Already have %s, skipping...\n", hashTx.ToString());
2746  return true; // not an error
2747  }
2748 
2749  const CBlockIndex* pindex{nullptr};
2750  CDeterministicMNCPtr dmn{nullptr};
2751  {
2752  LOCK(cs_main);
2753  pindex = chainActive.Tip();
2754  }
2755  // It could be that a MN is no longer in the list but its DSTX is not yet mined.
2756  // Try to find a MN up to 24 blocks deep to make sure such dstx-es are relayed and processed correctly.
2757  for (int i = 0; i < 24 && pindex; ++i) {
2758  dmn = deterministicMNManager->GetListForBlock(pindex).GetMNByCollateral(dstx.masternodeOutpoint);
2759  if (dmn) break;
2760  pindex = pindex->pprev;
2761  }
2762  if(!dmn) {
2763  LogPrint(BCLog::PRIVATESEND, "DSTX -- Can't find masternode %s to verify %s\n", dstx.masternodeOutpoint.ToStringShort(), hashTx.ToString());
2764  return false;
2765  }
2766 
2767  if (!mmetaman.GetMetaInfo(dmn->proTxHash)->IsValidForMixingTxes()) {
2768  LogPrint(BCLog::PRIVATESEND, "DSTX -- Masternode %s is sending too many transactions %s\n", dstx.masternodeOutpoint.ToStringShort(), hashTx.ToString());
2769  return true;
2770  // TODO: Not an error? Could it be that someone is relaying old DSTXes
2771  // we have no idea about (e.g we were offline)? How to handle them?
2772  }
2773 
2774  if (!dstx.CheckSignature(dmn->pdmnState->pubKeyOperator.Get())) {
2775  LogPrint(BCLog::PRIVATESEND, "DSTX -- CheckSignature() failed for %s\n", hashTx.ToString());
2776  return false;
2777  }
2778 
2779  LogPrint(BCLog::PRIVATESEND, "DSTX -- Got Masternode transaction %s\n", hashTx.ToString());
2780  mempool.PrioritiseTransaction(hashTx, 0.1*COIN);
2781  mmetaman.DisallowMixing(dmn->proTxHash);
2782  }
2783 
2784  LOCK2(cs_main, g_cs_orphans);
2785 
2786  bool fMissingInputs = false;
2787  CValidationState state;
2788 
2789  EraseObjectRequest(pfrom->GetId(), inv);
2790 
2791  if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, ptx, &fMissingInputs /* pfMissingInputs */,
2792  false /* bypass_limits */, 0 /* nAbsurdFee */)) {
2793  // Process custom txes, this changes AlreadyHave to "true"
2794  if (nInvType == MSG_DSTX) {
2795  LogPrint(BCLog::PRIVATESEND, "DSTX -- Masternode transaction accepted, txid=%s, peer=%d\n",
2796  tx.GetHash().ToString(), pfrom->GetId());
2797  CPrivateSend::AddDSTX(dstx);
2798  }
2799 
2800  mempool.check(pcoinsTip.get());
2801  connman->RelayTransaction(tx);
2802 
2803  for (unsigned int i = 0; i < tx.vout.size(); i++) {
2804  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(inv.hash, i));
2805  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2806  for (const auto& elem : it_by_prev->second) {
2807  pfrom->orphan_work_set.insert(elem->first);
2808  }
2809  }
2810  }
2811 
2812  pfrom->nLastTXTime = GetTime();
2813 
2814  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
2815  pfrom->GetId(),
2816  tx.GetHash().ToString(),
2817  mempool.size(), mempool.DynamicMemoryUsage() / 1000);
2818 
2819  // Recursively process any orphan transactions that depended on this one
2820  ProcessOrphanTx(connman, pfrom->orphan_work_set);
2821  }
2822  else if (fMissingInputs)
2823  {
2824  bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
2825  for (const CTxIn& txin : tx.vin) {
2826  if (recentRejects->contains(txin.prevout.hash)) {
2827  fRejectedParents = true;
2828  break;
2829  }
2830  }
2831  if (!fRejectedParents) {
2832  const auto current_time = GetTime<std::chrono::microseconds>();
2833 
2834  for (const CTxIn& txin : tx.vin) {
2835  CInv _inv(MSG_TX, txin.prevout.hash);
2836  pfrom->AddInventoryKnown(_inv);
2837  if (!AlreadyHave(_inv)) RequestObject(State(pfrom->GetId()), _inv, current_time);
2838  // We don't know if the previous tx was a regular or a mixing one, try both
2839  CInv _inv2(MSG_DSTX, txin.prevout.hash);
2840  pfrom->AddInventoryKnown(_inv2);
2841  if (!AlreadyHave(_inv2)) RequestObject(State(pfrom->GetId()), _inv2, current_time);
2842  }
2843  AddOrphanTx(ptx, pfrom->GetId());
2844 
2845  // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
2846  unsigned int nMaxOrphanTxSize = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantxsize", DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE)) * 1000000;
2847  unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTxSize);
2848  if (nEvicted > 0) {
2849  LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
2850  }
2851  } else {
2852  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
2853  // We will continue to reject this tx since it has rejected
2854  // parents so avoid re-requesting it from other peers.
2855  recentRejects->insert(tx.GetHash());
2856  }
2857  } else {
2858  if (!state.CorruptionPossible()) {
2859  assert(recentRejects);
2860  recentRejects->insert(tx.GetHash());
2861  if (RecursiveDynamicUsage(*ptx) < 100000) {
2862  AddToCompactExtraTransactions(ptx);
2863  }
2864  }
2865 
2866  if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
2867  // Always relay transactions received from whitelisted peers, even
2868  // if they were already in the mempool or rejected from it due
2869  // to policy, allowing the node to function as a gateway for
2870  // nodes hidden behind it.
2871  //
2872  // Never relay transactions that we would assign a non-zero DoS
2873  // score for, as we expect peers to do the same with us in that
2874  // case.
2875  int nDoS = 0;
2876  if (!state.IsInvalid(nDoS) || nDoS == 0) {
2877  LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
2878  connman->RelayTransaction(tx);
2879  } else {
2880  LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
2881  }
2882  }
2883  }
2884 
2885  int nDoS = 0;
2886  if (state.IsInvalid(nDoS))
2887  {
2888  LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
2889  pfrom->GetId(),
2890  FormatStateMessage(state));
2891  if (g_enable_bip61 && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) { // Never send AcceptToMemoryPool's internal codes over P2P
2892  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
2893  state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
2894  }
2895  if (nDoS > 0) {
2896  Misbehaving(pfrom->GetId(), nDoS);
2897  }
2898  }
2899  return true;
2900  }
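// The missing-inputs path above can be summarized as follows (simplified
// pseudocode, hypothetical helpers): an orphan is only kept when none of its
// parents were recently rejected; its parents are requested as both MSG_TX and
// MSG_DSTX (their type is unknown), and the orphan pool is capped by
// -maxorphantxsize afterwards.
//
//     if (AnyParentRejected(tx, *recentRejects)) recentRejects->insert(tx.GetHash());
//     else { RequestParents(tx); AddOrphanTx(ptx, peer); LimitOrphanTxSize(maxBytes); }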
2901 
2902  if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
2903  {
2904  CBlockHeaderAndShortTxIDs cmpctblock;
2905  vRecv >> cmpctblock;
2906 
2907  bool received_new_header = false;
2908 
2909  {
2910  LOCK(cs_main);
2911 
2912  if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
2913  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
2914  if (!IsInitialBlockDownload())
2915  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
2916  return true;
2917  }
2918 
2919  if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) {
2920  received_new_header = true;
2921  }
2922  }
2923 
2924  const CBlockIndex *pindex = nullptr;
2925  CValidationState state;
2926  if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
2927  int nDoS;
2928  if (state.IsInvalid(nDoS)) {
2929  if (nDoS > 0) {
2930  LOCK(cs_main);
2931  Misbehaving(pfrom->GetId(), nDoS, strprintf("Peer %d sent us invalid header via cmpctblock", pfrom->GetId()));
2932  } else {
2933  LogPrint(BCLog::NET, "Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId());
2934  }
2935  return true;
2936  }
2937  }
2938 
2939  // When we succeed in decoding a block's txids from a cmpctblock
2940  // message we typically jump to the BLOCKTXN handling code, with a
2941  // dummy (empty) BLOCKTXN message, to re-use the logic there in
2942  // completing processing of the putative block (without cs_main).
2943  bool fProcessBLOCKTXN = false;
2944  CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
2945 
2946  // If we end up treating this as a plain headers message, call that as well
2947  // without cs_main.
2948  bool fRevertToHeaderProcessing = false;
2949 
2950  // Keep a CBlock for "optimistic" compactblock reconstructions (see
2951  // below)
2952  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2953  bool fBlockReconstructed = false;
2954 
2955  {
2956  LOCK2(cs_main, g_cs_orphans);
2957  // If AcceptBlockHeader returned true, it set pindex
2958  assert(pindex);
2959  UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
2960 
2961  CNodeState *nodestate = State(pfrom->GetId());
2962 
2963  // If this was a new header with more work than our tip, update the
2964  // peer's last block announcement time
2965  if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
2966  nodestate->m_last_block_announcement = GetTime();
2967  }
2968 
2969  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
2970  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
2971 
2972  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
2973  return true;
2974 
2975  if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
2976  pindex->nTx != 0) { // We had this block at some point, but pruned it
2977  if (fAlreadyInFlight) {
2978  // We requested this block for some reason, but our mempool will probably be useless
2979  // so we just grab the block via normal getdata
2980  std::vector<CInv> vInv(1);
2981  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
2982  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2983  }
2984  return true;
2985  }
2986 
2987  // If we're not close to tip yet, give up and let parallel block fetch work its magic
2988  if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
2989  return true;
2990 
2991  // We want to be a bit conservative just to be extra careful about DoS
2992  // possibilities in compact block processing...
2993  if (pindex->nHeight <= chainActive.Height() + 2) {
2994  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
2995  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
2996  std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
2997  if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
2998  if (!(*queuedBlockIt)->partialBlock)
2999  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
3000  else {
3001  // The block was already in flight using compact blocks from the same peer
3002  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
3003  return true;
3004  }
3005  }
3006 
3007  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
3008  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3009  if (status == READ_STATUS_INVALID) {
3010  MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
3011  Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block", pfrom->GetId()));
3012  return true;
3013  } else if (status == READ_STATUS_FAILED) {
3014  // Duplicate txindexes, the block is now in-flight, so just request it
3015  std::vector<CInv> vInv(1);
3016  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3017  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3018  return true;
3019  }
3020 
3021  BlockTransactionsRequest req;
3022  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3023  if (!partialBlock.IsTxAvailable(i))
3024  req.indexes.push_back(i);
3025  }
3026  if (req.indexes.empty()) {
3027  // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
3028  BlockTransactions txn;
3029  txn.blockhash = cmpctblock.header.GetHash();
3030  blockTxnMsg << txn;
3031  fProcessBLOCKTXN = true;
3032  } else {
3033  req.blockhash = pindex->GetBlockHash();
3034  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3035  }
3036  } else {
3037  // This block is either already in flight from a different
3038  // peer, or this peer has too many blocks outstanding to
3039  // download from.
3040  // Optimistically try to reconstruct anyway since we might be
3041  // able to without any round trips.
3042  PartiallyDownloadedBlock tempBlock(&mempool);
3043  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3044  if (status != READ_STATUS_OK) {
3045  // TODO: don't ignore failures
3046  return true;
3047  }
3048  std::vector<CTransactionRef> dummy;
3049  status = tempBlock.FillBlock(*pblock, dummy);
3050  if (status == READ_STATUS_OK) {
3051  fBlockReconstructed = true;
3052  }
3053  }
3054  } else {
3055  if (fAlreadyInFlight) {
3056  // We requested this block, but it's too far ahead of our tip, so our
3057  // mempool will probably be useless - request the block normally
3058  std::vector<CInv> vInv(1);
3059  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3060  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3061  return true;
3062  } else {
3063  // If this was an announce-cmpctblock, we want the same treatment as a header message
3064  fRevertToHeaderProcessing = true;
3065  }
3066  }
3067  } // cs_main
3068 
3069  if (fProcessBLOCKTXN)
3070  return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
3071 
3072  if (fRevertToHeaderProcessing) {
3073  // Headers received from HB compact block peers are permitted to be
3074  // relayed before full validation (see BIP 152), so we don't want to disconnect
3075  // the peer if the header turns out to be for an invalid block.
3076  // Note that if a peer tries to build on an invalid chain, that
3077  // will be detected and the peer will be banned.
3078  return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
3079  }
3080 
3081  if (fBlockReconstructed) {
3082  // If we got here, we were able to optimistically reconstruct a
3083  // block that is in flight from some other peer.
3084  {
3085  LOCK(cs_main);
3086  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
3087  }
3088  bool fNewBlock = false;
3089  // Setting fForceProcessing to true means that we bypass some of
3090  // our anti-DoS protections in AcceptBlock, which filters
3091  // unrequested blocks that might be trying to waste our resources
3092  // (eg disk space). Because we only try to reconstruct blocks when
3093  // we're close to caught up (via the CanDirectFetch() requirement
3094  // above, combined with the behavior of not requesting blocks until
3095  // we have a chain with at least nMinimumChainWork), and we ignore
3096  // compact blocks with less work than our tip, it is safe to treat
3097  // reconstructed compact blocks as having been requested.
3098  ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3099  if (fNewBlock) {
3100  pfrom->nLastBlockTime = GetTime();
3101  } else {
3102  LOCK(cs_main);
3103  mapBlockSource.erase(pblock->GetHash());
3104  }
3105  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
3106  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
3107  // Clear download state for this block, which is in
3108  // process from some other peer. We do this after calling
3109  // ProcessNewBlock so that a malleated cmpctblock announcement
3110  // can't be used to interfere with block relay.
3111  MarkBlockAsReceived(pblock->GetHash());
3112  }
3113  }
3114  return true;
3115  }
3116 
3117  if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
3118  {
3119  BlockTransactions resp;
3120  vRecv >> resp;
3121 
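      // BLOCKTXN delivers the transactions we asked for in a GETBLOCKTXN request;
      // they are used to fill in the PartiallyDownloadedBlock tracked in
      // mapBlocksInFlight for this peer before the block goes to ProcessNewBlock.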
3122  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3123  bool fBlockRead = false;
3124  {
3125  LOCK(cs_main);
3126 
3127  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
3128  if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
3129  it->second.first != pfrom->GetId()) {
3130  LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->GetId());
3131  return true;
3132  }
3133 
3134  PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
3135  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3136  if (status == READ_STATUS_INVALID) {
3137  MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
3138  Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions", pfrom->GetId()));
3139  return true;
3140  } else if (status == READ_STATUS_FAILED) {
3141  // Might have collided, fall back to getdata now :(
3142  std::vector<CInv> invs;
3143  invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
3144  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
3145  } else {
3146  // Block is either okay, or possibly we received
3147  // READ_STATUS_CHECKBLOCK_FAILED.
3148  // Note that CheckBlock can only fail for one of a few reasons:
3149  // 1. bad-proof-of-work (impossible here, because we've already
3150  // accepted the header)
3151  // 2. merkleroot doesn't match the transactions given (already
3152  // caught in FillBlock with READ_STATUS_FAILED, so
3153  // impossible here)
3154  // 3. the block is otherwise invalid (eg invalid coinbase,
3155  // block is too big, too many legacy sigops, etc).
3156  // So if CheckBlock failed, #3 is the only possibility.
3157  // Under BIP 152, we don't DoS-ban unless proof of work is
3158  // invalid (we don't require all the stateless checks to have
3159  // been run). This is handled below, so just treat this as
3160  // though the block was successfully read, and rely on the
3161  // handling in ProcessNewBlock to ensure the block index is
3162  // updated, reject messages go out, etc.
3163  MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
3164  fBlockRead = true;
3165  // mapBlockSource is only used for sending reject messages and DoS scores,
3166  // so the race between here and cs_main in ProcessNewBlock is fine.
3167  // BIP 152 permits peers to relay compact blocks after validating
3168  // the header only; we should not punish peers if the block turns
3169  // out to be invalid.
3170  mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
3171  }
3172  } // Don't hold cs_main when we call into ProcessNewBlock
3173  if (fBlockRead) {
3174  bool fNewBlock = false;
3175  // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3176  // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3177  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3178  // disk-space attacks), but this should be safe due to the
3179  // protections in the compact block handler -- see related comment
3180  // in compact block optimistic reconstruction handling.
3181  ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3182  if (fNewBlock) {
3183  pfrom->nLastBlockTime = GetTime();
3184  } else {
3185  LOCK(cs_main);
3186  mapBlockSource.erase(pblock->GetHash());
3187  }
3188  }
3189  return true;
3190  }
3191 
3192  if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
3193  {
3194  std::vector<CBlockHeader> headers;
3195 
3196  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
3197  unsigned int nCount = ReadCompactSize(vRecv);
3198  if (nCount > MAX_HEADERS_RESULTS) {
3199  LOCK(cs_main);
3200  Misbehaving(pfrom->GetId(), 20, strprintf("headers message size = %u", nCount));
3201  return false;
3202  }
3203  headers.resize(nCount);
3204  for (unsigned int n = 0; n < nCount; n++) {
3205  vRecv >> headers[n];
3206  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
3207  }
3208 
3209  // Headers received via a HEADERS message should be valid, and reflect
3210  // the chain the peer is on. If we receive a known-invalid header,
3211  // disconnect the peer if it is using one of our outbound connection
3212  // slots.
3213  bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
3214  return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);
3215  }
3216 
3217  if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
3218  {
3219  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3220  vRecv >> *pblock;
3221 
3222  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
3223 
3224  bool forceProcessing = false;
3225  const uint256 hash(pblock->GetHash());
3226  {
3227  LOCK(cs_main);
3228  // Also always process if we requested the block explicitly, as we may
3229  // need it even though it is not a candidate for a new best tip.
3230  forceProcessing |= MarkBlockAsReceived(hash);
3231  // mapBlockSource is only used for sending reject messages and DoS scores,
3232  // so the race between here and cs_main in ProcessNewBlock is fine.
3233  mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
3234  }
3235  bool fNewBlock = false;
3236  ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
3237  if (fNewBlock) {
3238  pfrom->nLastBlockTime = GetTime();
3239  } else {
3240  LOCK(cs_main);
3241  mapBlockSource.erase(pblock->GetHash());
3242  }
3243  return true;
3244  }
3245 
3246  if (strCommand == NetMsgType::GETADDR) {
3247  // This asymmetric behavior for inbound and outbound connections was introduced
3248  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3249  // to users' AddrMan and later request them by sending getaddr messages.
3250  // Making nodes which are behind NAT and can only make outgoing connections ignore
3251  // the getaddr message mitigates the attack.
3252  if (!pfrom->fInbound) {
3253  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->GetId());
3254  return true;
3255  }
3256 
3257  // Only send one GetAddr response per connection to reduce resource waste
3258  // and discourage addr stamping of INV announcements.
3259  if (pfrom->fSentAddr) {
3260  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->GetId());
3261  return true;
3262  }
3263  pfrom->fSentAddr = true;
3264 
3265  pfrom->vAddrToSend.clear();
3266  std::vector<CAddress> vAddr = connman->GetAddresses();
3267  FastRandomContext insecure_rand;
3268  for (const CAddress &addr : vAddr)
3269  pfrom->PushAddress(addr, insecure_rand);
3270  return true;
3271  }
3272 
3273  if (strCommand == NetMsgType::MEMPOOL) {
3274  if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
3275  {
3276  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
3277  pfrom->fDisconnect = true;
3278  return true;
3279  }
3280 
3281  if (connman->OutboundTargetReached(false) && !pfrom->fWhitelisted)
3282  {
3283  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
3284  pfrom->fDisconnect = true;
3285  return true;
3286  }
3287 
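      // Just record the request here; the actual mempool contents are sent from
      // SendMessages once fSendMempool is picked up (see the BIP 35 handling below).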
3288  LOCK(pfrom->cs_inventory);
3289  pfrom->fSendMempool = true;
3290  return true;
3291  }
3292 
3293  if (strCommand == NetMsgType::PING) {
3294  if (pfrom->nVersion > BIP0031_VERSION)
3295  {
3296  uint64_t nonce = 0;
3297  vRecv >> nonce;
3298  // Echo the message back with the nonce. This allows for two useful features:
3299  //
3300  // 1) A remote node can quickly check if the connection is operational
3301  // 2) Remote nodes can measure the latency of the network thread. If this node
3302  // is overloaded it won't respond to pings quickly and the remote node can
3303  // avoid sending us more work, like chain download requests.
3304  //
3305  // The nonce stops the remote node from getting confused between different pings: without
3306  // it, if the remote node sends a ping once per second and this node takes 5
3307  // seconds to respond to each, the 5th ping the remote sends would appear to
3308  // return very quickly.
3309  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
3310  }
3311  return true;
3312  }
3313 
3314  if (strCommand == NetMsgType::PONG) {
3315  int64_t pingUsecEnd = nTimeReceived;
3316  uint64_t nonce = 0;
3317  size_t nAvail = vRecv.in_avail();
3318  bool bPingFinished = false;
3319  std::string sProblem;
3320 
3321  if (nAvail >= sizeof(nonce)) {
3322  vRecv >> nonce;
3323 
3324  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3325  if (pfrom->nPingNonceSent != 0) {
3326  if (nonce == pfrom->nPingNonceSent) {
3327  // Matching pong received, this ping is no longer outstanding
3328  bPingFinished = true;
3329  int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
3330  if (pingUsecTime > 0) {
3331  // Successful ping time measurement, replace previous
3332  pfrom->nPingUsecTime = pingUsecTime;
3333  pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime.load(), pingUsecTime);
3334  } else {
3335  // This should never happen
3336  sProblem = "Timing mishap";
3337  }
3338  } else {
3339  // Nonce mismatches are normal when pings are overlapping
3340  sProblem = "Nonce mismatch";
3341  if (nonce == 0) {
3342  // This is most likely a bug in another implementation somewhere; cancel this ping
3343  bPingFinished = true;
3344  sProblem = "Nonce zero";
3345  }
3346  }
3347  } else {
3348  sProblem = "Unsolicited pong without ping";
3349  }
3350  } else {
3351  // This is most likely a bug in another implementation somewhere; cancel this ping
3352  bPingFinished = true;
3353  sProblem = "Short payload";
3354  }
3355 
3356  if (!(sProblem.empty())) {
3357  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3358  pfrom->GetId(),
3359  sProblem,
3360  pfrom->nPingNonceSent,
3361  nonce,
3362  nAvail);
3363  }
3364  if (bPingFinished) {
3365  pfrom->nPingNonceSent = 0;
3366  }
3367  return true;
3368  }
3369 
3370  if (strCommand == NetMsgType::FILTERLOAD) {
3371  CBloomFilter filter;
3372  vRecv >> filter;
3373 
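      // BIP 37: install the peer's bloom filter and enable transaction relay for it;
      // an oversized filter is treated as misbehaviour below.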
3374  if (!filter.IsWithinSizeConstraints())
3375  {
3376  // There is no excuse for sending a too-large filter
3377  LOCK(cs_main);
3378  Misbehaving(pfrom->GetId(), 100);
3379  }
3380  else
3381  {
3382  LOCK(pfrom->cs_filter);
3383  pfrom->pfilter.reset(new CBloomFilter(filter));
3384  pfrom->pfilter->UpdateEmptyFull();
3385  pfrom->fRelayTxes = true;
3386  }
3387  return true;
3388  }
3389 
3390  if (strCommand == NetMsgType::FILTERADD) {
3391  std::vector<unsigned char> vData;
3392  vRecv >> vData;
3393 
3394  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3395  // and thus, the maximum size any matched object can have) in a filteradd message
3396  bool bad = false;
3397  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3398  bad = true;
3399  } else {
3400  LOCK(pfrom->cs_filter);
3401  if (pfrom->pfilter) {
3402  pfrom->pfilter->insert(vData);
3403  } else {
3404  bad = true;
3405  }
3406  }
3407  if (bad) {
3408  LOCK(cs_main);
3409  Misbehaving(pfrom->GetId(), 100);
3410  }
3411  return true;
3412  }
3413 
3414  if (strCommand == NetMsgType::FILTERCLEAR) {
3415  LOCK(pfrom->cs_filter);
3416  if (pfrom->GetLocalServices() & NODE_BLOOM) {
3417  pfrom->pfilter.reset(new CBloomFilter());
3418  }
3419  pfrom->fRelayTxes = true;
3420  return true;
3421  }
3422 
3423 
3424  if (strCommand == NetMsgType::GETMNLISTDIFF) {
3425  CGetSimplifiedMNListDiff cmd;
3426  vRecv >> cmd;
3427 
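      // Build a simplified masternode list diff between the peer-supplied base block
      // and target block and reply with MNLISTDIFF; a failed request only costs the
      // peer a misbehaviour score of 1.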
3428  LOCK(cs_main);
3429 
3430  CSimplifiedMNListDiff mnListDiff;
3431  std::string strError;
3432  if (BuildSimplifiedMNListDiff(cmd.baseBlockHash, cmd.blockHash, mnListDiff, strError)) {
3433  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MNLISTDIFF, mnListDiff));
3434  } else {
3435  strError = strprintf("getmnlistdiff failed for baseBlockHash=%s, blockHash=%s. error=%s", cmd.baseBlockHash.ToString(), cmd.blockHash.ToString(), strError);
3436  Misbehaving(pfrom->GetId(), 1, strError);
3437  }
3438  return true;
3439  }
3440 
3441 
3442  if (strCommand == NetMsgType::MNLISTDIFF) {
3443  // we have never requested this
3444  LOCK(cs_main);
3445  Misbehaving(pfrom->GetId(), 100, strprintf("received not-requested mnlistdiff. peer=%d", pfrom->GetId()));
3446  return true;
3447  }
3448 
3449 
3450  if (strCommand == NetMsgType::NOTFOUND) {
3451  // Remove the NOTFOUND transactions from the peer
3452  LOCK(cs_main);
3453  CNodeState *state = State(pfrom->GetId());
3454  std::vector<CInv> vInv;
3455  vRecv >> vInv;
3456  if (vInv.size() <= MAX_PEER_OBJECT_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3457  for (CInv &inv : vInv) {
3458  if (inv.IsKnownType()) {
3459  // If we receive a NOTFOUND message for a txid we requested, erase
3460  // it from our data structures for this peer.
3461  auto in_flight_it = state->m_object_download.m_object_in_flight.find(inv);
3462  if (in_flight_it == state->m_object_download.m_object_in_flight.end()) {
3463  // Skip any further work if this is a spurious NOTFOUND
3464  // message.
3465  continue;
3466  }
3467  state->m_object_download.m_object_in_flight.erase(in_flight_it);
3468  state->m_object_download.m_object_announced.erase(inv);
3469  }
3470  }
3471  }
3472  return true;
3473  }
3474 
3475  bool found = false;
3476  const std::vector<std::string> &allMessages = getAllNetMessageTypes();
3477  for (const std::string msg : allMessages) {
3478  if(msg == strCommand) {
3479  found = true;
3480  break;
3481  }
3482  }
3483 
3484  if (found)
3485  {
3486  // probably one of the extensions
3487 #ifdef ENABLE_WALLET
3488  privateSendClient.ProcessMessage(pfrom, strCommand, vRecv, *connman);
3489 #endif // ENABLE_WALLET
3490  privateSendServer.ProcessMessage(pfrom, strCommand, vRecv, *connman);
3491  sporkManager.ProcessSpork(pfrom, strCommand, vRecv, *connman);
3492  masternodeSync.ProcessMessage(pfrom, strCommand, vRecv);
3493  governance.ProcessMessage(pfrom, strCommand, vRecv, *connman);
3494  CMNAuth::ProcessMessage(pfrom, strCommand, vRecv, *connman);
3495  llmq::quorumBlockProcessor->ProcessMessage(pfrom, strCommand, vRecv, *connman);
3496  llmq::quorumDKGSessionManager->ProcessMessage(pfrom, strCommand, vRecv, *connman);
3497  llmq::quorumSigSharesManager->ProcessMessage(pfrom, strCommand, vRecv, *connman);
3498  llmq::quorumSigningManager->ProcessMessage(pfrom, strCommand, vRecv, *connman);
3499  llmq::chainLocksHandler->ProcessMessage(pfrom, strCommand, vRecv, *connman);
3500  llmq::quorumInstantSendManager->ProcessMessage(pfrom, strCommand, vRecv, *connman);
3501  return true;
3502  }
3503 
3504  // Ignore unknown commands for extensibility
3505  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->GetId());
3506 
3507  return true;
3508 }
3509 
3510 static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman* connman)
3511 {
3512  AssertLockHeld(cs_main);
3513  CNodeState &state = *State(pnode->GetId());
3514 
3515  if (g_enable_bip61) {
3516  for (const CBlockReject& reject : state.rejects) {
3517  connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
3518  }
3519  }
3520  state.rejects.clear();
3521 
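     // If Misbehaving() flagged this peer (fShouldBan), disconnect and ban it;
     // whitelisted and manually-connected peers are only warned about, and local
     // peers are disconnected but not banned.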
3522  if (state.fShouldBan) {
3523  state.fShouldBan = false;
3524  if (pnode->fWhitelisted)
3525  LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->GetLogString());
3526  else if (pnode->m_manual_connection)
3527  LogPrintf("Warning: not punishing manually-connected peer %s!\n", pnode->GetLogString());
3528  else {
3529  pnode->fDisconnect = true;
3530  if (pnode->addr.IsLocal())
3531  LogPrintf("Warning: not banning local peer %s!\n", pnode->GetLogString());
3532  else
3533  {
3534  connman->Ban(pnode->addr, BanReasonNodeMisbehaving);
3535  }
3536  }
3537  return true;
3538  }
3539  return false;
3540 }
3541 
3542 bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
3543 {
3544  const CChainParams& chainparams = Params();
3545  //
3546  // Message format
3547  // (4) message start
3548  // (12) command
3549  // (4) size
3550  // (4) checksum
3551  // (x) data
3552  //
3553  bool fMoreWork = false;
3554 
3555  if (!pfrom->vRecvGetData.empty())
3556  ProcessGetData(pfrom, chainparams, connman, interruptMsgProc);
3557 
3558  if (!pfrom->orphan_work_set.empty()) {
3559  LOCK2(cs_main, g_cs_orphans);
3560  ProcessOrphanTx(connman, pfrom->orphan_work_set);
3561  }
3562 
3563  if (pfrom->fDisconnect)
3564  return false;
3565 
3566  // this maintains the order of responses
3567  if (!pfrom->vRecvGetData.empty()) return true;
3568  if (!pfrom->orphan_work_set.empty()) return true;
3569 
3570  // Don't bother if send buffer is too full to respond anyway
3571  if (pfrom->fPauseSend)
3572  return false;
3573 
3574  std::list<CNetMessage> msgs;
3575  {
3576  LOCK(pfrom->cs_vProcessMsg);
3577  if (pfrom->vProcessMsg.empty())
3578  return false;
3579  // Just take one message
3580  msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
3581  pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
3582  pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
3583  fMoreWork = !pfrom->vProcessMsg.empty();
3584  }
3585  CNetMessage& msg(msgs.front());
3586 
3587  msg.SetVersion(pfrom->GetRecvVersion());
3588  // Scan for message start
3589  if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
3590  LogPrint(BCLog::NET, "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
3591  pfrom->fDisconnect = true;
3592  return false;
3593  }
3594 
3595  // Read header
3596  CMessageHeader& hdr = msg.hdr;
3597  if (!hdr.IsValid(chainparams.MessageStart()))
3598  {
3599  LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->GetId());
3600  return fMoreWork;
3601  }
3602  std::string strCommand = hdr.GetCommand();
3603 
3604  // Message size
3605  unsigned int nMessageSize = hdr.nMessageSize;
3606 
3607  // Checksum
3608  CDataStream& vRecv = msg.vRecv;
3609  const uint256& hash = msg.GetMessageHash();
3610  if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
3611  {
3612  LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
3613  SanitizeString(strCommand), nMessageSize,
3614  HexStr(hash.begin(), hash.end()),
3615  HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
3616  return fMoreWork;
3617  }
3618 
3619  // Process message
3620  bool fRet = false;
3621  try
3622  {
3623  fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc);
3624  if (interruptMsgProc)
3625  return false;
3626  if (!pfrom->vRecvGetData.empty())
3627  fMoreWork = true;
3628  }
3629  catch (const std::ios_base::failure& e)
3630  {
3631  if (g_enable_bip61) {
3632  connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message")));
3633  }
3634  if (strstr(e.what(), "end of data"))
3635  {
3636  // Allow exceptions from under-length message on vRecv
3637  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
3638  }
3639  else if (strstr(e.what(), "size too large"))
3640  {
3641  // Allow exceptions from over-long size
3642  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
3643  }
3644  else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
3645  {
3646  // Allow exceptions from non-canonical encoding
3647  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
3648  }
3649  else
3650  {
3651  PrintExceptionContinue(std::current_exception(), "ProcessMessages()");
3652  }
3653  } catch (...) {
3654  PrintExceptionContinue(std::current_exception(), "ProcessMessages()");
3655  }
3656 
3657  if (!fRet) {
3658  LogPrint(BCLog::NET, "%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->GetId());
3659  }
3660 
3661  LOCK(cs_main);
3662  SendRejectsAndCheckIfBanned(pfrom, connman);
3663 
3664  return fMoreWork;
3665 }
3666 
3667 void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
3668 {
3669  AssertLockHeld(cs_main);
3670 
3671  CNodeState &state = *State(pto->GetId());
3672  const CNetMsgMaker msgMaker(pto->GetSendVersion());
3673 
3674  if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
3675  // This is an outbound peer subject to disconnection if they don't
3676  // announce a block with as much work as the current tip within
3677  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
3678  // their chain has more work than ours, we should sync to it,
3679  // unless it's invalid, in which case we should find that out and
3680  // disconnect from them elsewhere).
3681  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) {
3682  if (state.m_chain_sync.m_timeout != 0) {
3683  state.m_chain_sync.m_timeout = 0;
3684  state.m_chain_sync.m_work_header = nullptr;
3685  state.m_chain_sync.m_sent_getheaders = false;
3686  }
3687  } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
3688  // Our best block known by this peer is behind our tip, and we're either noticing
3689  // that for the first time, OR this peer was able to catch up to some earlier point
3690  // where we checked against our tip.
3691  // Either way, set a new timeout based on current tip.
3692  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
3693  state.m_chain_sync.m_work_header = chainActive.Tip();
3694  state.m_chain_sync.m_sent_getheaders = false;
3695  } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
3696  // No evidence yet that our peer has synced to a chain with work equal to that
3697  // of our tip, when we first detected it was behind. Send a single getheaders
3698  // message to give the peer a chance to update us.
3699  if (state.m_chain_sync.m_sent_getheaders) {
3700  // They've run out of time to catch up!
3701  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
3702  pto->fDisconnect = true;
3703  } else {
3704  assert(state.m_chain_sync.m_work_header);
3705  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
3706  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
3707  state.m_chain_sync.m_sent_getheaders = true;
3708  constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
3709  // Bump the timeout to allow a response, which could clear the timeout
3710  // (if the response shows the peer has synced), reset the timeout (if
3711  // the peer syncs to the required work but not to our tip), or result
3712  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
3713  // has not sufficiently progressed)
3714  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
3715  }
3716  }
3717  }
3718 }
3719 
3720 void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
3721 {
3722  // Check whether we have too many outbound peers
3723  int extra_peers = connman->GetExtraOutboundCount();
3724  if (extra_peers > 0) {
3725  // If we have more outbound peers than we target, disconnect one.
3726  // Pick the outbound peer that least recently announced
3727  // us a new block, with ties broken by choosing the more recent
3728  // connection (higher node id)
3729  NodeId worst_peer = -1;
3730  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
3731 
3732  LOCK(cs_main);
3733 
3734  connman->ForEachNode([&](CNode* pnode) {
3735  // Don't disconnect masternodes just because they were slow in block announcement
3736  if (pnode->fMasternode) return;
3737  // Ignore non-outbound peers, or nodes marked for disconnect already
3738  if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return;
3739  CNodeState *state = State(pnode->GetId());
3740  if (state == nullptr) return; // shouldn't be possible, but just in case
3741  // Don't evict our protected peers
3742  if (state->m_chain_sync.m_protect) return;
3743  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
3744  worst_peer = pnode->GetId();
3745  oldest_block_announcement = state->m_last_block_announcement;
3746  }
3747  });
3748  if (worst_peer != -1) {
3749  bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
3750  // Only disconnect a peer that has been connected to us for
3751  // some reasonable fraction of our check-frequency, to give
3752  // it time for new information to have arrived.
3753  // Also don't disconnect any peer we're trying to download a
3754  // block from.
3755  CNodeState &state = *State(pnode->GetId());
3756  if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
3757  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
3758  pnode->fDisconnect = true;
3759  return true;
3760  } else {
3761  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
3762  return false;
3763  }
3764  });
3765  if (disconnected) {
3766  // If we disconnected an extra peer, that means we successfully
3767  // connected to at least one peer after the last time we
3768  // detected a stale tip. Don't try any more extra peers until
3769  // we next detect a stale tip, to limit the load we put on the
3770  // network from these extra connections.
3771  connman->SetTryNewOutboundPeer(false);
3772  }
3773  }
3774  }
3775 }
3776 
3777 void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
3778 {
3779  if (connman == nullptr) return;
3780 
3781  int64_t time_in_seconds = GetTime();
3782 
3783  EvictExtraOutboundPeers(time_in_seconds);
3784 
3785  if (time_in_seconds > m_stale_tip_check_time) {
3786  LOCK(cs_main);
3787  // Check whether our tip is stale, and if so, allow using an extra
3788  // outbound peer
3789  if (TipMayBeStale(consensusParams)) {
3790  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
3791  connman->SetTryNewOutboundPeer(true);
3792  } else if (connman->GetTryNewOutboundPeer()) {
3793  connman->SetTryNewOutboundPeer(false);
3794  }
3795  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
3796  }
3797 }
3798 
3799 class CompareInvMempoolOrder
3800 {
3801  CTxMemPool *mp;
3802 public:
3803  explicit CompareInvMempoolOrder(CTxMemPool *_mempool)
3804  {
3805  mp = _mempool;
3806  }
3807 
3808  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
3809  {
3810  /* As std::make_heap produces a max-heap, we want the entries with the
3811  * fewest ancestors/highest fee to sort later. */
3812  return mp->CompareDepthAndScore(*b, *a);
3813  }
3814 };
3815 
3816 bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptMsgProc)
3817 {
3818  const Consensus::Params& consensusParams = Params().GetConsensus();
3819  {
3820  // Don't send anything until the version handshake is complete
3821  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
3822  return true;
3823 
3824  // If we get here, the outgoing message serialization version is set and can't change.
3825  const CNetMsgMaker msgMaker(pto->GetSendVersion());
3826 
3827  //
3828  // Message: ping
3829  //
3830  bool pingSend = false;
3831  if (pto->fPingQueued) {
3832  // RPC ping request by user
3833  pingSend = true;
3834  }
3835  if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
3836  // Ping automatically sent as a latency probe & keepalive.
3837  pingSend = true;
3838  }
3839  if (pingSend) {
3840  uint64_t nonce = 0;
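            // Draw a non-zero nonce; 0 is reserved to mean "no ping in flight"
            // (see the nPingNonceSent handling in the PONG handler above).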
3841  while (nonce == 0) {
3842  GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
3843  }
3844  pto->fPingQueued = false;
3845  pto->nPingUsecStart = GetTimeMicros();
3846  if (pto->nVersion > BIP0031_VERSION) {
3847  pto->nPingNonceSent = nonce;
3848  connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
3849  } else {
3850  // Peer is too old to support ping command with nonce, pong will never arrive.
3851  pto->nPingNonceSent = 0;
3852  connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
3853  }
3854  }
3855 
3856  TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
3857  if (!lockMain)
3858  return true;
3859 
3860  if (SendRejectsAndCheckIfBanned(pto, connman))
3861  return true;
3862  CNodeState &state = *State(pto->GetId());
3863 
3864  // Address refresh broadcast
3865  int64_t nNow = GetTimeMicros();
3866  auto current_time = GetTime<std::chrono::microseconds>();
3867 
3868  if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
3869  AdvertiseLocal(pto);
3870  pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
3871  }
3872 
3873  //
3874  // Message: addr
3875  //
3876  if (pto->nNextAddrSend < nNow) {
3877  pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
3878  std::vector<CAddress> vAddr;
3879  vAddr.reserve(pto->vAddrToSend.size());
3880  for (const CAddress& addr : pto->vAddrToSend)
3881  {
3882  if (!pto->addrKnown.contains(addr.GetKey()))
3883  {
3884  pto->addrKnown.insert(addr.GetKey());
3885  vAddr.push_back(addr);
3886  // receiver rejects addr messages larger than 1000
3887  if (vAddr.size() >= 1000)
3888  {
3889  connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3890  vAddr.clear();
3891  }
3892  }
3893  }
3894  pto->vAddrToSend.clear();
3895  if (!vAddr.empty())
3896  connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3897  // we only send the big addr message once
3898  if (pto->vAddrToSend.capacity() > 40)
3899  pto->vAddrToSend.shrink_to_fit();
3900  }
3901 
3902  // Start block sync
3903  if (pindexBestHeader == nullptr)
3904  pindexBestHeader = chainActive.Tip();
3905  bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
3906  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex && !pto->fMasternode) {
3907  // Only actively request headers from a single peer, unless we're close to end of initial download.
3908  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
3909  state.fSyncStarted = true;
3911  nSyncStarted++;
3912  const CBlockIndex *pindexStart = pindexBestHeader;
3913  /* If possible, start at the block preceding the currently
3914  best known header. This ensures that we always get a
3915  non-empty list of headers back as long as the peer
3916  is up-to-date. With a non-empty response, we can initialise
3917  the peer's known best block. This wouldn't be possible
3918  if we requested starting at pindexBestHeader and
3919  got back an empty response. */
3920  if (pindexStart->pprev)
3921  pindexStart = pindexStart->pprev;
3922  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
3923  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
3924  }
3925  }
3926 
3927  // Resend wallet transactions that haven't gotten in a block yet
3928  // Except during reindex, importing and IBD, when old wallet
3929  // transactions become unconfirmed and spam other nodes.
3930  if (!fReindex && !fImporting && !IsInitialBlockDownload())
3931  {
3932  static int64_t nLastBroadcastTime = 0;
3933  // HACK: Call this only once every few seconds. SendMessages is called once per peer, which makes this signal very expensive.
3934  // The proper solution would be to move this out of here, but it is not worth the effort right now as bitcoin#15632 will do this later.
3935  // Luckily, the Broadcast signal is not used for anything other than CWallet::ResendWalletTransactionsBefore.
3936  if (nNow - nLastBroadcastTime >= 5000000) {
3937  GetMainSignals().Broadcast(nTimeBestReceived, connman);
3938  nLastBroadcastTime = nNow;
3939  }
3940  }
3941 
3942  //
3943  // Try sending block announcements via headers
3944  //
3945  if (!pto->fMasternode) {
3946  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
3947  // list of block hashes we're relaying, and our peer wants
3948  // headers announcements, then find the first header
3949  // not yet known to our peer but would connect, and send.
3950  // If no header would connect, or if we have too many
3951  // blocks, or if the peer doesn't want headers, just
3952  // add all to the inv queue.
3953  LOCK(pto->cs_inventory);
3954  std::vector<CBlock> vHeaders;
3955  bool fRevertToInv = ((!state.fPreferHeaders &&
3956  (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
3957  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
3958  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
3959  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
3960 
3961  if (!fRevertToInv) {
3962  bool fFoundStartingHeader = false;
3963  // Try to find first header that our peer doesn't have, and
3964  // then send all headers past that one. If we come across any
3965  // headers that aren't on chainActive, give up.
3966  for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
3967  BlockMap::iterator mi = mapBlockIndex.find(hash);
3968  assert(mi != mapBlockIndex.end());
3969  const CBlockIndex *pindex = mi->second;
3970  if (chainActive[pindex->nHeight] != pindex) {
3971  // Bail out if we reorged away from this block
3972  fRevertToInv = true;
3973  break;
3974  }
3975  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
3976  // This means that the blocks we are announcing don't
3977  // connect to each other.
3978  // This shouldn't really be possible to hit during
3979  // regular operation (because reorgs should take us to
3980  // a chain that has some block not on the prior chain,
3981  // which should be caught by the prior check), but one
3982  // way this could happen is by using invalidateblock /
3983  // reconsiderblock repeatedly on the tip, causing it to
3984  // be added multiple times to vBlockHashesToAnnounce.
3985  // Robustly deal with this rare situation by reverting
3986  // to an inv.
3987  fRevertToInv = true;
3988  break;
3989  }
3990  pBestIndex = pindex;
3991  bool isPrevDevnetGenesisBlock = false;
3992  if (!consensusParams.hashDevnetGenesisBlock.IsNull() &&
3993  pindex->pprev != nullptr &&
3994  pindex->pprev->GetBlockHash() == consensusParams.hashDevnetGenesisBlock) {
3995  // even though the devnet genesis block was never transferred through the wire and thus not
3996  // appear anywhere in the node state where we track what other nodes have or not have, we can
3997  // assume that the other node already knows the devnet genesis block
3998  isPrevDevnetGenesisBlock = true;
3999  }
4000  if (fFoundStartingHeader) {
4001  // add this to the headers message
4002  vHeaders.push_back(pindex->GetBlockHeader());
4003  } else if (PeerHasHeader(&state, pindex)) {
4004  continue; // keep looking for the first new block
4005  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev) || isPrevDevnetGenesisBlock) {
4006  // Peer doesn't have this header but they do have the prior one.
4007  // Start sending headers.
4008  fFoundStartingHeader = true;
4009  vHeaders.push_back(pindex->GetBlockHeader());
4010  } else {
4011  // Peer doesn't have this header or the prior one -- nothing will
4012  // connect, so bail out.
4013  fRevertToInv = true;
4014  break;
4015  }
4016  }
4017  }
4018  if (!fRevertToInv && !vHeaders.empty()) {
4019  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
4020  // We only send up to 1 block as header-and-ids, as sending more
4021  // probably means we're doing an initial-ish sync or they're slow
4022  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
4023  vHeaders.front().GetHash().ToString(), pto->GetId());
4024 
4025  bool fGotBlockFromCache = false;
4026  {
4027  LOCK(cs_most_recent_block);
4028  if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
4029  connman->PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
4030  fGotBlockFromCache = true;
4031  }
4032  }
4033  if (!fGotBlockFromCache) {
4034  CBlock block;
4035  bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
4036  assert(ret);
4037  CBlockHeaderAndShortTxIDs cmpctblock(block);
4038  connman->PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
4039  }
4040  state.pindexBestHeaderSent = pBestIndex;
4041  } else if (state.fPreferHeaders) {
4042  if (vHeaders.size() > 1) {
4043  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
4044  vHeaders.size(),
4045  vHeaders.front().GetHash().ToString(),
4046  vHeaders.back().GetHash().ToString(), pto->GetId());
4047  } else {
4048  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
4049  vHeaders.front().GetHash().ToString(), pto->GetId());
4050  }
4051  connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4052  state.pindexBestHeaderSent = pBestIndex;
4053  } else
4054  fRevertToInv = true;
4055  }
4056  if (fRevertToInv) {
4057  // If falling back to using an inv, just try to inv the tip.
4058  // The last entry in vBlockHashesToAnnounce was our tip at some point
4059  // in the past.
4060  if (!pto->vBlockHashesToAnnounce.empty()) {
4061  const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
4062  BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
4063  assert(mi != mapBlockIndex.end());
4064  const CBlockIndex *pindex = mi->second;
4065 
4066  // Warn if we're announcing a block that is not on the main chain.
4067  // This should be very rare and could be optimized out.
4068  // Just log for now.
4069  if (chainActive[pindex->nHeight] != pindex) {
4070  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
4071  hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
4072  }
4073 
4074  // If the peer's chain has this block, don't inv it back.
4075  if (!PeerHasHeader(&state, pindex)) {
4076  pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
4077  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
4078  pto->GetId(), hashToAnnounce.ToString());
4079  }
4080  }
4081  }
4082  pto->vBlockHashesToAnnounce.clear();
4083  }
4084 
4085  //
4086  // Message: inventory
4087  //
4088  std::vector<CInv> vInv;
4089  {
4090  size_t reserve = std::min<size_t>(pto->setInventoryTxToSend.size(), INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK * MaxBlockSize(true) / 1000000);
4091  reserve = std::max<size_t>(reserve, pto->vInventoryBlockToSend.size());
4092  reserve = std::min<size_t>(reserve, MAX_INV_SZ);
4093  vInv.reserve(reserve);
4094 
4095  LOCK2(mempool.cs, pto->cs_inventory);
4096 
4097  // Add blocks
4098  for (const uint256& hash : pto->vInventoryBlockToSend) {
4099  vInv.push_back(CInv(MSG_BLOCK, hash));
4100  if (vInv.size() == MAX_INV_SZ) {
4101  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4102  vInv.clear();
4103  }
4104  }
4105  pto->vInventoryBlockToSend.clear();
4106 
4107  // Check whether periodic sends should happen
4108  // Note: If this node is running in Masternode mode, it makes no sense to delay outgoing txes
4109  // because we never produce any txes ourselves, i.e. no privacy is lost in this case.
4110  bool fSendTrickle = pto->fWhitelisted || fMasternodeMode;
4111  if (pto->nNextInvSend < current_time) {
4112  fSendTrickle = true;
4113  if (pto->fInbound) {
4114  pto->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(current_time.count(), INVENTORY_BROADCAST_INTERVAL)};
4115  } else {
4116  // Use half the delay for regular outbound peers, as there is less privacy concern for them,
4117  // and a quarter of the delay for Masternode outbound peers, as there is even less privacy concern in this case.
4118  pto->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1 >> !pto->verifiedProRegTxHash.IsNull()});
4119  }
4120  }
4121 
4122  // Time to send but the peer has requested we not relay transactions.
4123  if (fSendTrickle) {
4124  LOCK(pto->cs_filter);
4125  if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
4126  }
4127 
4128  // Respond to BIP35 mempool requests
4129  if (fSendTrickle && pto->fSendMempool) {
4130  auto vtxinfo = mempool.infoAll();
4131  pto->fSendMempool = false;
4132 
4133  LOCK(pto->cs_filter);
4134 
4135  for (const auto& txinfo : vtxinfo) {
4136  const uint256& hash = txinfo.tx->GetHash();
4137  int nInvType = MSG_TX;
4138  if (CPrivateSend::GetDSTX(hash)) {
4139  nInvType = MSG_DSTX;
4140  }
4141  CInv inv(nInvType, hash);
4142  pto->setInventoryTxToSend.erase(hash);
4143  if (pto->pfilter) {
4144  if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4145  }
4146  pto->filterInventoryKnown.insert(hash);
4147 
4148  LogPrint(BCLog::NET, "SendMessages -- queued inv: %s index=%d peer=%d\n", inv.ToString(), vInv.size(), pto->GetId());
4149  vInv.push_back(inv);
4150  if (vInv.size() == MAX_INV_SZ) {
4151  LogPrint(BCLog::NET, "SendMessages -- pushing inv's: count=%d peer=%d\n", vInv.size(), pto->GetId());
4152  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4153  vInv.clear();
4154  }
4155 
4156  uint256 islockHash;
4157  if (!llmq::quorumInstantSendManager->GetInstantSendLockHashByTxid(hash, islockHash)) continue;
4158  CInv islockInv(MSG_ISLOCK, islockHash);
4159  pto->filterInventoryKnown.insert(islockHash);
4160 
4161  LogPrint(BCLog::NET, "SendMessages -- queued inv: %s index=%d peer=%d\n", islockInv.ToString(), vInv.size(), pto->GetId());
4162  vInv.push_back(islockInv);
4163  if (vInv.size() == MAX_INV_SZ) {
4164  LogPrint(BCLog::NET, "SendMessages -- pushing inv's: count=%d peer=%d\n", vInv.size(), pto->GetId());
4165  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4166  vInv.clear();
4167  }
4168  }
4169  pto->timeLastMempoolReq = GetTime();
4170  }
4171 
4172  // Determine transactions to relay
4173  if (fSendTrickle) {
4174  // Produce a vector with all candidates for sending
4175  std::vector<std::set<uint256>::iterator> vInvTx;
4176  vInvTx.reserve(pto->setInventoryTxToSend.size());
4177  for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
4178  vInvTx.push_back(it);
4179  }
4180  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
4181  // A heap is used so that not all items need sorting if only a few are being sent.
4182  CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
4183  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4184  // No reason to drain out at many times the network's capacity,
4185  // especially since we have many peers and some will draw much shorter delays.
4186  unsigned int nRelayedTransactions = 0;
4187  LOCK(pto->cs_filter);
4188  while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK * MaxBlockSize(true) / 1000000) {
4189  // Fetch the top element from the heap
4190  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4191  std::set<uint256>::iterator it = vInvTx.back();
4192  vInvTx.pop_back();
4193  uint256 hash = *it;
4194  // Remove it from the to-be-sent set
4195  pto->setInventoryTxToSend.erase(it);
4196  // Check if not in the filter already
4197  if (pto->filterInventoryKnown.contains(hash)) {
4198  continue;
4199  }
4200  // Not in the mempool anymore? don't bother sending it.
4201  auto txinfo = mempool.info(hash);
4202  if (!txinfo.tx) {
4203  continue;
4204  }
4205  if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4206  // Send
4207  int nInvType = MSG_TX;
4208  if (CPrivateSend::GetDSTX(hash)) {
4209  nInvType = MSG_DSTX;
4210  }
4211  vInv.push_back(CInv(nInvType, hash));
4212  nRelayedTransactions++;
4213  {
4214  // Expire old relay messages
4215  while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
4216  {
4217  mapRelay.erase(vRelayExpiration.front().second);
4218  vRelayExpiration.pop_front();
4219  }
4220 
4221  auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
4222  if (ret.second) {
4223  vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
4224  }
4225  }
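          // Entries stay in mapRelay for 15 minutes (see the expiration push above),
          // so recently announced transactions can still be served if the peer
          // requests them with GETDATA.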
4226  if (vInv.size() == MAX_INV_SZ) {
4227  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4228  vInv.clear();
4229  }
4230  pto->filterInventoryKnown.insert(hash);
4231  }
4232  }
4233 
4234  // Send non-tx/non-block inventory items
4235  for (const auto& inv : pto->vInventoryOtherToSend) {
4236  if (pto->filterInventoryKnown.contains(inv.hash)) {
4237  continue;
4238  }
4239  vInv.push_back(inv);
4240  pto->filterInventoryKnown.insert(inv.hash);
4241  if (vInv.size() == MAX_INV_SZ) {
4242  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4243  vInv.clear();
4244  }
4245  }
4246  pto->vInventoryOtherToSend.clear();
4247  }
4248  if (!vInv.empty())
4249  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4250 
4251  // Detect whether we're stalling
4252  current_time = GetTime<std::chrono::microseconds>();
4253  // nNow is the current system time (GetTimeMicros is not mockable) and
4254  // should be replaced by the mockable current_time eventually
4255  nNow = GetTimeMicros();
4256  if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
4257  // Stalling only triggers when the block download window cannot move. During normal steady state,
4258  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
4259  // should only happen during initial block download.
4260  LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
4261  pto->fDisconnect = true;
4262  return true;
4263  }
4264  // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
4265  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
4266  // We compensate for other peers to prevent killing off peers due to our own downstream link
4267  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
4268  // to unreasonably increase our timeout.
4269  if (state.vBlocksInFlight.size() > 0) {
4270  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
4271  int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
4272  if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
4273  LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
4274  pto->fDisconnect = true;
4275  return true;
4276  }
4277  }
4278  // Check for headers sync timeouts
4279  if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
4280  // Detect whether this is a stalling initial-headers-sync peer
4281  if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24*60*60) {
4282  if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
4283  // Disconnect a (non-whitelisted) peer if it is our only sync peer,
4284  // and we have others we could be using instead.
4285  // Note: If all our peers are inbound, then we won't
4286  // disconnect our sync peer for stalling; we have bigger
4287  // problems if we can't get any outbound peers.
4288  if (!pto->fWhitelisted) {
4289  LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
4290  pto->fDisconnect = true;
4291  return true;
4292  } else {
4293  LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId());
4294  // Reset the headers sync state so that we have a
4295  // chance to try downloading from a different peer.
4296  // Note: this will also result in at least one more
4297  // getheaders message to be sent to
4298  // this peer (eventually).
4299  state.fSyncStarted = false;
4300  nSyncStarted--;
4301  state.nHeadersSyncTimeout = 0;
4302  }
4303  }
4304  } else {
4305  // After we've caught up once, reset the timeout so we can't trigger
4306  // disconnect later.
4307  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
4308  }
4309  }
4310 
4311  // Check that outbound peers have reasonable chains
4312  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
4313  ConsiderEviction(pto, GetTime());
4314 
4315  //
4316  // Message: getdata (blocks)
4317  //
4318  std::vector<CInv> vGetData;
4319  if (!pto->fClient && !pto->fMasternode && ((fFetch && !pto->m_limited_node) || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4320  std::vector<const CBlockIndex*> vToDownload;
4321  NodeId staller = -1;
4322  FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
4323  for (const CBlockIndex *pindex : vToDownload) {
4324  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
4325  MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
4326  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
4327  pindex->nHeight, pto->GetId());
4328  }
4329  if (state.nBlocksInFlight == 0 && staller != -1) {
4330  if (State(staller)->nStallingSince == 0) {
4331  State(staller)->nStallingSince = nNow;
4332  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
4333  }
4334  }
4335  }
4336 
4337  //
4338  // Message: getdata (non-blocks)
4339  //
4340 
4341  // For robustness, expire old requests after a long timeout, so that
4342  // we can resume downloading objects from a peer even if they
4343  // were unresponsive in the past.
4344  // Eventually we should consider disconnecting peers, but this is
4345  // conservative.
4346  if (state.m_object_download.m_check_expiry_timer <= current_time) {
4347  for (auto it=state.m_object_download.m_object_in_flight.begin(); it != state.m_object_download.m_object_in_flight.end();) {
4348  if (it->second <= current_time - GetObjectExpiryInterval(it->first.type)) {
4349  LogPrint(BCLog::NET, "timeout of inflight object %s from peer=%d\n", it->first.ToString(), pto->GetId());
4350  state.m_object_download.m_object_announced.erase(it->first);
4351  state.m_object_download.m_object_in_flight.erase(it++);
4352  } else {
4353  ++it;
4354  }
4355  }
4356  // On average, we do this check every GetObjectExpiryInterval. Randomize
4357  // so that we're not doing this for all peers at the same time.
4358  state.m_object_download.m_check_expiry_timer = current_time + GetObjectExpiryInterval(MSG_TX)/2 + GetRandMicros(GetObjectExpiryInterval(MSG_TX));
4359  }
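// --- Illustrative note (not part of net_processing.cpp) ---
// The next check time drawn above is current_time + interval/2 + uniform(0, interval), whose
// expected value is current_time + interval. For example, if GetObjectExpiryInterval(MSG_TX)
// is 10 minutes (GETDATA_TX_INTERVAL of 60 seconds times TX_EXPIRY_INTERVAL_FACTOR of 10,
// assuming that is how the expiry interval is derived), the timer fires 5 to 15 minutes later,
// 10 minutes on average, and different peers' timers drift apart rather than firing in lockstep.
// --- end note ---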
4360 
4361  // DASH this code also handles non-TXs (Dash specific messages)
4362  auto& object_process_time = state.m_object_download.m_object_process_time;
4363  while (!object_process_time.empty() && object_process_time.begin()->first <= current_time && state.m_object_download.m_object_in_flight.size() < MAX_PEER_OBJECT_IN_FLIGHT) {
4364  const CInv inv = object_process_time.begin()->second;
4365  // Erase this entry from object_process_time (it may be added back for
4366  // processing at a later time, see below)
4367  object_process_time.erase(object_process_time.begin());
4368  if (g_erased_object_requests.count(inv.hash)) {
4369  LogPrint(BCLog::NET, "%s -- GETDATA skipping inv=(%s), peer=%d\n", __func__, inv.ToString(), pto->GetId());
4370  state.m_object_download.m_object_announced.erase(inv);
4371  state.m_object_download.m_object_in_flight.erase(inv);
4372  continue;
4373  }
4374  if (!AlreadyHave(inv)) {
4375  // If this object was last requested more than GetObjectInterval ago,
4376  // then request.
4377  const auto last_request_time = GetObjectRequestTime(inv.hash);
4378  if (last_request_time <= current_time - GetObjectInterval(inv.type)) {
4379  LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
4380  vGetData.push_back(inv);
4381  if (vGetData.size() >= MAX_GETDATA_SZ) {
4382  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4383  vGetData.clear();
4384  }
4385  UpdateObjectRequestTime(inv.hash, current_time);
4386  state.m_object_download.m_object_in_flight.emplace(inv, current_time);
4387  } else {
4388  // This object is in flight from someone else; queue
4389  // up processing to happen after the download times out
4390  // (with a slight delay for inbound peers, to prefer
4391  // requests to outbound peers).
4392  const auto next_process_time = CalculateObjectGetDataTime(inv, current_time, !state.fPreferredDownload);
4393  object_process_time.emplace(next_process_time, inv);
4394  LogPrint(BCLog::NET, "%s -- GETDATA re-queue inv=(%s), next_process_time=%d, delta=%d, peer=%d\n", __func__, inv.ToString(), next_process_time.count(), (next_process_time - current_time).count(), pto->GetId());
4395  }
4396  } else {
4397  // We have already seen this object, no need to download.
4398  state.m_object_download.m_object_announced.erase(inv);
4399  state.m_object_download.m_object_in_flight.erase(inv);
4400  LogPrint(BCLog::NET, "%s -- GETDATA already seen inv=(%s), peer=%d\n", __func__, inv.ToString(), pto->GetId());
4401  }
4402  }
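// --- Illustrative sketch (not part of net_processing.cpp) ---
// CalculateObjectGetDataTime (declared in the reference entries below) decides when a re-queued
// announcement becomes eligible again. A rough sketch of the intent, assuming behaviour analogous
// to the upstream transaction-request logic (the exact expression may differ):
//   const auto last = GetObjectRequestTime(inv.hash);
//   auto process_time = (last.count() == 0)
//       ? current_time                                                        // never requested before
//       : last + GetObjectInterval(inv.type) + GetObjectRandomDelay(inv.type); // retry window + jitter
//   if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY;             // prefer outbound peers
// --- end sketch ---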
4403 
4404 
4405  if (!vGetData.empty()) {
4406  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4407  LogPrint(BCLog::NET, "SendMessages -- GETDATA -- pushed size = %lu peer=%d\n", vGetData.size(), pto->GetId());
4408  }
4409 
4410  }
4411  return true;
4412 }
4413 
4414 class CNetProcessingCleanup
4415 {
4416 public:
4417  CNetProcessingCleanup() {}
4418  ~CNetProcessingCleanup() {
4419  // orphan transactions
4420  mapOrphanTransactions.clear();
4421  mapOrphanTransactionsByPrev.clear();
4422  }
4423 } instance_of_cnetprocessingcleanup;
void ForEachNodeThen(const Condition &cond, Callable &&pre, CallableAfter &&post)
Definition: net.h:320
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:195
std::chrono::microseconds GetObjectInterval(int invType)
void RequestObject(CNodeState *state, const CInv &inv, std::chrono::microseconds current_time, bool fForce=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:31
bool GetBlockHash(uint256 &hashRet, int nBlockHeight)
Return true if hash can be found in chainActive at nBlockHeight height.
CTxMemPool mempool
std::atomic< uint64_t > nPingNonceSent
Definition: net.h:924
bool IsArgSet(const std::string &strArg) const
Return true if the given argument has been manually set.
Definition: util.cpp:784
OutIter copy(Range &&r, OutIter out)
Definition: algorithm.hpp:168
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:34
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:23
std::atomic_bool fPauseSend
Definition: net.h:875
void DisallowMixing(const uint256 &proTxHash)
uint8_t pchChecksum[CHECKSUM_SIZE]
Definition: protocol.h:60
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
Definition: validation.h:97
static std::shared_ptr< const CBlockHeaderAndShortTxIDs > most_recent_compact_block
CMasternodeSync masternodeSync
CSigSharesManager * quorumSigSharesManager
int GetSendVersion() const
Definition: net.cpp:887
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:42
bool fPruneMode
True if we&#39;re running in -prune mode.
Definition: validation.cpp:230
CPrivateSendServer privateSendServer
uint256 GetRandHash()
Definition: random.cpp:384
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:280
std::string ToString(bool fUseGetnameinfo=true) const
Definition: netaddress.cpp:581
bool IsLocal() const
Definition: netaddress.cpp:177
CCriticalSection cs_filter
Definition: net.h:868
bool BuildSimplifiedMNListDiff(const uint256 &baseBlockHash, const uint256 &blockHash, CSimplifiedMNListDiff &mnListDiffRet, std::string &errorRet)
void SetNull()
Definition: uint256.h:41
int64_t GetBlockTime() const
Definition: chain.h:297
CConnman *const connman
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:127
static const int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:177
bool operator()(std::set< uint256 >::iterator a, std::set< uint256 >::iterator b)
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:1197
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: hash.cpp:102
#define TRY_LOCK(cs, name)
Definition: sync.h:180
uint32_t nStatus
Verification status of this block. See enum BlockStatus.
Definition: chain.h:207
static std::shared_ptr< const CBlock > most_recent_block
const char * QPCOMMITMENT
Definition: protocol.cpp:67
size_t GetAddressCount() const
Definition: net.cpp:3257
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:26
void WakeMessageHandler()
Definition: net.cpp:1928
static constexpr bool DEFAULT_ENABLE_BIP61
Default for BIP61 (sending reject messages)
void SetServices(const CService &addr, ServiceFlags nServices)
Definition: net.cpp:3262
std::string ToString() const
Definition: protocol.cpp:283
std::chrono::microseconds GetObjectRandomDelay(int invType)
Definition: block.h:72
const char * QFCOMMITMENT
Definition: protocol.cpp:63
bool HaveVoteForHash(const uint256 &nHash) const
Definition: governance.cpp:66
uint64_t ReadCompactSize(Stream &is)
Definition: serialize.h:261
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:29
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
Notifies listeners of updated block chain tip.
int64_t nTimeExpire
void ProcessMessage(CNode *pnode, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
unsigned int MaxBlockSize(bool fDIP0001Active)
Definition: consensus.h:12
Defined in BIP152.
Definition: protocol.h:415
std::vector< uint16_t > indexes
int GetRecvVersion() const
Definition: net.h:997
#define strprintf
Definition: tinyformat.h:1066
void CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
static bool AlreadyHave(const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void insert(const std::vector< unsigned char > &vKey)
Definition: bloom.cpp:341
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
Get statistics from node state.
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:1389
reverse_range< T > reverse_iterate(T &x)
CDKGSessionManager * quorumDKGSessionManager
inv message data
Definition: protocol.h:429
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in millionths of the block interval (i.e.
Definition: validation.h:110
CQuorumBlockProcessor * quorumBlockProcessor
BlockMap & mapBlockIndex
Definition: validation.cpp:215
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
size_t GetSerializeSize(const T &t, int nType, int nVersion=0)
Definition: serialize.h:1295
bool AlreadyHave(const CInv &inv)
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:39
static const CAmount COIN
Definition: amount.h:14
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of chainActive.Tip() will not be prun...
Definition: validation.h:207
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:155
TxMempoolInfo info(const uint256 &hash) const
Definition: txmempool.cpp:1220
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:134
static bool ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector< CBlockHeader > &headers, const CChainParams &chainparams, bool punish_duplicate_invalid)
static const unsigned int MAX_REJECT_MESSAGE_LENGTH
Maximum length of reject messages.
Definition: validation.h:108
int Height() const
Return the maximal height in the chain.
Definition: chain.h:484
CCriticalSection cs_main
Definition: validation.cpp:213
bool IsValid() const
Definition: validation.h:61
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:46
void EvictExtraOutboundPeers(int64_t time_in_seconds)
bool fSendMempool
Definition: net.h:914
std::string HexStr(const T itbegin, const T itend, bool fSpaces=false)
CCriticalSection cs_SubVer
Definition: net.h:844
bool GetTryNewOutboundPeer()
Definition: net.cpp:2186
CTransactionRef tx
static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY
static int64_t nTimeOffset
Definition: timedata.cpp:20
CCriticalSection cs_mnauth
Definition: net.h:938
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:141
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:33
void UpdateObjectRequestTime(const uint256 &hash, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool IsOutboundDisconnectionCandidate(const CNode *node)
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
Definition: net.cpp:3733
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:244
void SetVersion(int nVersionIn)
Definition: net.h:788
static const int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
void ProcessSpork(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
ProcessSpork is used to handle the 'getsporks' and 'spork' p2p messages.
Definition: spork.cpp:114
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:208
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:126
std::atomic< int64_t > nPingUsecStart
Definition: net.h:926
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
Definition: net.cpp:180
void scheduleEvery(Function f, int64_t deltaMilliSeconds)
Definition: scheduler.cpp:126
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:39
int64_t GetTimeMicros()
Returns the system time (not mockable)
Definition: utiltime.cpp:63
CChainParams defines various tweakable parameters of a given instance of the Dash system...
Definition: chainparams.h:41
bool HaveObjectForHash(const uint256 &nHash) const
Definition: governance.cpp:47
bool IsNull() const
Definition: block.h:150
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:103
std::shared_ptr< const CDeterministicMN > CDeterministicMNCPtr
bool empty() const
Definition: streams.h:195
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: util.cpp:824
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:2191
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
uint32_t nMessageSize
Definition: protocol.h:59
CCriticalSection cs_inventory
Definition: net.h:908
void Broadcast(int64_t nBestBlockTime, CConnman *connman)
uint64_t GetLocalNonce() const
Definition: net.h:977
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:296
std::set< uint256 > setInventoryTxToSend
Definition: net.h:901
static void ProcessGetBlockData(CNode *pfrom, const CChainParams &chainparams, const CInv &inv, CConnman *connman, const std::atomic< bool > &interruptMsgProc)
std::vector< CAddress > vAddrToSend
Definition: net.h:890
bool fMasternode
Definition: net.h:864
std::atomic< int > nStartingHeight
Definition: net.h:887
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
class CNetProcessingCleanup instance_of_cnetprocessingcleanup
void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand)
Definition: net.h:1026
static CScheduler scheduler
Definition: init.cpp:213
bool ProcessNewBlock(const CChainParams &chainparams, const std::shared_ptr< const CBlock > pblock, bool fForceProcessing, bool *fNewBlock)
Process an incoming block.
static CCriticalSection cs_most_recent_block
std::string GetCommand() const
Definition: protocol.cpp:165
void SetRecvVersion(int nVersionIn)
Definition: net.h:993
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:32
unsigned char * begin()
Definition: uint256.h:57
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:345
std::unique_ptr< CDeterministicMNManager > deterministicMNManager
std::atomic< int64_t > timeLastMempoolReq
Definition: net.h:921
bool IsValid(const MessageStartChars &messageStart) const
Definition: protocol.cpp:170
static const unsigned char REJECT_OBSOLETE
Definition: validation.h:14
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
bool IsNull() const
Definition: uint256.h:33
bool ProcessMessages(CNode *pfrom, std::atomic< bool > &interrupt) override
Process protocol messages received from a given node.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:27
const char * QJUSTIFICATION
Definition: protocol.cpp:66
unsigned int nChainTx
(memory only) Number of transactions in the chain up to and including this block. ...
Definition: chain.h:204
void PushInventory(const CInv &inv)
Definition: net.h:1054
Sporks are network parameters used primarily to prevent forking and turn on/off certain features...
Definition: spork.h:75
static bool LogAcceptCategory(uint64_t category)
Definition: util.h:152
std::set< uint256 > orphan_work_set
Definition: net.h:949
bool ActivateBestChain(CValidationState &state, const CChainParams &chainparams, std::shared_ptr< const CBlock > pblock)
Find the best known block, and make it the tip of the block chain.
std::atomic< ServiceFlags > nServices
Definition: net.h:805
const std::vector< CTxIn > vin
Definition: transaction.h:215
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
Definition: net.cpp:741
std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept
Definition: random.cpp:374
std::deque< CInv > vRecvGetData
Definition: net.h:822
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:21
void ForEachNode(const Condition &cond, Callable &&func)
Definition: net.h:288
bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &headers, CValidationState &state, const CChainParams &chainparams, const CBlockIndex **ppindex, CBlockHeader *first_invalid)
Process incoming block headers.
static const std::string MAIN
BIP70 chain name strings (main, test or regtest)
bool contains(const std::vector< unsigned char > &vKey) const
Definition: bloom.cpp:378
static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY
Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others...
static const unsigned int REJECT_INTERNAL
Reject codes greater or equal to this can be returned by AcceptToMemPool for transactions, to signal internal conditions.
Definition: validation.h:484
void check(const CCoinsViewCache *pcoins) const
If sanity-checking is turned on, check makes sure the pool is consistent (does not contain two transa...
Definition: txmempool.cpp:1013
CMasternodeMetaMan mmetaman
CInstantSendManager * quorumInstantSendManager
std::unique_ptr< CCoinsViewCache > pcoinsTip
Global variable that points to the active CCoinsView (protected by cs_main)
Definition: validation.cpp:300
void AddToCompactExtraTransactions(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
uint256 GetBlockHash() const
Definition: chain.h:292
static const int LLMQS_PROTO_VERSION
introduction of LLMQs
Definition: version.h:51
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:332
CGovernanceManager governance
Definition: governance.cpp:23
bool done
bool fSentAddr
Definition: net.h:862
std::atomic< bool > fFirstMessageIsMNAUTH
Definition: net.h:832
COutPoint masternodeOutpoint
Definition: privatesend.h:300
void Misbehaving(NodeId pnode, int howmuch, const std::string &message)
Increase a node's misbehavior score.
std::atomic< int64_t > nPingUsecTime
Definition: net.h:928
int64_t GetTime()
Return system time (or mocked time, if set)
Definition: utiltime.cpp:22
bool fMasternodeMode
Definition: util.cpp:93
std::atomic< int64_t > nMinPingUsecTime
Definition: net.h:930
int GetMyStartingHeight() const
Definition: net.h:981
#define LOCK2(cs1, cs2)
Definition: sync.h:179
void Ban(const CNetAddr &netAddr, const BanReason &reason, int64_t bantimeoffset=0, bool sinceUnixEpoch=false)
Definition: net.cpp:605
ServiceFlags GetLocalServices() const
Definition: net.h:1087
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:145
const char * ISLOCK
Definition: protocol.cpp:76
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
Definition: validation.h:112
bool fClient
Definition: net.h:849
bool ForNode(NodeId id, std::function< bool(const CNode *pnode)> cond, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3795
bool fRelayTxes
Definition: net.cpp:114
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:127
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:25
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
Definition: validation.h:209
CChainLocksHandler * chainLocksHandler
UniValue spork(const JSONRPCRequest &request)
Definition: misc.cpp:165
#define LogPrintf(...)
Definition: util.h:203
static const bool DEFAULT_WHITELISTRELAY
Default for -whitelistrelay.
Definition: validation.h:52
size_type size() const
Definition: streams.h:194
const char * MNLISTDIFF
Definition: protocol.cpp:61
std::chrono::microseconds CalculateObjectGetDataTime(const CInv &inv, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
size_t nProcessQueueSize
Definition: net.h:818
static constexpr size_t COMMAND_SIZE
Definition: protocol.h:32
Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
Definition: chain.h:148
unsigned long size()
Definition: txmempool.h:660
bool IsInvalid() const
Definition: validation.h:64
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:218
std::vector< CTransactionRef > txn
bool fOneShot
Definition: net.h:847
An input of a transaction.
Definition: transaction.h:70
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:345
static CPrivateSendBroadcastTx GetDSTX(const uint256 &hash)
#define LOCK(cs)
Definition: sync.h:178
const char * name
Definition: rest.cpp:36
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
Definition: validation.h:87
const uint256 & GetHash() const
Definition: transaction.h:256
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
bool IsPeerAddrLocalGood(CNode *pnode)
Definition: net.cpp:201
int type
Definition: protocol.h:455
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:17
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:471
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:143
Fast randomness source.
Definition: random.h:48
const char * LEGACYTXLOCKREQUEST
Definition: protocol.cpp:44
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds)
Attempts to obfuscate tx time through exponentially distributed emitting.
Definition: net.cpp:3814
bool OutboundTargetReached(bool historicalBlockServingLimit)
check if the outbound target is reached
Definition: net.cpp:3587
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
Definition: validation.h:92
static const unsigned char REJECT_NONSTANDARD
Definition: validation.h:16
int64_t nPowTargetSpacing
Definition: params.h:176
static void ProcessGetData(CNode *pfrom, const CChainParams &chainparams, CConnman *connman, const std::atomic< bool > &interruptMsgProc)
std::vector< CAddress > GetAddresses()
Definition: net.cpp:3277
static constexpr int32_t MAX_PEER_OBJECT_ANNOUNCEMENTS
Maximum number of announced objects from a peer.
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:476
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:38
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:30
NodeId fromPeer
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:45
std::string GetRejectReason() const
Definition: validation.h:81
bool IsLocked(const uint256 &txHash)
static const int64_t ORPHAN_TX_EXPIRE_INTERVAL
Minimum time between orphan transactions expire time checks in seconds.
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:586
bool m_manual_connection
Definition: net.h:848
const std::vector< CTxOut > vout
Definition: transaction.h:216
PeerLogicValidation(CConnman *connmanIn, CScheduler &scheduler)
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
Definition: governance.cpp:88
A CService with information about it as peer.
Definition: protocol.h:358
bool IsBanned(NodeId pnode)
bool IsInitialBlockDownload()
Check whether we are doing an initial block download (synchronizing from disk or network) ...
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:557
static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
const char * QSIGREC
Definition: protocol.cpp:73
static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts in seconds.
const std::vector< std::string > & getAllNetMessageTypes()
Definition: protocol.cpp:293
uint256 hash
Definition: protocol.h:456
static const int64_t ORPHAN_TX_EXPIRE_TIME
Expiration time for orphan transactions in seconds.
CMainSignals & GetMainSignals()
bool GetSporkByHash(const uint256 &hash, CSporkMessage &sporkRet)
GetSporkByHash returns a spork message given a hash of the spork message.
Definition: spork.cpp:255
std::vector< uint256 > vBlockHashesToAnnounce
Definition: net.h:912
const char * GETMNLISTDIFF
Definition: protocol.cpp:60
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:20
const CMessageHeader::MessageStartChars & MessageStart() const
Definition: chainparams.h:55
int64_t NodeId
Definition: net.h:109
std::atomic< int64_t > nTimeBestReceived(0)
bool exists(uint256 hash) const
Definition: txmempool.h:672
Definition: net.h:136
void AddNewAddresses(const std::vector< CAddress > &vAddr, const CAddress &addrFrom, int64_t nTimePenalty=0)
Definition: net.cpp:3272
size_t nTxSize
bool fMasternodeProbe
Definition: net.h:866
uint256 sentMNAuthChallenge
Definition: net.h:939
std::atomic< int64_t > nTimeFirstMessageReceived
Definition: net.h:831
static const unsigned int INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions in seconds.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:36
bool fGetAddr
Definition: net.h:892
std::atomic_bool fImporting
static constexpr size_t CHECKSUM_SIZE
Definition: protocol.h:34
void ProcessMessage(CNode *pnode, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
std::string ToString() const
Definition: uint256.cpp:62
unsigned int size() const
Definition: uint256.h:77
bool SerializeObjectForHash(const uint256 &nHash, CDataStream &ss) const
Definition: governance.cpp:53
NodeId GetId() const
Definition: net.h:973
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:33
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:3858
static const std::string DEVNET
bool ConfirmInventoryRequest(const CInv &inv)
This is called by AlreadyHave in net_processing.cpp as part of the inventory retrieval process...
Definition: governance.cpp:562
Parameters that influence chain consensus.
Definition: params.h:130
static void AddDSTX(const CPrivateSendBroadcastTx &dstx)
An outpoint - a combination of a transaction hash and an index n into its vout.
Definition: transaction.h:26
static constexpr int32_t MAX_PEER_OBJECT_IN_FLIGHT
Maximum number of in-flight objects from a peer.
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:28
static void SendBlockTransactions(const CBlock &block, const BlockTransactionsRequest &req, CNode *pfrom, CConnman *connman)
std::atomic_bool fDisconnect
Definition: net.h:853
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:118
static const unsigned char REJECT_DUPLICATE
Definition: validation.h:15
bool fTxIndex
Definition: validation.cpp:225
static CCriticalSection g_cs_orphans
bool IsRoutable() const
Definition: netaddress.cpp:230
int64_t nMaxTipAge
If the tip is older than this (in seconds), the node is considered to be in initial block download...
Definition: validation.cpp:238
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout expressed in microseconds Timeout = base + per_header * (expected number of ...
uint64_t GetHash() const
Definition: netaddress.cpp:392
CRollingBloomFilter addrKnown
Definition: net.h:891
CCriticalSection cs
Definition: txmempool.h:488
std::chrono::microseconds GetObjectExpiryInterval(int invType)
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:353
unsigned int GetReceiveFloodSize() const
Definition: net.cpp:3643
bool SerializeVoteForHash(const uint256 &nHash, CDataStream &ss) const
Definition: governance.cpp:80
const char * REJECT
The reject message informs the receiving node that one of its previous messages has been rejected...
Definition: protocol.cpp:37
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphansSize)
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
Definition: validation.h:136
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:386
static constexpr size_t MESSAGE_START_SIZE
Definition: protocol.h:31
const CAddress addr
Definition: net.h:834
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:24
const int64_t nTimeConnected
Definition: net.h:828
int64_t m_stale_tip_check_time
CSporkManager sporkManager
Definition: spork.cpp:29
bool HasMinableCommitment(const uint256 &hash)
void BlockConnected(const std::shared_ptr< const CBlock > &pblock, const CBlockIndex *pindexConnected, const std::vector< CTransactionRef > &vtxConflicted) override
Notifies listeners of a block being connected.
#define LogPrint(category,...)
Definition: util.h:214
void PrintExceptionContinue(const std::exception_ptr pex, const char *pszExceptionOrigin)
Definition: util.cpp:891
std::atomic_bool fReindex
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:19
static void ProcessMessage(CNode *pnode, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
Definition: mnauth.cpp:57
uint256 GetHash() const
Definition: block.cpp:14
bool fLogIPs
Definition: util.cpp:115
static bool ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, int64_t nTimeReceived, const CChainParams &chainparams, CConnman *connman, const std::atomic< bool > &interruptMsgProc)
const char * QSENDRECSIGS
Definition: protocol.cpp:62
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv)
Capture information about block/transaction validation.
Definition: validation.h:22
static void PushMNAUTH(CNode *pnode, CConnman &connman)
Definition: mnauth.cpp:19
std::atomic< bool > fPingQueued
Definition: net.h:932
256-bit opaque blob.
Definition: uint256.h:123
std::string GetDevNetName() const
Looks for -devnet and returns either "devnet-<name>" or simply "devnet" if no name was specified...
Definition: util.cpp:1045
void AddInventoryKnown(const CInv &inv)
Definition: net.h:1041
unsigned int nTime
Definition: protocol.h:390
bool IsReachable(enum Network net)
check whether a given network is one we can probably connect to
Definition: net.cpp:316
ArgsManager gArgs
Definition: util.cpp:108
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Definition: protocol.h:387
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &pblock) override
Notifies listeners that a block which builds directly on our current tip has been received and connec...
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:50
std::vector< CTransactionRef > vtx
Definition: block.h:76
uint256 receivedMNAuthChallenge
Definition: net.h:940
std::string FormatStateMessage(const CValidationState &state)
Convert CValidationState to a human-readable message for logging.
Definition: validation.cpp:513
std::set< NodeId > setMisbehaving
CompareInvMempoolOrder(CTxMemPool *_mempool)
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:40
bool operator()(const I &a, const I &b) const
bool fFeeler
Definition: net.h:846
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:442
uint256 verifiedProRegTxHash
Definition: net.h:941
std::atomic< int64_t > nLastTXTime
Definition: net.h:918
Helper class to store mixing transaction (tx) information.
Definition: privatesend.h:291
const bool fInbound
Definition: net.h:851
bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb)
Definition: txmempool.cpp:1135
CPrivateSendClientManager privateSendClient
static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY
How many microseconds to delay requesting transactions from inbound peers.
const char * QCOMPLAINT
Definition: protocol.cpp:65
static void RelayAddress(const CAddress &addr, bool fReachable, CConnman *connman)
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:18
void reserve(size_type n)
Definition: streams.h:197
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:140
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:170
const CChainParams & Params()
Return the currently selected parameters.
uint256 hashContinue
Definition: net.h:886
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
bool fWhitelisted
Definition: net.h:845
bool AlreadyHave(const CInv &inv)
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:23
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:14
bool IsTxAvailable(size_t index) const
static constexpr int64_t CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds.
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
bool AcceptToMemoryPool(CTxMemPool &pool, CValidationState &state, const CTransactionRef &tx, bool *pfMissingInputs, bool bypass_limits, const CAmount nAbsurdFee, bool fDryRun)
(try to) add transaction to memory pool
Definition: validation.cpp:890
std::string GetArg(const std::string &strArg, const std::string &strDefault) const
Return string argument or default value.
Definition: util.cpp:808
CBlockIndex * FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator)
Find the last common block between the parameter chain and a locator.
Definition: validation.cpp:284
int64_t GetAdjustedTime()
Definition: timedata.cpp:35
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:201
static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams)
CCriticalSection cs_vProcessMsg
Definition: net.h:816
void SetSendVersion(int nVersionIn)
Definition: net.cpp:873
static void ProcessOrphanTx(CConnman *connman, std::set< uint256 > &orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main
bool AlreadyHave(const CInv &inv)
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:453
void BlockChecked(const CBlock &block, const CValidationState &state) override
Notifies listeners of a block validation result.
void SetBestHeight(int height)
Definition: net.cpp:3633
#define LIMITED_STRING(obj, n)
Definition: serialize.h:377
static const int CADDR_TIME_VERSION
nTime field added to CAddress, starting with this version; if possible, avoid requesting addresses no...
Definition: version.h:30
void EraseOrphansFor(NodeId peer)
static const bool DEFAULT_WATCH_QUORUMS
Definition: quorums_init.h:15
std::atomic< int64_t > nTimeOffset
Definition: net.h:829
std::string GetLogString() const
Definition: net.cpp:750
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
Definition: validation.h:133
int64_t PoissonNextSend(int64_t now, int average_interval_seconds)
Return a timestamp in the future (in microseconds) for exponentially distributed events.
Definition: net.cpp:3825
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:22
bool fListen
Definition: net.cpp:113
std::unique_ptr< CConnman > g_connman
Definition: init.cpp:97
std::atomic_bool fSuccessfullyConnected
Definition: net.h:852
CBlockLocator GetLocator(const CBlockIndex *pindex=nullptr) const
Return a CBlockLocator that refers to a block in this chain (by default the tip). ...
Definition: chain.cpp:23
SipHash-2-4.
Definition: hash.h:266
#define AssertLockNotHeld(cs)
Definition: sync.h:88
std::unique_ptr< CBlockTreeDB > pblocktree
Global variable that points to the active block tree (protected by cs_main)
Definition: validation.cpp:301
void GetRandBytes(unsigned char *buf, int num)
Functions to gather random data via the OpenSSL PRNG.
Definition: random.cpp:273
static int count
Definition: tests.c:45
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of strSubVer in version message.
Definition: net.h:68
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK
Maximum number of inventory items to send per transmission.
const char * DSTX
Definition: protocol.cpp:53
std::atomic< int > nVersion
Definition: net.h:838
bool fRelayTxes
Definition: net.h:861
bool CheckSignature(const CBLSPublicKey &blsPubKey) const
unsigned int GetRejectCode() const
Definition: validation.h:80
const char * MNAUTH
Definition: protocol.cpp:77
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
void ConsiderEviction(CNode *pto, int64_t time_in_seconds)
void EraseObjectRequest(CNodeState *nodestate, const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
CSigningManager * quorumSigningManager
const char * QCONTRIB
Definition: protocol.cpp:64
void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) override
static const int PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive (in seconds).
Definition: net.h:54
static constexpr size_t HEADER_SIZE
Definition: protocol.h:37
bool m_limited_node
Definition: net.h:850
static const int NO_BLOOM_VERSION
"filter*" commands are disabled without NODE_BLOOM after and including this version ...
Definition: version.h:36
bool CorruptionPossible() const
Definition: validation.h:77
int GetExtraOutboundCount()
Definition: net.cpp:2203
static const unsigned int DEFAULT_BANSCORE_THRESHOLD
Definition: validation.h:126
static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts in seconds.
static const unsigned int BLOCK_STALLING_TIMEOUT
Timeout in seconds during which a peer must stall block download progress before being disconnected...
Definition: validation.h:89
std::atomic< bool > fSendRecSigs
Definition: net.h:945
const char * CLSIG
Definition: protocol.cpp:75
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
Definition: validation.h:95
static const int SENDDSQUEUE_PROTO_VERSION
introduction of SENDDSQUEUE TODO we can remove this in 0.15.0.0
Definition: version.h:55
uint256 hashDevnetGenesisBlock
Definition: params.h:132
const char * MNGOVERNANCEOBJECT
Definition: protocol.cpp:58
std::atomic< bool > fSendDSQueue
Definition: net.h:935
const char * MNGOVERNANCEOBJECTVOTE
Definition: protocol.cpp:59
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:26
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:198
void MarkAddressGood(const CAddress &addr)
Definition: net.cpp:3267
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:183
std::chrono::microseconds GetObjectRequestTime(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
static const int32_t CURRENT_VERSION
Definition: transaction.h:202
Information about a peer.
Definition: net.h:800
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:54
std::vector< int > vHeightInFlight
bool ReadBlockFromDisk(CBlock &block, const CDiskBlockPos &pos, const Consensus::Params &consensusParams)
Functions for disk access for blocks.
full block available in blk*.dat
Definition: chain.h:154
const char * QWATCH
Definition: protocol.cpp:68
STL-like map container that only keeps the N elements with the highest value.
Definition: limitedmap.h:18
static const unsigned char REJECT_MALFORMED
"reject" message codes
Definition: validation.h:12
std::string GetAddrName() const
Definition: net.cpp:724
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:47
CChain & chainActive
The currently-connected chain of blocks (protected by cs_main).
Definition: validation.cpp:217
const char * SPORK
Definition: protocol.cpp:45
CSerializedNetMsg Make(int nFlags, std::string sCommand, Args &&... args) const
AssertLockHeld(g_cs_orphans)
void RelayTransaction(const CTransaction &tx)
Definition: net.cpp:3465
CTransactionRef tx
Definition: privatesend.h:299
void AddAddressKnown(const CAddress &_addr)
Definition: net.h:1021
std::map< uint256, COrphanTx > mapOrphanTransactions GUARDED_BY(g_cs_orphans)
size_t GetRequestedObjectCount(NodeId nodeId)
void InitializeNode(CNode *pnode) override
static constexpr int64_t TX_EXPIRY_INTERVAL_FACTOR
How long to wait (expiry * factor microseconds) before expiring an in-flight getdata request to a pee...
static constexpr int64_t STALE_CHECK_INTERVAL
How frequently to check for stale tips, in seconds.
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
COutPoint prevout
Definition: transaction.h:73
std::atomic_bool fPauseRecv
Definition: net.h:874
static const bool DEFAULT_WHITELISTFORCERELAY
Default for -whitelistforcerelay.
Definition: validation.h:54
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
Definition: net.h:62
std::chrono::microseconds nNextInvSend
Definition: net.h:909
bool SendMessages(CNode *pto, std::atomic< bool > &interrupt) override
Send queued protocol messages to be sent to a give node.
const char * SENDDSQUEUE
Definition: protocol.cpp:55
std::atomic< int64_t > nLastBlockTime
Definition: net.h:917
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
void ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, CConnman &connman)
std::vector< CInv > vInventoryOtherToSend
Definition: net.h:907
void PrioritiseTransaction(const uint256 &hash, const CAmount &nFeeDelta)
Affect CreateNewBlock prioritisation of transactions.
Definition: txmempool.cpp:1317
static bool SendRejectsAndCheckIfBanned(CNode *pnode, CConnman *connman)
bool g_enable_bip61
Enable BIP61 (sending reject messages)
CMasternodeMetaInfoPtr GetMetaInfo(const uint256 &proTxHash, bool fCreate=true)
static uint256 most_recent_block_hash
void AdvertiseLocal(CNode *pnode)
Definition: net.cpp:209
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:199
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:20
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE
Default for -maxorphantxsize, maximum size in megabytes the orphan map can grow before entries are re...
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
Definition: validation.h:102
int in_avail() const
Definition: streams.h:292
size_t nMapOrphanTransactionsSize
Defined in BIP37.
Definition: protocol.h:402
bool AlreadyHave(const CInv &inv) const
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:35
Wrapped mutex: supports recursive locking, but no waiting TODO: We should move away from using the re...
Definition: sync.h:94
std::string itostr(int n)
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:136
uint64_t GetRand(uint64_t nMax)
Definition: random.cpp:354
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:41
Message header.
Definition: protocol.h:28
uint256 hash
Definition: transaction.h:29
static const unsigned int MAX_STANDARD_TX_SIZE
The maximum size for transactions we're willing to relay/mine.
Definition: policy.h:24
std::string ToStringShort() const
Definition: transaction.cpp:17
Released under the MIT license