Merge #26326: net: don't lock cs_main while reading blocks in net processing

75d27fe net: reduce LOCK(cs_main) scope in ProcessGetBlockData (Andrew Toth)
613a45c net: reduce LOCK(cs_main) scope in GETBLOCKTXN (Andrew Toth)

Pull request description:

  Inspired by #11913 and #26308.

  `cs_main` doesn't need to be locked while reading blocks. This removes the locks in `net_processing`.
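
  The patch follows a standard lock-narrowing pattern: hold `cs_main` only long enough to copy the few fields the slow path needs (the block's on-disk position, the current tip, whether direct fetch applies), then do the disk read with the lock released. Below is a minimal, self-contained sketch of that pattern only; it is not Bitcoin Core code, and the names (`FilePos`, `Index`, `ServeBlock`) are invented for illustration.

      #include <cstdio>
      #include <mutex>
      #include <optional>
      #include <string>
      #include <unordered_map>
      #include <vector>

      struct FilePos { int file_num{-1}; long offset{0}; };

      class Index {
          mutable std::mutex m_mutex;                        // stands in for cs_main
          std::unordered_map<std::string, FilePos> m_index;  // block hash -> on-disk position
      public:
          void Add(const std::string& hash, FilePos pos)
          {
              std::lock_guard<std::mutex> lock(m_mutex);
              m_index[hash] = pos;
          }
          // Only the position lookup happens under the lock.
          std::optional<FilePos> Lookup(const std::string& hash) const
          {
              std::lock_guard<std::mutex> lock(m_mutex);
              const auto it = m_index.find(hash);
              if (it == m_index.end()) return std::nullopt;
              return it->second;
          }
      };

      // The slow disk read runs with no lock held; a failed read is reported to
      // the caller (the real patch disconnects the peer) instead of asserting.
      bool ServeBlock(const Index& index, const std::string& hash, std::vector<char>& out)
      {
          const std::optional<FilePos> pos = index.Lookup(hash);  // short critical section
          if (!pos) return false;
          const std::string path = "blk" + std::to_string(pos->file_num) + ".dat";
          std::FILE* file = std::fopen(path.c_str(), "rb");
          if (!file) return false;
          std::fseek(file, pos->offset, SEEK_SET);
          out.resize(80);  // real code stores the block size next to its position
          const auto n = std::fread(out.data(), 1, out.size(), file);
          std::fclose(file);
          return n == out.size();
      }

  Keeping the critical section down to a map lookup means a slow or failing disk read no longer blocks every other thread waiting on `cs_main`, which is the point of the change.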

ACKs for top commit:
  sr-gi:
    ACK 75d27fe
  achow101:
    ACK 75d27fe
  furszy:
    ACK 75d27fe with a non-blocking nit.
  mzumsande:
    Code Review ACK 75d27fe
  TheCharlatan:
    ACK 75d27fe

Tree-SHA512: 79b85f748f68ecfb2f2afd3267857dd41b8e76dd482c9c922037399dcbce7b1e5d4c708a4f5fd17c3fb6699b0d88f26a17cc1d92db115dd43c8d4392ae27cec4
achow101 committed May 8, 2024
2 parents 4ff4276 + 75d27fe commit 573f631
116 changes: 73 additions & 43 deletions src/net_processing.cpp
@@ -118,6 +118,7 @@ static const unsigned int MAX_HEADERS_RESULTS = 2000;
 static const int MAX_CMPCTBLOCK_DEPTH = 5;
 /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
 static const int MAX_BLOCKTXN_DEPTH = 10;
+static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high");
 /** Size of the "block download window": how far ahead of our current height do we fetch?
  *  Larger windows tolerate larger download speed differences between peer, but increase the potential
  *  degree of disordering of blocks on disk (which make reindexing and pruning harder). We'll probably
@@ -2420,55 +2421,77 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
         }
     }

-    LOCK(cs_main);
-    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
-    if (!pindex) {
-        return;
-    }
-    if (!BlockRequestAllowed(pindex)) {
-        LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
-        return;
-    }
-    // disconnect node in case we have reached the outbound limit for serving historical blocks
-    if (m_connman.OutboundTargetReached(true) &&
-        (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
-        !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
-    ) {
-        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
-        pfrom.fDisconnect = true;
-        return;
-    }
-    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
-    if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
-            (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
-       )) {
-        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
-        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
-        pfrom.fDisconnect = true;
-        return;
-    }
-    // Pruned nodes may have deleted the block, so check whether
-    // it's available before trying to send.
-    if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
-        return;
-    }
+    const CBlockIndex* pindex{nullptr};
+    const CBlockIndex* tip{nullptr};
+    bool can_direct_fetch{false};
+    FlatFilePos block_pos{};
+    {
+        LOCK(cs_main);
+        pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
+        if (!pindex) {
+            return;
+        }
+        if (!BlockRequestAllowed(pindex)) {
+            LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
+            return;
+        }
+        // disconnect node in case we have reached the outbound limit for serving historical blocks
+        if (m_connman.OutboundTargetReached(true) &&
+            (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
+            !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
+        ) {
+            LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
+            pfrom.fDisconnect = true;
+            return;
+        }
+        tip = m_chainman.ActiveChain().Tip();
+        // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
+        if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
+                (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
+           )) {
+            LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
+            //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
+            pfrom.fDisconnect = true;
+            return;
+        }
+        // Pruned nodes may have deleted the block, so check whether
+        // it's available before trying to send.
+        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
+            return;
+        }
+        can_direct_fetch = CanDirectFetch();
+        block_pos = pindex->GetBlockPos();
+    }

     std::shared_ptr<const CBlock> pblock;
     if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
         pblock = a_recent_block;
     } else if (inv.IsMsgWitnessBlk()) {
         // Fast-path: in this case it is possible to serve the block directly from disk,
         // as the network format matches the format on disk
         std::vector<uint8_t> block_data;
-        if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, pindex->GetBlockPos())) {
-            assert(!"cannot load block from disk");
-        }
+        if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, block_pos)) {
+            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
+                LogPrint(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId());
+            } else {
+                LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId());
+            }
+            pfrom.fDisconnect = true;
+            return;
+        }
         MakeAndPushMessage(pfrom, NetMsgType::BLOCK, Span{block_data});
         // Don't set pblock as we've sent the block
     } else {
         // Send block from disk
         std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
-        if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, *pindex)) {
-            assert(!"cannot load block from disk");
-        }
+        if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) {
+            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
+                LogPrint(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId());
+            } else {
+                LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId());
+            }
+            pfrom.fDisconnect = true;
+            return;
+        }
         pblock = pblockRead;
     }
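
The new failure handling above uses WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex)) to re-take the lock briefly and classify a failed read: if the block was pruned while the lock was released, the peer is disconnected with a log line rather than tripping an assert. Roughly, WITH_LOCK(cs, code) is an immediately-invoked lambda that holds the lock for the duration of the expression and forwards its value; a simplified stand-in (the real macro also carries thread-safety annotations), with invented global names for illustration:

    #include <iostream>
    #include <mutex>

    std::mutex g_index_mutex;     // stand-in for cs_main
    bool g_block_pruned{false};   // stand-in for BlockManager::IsBlockPruned()

    // Simplified equivalent of WITH_LOCK: run a callable while holding the
    // mutex and forward its return value.
    template <typename Callable>
    decltype(auto) WithLock(std::mutex& mutex, Callable&& func)
    {
        std::lock_guard<std::mutex> lock(mutex);
        return func();
    }

    int main()
    {
        // In spirit: WITH_LOCK(cs_main, return IsBlockPruned(*pindex));
        const bool pruned = WithLock(g_index_mutex, [&] { return g_block_pruned; });
        std::cout << (pruned ? "pruned while unlocked" : "still on disk") << '\n';
    }
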
@@ -2506,7 +2529,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
         // they won't have a useful mempool to match against a compact block,
         // and we don't feel like constructing the object for them, so
         // instead we respond with the full, non-compact block.
-        if (CanDirectFetch() && pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
+        if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
             if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                 MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block);
             } else {
@@ -2527,7 +2550,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
             // and we want it right after the last block so they don't
             // wait for other stuff first.
             std::vector<CInv> vInv;
-            vInv.emplace_back(MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash());
+            vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash());
             MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
             peer.m_continuation_block.SetNull();
         }
@@ -4366,6 +4389,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             return;
         }

+        FlatFilePos block_pos{};
         {
             LOCK(cs_main);

@@ -4376,15 +4400,21 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             }

             if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
-                CBlock block;
-                const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pindex)};
-                assert(ret);
-
-                SendBlockTransactions(pfrom, *peer, block, req);
-                return;
+                block_pos = pindex->GetBlockPos();
             }
         }

+        if (!block_pos.IsNull()) {
+            CBlock block;
+            const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)};
+            // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
+            // pruned after we release cs_main above, so this read should never fail.
+            assert(ret);
+
+            SendBlockTransactions(pfrom, *peer, block, req);
+            return;
+        }
+
         // If an older block is requested (should never happen in practice,
         // but can happen in tests) send a block response instead of a
         // blocktxn response. Sending a full block response instead of a
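
The "cannot get pruned" reasoning in the new comment is what the static_assert added in the first hunk pins down: a pruning node always keeps at least MIN_BLOCKS_TO_KEEP recent blocks (288 in validation.h at the time of this change), so any block within MAX_BLOCKTXN_DEPTH of the tip is still on disk even after cs_main is released. Restated as a standalone snippet, with the constant values copied here purely for illustration:

    // Values as of this change; the real definitions live in net_processing.cpp
    // (MAX_BLOCKTXN_DEPTH) and validation.h (MIN_BLOCKS_TO_KEEP).
    static constexpr int MAX_BLOCKTXN_DEPTH = 10;
    static constexpr int MIN_BLOCKS_TO_KEEP = 288;

    // If this ever failed, a block shallow enough to serve GETBLOCKTXN for could be
    // pruned between releasing cs_main and the ReadBlockFromDisk call above, and the
    // assert(ret) would fire.
    static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high");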
