Skip to content

Commit

Permalink
Merge pull request #188 from dgenr8/bip100
Browse files Browse the repository at this point in the history
BIP100: Dynamic maximum block size by miner vote
  • Loading branch information
dgenr8 committed May 14, 2017
2 parents ae1e759 + e13cc7d commit 9837cfa
Show file tree
Hide file tree
Showing 38 changed files with 1,008 additions and 87 deletions.
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,8 @@ linux-build
win32-build
qa/pull-tester/run-bitcoind-for-test.sh
qa/pull-tester/tests-config.sh
qa/pull-tester/cache/*
qa/pull-tester/cache
qa/pull-tester/cache_bigblock
qa/pull-tester/test.*/*

!src/leveldb*/Makefile
Expand Down
1 change: 1 addition & 0 deletions qa/pull-tester/rpc-tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@
'sendheaders.py'
]
testScriptsExt = [
'bip100-sizelimit.py',
'bip9-softforks.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
Expand Down
1 change: 1 addition & 0 deletions qa/rpc-tests/.gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
*.pyc
cache
cache_bigblock
207 changes: 207 additions & 0 deletions qa/rpc-tests/bip100-sizelimit.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,207 @@
#!/usr/bin/env python2
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test mining and broadcast of larger-than-1MB-blocks
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

from decimal import Decimal

# Directory (relative to the test runner's cwd) that caches the slow-to-build
# 2012-block chain and pre-signed transaction data between test runs.
CACHE_DIR = "cache_bigblock"

class BigBlockTest(BitcoinTestFramework):
    """Test BIP100 miner-voted block size increases.

    Four nodes mine a 2012-block chain whose coinbase votes are ~75% in
    favour of an increase; the test then checks that node0 will (and will
    not) mine a larger-than-1MB block exactly when the consensus limit
    allows it, including across restarts and re-orgs.
    """

    @staticmethod
    def _node_args(block_max_size, size_vote):
        """Common bitcoind arguments for a node producing blocks of at most
        block_max_size bytes and voting for a size_vote-MB maximum."""
        return ["-blockmaxsize=%d" % block_max_size,
                "-maxblocksizevote=%d" % size_vote,
                "-limitancestorsize=2000",
                "-limitdescendantsize=2000"]

    def setup_chain(self):
        """Create (or reuse) the cached voting chain plus a file of signed
        transactions, then copy the cache into this run's tmpdir."""
        print("Initializing test directory "+self.options.tmpdir)

        if not os.path.isdir(os.path.join(CACHE_DIR, "node0")):
            print("Creating initial chain. This will be cached for future runs.")

            for i in range(4):
                initialize_datadir(CACHE_DIR, i)  # Overwrite port/rpcport in bitcoin.conf

            # Node 0 creates 8MB blocks that vote for increase to 8MB
            # Node 1 creates empty blocks that vote for 8MB
            # Node 2 creates empty blocks that vote for 1MB (no increase)
            # Node 3 creates empty blocks that vote for 2MB
            self.nodes = []
            # Use node0 to mine blocks for input splitting
            self.nodes.append(start_node(0, CACHE_DIR, self._node_args(8000000, 8)))
            self.nodes.append(start_node(1, CACHE_DIR, self._node_args(1000, 8)))
            self.nodes.append(start_node(2, CACHE_DIR, self._node_args(1000, 1)))
            self.nodes.append(start_node(3, CACHE_DIR, self._node_args(1000, 2)))

            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 1, 2)
            connect_nodes_bi(self.nodes, 2, 3)
            connect_nodes_bi(self.nodes, 3, 0)

            self.is_network_split = False

            # Create a 2012-block chain in a 75% ratio for increase (genesis block votes for 1MB)
            # Make sure they are not already sorted correctly
            blocks = []
            blocks.append(self.nodes[1].generate(503))
            assert(self.sync_blocks(self.nodes[1:3]))
            blocks.append(self.nodes[2].generate(502))  # <--- genesis is 503rd vote for 1MB
            assert(self.sync_blocks(self.nodes[2:4]))
            blocks.append(self.nodes[3].generate(503))
            assert(self.sync_blocks(self.nodes[1:4]))
            blocks.append(self.nodes[1].generate(503))
            assert(self.sync_blocks(self.nodes))

            # Create a lot of transaction data ready to be mined
            fee = Decimal('.00005')
            used = set()
            print("Creating transaction data")
            with open(os.path.join(CACHE_DIR, "txdata"), "w") as tx_file:
                for _ in range(25):
                    # Spend up to 250 previously-unused UTXOs per transaction
                    inputs = []
                    outputs = {}
                    spent = 0
                    for utxo in self.nodes[3].listunspent(0):
                        outpoint = utxo["txid"] + str(utxo["vout"])
                        if outpoint not in used:
                            inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
                            outputs[self.nodes[3].getnewaddress()] = utxo["amount"] - fee
                            used.add(outpoint)
                            spent += 1
                            if spent >= 250:
                                break
                    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
                    txdata = self.nodes[3].signrawtransaction(rawtx)["hex"]
                    self.nodes[3].sendrawtransaction(txdata)
                    tx_file.write(txdata + "\n")

            stop_nodes(self.nodes)
            wait_bitcoinds()
            self.nodes = []
            # Drop per-run state files so the cache is reusable
            for i in range(4):
                os.remove(log_filename(CACHE_DIR, i, "db.log"))
                os.remove(log_filename(CACHE_DIR, i, "peers.dat"))
                os.remove(log_filename(CACHE_DIR, i, "fee_estimates.dat"))

        for i in range(4):
            from_dir = os.path.join(CACHE_DIR, "node"+str(i))
            to_dir = os.path.join(self.options.tmpdir, "node"+str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf

    def sync_blocks(self, rpc_connections, wait=1, max_wait=60):
        """
        Wait until everybody has the same block count.

        Polls every `wait` seconds, up to `max_wait` attempts; returns True
        once all connections agree, False on timeout.
        """
        for attempt in range(max_wait):
            if attempt > 0:
                time.sleep(wait)
            counts = [conn.getblockcount() for conn in rpc_connections]
            if len(set(counts)) == 1:
                return True
        return False

    def setup_network(self):
        """Start nodes 0-2 on the cached chain and reload the cached mempool."""
        self.nodes = []

        self.nodes.append(start_node(0, self.options.tmpdir, self._node_args(8000000, 8), timewait=60))
        self.nodes.append(start_node(1, self.options.tmpdir, self._node_args(1000, 8), timewait=60))
        self.nodes.append(start_node(2, self.options.tmpdir, self._node_args(1000, 1), timewait=60))
        # (We don't restart node 3, the node with the huge wallet.)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 2, 0)

        self.load_mempool(self.nodes[0])

    def load_mempool(self, node):
        """Re-broadcast every cached raw transaction through `node`."""
        with open(os.path.join(CACHE_DIR, "txdata"), "r") as f:
            for line in f:
                node.sendrawtransaction(line.rstrip())

    def TestMineBig(self, expect_big):
        # Test if node0 will mine a block bigger than legacy MAX_BLOCK_SIZE
        b1hash = self.nodes[0].generate(1)[0]
        b1 = self.nodes[0].getblock(b1hash, True)
        assert(self.sync_blocks(self.nodes[0:3]))

        if expect_big:
            assert(b1['size'] > 1000*1000)

            # Have node1 mine on top of the block,
            # to make sure it goes along with the fork
            b2hash = self.nodes[1].generate(1)[0]
            b2 = self.nodes[1].getblock(b2hash, True)
            assert(b2['previousblockhash'] == b1hash)
            assert(self.sync_blocks(self.nodes[0:3]))
        else:
            assert(b1['size'] < 1000*1000)

        # Reset chain to before b1hash:
        for node in self.nodes[0:3]:
            node.invalidateblock(b1hash)
        assert(self.sync_blocks(self.nodes[0:3]))

    def run_test(self):
        print("Testing consensus blocksize increase conditions")

        assert_equal(self.nodes[0].getblockcount(), 2011)  # This is a 0-based height

        # Current nMaxBlockSize is still 1MB
        assert_equal(self.nodes[0].getblocktemplate()["sizelimit"], 1000000)
        self.TestMineBig(False)

        # Create a situation where the 1512th-highest vote is for 2MB
        self.nodes[2].generate(1)
        assert(self.sync_blocks(self.nodes[1:3]))
        ahash = self.nodes[1].generate(3)[2]
        assert_equal(self.nodes[1].getblocktemplate()["sizelimit"], int(1000000 * 1.05))
        assert(self.sync_blocks(self.nodes[0:2]))
        self.TestMineBig(True)

        # Shutdown then restart node[0], it should produce a big block.
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, self._node_args(8000000, 8), timewait=60)
        self.load_mempool(self.nodes[0])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        assert_equal(self.nodes[0].getblocktemplate()["sizelimit"], int(1000000 * 1.05))
        self.TestMineBig(True)

        # Test re-orgs past the sizechange block
        stop_node(self.nodes[0], 0)
        self.nodes[2].invalidateblock(ahash)
        assert_equal(self.nodes[2].getblocktemplate()["sizelimit"], 1000000)
        self.nodes[2].generate(2)
        assert_equal(self.nodes[2].getblocktemplate()["sizelimit"], 1000000)
        assert(self.sync_blocks(self.nodes[1:3]))

        # Restart node0, it should re-org onto longer chain,
        # and refuse to mine a big block:
        self.nodes[0] = start_node(0, self.options.tmpdir, self._node_args(8000000, 8), timewait=60)
        self.load_mempool(self.nodes[0])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        assert(self.sync_blocks(self.nodes[0:3]))
        assert_equal(self.nodes[0].getblocktemplate()["sizelimit"], 1000000)
        self.TestMineBig(False)

        # Mine 4 blocks voting for 8MB. Bigger block NOT ok, we are in the next voting period
        self.nodes[1].generate(4)
        assert_equal(self.nodes[1].getblocktemplate()["sizelimit"], 1000000)
        assert(self.sync_blocks(self.nodes[0:3]))
        self.TestMineBig(False)

        print("Cached test chain and transactions left in %s"%(CACHE_DIR))

# Run the test directly (normally invoked via qa/pull-tester/rpc-tests.py).
if __name__ == '__main__':
    BigBlockTest().main()
2 changes: 2 additions & 0 deletions src/Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ BITCOIN_CORE_H = \
leveldbwrapper.h \
limitedmap.h \
main.h \
maxblocksize.h \
memusage.h \
merkleblock.h \
miner.h \
Expand Down Expand Up @@ -211,6 +212,7 @@ libbitcoin_server_a_SOURCES = \
leakybucket.cpp \
leveldbwrapper.cpp \
main.cpp \
maxblocksize.cpp \
merkleblock.cpp \
miner.cpp \
net.cpp \
Expand Down
1 change: 1 addition & 0 deletions src/Makefile.test.include
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ BITCOIN_TESTS =\
test/ipgroups_tests.cpp \
test/key_tests.cpp \
test/main_tests.cpp \
test/maxblocksize_tests.cpp \
test/mempool_tests.cpp \
test/miner_tests.cpp \
test/multisig_tests.cpp \
Expand Down
11 changes: 7 additions & 4 deletions src/blockencodings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,6 @@ uint64_t GetShortID(
return SipHashUint256(shorttxidk0, shorttxidk1, txhash) & 0xffffffffffffL;
}

#define MIN_TRANSACTION_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION))

CompactBlock::CompactBlock(const CBlock& block, const CompactPrefiller& prefiller) :
nonce(GetRand(std::numeric_limits<uint64_t>::max())), header(block)
{
Expand Down Expand Up @@ -68,11 +66,16 @@ uint64_t CompactBlock::GetShortID(const uint256& txhash) const {
return ::GetShortID(shorttxidk0, shorttxidk1, txhash);
}

void validateCompactBlock(const CompactBlock& cmpctblock) {
// Serialized size of a default-constructed (empty) transaction: a lower bound
// on any transaction's wire size, used to cap the number of transactions a
// compact block may plausibly announce.
unsigned int minTxSize() {
    return ::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION);
}

void validateCompactBlock(const CompactBlock& cmpctblock, uint64_t currMaxBlockSize) {
if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
throw std::invalid_argument("empty data in compact block");

if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_SIZE / MIN_TRANSACTION_SIZE)
uint64_t maxBlockSize = currMaxBlockSize * 105 / 100; // max size after next block adjustmnet
if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > maxBlockSize / minTxSize())
throw std::invalid_argument("compact block exceeds max txs in a block");

int32_t lastprefilledindex = -1;
Expand Down
3 changes: 2 additions & 1 deletion src/blockencodings.h
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,7 @@ class CompactBlock {
}
};

void validateCompactBlock(const CompactBlock& cmpctblock);
unsigned int minTxSize();
void validateCompactBlock(const CompactBlock& cmpctblock, uint64_t currMaxBlockSize);

#endif
27 changes: 24 additions & 3 deletions src/chain.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@

#include <vector>

static const int BIP100_DBI_VERSION = 0x08000000;
static const int DISK_BLOCK_INDEX_VERSION = BIP100_DBI_VERSION;

struct CDiskBlockPos
{
int nFile;
Expand Down Expand Up @@ -67,8 +70,8 @@ enum BlockStatus: uint32_t {

/**
* Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids,
* sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
* parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will be set.
* merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
* parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx and maxblocksize will be set.
*/
BLOCK_VALID_TRANSACTIONS = 3,

Expand Down Expand Up @@ -146,6 +149,15 @@ class CBlockIndex
//! (memory only) Sequential id assigned to distinguish order in which blocks are received.
uint32_t nSequenceId;

//! Index entry serial format version
int nSerialVersion;

//! Maximum serialized block size at nHeight
uint64_t nMaxBlockSize;

//! This block's vote for future maximum serialized block size
uint64_t nMaxBlockSizeVote;

void SetNull()
{
phashBlock = NULL;
Expand All @@ -160,6 +172,9 @@ class CBlockIndex
nChainTx = 0;
nStatus = 0;
nSequenceId = 0;
nSerialVersion = 0;
nMaxBlockSize = 0;
nMaxBlockSizeVote = 0;

nVersion = 0;
hashMerkleRoot = uint256();
Expand Down Expand Up @@ -292,14 +307,15 @@ class CDiskBlockIndex : public CBlockIndex

explicit CDiskBlockIndex(const CBlockIndex* pindex) : CBlockIndex(*pindex) {
hashPrev = (pprev ? pprev->GetBlockHash() : uint256());
nSerialVersion = DISK_BLOCK_INDEX_VERSION;
}

ADD_SERIALIZE_METHODS;

template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
if (!(nType & SER_GETHASH))
READWRITE(VARINT(nVersion));
READWRITE(VARINT(nSerialVersion));

READWRITE(VARINT(nHeight));
READWRITE(VARINT(nStatus));
Expand All @@ -318,6 +334,11 @@ class CDiskBlockIndex : public CBlockIndex
READWRITE(nTime);
READWRITE(nBits);
READWRITE(nNonce);

if (nSerialVersion >= BIP100_DBI_VERSION) {
READWRITE(VARINT(nMaxBlockSize));
READWRITE(VARINT(nMaxBlockSizeVote));
}
}

uint256 GetBlockHash() const
Expand Down
11 changes: 11 additions & 0 deletions src/chainparams.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,10 @@ class CMainParams : public CChainParams {
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 1462060800; // May 1st, 2016
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 1493596800; // May 1st, 2017

// BIP100 defined start height and max block size change critical vote position
consensus.bip100ActivationHeight = 449568;
consensus.nMaxBlockSizeChangePosition = 1512;

/**
* The message start string is designed to be unlikely to occur in normal data.
* The characters are rarely used upper ASCII, not valid as UTF-8, and produce
Expand Down Expand Up @@ -183,6 +187,10 @@ class CTestNetParams : public CChainParams {
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 1456790400; // March 1st, 2016
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 1493596800; // May 1st, 2017

// BIP100 defined start height and max block size change critical vote position
consensus.bip100ActivationHeight = 798336;
consensus.nMaxBlockSizeChangePosition = 1512;

pchMessageStart[0] = 0x0b;
pchMessageStart[1] = 0x11;
pchMessageStart[2] = 0x09;
Expand Down Expand Up @@ -255,6 +263,9 @@ class CRegTestParams : public CChainParams {
consensus.nPowTargetSpacing = 10 * 60;
consensus.fPowAllowMinDifficultyBlocks = true;

consensus.bip100ActivationHeight = 0;
consensus.nMaxBlockSizeChangePosition = 1512;

pchMessageStart[0] = 0xfa;
pchMessageStart[1] = 0xbf;
pchMessageStart[2] = 0xb5;
Expand Down

0 comments on commit 9837cfa

Please sign in to comment.