BIP100: Dynamic maximum block size by miner vote #1

Open · wants to merge 10 commits into base: 0.12
3 changes: 2 additions & 1 deletion .gitignore
@@ -103,7 +103,8 @@ linux-build
 win32-build
 qa/pull-tester/run-bitcoind-for-test.sh
 qa/pull-tester/tests_config.py
-qa/pull-tester/cache/*
+qa/pull-tester/cache
+qa/pull-tester/cache_bigblock
 qa/pull-tester/test.*/*
 qa/tmp
 cache/
1 change: 1 addition & 0 deletions qa/pull-tester/rpc-tests.py
@@ -127,6 +127,7 @@
     testScripts.append('zmq_test.py')

 testScriptsExt = [
+    'bip100-sizelimit.py',
     'bip9-softforks.py',
     'bip65-cltv.py',
     'bip65-cltv-p2p.py',
1 change: 1 addition & 0 deletions qa/rpc-tests/.gitignore
@@ -1,2 +1,3 @@
 *.pyc
 cache
+cache_bigblock
207 changes: 207 additions & 0 deletions qa/rpc-tests/bip100-sizelimit.py
@@ -0,0 +1,207 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test mining and broadcast of larger-than-1MB blocks
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

from decimal import Decimal

CACHE_DIR = "cache_bigblock"

class BigBlockTest(BitcoinTestFramework):

    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)

        if not os.path.isdir(os.path.join(CACHE_DIR, "node0")):
            print("Creating initial chain. This will be cached for future runs.")

            for i in range(4):
                initialize_datadir(CACHE_DIR, i)  # Overwrite port/rpcport in bitcoin.conf

            # Node 0 creates 8MB blocks that vote for an increase to 8MB
            # Node 1 creates empty blocks that vote for 8MB
            # Node 2 creates empty blocks that vote for 1MB (i.e. no increase)
            # Node 3 creates empty blocks that vote for 2MB
            self.nodes = []
            # Use node0 to mine blocks for input splitting
            self.nodes.append(start_node(0, CACHE_DIR, ["-blockmaxsize=8000000", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
            self.nodes.append(start_node(1, CACHE_DIR, ["-blockmaxsize=1000", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
            self.nodes.append(start_node(2, CACHE_DIR, ["-blockmaxsize=1000", "-maxblocksizevote=1", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
            self.nodes.append(start_node(3, CACHE_DIR, ["-blockmaxsize=1000", "-maxblocksizevote=2", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
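            # The parameters above drive the vote rule this test exercises
            # (a summary, inferred from the asserts in run_test): each
            # 2016-block voting period, the 1512th-highest coinbase vote
            # (the 75th percentile) becomes the new limit, with growth
            # capped at 5% per period.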

            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 1, 2)
            connect_nodes_bi(self.nodes, 2, 3)
            connect_nodes_bi(self.nodes, 3, 0)

            self.is_network_split = False

            # Create a 2012-block chain in a 75% ratio for increase (genesis block votes for 1MB)
            # Make sure the votes do not arrive already sorted by size
            blocks = []
            blocks.append(self.nodes[1].generate(503))
            assert(self.sync_blocks(self.nodes[1:3]))
            blocks.append(self.nodes[2].generate(502))  # <--- genesis is the 503rd vote for 1MB
            assert(self.sync_blocks(self.nodes[2:4]))
            blocks.append(self.nodes[3].generate(503))
            assert(self.sync_blocks(self.nodes[1:4]))
            blocks.append(self.nodes[1].generate(503))
            assert(self.sync_blocks(self.nodes))
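            # Tally at height 2011 (2012 blocks counting genesis), given the
            # votes configured above: 1006 blocks vote 8MB, 503 vote 2MB, and
            # 503 vote 1MB (502 mined plus genesis). The 1512th-highest vote
            # is therefore still 1MB, so no increase can trigger yet.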

            tx_file = open(os.path.join(CACHE_DIR, "txdata"), "w")

            # Create a lot of transaction data ready to be mined
            fee = Decimal('.00005')
            used = set()
            print("Creating transaction data")
            for i in range(0, 25):
                inputs = []
                outputs = {}
                limit = 0
                utxos = self.nodes[3].listunspent(0)
                for utxo in utxos:
                    if utxo["txid"] + str(utxo["vout"]) not in used:
                        raw_input = {}
                        raw_input["txid"] = utxo["txid"]
                        raw_input["vout"] = utxo["vout"]
                        inputs.append(raw_input)
                        outputs[self.nodes[3].getnewaddress()] = utxo["amount"] - fee
                        used.add(utxo["txid"] + str(utxo["vout"]))
                        limit = limit + 1
                        if limit >= 250:
                            break
                rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
                txdata = self.nodes[3].signrawtransaction(rawtx)["hex"]
                self.nodes[3].sendrawtransaction(txdata)
                tx_file.write(txdata + "\n")
            tx_file.close()

            stop_nodes(self.nodes)
            wait_bitcoinds()
            self.nodes = []
            for i in range(4):
                os.remove(log_filename(CACHE_DIR, i, "db.log"))
                os.remove(log_filename(CACHE_DIR, i, "peers.dat"))
                os.remove(log_filename(CACHE_DIR, i, "fee_estimates.dat"))

        for i in range(4):
            from_dir = os.path.join(CACHE_DIR, "node" + str(i))
            to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf

    def sync_blocks(self, rpc_connections, wait=1, max_wait=60):
        """
        Wait until everybody has the same block count
        """
        for i in range(0, max_wait):
            if i > 0:
                time.sleep(wait)
            counts = [x.getblockcount() for x in rpc_connections]
            if counts == [counts[0]] * len(counts):
                return True
        return False

    def setup_network(self):
        self.nodes = []

        self.nodes.append(start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockmaxsize=1000", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockmaxsize=1000", "-maxblocksizevote=1", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60))
        # (We don't restart node 3, which holds the huge wallet.)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 2, 0)

        self.load_mempool(self.nodes[0])

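    # The mempool does not survive a restart on this 0.12 base (assumption:
    # no mempool persistence here), so the cached transactions are re-sent
    # to node0 after every restart.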
    def load_mempool(self, node):
        with open(os.path.join(CACHE_DIR, "txdata"), "r") as f:
            for line in f:
                node.sendrawtransaction(line.rstrip())

    def TestMineBig(self, expect_big):
        # Test if node0 will mine a block bigger than legacy MAX_BLOCK_SIZE
        b1hash = self.nodes[0].generate(1)[0]
        b1 = self.nodes[0].getblock(b1hash, True)
        assert(self.sync_blocks(self.nodes[0:3]))

        if expect_big:
            assert(b1['size'] > 1000*1000)

            # Have node1 mine on top of the block,
            # to make sure it goes along with the fork
            b2hash = self.nodes[1].generate(1)[0]
            b2 = self.nodes[1].getblock(b2hash, True)
            assert(b2['previousblockhash'] == b1hash)
            assert(self.sync_blocks(self.nodes[0:3]))

        else:
            assert(b1['size'] < 1000*1000)

        # Reset chain to before b1hash:
        for node in self.nodes[0:3]:
            node.invalidateblock(b1hash)
        assert(self.sync_blocks(self.nodes[0:3]))

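    # Since TestMineBig invalidates the block it mines, each call leaves the
    # chain height and the vote tally unchanged.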

    def run_test(self):
        print("Testing consensus blocksize increase conditions")

        assert_equal(self.nodes[0].getblockcount(), 2011)  # This is a 0-based height

        # Current nMaxBlockSize is still 1MB
        assert_equal(self.nodes[0].getblocktemplate()["sizelimit"], 1000000)
        self.TestMineBig(False)

        # Create a situation where the 1512th-highest vote is for 2MB
        self.nodes[2].generate(1)
        assert(self.sync_blocks(self.nodes[1:3]))
        ahash = self.nodes[1].generate(3)[2]
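        # The chain now has 2016 blocks counting genesis: 1009 vote 8MB,
        # 503 vote 2MB, 504 vote 1MB, so the 1512th-highest vote is 2MB.
        # Growth per voting period is capped at 5%, hence the assert below
        # expects 1000000 * 1.05 rather than 2000000.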
        assert_equal(self.nodes[1].getblocktemplate()["sizelimit"], int(1000000 * 1.05))
        assert(self.sync_blocks(self.nodes[0:2]))
        self.TestMineBig(True)

        # Shut down, then restart node[0]; it should still produce a big block.
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60)
        self.load_mempool(self.nodes[0])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        assert_equal(self.nodes[0].getblocktemplate()["sizelimit"], int(1000000 * 1.05))
        self.TestMineBig(True)

        # Test re-orgs past the sizechange block
        stop_node(self.nodes[0], 0)
        self.nodes[2].invalidateblock(ahash)
        assert_equal(self.nodes[2].getblocktemplate()["sizelimit"], 1000000)
        self.nodes[2].generate(2)
        assert_equal(self.nodes[2].getblocktemplate()["sizelimit"], 1000000)
        assert(self.sync_blocks(self.nodes[1:3]))

        # Restart node0; it should re-org onto the longer chain
        # and refuse to mine a big block:
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60)
        self.load_mempool(self.nodes[0])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        assert(self.sync_blocks(self.nodes[0:3]))
        assert_equal(self.nodes[0].getblocktemplate()["sizelimit"], 1000000)
        self.TestMineBig(False)

        # Mine 4 blocks voting for 8MB. A bigger block is still NOT OK:
        # we are now in the next voting period.
        self.nodes[1].generate(4)
        assert_equal(self.nodes[1].getblocktemplate()["sizelimit"], 1000000)
        assert(self.sync_blocks(self.nodes[0:3]))
        self.TestMineBig(False)

        print("Cached test chain and transactions left in %s" % CACHE_DIR)

if __name__ == '__main__':
    BigBlockTest().main()
18 changes: 14 additions & 4 deletions qa/rpc-tests/p2p-fullblocktest.py
@@ -37,12 +37,13 @@ def __init__(self):
         self.block_time = int(time.time())+1
         self.tip = None
         self.blocks = {}
+        self.test = None

     def run_test(self):
-        test = TestManager(self, self.options.tmpdir)
-        test.add_all_connections(self.nodes)
+        self.test = TestManager(self, self.options.tmpdir)
+        self.test.add_all_connections(self.nodes)
         NetworkThread().start() # Start up network handling in another thread
-        test.run()
+        self.test.run()

     def add_transactions_to_block(self, block, tx_list):
         [ tx.rehash() for tx in tx_list ]
@@ -348,7 +349,16 @@ def update_block(block_number, new_transactions):
         tx.vout = [CTxOut(0, script_output)]
         b24 = update_block(24, [tx])
         assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
-        yield rejected(RejectResult(16, b'bad-blk-length'))
+        yield rejected() # Network sanity check will cause disconnect
+
+        # Reconnect
+        self.test.clear_all_connections()
+        self.test.wait_for_disconnections()
+        self.test.add_all_connections(self.nodes)
+        # Ignore requests for the oversize block
+        [n.inv_hash_ignore.append(b24.sha256) for n in self.test.test_nodes]
+        NetworkThread().start()
+        self.test.wait_for_verack()

         b25 = block(25, spend=out7)
         yield rejected()
5 changes: 5 additions & 0 deletions qa/rpc-tests/test_framework/authproxy.py
@@ -124,6 +124,11 @@ def _request(self, method, path, postdata):
                 return self._get_response()
             else:
                 raise
+        except BrokenPipeError:
+            # Python 3.5+ raises this instead of BadStatusLine when the connection was reset
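+            # Like the BadStatusLine branch above, this retries the request
+            # once on a freshly opened connection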
+            self.__conn.close()
+            self.__conn.request(method, path, postdata, headers)
+            return self._get_response()

     def __call__(self, *args):
         AuthServiceProxy.__id_count += 1
19 changes: 11 additions & 8 deletions qa/rpc-tests/test_framework/comptool.py
@@ -52,6 +52,7 @@ def __init__(self, block_store, tx_store):
         self.tx_request_map = {}
         self.block_reject_map = {}
         self.tx_reject_map = {}
+        self.inv_hash_ignore = []

         # When the pingmap is non-empty we're waiting for
         # a response
@@ -77,15 +78,17 @@ def on_getheaders(self, conn, message):
         conn.send_message(response)

     def on_getdata(self, conn, message):
-        [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
-        [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
-
+        # Drop ignored hashes before recording or answering the requests.
+        # (Deleting list entries while iterating with enumerate() would skip
+        # the entry that shifts into the deleted slot, so filter instead.)
+        message.inv = [i for i in message.inv if i.hash not in self.inv_hash_ignore]
         for i in message.inv:
             if i.type == 1:
                 self.tx_request_map[i.hash] = True
             elif i.type == 2:
                 self.block_request_map[i.hash] = True

+        [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
+        [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
+
     def on_inv(self, conn, message):
         self.lastInv = [x.hash for x in message.inv]

@@ -192,10 +195,10 @@ def veracked():
             return all(node.verack_received for node in self.test_nodes)
         return wait_until(veracked, timeout=10)

-    def wait_for_pings(self, counter):
+    def wait_for_pings(self, counter, attempts=float('inf')):
         def received_pongs():
             return all(node.received_ping_response(counter) for node in self.test_nodes)
-        return wait_until(received_pongs)
+        return wait_until(received_pongs, attempts)

     # sync_blocks: Wait for all connections to request the blockhash given
     # then send get_headers to find out the tip of each node, and synchronize
@@ -217,7 +220,7 @@ def blocks_requested():

         # Send ping and wait for response -- synchronization hack
         [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
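+        # A bounded number of attempts keeps this from hanging forever when
+        # a peer has been disconnected (e.g. by the oversize-block check)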
-        self.wait_for_pings(self.ping_counter)
+        self.wait_for_pings(self.ping_counter, attempts=20)
         self.ping_counter += 1

     # Analogous to sync_block (see above)
2 changes: 2 additions & 0 deletions src/Makefile.am
@@ -100,6 +100,7 @@ BITCOIN_CORE_H = \
   dbwrapper.h \
   limitedmap.h \
   main.h \
+  maxblocksize.h \
   memusage.h \
   merkleblock.h \
   miner.h \
@@ -181,6 +182,7 @@ libbitcoin_server_a_SOURCES = \
   init.cpp \
   dbwrapper.cpp \
   main.cpp \
+  maxblocksize.cpp \
   merkleblock.cpp \
   miner.cpp \
   net.cpp \
2 changes: 2 additions & 0 deletions src/Makefile.test.include
@@ -55,11 +55,13 @@ BITCOIN_TESTS =\
   test/limitedmap_tests.cpp \
   test/dbwrapper_tests.cpp \
   test/main_tests.cpp \
+  test/maxblocksize_tests.cpp \
   test/mempool_tests.cpp \
   test/merkle_tests.cpp \
   test/miner_tests.cpp \
   test/multisig_tests.cpp \
   test/netbase_tests.cpp \
+  test/p2p_protocol_tests.cpp \
   test/pmt_tests.cpp \
   test/policyestimator_tests.cpp \
   test/pow_tests.cpp \
2 changes: 1 addition & 1 deletion src/alert.cpp
@@ -121,7 +121,7 @@ bool CAlert::AppliesTo(int nVersion, const std::string& strSubVerIn) const

 bool CAlert::AppliesToMe() const
 {
-    return AppliesTo(PROTOCOL_VERSION, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<std::string>()));
+    return AppliesTo(PROTOCOL_VERSION, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<std::string>(), 0));
 }

 bool CAlert::RelayTo(CNode* pnode) const