Skip to content

Commit

Permalink
Merge pull request #32 from xelis-project/dev
Browse files Browse the repository at this point in the history
Version 1.9.1
  • Loading branch information
Slixe committed Apr 22, 2024
2 parents be3489c + 6fac86b commit 22f8804
Show file tree
Hide file tree
Showing 16 changed files with 281 additions and 102 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/docker-publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,8 @@ jobs:
push: ${{ env.SHOULD_PUSH }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=registry,ref=xelis/${{ matrix.app }}:buildcache
cache-to: type=registry,ref=xelis/${{ matrix.app }}:buildcache,mode=max
cache-from: type=gha
cache-to: ${{ env.SHOULD_PUSH && 'type=gha,mode=max' || '' }}
build-args: |
app=xelis_${{ matrix.app }}
commit_hash=${{ github.sha }}
Expand Down
14 changes: 7 additions & 7 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ NOTE: It is recommended to use the GetWork WebSocket server to be notified of ne
Mining jobs are sent only when a new block is found or when a new TX is added to the mempool.
Mining software is recommended to update the block timestamp itself (at least every 500ms) for the best network difficulty calculation.

For now, the POW Hashing algorithm is `Blake3`, used until we develop (or choose) our own algorithm.
The POW Hashing algorithm is [xelis-hash](https://github.com/xelis-project/xelis-hash).

## Client Protocol

Expand Down
33 changes: 20 additions & 13 deletions xelis_common/src/block/miner.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
use std::borrow::Cow;

use crate::{
crypto::{
pow_hash_with_scratch_pad,
Hash,
Hashable,
PublicKey,
XelisHashError,
POW_MEMORY_SIZE
pow_hash_with_scratch_pad,
Input,
ScratchPad
},
serializer::{Reader, ReaderError, Serializer, Writer},
time::TimestampMillis,
Expand All @@ -25,7 +27,7 @@ pub struct BlockMiner<'a> {
// Can also be used to spread more the work job and increase its work capacity
extra_nonce: [u8; EXTRA_NONCE_SIZE],
// Cache in case of hashing
cache: Option<[u8; BLOCK_WORK_SIZE]>
cache: Option<Input>
}

impl<'a> BlockMiner<'a> {
Expand Down Expand Up @@ -53,33 +55,38 @@ impl<'a> BlockMiner<'a> {
}

#[inline(always)]
pub fn get_pow_hash(&mut self, scratch_pad: &mut [u64; POW_MEMORY_SIZE]) -> Result<Hash, XelisHashError> {
pub fn get_pow_hash(&mut self, scratch_pad: &mut ScratchPad) -> Result<Hash, XelisHashError> {
if self.cache.is_none() {
self.cache = Some(self.to_bytes().try_into().unwrap());
let mut input = Input::default();
input.as_mut_slice()?[0..BLOCK_WORK_SIZE].copy_from_slice(&self.to_bytes());
self.cache = Some(input);
}

let bytes = self.cache.as_ref().unwrap();
pow_hash_with_scratch_pad(bytes, scratch_pad)
let bytes = self.cache.as_mut().unwrap();
pow_hash_with_scratch_pad(bytes.as_mut_slice()?, scratch_pad)
}

// Mutable access to the extra nonce bytes, letting a miner randomize
// its search space (e.g. to spread work across multiple workers).
// NOTE(review): writing through this reference does not patch the
// cached work bytes used by get_pow_hash (unlike set_timestamp /
// increase_nonce, which update the cache in place) — confirm callers
// invalidate or rebuild the cache after changing the extra nonce.
pub fn get_extra_nonce(&mut self) -> &mut [u8; EXTRA_NONCE_SIZE] {
&mut self.extra_nonce
}

#[inline(always)]
pub fn set_timestamp(&mut self, timestamp: TimestampMillis) {
pub fn set_timestamp(&mut self, timestamp: TimestampMillis) -> Result<(), XelisHashError> {
self.timestamp = timestamp;
if let Some(cache) = &mut self.cache {
cache[32..40].copy_from_slice(&self.timestamp.to_be_bytes());
cache.as_mut_slice()?[32..40].copy_from_slice(&self.timestamp.to_be_bytes());
}

Ok(())
}

#[inline(always)]
pub fn increase_nonce(&mut self) {
pub fn increase_nonce(&mut self) -> Result<(), XelisHashError> {
self.nonce += 1;
if let Some(cache) = &mut self.cache {
cache[40..48].copy_from_slice(&self.nonce.to_be_bytes());
cache.as_mut_slice()?[40..48].copy_from_slice(&self.nonce.to_be_bytes());
}
Ok(())
}

#[inline(always)]
Expand All @@ -100,8 +107,8 @@ impl<'a> BlockMiner<'a> {

impl<'a> Serializer for BlockMiner<'a> {
fn write(&self, writer: &mut Writer) {
if let Some(cache) = self.cache {
writer.write_bytes(&cache);
if let Some(cache) = self.cache.as_ref() {
writer.write_bytes(cache.as_slice().unwrap());
} else {
writer.write_hash(&self.header_work_hash); // 32
writer.write_u64(&self.timestamp); // 32 + 8 = 40
Expand Down
24 changes: 12 additions & 12 deletions xelis_common/src/crypto/hash.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,10 @@ use blake3::hash as blake3_hash;
pub use xelis_hash::{
Error as XelisHashError,
xelis_hash,
xelis_hash_no_scratch_pad,
BYTES_ARRAY_INPUT,
MEMORY_SIZE as POW_MEMORY_SIZE,
ScratchPad,
Input
};

pub const HASH_SIZE: usize = 32; // 32 bytes / 256 bits
Expand Down Expand Up @@ -48,24 +49,23 @@ impl Hash {
}
}

pub fn pow_hash(input: &[u8]) -> Result<Hash, XelisHashError> {
if input.len() > BYTES_ARRAY_INPUT {
return Err(XelisHashError);
}
pub fn pow_hash(work: &[u8]) -> Result<Hash, XelisHashError> {
let mut scratchpad = ScratchPad::default();

// Make sure the input has good alignment
let mut input = Input::default();
let slice = input.as_mut_slice()?;
slice[..work.len()].copy_from_slice(work);

let mut buffer = [0u8; BYTES_ARRAY_INPUT];
buffer[..input.len()].copy_from_slice(input);
xelis_hash_no_scratch_pad(buffer.as_mut_slice()).map(|bytes| Hash::new(bytes))
pow_hash_with_scratch_pad(input.as_mut_slice()?, &mut scratchpad)
}

pub fn pow_hash_with_scratch_pad(input: &[u8], scratch_pad: &mut [u64; POW_MEMORY_SIZE]) -> Result<Hash, XelisHashError> {
pub fn pow_hash_with_scratch_pad(input: &mut [u8; BYTES_ARRAY_INPUT], scratch_pad: &mut ScratchPad) -> Result<Hash, XelisHashError> {
if input.len() > BYTES_ARRAY_INPUT {
return Err(XelisHashError);
}

let mut buffer = [0u8; BYTES_ARRAY_INPUT];
buffer[..input.len()].copy_from_slice(input);
xelis_hash(buffer.as_mut_slice(), scratch_pad).map(|bytes| Hash::new(bytes))
xelis_hash(input, scratch_pad.as_mut_slice()).map(|bytes| Hash::new(bytes))
}

impl Serializer for Hash {
Expand Down
6 changes: 4 additions & 2 deletions xelis_daemon/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,8 @@ pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60;
pub const PEER_FAIL_TIME_RESET: u64 = 30 * 60;
// number of fails before disconnecting the peer
pub const PEER_FAIL_LIMIT: u8 = 50;
// number of fails during handshake before a temp ban
pub const PEER_FAIL_TO_CONNECT_LIMIT: u8 = 3;
// number of seconds to temp ban the peer when the fail limit is reached
// Set to 15 minutes
pub const PEER_TEMP_BAN_TIME: u64 = 15 * 60;
Expand All @@ -129,7 +131,7 @@ pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 15000;
// millis until we timeout during a bootstrap request
pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 60000;
// millis until we timeout during a handshake
pub const PEER_TIMEOUT_INIT_CONNECTION: u64 = 3000;
pub const PEER_TIMEOUT_INIT_CONNECTION: u64 = 5000;
// 16 additional bytes are for AEAD from ChaCha20Poly1305
pub const PEER_MAX_PACKET_SIZE: u32 = MAX_BLOCK_SIZE as u32 + 16;
// Peer TX cache size
Expand Down Expand Up @@ -169,7 +171,7 @@ pub fn get_genesis_block_hash(network: &Network) -> &'static Hash {
match network {
Network::Mainnet => &MAINNET_GENESIS_BLOCK_HASH,
Network::Testnet => &TESTNET_GENESIS_BLOCK_HASH,
Network::Dev => panic!("Dev network has not fix genesis block hash"),
Network::Dev => panic!("Dev network has no fix genesis block hash"),
}
}

Expand Down
30 changes: 16 additions & 14 deletions xelis_daemon/src/core/blockchain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,12 @@ pub struct Config {
///
Note that it may prevent new incoming peers from connecting.
#[clap(long, default_value = "false")]
pub disable_ip_sharing: bool
pub disable_ip_sharing: bool,
/// Disable outgoing connections from peers.
///
/// This is useful for seed nodes under heavy load or for nodes that don't want to connect to others.
#[clap(long, default_value = "false")]
pub disable_outgoing_connections: bool
}

pub struct Blockchain<S: Storage> {
Expand Down Expand Up @@ -313,7 +318,7 @@ impl<S: Storage> Blockchain<S> {
exclusive_nodes.push(addr);
}

match P2pServer::new(config.dir_path, config.tag, config.max_peers, config.p2p_bind_address, Arc::clone(&arc), exclusive_nodes.is_empty(), exclusive_nodes, config.allow_fast_sync, config.allow_boost_sync, config.max_chain_response_size, !config.disable_ip_sharing) {
match P2pServer::new(config.dir_path, config.tag, config.max_peers, config.p2p_bind_address, Arc::clone(&arc), exclusive_nodes.is_empty(), exclusive_nodes, config.allow_fast_sync, config.allow_boost_sync, config.max_chain_response_size, !config.disable_ip_sharing, config.disable_outgoing_connections) {
Ok(p2p) => {
// connect to priority nodes
for addr in config.priority_nodes {
Expand Down Expand Up @@ -2119,18 +2124,15 @@ impl<S: Storage> Blockchain<S> {
if let Err(e) = self.add_tx_to_mempool_with_storage_and_hash(&storage, tx.clone(), tx_hash.clone(), false).await {
debug!("Error while adding back orphaned tx: {}, broadcasting event", e);
// We couldn't add it back to mempool, let's notify this event
if should_track_events.contains(&NotifyEvent::TransactionOrphaned) {
let data = RPCTransaction::from_tx(&tx, &tx_hash, storage.is_mainnet());

let data = TransactionResponse {
blocks: None,
executed_in_block: None,
in_mempool: false,
first_seen: None,
data,
};
events.entry(NotifyEvent::TransactionOrphaned).or_insert_with(Vec::new).push(json!(data));
}
let data = RPCTransaction::from_tx(&tx, &tx_hash, storage.is_mainnet());
let data = TransactionResponse {
blocks: None,
executed_in_block: None,
in_mempool: false,
first_seen: None,
data,
};
events.entry(NotifyEvent::TransactionOrphaned).or_insert_with(Vec::new).push(json!(data));
}
}
}
Expand Down
18 changes: 18 additions & 0 deletions xelis_daemon/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,7 @@ async fn run_prompt<S: Storage>(prompt: ShareablePrompt, blockchain: Arc<Blockch
command_manager.add_command(Command::new("clear_rpc_connections", "Clear all WS connections from RPC", CommandHandler::Async(async_handler!(clear_rpc_connections::<S>))))?;
command_manager.add_command(Command::with_optional_arguments("difficulty_dataset", "Create a dataset for difficulty from chain", vec![Arg::new("output", ArgType::String)], CommandHandler::Async(async_handler!(difficulty_dataset::<S>))))?;
command_manager.add_command(Command::with_optional_arguments("mine_block", "Mine a block on testnet", vec![Arg::new("count", ArgType::Number)], CommandHandler::Async(async_handler!(mine_block::<S>))))?;
command_manager.add_command(Command::new("p2p_outgoing_connections", "Accept/refuse to connect to outgoing nodes", CommandHandler::Async(async_handler!(p2p_outgoing_connections::<S>))))?;


// Don't keep the lock for ever
Expand Down Expand Up @@ -786,3 +787,20 @@ async fn mine_block<S: Storage>(manager: &CommandManager, mut arguments: Argumen
}
Ok(())
}

// Toggle whether this node initiates outgoing P2P connections.
// Reads the current flag from the running P2P server and flips it,
// then reports the new state to the command prompt. Errors if the
// context lock cannot be acquired or the blockchain is missing.
async fn p2p_outgoing_connections<S: Storage>(manager: &CommandManager, _: ArgumentManager) -> Result<(), CommandError> {
    let context = manager.get_context().lock()?;
    let blockchain: &Arc<Blockchain<S>> = context.get()?;

    // Hold the read guard for the duration of the toggle
    let p2p_guard = blockchain.get_p2p().read().await;
    if let Some(p2p) = p2p_guard.as_ref() {
        // `was_disabled == true` means the toggle re-enables connections
        let was_disabled = p2p.is_outgoing_connections_disabled();
        p2p.set_disable_outgoing_connections(!was_disabled);
        let state = if was_disabled { "enabled" } else { "disabled" };
        manager.message(format!("Outgoing connections are now {}", state));
    } else {
        manager.error("P2P is not enabled");
    }

    Ok(())
}
2 changes: 1 addition & 1 deletion xelis_daemon/src/p2p/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ pub enum P2pError {
PeerInvalidPingCoutdown,
#[error(transparent)]
BlockchainError(#[from] Box<BlockchainError>),
#[error("Invalid content in peerlist file")]
#[error("Invalid content in peerlist shared")]
InvalidPeerlist,
#[error("Invalid bootstrap chain step, expected {:?}, got {:?}", _0, _1)]
InvalidBootstrapStep(StepKind, StepKind),
Expand Down

0 comments on commit 22f8804

Please sign in to comment.