Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion api/src/handlers/pool_api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ impl PoolPushHandler {
.chain_head()
.context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
let res = tx_pool
.add_to_pool(source, tx, !fluff, &header)
.add_to_pool(&source, &tx, !fluff, &header)
.context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
Ok(res)
}),
Expand Down
2 changes: 1 addition & 1 deletion chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -525,7 +525,7 @@ impl Chain {
let height = self.next_block_height()?;
let txhashset = self.txhashset.read();
txhashset::utxo_view(&txhashset, |utxo| {
utxo.verify_coinbase_maturity(&tx.inputs(), height)?;
utxo.verify_coinbase_maturity(tx.inputs(), height)?;
Ok(())
})
}
Expand Down
2 changes: 1 addition & 1 deletion chain/src/txhashset/utxo_view.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ impl<'a> UTXOView<'a> {

/// Verify we are not attempting to spend any coinbase outputs
/// that have not sufficiently matured.
pub fn verify_coinbase_maturity(&self, inputs: &Vec<Input>, height: u64) -> Result<(), Error> {
pub fn verify_coinbase_maturity(&self, inputs: &[Input], height: u64) -> Result<(), Error> {
// Find the greatest output pos of any coinbase
// outputs we are attempting to spend.
let pos = inputs
Expand Down
6 changes: 3 additions & 3 deletions core/src/core/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -480,7 +480,7 @@ impl Block {
let all_kernels = Vec::from_iter(all_kernels);

// Initialize a tx body and sort everything.
let body = TransactionBody::init(all_inputs, all_outputs, all_kernels, false)?;
let body = TransactionBody::init(&all_inputs, &all_outputs, &all_kernels, false)?;

// Finally return the full block.
// Note: we have not actually validated the block here,
Expand Down Expand Up @@ -509,7 +509,7 @@ impl Block {
// A block is just a big transaction, aggregate and add the reward output
// and reward kernel. At this point the tx is technically invalid but the
// tx body is valid if we account for the reward (i.e. as a block).
let agg_tx = transaction::aggregate(txs)?
let agg_tx = transaction::aggregate(&txs)?
.with_output(reward_out)
.with_kernel(reward_kern);

Expand Down Expand Up @@ -598,7 +598,7 @@ impl Block {
let kernels = self.kernels().clone();

// Initialize tx body and sort everything.
let body = TransactionBody::init(inputs, outputs, kernels, false)?;
let body = TransactionBody::init(&inputs, &outputs, &kernels, false)?;

Ok(Block {
header: self.header,
Expand Down
62 changes: 30 additions & 32 deletions core/src/core/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -451,7 +451,7 @@ impl Readable for TransactionBody {
let kernels = read_multi(reader, kernel_len)?;

// Initialize tx body and verify everything is sorted.
let body = TransactionBody::init(inputs, outputs, kernels, true)
let body = TransactionBody::init(&inputs, &outputs, &kernels, true)
.map_err(|_| ser::Error::CorruptedData)?;

Ok(body)
Expand Down Expand Up @@ -499,15 +499,15 @@ impl TransactionBody {
/// the provided inputs, outputs and kernels.
/// Guarantees inputs, outputs, kernels are sorted lexicographically.
pub fn init(
inputs: Vec<Input>,
outputs: Vec<Output>,
kernels: Vec<TxKernel>,
inputs: &[Input],
outputs: &[Output],
kernels: &[TxKernel],
verify_sorted: bool,
) -> Result<TransactionBody, Error> {
let mut body = TransactionBody {
inputs,
outputs,
kernels,
inputs: inputs.to_vec(),
outputs: outputs.to_vec(),
kernels: kernels.to_vec(),
};

if verify_sorted {
Expand Down Expand Up @@ -830,7 +830,7 @@ impl Transaction {

/// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels
pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
pub fn new(inputs: &[Input], outputs: &[Output], kernels: &[TxKernel]) -> Transaction {
let offset = BlindingFactor::zero();

// Initialize a new tx body and sort everything.
Expand Down Expand Up @@ -877,7 +877,7 @@ impl Transaction {
}

/// Get inputs
pub fn inputs(&self) -> &Vec<Input> {
pub fn inputs(&self) -> &[Input] {
&self.body.inputs
}

Expand All @@ -887,7 +887,7 @@ impl Transaction {
}

/// Get outputs
pub fn outputs(&self) -> &Vec<Output> {
pub fn outputs(&self) -> &[Output] {
&self.body.outputs
}

Expand All @@ -897,7 +897,7 @@ impl Transaction {
}

/// Get kernels
pub fn kernels(&self) -> &Vec<TxKernel> {
pub fn kernels(&self) -> &[TxKernel] {
&self.body.kernels
}

Expand Down Expand Up @@ -990,12 +990,12 @@ pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result
}

/// Aggregate a vec of txs into a multi-kernel tx with cut_through.
pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
// convenience short-circuiting
if txs.is_empty() {
return Ok(Transaction::empty());
} else if txs.len() == 1 {
return Ok(txs.pop().unwrap());
return Ok(txs.first().unwrap().clone());
}
let mut n_inputs = 0;
let mut n_outputs = 0;
Expand All @@ -1013,13 +1013,13 @@ pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
// we will sum these together at the end to give us the overall offset for the
// transaction
let mut kernel_offsets: Vec<BlindingFactor> = Vec::with_capacity(txs.len());
for mut tx in txs {
for tx in txs {
// we will sum these later to give a single aggregate offset
kernel_offsets.push(tx.offset);

inputs.append(&mut tx.body.inputs);
outputs.append(&mut tx.body.outputs);
kernels.append(&mut tx.body.kernels);
inputs.extend_from_slice(tx.inputs());
outputs.extend_from_slice(tx.outputs());
kernels.extend_from_slice(tx.kernels());
}

// Sort inputs and outputs during cut_through.
Expand All @@ -1037,14 +1037,14 @@ pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
// * cut-through outputs
// * full set of tx kernels
// * sum of all kernel offsets
let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
let tx = Transaction::new(&inputs, &outputs, &kernels).with_offset(total_kernel_offset);

Ok(tx)
}

/// Attempt to deaggregate a multi-kernel transaction based on multiple
/// transactions
pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transaction, Error> {
pub fn deaggregate(mk_tx: &Transaction, txs: &[Transaction]) -> Result<Transaction, Error> {
let mut inputs: Vec<Input> = vec![];
let mut outputs: Vec<Output> = vec![];
let mut kernels: Vec<TxKernel> = vec![];
Expand All @@ -1055,19 +1055,19 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact

let tx = aggregate(txs)?;

for mk_input in mk_tx.body.inputs {
for mk_input in mk_tx.inputs() {
if !tx.body.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
inputs.push(mk_input);
inputs.push(mk_input.clone());
}
}
for mk_output in mk_tx.body.outputs {
for mk_output in mk_tx.outputs() {
if !tx.body.outputs.contains(&mk_output) && !outputs.contains(&mk_output) {
outputs.push(mk_output);
outputs.push(mk_output.clone());
}
}
for mk_kernel in mk_tx.body.kernels {
for mk_kernel in mk_tx.kernels() {
if !tx.body.kernels.contains(&mk_kernel) && !kernels.contains(&mk_kernel) {
kernels.push(mk_kernel);
kernels.push(mk_kernel.clone());
}
}

Expand Down Expand Up @@ -1102,7 +1102,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
kernels.sort_unstable();

// Build a new tx from the above data.
let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
let tx = Transaction::new(&inputs, &outputs, &kernels).with_offset(total_kernel_offset);
Ok(tx)
}

Expand Down Expand Up @@ -1312,14 +1312,12 @@ impl Output {
Ok(())
}

/// Batch validates the range proofs using the commitments
pub fn batch_verify_proofs(
commits: &Vec<Commitment>,
proofs: &Vec<RangeProof>,
) -> Result<(), Error> {
/// Batch validates the range proofs using the commitments.
/// TODO - can verify_bullet_proof_multi be reworked to take slices?
pub fn batch_verify_proofs(commits: &[Commitment], proofs: &[RangeProof]) -> Result<(), Error> {
let secp = static_secp_instance();
secp.lock()
.verify_bullet_proof_multi(commits.clone(), proofs.clone(), None)?;
.verify_bullet_proof_multi(commits.to_vec(), proofs.to_vec(), None)?;
Ok(())
}
}
Expand Down
7 changes: 7 additions & 0 deletions p2p/src/peer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
// limitations under the License.

use crate::util::{Mutex, RwLock};
use std::fmt;
use std::fs::File;
use std::net::{Shutdown, TcpStream};
use std::sync::Arc;
Expand Down Expand Up @@ -54,6 +55,12 @@ pub struct Peer {
connection: Option<Mutex<conn::Tracker>>,
}

// Manual Debug impl: a Peer holds non-Debug state (connection tracker,
// adapter), so we surface only its `info`, wrapped in a "Peer(..)" tag.
impl fmt::Debug for Peer {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.write_fmt(format_args!("Peer({:?})", self.info))
	}
}

impl Peer {
// Only accept and connect can be externally used to build a peer
fn new(info: PeerInfo, adapter: Arc<dyn NetAdapter>) -> Peer {
Expand Down
113 changes: 56 additions & 57 deletions p2p/src/peers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ pub struct Peers {
pub adapter: Arc<dyn ChainAdapter>,
store: PeerStore,
peers: RwLock<HashMap<PeerAddr, Arc<Peer>>>,
dandelion_relay: RwLock<Option<(i64, Arc<Peer>)>>,
config: P2PConfig,
}

Expand All @@ -48,7 +47,6 @@ impl Peers {
store,
config,
peers: RwLock::new(HashMap::new()),
dandelion_relay: RwLock::new(None),
}
}

Expand Down Expand Up @@ -87,38 +85,39 @@ impl Peers {
self.save_peer(&peer_data)
}

// Update the dandelion relay
pub fn update_dandelion_relay(&self) {
let peers = self.outgoing_connected_peers();

let peer = &self
.config
.dandelion_peer
.and_then(|ip| peers.iter().find(|x| x.info.addr == ip))
.or(thread_rng().choose(&peers));

match peer {
Some(peer) => self.set_dandelion_relay(peer),
None => debug!("Could not update dandelion relay"),
}
}

fn set_dandelion_relay(&self, peer: &Arc<Peer>) {
// Clear the map and add new relay
let dandelion_relay = &self.dandelion_relay;
dandelion_relay
.write()
.replace((Utc::now().timestamp(), peer.clone()));
debug!(
"Successfully updated Dandelion relay to: {}",
peer.info.addr
);
}

// Get the dandelion relay
pub fn get_dandelion_relay(&self) -> Option<(i64, Arc<Peer>)> {
self.dandelion_relay.read().clone()
}
// // Update the dandelion relay
// pub fn update_dandelion_relay(&self) {
// let peers = self.outgoing_connected_peers();
//
// let peer = &self
// .config
// .dandelion_peer
// .and_then(|ip| peers.iter().find(|x| x.info.addr == ip))
// .or(thread_rng().choose(&peers));
//
// match peer {
// Some(peer) => self.set_dandelion_relay(peer),
// None => debug!("Could not update dandelion relay"),
// }
// }
//
// fn set_dandelion_relay(&self, peer: &Arc<Peer>) {
// // Clear the map and add new relay
// let dandelion_relay = &self.dandelion_relay;
// dandelion_relay.write().clear();
// dandelion_relay
// .write()
// .insert(Utc::now().timestamp(), peer.clone());
// debug!(
// "Successfully updated Dandelion relay to: {}",
// peer.info.addr
// );
// }
//
// // Get the dandelion relay
// pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<Peer>> {
// self.dandelion_relay.read().clone()
// }

pub fn is_known(&self, addr: PeerAddr) -> bool {
self.peers.read().contains_key(&addr)
Expand Down Expand Up @@ -335,29 +334,29 @@ impl Peers {
);
}

/// Relays the provided stem transaction to our single stem peer.
pub fn relay_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
self.get_dandelion_relay()
.or_else(|| {
debug!("No dandelion relay, updating.");
self.update_dandelion_relay();
self.get_dandelion_relay()
})
// If still return an error, let the caller handle this as they see fit.
// The caller will "fluff" at this point as the stem phase is finished.
.ok_or(Error::NoDandelionRelay)
.map(|(_, relay)| {
if relay.is_connected() {
if let Err(e) = relay.send_stem_transaction(tx) {
debug!("Error sending stem transaction to peer relay: {:?}", e);
}
}
})
}

/// Broadcasts the provided transaction to PEER_PREFERRED_COUNT of our
/// peers. We may be connected to PEER_MAX_COUNT peers so we only
/// want to broadcast to a random subset of peers.
// /// Relays the provided stem transaction to our single stem peer.
// pub fn relay_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
// let dandelion_relay = self.get_dandelion_relay();
// if dandelion_relay.is_empty() {
// debug!("No dandelion relay, updating.");
// self.update_dandelion_relay();
// }
// // If still return an error, let the caller handle this as they see fit.
// // The caller will "fluff" at this point as the stem phase is finished.
// if dandelion_relay.is_empty() {
// return Err(Error::NoDandelionRelay);
// }
// for relay in dandelion_relay.values() {
// if relay.is_connected() {
// if let Err(e) = relay.send_stem_transaction(tx) {
// debug!("Error sending stem transaction to peer relay: {:?}", e);
// }
// }
// }
// Ok(())
// }

/// Broadcasts the provided transaction to PEER_MAX_COUNT of our peers.
/// A peer implementation may drop the broadcast request
/// if it knows the remote peer already has the transaction.
pub fn broadcast_transaction(&self, tx: &core::Transaction) {
Expand Down
4 changes: 1 addition & 3 deletions pool/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,4 @@ pub mod transaction_pool;
pub mod types;

pub use crate::transaction_pool::TransactionPool;
pub use crate::types::{
BlockChain, DandelionConfig, PoolAdapter, PoolConfig, PoolEntryState, PoolError, TxSource,
};
pub use crate::types::{BlockChain, DandelionConfig, PoolAdapter, PoolConfig, PoolError, TxSource};
Loading