diff --git a/src/bank.rs b/src/bank.rs
index 7358b69a1dadfb..3da9d367dfeec2 100644
--- a/src/bank.rs
+++ b/src/bank.rs
@@ -314,7 +314,12 @@ impl Bank {
                 result?;
             }
         }
-        self.register_entry_id(&entry.id);
+        // TODO: verify this is ok in cases like:
+        //  1. an untrusted genesis or tx-.log
+        //  2. a crazy leader..
+        if !entry.has_more {
+            self.register_entry_id(&entry.id);
+        }
     }
     Ok(self.entry_count())
 }
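The bank-side intent of `has_more` is easiest to see in isolation: a transaction batch that was split across several entries should consume only one slot in the bank's entry_id cache used for duplicate rejection. A minimal sketch of that replay behavior, assuming a simple FIFO cache (the struct, the `MAX_ENTRY_IDS` bound, and `process_entry` are illustrative stand-ins, not the real `Bank`):

    use std::collections::VecDeque;

    /// Stand-in for the bank's recent-entry-id cache.
    struct ReplaySketch {
        last_ids: VecDeque<[u8; 32]>,
    }

    impl ReplaySketch {
        /// Record an entry id, evicting the oldest once the cache is full.
        fn register_entry_id(&mut self, id: &[u8; 32]) {
            const MAX_ENTRY_IDS: usize = 1024; // made-up bound for the sketch
            if self.last_ids.len() >= MAX_ENTRY_IDS {
                self.last_ids.pop_front();
            }
            self.last_ids.push_back(*id);
        }

        /// Mirrors the hunk above: apply an entry, but register its id only
        /// when it is the final entry of its batch.
        fn process_entry(&mut self, id: &[u8; 32], has_more: bool) {
            // ... verify and apply the entry's transactions here ...
            if !has_more {
                self.register_entry_id(id);
            }
        }
    }

    fn main() {
        let mut bank = ReplaySketch { last_ids: VecDeque::new() };
        bank.process_entry(&[1u8; 32], true); // first half of a split batch
        bank.process_entry(&[2u8; 32], false); // final entry registers its id
        assert_eq!(bank.last_ids.len(), 1);
    }

The TODO above is the flip side of this: an untrusted ledger or a misbehaving leader could mark every entry `has_more` and keep ids out of the cache entirely, which is why the check is flagged for further verification.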
diff --git a/src/entry.rs b/src/entry.rs
index 44382cacf4e952..704c246943dda8 100644
--- a/src/entry.rs
+++ b/src/entry.rs
@@ -35,29 +35,57 @@ pub struct Entry {
     /// generated. They may have been observed before a previous Entry ID but were
     /// pushed back into this list to ensure deterministic interpretation of the ledger.
     pub transactions: Vec<Transaction>,
+
+    /// Indication that:
+    ///  1. the next Entry in the ledger has transactions that can potentially
+    ///     be verified in parallel with these transactions
+    ///  2. this Entry can be left out of the bank's entry_id cache for
+    ///     purposes of duplicate rejection
+    pub has_more: bool,
+
+    /// Erasure requires that Entry be a multiple of 4 bytes in size
+    pad: [u8; 3],
 }
 
 impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
-    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
+    pub fn new(
+        start_hash: &Hash,
+        cur_hashes: u64,
+        transactions: Vec<Transaction>,
+        has_more: bool,
+    ) -> Self {
         let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
         let id = next_hash(start_hash, 0, &transactions);
         let entry = Entry {
             num_hashes,
             id,
             transactions,
+            has_more,
+            pad: [0, 0, 0],
         };
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
     }
 
+    pub fn will_fit(transactions: Vec<Transaction>) -> bool {
+        serialized_size(&Entry {
+            num_hashes: 0,
+            id: Hash::default(),
+            transactions,
+            has_more: false,
+            pad: [0, 0, 0],
+        }).unwrap() <= BLOB_DATA_SIZE as u64
+    }
+
     /// Creates the next Tick Entry `num_hashes` after `start_hash`.
     pub fn new_mut(
         start_hash: &mut Hash,
         cur_hashes: &mut u64,
         transactions: Vec<Transaction>,
+        has_more: bool,
     ) -> Self {
-        let entry = Self::new(start_hash, *cur_hashes, transactions);
+        let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
         *start_hash = entry.id;
         *cur_hashes = 0;
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
@@ -71,6 +99,8 @@ impl Entry {
             num_hashes,
             id: *id,
             transactions: vec![],
+            has_more: false,
+            pad: [0, 0, 0],
         }
     }
 
@@ -119,6 +149,8 @@ pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transac
         num_hashes,
         id: next_hash(start_hash, num_hashes, &transactions),
         transactions,
+        has_more: false,
+        pad: [0, 0, 0],
     }
 }
diff --git a/src/ledger.rs b/src/ledger.rs
--- a/src/ledger.rs
+++ b/src/ledger.rs
@@ ... @@ pub fn next_entries_mut(
     start_hash: &mut Hash,
     cur_hashes: &mut u64,
     transactions: Vec<Transaction>,
 ) -> Vec<Entry> {
     if transactions.is_empty() {
-        vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
+        vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
     } else {
         let mut chunk_len = transactions.len();
 
         // check for fit, make sure they can be serialized
-        while serialized_size(&Entry {
-            num_hashes: 0,
-            id: Hash::default(),
-            transactions: transactions[0..chunk_len].to_vec(),
-        }).unwrap() > BLOB_DATA_SIZE as u64
-        {
+        while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
             chunk_len /= 2;
         }
 
-        let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
+        let mut num_chunks = if transactions.len() % chunk_len == 0 {
+            transactions.len() / chunk_len
+        } else {
+            transactions.len() / chunk_len + 1
+        };
+
+        let mut entries = Vec::with_capacity(num_chunks);
 
         for chunk in transactions.chunks(chunk_len) {
-            entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
+            num_chunks -= 1;
+            entries.push(Entry::new_mut(
+                start_hash,
+                cur_hashes,
+                chunk.to_vec(),
+                num_chunks > 0,
+            ));
         }
         entries
     }
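The split strategy above is easier to follow with the serialization stripped away: halve the chunk length until a chunk fits, then flag every chunk except the last. A sketch with a fake fit predicate standing in for `Entry::will_fit` (the 3-items-per-chunk capacity is made up):

    /// Halve the chunk length until a chunk fits, then emit one `has_more`
    /// flag per chunk: true for every chunk except the last. (The real code
    /// handles the empty-transactions case separately, as above.)
    fn split_flags(num_items: usize, fits: impl Fn(usize) -> bool) -> Vec<bool> {
        let mut chunk_len = num_items;
        while !fits(chunk_len) {
            chunk_len /= 2;
        }
        // the same ceiling division the hunk spells out with an if/else
        let mut num_chunks = (num_items + chunk_len - 1) / chunk_len;
        let mut flags = Vec::with_capacity(num_chunks);
        for _chunk_start in (0..num_items).step_by(chunk_len) {
            num_chunks -= 1;
            flags.push(num_chunks > 0); // the `has_more` analogue
        }
        flags
    }

    fn main() {
        // Pretend at most 3 items fit per entry: 7 items halve from 7 to 3,
        // then chunk as 3 + 3 + 1, flagged [true, true, false].
        assert_eq!(split_flags(7, |len| len <= 3), vec![true, true, false]);
    }

Because the search halves geometrically, the chosen chunk length lands within roughly a factor of two of the largest that fits, at the cost of a few oversized `will_fit` probes.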
@@ -112,29 +119,11 @@ mod tests {
     use super::*;
     use entry::{next_entry, Entry};
     use hash::hash;
-    use packet::BlobRecycler;
+    use packet::{BlobRecycler, BLOB_DATA_SIZE};
     use signature::{KeyPair, KeyPairUtil};
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
     use transaction::Transaction;
 
-    /// Create a vector of Entries of length `transaction_batches.len()`
-    /// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
-    fn next_entries_batched(
-        start_hash: &Hash,
-        cur_hashes: u64,
-        transaction_batches: Vec<Vec<Transaction>>,
-    ) -> Vec<Entry> {
-        let mut id = *start_hash;
-        let mut entries = vec![];
-        let mut num_hashes = cur_hashes;
-
-        for transactions in transaction_batches {
-            let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
-            entries.append(&mut entry_batch);
-        }
-        entries
-    }
-
     #[test]
     fn test_verify_slice() {
         let zero = Hash::default();
@@ -142,9 +131,9 @@ mod tests {
         assert!(vec![][..].verify(&zero)); // base case
         assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
         assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
-        assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
+        assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
 
-        let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
+        let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
         bad_ticks[1].id = one;
         assert!(!bad_ticks.verify(&zero)); // inductive step, bad
     }
@@ -177,26 +166,41 @@ mod tests {
     }
 
     #[test]
-    fn test_next_entries_batched() {
-        // this also tests next_entries, ugly, but is an easy way to do vec of vec (batch)
-        let mut id = Hash::default();
+    fn test_next_entries() {
+        let id = Hash::default();
         let next_id = hash(&id);
         let keypair = KeyPair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
 
-        let transactions = vec![tx0; 5];
-        let transaction_batches = vec![transactions.clone(); 5];
-        let entries0 = next_entries_batched(&id, 0, transaction_batches);
-
-        assert_eq!(entries0.len(), 5);
-
-        let mut entries1 = vec![];
-        for _ in 0..5 {
-            let entry = next_entry(&id, 1, transactions.clone());
-            id = entry.id;
-            entries1.push(entry);
-        }
-        assert_eq!(entries0, entries1);
+        // NOTE: if Entry grows to be larger than a transaction, the code below falls over
+        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
+
+        // verify no split
+        let transactions = vec![tx0.clone(); threshold];
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert_eq!(entries0.len(), 1);
+        assert!(entries0.verify(&id));
+
+        // verify the split
+        let transactions = vec![tx0.clone(); threshold * 2];
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert_eq!(entries0.len(), 2);
+        assert!(entries0[0].has_more);
+        assert!(!entries0[entries0.len() - 1].has_more);
+
+        assert!(entries0.verify(&id));
+        // test hand-construction... brittle, changes if split method changes... ?
+        // let mut entries1 = vec![];
+        // entries1.push(Entry::new(&id, 1, transactions[..threshold].to_vec(), true));
+        // id = entries1[0].id;
+        // entries1.push(Entry::new(
+        //     &id,
+        //     1,
+        //     transactions[threshold..].to_vec(),
+        //     false,
+        // ));
+        //
+        // assert_eq!(entries0, entries1);
     }
 }
diff --git a/src/mint.rs b/src/mint.rs
index 46c5a0315305a1..a08de36e16069e 100644
--- a/src/mint.rs
+++ b/src/mint.rs
@@ -53,8 +53,8 @@ impl Mint {
     }
 
     pub fn create_entries(&self) -> Vec<Entry> {
-        let e0 = Entry::new(&self.seed(), 0, vec![]);
-        let e1 = Entry::new(&e0.id, 0, self.create_transactions());
+        let e0 = Entry::new(&self.seed(), 0, vec![], false);
+        let e1 = Entry::new(&e0.id, 0, self.create_transactions(), false);
         vec![e0, e1]
     }
 }
diff --git a/src/recorder.rs b/src/recorder.rs
index 1056cab3c358bc..68293a3d75fa24 100644
--- a/src/recorder.rs
+++ b/src/recorder.rs
@@ -39,6 +39,7 @@ impl Recorder {
                 &mut self.last_hash,
                 &mut self.num_hashes,
                 vec![],
+                false,
             ))
         } else {
             None
diff --git a/src/tvu.rs b/src/tvu.rs
index 014beb06ec4009..784e2621a22146 100644
--- a/src/tvu.rs
+++ b/src/tvu.rs
@@ -210,7 +210,7 @@ pub mod tests {
         let transfer_amount = 501;
         let bob_keypair = KeyPair::new();
         for i in 0..num_transfers {
-            let entry0 = Entry::new(&cur_hash, i, vec![]);
+            let entry0 = Entry::new(&cur_hash, i, vec![], false);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
 
@@ -222,7 +222,7 @@
             );
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
-            let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
+            let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0], false);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
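A detail running through all of these call sites is the `pad: [u8; 3]` that now travels with the one-byte `has_more`: together they keep a serialized `Entry` a multiple of 4 bytes, as the struct comment requires for erasure. A standalone check of that arithmetic with a stand-in struct (assumes bincode 1.x and its default fixed-width u64 length prefix for `Vec`; not the real `Entry` or `Transaction` types):

    // [dependencies] bincode = "1", serde = { version = "1", features = ["derive"] }
    use serde::Serialize;

    #[derive(Serialize)]
    struct EntrySketch {
        num_hashes: u64,       // 8 bytes
        id: [u8; 32],          // 32 bytes
        transactions: Vec<u8>, // 8-byte length prefix + payload
        has_more: bool,        // 1 byte
        pad: [u8; 3],          // 3 bytes: rounds 49 up to 52
    }

    fn main() {
        let entry = EntrySketch {
            num_hashes: 0,
            id: [0; 32],
            transactions: vec![],
            has_more: false,
            pad: [0; 3],
        };
        // 8 + 32 + 8 + 1 + 3 = 52 bytes, a multiple of 4.
        assert_eq!(bincode::serialized_size(&entry).unwrap(), 52);
    }

With the 256-byte transaction size that `test_next_entries` assumes, every appended transaction keeps the serialized total a multiple of 4.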