Add testing for blocked bucket cleanup

This commit is contained in:
onyinyang 2023-07-10 17:03:10 -04:00
parent 3c88f4471b
commit a62b537aa6
No known key found for this signature in database
GPG Key ID: 156A6435430C2036
3 changed files with 274 additions and 113 deletions

View File

@ -279,118 +279,23 @@ impl BridgeTable {
self.buckets.len()
}
/// Get today's (real or simulated) date
fn today(&self) -> u32 {
    // Convert the current UTC date to its Julian day number. Negative
    // Julian days (~6700 years ago) or values beyond 32 bits cannot occur
    // here, so the narrowing conversion is infallible in practice.
    let julian_day = time::OffsetDateTime::now_utc().date().to_julian_day();
    julian_day.try_into().unwrap()
}
// This function looks for and removes buckets so their indexes can be reused
// This should include buckets that have been blocked for a sufficiently long period
// that we no longer want to allow migration to, or else, open-entry buckets that
// have been unblocked long enough to become trusted and whose users' credentials
// would have expired (after 511 days)
fn clean_up_expired_buckets(&mut self) {
    // Recycle bucket indexes whose contents have aged out. A credential is
    // valid for at most 511 days, so a bucket that has been blocked (or an
    // open-invitation bucket created) more than 511 days ago can be reclaimed.
    // TODO: consider including migration tables and checking the age of
    // "from" buckets as well.
    //
    // Hoisted: `self.today()` is loop-invariant; the old code re-evaluated it
    // inside every closure call.
    let today = self.today();

    // --- Blocked buckets ---
    // BUG FIX: the expiry test previously read `x.1 > self.today() + 511`,
    // which compares a (past) recorded day against a date 511 days in the
    // future and therefore never matches. An entry is expired when its
    // recorded day plus the 511-day credential lifetime is before today.
    if self.blocked_keys.iter().any(|&x| x.1 + 511 < today) {
        // Separate the expired blockages from the fresh ones.
        let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
            .blocked_keys
            .clone()
            .into_iter()
            .partition(|&x| x.1 + 511 < today);
        for (index, _blocked_on) in expired {
            // Check each bridge line in the bucket; any line not reset to
            // default (port > 0) is assumed still reachable, so move it to
            // the unallocated pool and drop it from `reachable` until it is
            // reallocated. (When syncing resources we may have to reallocate
            // this bridge, but if it hasn't already been blocked that is
            // acceptable.)
            let bridgelines = self
                .buckets
                .get(&index)
                .expect("blocked index missing from bucket table");
            for bridgeline in bridgelines {
                if bridgeline.port > 0 {
                    self.unallocated_bridges.push(*bridgeline);
                    // `remove` is a no-op if the line was already gone.
                    self.reachable.remove(bridgeline);
                }
            }
            // Remove the bucket and its key, and make the index reusable.
            self.buckets.remove(&index);
            self.keys.remove(&index);
            self.recycleable_keys.push(index);
        }
        // Keep only the fresh blockages.
        self.blocked_keys = fresh;
    }

    // --- Open-invitation buckets ---
    // Same expiry rule (and the same comparison fix) as above.
    if self.open_inv_keys.iter().any(|&x| x.1 + 511 < today) {
        let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
            .open_inv_keys
            .clone()
            .into_iter()
            .partition(|&x| x.1 + 511 < today);
        for (index, _created_on) in expired {
            self.buckets.remove(&index);
            self.keys.remove(&index);
            self.recycleable_keys.push(index);
        }
        self.open_inv_keys = fresh;
    }
}
// Since buckets are moved around in the bridge_table, finding a lookup key that
// does not overwrite existing bridges could become an issue. We keep a list
// of recycleable lookup keys from buckets that have been removed and prioritize
// this list before increasing the counter
fn find_next_available_key(&mut self) -> u32 {
    // Reclaim indexes freed by expired buckets before minting new ones.
    // TODO: this function probably should be moved to lib.rs to handle
    // trustup and migration tables too.
    self.clean_up_expired_buckets();
    if let Some(recycled) = self.recycleable_keys.pop() {
        recycled
    } else {
        // Linearly probe from counter+1 for an index not already in use,
        // wrapping at u32::MAX, bounded to 5000 probes.
        // BUG FIX: the probe previously advanced with `test_index += i`,
        // which re-tests counter+1 twice and then skips candidates
        // (offsets 1, 1, 2, 4, 7, ...). Advancing by 1 gives the intended
        // linear scan.
        // NOTE(review): if all 5000 probes find occupied keys, this still
        // returns the last (occupied) candidate — confirm that is intended.
        let mut test_index = 1;
        let mut test_counter = self.counter.wrapping_add(test_index);
        let mut i = 0;
        while self.buckets.contains_key(&test_counter) && i < 5000 {
            test_index += 1;
            test_counter = self.counter.wrapping_add(test_index);
            i += 1;
        }
        self.counter = self.counter.wrapping_add(test_index);
        self.counter
    }
}
// /// Get today's (real or simulated) date
// fn today(&self) -> u32 {
// // We will not encounter negative Julian dates (~6700 years ago)
// // or ones larger than 32 bits
// (time::OffsetDateTime::now_utc().date())
// .to_julian_day()
// .try_into()
// .unwrap()
// }
/// Append a new bucket to the bridge table, returning its index
pub fn new_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET]) -> u32 {
pub fn new_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET], index: u32) -> u32 {
// Pick a random key to encrypt this bucket
let mut rng = rand::thread_rng();
let mut key: [u8; 16] = [0; 16];
rng.fill_bytes(&mut key);
// Increase the counter to identify the bucket, wrap value if beyond u32::MAX
let index = self.find_next_available_key();
//self.counter = self.counter.wrapping_add(1);
self.keys.insert(index, key);
self.buckets.insert(index, *bucket);
@ -495,7 +400,8 @@ mod tests {
for _ in 0..20 {
let bucket: [BridgeLine; 3] =
[BridgeLine::random(), Default::default(), Default::default()];
btable.new_bucket(&bucket);
btable.counter += 1;
btable.new_bucket(&bucket, btable.counter);
}
// And 20 more with three random bridges each
for _ in 0..20 {
@ -504,7 +410,8 @@ mod tests {
BridgeLine::random(),
BridgeLine::random(),
];
btable.new_bucket(&bucket);
btable.counter += 1;
btable.new_bucket(&bucket, btable.counter);
}
let today: u32 = time::OffsetDateTime::now_utc()
.date()
@ -514,15 +421,15 @@ mod tests {
// Create the encrypted bridge table
btable.encrypt_table(today, &reachability_priv);
// Try to decrypt a 1-bridge bucket
let key7 = btable.keys[&7u32];
let key7 = btable.keys.get(&7u32).unwrap();
let bucket7 = btable.decrypt_bucket_id(7, &key7)?;
println!("bucket 7 = {:?}", bucket7);
// Try to decrypt a 3-bridge bucket
let key24 = btable.keys[&24u32];
let key24 = btable.keys.get(&24u32).unwrap();
let bucket24 = btable.decrypt_bucket_id(24, &key24)?;
println!("bucket 24 = {:?}", bucket24);
// Try to decrypt a bucket with the wrong key
let key12 = btable.keys[&12u32];
let key12 = btable.keys.get(&12u32).unwrap();
let res = btable.decrypt_bucket_id(15, &key12).unwrap_err();
println!("bucket key mismatch = {:?}", res);
Ok(())

View File

@ -300,11 +300,13 @@ impl BridgeAuth {
bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
bdb: &mut BridgeDb,
) {
let bnum = self.bridge_table.new_bucket(&bridges);
let index = self.find_next_available_key();
let bnum = self.bridge_table.new_bucket(&bridges, index);
let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
for b in bridges.iter() {
let index = self.find_next_available_key();
single[0] = *b;
let snum = self.bridge_table.new_bucket(&single);
let snum = self.bridge_table.new_bucket(&single, index);
self.bridge_table.open_inv_keys.push((snum, self.today()));
bdb.insert_openinv(snum);
self.trustup_migration_table.table.insert(snum, bnum);
@ -313,7 +315,8 @@ impl BridgeAuth {
/// Insert a hot spare bucket of bridges
pub fn add_spare_bucket(&mut self, bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET]) {
let bnum = self.bridge_table.new_bucket(&bucket);
let index = self.find_next_available_key();
let bnum = self.bridge_table.new_bucket(&bucket, index);
self.bridge_table.spares.insert(bnum);
}
@ -600,6 +603,117 @@ impl BridgeAuth {
res
}
// Since buckets are moved around in the bridge_table, finding a lookup key that
// does not overwrite existing bridges could become an issue. We keep a list
// of recycleable lookup keys from buckets that have been removed and prioritize
// this list before increasing the counter
fn find_next_available_key(&mut self) -> u32 {
    // Reclaim indexes freed by expired buckets before minting new ones.
    // TODO: this function probably should be moved to lib.rs to handle
    // trustup and migration tables too.
    self.clean_up_expired_buckets();
    if let Some(recycled) = self.bridge_table.recycleable_keys.pop() {
        recycled
    } else {
        // Linearly probe from counter+1 for an index not already in use,
        // wrapping at u32::MAX, bounded to 5000 probes.
        // BUG FIX: the probe previously advanced with `test_index += i`,
        // which re-tests counter+1 twice and then skips candidates
        // (offsets 1, 1, 2, 4, 7, ...). Advancing by 1 gives the intended
        // linear scan.
        // NOTE(review): if all 5000 probes find occupied keys, this still
        // returns the last (occupied) candidate — confirm that is intended.
        let mut test_index = 1;
        let mut test_counter = self.bridge_table.counter.wrapping_add(test_index);
        let mut i = 0;
        while self.bridge_table.buckets.contains_key(&test_counter) && i < 5000 {
            test_index += 1;
            test_counter = self.bridge_table.counter.wrapping_add(test_index);
            i += 1;
        }
        self.bridge_table.counter = self.bridge_table.counter.wrapping_add(test_index);
        self.bridge_table.counter
    }
}
// This function looks for and removes buckets so their indexes can be reused
// This should include buckets that have been blocked for a sufficiently long period
// that we no longer want to allow migration to, or else, open-entry buckets that
// have been unblocked long enough to become trusted and whose users' credentials
// would have expired (after 511 days)
pub fn clean_up_expired_buckets(&mut self) {
    // Recycle bucket indexes whose contents have aged out: a credential is
    // valid for at most 511 days, so buckets blocked for longer than that
    // (and, eventually, equally old open-invitation buckets) can be
    // reclaimed for reuse.
    // TODO: consider including migration tables and checking the age of
    // "from" buckets as well.
    // First, recycle blocked indexes that are old enough to be replaced.
    self.clean_up_blocked();
    // Next, do the same for open-invitation buckets.
    // NOTE(review): open-entry cleanup is currently disabled — confirm
    // whether this is intentional before re-enabling the call below.
    // self.clean_up_open_entry();
}
fn clean_up_blocked(&mut self) {
    // Recycle buckets blocked more than 511 days ago (the maximum validity
    // of a credential in days): no credential can still migrate from them.
    // Perhaps 511 should be changed to an earlier time.
    //
    // Hoisted: `self.today()` is loop-invariant; it was previously
    // re-evaluated inside every `any`/`partition` closure call.
    let today = self.today();
    if self
        .bridge_table
        .blocked_keys
        .iter()
        .any(|&x| x.1 + 511 < today)
    {
        // Separate the expired blockages from the fresh ones.
        let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
            .bridge_table
            .blocked_keys
            .clone()
            .into_iter()
            .partition(|&x| x.1 + 511 < today);
        for (index, _blocked_on) in expired {
            // Check each bridge line in the bucket; any line not reset to
            // default (port > 0) is assumed still reachable, so move it to
            // the unallocated pool and drop it from `reachable` until it is
            // reallocated. (When syncing resources we may have to reallocate
            // this bridge, but if it hasn't already been blocked that is
            // acceptable.)
            let bridgelines = self
                .bridge_table
                .buckets
                .get(&index)
                .expect("blocked index missing from bucket table");
            for bridgeline in bridgelines {
                if bridgeline.port > 0 {
                    self.bridge_table.unallocated_bridges.push(*bridgeline);
                    // `remove` is a no-op if the line was already gone.
                    self.bridge_table.reachable.remove(bridgeline);
                }
            }
            // Remove the bucket and its key, and make the index reusable.
            self.bridge_table.buckets.remove(&index);
            self.bridge_table.keys.remove(&index);
            self.bridge_table.recycleable_keys.push(index);
        }
        // Keep only the fresh blockages.
        self.bridge_table.blocked_keys = fresh;
    }
}
fn clean_up_open_entry(&mut self) {
    // Recycle open-invitation buckets old enough (511 days, the maximum
    // validity of a credential) that any credential issued from them has
    // expired. Perhaps 511 should be changed to an earlier time.
    //
    // Hoisted: `self.today()` is loop-invariant; it was previously
    // re-evaluated inside every `any`/`partition` closure call.
    let today = self.today();
    if self
        .bridge_table
        .open_inv_keys
        .iter()
        .any(|&x| x.1 + 511 < today)
    {
        // Separate the expired indexes from the fresh ones.
        let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
            .bridge_table
            .open_inv_keys
            .clone()
            .into_iter()
            .partition(|&x| x.1 + 511 < today);
        for (index, _created_on) in expired {
            // Remove the bucket and its key, and make the index reusable.
            self.bridge_table.buckets.remove(&index);
            self.bridge_table.keys.remove(&index);
            self.bridge_table.recycleable_keys.push(index);
        }
        // Keep only the fresh open-invitation indexes.
        self.bridge_table.open_inv_keys = fresh;
    }
}
#[cfg(test)]
/// For testing only: manually advance the day by 1 day
pub fn advance_day(&mut self) {

View File

@ -4,7 +4,9 @@ BridgeLine::random() or private fields */
use super::bridge_table::{BridgeLine, BRIDGE_BYTES};
use super::proto::*;
use super::*;
use chrono::{DateTime, NaiveTime, Timelike, Utc};
use rand::Rng;
use std::thread;
use std::time::{Duration, Instant};
struct PerfStat {
@ -173,6 +175,7 @@ impl TestHarness {
let bucket =
bridge_table::BridgeTable::decrypt_bucket(id, &key, encbuckets.get(&id).unwrap())
.unwrap();
let reachcred = bucket.1.unwrap();
// Use the Bucket Reachability credential to advance to the next
@ -591,6 +594,143 @@ fn test_redeem_invite() {
println!("bob_cred = {:?}", bob_cred);
}
#[test]
fn test_clean_up_blocked() {
    // End-to-end check of blocked-bucket cleanup: build a harness with 50
    // open-invitation buckets and 50 hot spares, promote 100 users so their
    // credentials reference trusted buckets, block 25% of the blockable
    // buckets, then advance past the 511-day credential lifetime and verify
    // that clean_up_blocked() recycles exactly the blocked indexes.
    let mut th = TestHarness::new_buckets(50, 50);
    let mut credentials: Vec<cred::Lox> = Vec::new();
    // Users
    for _ in 0..100 {
        // Level-up is date-sensitive: if we are within a minute of the UTC
        // day rollover, wait it out so one user's flow does not straddle
        // two different days.
        let h: NaiveTime = DateTime::time(&Utc::now());
        if h.hour() == 23 && h.minute() == 59 {
            println!("Wait for UTC 00:00");
            thread::sleep(Duration::new(60, 0));
            println!("Ready to work again");
        }
        let cred = th.open_invite().1 .0;
        th.advance_days(30);
        let (_, migcred) = th.trust_promotion(&cred);
        let (_, cred1) = th.level0_migration(&cred, &migcred);
        th.advance_days(14);
        let (_, cred2) = th.level_up(&cred1);
        let (_, (cred2a, invite)) = th.issue_invite(&cred2);
        let (_, bob_cred) = th.redeem_invite(&invite);
        th.advance_days(28);
        let (_, _) = th.level_up(&bob_cred);
        let (_, cred3) = th.level_up(&cred2a);
        credentials.push(cred3);
    }
    // Block 25% == 25 bridges
    let blocked = block_bridges(&mut th, 25, credentials);
    println!("\n**AFTER 25% BLOCKING EVENT**\n");
    assert!(
        blocked == th.ba.bridge_table.blocked_keys.len(),
        "Blocked keys does not equal the number of blocked buckets"
    );
    // BUG FIX: this assertion previously carried an empty failure message.
    assert!(
        th.ba.bridge_table.recycleable_keys.is_empty(),
        "No keys should be recycleable before cleanup"
    );
    // Each open invitation bucket creates 4 buckets -> 50*4 = 200 + 50 hotspare buckets = 250 total
    assert!(
        th.ba.bridge_table.counter == 250,
        "Total number of buckets should be 250"
    );
    // Advance beyond the time that blocked buckets expire
    th.advance_days(512);
    th.ba.clean_up_blocked();
    assert!(
        th.ba.bridge_table.blocked_keys.is_empty(),
        "Blocked keys should be empty after cleanup"
    );
    assert!(th.ba.bridge_table.recycleable_keys.len() == blocked, "The number of recycleable keys should be equal to the number of previously blocked buckets");
    // Each open invitation bucket creates 4 buckets -> 50*4 = 200 + 50 hotspare buckets = 250 total
    assert!(
        th.ba.bridge_table.counter == 250,
        "Total number of buckets should be 250"
    );
    for _ in 0..10 {
        let bucket = [
            BridgeLine::random(),
            BridgeLine::random(),
            BridgeLine::random(),
        ];
        // Add new bridges to trigger bucket cleanup
        th.ba.add_openinv_bridges(bucket, &mut th.bdb);
    }
    println!(
        "Size of recyclable keys: {:?}",
        th.ba.bridge_table.recycleable_keys.len()
    );
    println!("Counter: {:?}", th.ba.bridge_table.counter);
    println!("\n**AFTER NEW BUCKETS ADDED**\n");
    assert!(
        th.ba.bridge_table.blocked_keys.is_empty(),
        "After adding new buckets, blocked keys should be empty"
    );
    assert!(
        th.ba.bridge_table.recycleable_keys.is_empty(),
        "After adding new buckets, recycleable keys should be empty"
    );
    // Because of open-entry buckets added and open-entry cleanup, the counter increases to 278
    println!("Counter: {:?}", th.ba.bridge_table.counter);
}
#[test]
// TODO(review): placeholder — no assertions yet for open-entry bucket cleanup.
fn test_clean_up_open_entry() {}
#[test]
// TODO(review): placeholder — no assertions yet for key recycling/probing.
fn find_next_available_key() {}
/// Blocks a percentage of the bridges for the passed Test Harness
/// excluding the hot spare buckets as they will not have been handed out.
/// The logic assumes hot spare buckets are appended to the end of the bridge_table
/// bucket list.
fn block_bridges(th: &mut TestHarness, percentage: usize, credentials: Vec<cred::Lox>) -> usize {
    // Buckets eligible for blocking: everything except hot spares and
    // never-handed-out open-invitation buckets.
    let blockable_num = th.ba.bridge_table.buckets.len()
        - th.ba.bridge_table.spares.len()
        - th.bdb.openinv_buckets.len();
    let blockable_range = th.ba.bridge_table.buckets.len() - th.ba.bridge_table.spares.len();
    let to_block: usize = blockable_num * percentage / 100;
    // Pick distinct random bucket indexes, skipping open-invitation buckets.
    let mut block_index: HashSet<usize> = HashSet::new();
    let mut rng = rand::thread_rng();
    while block_index.len() < to_block {
        let rand_num = rng.gen_range(0, blockable_range);
        if !th.bdb.openinv_buckets.contains(&(rand_num as u32)) {
            block_index.insert(rand_num);
        }
    }
    for index in block_index {
        // PERF FIX: the previous version cloned the ENTIRE bucket map on
        // every iteration just to read one bucket. Copy only the bucket we
        // need (BridgeLine is Copy); this still observes the current table
        // state at the start of each iteration, as before.
        let bucket = th
            .ba
            .bridge_table
            .buckets
            .get(&u32::try_from(index).unwrap())
            .copied();
        if let Some(bridgelines) = bucket {
            for bridgeline in &bridgelines {
                th.ba.bridge_unreachable(bridgeline, &mut th.bdb);
            }
        }
    }
    // For each credential, count how many of its bucket's bridges are still
    // reachable; if fewer than 2 remain, migrate the user off the blockage.
    for cred in credentials {
        let (id, key) = bridge_table::from_scalar(cred.bucket).unwrap();
        let encbuckets = th.ba.enc_bridge_table();
        let bucket =
            bridge_table::BridgeTable::decrypt_bucket(id, &key, encbuckets.get(&id).unwrap())
                .unwrap();
        let mut count = 0;
        for bridge_line in &bucket.0 {
            if th.ba.bridge_table.reachable.contains_key(bridge_line) {
                count += 1;
            }
        }
        if count < 2 {
            let (_perf_stat, migration) = th.check_blockage(&cred);
            let (_block_perf_stat, _) = th.blockage_migration(&cred, &migration);
        }
    }
    // Return how many buckets were blocked.
    to_block
}
#[test]
fn test_allocate_bridges() {
let mut th = TestHarness::new();