Add more comprehensive logic for tidying unneeded buckets

This commit is contained in:
onyinyang 2023-07-07 16:53:10 -04:00
parent 40f1e88a31
commit d2d09bccc1
No known key found for this signature in database
GPG Key ID: 156A6435430C2036
2 changed files with 60 additions and 13 deletions

View File

@ -261,6 +261,9 @@ pub struct BridgeTable {
// We maintain a list of keys that have been blocked, as well as the date of their blocking // We maintain a list of keys that have been blocked, as well as the date of their blocking
// so that they can be repurposed with new buckets eventually // so that they can be repurposed with new buckets eventually
pub blocked_keys: Vec<(u32, u32)>, pub blocked_keys: Vec<(u32, u32)>,
// Similarly, we maintain a list of open entry buckets that will be listed as expired
// after 511 days
pub open_inv_keys: Vec<(u32, u32)>,
/// The date the buckets were last encrypted to make the encbucket. /// The date the buckets were last encrypted to make the encbucket.
/// The encbucket must be rebuilt each day so that the Bucket /// The encbucket must be rebuilt each day so that the Bucket
/// Reachability credentials in the buckets can be refreshed. /// Reachability credentials in the buckets can be refreshed.
@ -286,37 +289,76 @@ impl BridgeTable {
.unwrap() .unwrap()
} }
// Since buckets are moved around in the bridge_table, finding a lookup key that // This function looks for and removes buckets so their indexes can be reused
// does not overwrite existing bridges could become an issue. We keep a list // This should include buckets that have been blocked for a sufficiently long period
// of recycleable lookup keys from buckets that have been removed and prioritize // that we no longer want to allow migration to them, or else, open-entry buckets
// this list before increasing the counter // have been unblocked long enough to become trusted and whose users' credentials
fn find_next_available_key(&mut self) -> u32 { // would have expired (after 511 days)
fn clean_up_expired_buckets(&mut self) {
// Consider including migration tables and check age of from buckets
// If an open-invitation bucket is more than 511 days old, it should be recycled
// If a blocked bridge is more than ?? 511 (the maximum validity of a credential in days)? days old, it should also be recycled
// First check if there are any blocked indexes that are old enough to be replaced // First check if there are any blocked indexes that are old enough to be replaced
if !self.blocked_keys.is_empty() if !self.blocked_keys.is_empty()
&& self.blocked_keys.iter().any(|&x| x.1 > self.today() + 30) && self.blocked_keys.iter().any(|&x| x.1 > self.today() + 511)
//Perhaps 30 should be changed to a later/earlier time //Perhaps 511 should be changed to an earlier time
{ {
let blocked_keys_clone = self.blocked_keys.clone(); let blocked_keys_clone = self.blocked_keys.clone();
// If so, separate them from the fresh blockages // If so, separate them from the fresh blockages
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = blocked_keys_clone let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = blocked_keys_clone
.into_iter() .into_iter()
.partition(|&x| x.1 > self.today() + 30); .partition(|&x| x.1 > self.today() + 511);
for item in expired { for item in expired {
let new_item = item.0; let new_item = item.0;
// get the single bridge line and remove the open entry index from the reachable bridges
let bridge_line = self.buckets.get(&new_item).unwrap();
for item in self.reachable.get(bridge_line) {}
self.buckets.remove(&new_item);
self.keys.remove(&new_item);
//and add them to the recyclable keys //and add them to the recyclable keys
self.recycleable_keys.push(new_item); self.recycleable_keys.push(new_item);
} }
// update the blocked_keys vector to only include the fresh keys // update the blocked_keys vector to only include the fresh keys
self.blocked_keys = fresh self.blocked_keys = fresh
} }
// First check if there are any open invitation indexes that are old enough to be replaced
if !self.open_inv_keys.is_empty()
&& self.open_inv_keys.iter().any(|&x| x.1 > self.today() + 511)
//Perhaps 511 should be changed to an earlier time
{
let open_inv_keys_clone = self.open_inv_keys.clone();
// If so, separate them from the fresh open invitation indexes
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = open_inv_keys_clone
.into_iter()
.partition(|&x| x.1 > self.today() + 511);
for item in expired {
let new_item = item.0;
self.buckets.remove(&new_item);
self.keys.remove(&new_item);
//and add them to the recyclable keys
self.recycleable_keys.push(new_item);
}
// update the open_inv_keys vector to only include the fresh keys
self.open_inv_keys = fresh
}
}
// Since buckets are moved around in the bridge_table, finding a lookup key that
// does not overwrite existing bridges could become an issue. We keep a list
// of recycleable lookup keys from buckets that have been removed and prioritize
// this list before increasing the counter
fn find_next_available_key(&mut self) -> u32 {
self.clean_up_expired_buckets();
if self.recycleable_keys.is_empty() { if self.recycleable_keys.is_empty() {
let mut test_index = 1; let mut test_index = 1;
let mut test_counter = self.counter.wrapping_add(test_index); let mut test_counter = self.counter.wrapping_add(test_index);
while self.buckets.contains_key(&test_counter) { let mut i = 0;
for i in 0..5000 { while self.buckets.contains_key(&test_counter) && i < 5000 {
test_index += i; test_index += i;
test_counter = self.counter.wrapping_add(test_index); test_counter = self.counter.wrapping_add(test_index);
} i += 1;
} }
self.counter = self.counter.wrapping_add(test_index); self.counter = self.counter.wrapping_add(test_index);
self.counter self.counter
@ -324,6 +366,7 @@ impl BridgeTable {
self.recycleable_keys.pop().unwrap() self.recycleable_keys.pop().unwrap()
} }
} }
/// Append a new bucket to the bridge table, returning its index /// Append a new bucket to the bridge table, returning its index
pub fn new_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET]) -> u32 { pub fn new_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET]) -> u32 {
// Pick a random key to encrypt this bucket // Pick a random key to encrypt this bucket

View File

@ -305,6 +305,7 @@ impl BridgeAuth {
for b in bridges.iter() { for b in bridges.iter() {
single[0] = *b; single[0] = *b;
let snum = self.bridge_table.new_bucket(&single); let snum = self.bridge_table.new_bucket(&single);
self.bridge_table.open_inv_keys.push((snum, self.today()));
bdb.insert_openinv(snum); bdb.insert_openinv(snum);
self.trustup_migration_table.table.insert(snum, bnum); self.trustup_migration_table.table.insert(snum, bnum);
} }
@ -325,6 +326,9 @@ impl BridgeAuth {
// How to check for bridges that aren't there/are extra? // How to check for bridges that aren't there/are extra?
// After going through the update, make sure bridges in the table are the same and deal with discrepancies // After going through the update, make sure bridges in the table are the same and deal with discrepancies
// This will be the bad/annoying part // This will be the bad/annoying part
//also use open_inv_keys and blocked_keys from bridge_table to remove expired keys from table.
// make sure this happens before they are removed from the structures in the bridge table
} }
pub fn allocate_bridges( pub fn allocate_bridges(