Add ExhaustedIndexError for no available bucket ids

onyinyang 2023-07-27 16:05:20 -04:00
parent 471cb8c8c4
commit 93ce3e41b7
No known key found for this signature in database
GPG Key ID: 156A6435430C2036
8 changed files with 156 additions and 113 deletions
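For orientation: the core of this change is a new error type in lox-library and fallible signatures on the bucket-adding functions, so callers handle index exhaustion instead of assuming a bucket id is always available. A condensed sketch assembled from the diff below (the enum is quoted from lox-library; the trailing comment summarizes the caller changes):

    use thiserror::Error;

    #[derive(Error, Debug)]
    pub enum NoAvailableIDError {
        #[error("Find key exhausted with no available index found!")]
        ExhaustedIndexer,
    }

    // add_openinv_bridges, add_spare_bucket and find_next_available_key now return
    // Result<_, NoAvailableIDError>. On failure, the distributor re-queues the affected
    // bridges (append_extra_bridges / unallocated_bridges) rather than panicking.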

View File

@ -60,13 +60,29 @@ impl LoxServerContext {
     pub fn add_openinv_bucket(&self, bucket: [BridgeLine; 3]) {
         let mut ba_obj = self.ba.lock().unwrap();
         let mut db_obj = self.db.lock().unwrap();
-        ba_obj.add_openinv_bridges(bucket, &mut db_obj);
+        match ba_obj.add_openinv_bridges(bucket, &mut db_obj) {
+            Ok(_) => (),
+            Err(e) => {
+                println!("Error: {:?}", e);
+                for bridge in bucket {
+                    self.append_extra_bridges(bridge);
+                }
+            }
+        }
     }
     pub fn add_spare_bucket(&self, bucket: [BridgeLine; 3]) {
         let mut ba_obj = self.ba.lock().unwrap();
         let mut db_obj = self.db.lock().unwrap();
-        ba_obj.add_spare_bucket(bucket, &mut db_obj);
+        match ba_obj.add_spare_bucket(bucket, &mut db_obj) {
+            Ok(_) => (),
+            Err(e) => {
+                println!("Error: {:?}", e);
+                for bridge in bucket {
+                    self.append_extra_bridges(bridge);
+                }
+            }
+        }
     }
     pub fn replace_with_new(&self, bridgeline: BridgeLine) -> lox_library::ReplaceSuccess {

View File

@ -81,7 +81,7 @@ mod tests {
         cred::BucketReachability,
         proto, BridgeAuth, BridgeDb,
     };
-    use lox_utils;
     use rand::RngCore;
     use std::sync::{Arc, Mutex};
@ -103,117 +103,117 @@ mod tests {
     impl LoxClient for LoxClientMock {
         fn invite(&self) -> Request<Body> {
-            let req = Request::builder()
+            Request::builder()
                 .method("POST")
                 .uri("http://localhost/invite")
                 .body(Body::empty())
-                .unwrap();
-            req
+                .unwrap()
         }
         fn reachability(&self) -> Request<Body> {
-            let req = Request::builder()
+            Request::builder()
                 .method("POST")
                 .uri("http://localhost/reachability")
                 .body(Body::empty())
-                .unwrap();
-            req
+                .unwrap()
         }
         fn pubkeys(&self) -> Request<Body> {
-            let req = Request::builder()
+            Request::builder()
                 .method("POST")
                 .uri("http://localhost/pubkeys")
                 .body(Body::empty())
-                .unwrap();
-            req
+                .unwrap()
         }
         fn openinvite(&self, request: proto::open_invite::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/openreq")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn trustpromo(&self, request: proto::trust_promotion::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/trustpromo")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn trustmigration(&self, request: proto::migration::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/trustmig")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn levelup(&self, request: proto::level_up::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/levelup")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn issueinvite(&self, request: proto::issue_invite::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/issueinvite")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn redeeminvite(&self, request: proto::redeem_invite::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/redeem")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn checkblockage(&self, request: proto::check_blockage::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/checkblockage")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
         fn blockagemigration(&self, request: proto::blockage_migration::Request) -> Request<Body> {
             let req_str = serde_json::to_string(&request).unwrap();
-            let req = Request::builder()
+            Request::builder()
                 .header("Content-Type", "application/json")
                 .method("POST")
                 .uri("http://localhost/blockagemigration")
                 .body(Body::from(req_str))
-                .unwrap();
-            req
+                .unwrap()
         }
     }
@ -229,13 +229,13 @@ mod tests {
             // Make 3 x num_buckets open invitation bridges, in sets of 3
             for _ in 0..5 {
                 let bucket = [random(), random(), random()];
-                lox_auth.add_openinv_bridges(bucket, &mut bridgedb);
+                let _ = lox_auth.add_openinv_bridges(bucket, &mut bridgedb);
             }
             // Add hot_spare more hot spare buckets
             for _ in 0..5 {
                 let bucket = [random(), random(), random()];
-                lox_auth.add_spare_bucket(bucket, &mut bridgedb);
+                let _ = lox_auth.add_spare_bucket(bucket, &mut bridgedb);
             }
             // Create the encrypted bridge table
             lox_auth.enc_bridge_table();
@ -261,7 +261,7 @@ mod tests {
             let mut lox_auth = self.context.ba.lock().unwrap();
             let encbuckets = lox_auth.enc_bridge_table();
             let bucket =
-                bridge_table::BridgeTable::decrypt_bucket(id, &key, &encbuckets.get(&id).unwrap())
+                bridge_table::BridgeTable::decrypt_bucket(id, &key, encbuckets.get(&id).unwrap())
                     .unwrap();
             assert!(bucket.1.is_some());
             // Block two of our bridges
@ -275,7 +275,7 @@ mod tests {
             let mut lox_auth = self.context.ba.lock().unwrap();
             let encbuckets2 = lox_auth.enc_bridge_table();
             let bucket2 =
-                bridge_table::BridgeTable::decrypt_bucket(id, &key, &encbuckets2.get(&id).unwrap())
+                bridge_table::BridgeTable::decrypt_bucket(id, &key, encbuckets2.get(&id).unwrap())
                     .unwrap();
             // We should no longer have a Bridge Reachability credential
             assert!(bucket2.1.is_none());

View File

@ -26,7 +26,7 @@ aes-gcm = "0.8"
 base64 = "0.13"
 time = "0.3.21"
 subtle = "2.4"
-thiserror= "1.0.40"
+thiserror = "1.0.40"
 [features]
 default = ["u64_backend"]

View File

@ -258,11 +258,14 @@ pub struct BridgeTable {
     // To prevent issues with a counter for the hashmap keys, we keep a list of keys that
     // no longer match any buckets that can be used before increasing the counter
     pub recycleable_keys: Vec<u32>,
-    // We maintain a list of keys that have been blocked, as well as the date of their blocking
-    // so that they can be repurposed with new buckets eventually
+    // We maintain a list of keys that have been blocked (bucket_id: u32), as well as the
+    // time (julian_date: u32) of their blocking so that they can be repurposed with new
+    // buckets eventually
     pub blocked_keys: Vec<(u32, u32)>,
-    // Similarly, we maintain a list of open entry buckets that will be listed as expired
-    // after 511 days
+    // Similarly, we maintain a list of open entry buckets (bucket_id: u32) and the time they were
+    // created (julian_date: u32) so they will be listed as expired after some amount of time
+    // (e.g., 511 days, which is the maximum time an open-invitation credential would still be valid)
+    // TODO: add open entry buckets to the open_inv_keys only once they have been distributed
     pub open_inv_keys: Vec<(u32, u32)>,
     /// The date the buckets were last encrypted to make the encbucket.
     /// The encbucket must be rebuilt each day so that the Bucket
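Both lists pair a bucket id with a Julian date, so aging entries out reduces to a date comparison against today's Julian day. A hypothetical illustration of that check (the real cleanup lives in clean_up_expired_buckets, which this diff only calls; the helper name and the expiry parameter are made up for the example):

    // Hypothetical helper: collect the bucket ids whose entries are at least
    // `expiry_days` old, given `today` as a Julian day number.
    fn expired_ids(keys: &[(u32, u32)], today: u32, expiry_days: u32) -> Vec<u32> {
        keys.iter()
            .filter(|(_, created)| today.saturating_sub(*created) >= expiry_days)
            .map(|(id, _)| *id)
            .collect()
    }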
@ -279,24 +282,12 @@ impl BridgeTable {
         self.buckets.len()
     }
-    // /// Get today's (real or simulated) date
-    // fn today(&self) -> u32 {
-    //     // We will not encounter negative Julian dates (~6700 years ago)
-    //     // or ones larger than 32 bits
-    //     (time::OffsetDateTime::now_utc().date())
-    //         .to_julian_day()
-    //         .try_into()
-    //         .unwrap()
-    // }
     /// Append a new bucket to the bridge table, returning its index
-    pub fn new_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET], index: u32) {
+    pub fn new_bucket(&mut self, index: u32, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET]) {
         // Pick a random key to encrypt this bucket
         let mut rng = rand::thread_rng();
         let mut key: [u8; 16] = [0; 16];
         rng.fill_bytes(&mut key);
-        // Increase the counter to identify the bucket, wrap value if beyond u32::MAX
-        //self.counter = self.counter.wrapping_add(1);
         self.keys.insert(index, key);
         self.buckets.insert(index, *bucket);
         // TODO: maybe we don't need this if the hashtable can keep track of available bridges
@ -400,7 +391,7 @@ mod tests {
             let bucket: [BridgeLine; 3] =
                 [BridgeLine::random(), Default::default(), Default::default()];
             btable.counter += 1;
-            btable.new_bucket(&bucket, btable.counter);
+            btable.new_bucket(btable.counter, &bucket);
         }
         // And 20 more with three random bridges each
         for _ in 0..20 {
@ -410,7 +401,7 @@ mod tests {
                 BridgeLine::random(),
             ];
             btable.counter += 1;
-            btable.new_bucket(&bucket, btable.counter);
+            btable.new_bucket(btable.counter, &bucket);
         }
         let today: u32 = time::OffsetDateTime::now_utc()
             .date()
@ -421,15 +412,15 @@ mod tests {
         btable.encrypt_table(today, &reachability_priv);
         // Try to decrypt a 1-bridge bucket
         let key7 = btable.keys.get(&7u32).unwrap();
-        let bucket7 = btable.decrypt_bucket_id(7, &key7)?;
+        let bucket7 = btable.decrypt_bucket_id(7, key7)?;
         println!("bucket 7 = {:?}", bucket7);
         // Try to decrypt a 3-bridge bucket
         let key24 = btable.keys.get(&24u32).unwrap();
-        let bucket24 = btable.decrypt_bucket_id(24, &key24)?;
+        let bucket24 = btable.decrypt_bucket_id(24, key24)?;
         println!("bucket 24 = {:?}", bucket24);
         // Try to decrypt a bucket with the wrong key
         let key12 = btable.keys.get(&12u32).unwrap();
-        let res = btable.decrypt_bucket_id(15, &key12).unwrap_err();
+        let res = btable.decrypt_bucket_id(15, key12).unwrap_err();
         println!("bucket key mismatch = {:?}", res);
         Ok(())
     }

View File

@ -22,6 +22,7 @@ pub mod cred;
 pub mod dup_filter;
 pub mod migration_table;
+use chrono::{DateTime, Utc};
 use sha2::Sha512;
 use curve25519_dalek::constants as dalek_constants;
@ -48,6 +49,7 @@ use migration_table::{MigrationTable, MigrationType};
 use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
+use thiserror::Error;
 lazy_static! {
     pub static ref CMZ_A: RistrettoPoint =
@ -65,6 +67,12 @@ pub enum ReplaceSuccess {
     Replaced = 2,
 }
+#[derive(Error, Debug)]
+pub enum NoAvailableIDError {
+    #[error("Find key exhausted with no available index found!")]
+    ExhaustedIndexer,
+}
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct IssuerPrivKey {
     x0tilde: Scalar,
@ -299,18 +307,25 @@ impl BridgeAuth {
         &mut self,
         bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
         bdb: &mut BridgeDb,
-    ) {
-        let bindex = self.find_next_available_key(bdb);
-        self.bridge_table.new_bucket(&bridges, bindex);
+    ) -> Result<(), NoAvailableIDError> {
+        let bindex = match self.find_next_available_key(bdb) {
+            Ok(sindex) => sindex,
+            Err(e) => return Err(e),
+        };
+        self.bridge_table.new_bucket(bindex, &bridges);
         let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
         for b in bridges.iter() {
-            let sindex = self.find_next_available_key(bdb);
+            let sindex = match self.find_next_available_key(bdb) {
+                Ok(sindex) => sindex,
+                Err(e) => return Err(e),
+            };
             single[0] = *b;
-            self.bridge_table.new_bucket(&single, sindex);
+            self.bridge_table.new_bucket(sindex, &single);
             self.bridge_table.open_inv_keys.push((sindex, self.today()));
             bdb.insert_openinv(sindex);
             self.trustup_migration_table.table.insert(sindex, bindex);
         }
+        Ok(())
     }
     /// Insert a hot spare bucket of bridges
@ -318,12 +333,17 @@ impl BridgeAuth {
         &mut self,
         bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
         bdb: &mut BridgeDb,
-    ) {
-        let index = self.find_next_available_key(bdb);
-        self.bridge_table.new_bucket(&bucket, index);
+    ) -> Result<(), NoAvailableIDError> {
+        let index = match self.find_next_available_key(bdb) {
+            Ok(index) => index,
+            Err(e) => return Err(e),
+        };
+        self.bridge_table.new_bucket(index, &bucket);
         self.bridge_table.spares.insert(index);
+        Ok(())
     }
+    // TODO Ensure synchronization of Lox bridge_table with rdsys
     pub fn sync_table(&mut self) {
         // Create a hashtable (?) of bridges in the lox distributor from new resources
@ -351,7 +371,15 @@ impl BridgeAuth {
             for bridge in bucket.iter_mut() {
                 *bridge = self.bridge_table.unallocated_bridges.pop().unwrap();
             }
-            self.add_openinv_bridges(bucket, bdb);
+            match self.add_openinv_bridges(bucket, bdb) {
+                Ok(_) => continue,
+                Err(e) => {
+                    println!("Error: {:?}", e);
+                    for bridge in bucket {
+                        self.bridge_table.unallocated_bridges.push(bridge);
+                    }
+                }
+            }
         }
     }
@ -611,21 +639,24 @@ impl BridgeAuth {
     // does not overwrite existing bridges could become an issue. We keep a list
     // of recycleable lookup keys from buckets that have been removed and prioritize
     // this list before increasing the counter
-    fn find_next_available_key(&mut self, bdb: &mut BridgeDb) -> u32 {
+    fn find_next_available_key(&mut self, bdb: &mut BridgeDb) -> Result<u32, NoAvailableIDError> {
         self.clean_up_expired_buckets(bdb);
         if self.bridge_table.recycleable_keys.is_empty() {
             let mut test_index = 1;
             let mut test_counter = self.bridge_table.counter.wrapping_add(test_index);
             let mut i = 0;
             while self.bridge_table.buckets.contains_key(&test_counter) && i < 5000 {
-                test_index += i;
+                test_index += 1;
                 test_counter = self.bridge_table.counter.wrapping_add(test_index);
                 i += 1;
+                if i == 5000 {
+                    return Err(NoAvailableIDError::ExhaustedIndexer);
+                }
             }
             self.bridge_table.counter = self.bridge_table.counter.wrapping_add(test_index);
-            self.bridge_table.counter
+            Ok(self.bridge_table.counter)
         } else {
-            self.bridge_table.recycleable_keys.pop().unwrap()
+            Ok(self.bridge_table.recycleable_keys.pop().unwrap())
         }
     }
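Since find_next_available_key and the add_* methods now return a Result, downstream code can also propagate the failure with the ? operator instead of matching by hand. A hypothetical caller, using only types and signatures that appear in this diff (the function itself is not part of the codebase):

    // Sketch: propagate NoAvailableIDError with `?` through the new fallible signatures.
    fn add_two_spare_buckets(
        ba: &mut BridgeAuth,
        bdb: &mut BridgeDb,
        first: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
        second: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
    ) -> Result<(), NoAvailableIDError> {
        ba.add_spare_bucket(first, bdb)?;
        ba.add_spare_bucket(second, bdb)?;
        Ok(())
    }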
@ -747,6 +778,11 @@ impl BridgeAuth {
             .unwrap()
     }
+    /// Get today's (real or simulated) date
+    pub fn today_date(&self) -> DateTime<Utc> {
+        Utc::now()
+    }
     /// Get a reference to the encrypted bridge table.
     ///
     /// Be sure to call this function when you want the latest version

View File

@ -4,9 +4,9 @@ BridgeLine::random() or private fields */
 use super::bridge_table::{BridgeLine, BRIDGE_BYTES};
 use super::proto::*;
 use super::*;
+use chrono::{DateTime, NaiveTime, Timelike, Utc};
 use rand::Rng;
-use std::thread;
 use std::time::{Duration, Instant};
struct PerfStat { struct PerfStat {
@ -41,7 +41,7 @@ impl TestHarness {
                 BridgeLine::random(),
                 BridgeLine::random(),
             ];
-            ba.add_openinv_bridges(bucket, &mut bdb);
+            let _ = ba.add_openinv_bridges(bucket, &mut bdb);
         }
         // Add hot_spare more hot spare buckets
         for _ in 0..hot_spare {
@ -50,7 +50,7 @@ impl TestHarness {
                 BridgeLine::random(),
                 BridgeLine::random(),
             ];
-            ba.add_spare_bucket(bucket, &mut bdb);
+            let _ = ba.add_spare_bucket(bucket, &mut bdb);
         }
         // Create the encrypted bridge table
         ba.enc_bridge_table();
@ -609,7 +609,7 @@ fn test_clean_up_blocked() {
         let (_, bob_cred) = th.redeem_invite(&invite);
         th.advance_days(28);
         let (_, _) = th.level_up(&bob_cred);
-        let (_, cred3) = th.level_up(&cred2a);
+        let (_, _cred3) = th.level_up(&cred2a);
     }
     // Block 25% == 25 bridges
     let blocked = 37;
@ -645,7 +645,7 @@ fn test_clean_up_blocked() {
             BridgeLine::random(),
         ];
         // Add new bridges to trigger bucket cleanup, but also open invitation cleanup so 150 buckets - blocked
-        th.ba.add_openinv_bridges(bucket, &mut th.bdb);
+        let _ = th.ba.add_openinv_bridges(bucket, &mut th.bdb);
     }
     // 150 is the number of open invitation buckets that will be cleared, + the number of blocked bridges
@ -702,7 +702,7 @@ fn test_clean_up_open_entry() {
"Open entry keys should be 0" "Open entry keys should be 0"
); );
assert!( assert!(
th.ba.trustup_migration_table.table.len() == 0, th.ba.trustup_migration_table.table.is_empty(),
"There should be no remaining eligible trust up migrations" "There should be no remaining eligible trust up migrations"
); );
assert!( assert!(
@ -729,7 +729,7 @@ fn test_clean_up_open_entry() {
             BridgeLine::random(),
         ];
         // Add new bridges to trigger bucket cleanup
-        th.ba.add_openinv_bridges(bucket, &mut th.bdb);
+        let _ = th.ba.add_openinv_bridges(bucket, &mut th.bdb);
     }
     println!(
         "The number of trustup migrations after adding 10 new buckets is: {:?}",
@ -741,7 +741,7 @@ fn test_clean_up_open_entry() {
     );
     // Let's also make sure that open invitation works again
-    let cred = th.open_invite().1 .0;
+    let _cred = th.open_invite().1 .0;
     th.advance_days(30);
     assert!(
@ -772,11 +772,11 @@ fn test_find_next_available_key() {
"There should be 50*3 openinv buckets + 50 superset buckets +50 spare buckets" "There should be 50*3 openinv buckets + 50 superset buckets +50 spare buckets"
); );
assert!( assert!(
th.ba.bridge_table.recycleable_keys.len() == 0, th.ba.bridge_table.recycleable_keys.is_empty(),
"There should be no recyclable keys" "There should be no recyclable keys"
); );
assert!( assert!(
th.ba.bridge_table.blocked_keys.len() == 0, th.ba.bridge_table.blocked_keys.is_empty(),
"There should be no blocked keys" "There should be no blocked keys"
); );
assert!( assert!(
@ -793,7 +793,7 @@ fn test_find_next_available_key() {
"Blocked keys should be 45" "Blocked keys should be 45"
); );
assert!( assert!(
th.ba.bridge_table.recycleable_keys.len() == 0, th.ba.bridge_table.recycleable_keys.is_empty(),
"There should be no recyclable keys" "There should be no recyclable keys"
); );
let bucket = [ let bucket = [
@ -802,14 +802,14 @@ fn test_find_next_available_key() {
         BridgeLine::random(),
     ];
     // Add new bucket to trigger bucket cleanup and find_next_available_key
-    th.ba.add_spare_bucket(bucket, &mut th.bdb);
+    let _ = th.ba.add_spare_bucket(bucket, &mut th.bdb);
     // No recyclable keys yet so counter should increase
     assert!(
         th.ba.bridge_table.counter == 251,
         "There should be 50*3 openinv buckets + 50 superset buckets +50 spare buckets"
     );
     assert!(
-        th.ba.bridge_table.recycleable_keys.len() == 0,
+        th.ba.bridge_table.recycleable_keys.is_empty(),
         "There should be no recyclable keys"
     );
     assert!(
@ -828,7 +828,7 @@ fn test_find_next_available_key() {
         BridgeLine::random(),
     ];
     // Add new bridges to trigger bucket cleanup
-    th.ba.add_spare_bucket(bucket, &mut th.bdb);
+    let _ = th.ba.add_spare_bucket(bucket, &mut th.bdb);
     // Now all keys should be cleaned up so the counter won't move
     assert!(
         th.ba.bridge_table.counter == 251,
@ -840,11 +840,11 @@ fn test_find_next_available_key() {
"There should be no recyclable keys" "There should be no recyclable keys"
); );
assert!( assert!(
th.ba.bridge_table.blocked_keys.len() == 0, th.ba.bridge_table.blocked_keys.is_empty(),
"There should be no blocked keys" "There should be no blocked keys"
); );
assert!( assert!(
th.ba.bridge_table.open_inv_keys.len() == 0, th.ba.bridge_table.open_inv_keys.is_empty(),
"There should be 150 open inv keys" "There should be 150 open inv keys"
); );
} }
@ -871,7 +871,7 @@ fn block_bridges(th: &mut TestHarness, to_block: usize) {
         let ba_clone = th.ba.bridge_table.buckets.clone();
         if let Some(bridgelines) = ba_clone.get(&u32::try_from(index).unwrap()) {
             for bridgeline in bridgelines {
-                th.ba.bridge_unreachable(&bridgeline, &mut th.bdb);
+                th.ba.bridge_unreachable(bridgeline, &mut th.bdb);
             }
         }
     }
@ -946,10 +946,10 @@ fn test_update_bridge() {
     // Create changed info for bridgeline to be updated to
     let infostr: String = format!(
         "type={} blocked_in={:?} protocol={} distribution={}",
-        "obfs2 test bridge".to_string(),
+        "obfs2 test bridge",
         {},
-        "obfs2".to_string(),
-        "moat".to_string(),
+        "obfs2",
+        "moat",
     );
     let mut updated_info_bytes: [u8; BRIDGE_BYTES - 26] = [0; BRIDGE_BYTES - 26];
@ -977,7 +977,7 @@ fn test_update_bridge() {
println!("The two bridgelines are not equal before the update"); println!("The two bridgelines are not equal before the update");
// Add 3 bridges to test harness // Add 3 bridges to test harness
th.ba.add_openinv_bridges(bucket, &mut th.bdb); let _ = th.ba.add_openinv_bridges(bucket, &mut th.bdb);
println!("Before update spares = {:?}", th.ba.bridge_table.spares); println!("Before update spares = {:?}", th.ba.bridge_table.spares);
println!( println!(
@ -1028,9 +1028,9 @@ fn test_bridge_replace() {
     let table_size = th.ba.bridge_table.buckets.len();
     let mut num = 100000;
     while !th.ba.bridge_table.buckets.contains_key(&num) {
-        num = rand::thread_rng().gen_range(0, th.ba.bridge_table.counter) as u32;
+        num = rand::thread_rng().gen_range(0, th.ba.bridge_table.counter);
     }
-    let replaceable_bucket = th.ba.bridge_table.buckets.get(&num).unwrap().clone();
+    let replaceable_bucket = *th.ba.bridge_table.buckets.get(&num).unwrap();
     let replacement_bridge = &replaceable_bucket[0];
     assert!(
         th.ba
@ -1131,7 +1131,7 @@ fn test_bridge_replace() {
"Number of buckets changed size" "Number of buckets changed size"
); );
assert!( assert!(
th.ba.bridge_table.unallocated_bridges.len() == 0, th.ba.bridge_table.unallocated_bridges.is_empty(),
"Allocated bridge still in unallocated bridges" "Allocated bridge still in unallocated bridges"
); );
@ -1140,7 +1140,7 @@ fn test_bridge_replace() {
"spare" => { "spare" => {
// Case three: available_bridge == null and unallocated_bridges == null // Case three: available_bridge == null and unallocated_bridges == null
assert!( assert!(
th.ba.bridge_table.unallocated_bridges.len() == 0, th.ba.bridge_table.unallocated_bridges.is_empty(),
"Unallocated bridges should have a length of 0" "Unallocated bridges should have a length of 0"
); );
assert!( assert!(
@ -1170,7 +1170,7 @@ fn test_bridge_replace() {
"failed" => { "failed" => {
// Case four: available_bridge == None and unallocated_bridges == None and spare buckets == None // Case four: available_bridge == None and unallocated_bridges == None and spare buckets == None
assert!( assert!(
th.ba.bridge_table.unallocated_bridges.len() == 0, th.ba.bridge_table.unallocated_bridges.is_empty(),
"Unallocated bridges should have a length of 0" "Unallocated bridges should have a length of 0"
); );
assert!( assert!(
@ -1190,7 +1190,7 @@ fn test_bridge_replace() {
"Number of buckets changed size" "Number of buckets changed size"
); );
assert!( assert!(
th.ba.bridge_table.unallocated_bridges.len() == 0, th.ba.bridge_table.unallocated_bridges.is_empty(),
"Unallocated bridges changed size" "Unallocated bridges changed size"
); );
println!("No bridges available to replace bridge so replacement gracefully failed"); println!("No bridges available to replace bridge so replacement gracefully failed");

View File

@ -137,7 +137,7 @@ mod tests {
         assert_ne!(res, Poll::Pending);
         if let Poll::Ready(Some(diff)) = res {
             assert_eq!(diff.new, None);
-            assert_eq!(diff.full_update, true);
+            assert!(diff.full_update);
         }
     }
@ -151,14 +151,14 @@ mod tests {
         tx.send(chunk2).await.unwrap();
         let mut diffs = ResourceStream::new(rx);
         let mut res = Pin::new(&mut diffs).poll_next(&mut cx);
-        while let Poll::Pending = res {
+        while res.is_pending() {
             res = Pin::new(&mut diffs).poll_next(&mut cx);
         }
         assert_ne!(res, Poll::Ready(None));
         assert_ne!(res, Poll::Pending);
         if let Poll::Ready(Some(diff)) = res {
             assert_eq!(diff.new, None);
-            assert_eq!(diff.full_update, true);
+            assert!(diff.full_update);
         }
     }
@ -177,24 +177,24 @@ mod tests {
         tx.send(chunk4).await.unwrap();
         let mut diffs = ResourceStream::new(rx);
         let mut res = Pin::new(&mut diffs).poll_next(&mut cx);
-        while let Poll::Pending = res {
+        while res.is_pending() {
             res = Pin::new(&mut diffs).poll_next(&mut cx);
         }
         assert_ne!(res, Poll::Ready(None));
         assert_ne!(res, Poll::Pending);
         if let Poll::Ready(Some(diff)) = res {
             assert_eq!(diff.new, None);
-            assert_eq!(diff.full_update, true);
+            assert!(diff.full_update);
         }
         res = Pin::new(&mut diffs).poll_next(&mut cx);
-        while let Poll::Pending = res {
+        while res.is_pending() {
             res = Pin::new(&mut diffs).poll_next(&mut cx);
         }
         assert_ne!(res, Poll::Ready(None));
         assert_ne!(res, Poll::Pending);
         if let Poll::Ready(Some(diff)) = res {
             assert_eq!(diff.new, None);
-            assert_eq!(diff.full_update, true);
+            assert!(diff.full_update);
         }
     }
 }

View File

@ -199,7 +199,7 @@ mod tests {
         assert_ne!(diff.new, None);
         assert_eq!(diff.changed, None);
         assert_eq!(diff.gone, None);
-        assert_eq!(diff.full_update, true);
+        assert!(diff.full_update);
         if let Some(new) = diff.new {
             if let Some(obfs2) = &new["obfs2"] {
                 assert_eq!(obfs2[0].r#type, "obfs2");
@ -255,6 +255,6 @@ mod tests {
         assert_ne!(diff.new, None);
         assert_eq!(diff.changed, None);
         assert_eq!(diff.gone, None);
-        assert_eq!(diff.full_update, true);
+        assert!(diff.full_update);
     }
 }