2021-04-26 22:44:59 -04:00
|
|
|
|
/*! Implementation of a new style of bridge authority for Tor that
|
|
|
|
|
allows users to invite other users, while protecting the social graph
|
|
|
|
|
from the bridge authority itself.
|
|
|
|
|
|
|
|
|
|
We use CMZ14 credentials (GGM version, which is more efficient, but
|
|
|
|
|
makes a stronger security assumption): "Algebraic MACs and
|
|
|
|
|
Keyed-Verification Anonymous Credentials" (Chase, Meiklejohn, and
|
|
|
|
|
Zaverucha, CCS 2014)
|
|
|
|
|
|
|
|
|
|
The notation follows that of the paper "Hyphae: Social Secret Sharing"
|
|
|
|
|
(Lovecruft and de Valence, 2017), Section 4. */
|
|
|
|
|
|
|
|
|
|
// We really want points to be capital letters and scalars to be
|
|
|
|
|
// lowercase letters
|
|
|
|
|
#![allow(non_snake_case)]
|
|
|
|
|
|
|
|
|
|
#[macro_use]
|
|
|
|
|
extern crate zkp;
|
|
|
|
|
|
2021-04-27 13:00:18 -04:00
|
|
|
|
pub mod bridge_table;
|
2021-04-28 13:36:04 -04:00
|
|
|
|
pub mod cred;
|
2021-04-27 08:53:22 -04:00
|
|
|
|
pub mod dup_filter;
|
2021-04-29 15:18:54 -04:00
|
|
|
|
pub mod migration_table;
|
2021-04-27 08:53:22 -04:00
|
|
|
|
|
2023-07-27 16:05:20 -04:00
|
|
|
|
use chrono::{DateTime, Utc};
|
2021-04-26 22:44:59 -04:00
|
|
|
|
use sha2::Sha512;
|
|
|
|
|
|
|
|
|
|
use curve25519_dalek::constants as dalek_constants;
|
|
|
|
|
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
|
|
|
|
use curve25519_dalek::ristretto::RistrettoPoint;
|
|
|
|
|
use curve25519_dalek::scalar::Scalar;
|
2021-04-30 16:24:42 -04:00
|
|
|
|
#[cfg(test)]
|
|
|
|
|
use curve25519_dalek::traits::IsIdentity;
|
2023-07-19 10:30:35 -04:00
|
|
|
|
use rand::rngs::OsRng;
|
|
|
|
|
use rand::Rng;
|
|
|
|
|
use std::collections::HashMap;
|
|
|
|
|
use std::convert::{TryFrom, TryInto};
|
2021-04-26 22:44:59 -04:00
|
|
|
|
|
|
|
|
|
use ed25519_dalek::{Keypair, PublicKey, Signature, SignatureError, Signer, Verifier};
|
2021-04-29 18:22:06 -04:00
|
|
|
|
use subtle::ConstantTimeEq;
|
2021-04-26 22:44:59 -04:00
|
|
|
|
|
2021-05-05 13:58:43 -04:00
|
|
|
|
use std::collections::HashSet;
|
|
|
|
|
|
|
|
|
|
use bridge_table::{
|
2023-06-20 20:04:17 -04:00
|
|
|
|
BridgeLine, BridgeTable, EncryptedBucket, MAX_BRIDGES_PER_BUCKET, MIN_BUCKET_REACHABILITY,
|
2021-05-05 13:58:43 -04:00
|
|
|
|
};
|
|
|
|
|
use migration_table::{MigrationTable, MigrationType};
|
|
|
|
|
|
2021-04-26 22:44:59 -04:00
|
|
|
|
use lazy_static::lazy_static;
|
|
|
|
|
|
2022-11-22 19:15:09 -05:00
|
|
|
|
use serde::{Deserialize, Serialize};
|
2023-07-27 16:05:20 -04:00
|
|
|
|
use thiserror::Error;
|
2022-11-13 11:33:57 -05:00
|
|
|
|
|
2021-04-26 22:44:59 -04:00
|
|
|
|
// Group generators used by the CMZ14 credential constructions.  A is a
// second generator obtained by hashing a fixed string (so its discrete
// log with respect to B is not known to anyone); B is the standard
// Ristretto basepoint.  Both get precomputed multiplication tables.
lazy_static! {
    /// Generator A, derived by hashing the fixed string "CMZ Generator A".
    pub static ref CMZ_A: RistrettoPoint =
        RistrettoPoint::hash_from_bytes::<Sha512>(b"CMZ Generator A");
    /// Generator B: the standard Ristretto basepoint.
    pub static ref CMZ_B: RistrettoPoint = dalek_constants::RISTRETTO_BASEPOINT_POINT;
    /// Precomputed multiplication table for A.
    pub static ref CMZ_A_TABLE: RistrettoBasepointTable = RistrettoBasepointTable::create(&CMZ_A);
    /// Precomputed multiplication table for B.
    pub static ref CMZ_B_TABLE: RistrettoBasepointTable =
        dalek_constants::RISTRETTO_BASEPOINT_TABLE;
}
|
|
|
|
|
|
2023-07-28 12:27:49 -04:00
|
|
|
|
// Open-entry and blocked buckets expire after EXPIRY_DATE days in order to match
// the expiry date for Lox credentials. The particular value 511 is chosen because
// values of the form 2^k - 1 make range proofs more efficient, but it can be changed to any value
|
|
|
|
|
pub const EXPIRY_DATE: u32 = 511;
|
|
|
|
|
|
2023-06-16 13:56:30 -04:00
|
|
|
|
/// Outcome of attempting to replace a bridge in the bridge table
/// (see `bridge_replace`).
///
/// Derives `Debug` (public types should be debuggable), and `Clone`/
/// `Copy` since this is a plain fieldless status enum; the explicit
/// discriminant values are kept unchanged.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ReplaceSuccess {
    /// The bridge to be replaced was not found in the table
    NotFound = 0,
    /// The bridge was found, but no replacement could be performed
    NotReplaced = 1,
    /// The bridge was successfully replaced
    Replaced = 2,
}
|
|
|
|
|
|
2023-07-27 16:05:20 -04:00
|
|
|
|
/// Error returned when no unused bucket index could be found in the
/// bridge table (the search performed by `find_next_available_key`
/// ran out of candidates).
#[derive(Error, Debug)]
pub enum NoAvailableIDError {
    /// The index search was exhausted without finding a free slot.
    #[error("Find key exhausted with no available index found!")]
    ExhaustedIndexer,
}
|
|
|
|
|
|
2022-11-13 11:33:57 -05:00
|
|
|
|
/// The private key of a CMZ14 credential issuer.
///
/// For a credential with n attributes, `x` holds the n+1 secret
/// scalars x[0]..x[n] (see `IssuerPrivKey::new`), and `x0tilde` is the
/// extra scalar combined with x[0] in the first public-key element
/// (see `IssuerPubKey::new`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IssuerPrivKey {
    x0tilde: Scalar,
    x: Vec<Scalar>,
}
|
|
|
|
|
|
|
|
|
|
impl IssuerPrivKey {
|
|
|
|
|
/// Create an IssuerPrivKey for credentials with the given number of
|
|
|
|
|
/// attributes.
|
|
|
|
|
pub fn new(n: u16) -> IssuerPrivKey {
|
|
|
|
|
let mut rng = rand::thread_rng();
|
|
|
|
|
let x0tilde = Scalar::random(&mut rng);
|
|
|
|
|
let mut x: Vec<Scalar> = Vec::with_capacity((n + 1) as usize);
|
|
|
|
|
|
|
|
|
|
// Set x to a vector of n+1 random Scalars
|
|
|
|
|
x.resize_with((n + 1) as usize, || Scalar::random(&mut rng));
|
|
|
|
|
|
|
|
|
|
IssuerPrivKey { x0tilde, x }
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-13 11:33:57 -05:00
|
|
|
|
/// The public key of a CMZ14 credential issuer, corresponding to an
/// `IssuerPrivKey`.
///
/// `X[0] = x0tilde*A + x[0]*B`, and `X[i] = x[i]*A` for i >= 1
/// (see `IssuerPubKey::new`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IssuerPubKey {
    X: Vec<RistrettoPoint>,
}
|
|
|
|
|
|
|
|
|
|
impl IssuerPubKey {
|
|
|
|
|
/// Create an IssuerPubKey from the corresponding IssuerPrivKey
|
|
|
|
|
pub fn new(privkey: &IssuerPrivKey) -> IssuerPubKey {
|
|
|
|
|
let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
|
|
|
|
|
let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;
|
|
|
|
|
let n_plus_one = privkey.x.len();
|
|
|
|
|
let mut X: Vec<RistrettoPoint> = Vec::with_capacity(n_plus_one);
|
|
|
|
|
|
|
|
|
|
// The first element is a special case; it is
|
|
|
|
|
// X[0] = x0tilde*A + x[0]*B
|
|
|
|
|
X.push(&privkey.x0tilde * Atable + &privkey.x[0] * Btable);
|
|
|
|
|
|
|
|
|
|
// The other elements (1 through n) are X[i] = x[i]*A
|
2021-06-05 13:36:09 -04:00
|
|
|
|
X.extend(privkey.x.iter().skip(1).map(|xi| xi * Atable));
|
|
|
|
|
|
2021-04-26 22:44:59 -04:00
|
|
|
|
IssuerPubKey { X }
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-07-25 17:24:03 -04:00
|
|
|
|
pub const OPENINV_K: u32 = 10;
|
2021-04-26 22:44:59 -04:00
|
|
|
|
/// The BridgeDb. This will typically be a singleton object. The
/// BridgeDb's role is simply to issue signed "open invitations" to
/// people who are not yet part of the system.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeDb {
    /// The keypair for signing open invitations
    keypair: Keypair,
    /// The public key for verifying open invitations
    pub pubkey: PublicKey,
    /// The set of open-invitation buckets
    openinv_buckets: HashSet<u32>,
    /// Buckets already handed out in an invitation; `invite` reuses the
    /// most recent one for up to OPENINV_K invitations
    distributed_buckets: Vec<u32>,
    /// How many times the most recently distributed bucket has been
    /// handed out so far (compared against OPENINV_K in `invite`)
    current_k: u32,
}
|
|
|
|
|
|
|
|
|
|
/// An open invitation is a [u8; OPENINV_LENGTH] where the first 32
|
|
|
|
|
/// bytes are the serialization of a random Scalar (the invitation id),
|
|
|
|
|
/// the next 4 bytes are a little-endian bucket number, and the last
|
|
|
|
|
/// SIGNATURE_LENGTH bytes are the signature on the first 36 bytes.
|
|
|
|
|
pub const OPENINV_LENGTH: usize = 32 // the length of the random
|
|
|
|
|
// invitation id (a Scalar)
|
|
|
|
|
+ 4 // the length of the u32 for the bucket number
|
|
|
|
|
+ ed25519_dalek::SIGNATURE_LENGTH; // the length of the signature
|
|
|
|
|
|
|
|
|
|
impl BridgeDb {
    /// Create the BridgeDb.
    ///
    /// Generates a fresh Ed25519 keypair for signing open invitations;
    /// starts with no known buckets.
    pub fn new() -> Self {
        let mut csprng = OsRng {};
        let keypair = Keypair::generate(&mut csprng);
        let pubkey = keypair.public;
        Self {
            keypair,
            pubkey,
            openinv_buckets: Default::default(),
            distributed_buckets: Default::default(),
            current_k: 0,
        }
    }

    /// Insert an open-invitation bucket into the set
    pub fn insert_openinv(&mut self, bucket: u32) {
        self.openinv_buckets.insert(bucket);
    }

    /// Remove an open-invitation bucket from the set
    pub fn remove_openinv(&mut self, bucket: &u32) {
        self.openinv_buckets.remove(bucket);
    }

    /// Stop handing out the given bucket, whether it is still waiting
    /// to be distributed (open-invitation set) or has already been
    /// distributed.
    pub fn remove_blocked_or_expired_buckets(&mut self, bucket: &u32) {
        if self.openinv_buckets.contains(bucket) {
            println!("Removing a bucket that has not been distributed yet!");
            self.openinv_buckets.remove(bucket);
        } else if self.distributed_buckets.contains(bucket) {
            self.distributed_buckets.retain(|&x| x != *bucket);
        }
    }

    /// Record that the given bucket has been handed out in an open
    /// invitation.
    pub fn mark_distributed(&mut self, bucket: u32) {
        self.distributed_buckets.push(bucket);
    }

    /// Produce an open invitation such that the next k users, where k is <
    /// OPENINV_K, will receive the same open invitation bucket
    /// chosen randomly from the set of open-invitation buckets.
    ///
    /// NOTE(review): if both `distributed_buckets` and `openinv_buckets`
    /// are empty, the indexing in the else-branch below panics; callers
    /// must ensure at least one open-invitation bucket exists — confirm.
    pub fn invite(&mut self) -> [u8; OPENINV_LENGTH] {
        let mut res: [u8; OPENINV_LENGTH] = [0; OPENINV_LENGTH];
        let mut rng = rand::thread_rng();
        // Choose a random invitation id (a Scalar) and serialize it
        let id = Scalar::random(&mut rng);
        res[0..32].copy_from_slice(&id.to_bytes());
        let bucket_num: u32;
        if self.current_k < OPENINV_K && !self.distributed_buckets.is_empty() {
            // Keep reusing the most recently distributed bucket until
            // it has been handed out OPENINV_K times
            bucket_num = *self.distributed_buckets.last().unwrap();
            self.current_k += 1;
        } else {
            // Choose a random bucket number (from the set of open
            // invitation buckets) and serialize it
            let openinv_vec: Vec<&u32> = self.openinv_buckets.iter().collect();
            bucket_num = *openinv_vec[rng.gen_range(0, openinv_vec.len())];
            self.mark_distributed(bucket_num);
            self.remove_openinv(&bucket_num);
            self.current_k = 1;
        }
        res[32..(32 + 4)].copy_from_slice(&bucket_num.to_le_bytes());
        // Sign the first 36 bytes and serialize it
        let sig = self.keypair.sign(&res[0..(32 + 4)]);
        res[(32 + 4)..].copy_from_slice(&sig.to_bytes());
        res
    }

    /// Verify an open invitation. Returns the invitation id and the
    /// bucket number if the signature checked out. It is up to the
    /// caller to then check that the invitation id has not been used
    /// before.
    pub fn verify(
        invitation: [u8; OPENINV_LENGTH],
        pubkey: PublicKey,
    ) -> Result<(Scalar, u32), SignatureError> {
        // Pull out the signature and verify it
        let sig = Signature::try_from(&invitation[(32 + 4)..])?;
        pubkey.verify(&invitation[0..(32 + 4)], &sig)?;
        // The signature passed. Pull out the bucket number and then
        // the invitation id
        let bucket = u32::from_le_bytes(invitation[32..(32 + 4)].try_into().unwrap());
        match Scalar::from_canonical_bytes(invitation[0..32].try_into().unwrap()) {
            // It should never happen that there's a valid signature on
            // an invalid serialization of a Scalar, but check anyway.
            None => Err(SignatureError::new()),
            Some(s) => Ok((s, bucket)),
        }
    }
}
|
2021-04-28 13:36:04 -04:00
|
|
|
|
|
2021-05-05 13:58:43 -04:00
|
|
|
|
impl Default for BridgeDb {
|
|
|
|
|
fn default() -> Self {
|
|
|
|
|
Self::new()
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-28 13:36:04 -04:00
|
|
|
|
/// The bridge authority. This will typically be a singleton object.
///
/// It holds the issuer keypairs for every credential type, the bridge
/// and migration tables, and the duplicate filters that prevent reuse
/// of credential ids.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeAuth {
    /// The private key for the main Lox credential
    lox_priv: IssuerPrivKey,
    /// The public key for the main Lox credential
    pub lox_pub: IssuerPubKey,
    /// The private key for migration credentials
    migration_priv: IssuerPrivKey,
    /// The public key for migration credentials
    pub migration_pub: IssuerPubKey,
    /// The private key for migration key credentials
    migrationkey_priv: IssuerPrivKey,
    /// The public key for migration key credentials
    pub migrationkey_pub: IssuerPubKey,
    /// The private key for bucket reachability credentials
    reachability_priv: IssuerPrivKey,
    /// The public key for bucket reachability credentials
    pub reachability_pub: IssuerPubKey,
    /// The private key for invitation credentials
    invitation_priv: IssuerPrivKey,
    /// The public key for invitation credentials
    pub invitation_pub: IssuerPubKey,

    /// The public key of the BridgeDb issuing open invitations
    pub bridgedb_pub: PublicKey,

    /// The bridge table
    pub bridge_table: BridgeTable,

    /// The migration tables: trust upgrades (untrusted -> trusted) and
    /// blockage migrations (blocked bucket -> spare bucket)
    trustup_migration_table: MigrationTable,
    blockage_migration_table: MigrationTable,

    /// Duplicate filter for open invitations
    openinv_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for Lox credential ids
    id_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for Invitation credential ids
    inv_id_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for trust promotions (from untrusted level 0 to
    /// trusted level 1)
    trust_promotion_filter: dup_filter::DupFilter<Scalar>,

    /// For testing only: offset of the true time to the simulated time;
    /// not serialized, so it resets to zero on reload
    #[serde(skip)]
    time_offset: time::Duration,
}
|
|
|
|
|
|
|
|
|
|
impl BridgeAuth {
|
|
|
|
|
/// Create a BridgeAuth that accepts open invitations signed by the
/// given BridgeDb public key.
///
/// Fresh issuer keypairs are generated for every credential type;
/// all tables and duplicate filters start empty.
pub fn new(bridgedb_pub: PublicKey) -> Self {
    // Create the private and public keys for each of the types of
    // credential, each with the appropriate number of attributes
    let lox_priv = IssuerPrivKey::new(6);
    let lox_pub = IssuerPubKey::new(&lox_priv);
    let migration_priv = IssuerPrivKey::new(4);
    let migration_pub = IssuerPubKey::new(&migration_priv);
    let migrationkey_priv = IssuerPrivKey::new(2);
    let migrationkey_pub = IssuerPubKey::new(&migrationkey_priv);
    let reachability_priv = IssuerPrivKey::new(2);
    let reachability_pub = IssuerPubKey::new(&reachability_priv);
    let invitation_priv = IssuerPrivKey::new(4);
    let invitation_pub = IssuerPubKey::new(&invitation_priv);
    Self {
        lox_priv,
        lox_pub,
        migration_priv,
        migration_pub,
        migrationkey_priv,
        migrationkey_pub,
        reachability_priv,
        reachability_pub,
        invitation_priv,
        invitation_pub,
        bridgedb_pub,
        bridge_table: Default::default(),
        trustup_migration_table: MigrationTable::new(MigrationType::TrustUpgrade),
        blockage_migration_table: MigrationTable::new(MigrationType::Blockage),
        openinv_filter: Default::default(),
        id_filter: Default::default(),
        inv_id_filter: Default::default(),
        trust_promotion_filter: Default::default(),
        // No simulated-time offset until test code advances the clock
        time_offset: time::Duration::ZERO,
    }
}
|
|
|
|
|
|
2023-09-13 12:08:48 -04:00
|
|
|
|
pub fn is_empty(&mut self) -> bool {
|
|
|
|
|
self.bridge_table.buckets.is_empty()
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-05 13:58:43 -04:00
|
|
|
|
/// Insert a set of open invitation bridges.
|
|
|
|
|
///
|
|
|
|
|
/// Each of the bridges will be given its own open invitation
|
|
|
|
|
/// bucket, and the BridgeDb will be informed. A single bucket
|
|
|
|
|
/// containing all of the bridges will also be created, with a trust
|
|
|
|
|
/// upgrade migration from each of the single-bridge buckets.
|
|
|
|
|
pub fn add_openinv_bridges(
|
|
|
|
|
&mut self,
|
|
|
|
|
bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
|
|
|
|
bdb: &mut BridgeDb,
|
2023-07-27 16:05:20 -04:00
|
|
|
|
) -> Result<(), NoAvailableIDError> {
|
|
|
|
|
let bindex = match self.find_next_available_key(bdb) {
|
|
|
|
|
Ok(sindex) => sindex,
|
|
|
|
|
Err(e) => return Err(e),
|
|
|
|
|
};
|
|
|
|
|
self.bridge_table.new_bucket(bindex, &bridges);
|
2021-05-05 13:58:43 -04:00
|
|
|
|
let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
|
|
|
|
|
for b in bridges.iter() {
|
2023-07-27 16:05:20 -04:00
|
|
|
|
let sindex = match self.find_next_available_key(bdb) {
|
|
|
|
|
Ok(sindex) => sindex,
|
|
|
|
|
Err(e) => return Err(e),
|
|
|
|
|
};
|
2021-05-05 13:58:43 -04:00
|
|
|
|
single[0] = *b;
|
2023-07-27 16:05:20 -04:00
|
|
|
|
self.bridge_table.new_bucket(sindex, &single);
|
2023-07-13 17:36:40 -04:00
|
|
|
|
self.bridge_table.open_inv_keys.push((sindex, self.today()));
|
|
|
|
|
bdb.insert_openinv(sindex);
|
|
|
|
|
self.trustup_migration_table.table.insert(sindex, bindex);
|
2021-05-05 13:58:43 -04:00
|
|
|
|
}
|
2023-07-27 16:05:20 -04:00
|
|
|
|
Ok(())
|
2021-05-05 13:58:43 -04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Insert a hot spare bucket of bridges
|
2023-07-13 17:36:40 -04:00
|
|
|
|
pub fn add_spare_bucket(
|
|
|
|
|
&mut self,
|
|
|
|
|
bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
|
|
|
|
bdb: &mut BridgeDb,
|
2023-07-27 16:05:20 -04:00
|
|
|
|
) -> Result<(), NoAvailableIDError> {
|
|
|
|
|
let index = match self.find_next_available_key(bdb) {
|
|
|
|
|
Ok(index) => index,
|
|
|
|
|
Err(e) => return Err(e),
|
|
|
|
|
};
|
|
|
|
|
self.bridge_table.new_bucket(index, &bucket);
|
2023-07-13 17:36:40 -04:00
|
|
|
|
self.bridge_table.spares.insert(index);
|
2023-07-27 16:05:20 -04:00
|
|
|
|
Ok(())
|
2021-05-05 13:58:43 -04:00
|
|
|
|
}
|
|
|
|
|
|
2023-07-27 16:05:20 -04:00
|
|
|
|
// TODO Ensure synchronization of Lox bridge_table with rdsys
// (currently an unimplemented stub; the comments below sketch the plan)
pub fn sync_table(&mut self) {
    // Create a hashtable (?) of bridges in the lox distributor from new resources
    // accept the hashtable and recreate the bridge table from the hash table here
    // using existing reachable bridges, other table checks and placements from existing bridge table
    // If bridges are in reachable bridges, put them in the table with their Vec
    // How to check for bridges that aren't there/are extra?
    // After going through the update, make sure bridges in the table are the same and deal with discrepancies
    // This will be the bad/annoying part

    // also use open_inv_keys and blocked_keys from bridge_table to remove expired keys from table.
    // make sure this happens before they are removed from the structures in the bridge table
}
|
|
|
|
|
|
2023-05-10 20:26:08 -04:00
|
|
|
|
pub fn allocate_bridges(
|
|
|
|
|
&mut self,
|
|
|
|
|
distributor_bridges: &mut Vec<BridgeLine>,
|
|
|
|
|
bdb: &mut BridgeDb,
|
|
|
|
|
) {
|
2023-05-15 18:57:23 -04:00
|
|
|
|
while let Some(bridge) = distributor_bridges.pop() {
|
|
|
|
|
self.bridge_table.unallocated_bridges.push(bridge);
|
2023-05-08 19:44:36 -04:00
|
|
|
|
}
|
|
|
|
|
while self.bridge_table.unallocated_bridges.len() >= MAX_BRIDGES_PER_BUCKET {
|
|
|
|
|
let mut bucket = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
|
2023-06-16 12:27:40 -04:00
|
|
|
|
for bridge in bucket.iter_mut() {
|
|
|
|
|
*bridge = self.bridge_table.unallocated_bridges.pop().unwrap();
|
2023-05-08 19:44:36 -04:00
|
|
|
|
}
|
2023-07-27 16:05:20 -04:00
|
|
|
|
match self.add_openinv_bridges(bucket, bdb) {
|
|
|
|
|
Ok(_) => continue,
|
|
|
|
|
Err(e) => {
|
|
|
|
|
println!("Error: {:?}", e);
|
|
|
|
|
for bridge in bucket {
|
|
|
|
|
self.bridge_table.unallocated_bridges.push(bridge);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-05-08 19:44:36 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-23 11:00:10 -04:00
|
|
|
|
// Update the details of a bridge in the bridge table. This assumes that the IP and Port
// of a given bridge remains the same and thus can be updated.
// First we must retrieve the list of reachable bridges, then we must search for any matching our partial key
// which will include the IP and Port. Then we can replace the original bridge with the updated bridge
// Returns true if the bridge has successfully updated
//
// NOTE(review): the `assert!` calls below panic (rather than fail
// gracefully) if the reachable map and the buckets disagree — confirm
// that is the intended invariant-violation behavior in production.
pub fn bridge_update(&mut self, bridge: &BridgeLine) -> bool {
    let mut res: bool = false; //default False to assume that update failed
    //Needs to be updated since bridge will only match on some fields.

    // Clone so we can mutate self.bridge_table.reachable while
    // iterating over the (bridge, positions) pairs.
    let reachable_bridges = self.bridge_table.reachable.clone();
    for reachable_bridge in reachable_bridges {
        // Match on the stable fingerprint only; the other BridgeLine
        // fields are exactly what may have changed.
        if reachable_bridge.0.uid_fingerprint == bridge.uid_fingerprint {
            println!(
                "Bridge from table: {:?} has same IP and Port as bridge {:?}!",
                reachable_bridge.0, bridge
            );
            // Now we must remove the old bridge from the table and insert the new bridge in its place
            // i.e., in the same bucket and with the same permissions.
            let positions = self.bridge_table.reachable.get(&reachable_bridge.0);
            if let Some(v) = positions {
                for (bucketnum, offset) in v.iter() {
                    println!("Bucket num: {:?} and offset: {:?}", bucketnum, offset);
                    // Copy the bucket out, patch the slot, write it back
                    let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        None => return res,
                    };
                    assert!(bridgelines[*offset] == reachable_bridge.0);
                    bridgelines[*offset] = *bridge;
                    self.bridge_table.buckets.insert(*bucketnum, bridgelines);
                    // Re-read to sanity-check that the write took effect
                    let bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        None => return res,
                    };
                    assert!(bridgelines[*offset] != reachable_bridge.0);
                }
                res = true;
            } else {
                return res;
            }
            // We must also remove the old bridge from the reachable bridges table
            // and add the new bridge (keeping the same positions list)
            self.bridge_table.reachable.remove(&reachable_bridge.0);
            self.bridge_table
                .reachable
                .insert(*bridge, reachable_bridge.1);
            return res;
        }
    }
    // If this is returned, we assume that the bridge wasn't found in the bridge table
    // and therefore should be treated as a "new bridge"
    res
}
|
2023-04-04 18:39:28 -04:00
|
|
|
|
|
2023-05-04 18:10:50 -04:00
|
|
|
|
/// Replace a bridge that has gone away with another bridge.
///
/// Replacement preference order: the caller-supplied
/// `available_bridge` if given, otherwise an unallocated bridge,
/// otherwise one bridge from a spare bucket (the rest of that spare
/// bucket joins the unallocated pool). Returns `NotFound` if the
/// bridge is not in the reachable map, `NotReplaced` if no
/// replacement source was available, and `Replaced` on success.
///
/// NOTE(review): the `assert!` calls panic if the reachable map and
/// the buckets disagree — confirm that invariant is intended to be
/// fatal.
pub fn bridge_replace(
    &mut self,
    bridge: &BridgeLine,
    available_bridge: Option<&BridgeLine>,
) -> ReplaceSuccess {
    let mut res = ReplaceSuccess::NotFound;
    // Clone so we can mutate the reachable map during the loops below
    let reachable_bridges = &self.bridge_table.reachable.clone();
    match reachable_bridges.get(bridge) {
        Some(positions) => {
            if let Some(replacement) = available_bridge {
                // Case 1: caller supplied the replacement bridge
                for (bucketnum, offset) in positions.iter() {
                    let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        None => return ReplaceSuccess::NotFound,
                    };
                    assert!(bridgelines[*offset] == *bridge);
                    bridgelines[*offset] = *replacement;
                    self.bridge_table.buckets.insert(*bucketnum, bridgelines);
                    // Remove the bridge from the reachable bridges and add new bridge
                    self.bridge_table
                        .reachable
                        .insert(*replacement, positions.clone());
                    // Remove the bridge from the bucket
                    self.bridge_table.reachable.remove(bridge);
                }
                res = ReplaceSuccess::Replaced
            } else if !self.bridge_table.unallocated_bridges.is_empty() {
                // Case 2: take a replacement from the unallocated pool
                let replacement = &self.bridge_table.unallocated_bridges.pop().unwrap();
                for (bucketnum, offset) in positions.iter() {
                    let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        // This should not happen if the rest of the function is correct, we can assume unwrap will succeed
                        None => return ReplaceSuccess::NotReplaced,
                    };
                    assert!(bridgelines[*offset] == *bridge);
                    bridgelines[*offset] = *replacement;
                    self.bridge_table.buckets.insert(*bucketnum, bridgelines);
                    self.bridge_table
                        .reachable
                        .insert(*replacement, positions.clone());
                    // Remove the bridge from the bucket
                    self.bridge_table.reachable.remove(bridge);
                }
                res = ReplaceSuccess::Replaced
            } else if !self.bridge_table.spares.is_empty() {
                // Case 3: dismantle a spare bucket and take one of its
                // bridges as the replacement
                // Get the first spare and remove it from the spares set.
                let spare = *self.bridge_table.spares.iter().next().unwrap();
                self.bridge_table.spares.remove(&spare);
                // The spare's key can be reused for a future bucket
                self.bridge_table.recycleable_keys.push(spare);
                // Get the actual bridges from the spare bucket
                let spare_bucket = match self.bridge_table.buckets.remove(&spare) {
                    Some(spare_bucket) => spare_bucket,
                    // This should not happen if the rest of the functions are correct, we can assume unwrap will succeed
                    None => return ReplaceSuccess::NotReplaced,
                };
                // Remove the spare bucket uid from the keys map
                self.bridge_table.keys.remove(&spare);
                let mut replacement: &BridgeLine = &BridgeLine::default();
                // Make the first spare the replacement bridge, add the others to the set of
                // unallocated_bridges (replacement.port > 0 means a
                // real bridge has already been chosen; the default
                // BridgeLine has port 0)
                for spare_bridge in spare_bucket.iter() {
                    if replacement.port > 0 {
                        self.bridge_table.unallocated_bridges.push(*spare_bridge);
                        // Mark bucket as unreachable while it is unallocated
                        self.bridge_table.reachable.remove(spare_bridge);
                    } else {
                        replacement = spare_bridge;
                    }
                }
                for (bucketnum, offset) in positions.iter() {
                    let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        None => return ReplaceSuccess::NotReplaced,
                    };
                    assert!(bridgelines[*offset] == *bridge);
                    bridgelines[*offset] = *replacement;
                    self.bridge_table.buckets.insert(*bucketnum, bridgelines);
                    self.bridge_table
                        .reachable
                        .insert(*replacement, positions.clone());
                    // Remove the bridge from the bucket
                    self.bridge_table.reachable.remove(bridge);
                }
                res = ReplaceSuccess::Replaced
            } else {
                // If there are no available bridges that can be assigned here, the only thing
                // that can be done is return an indication that updating the gone bridge
                // didn't work.
                // In this case, we do not mark the bridge as unreachable or remove it from the
                // reachable bridges so that we can still find it when a new bridge does become available
                res = ReplaceSuccess::NotReplaced
            }
        }
        None => return res,
    };
    res
}
|
|
|
|
|
|
2021-05-05 13:58:43 -04:00
|
|
|
|
    /// Mark a bridge as unreachable
    ///
    /// This bridge will be removed from each of the buckets that
    /// contains it. If any of those are open-invitation buckets, the
    /// trust upgrade migration for that bucket will be removed and the
    /// BridgeDb will be informed to stop handing out that bridge. If
    /// any of those are trusted buckets where the number of reachable
    /// bridges has fallen below the threshold, a blockage migration
    /// from that bucket to a spare bucket will be added, and the spare
    /// bucket will be removed from the list of hot spares. In
    /// addition, if the blocked bucket was the _target_ of a blockage
    /// migration, change the target to the new (formerly spare) bucket.
    /// Returns true if successful, or false if it needed a hot spare but
    /// there was none available.
    pub fn bridge_unreachable(&mut self, bridge: &BridgeLine, bdb: &mut BridgeDb) -> bool {
        let mut res: bool = true;
        // If the bridge was never allocated to a bucket, just drop it
        // from the unallocated pool and report success.
        if self.bridge_table.unallocated_bridges.contains(bridge) {
            let index = self
                .bridge_table
                .unallocated_bridges
                .iter()
                .position(|&b| b == *bridge)
                .unwrap();
            self.bridge_table.unallocated_bridges.remove(index);
            // NOTE(review): res is already true here; this assignment
            // is redundant but harmless.
            res = true;
        } else {
            // The reachable map tells us every (bucket, offset) slot
            // this bridge occupies.
            let positions = self.bridge_table.reachable.get(bridge);
            if let Some(v) = positions {
                for (bucketnum, offset) in v.iter() {
                    // Count how many bridges in this bucket are reachable
                    let mut bucket = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        None => return false, // This should not happen
                    };
                    // Counted *before* removal below; the bridge being
                    // removed is still in the reachable map at this
                    // point, so it is included in this count.
                    let numreachable = bucket
                        .iter()
                        .filter(|br| self.bridge_table.reachable.get(br).is_some())
                        .count();

                    // Remove the bridge from the bucket
                    // NOTE(review): `bucket` is a local *copy* of the
                    // stored bucket (see the `*bridgelines` deref
                    // above), so this write is not persisted back to
                    // self.bridge_table.buckets — confirm whether that
                    // is intentional.
                    assert!(bucket[*offset] == *bridge);
                    bucket[*offset] = BridgeLine::default();

                    // Is this bucket an open-invitation bucket?
                    // If so, retire it: stop BridgeDb from handing it
                    // out and drop its trust upgrade migration.
                    if bdb.openinv_buckets.contains(bucketnum)
                        || bdb.distributed_buckets.contains(bucketnum)
                    {
                        bdb.remove_blocked_or_expired_buckets(bucketnum);
                        self.trustup_migration_table.table.remove(bucketnum);
                        continue;
                    }

                    // Does this removal cause the bucket to go below the
                    // threshold?
                    // (Migration is triggered exactly when the count
                    // *equals* the threshold, i.e. this removal is the
                    // one that pushes it below; this fires at most once
                    // per bucket.)
                    if numreachable != MIN_BUCKET_REACHABILITY {
                        // No
                        continue;
                    }

                    // This bucket is now unreachable. Get a spare bucket
                    if self.bridge_table.spares.is_empty() {
                        // Uh, oh. No spares available. Just delete any
                        // migrations leading to this bucket.
                        res = false;
                        self.trustup_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                        self.blockage_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                    } else {
                        // Get the first spare and remove it from the spares
                        // set.
                        // NOTE(review): spares is iterated in whatever
                        // order the set provides; "first" is arbitrary.
                        let spare = *self.bridge_table.spares.iter().next().unwrap();
                        self.bridge_table.spares.remove(&spare);
                        // Record when this bucket became blocked so it
                        // can later be expired by clean_up_blocked().
                        self.bridge_table
                            .blocked_keys
                            .push((*bucketnum, self.today()));
                        // Add a blockage migration from this bucket to the spare
                        self.blockage_migration_table
                            .table
                            .insert(*bucketnum, spare);
                        // Remove any trust upgrade migrations to this
                        // bucket
                        self.trustup_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                        // Change any blockage migrations with this bucket
                        // as the destination to the spare
                        for (_, v) in self.blockage_migration_table.table.iter_mut() {
                            if *v == *bucketnum {
                                *v = spare;
                            }
                        }
                    }
                }
            }
            // Finally forget that this bridge was ever reachable.
            self.bridge_table.reachable.remove(bridge);
        }
        res
    }
|
|
|
|
|
|
2023-07-10 17:03:10 -04:00
|
|
|
|
// Since buckets are moved around in the bridge_table, finding a lookup key that
|
|
|
|
|
// does not overwrite existing bridges could become an issue. We keep a list
|
|
|
|
|
// of recycleable lookup keys from buckets that have been removed and prioritize
|
|
|
|
|
// this list before increasing the counter
|
2023-07-27 16:05:20 -04:00
|
|
|
|
fn find_next_available_key(&mut self, bdb: &mut BridgeDb) -> Result<u32, NoAvailableIDError> {
|
2023-07-13 17:36:40 -04:00
|
|
|
|
self.clean_up_expired_buckets(bdb);
|
2023-07-10 17:03:10 -04:00
|
|
|
|
if self.bridge_table.recycleable_keys.is_empty() {
|
|
|
|
|
let mut test_index = 1;
|
|
|
|
|
let mut test_counter = self.bridge_table.counter.wrapping_add(test_index);
|
|
|
|
|
let mut i = 0;
|
|
|
|
|
while self.bridge_table.buckets.contains_key(&test_counter) && i < 5000 {
|
2023-07-27 16:05:20 -04:00
|
|
|
|
test_index += 1;
|
2023-07-10 17:03:10 -04:00
|
|
|
|
test_counter = self.bridge_table.counter.wrapping_add(test_index);
|
|
|
|
|
i += 1;
|
2023-07-27 16:05:20 -04:00
|
|
|
|
if i == 5000 {
|
|
|
|
|
return Err(NoAvailableIDError::ExhaustedIndexer);
|
|
|
|
|
}
|
2023-07-10 17:03:10 -04:00
|
|
|
|
}
|
|
|
|
|
self.bridge_table.counter = self.bridge_table.counter.wrapping_add(test_index);
|
2023-07-27 16:05:20 -04:00
|
|
|
|
Ok(self.bridge_table.counter)
|
2023-07-10 17:03:10 -04:00
|
|
|
|
} else {
|
2023-07-27 16:05:20 -04:00
|
|
|
|
Ok(self.bridge_table.recycleable_keys.pop().unwrap())
|
2023-07-10 17:03:10 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    // This function looks for and removes buckets so their indexes can be reused
    // This should include buckets that have been blocked for a sufficiently long period
    // that we no longer want to allow migration to, or else, open-entry buckets that
    // have been unblocked long enough to become trusted and who's users' credentials
    // would have expired (after EXPIRY_DATE)
    pub fn clean_up_expired_buckets(&mut self, bdb: &mut BridgeDb) {
        // First check if there are any blocked indexes that are old enough to be replaced
        self.clean_up_blocked();
        // Next do the same for open_invitations buckets
        // (this one also needs the BridgeDb so it can stop distributing
        // the expired open-invitation buckets)
        self.clean_up_open_entry(bdb);
    }
|
|
|
|
|
|
|
|
|
|
    /// Expire buckets that have been blocked for longer than EXPIRY_DATE:
    /// salvage any still-live bridges into the unallocated pool, recycle
    /// the bucket's lookup key, and drop its blockage migrations.
    fn clean_up_blocked(&mut self) {
        if !self.bridge_table.blocked_keys.is_empty()
            && self
                .bridge_table
                .blocked_keys
                .iter()
                .any(|&x| x.1 + EXPIRY_DATE < self.today())
        {
            // If there are expired blockages, separate them from the fresh blockages
            // (each entry is (bucket key, date the blockage was recorded))
            let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
                .bridge_table
                .blocked_keys
                .iter()
                .partition(|&x| x.1 + EXPIRY_DATE < self.today());
            for item in expired {
                let new_item = item.0;
                // check each single bridge line and ensure none are still marked as reachable.
                // if any are still reachable, remove from reachable bridges.
                // When syncing resources, we will likely have to reallocate this bridge but if it hasn't already been
                // blocked, this might be fine?
                // NOTE(review): this unwrap relies on the invariant that
                // every key in blocked_keys still has a bucket; it will
                // panic if the bucket was removed elsewhere — confirm.
                let bridgelines = self.bridge_table.buckets.get(&new_item).unwrap();
                for bridgeline in bridgelines {
                    // If the bridge hasn't been set to default, assume it's still reachable
                    if bridgeline.port > 0 {
                        // Move to unallocated bridges
                        self.bridge_table.unallocated_bridges.push(*bridgeline);
                        // Check if it's still in the reachable bridges. It should be if we've gotten this far.
                        if let Some(_reachable_indexes_for_bridgeline) =
                            self.bridge_table.reachable.get(bridgeline)
                        {
                            // and remove it until it's reallocated
                            self.bridge_table.reachable.remove(bridgeline);
                        }
                    }
                }
                // Then remove the bucket and keys at the specified index
                self.bridge_table.buckets.remove(&new_item);
                self.bridge_table.keys.remove(&new_item);
                //and add them to the recyclable keys
                self.bridge_table.recycleable_keys.push(new_item);
                // Remove the expired blocked bucket from the blockage migration table,
                // assuming that anyone that has still not attempted to migrate from their
                // blocked bridge after the EXPIRY_DATE probably doesn't still need to migrate.
                self.blockage_migration_table
                    .table
                    .retain(|&k, _| k != new_item);
            }
            // Finally, update the blocked_keys vector to only include the fresh keys
            self.bridge_table.blocked_keys = fresh
        }
    }
|
|
|
|
|
|
2023-07-13 17:36:40 -04:00
|
|
|
|
fn clean_up_open_entry(&mut self, bdb: &mut BridgeDb) {
|
2023-07-10 17:03:10 -04:00
|
|
|
|
// First check if there are any open invitation indexes that are old enough to be replaced
|
|
|
|
|
if !self.bridge_table.open_inv_keys.is_empty()
|
|
|
|
|
&& self
|
|
|
|
|
.bridge_table
|
|
|
|
|
.open_inv_keys
|
|
|
|
|
.iter()
|
2023-07-28 12:27:49 -04:00
|
|
|
|
.any(|&x| x.1 + EXPIRY_DATE < self.today())
|
|
|
|
|
//Perhaps EXPIRY_DATE should be changed to an earlier time
|
2023-07-10 17:03:10 -04:00
|
|
|
|
{
|
|
|
|
|
// If so, separate them from the fresh open invitation indexes
|
2023-07-21 16:11:10 -04:00
|
|
|
|
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
|
|
|
|
|
.bridge_table
|
|
|
|
|
.open_inv_keys
|
2023-07-28 12:27:49 -04:00
|
|
|
|
.iter()
|
|
|
|
|
.partition(|&x| x.1 + EXPIRY_DATE < self.today());
|
2023-07-10 17:03:10 -04:00
|
|
|
|
for item in expired {
|
|
|
|
|
let new_item = item.0;
|
2023-07-25 17:24:03 -04:00
|
|
|
|
bdb.remove_blocked_or_expired_buckets(&new_item);
|
2023-07-13 17:36:40 -04:00
|
|
|
|
// Remove any trust upgrade migrations from this
|
|
|
|
|
// bucket
|
|
|
|
|
self.trustup_migration_table
|
|
|
|
|
.table
|
|
|
|
|
.retain(|&k, _| k != new_item);
|
2023-07-10 17:03:10 -04:00
|
|
|
|
self.bridge_table.buckets.remove(&new_item);
|
|
|
|
|
self.bridge_table.keys.remove(&new_item);
|
|
|
|
|
//and add them to the recyclable keys
|
|
|
|
|
self.bridge_table.recycleable_keys.push(new_item);
|
|
|
|
|
}
|
|
|
|
|
// update the open_inv_keys vector to only include the fresh keys
|
|
|
|
|
self.bridge_table.open_inv_keys = fresh
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-29 21:24:32 -04:00
|
|
|
|
#[cfg(test)]
|
2021-04-28 13:36:04 -04:00
|
|
|
|
/// For testing only: manually advance the day by 1 day
|
|
|
|
|
pub fn advance_day(&mut self) {
|
|
|
|
|
self.time_offset += time::Duration::days(1);
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-09 16:24:43 -05:00
|
|
|
|
//#[cfg(test)]
|
2021-04-28 13:36:04 -04:00
|
|
|
|
/// For testing only: manually advance the day by the given number
|
|
|
|
|
/// of days
|
|
|
|
|
pub fn advance_days(&mut self, days: u16) {
|
|
|
|
|
self.time_offset += time::Duration::days(days.into());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Get today's (real or simulated) date
|
2023-02-09 16:24:43 -05:00
|
|
|
|
pub fn today(&self) -> u32 {
|
2021-04-28 18:31:47 -04:00
|
|
|
|
// We will not encounter negative Julian dates (~6700 years ago)
|
2021-05-01 15:33:45 -04:00
|
|
|
|
// or ones larger than 32 bits
|
2021-04-28 18:31:47 -04:00
|
|
|
|
(time::OffsetDateTime::now_utc().date() + self.time_offset)
|
2023-06-07 15:34:55 -04:00
|
|
|
|
.to_julian_day()
|
2021-04-28 18:31:47 -04:00
|
|
|
|
.try_into()
|
|
|
|
|
.unwrap()
|
2021-04-28 13:36:04 -04:00
|
|
|
|
}
|
2021-04-29 21:24:32 -04:00
|
|
|
|
|
2023-07-27 16:05:20 -04:00
|
|
|
|
    /// Get today's (real or simulated) date
    pub fn today_date(&self) -> DateTime<Utc> {
        // NOTE(review): unlike today(), this does not apply
        // self.time_offset, so simulated time advancement (advance_day /
        // advance_days) is not reflected here — confirm this is intended.
        Utc::now()
    }
|
|
|
|
|
|
2021-05-01 17:12:03 -04:00
|
|
|
|
/// Get a reference to the encrypted bridge table.
|
|
|
|
|
///
|
|
|
|
|
/// Be sure to call this function when you want the latest version
|
|
|
|
|
/// of the table, since it will put fresh Bucket Reachability
|
|
|
|
|
/// credentials in the buckets each day.
|
2023-07-19 10:30:35 -04:00
|
|
|
|
pub fn enc_bridge_table(&mut self) -> &HashMap<u32, EncryptedBucket> {
|
2021-05-01 17:12:03 -04:00
|
|
|
|
let today = self.today();
|
|
|
|
|
if self.bridge_table.date_last_enc != today {
|
|
|
|
|
self.bridge_table
|
|
|
|
|
.encrypt_table(today, &self.reachability_priv);
|
|
|
|
|
}
|
|
|
|
|
&self.bridge_table.encbuckets
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-29 21:24:32 -04:00
|
|
|
|
#[cfg(test)]
|
2021-04-30 16:24:42 -04:00
|
|
|
|
/// Verify the two MACs on a Lox credential
|
2021-04-29 21:24:32 -04:00
|
|
|
|
pub fn verify_lox(&self, cred: &cred::Lox) -> bool {
|
2021-05-03 14:13:13 -04:00
|
|
|
|
if cred.P.is_identity() {
|
2021-04-30 16:24:42 -04:00
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-29 21:24:32 -04:00
|
|
|
|
let Q = (self.lox_priv.x[0]
|
|
|
|
|
+ cred.id * self.lox_priv.x[1]
|
|
|
|
|
+ cred.bucket * self.lox_priv.x[2]
|
|
|
|
|
+ cred.trust_level * self.lox_priv.x[3]
|
|
|
|
|
+ cred.level_since * self.lox_priv.x[4]
|
|
|
|
|
+ cred.invites_remaining * self.lox_priv.x[5]
|
2021-05-03 14:27:11 -04:00
|
|
|
|
+ cred.blockages * self.lox_priv.x[6])
|
2021-04-29 21:24:32 -04:00
|
|
|
|
* cred.P;
|
2021-04-30 16:24:42 -04:00
|
|
|
|
|
2021-05-03 14:14:17 -04:00
|
|
|
|
Q == cred.Q
|
2021-04-29 21:24:32 -04:00
|
|
|
|
}
|
2021-04-30 13:30:20 -04:00
|
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
/// Verify the MAC on a Migration credential
|
|
|
|
|
pub fn verify_migration(&self, cred: &cred::Migration) -> bool {
|
2021-04-30 16:24:42 -04:00
|
|
|
|
if cred.P.is_identity() {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-30 13:30:20 -04:00
|
|
|
|
let Q = (self.migration_priv.x[0]
|
|
|
|
|
+ cred.lox_id * self.migration_priv.x[1]
|
|
|
|
|
+ cred.from_bucket * self.migration_priv.x[2]
|
|
|
|
|
+ cred.to_bucket * self.migration_priv.x[3])
|
|
|
|
|
* cred.P;
|
2021-05-03 14:14:17 -04:00
|
|
|
|
|
|
|
|
|
Q == cred.Q
|
2021-04-30 13:30:20 -04:00
|
|
|
|
}
|
2021-05-01 17:12:03 -04:00
|
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
/// Verify the MAC on a Bucket Reachability credential
|
|
|
|
|
pub fn verify_reachability(&self, cred: &cred::BucketReachability) -> bool {
|
|
|
|
|
if cred.P.is_identity() {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let Q = (self.reachability_priv.x[0]
|
|
|
|
|
+ cred.date * self.reachability_priv.x[1]
|
|
|
|
|
+ cred.bucket * self.reachability_priv.x[2])
|
|
|
|
|
* cred.P;
|
2021-05-03 14:14:17 -04:00
|
|
|
|
|
|
|
|
|
Q == cred.Q
|
2021-05-01 17:12:03 -04:00
|
|
|
|
}
|
2021-05-03 19:05:42 -04:00
|
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
/// Verify the MAC on a Invitation credential
|
|
|
|
|
pub fn verify_invitation(&self, cred: &cred::Invitation) -> bool {
|
|
|
|
|
if cred.P.is_identity() {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let Q = (self.invitation_priv.x[0]
|
|
|
|
|
+ cred.inv_id * self.invitation_priv.x[1]
|
|
|
|
|
+ cred.date * self.invitation_priv.x[2]
|
|
|
|
|
+ cred.bucket * self.invitation_priv.x[3]
|
|
|
|
|
+ cred.blockages * self.invitation_priv.x[4])
|
|
|
|
|
* cred.P;
|
|
|
|
|
|
|
|
|
|
Q == cred.Q
|
|
|
|
|
}
|
2021-04-28 13:36:04 -04:00
|
|
|
|
}
|
2021-04-28 15:42:16 -04:00
|
|
|
|
|
2021-04-29 18:22:06 -04:00
|
|
|
|
/// Try to extract a u64 from a Scalar
|
|
|
|
|
pub fn scalar_u64(s: &Scalar) -> Option<u64> {
|
|
|
|
|
// Check that the top 24 bytes of the Scalar are 0
|
|
|
|
|
let sbytes = s.as_bytes();
|
|
|
|
|
if sbytes[8..].ct_eq(&[0u8; 24]).unwrap_u8() == 0 {
|
|
|
|
|
return None;
|
|
|
|
|
}
|
|
|
|
|
Some(u64::from_le_bytes(sbytes[..8].try_into().unwrap()))
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-01 15:33:45 -04:00
|
|
|
|
/// Try to extract a u32 from a Scalar
|
|
|
|
|
pub fn scalar_u32(s: &Scalar) -> Option<u32> {
|
|
|
|
|
// Check that the top 28 bytes of the Scalar are 0
|
|
|
|
|
let sbytes = s.as_bytes();
|
|
|
|
|
if sbytes[4..].ct_eq(&[0u8; 28]).unwrap_u8() == 0 {
|
|
|
|
|
return None;
|
|
|
|
|
}
|
|
|
|
|
Some(u32::from_le_bytes(sbytes[..4].try_into().unwrap()))
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-29 18:22:06 -04:00
|
|
|
|
/// Double a Scalar
|
|
|
|
|
pub fn scalar_dbl(s: &Scalar) -> Scalar {
|
|
|
|
|
s + s
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-29 21:24:32 -04:00
|
|
|
|
/// Double a RistrettoPoint
|
|
|
|
|
pub fn pt_dbl(P: &RistrettoPoint) -> RistrettoPoint {
|
|
|
|
|
P + P
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-01 15:21:50 -04:00
|
|
|
|
/// The protocol modules.
///
/// Each protocol lives in a submodule.  Each submodule defines structs
/// for Request (the message from the client to the bridge authority),
/// State (the state held by the client while waiting for the reply),
/// and Response (the message from the bridge authority to the client).
/// Each submodule defines functions request, which produces a (Request,
/// State) pair, and handle_response, which consumes a State and a
/// Response.  It also adds a handle_* function to the BridgeAuth struct
/// that consumes a Request and produces a Result<Response, ProofError>.
pub mod proto {
    // Keep this list alphabetized.
    pub mod blockage_migration;
    pub mod check_blockage;
    pub mod issue_invite;
    pub mod level_up;
    pub mod migration;
    pub mod open_invite;
    pub mod redeem_invite;
    pub mod trust_promotion;
}
|
2021-04-28 18:48:52 -04:00
|
|
|
|
|
2021-04-29 16:12:53 -04:00
|
|
|
|
// Unit tests
|
2021-04-28 18:48:52 -04:00
|
|
|
|
#[cfg(test)]
|
2021-04-29 16:12:53 -04:00
|
|
|
|
mod tests;
|