// lox/crates/lox-library/src/lib.rs

/*! Implementation of a new style of bridge authority for Tor that
allows users to invite other users, while protecting the social graph
from the bridge authority itself.
We use CMZ14 credentials (GGM version, which is more efficient, but
makes a stronger security assumption): "Algebraic MACs and
Keyed-Verification Anonymous Credentials" (Chase, Meiklejohn, and
Zaverucha, CCS 2014)
The notation follows that of the paper "Hyphae: Social Secret Sharing"
(Lovecruft and de Valence, 2017), Section 4. */
// We really want points to be capital letters and scalars to be
// lowercase letters
#![allow(non_snake_case)]
#[macro_use]
extern crate lox_zkp;
pub mod bridge_table;
pub mod cred;
pub mod dup_filter;
pub mod migration_table;
use chrono::Duration;
use chrono::{DateTime, Utc};
use sha2::Sha512;
use curve25519_dalek::constants as dalek_constants;
use curve25519_dalek::ristretto::RistrettoBasepointTable;
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;
#[cfg(test)]
use curve25519_dalek::traits::IsIdentity;
use rand::rngs::OsRng;
use rand::Rng;
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use ed25519_dalek::{Signature, SignatureError, Signer, SigningKey, Verifier, VerifyingKey};
use subtle::ConstantTimeEq;
use std::collections::HashSet;
use bridge_table::{
BridgeLine, BridgeTable, EncryptedBucket, MAX_BRIDGES_PER_BUCKET, MIN_BUCKET_REACHABILITY,
};
use migration_table::{MigrationTable, MigrationType};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use thiserror::Error;
lazy_static! {
pub static ref CMZ_A: RistrettoPoint =
RistrettoPoint::hash_from_bytes::<Sha512>(b"CMZ Generator A");
pub static ref CMZ_B: RistrettoPoint = dalek_constants::RISTRETTO_BASEPOINT_POINT;
pub static ref CMZ_A_TABLE: RistrettoBasepointTable = RistrettoBasepointTable::create(&CMZ_A);
pub static ref CMZ_B_TABLE: RistrettoBasepointTable =
dalek_constants::RISTRETTO_BASEPOINT_TABLE.clone();
}
// EXPIRY_DATE is the number of days after which open-entry and blocked buckets
// expire, matching the expiry date of Lox credentials. This particular value is
// chosen because values of the form 2^k - 1 make range proofs more efficient, but
// it can be changed to any value.
pub const EXPIRY_DATE: u32 = 511;
/// ReplaceSuccess sends a signal to the lox-distributor to inform
/// whether or not a bridge was successfully replaced
#[derive(PartialEq, Eq)]
pub enum ReplaceSuccess {
NotFound = 0,
NotReplaced = 1,
Replaced = 2,
}
/// This error is thrown if the number of buckets/keys in the bridge table
/// exceeds u32::MAX. It is unlikely this error will ever occur.
#[derive(Error, Debug)]
pub enum NoAvailableIDError {
#[error("Find key exhausted with no available index found!")]
ExhaustedIndexer,
}
/// This error is thrown after the MAX_DAILY_BRIDGES threshold for bridges
/// distributed in a day has been reached
#[derive(Error, Debug)]
pub enum OpenInvitationError {
#[error("The maximum number of bridges has already been distributed today, please try again tomorrow!")]
ExceededMaxBridges,
#[error("There are no bridges available for open invitations.")]
NoBridgesAvailable,
}
#[derive(Error, Debug)]
pub enum BridgeTableError {
#[error("The bucket corresponding to key {0} was not in the bridge table")]
MissingBucket(u32),
}
/// Private Key of the Issuer
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IssuerPrivKey {
x0tilde: Scalar,
x: Vec<Scalar>,
}
impl IssuerPrivKey {
/// Create an IssuerPrivKey for credentials with the given number of
/// attributes.
pub fn new(n: u16) -> IssuerPrivKey {
let mut rng = rand::thread_rng();
let x0tilde = Scalar::random(&mut rng);
let mut x: Vec<Scalar> = Vec::with_capacity((n + 1) as usize);
// Set x to a vector of n+1 random Scalars
x.resize_with((n + 1) as usize, || Scalar::random(&mut rng));
IssuerPrivKey { x0tilde, x }
}
}
/// Public Key of the Issuer
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IssuerPubKey {
X: Vec<RistrettoPoint>,
}
impl IssuerPubKey {
/// Create an IssuerPubKey from the corresponding IssuerPrivKey
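/// A brief usage sketch (assuming the crate is referenced as `lox_library`;
/// the attribute count 6 matches the main Lox credential created in
/// `BridgeAuth::new`):
///
/// ```ignore
/// use lox_library::{IssuerPrivKey, IssuerPubKey};
/// // Private key for a credential with 6 attributes...
/// let lox_priv = IssuerPrivKey::new(6);
/// // ...and the matching public key; X holds n+1 = 7 points.
/// let lox_pub = IssuerPubKey::new(&lox_priv);
/// ```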
pub fn new(privkey: &IssuerPrivKey) -> IssuerPubKey {
let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;
let n_plus_one = privkey.x.len();
let mut X: Vec<RistrettoPoint> = Vec::with_capacity(n_plus_one);
// The first element is a special case; it is
// X[0] = x0tilde*A + x[0]*B
X.push(&privkey.x0tilde * Atable + &privkey.x[0] * Btable);
// The other elements (1 through n) are X[i] = x[i]*A
X.extend(privkey.x.iter().skip(1).map(|xi| xi * Atable));
IssuerPubKey { X }
}
}
/// Number of times a given open-invitation bucket is distributed
pub const OPENINV_K: u32 = 10;
/// TODO: Decide on maximum daily number of invitations to be distributed
pub const MAX_DAILY_BRIDGES: u32 = 100;
/// The BridgeDb. This will typically be a singleton object. The
/// BridgeDb's role is simply to issue signed "open invitations" to
/// people who are not yet part of the system.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeDb {
/// The keypair for signing open invitations
keypair: SigningKey,
/// The public key for verifying open invitations
pub pubkey: VerifyingKey,
/// The set of open-invitation buckets
openinv_buckets: HashSet<u32>,
/// The set of open invitation buckets that have been distributed
distributed_buckets: Vec<u32>,
#[serde(skip)]
today: DateTime<Utc>,
pub current_k: u32,
pub daily_bridges_distributed: u32,
}
/// An open invitation is a [u8; OPENINV_LENGTH] where the first 32
/// bytes are the serialization of a random Scalar (the invitation id),
/// the next 4 bytes are a little-endian bucket number, and the last
/// SIGNATURE_LENGTH bytes are the signature on the first 36 bytes.
pub const OPENINV_LENGTH: usize = 32 // the length of the random
// invitation id (a Scalar)
+ 4 // the length of the u32 for the bucket number
+ ed25519_dalek::SIGNATURE_LENGTH; // the length of the signature
impl BridgeDb {
/// Create the BridgeDb.
pub fn new() -> Self {
let mut csprng = OsRng {};
let keypair = SigningKey::generate(&mut csprng);
let pubkey = keypair.verifying_key();
Self {
keypair,
pubkey,
openinv_buckets: Default::default(),
distributed_buckets: Default::default(),
today: Utc::now(),
current_k: 0,
daily_bridges_distributed: 0,
}
}
/// Insert an open-invitation bucket into the set
pub fn insert_openinv(&mut self, bucket: u32) {
self.openinv_buckets.insert(bucket);
}
/// Remove an open-invitation bucket from the set
pub fn remove_openinv(&mut self, bucket: &u32) {
self.openinv_buckets.remove(bucket);
}
/// Remove open invitation and/or otherwise distributed buckets that have
/// become blocked or are expired to free up the index for a new bucket
pub fn remove_blocked_or_expired_buckets(&mut self, bucket: &u32) {
if self.openinv_buckets.contains(bucket) {
println!("Removing a bucket that has not been distributed yet!");
self.openinv_buckets.remove(bucket);
} else if self.distributed_buckets.contains(bucket) {
self.distributed_buckets.retain(|&x| x != *bucket);
}
}
/// Mark a bucket as distributed
pub fn mark_distributed(&mut self, bucket: u32) {
self.distributed_buckets.push(bucket);
}
/// Produce an open invitation. The same open-invitation bucket, chosen
/// randomly from the set of open-invitation buckets, is handed out to up to
/// OPENINV_K consecutive callers before a new bucket is chosen.
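/// A hedged usage sketch (the bucket index 7 is purely illustrative, and the
/// caller is still responsible for checking the returned invitation id against
/// its duplicate filter):
///
/// ```ignore
/// use lox_library::BridgeDb;
/// let mut bdb = BridgeDb::new();
/// bdb.insert_openinv(7);
/// let inv = bdb.invite().expect("an open-invitation bucket is available");
/// // Anyone holding the BridgeDb public key can check the invitation.
/// let (_id, bucket) = BridgeDb::verify(inv, bdb.pubkey).expect("valid signature");
/// assert_eq!(bucket, 7);
/// ```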
pub fn invite(&mut self) -> Result<[u8; OPENINV_LENGTH], OpenInvitationError> {
let mut res: [u8; OPENINV_LENGTH] = [0; OPENINV_LENGTH];
let mut rng = rand::thread_rng();
// Choose a random invitation id (a Scalar) and serialize it
let id = Scalar::random(&mut rng);
res[0..32].copy_from_slice(&id.to_bytes());
let bucket_num: u32;
if Utc::now() >= (self.today + Duration::days(1)) {
self.today = Utc::now();
self.daily_bridges_distributed = 0;
}
if self.daily_bridges_distributed < MAX_DAILY_BRIDGES {
if self.current_k < OPENINV_K && !self.distributed_buckets.is_empty() {
bucket_num = *self.distributed_buckets.last().unwrap();
self.current_k += 1;
} else {
if self.openinv_buckets.is_empty() {
return Err(OpenInvitationError::NoBridgesAvailable);
}
// Choose a random bucket number (from the set of open
// invitation buckets) and serialize it
let openinv_vec: Vec<&u32> = self.openinv_buckets.iter().collect();
bucket_num = *openinv_vec[rng.gen_range(0..openinv_vec.len())];
self.mark_distributed(bucket_num);
self.remove_openinv(&bucket_num);
self.current_k = 1;
self.daily_bridges_distributed += 1;
}
res[32..(32 + 4)].copy_from_slice(&bucket_num.to_le_bytes());
// Sign the first 36 bytes and serialize it
let sig = self.keypair.sign(&res[0..(32 + 4)]);
res[(32 + 4)..].copy_from_slice(&sig.to_bytes());
Ok(res)
} else {
Err(OpenInvitationError::ExceededMaxBridges)
}
}
/// Verify an open invitation. Returns the invitation id and the
/// bucket number if the signature checked out. It is up to the
/// caller to then check that the invitation id has not been used
/// before.
pub fn verify(
invitation: [u8; OPENINV_LENGTH],
pubkey: VerifyingKey,
) -> Result<(Scalar, u32), SignatureError> {
// Pull out the signature and verify it
let sig = Signature::try_from(&invitation[(32 + 4)..])?;
pubkey.verify(&invitation[0..(32 + 4)], &sig)?;
// The signature passed. Pull out the bucket number and then
// the invitation id
let bucket = u32::from_le_bytes(invitation[32..(32 + 4)].try_into().unwrap());
let s = Scalar::from_canonical_bytes(invitation[0..32].try_into().unwrap());
if s.is_some().into() {
Ok((s.unwrap(), bucket))
} else {
// It should never happen that there's a valid signature on
// an invalid serialization of a Scalar, but check anyway.
Err(SignatureError::new())
}
}
}
impl Default for BridgeDb {
fn default() -> Self {
Self::new()
}
}
/// The bridge authority. This will typically be a singleton object.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeAuth {
/// The private key for the main Lox credential
lox_priv: IssuerPrivKey,
/// The public key for the main Lox credential
pub lox_pub: IssuerPubKey,
/// The private key for migration credentials
migration_priv: IssuerPrivKey,
/// The public key for migration credentials
pub migration_pub: IssuerPubKey,
/// The private key for migration key credentials
migrationkey_priv: IssuerPrivKey,
/// The public key for migration key credentials
pub migrationkey_pub: IssuerPubKey,
/// The private key for bucket reachability credentials
reachability_priv: IssuerPrivKey,
/// The public key for bucket reachability credentials
pub reachability_pub: IssuerPubKey,
/// The private key for invitation credentials
invitation_priv: IssuerPrivKey,
/// The public key for invitation credentials
pub invitation_pub: IssuerPubKey,
/// The public key of the BridgeDb issuing open invitations
pub bridgedb_pub: VerifyingKey,
/// The bridge table
pub bridge_table: BridgeTable,
/// The migration tables
trustup_migration_table: MigrationTable,
blockage_migration_table: MigrationTable,
/// Duplicate filter for open invitations
openinv_filter: dup_filter::DupFilter<Scalar>,
/// Duplicate filter for Lox credential ids
id_filter: dup_filter::DupFilter<Scalar>,
/// Duplicate filter for Invitation credential ids
inv_id_filter: dup_filter::DupFilter<Scalar>,
/// Duplicate filter for trust promotions (from untrusted level 0 to
/// trusted level 1)
trust_promotion_filter: dup_filter::DupFilter<Scalar>,
/// For testing only: offset of the true time to the simulated time
#[serde(skip)]
time_offset: time::Duration,
}
impl BridgeAuth {
pub fn new(bridgedb_pub: VerifyingKey) -> Self {
// Create the private and public keys for each of the types of
// credential, each with the appropriate number of attributes
let lox_priv = IssuerPrivKey::new(6);
let lox_pub = IssuerPubKey::new(&lox_priv);
let migration_priv = IssuerPrivKey::new(4);
let migration_pub = IssuerPubKey::new(&migration_priv);
let migrationkey_priv = IssuerPrivKey::new(2);
let migrationkey_pub = IssuerPubKey::new(&migrationkey_priv);
let reachability_priv = IssuerPrivKey::new(2);
let reachability_pub = IssuerPubKey::new(&reachability_priv);
let invitation_priv = IssuerPrivKey::new(4);
let invitation_pub = IssuerPubKey::new(&invitation_priv);
Self {
lox_priv,
lox_pub,
migration_priv,
migration_pub,
migrationkey_priv,
migrationkey_pub,
reachability_priv,
reachability_pub,
invitation_priv,
invitation_pub,
bridgedb_pub,
bridge_table: Default::default(),
trustup_migration_table: MigrationTable::new(MigrationType::TrustUpgrade),
blockage_migration_table: MigrationTable::new(MigrationType::Blockage),
openinv_filter: Default::default(),
id_filter: Default::default(),
inv_id_filter: Default::default(),
trust_promotion_filter: Default::default(),
time_offset: time::Duration::ZERO,
}
}
pub fn is_empty(&mut self) -> bool {
self.bridge_table.buckets.is_empty()
}
/// Insert a set of open invitation bridges.
///
/// Each of the bridges will be given its own open invitation
/// bucket, and the BridgeDb will be informed. A single bucket
/// containing all of the bridges will also be created, with a trust
/// upgrade migration from each of the single-bridge buckets.
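/// A hedged setup sketch (the `BridgeLine::default()` placeholders stand in
/// for real bridge lines received from rdsys):
///
/// ```ignore
/// use lox_library::{BridgeAuth, BridgeDb};
/// use lox_library::bridge_table::{BridgeLine, MAX_BRIDGES_PER_BUCKET};
/// let mut bdb = BridgeDb::new();
/// let mut ba = BridgeAuth::new(bdb.pubkey);
/// let bucket = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
/// ba.add_openinv_bridges(bucket, &mut bdb).expect("a free bucket index is available");
/// ```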
pub fn add_openinv_bridges(
&mut self,
bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
bdb: &mut BridgeDb,
) -> Result<(), NoAvailableIDError> {
let bindex = match self.find_next_available_key(bdb) {
Ok(sindex) => sindex,
Err(e) => return Err(e),
};
self.bridge_table.new_bucket(bindex, &bridges);
let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
for b in bridges.iter() {
let sindex = match self.find_next_available_key(bdb) {
Ok(sindex) => sindex,
Err(e) => return Err(e),
};
single[0] = *b;
self.bridge_table.new_bucket(sindex, &single);
self.bridge_table.open_inv_keys.push((sindex, self.today()));
bdb.insert_openinv(sindex);
self.trustup_migration_table.table.insert(sindex, bindex);
}
Ok(())
}
/// Insert a hot spare bucket of bridges
pub fn add_spare_bucket(
&mut self,
bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
bdb: &mut BridgeDb,
) -> Result<(), NoAvailableIDError> {
let index = match self.find_next_available_key(bdb) {
Ok(index) => index,
Err(e) => return Err(e),
};
self.bridge_table.new_bucket(index, &bucket);
self.bridge_table.spares.insert(index);
Ok(())
}
/// When syncing the Lox bridge table with rdsys, this function returns any bridges
/// in the Lox bridge table that do not appear in the vector of bridges received
/// from rdsys through the Lox distributor.
pub fn find_and_remove_unaccounted_for_bridges(
&mut self,
accounted_for_bridges: Vec<u64>,
) -> Vec<BridgeLine> {
let mut unaccounted_for: Vec<BridgeLine> = Vec::new();
for (k, _v) in self.bridge_table.reachable.clone() {
if !accounted_for_bridges.contains(&k.uid_fingerprint) {
unaccounted_for.push(k);
}
}
unaccounted_for
}
/// Allocate single leftover bridges to an open invitation bucket
pub fn allocate_bridges(
&mut self,
distributor_bridges: &mut Vec<BridgeLine>,
bdb: &mut BridgeDb,
) {
while let Some(bridge) = distributor_bridges.pop() {
self.bridge_table.unallocated_bridges.push(bridge);
}
while self.bridge_table.unallocated_bridges.len() >= MAX_BRIDGES_PER_BUCKET {
let mut bucket = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
for bridge in bucket.iter_mut() {
*bridge = self.bridge_table.unallocated_bridges.pop().unwrap();
}
match self.add_openinv_bridges(bucket, bdb) {
Ok(_) => continue,
Err(e) => {
println!("Error: {:?}", e);
for bridge in bucket {
self.bridge_table.unallocated_bridges.push(bridge);
}
}
}
}
}
// Update the details of a bridge in the bridge table. This assumes that the IP and port
// of a given bridge remain the same, so its other details can be updated in place.
// First we retrieve the list of reachable bridges, then search for any entry matching our
// partial key (which includes the IP and port). Finally, we replace the original bridge
// with the updated bridge.
// Returns true if the bridge was successfully updated
pub fn bridge_update(&mut self, bridge: &BridgeLine) -> bool {
let mut res: bool = false; // default to false, assuming the update failed
let reachable_bridges = self.bridge_table.reachable.clone();
for reachable_bridge in reachable_bridges {
if reachable_bridge.0.uid_fingerprint == bridge.uid_fingerprint {
// Now we must remove the old bridge from the table and insert the new bridge in its place
// i.e., in the same bucket and with the same permissions.
let positions = self.bridge_table.reachable.get(&reachable_bridge.0);
if let Some(v) = positions {
for (bucketnum, offset) in v.iter() {
let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
Some(bridgelines) => *bridgelines,
None => return res,
};
assert!(bridgelines[*offset] == reachable_bridge.0);
bridgelines[*offset] = *bridge;
self.bridge_table.buckets.insert(*bucketnum, bridgelines);
if self.bridge_table.buckets.get(bucketnum).is_none() {
return res;
}
}
res = true;
} else {
return res;
}
// We must also remove the old bridge from the reachable bridges table
// and add the new bridge
self.bridge_table.reachable.remove(&reachable_bridge.0);
self.bridge_table
.reachable
.insert(*bridge, reachable_bridge.1);
return res;
}
}
// If this is returned, we assume that the bridge wasn't found in the bridge table
// and therefore should be treated as a "new bridge"
res
}
// Repurpose a bucket of spares into unallocated bridges
pub fn dissolve_spare_bucket(&mut self, key: u32) -> Result<(), BridgeTableError> {
self.bridge_table.spares.remove(&key);
// Get the actual bridges from the spare bucket
let spare_bucket = self
.bridge_table
.buckets
.remove(&key)
.ok_or(BridgeTableError::MissingBucket(key))?;
for bridge in spare_bucket.iter() {
self.bridge_table.unallocated_bridges.push(*bridge);
// Mark bucket as unreachable while it is unallocated
self.bridge_table.reachable.remove(bridge);
}
self.bridge_table.keys.remove(&key);
self.bridge_table.recycleable_keys.push(key);
Ok(())
}
/// Attempt to remove a bridge that is failing tests and replace it with a bridge from
/// available_bridge or from a spare bucket
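/// A hedged calling sketch (`failing_bridge` is a hypothetical BridgeLine
/// already present in the bridge table; passing `None` lets the authority fall
/// back to unallocated bridges or a dissolved spare bucket):
///
/// ```ignore
/// use lox_library::ReplaceSuccess;
/// match ba.bridge_replace(&failing_bridge, None) {
///     ReplaceSuccess::Replaced => println!("bridge replaced in all of its buckets"),
///     ReplaceSuccess::NotReplaced => println!("no replacement available yet; keep the bridge listed"),
///     ReplaceSuccess::NotFound => println!("bridge is not in the reachable table"),
/// }
/// ```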
pub fn bridge_replace(
&mut self,
bridge: &BridgeLine,
available_bridge: Option<BridgeLine>,
) -> ReplaceSuccess {
let reachable_bridges = &self.bridge_table.reachable.clone();
let Some(positions) = reachable_bridges.get(bridge) else {
return ReplaceSuccess::NotFound;
};
// select replacement:
// - first try the given bridge
// - second try to pick one from the set of available bridges
// - third dissolve a spare bucket to create more available bridges
let Some(replacement) = available_bridge.or_else(|| {
self.bridge_table.unallocated_bridges.pop().or_else(|| {
let Some(spare) = self
.bridge_table
.spares
.iter()
// in case bridge is a spare, avoid replacing it with itself
.find(|x| !positions.iter().any(|(bucketnum, _)| &bucketnum == x))
.cloned()
else {
return None;
};
let Ok(_) = self.dissolve_spare_bucket(spare) else {
return None;
};
self.bridge_table.unallocated_bridges.pop()
})
}) else {
// If there are no available bridges that can be assigned here, the only thing
// that can be done is to return an indication that replacing the missing bridge
// didn't work.
// In this case, we do not mark the bridge as unreachable or remove it from the
// reachable bridges, so that we can still find it when a new bridge does become available.
println!("No available bridges");
return ReplaceSuccess::NotReplaced;
};
for (bucketnum, offset) in positions.iter() {
let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
Some(bridgelines) => *bridgelines,
None => return ReplaceSuccess::NotFound,
};
assert!(bridgelines[*offset] == *bridge);
bridgelines[*offset] = replacement;
self.bridge_table.buckets.insert(*bucketnum, bridgelines);
// Remove the bridge from the reachable bridges and add new bridge
self.bridge_table
.reachable
.insert(replacement, positions.clone());
// Remove the bridge from the bucket
self.bridge_table.reachable.remove(bridge);
}
ReplaceSuccess::Replaced
}
/// Mark a bridge as blocked
///
/// This bridge will be removed from each of the buckets that
/// contains it. If any of those are open-invitation buckets, the
/// trust upgrade migration for that bucket will be removed and the
/// BridgeDb will be informed to stop handing out that bridge. If
/// any of those are trusted buckets where the number of reachable
/// bridges has fallen below the threshold, a blockage migration
/// from that bucket to a spare bucket will be added, and the spare
/// bucket will be removed from the list of hot spares. In
/// addition, if the blocked bucket was the _target_ of a blockage
/// migration, change the target to the new (formerly spare) bucket.
/// Returns true if successful, or false if it needed a hot spare but
/// there was none available.
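/// A hedged calling sketch (`blocked_bridge` is a hypothetical BridgeLine
/// reported as blocked by an external censorship-measurement source):
///
/// ```ignore
/// if !ba.bridge_blocked(&blocked_bridge, &mut bdb) {
///     // A blockage migration target was needed but no hot spare was available.
///     println!("no spare bucket available for blockage migration");
/// }
/// ```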
pub fn bridge_blocked(&mut self, bridge: &BridgeLine, bdb: &mut BridgeDb) -> bool {
let mut res: bool = true;
if self.bridge_table.unallocated_bridges.contains(bridge) {
let index = self
.bridge_table
.unallocated_bridges
.iter()
.position(|&b| b == *bridge)
.unwrap();
self.bridge_table.unallocated_bridges.remove(index);
res = true;
} else {
let positions = self.bridge_table.reachable.get(bridge);
if let Some(v) = positions {
for (bucketnum, offset) in v.iter() {
// Count how many bridges in this bucket are reachable
let mut bucket = match self.bridge_table.buckets.get(bucketnum) {
Some(bridgelines) => *bridgelines,
None => return false, // This should not happen
};
let numreachable = bucket
.iter()
.filter(|br| self.bridge_table.reachable.get(br).is_some())
.count();
// Remove the bridge from the bucket
assert!(bucket[*offset] == *bridge);
bucket[*offset] = BridgeLine::default();
// Is this bucket an open-invitation bucket?
if bdb.openinv_buckets.contains(bucketnum)
|| bdb.distributed_buckets.contains(bucketnum)
{
bdb.remove_blocked_or_expired_buckets(bucketnum);
self.trustup_migration_table.table.remove(bucketnum);
continue;
}
// Does this removal cause the bucket to go below the
// threshold?
if numreachable != MIN_BUCKET_REACHABILITY {
// No
continue;
}
// This bucket is now unreachable. Get a spare bucket
if self.bridge_table.spares.is_empty() {
// Uh, oh. No spares available. Just delete any
// migrations leading to this bucket.
res = false;
self.trustup_migration_table
.table
.retain(|_, &mut v| v != *bucketnum);
self.blockage_migration_table
.table
.retain(|_, &mut v| v != *bucketnum);
} else {
// Get the first spare and remove it from the spares
// set.
let spare = *self.bridge_table.spares.iter().next().unwrap();
self.bridge_table.spares.remove(&spare);
self.bridge_table
.blocked_keys
.push((*bucketnum, self.today()));
// Add a blockage migration from this bucket to the spare
self.blockage_migration_table
.table
.insert(*bucketnum, spare);
// Remove any trust upgrade migrations to this
// bucket
self.trustup_migration_table
.table
.retain(|_, &mut v| v != *bucketnum);
// Change any blockage migrations with this bucket
// as the destination to the spare
for (_, v) in self.blockage_migration_table.table.iter_mut() {
if *v == *bucketnum {
*v = spare;
}
}
}
}
}
self.bridge_table.reachable.remove(bridge);
}
res
}
// Since buckets are moved around in the bridge_table, finding a lookup key that
// does not overwrite existing bridges could become an issue. We keep a list
// of recycleable lookup keys from buckets that have been removed and prioritize
// this list before increasing the counter
fn find_next_available_key(&mut self, bdb: &mut BridgeDb) -> Result<u32, NoAvailableIDError> {
self.clean_up_expired_buckets(bdb);
if self.bridge_table.recycleable_keys.is_empty() {
let mut test_index = 1;
let mut test_counter = self.bridge_table.counter.wrapping_add(test_index);
let mut i = 0;
while self.bridge_table.buckets.contains_key(&test_counter) && i < 5000 {
test_index += 1;
test_counter = self.bridge_table.counter.wrapping_add(test_index);
i += 1;
if i == 5000 {
return Err(NoAvailableIDError::ExhaustedIndexer);
}
}
self.bridge_table.counter = self.bridge_table.counter.wrapping_add(test_index);
Ok(self.bridge_table.counter)
} else {
Ok(self.bridge_table.recycleable_keys.pop().unwrap())
}
}
// This function looks for and removes buckets so their indexes can be reused.
// This should include buckets that have been blocked for a sufficiently long period
// that we no longer want to allow migration to them, or else open-entry buckets that
// have been unblocked long enough to become trusted and whose users' credentials
// would have expired (after EXPIRY_DATE)
pub fn clean_up_expired_buckets(&mut self, bdb: &mut BridgeDb) {
// First check if there are any blocked indexes that are old enough to be replaced
self.clean_up_blocked();
// Next do the same for open_invitations buckets
self.clean_up_open_entry(bdb);
}
/// Cleans up expired blocked buckets
fn clean_up_blocked(&mut self) {
if !self.bridge_table.blocked_keys.is_empty()
&& self
.bridge_table
.blocked_keys
.iter()
.any(|&x| x.1 + EXPIRY_DATE < self.today())
{
// If there are expired blockages, separate them from the fresh blockages
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
.bridge_table
.blocked_keys
.iter()
.partition(|&x| x.1 + EXPIRY_DATE < self.today());
for item in expired {
let new_item = item.0;
// check each single bridge line and ensure none are still marked as reachable.
// if any are still reachable, remove from reachable bridges.
// When syncing resources, we will likely have to reallocate this bridge but if it hasn't already been
// blocked, this might be fine?
let bridgelines = self.bridge_table.buckets.get(&new_item).unwrap();
for bridgeline in bridgelines {
// If the bridge hasn't been set to default, assume it's still reachable
if bridgeline.port > 0 {
// Move to unallocated bridges
self.bridge_table.unallocated_bridges.push(*bridgeline);
// Check if it's still in the reachable bridges. It should be if we've gotten this far.
if let Some(_reachable_indexes_for_bridgeline) =
self.bridge_table.reachable.get(bridgeline)
{
// and remove it until it's reallocated
self.bridge_table.reachable.remove(bridgeline);
}
}
}
// Then remove the bucket and keys at the specified index
self.bridge_table.buckets.remove(&new_item);
self.bridge_table.keys.remove(&new_item);
//and add them to the recyclable keys
self.bridge_table.recycleable_keys.push(new_item);
// Remove the expired blocked bucket from the blockage migration table,
// assuming that anyone that has still not attempted to migrate from their
// blocked bridge after the EXPIRY_DATE probably doesn't still need to migrate.
self.blockage_migration_table
.table
.retain(|&k, _| k != new_item);
}
// Finally, update the blocked_keys vector to only include the fresh keys
self.bridge_table.blocked_keys = fresh
}
}
/// Cleans up expired open invitation buckets
fn clean_up_open_entry(&mut self, bdb: &mut BridgeDb) {
// First check if there are any open invitation indexes that are old enough to be replaced
if !self.bridge_table.open_inv_keys.is_empty()
&& self
.bridge_table
.open_inv_keys
.iter()
.any(|&x| x.1 + EXPIRY_DATE < self.today())
//Perhaps EXPIRY_DATE should be changed to an earlier time
{
// If so, separate them from the fresh open invitation indexes
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
.bridge_table
.open_inv_keys
.iter()
.partition(|&x| x.1 + EXPIRY_DATE < self.today());
for item in expired {
// We should check that the items were actually distributed before they are removed
if !bdb.distributed_buckets.contains(&item.0) {
// TODO: Add prometheus metric for this?
println!("This bucket was not actually distributed!");
}
let new_item = item.0;
bdb.remove_blocked_or_expired_buckets(&new_item);
// Remove any trust upgrade migrations from this
// bucket
self.trustup_migration_table
.table
.retain(|&k, _| k != new_item);
self.bridge_table.buckets.remove(&new_item);
self.bridge_table.keys.remove(&new_item);
//and add them to the recyclable keys
self.bridge_table.recycleable_keys.push(new_item);
}
// update the open_inv_keys vector to only include the fresh keys
self.bridge_table.open_inv_keys = fresh
}
}
#[cfg(test)]
/// For testing only: manually advance the day by 1 day
pub fn advance_day(&mut self) {
self.time_offset += time::Duration::days(1);
}
///#[cfg(test)]
/// For testing only: manually advance the day by the given number
/// of days
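/// A small sketch of how the simulated clock interacts with today(), which
/// counts Julian days, so advancing by n days increases it by exactly n:
///
/// ```ignore
/// let before = ba.today();
/// ba.advance_days(511);
/// assert_eq!(ba.today(), before + 511);
/// ```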
pub fn advance_days(&mut self, days: u16) {
self.time_offset += time::Duration::days(days.into());
}
/// Get today's (real or simulated) date as u32
pub fn today(&self) -> u32 {
// We will not encounter negative Julian dates (~6700 years ago)
// or ones larger than 32 bits
(time::OffsetDateTime::now_utc().date() + self.time_offset)
.to_julian_day()
.try_into()
.unwrap()
}
/// Get today's (real or simulated) date as a DateTime<Utc> value
pub fn today_date(&self) -> DateTime<Utc> {
Utc::now()
}
/// Get a reference to the encrypted bridge table.
///
/// Be sure to call this function when you want the latest version
/// of the table, since it will put fresh Bucket Reachability
/// credentials in the buckets each day.
pub fn enc_bridge_table(&mut self) -> &HashMap<u32, EncryptedBucket> {
let today = self.today();
if self.bridge_table.date_last_enc != today {
self.bridge_table
.encrypt_table(today, &self.reachability_priv);
}
&self.bridge_table.encbuckets
}
#[cfg(test)]
/// Verify the MAC on a Lox credential
pub fn verify_lox(&self, cred: &cred::Lox) -> bool {
if cred.P.is_identity() {
return false;
}
let Q = (self.lox_priv.x[0]
+ cred.id * self.lox_priv.x[1]
+ cred.bucket * self.lox_priv.x[2]
+ cred.trust_level * self.lox_priv.x[3]
+ cred.level_since * self.lox_priv.x[4]
+ cred.invites_remaining * self.lox_priv.x[5]
+ cred.blockages * self.lox_priv.x[6])
* cred.P;
Q == cred.Q
}
#[cfg(test)]
/// Verify the MAC on a Migration credential
pub fn verify_migration(&self, cred: &cred::Migration) -> bool {
if cred.P.is_identity() {
return false;
}
let Q = (self.migration_priv.x[0]
+ cred.lox_id * self.migration_priv.x[1]
+ cred.from_bucket * self.migration_priv.x[2]
+ cred.to_bucket * self.migration_priv.x[3])
* cred.P;
Q == cred.Q
}
#[cfg(test)]
/// Verify the MAC on a Bucket Reachability credential
pub fn verify_reachability(&self, cred: &cred::BucketReachability) -> bool {
if cred.P.is_identity() {
return false;
}
let Q = (self.reachability_priv.x[0]
+ cred.date * self.reachability_priv.x[1]
+ cred.bucket * self.reachability_priv.x[2])
* cred.P;
Q == cred.Q
}
#[cfg(test)]
/// Verify the MAC on an Invitation credential
pub fn verify_invitation(&self, cred: &cred::Invitation) -> bool {
if cred.P.is_identity() {
return false;
}
let Q = (self.invitation_priv.x[0]
+ cred.inv_id * self.invitation_priv.x[1]
+ cred.date * self.invitation_priv.x[2]
+ cred.bucket * self.invitation_priv.x[3]
+ cred.blockages * self.invitation_priv.x[4])
* cred.P;
Q == cred.Q
}
}
/// Try to extract a u64 from a Scalar
pub fn scalar_u64(s: &Scalar) -> Option<u64> {
// Check that the top 24 bytes of the Scalar are 0
let sbytes = s.as_bytes();
if sbytes[8..].ct_eq(&[0u8; 24]).unwrap_u8() == 0 {
return None;
}
Some(u64::from_le_bytes(sbytes[..8].try_into().unwrap()))
}
/// Try to extract a u32 from a Scalar
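/// A short sketch of the expected behaviour (values that fit in 32 bits
/// round-trip; anything larger yields None):
///
/// ```ignore
/// use curve25519_dalek::scalar::Scalar;
/// use lox_library::scalar_u32;
/// assert_eq!(scalar_u32(&Scalar::from(42u32)), Some(42));
/// assert_eq!(scalar_u32(&Scalar::from(1u64 << 40)), None);
/// ```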
pub fn scalar_u32(s: &Scalar) -> Option<u32> {
// Check that the top 28 bytes of the Scalar are 0
let sbytes = s.as_bytes();
if sbytes[4..].ct_eq(&[0u8; 28]).unwrap_u8() == 0 {
return None;
}
Some(u32::from_le_bytes(sbytes[..4].try_into().unwrap()))
}
/// Double a Scalar
pub fn scalar_dbl(s: &Scalar) -> Scalar {
s + s
}
/// Double a RistrettoPoint
pub fn pt_dbl(P: &RistrettoPoint) -> RistrettoPoint {
P + P
}
/// The protocol modules.
///
/// Each protocol lives in a submodule. Each submodule defines structs
/// for Request (the message from the client to the bridge authority),
/// State (the state held by the client while waiting for the reply),
/// and Response (the message from the bridge authority to the client).
/// Each submodule defines functions request, which produces a (Request,
/// State) pair, and handle_response, which consumes a State and a
/// Response. It also adds a handle_* function to the BridgeAuth struct
/// that consumes a Request and produces a Result<Response, ProofError>.
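/// A hedged sketch of one round trip (open_invite), following only the shape
/// described above; function names mirror the submodule name for illustration,
/// and the exact parameter lists, which differ per protocol, are elided:
///
/// ```ignore
/// // Client side: build the request and keep the local state.
/// let (req, state) = proto::open_invite::request(/* open invitation bytes */);
/// // Bridge authority side: verify the proof and issue a response.
/// let resp = ba.handle_open_invite(req)?;
/// // Client side: finish the protocol and obtain the issued credential.
/// let cred = proto::open_invite::handle_response(state, resp /*, issuer public keys */)?;
/// ```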
pub mod proto {
pub mod blockage_migration;
pub mod check_blockage;
pub mod issue_invite;
pub mod level_up;
pub mod migration;
pub mod open_invite;
pub mod redeem_invite;
pub mod trust_promotion;
}
// Unit tests
#[cfg(test)]
mod tests;