963 lines
40 KiB
Rust
963 lines
40 KiB
Rust
/*! Implementation of a new style of bridge authority for Tor that
allows users to invite other users, while protecting the social graph
from the bridge authority itself.

We use CMZ14 credentials (GGM version, which is more efficient, but
makes a stronger security assumption): "Algebraic MACs and
Keyed-Verification Anonymous Credentials" (Chase, Meiklejohn, and
Zaverucha, CCS 2014)

The notation follows that of the paper "Hyphae: Social Secret Sharing"
(Lovecruft and de Valence, 2017), Section 4. */
|
||
|
||
// We really want points to be capital letters and scalars to be
|
||
// lowercase letters
|
||
#![allow(non_snake_case)]
|
||
|
||
#[macro_use]
|
||
extern crate zkp;
|
||
|
||
pub mod bridge_table;
|
||
pub mod cred;
|
||
pub mod dup_filter;
|
||
pub mod migration_table;
|
||
|
||
use chrono::{DateTime, Utc};
|
||
use sha2::Sha512;
|
||
|
||
use curve25519_dalek::constants as dalek_constants;
|
||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||
use curve25519_dalek::scalar::Scalar;
|
||
#[cfg(test)]
|
||
use curve25519_dalek::traits::IsIdentity;
|
||
use rand::rngs::OsRng;
|
||
use rand::Rng;
|
||
use std::collections::HashMap;
|
||
use std::convert::{TryFrom, TryInto};
|
||
|
||
use ed25519_dalek::{Keypair, PublicKey, Signature, SignatureError, Signer, Verifier};
|
||
use subtle::ConstantTimeEq;
|
||
|
||
use std::collections::HashSet;
|
||
|
||
use bridge_table::{
|
||
BridgeLine, BridgeTable, EncryptedBucket, MAX_BRIDGES_PER_BUCKET, MIN_BUCKET_REACHABILITY,
|
||
};
|
||
use migration_table::{MigrationTable, MigrationType};
|
||
|
||
use lazy_static::lazy_static;
|
||
|
||
use serde::{Deserialize, Serialize};
|
||
use thiserror::Error;
|
||
|
||
lazy_static! {
    /// CMZ generator A, derived by hashing a fixed domain-separation string.
    pub static ref CMZ_A: RistrettoPoint =
        RistrettoPoint::hash_from_bytes::<Sha512>(b"CMZ Generator A");
    /// CMZ generator B: the standard Ristretto basepoint.
    pub static ref CMZ_B: RistrettoPoint = dalek_constants::RISTRETTO_BASEPOINT_POINT;
    /// Precomputed multiplication table for A, for fast scalar multiplication.
    pub static ref CMZ_A_TABLE: RistrettoBasepointTable = RistrettoBasepointTable::create(&CMZ_A);
    /// Precomputed multiplication table for B (the library's basepoint table).
    pub static ref CMZ_B_TABLE: RistrettoBasepointTable =
        dalek_constants::RISTRETTO_BASEPOINT_TABLE;
}
|
||
|
||
// Open-entry and blocked buckets expire after EXPIRY_DATE (511) days, in order
// to match the expiry date for Lox credentials. 511 is chosen because values
// that are 2^k − 1 make range proofs more efficient, but this can be changed
// to any value.
pub const EXPIRY_DATE: u32 = 511;
|
||
|
||
/// Outcome of an attempt to replace a bridge in the bridge table
/// (see `BridgeAuth::bridge_replace`).
#[derive(PartialEq, Eq)]
pub enum ReplaceSuccess {
    /// The bridge to replace was not found in the reachable-bridges table
    NotFound = 0,
    /// The bridge was found but no replacement could be installed
    NotReplaced = 1,
    /// The bridge was successfully replaced
    Replaced = 2,
}
|
||
|
||
/// Error returned when no free lookup key (bucket index) can be found
/// for a new bucket.
#[derive(Error, Debug)]
pub enum NoAvailableIDError {
    /// The search for a free index gave up after too many attempts
    #[error("Find key exhausted with no available index found!")]
    ExhaustedIndexer,
}
|
||
|
||
/// The private key for issuing CMZ14 (GGM) credentials.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IssuerPrivKey {
    // Blinding scalar used in the public key element X[0]
    x0tilde: Scalar,
    // x[0] plus one secret scalar per credential attribute (n+1 entries)
    x: Vec<Scalar>,
}
|
||
|
||
impl IssuerPrivKey {
|
||
/// Create an IssuerPrivKey for credentials with the given number of
|
||
/// attributes.
|
||
pub fn new(n: u16) -> IssuerPrivKey {
|
||
let mut rng = rand::thread_rng();
|
||
let x0tilde = Scalar::random(&mut rng);
|
||
let mut x: Vec<Scalar> = Vec::with_capacity((n + 1) as usize);
|
||
|
||
// Set x to a vector of n+1 random Scalars
|
||
x.resize_with((n + 1) as usize, || Scalar::random(&mut rng));
|
||
|
||
IssuerPrivKey { x0tilde, x }
|
||
}
|
||
}
|
||
|
||
/// The public key corresponding to an IssuerPrivKey.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IssuerPubKey {
    // X[0] = x0tilde*A + x[0]*B; X[i] = x[i]*A for i >= 1
    X: Vec<RistrettoPoint>,
}
|
||
|
||
impl IssuerPubKey {
|
||
/// Create an IssuerPubKey from the corresponding IssuerPrivKey
|
||
pub fn new(privkey: &IssuerPrivKey) -> IssuerPubKey {
|
||
let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
|
||
let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;
|
||
let n_plus_one = privkey.x.len();
|
||
let mut X: Vec<RistrettoPoint> = Vec::with_capacity(n_plus_one);
|
||
|
||
// The first element is a special case; it is
|
||
// X[0] = x0tilde*A + x[0]*B
|
||
X.push(&privkey.x0tilde * Atable + &privkey.x[0] * Btable);
|
||
|
||
// The other elements (1 through n) are X[i] = x[i]*A
|
||
X.extend(privkey.x.iter().skip(1).map(|xi| xi * Atable));
|
||
|
||
IssuerPubKey { X }
|
||
}
|
||
}
|
||
|
||
/// Number of times the same open-invitation bucket is handed out before
/// a fresh one is chosen at random.
pub const OPENINV_K: u32 = 10;
/// The BridgeDb. This will typically be a singleton object. The
/// BridgeDb's role is simply to issue signed "open invitations" to
/// people who are not yet part of the system.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeDb {
    /// The keypair for signing open invitations
    keypair: Keypair,
    /// The public key for verifying open invitations
    pub pubkey: PublicKey,
    /// The set of open-invitation buckets
    openinv_buckets: HashSet<u32>,
    /// Buckets that have already been handed out in invitations
    distributed_buckets: Vec<u32>,
    /// How many invitations the most recently distributed bucket has
    /// been handed out in (compared against OPENINV_K)
    current_k: u32,
}
|
||
|
||
/// An open invitation is a [u8; OPENINV_LENGTH] where the first 32
/// bytes are the serialization of a random Scalar (the invitation id),
/// the next 4 bytes are a little-endian bucket number, and the last
/// SIGNATURE_LENGTH bytes are the signature on the first 36 bytes.
pub const OPENINV_LENGTH: usize = 32 // the length of the random
    // invitation id (a Scalar)
    + 4 // the length of the u32 for the bucket number
    + ed25519_dalek::SIGNATURE_LENGTH; // the length of the signature
|
||
|
||
impl BridgeDb {
    /// Create the BridgeDb, generating a fresh signing keypair for open
    /// invitations from the OS RNG.
    pub fn new() -> Self {
        let mut csprng = OsRng {};
        let keypair = Keypair::generate(&mut csprng);
        let pubkey = keypair.public;
        Self {
            keypair,
            pubkey,
            openinv_buckets: Default::default(),
            distributed_buckets: Default::default(),
            current_k: 0,
        }
    }

    /// Insert an open-invitation bucket into the set
    pub fn insert_openinv(&mut self, bucket: u32) {
        self.openinv_buckets.insert(bucket);
    }

    /// Remove an open-invitation bucket from the set
    pub fn remove_openinv(&mut self, bucket: &u32) {
        self.openinv_buckets.remove(bucket);
    }

    /// Remove a bucket that is blocked or expired, whether it is still
    /// awaiting distribution or has already been distributed.
    pub fn remove_blocked_or_expired_buckets(&mut self, bucket: &u32) {
        if self.openinv_buckets.contains(bucket) {
            println!("Removing a bucket that has not been distributed yet!");
            self.openinv_buckets.remove(bucket);
        } else if self.distributed_buckets.contains(bucket) {
            self.distributed_buckets.retain(|&x| x != *bucket);
        }
    }

    /// Record that a bucket has been handed out in an invitation.
    pub fn mark_distributed(&mut self, bucket: u32) {
        self.distributed_buckets.push(bucket);
    }

    /// Produce an open invitation such that the next k users, where k is <
    /// OPENINV_K, will receive the same open invitation bucket
    /// chosen randomly from the set of open-invitation buckets.
    ///
    /// NOTE(review): if `distributed_buckets` is empty (or current_k has
    /// reached OPENINV_K) and `openinv_buckets` is also empty, the random
    /// index below is taken over an empty vector — confirm callers
    /// guarantee at least one open-invitation bucket exists.
    pub fn invite(&mut self) -> [u8; OPENINV_LENGTH] {
        let mut res: [u8; OPENINV_LENGTH] = [0; OPENINV_LENGTH];
        let mut rng = rand::thread_rng();
        // Choose a random invitation id (a Scalar) and serialize it
        let id = Scalar::random(&mut rng);
        res[0..32].copy_from_slice(&id.to_bytes());
        let bucket_num: u32;
        if self.current_k < OPENINV_K && !self.distributed_buckets.is_empty() {
            // Keep handing out the most recently distributed bucket
            // until it has been used OPENINV_K times
            bucket_num = *self.distributed_buckets.last().unwrap();
            self.current_k += 1;
        } else {
            // Choose a random bucket number (from the set of open
            // invitation buckets) and serialize it
            let openinv_vec: Vec<&u32> = self.openinv_buckets.iter().collect();
            bucket_num = *openinv_vec[rng.gen_range(0, openinv_vec.len())];
            self.mark_distributed(bucket_num);
            self.remove_openinv(&bucket_num);
            self.current_k = 1;
        }
        res[32..(32 + 4)].copy_from_slice(&bucket_num.to_le_bytes());
        // Sign the first 36 bytes and serialize it
        let sig = self.keypair.sign(&res[0..(32 + 4)]);
        res[(32 + 4)..].copy_from_slice(&sig.to_bytes());
        res
    }

    /// Verify an open invitation. Returns the invitation id and the
    /// bucket number if the signature checked out. It is up to the
    /// caller to then check that the invitation id has not been used
    /// before.
    pub fn verify(
        invitation: [u8; OPENINV_LENGTH],
        pubkey: PublicKey,
    ) -> Result<(Scalar, u32), SignatureError> {
        // Pull out the signature and verify it
        let sig = Signature::try_from(&invitation[(32 + 4)..])?;
        pubkey.verify(&invitation[0..(32 + 4)], &sig)?;
        // The signature passed. Pull out the bucket number and then
        // the invitation id
        let bucket = u32::from_le_bytes(invitation[32..(32 + 4)].try_into().unwrap());
        match Scalar::from_canonical_bytes(invitation[0..32].try_into().unwrap()) {
            // It should never happen that there's a valid signature on
            // an invalid serialization of a Scalar, but check anyway.
            None => Err(SignatureError::new()),
            Some(s) => Ok((s, bucket)),
        }
    }
}
|
||
|
||
impl Default for BridgeDb {
|
||
fn default() -> Self {
|
||
Self::new()
|
||
}
|
||
}
|
||
|
||
/// The bridge authority. This will typically be a singleton object.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeAuth {
    /// The private key for the main Lox credential
    lox_priv: IssuerPrivKey,
    /// The public key for the main Lox credential
    pub lox_pub: IssuerPubKey,
    /// The private key for migration credentials
    migration_priv: IssuerPrivKey,
    /// The public key for migration credentials
    pub migration_pub: IssuerPubKey,
    /// The private key for migration key credentials
    migrationkey_priv: IssuerPrivKey,
    /// The public key for migration key credentials
    pub migrationkey_pub: IssuerPubKey,
    /// The private key for bucket reachability credentials
    reachability_priv: IssuerPrivKey,
    /// The public key for bucket reachability credentials
    pub reachability_pub: IssuerPubKey,
    /// The private key for invitation credentials
    invitation_priv: IssuerPrivKey,
    /// The public key for invitation credentials
    pub invitation_pub: IssuerPubKey,

    /// The public key of the BridgeDb issuing open invitations
    pub bridgedb_pub: PublicKey,

    /// The bridge table
    pub bridge_table: BridgeTable,

    /// Migrations from open-invitation buckets to trusted buckets
    trustup_migration_table: MigrationTable,
    /// Migrations from blocked buckets to (formerly spare) buckets
    blockage_migration_table: MigrationTable,

    /// Duplicate filter for open invitations
    openinv_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for Lox credential ids
    id_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for Invitation credential ids
    inv_id_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for trust promotions (from untrusted level 0 to
    /// trusted level 1)
    trust_promotion_filter: dup_filter::DupFilter<Scalar>,

    /// For testing only: offset of the true time to the simulated time
    #[serde(skip)]
    time_offset: time::Duration,
}
|
||
|
||
impl BridgeAuth {
|
||
    /// Create the BridgeAuth, generating a fresh issuer keypair for each
    /// credential type.
    pub fn new(bridgedb_pub: PublicKey) -> Self {
        // Create the private and public keys for each of the types of
        // credential, each with the appropriate number of attributes
        let lox_priv = IssuerPrivKey::new(6);
        let lox_pub = IssuerPubKey::new(&lox_priv);
        let migration_priv = IssuerPrivKey::new(4);
        let migration_pub = IssuerPubKey::new(&migration_priv);
        let migrationkey_priv = IssuerPrivKey::new(2);
        let migrationkey_pub = IssuerPubKey::new(&migrationkey_priv);
        let reachability_priv = IssuerPrivKey::new(2);
        let reachability_pub = IssuerPubKey::new(&reachability_priv);
        let invitation_priv = IssuerPrivKey::new(4);
        let invitation_pub = IssuerPubKey::new(&invitation_priv);
        Self {
            lox_priv,
            lox_pub,
            migration_priv,
            migration_pub,
            migrationkey_priv,
            migrationkey_pub,
            reachability_priv,
            reachability_pub,
            invitation_priv,
            invitation_pub,
            bridgedb_pub,
            bridge_table: Default::default(),
            trustup_migration_table: MigrationTable::new(MigrationType::TrustUpgrade),
            blockage_migration_table: MigrationTable::new(MigrationType::Blockage),
            openinv_filter: Default::default(),
            id_filter: Default::default(),
            inv_id_filter: Default::default(),
            trust_promotion_filter: Default::default(),
            // Tests advance this offset to simulate the passage of days
            time_offset: time::Duration::ZERO,
        }
    }
|
||
|
||
pub fn is_empty(&mut self) -> bool {
|
||
self.bridge_table.buckets.is_empty()
|
||
}
|
||
|
||
/// Insert a set of open invitation bridges.
|
||
///
|
||
/// Each of the bridges will be given its own open invitation
|
||
/// bucket, and the BridgeDb will be informed. A single bucket
|
||
/// containing all of the bridges will also be created, with a trust
|
||
/// upgrade migration from each of the single-bridge buckets.
|
||
pub fn add_openinv_bridges(
|
||
&mut self,
|
||
bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
||
bdb: &mut BridgeDb,
|
||
) -> Result<(), NoAvailableIDError> {
|
||
let bindex = match self.find_next_available_key(bdb) {
|
||
Ok(sindex) => sindex,
|
||
Err(e) => return Err(e),
|
||
};
|
||
self.bridge_table.new_bucket(bindex, &bridges);
|
||
let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
|
||
for b in bridges.iter() {
|
||
let sindex = match self.find_next_available_key(bdb) {
|
||
Ok(sindex) => sindex,
|
||
Err(e) => return Err(e),
|
||
};
|
||
single[0] = *b;
|
||
self.bridge_table.new_bucket(sindex, &single);
|
||
self.bridge_table.open_inv_keys.push((sindex, self.today()));
|
||
bdb.insert_openinv(sindex);
|
||
self.trustup_migration_table.table.insert(sindex, bindex);
|
||
}
|
||
Ok(())
|
||
}
|
||
|
||
/// Insert a hot spare bucket of bridges
|
||
pub fn add_spare_bucket(
|
||
&mut self,
|
||
bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
||
bdb: &mut BridgeDb,
|
||
) -> Result<(), NoAvailableIDError> {
|
||
let index = match self.find_next_available_key(bdb) {
|
||
Ok(index) => index,
|
||
Err(e) => return Err(e),
|
||
};
|
||
self.bridge_table.new_bucket(index, &bucket);
|
||
self.bridge_table.spares.insert(index);
|
||
Ok(())
|
||
}
|
||
|
||
    // TODO Ensure synchronization of Lox bridge_table with rdsys
    /// Placeholder: synchronize the Lox bridge table with bridges known
    /// to rdsys. Currently unimplemented; the comments below sketch the
    /// intended algorithm.
    pub fn sync_table(&mut self) {
        // Create a hashtable (?) of bridges in the lox distributor from new resources
        // accept the hashtable and recreate the bridge table from the hash table here
        // using existing reachable bridges, other table checks and placements from existing bridge table
        // If bridges are in reachable bridges, put them in the table with their Vec
        // How to check for bridges that aren't there/are extra?
        // After going through the update, make sure bridges in the table are the same and deal with discrepencies
        // This will be the bad/annoying part

        //also use open_inv_keys and blocked_keys from bridge_table to remove expired keys from table.
        // make sure this happens before they are removed from the structures in the bridge table
    }
|
||
|
||
pub fn allocate_bridges(
|
||
&mut self,
|
||
distributor_bridges: &mut Vec<BridgeLine>,
|
||
bdb: &mut BridgeDb,
|
||
) {
|
||
while let Some(bridge) = distributor_bridges.pop() {
|
||
self.bridge_table.unallocated_bridges.push(bridge);
|
||
}
|
||
while self.bridge_table.unallocated_bridges.len() >= MAX_BRIDGES_PER_BUCKET {
|
||
let mut bucket = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
|
||
for bridge in bucket.iter_mut() {
|
||
*bridge = self.bridge_table.unallocated_bridges.pop().unwrap();
|
||
}
|
||
match self.add_openinv_bridges(bucket, bdb) {
|
||
Ok(_) => continue,
|
||
Err(e) => {
|
||
println!("Error: {:?}", e);
|
||
for bridge in bucket {
|
||
self.bridge_table.unallocated_bridges.push(bridge);
|
||
}
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
    // Update the details of a bridge in the bridge table. This assumes that the IP and Port
    // of a given bridge remains the same and thus can be updated.
    // First we must retrieve the list of reachable bridges, then we must search for any matching our partial key
    // which will include the IP and Port. Then we can replace the original bridge with the updated bridge
    // Returns true if the bridge has successfully updated
    pub fn bridge_update(&mut self, bridge: &BridgeLine) -> bool {
        let mut res: bool = false; //default False to assume that update failed
        //Needs to be updated since bridge will only match on some fields.

        // Scan a snapshot of the reachable map for an entry whose
        // uid_fingerprint matches the updated bridge. Non-matching
        // entries are skipped; the loop returns as soon as a match is
        // processed.
        let reachable_bridges = self.bridge_table.reachable.clone();
        for reachable_bridge in reachable_bridges {
            if reachable_bridge.0.uid_fingerprint == bridge.uid_fingerprint {
                println!(
                    "Bridge from table: {:?} has same IP and Port as bridge {:?}!",
                    reachable_bridge.0, bridge
                );
                // Now we must remove the old bridge from the table and insert the new bridge in its place
                // i.e., in the same bucket and with the same permissions.
                let positions = self.bridge_table.reachable.get(&reachable_bridge.0);
                if let Some(v) = positions {
                    for (bucketnum, offset) in v.iter() {
                        println!("Bucket num: {:?} and offset: {:?}", bucketnum, offset);
                        // Copy the bucket out, overwrite the stale line,
                        // and write the bucket back
                        let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                            Some(bridgelines) => *bridgelines,
                            None => return res,
                        };
                        assert!(bridgelines[*offset] == reachable_bridge.0);
                        bridgelines[*offset] = *bridge;
                        self.bridge_table.buckets.insert(*bucketnum, bridgelines);
                        // Re-read to confirm the write took effect
                        let bridgelines = match self.bridge_table.buckets.get(bucketnum) {
                            Some(bridgelines) => *bridgelines,
                            None => return res,
                        };
                        assert!(bridgelines[*offset] != reachable_bridge.0);
                    }
                    res = true;
                } else {
                    // Matched fingerprint but no reachable entry: bail out
                    // with res still false
                    return res;
                }
                // We must also remove the old bridge from the reachable bridges table
                // and add the new bridge
                self.bridge_table.reachable.remove(&reachable_bridge.0);
                self.bridge_table
                    .reachable
                    .insert(*bridge, reachable_bridge.1);
                return res;
            }
        }
        // If this is returned, we assume that the bridge wasn't found in the bridge table
        // and therefore should be treated as a "new bridge"
        res
    }
|
||
|
||
pub fn bridge_replace(
|
||
&mut self,
|
||
bridge: &BridgeLine,
|
||
available_bridge: Option<&BridgeLine>,
|
||
) -> ReplaceSuccess {
|
||
let mut res = ReplaceSuccess::NotFound;
|
||
let reachable_bridges = &self.bridge_table.reachable.clone();
|
||
match reachable_bridges.get(bridge) {
|
||
Some(positions) => {
|
||
if let Some(replacement) = available_bridge {
|
||
for (bucketnum, offset) in positions.iter() {
|
||
let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
|
||
Some(bridgelines) => *bridgelines,
|
||
None => return ReplaceSuccess::NotFound,
|
||
};
|
||
assert!(bridgelines[*offset] == *bridge);
|
||
bridgelines[*offset] = *replacement;
|
||
self.bridge_table.buckets.insert(*bucketnum, bridgelines);
|
||
// Remove the bridge from the reachable bridges and add new bridge
|
||
self.bridge_table
|
||
.reachable
|
||
.insert(*replacement, positions.clone());
|
||
// Remove the bridge from the bucket
|
||
self.bridge_table.reachable.remove(bridge);
|
||
}
|
||
res = ReplaceSuccess::Replaced
|
||
} else if !self.bridge_table.unallocated_bridges.is_empty() {
|
||
let replacement = &self.bridge_table.unallocated_bridges.pop().unwrap();
|
||
for (bucketnum, offset) in positions.iter() {
|
||
let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
|
||
Some(bridgelines) => *bridgelines,
|
||
// This should not happen if the rest of the function is correct, we can assume unwrap will succeed
|
||
None => return ReplaceSuccess::NotReplaced,
|
||
};
|
||
assert!(bridgelines[*offset] == *bridge);
|
||
bridgelines[*offset] = *replacement;
|
||
self.bridge_table.buckets.insert(*bucketnum, bridgelines);
|
||
self.bridge_table
|
||
.reachable
|
||
.insert(*replacement, positions.clone());
|
||
// Remove the bridge from the bucket
|
||
self.bridge_table.reachable.remove(bridge);
|
||
}
|
||
res = ReplaceSuccess::Replaced
|
||
} else if !self.bridge_table.spares.is_empty() {
|
||
// Get the first spare and remove it from the spares set.
|
||
let spare = *self.bridge_table.spares.iter().next().unwrap();
|
||
self.bridge_table.spares.remove(&spare);
|
||
self.bridge_table.recycleable_keys.push(spare);
|
||
// Get the actual bridges from the spare bucket
|
||
let spare_bucket = match self.bridge_table.buckets.remove(&spare) {
|
||
Some(spare_bucket) => spare_bucket,
|
||
// This should not happen if the rest of the functions are correct, we can assume unwrap will succeed
|
||
None => return ReplaceSuccess::NotReplaced,
|
||
};
|
||
// Remove the spare bucket uid from the keys map
|
||
self.bridge_table.keys.remove(&spare);
|
||
let mut replacement: &BridgeLine = &BridgeLine::default();
|
||
// Make the first spare the replacement bridge, add the others to the set of
|
||
// unallocated_bridges
|
||
for spare_bridge in spare_bucket.iter() {
|
||
if replacement.port > 0 {
|
||
self.bridge_table.unallocated_bridges.push(*spare_bridge);
|
||
// Mark bucket as unreachable while it is unallocated
|
||
self.bridge_table.reachable.remove(spare_bridge);
|
||
} else {
|
||
replacement = spare_bridge;
|
||
}
|
||
}
|
||
for (bucketnum, offset) in positions.iter() {
|
||
let mut bridgelines = match self.bridge_table.buckets.get(bucketnum) {
|
||
Some(bridgelines) => *bridgelines,
|
||
None => return ReplaceSuccess::NotReplaced,
|
||
};
|
||
assert!(bridgelines[*offset] == *bridge);
|
||
bridgelines[*offset] = *replacement;
|
||
self.bridge_table.buckets.insert(*bucketnum, bridgelines);
|
||
self.bridge_table
|
||
.reachable
|
||
.insert(*replacement, positions.clone());
|
||
// Remove the bridge from the bucket
|
||
self.bridge_table.reachable.remove(bridge);
|
||
}
|
||
res = ReplaceSuccess::Replaced
|
||
} else {
|
||
// If there are no available bridges that can be assigned here, the only thing
|
||
// that can be done is return an indication that updating the gone bridge
|
||
// didn't work.
|
||
// In this case, we do not mark the bridge as unreachable or remove it from the
|
||
// reachable bridges so that we can still find it when a new bridge does become available
|
||
res = ReplaceSuccess::NotReplaced
|
||
}
|
||
}
|
||
None => return res,
|
||
};
|
||
res
|
||
}
|
||
|
||
    /// Mark a bridge as unreachable
    ///
    /// This bridge will be removed from each of the buckets that
    /// contains it. If any of those are open-invitation buckets, the
    /// trust upgrade migration for that bucket will be removed and the
    /// BridgeDb will be informed to stop handing out that bridge. If
    /// any of those are trusted buckets where the number of reachable
    /// bridges has fallen below the threshold, a blockage migration
    /// from that bucket to a spare bucket will be added, and the spare
    /// bucket will be removed from the list of hot spares. In
    /// addition, if the blocked bucket was the _target_ of a blockage
    /// migration, change the target to the new (formerly spare) bucket.
    /// Returns true if sucessful, or false if it needed a hot spare but
    /// there was none available.
    pub fn bridge_unreachable(&mut self, bridge: &BridgeLine, bdb: &mut BridgeDb) -> bool {
        let mut res: bool = true;
        // An unallocated bridge is simply dropped from the pool
        if self.bridge_table.unallocated_bridges.contains(bridge) {
            let index = self
                .bridge_table
                .unallocated_bridges
                .iter()
                .position(|&b| b == *bridge)
                .unwrap();
            self.bridge_table.unallocated_bridges.remove(index);
            res = true;
        } else {
            let positions = self.bridge_table.reachable.get(bridge);
            if let Some(v) = positions {
                for (bucketnum, offset) in v.iter() {
                    // Count how many bridges in this bucket are reachable
                    let mut bucket = match self.bridge_table.buckets.get(bucketnum) {
                        Some(bridgelines) => *bridgelines,
                        None => return false, // This should not happen
                    };
                    let numreachable = bucket
                        .iter()
                        .filter(|br| self.bridge_table.reachable.get(br).is_some())
                        .count();

                    // Remove the bridge from the bucket
                    assert!(bucket[*offset] == *bridge);
                    bucket[*offset] = BridgeLine::default();
                    // NOTE(review): `bucket` is a local copy of the bucket
                    // array and the cleared line is not written back into
                    // `self.bridge_table.buckets` here — confirm whether the
                    // `reachable.remove(bridge)` below is what actually
                    // retires the bridge.

                    // Is this bucket an open-invitation bucket?
                    if bdb.openinv_buckets.contains(bucketnum)
                        || bdb.distributed_buckets.contains(bucketnum)
                    {
                        bdb.remove_blocked_or_expired_buckets(bucketnum);
                        self.trustup_migration_table.table.remove(bucketnum);
                        continue;
                    }

                    // Does this removal cause the bucket to go below the
                    // threshold? (numreachable was counted *before* the
                    // removal, so only the removal that crosses the
                    // threshold exactly triggers the migration below;
                    // buckets already below were handled earlier.)
                    if numreachable != MIN_BUCKET_REACHABILITY {
                        // No
                        continue;
                    }

                    // This bucket is now unreachable. Get a spare bucket
                    if self.bridge_table.spares.is_empty() {
                        // Uh, oh. No spares available. Just delete any
                        // migrations leading to this bucket.
                        res = false;
                        self.trustup_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                        self.blockage_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                    } else {
                        // Get the first spare and remove it from the spares
                        // set.
                        let spare = *self.bridge_table.spares.iter().next().unwrap();
                        self.bridge_table.spares.remove(&spare);
                        // Record the blockage date so the bucket can expire
                        self.bridge_table
                            .blocked_keys
                            .push((*bucketnum, self.today()));
                        // Add a blockage migration from this bucket to the spare
                        self.blockage_migration_table
                            .table
                            .insert(*bucketnum, spare);
                        // Remove any trust upgrade migrations to this
                        // bucket
                        self.trustup_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                        // Change any blockage migrations with this bucket
                        // as the destination to the spare
                        for (_, v) in self.blockage_migration_table.table.iter_mut() {
                            if *v == *bucketnum {
                                *v = spare;
                            }
                        }
                    }
                }
            }
            // Finally retire the bridge from the reachable map
            self.bridge_table.reachable.remove(bridge);
        }

        res
    }
|
||
|
||
// Since buckets are moved around in the bridge_table, finding a lookup key that
|
||
// does not overwrite existing bridges could become an issue. We keep a list
|
||
// of recycleable lookup keys from buckets that have been removed and prioritize
|
||
// this list before increasing the counter
|
||
fn find_next_available_key(&mut self, bdb: &mut BridgeDb) -> Result<u32, NoAvailableIDError> {
|
||
self.clean_up_expired_buckets(bdb);
|
||
if self.bridge_table.recycleable_keys.is_empty() {
|
||
let mut test_index = 1;
|
||
let mut test_counter = self.bridge_table.counter.wrapping_add(test_index);
|
||
let mut i = 0;
|
||
while self.bridge_table.buckets.contains_key(&test_counter) && i < 5000 {
|
||
test_index += 1;
|
||
test_counter = self.bridge_table.counter.wrapping_add(test_index);
|
||
i += 1;
|
||
if i == 5000 {
|
||
return Err(NoAvailableIDError::ExhaustedIndexer);
|
||
}
|
||
}
|
||
self.bridge_table.counter = self.bridge_table.counter.wrapping_add(test_index);
|
||
Ok(self.bridge_table.counter)
|
||
} else {
|
||
Ok(self.bridge_table.recycleable_keys.pop().unwrap())
|
||
}
|
||
}
|
||
|
||
    // This function looks for and removes buckets so their indexes can be reused
    // This should include buckets that have been blocked for a sufficiently long period
    // that we no longer want to allow migration to, or else, open-entry buckets that
    // have been unblocked long enough to become trusted and who's users' credentials
    // would have expired (after EXPIRY_DATE)
    /// Reclaim bucket indexes from long-blocked buckets and from expired
    /// open-invitation buckets.
    pub fn clean_up_expired_buckets(&mut self, bdb: &mut BridgeDb) {
        // First check if there are any blocked indexes that are old enough to be replaced
        self.clean_up_blocked();
        // Next do the same for open_invitations buckets
        self.clean_up_open_entry(bdb);
    }
|
||
|
||
fn clean_up_blocked(&mut self) {
|
||
if !self.bridge_table.blocked_keys.is_empty()
|
||
&& self
|
||
.bridge_table
|
||
.blocked_keys
|
||
.iter()
|
||
.any(|&x| x.1 + EXPIRY_DATE < self.today())
|
||
{
|
||
// If there are expired blockages, separate them from the fresh blockages
|
||
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
|
||
.bridge_table
|
||
.blocked_keys
|
||
.iter()
|
||
.partition(|&x| x.1 + EXPIRY_DATE < self.today());
|
||
for item in expired {
|
||
let new_item = item.0;
|
||
// check each single bridge line and ensure none are still marked as reachable.
|
||
// if any are still reachable, remove from reachable bridges.
|
||
// When syncing resources, we will likely have to reallocate this bridge but if it hasn't already been
|
||
// blocked, this might be fine?
|
||
let bridgelines = self.bridge_table.buckets.get(&new_item).unwrap();
|
||
for bridgeline in bridgelines {
|
||
// If the bridge hasn't been set to default, assume it's still reachable
|
||
if bridgeline.port > 0 {
|
||
// Move to unallocated bridges
|
||
self.bridge_table.unallocated_bridges.push(*bridgeline);
|
||
// Check if it's still in the reachable bridges. It should be if we've gotten this far.
|
||
if let Some(_reachable_indexes_for_bridgeline) =
|
||
self.bridge_table.reachable.get(bridgeline)
|
||
{
|
||
// and remove it until it's reallocated
|
||
self.bridge_table.reachable.remove(bridgeline);
|
||
}
|
||
}
|
||
}
|
||
// Then remove the bucket and keys at the specified index
|
||
self.bridge_table.buckets.remove(&new_item);
|
||
self.bridge_table.keys.remove(&new_item);
|
||
//and add them to the recyclable keys
|
||
self.bridge_table.recycleable_keys.push(new_item);
|
||
// Remove the expired blocked bucket from the blockage migration table,
|
||
// assuming that anyone that has still not attempted to migrate from their
|
||
// blocked bridge after the EXPIRY_DATE probably doesn't still need to migrate.
|
||
self.blockage_migration_table
|
||
.table
|
||
.retain(|&k, _| k != new_item);
|
||
}
|
||
// Finally, update the blocked_keys vector to only include the fresh keys
|
||
self.bridge_table.blocked_keys = fresh
|
||
}
|
||
}
|
||
|
||
fn clean_up_open_entry(&mut self, bdb: &mut BridgeDb) {
|
||
// First check if there are any open invitation indexes that are old enough to be replaced
|
||
if !self.bridge_table.open_inv_keys.is_empty()
|
||
&& self
|
||
.bridge_table
|
||
.open_inv_keys
|
||
.iter()
|
||
.any(|&x| x.1 + EXPIRY_DATE < self.today())
|
||
//Perhaps EXPIRY_DATE should be changed to an earlier time
|
||
{
|
||
// If so, separate them from the fresh open invitation indexes
|
||
let (expired, fresh): (Vec<(u32, u32)>, Vec<(u32, u32)>) = self
|
||
.bridge_table
|
||
.open_inv_keys
|
||
.iter()
|
||
.partition(|&x| x.1 + EXPIRY_DATE < self.today());
|
||
for item in expired {
|
||
let new_item = item.0;
|
||
bdb.remove_blocked_or_expired_buckets(&new_item);
|
||
// Remove any trust upgrade migrations from this
|
||
// bucket
|
||
self.trustup_migration_table
|
||
.table
|
||
.retain(|&k, _| k != new_item);
|
||
self.bridge_table.buckets.remove(&new_item);
|
||
self.bridge_table.keys.remove(&new_item);
|
||
//and add them to the recyclable keys
|
||
self.bridge_table.recycleable_keys.push(new_item);
|
||
}
|
||
// update the open_inv_keys vector to only include the fresh keys
|
||
self.bridge_table.open_inv_keys = fresh
|
||
}
|
||
}
|
||
|
||
    #[cfg(test)]
    /// For testing only: manually advance the day by 1 day
    pub fn advance_day(&mut self) {
        self.time_offset += time::Duration::days(1);
    }
|
||
|
||
    //#[cfg(test)]
    /// For testing only: manually advance the day by the given number
    /// of days
    ///
    /// NOTE(review): unlike `advance_day`, the `#[cfg(test)]` gate here
    /// is commented out, so this ships in non-test builds — confirm that
    /// is intentional.
    pub fn advance_days(&mut self, days: u16) {
        self.time_offset += time::Duration::days(days.into());
    }
|
||
|
||
    /// Get today's (real or simulated) date, as a Julian day number.
    pub fn today(&self) -> u32 {
        // We will not encounter negative Julian dates (~6700 years ago)
        // or ones larger than 32 bits
        (time::OffsetDateTime::now_utc().date() + self.time_offset)
            .to_julian_day()
            .try_into()
            .unwrap()
    }
|
||
|
||
/// Get today's (real or simulated) date
// NOTE(review): unlike today(), this returns the raw Utc::now() and does
// not apply self.time_offset, so simulated day advancement from the
// advance_day* helpers is not reflected here — confirm this is intended.
pub fn today_date(&self) -> DateTime<Utc> {
    Utc::now()
}
|
||
|
||
/// Get a reference to the encrypted bridge table.
|
||
///
|
||
/// Be sure to call this function when you want the latest version
|
||
/// of the table, since it will put fresh Bucket Reachability
|
||
/// credentials in the buckets each day.
|
||
pub fn enc_bridge_table(&mut self) -> &HashMap<u32, EncryptedBucket> {
|
||
let today = self.today();
|
||
if self.bridge_table.date_last_enc != today {
|
||
self.bridge_table
|
||
.encrypt_table(today, &self.reachability_priv);
|
||
}
|
||
&self.bridge_table.encbuckets
|
||
}
|
||
|
||
#[cfg(test)]
/// Verify the two MACs on a Lox credential
pub fn verify_lox(&self, cred: &cred::Lox) -> bool {
    // A credential whose base point is the identity is never valid.
    if cred.P.is_identity() {
        return false;
    }

    // Recompute Q = (x_0 + sum_i x_i * attr_i) * P by folding each
    // attribute against the matching private key element.
    let attrs = [
        cred.id,
        cred.bucket,
        cred.trust_level,
        cred.level_since,
        cred.invites_remaining,
        cred.blockages,
    ];
    let mut exponent = self.lox_priv.x[0];
    for (attr, x) in attrs.iter().zip(self.lox_priv.x[1..].iter()) {
        exponent += attr * x;
    }

    cred.Q == exponent * cred.P
}
|
||
|
||
#[cfg(test)]
/// Verify the MAC on a Migration credential
pub fn verify_migration(&self, cred: &cred::Migration) -> bool {
    // A credential whose base point is the identity is never valid.
    if cred.P.is_identity() {
        return false;
    }

    // Recompute Q = (x_0 + sum_i x_i * attr_i) * P by folding each
    // attribute against the matching private key element.
    let attrs = [cred.lox_id, cred.from_bucket, cred.to_bucket];
    let mut exponent = self.migration_priv.x[0];
    for (attr, x) in attrs.iter().zip(self.migration_priv.x[1..].iter()) {
        exponent += attr * x;
    }

    cred.Q == exponent * cred.P
}
|
||
|
||
#[cfg(test)]
/// Verify the MAC on a Bucket Reachability credential
pub fn verify_reachability(&self, cred: &cred::BucketReachability) -> bool {
    // A credential whose base point is the identity is never valid.
    if cred.P.is_identity() {
        return false;
    }

    // Recompute Q = (x_0 + sum_i x_i * attr_i) * P by folding each
    // attribute against the matching private key element.
    let attrs = [cred.date, cred.bucket];
    let mut exponent = self.reachability_priv.x[0];
    for (attr, x) in attrs.iter().zip(self.reachability_priv.x[1..].iter()) {
        exponent += attr * x;
    }

    cred.Q == exponent * cred.P
}
|
||
|
||
#[cfg(test)]
/// Verify the MAC on a Invitation credential
pub fn verify_invitation(&self, cred: &cred::Invitation) -> bool {
    // A credential whose base point is the identity is never valid.
    if cred.P.is_identity() {
        return false;
    }

    // Recompute Q = (x_0 + sum_i x_i * attr_i) * P by folding each
    // attribute against the matching private key element.
    let attrs = [cred.inv_id, cred.date, cred.bucket, cred.blockages];
    let mut exponent = self.invitation_priv.x[0];
    for (attr, x) in attrs.iter().zip(self.invitation_priv.x[1..].iter()) {
        exponent += attr * x;
    }

    cred.Q == exponent * cred.P
}
|
||
}
|
||
|
||
/// Try to extract a u64 from a Scalar
|
||
pub fn scalar_u64(s: &Scalar) -> Option<u64> {
|
||
// Check that the top 24 bytes of the Scalar are 0
|
||
let sbytes = s.as_bytes();
|
||
if sbytes[8..].ct_eq(&[0u8; 24]).unwrap_u8() == 0 {
|
||
return None;
|
||
}
|
||
Some(u64::from_le_bytes(sbytes[..8].try_into().unwrap()))
|
||
}
|
||
|
||
/// Try to extract a u32 from a Scalar
|
||
pub fn scalar_u32(s: &Scalar) -> Option<u32> {
|
||
// Check that the top 28 bytes of the Scalar are 0
|
||
let sbytes = s.as_bytes();
|
||
if sbytes[4..].ct_eq(&[0u8; 28]).unwrap_u8() == 0 {
|
||
return None;
|
||
}
|
||
Some(u32::from_le_bytes(sbytes[..4].try_into().unwrap()))
|
||
}
|
||
|
||
/// Double a Scalar
|
||
pub fn scalar_dbl(s: &Scalar) -> Scalar {
|
||
s + s
|
||
}
|
||
|
||
/// Double a RistrettoPoint
|
||
pub fn pt_dbl(P: &RistrettoPoint) -> RistrettoPoint {
|
||
P + P
|
||
}
|
||
|
||
/// The protocol modules.
///
/// Each protocol lives in a submodule. Each submodule defines structs
/// for Request (the message from the client to the bridge authority),
/// State (the state held by the client while waiting for the reply),
/// and Response (the message from the bridge authority to the client).
/// Each submodule defines functions request, which produces a (Request,
/// State) pair, and handle_response, which consumes a State and a
/// Response. It also adds a handle_* function to the BridgeAuth struct
/// that consumes a Request and produces a Result<Response, ProofError>.
pub mod proto {
    // Each submodule below is loaded from the proto/ source directory.
    pub mod blockage_migration;
    pub mod check_blockage;
    pub mod issue_invite;
    pub mod level_up;
    pub mod migration;
    pub mod open_invite;
    pub mod redeem_invite;
    pub mod trust_promotion;
}
|
||
|
||
// Unit tests
// Compiled only under `cargo test`; the module body lives in a sibling
// tests.rs (or tests/mod.rs) source file.
#[cfg(test)]
mod tests;
|