Merge 'lox/master' into lox-workspace
This commit is contained in:
commit
43e9a04320
|
@ -0,0 +1,12 @@
|
|||
image: "rust:latest"
|
||||
|
||||
test:cargo:
|
||||
script:
|
||||
- rustc --version && cargo --version # Print version info for debugging
|
||||
- cargo test --workspace --verbose
|
||||
|
||||
test:lox-unit:
|
||||
script:
|
||||
- cargo test --release
|
||||
artifacts:
|
||||
when: always
|
|
@ -0,0 +1,31 @@
|
|||
[package]
|
||||
name = "lox"
|
||||
version = "0.1.0"
|
||||
authors = ["Ian Goldberg <iang@uwaterloo.ca>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
curve25519-dalek = { package = "curve25519-dalek-ng", version = "3", default-features = false, features = ["serde", "std"] }
|
||||
ed25519-dalek = { version = "1", features = ["serde"] }
|
||||
# zkp = { version = "0.8", features = ["debug-transcript"] }
|
||||
zkp = "0.8"
|
||||
bincode = "1"
|
||||
chrono = "0.4"
|
||||
rand = "0.7"
|
||||
serde = "1"
|
||||
serde_with = "1.9.1"
|
||||
sha2 = "0.9"
|
||||
statistical = "1.0.0"
|
||||
lazy_static = "1"
|
||||
hex_fmt = "0.3"
|
||||
aes-gcm = "0.8"
|
||||
base64 = "0.13"
|
||||
time = "0.2"
|
||||
subtle = "2.4"
|
||||
|
||||
[features]
|
||||
default = ["u64_backend"]
|
||||
u32_backend = ["curve25519-dalek/u32_backend"]
|
||||
u64_backend = ["curve25519-dalek/u64_backend"]
|
||||
simd_backend = ["curve25519-dalek/simd_backend"]
|
||||
fast = []
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2022 Ian Goldberg and Lindsey Tulloch
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,8 @@
|
|||
# Lox
|
||||
|
||||
Lox is a reputation-based bridge distribution system that provides privacy protection to users and their social graph and is open to all users.
|
||||
Lox is written in Rust and requires `cargo` to test. [Install Rust](https://www.rust-lang.org/tools/install). We used Rust version 1.56.0.
|
||||
|
||||
Note that this implementation is coded such that the reachability certificate expires at 00:00 UTC. In reality, if the bucket is still reachable, a user could simply request a new reachability token if their request fails for this reason (a new certificate should be available prior to the outdated certificate expiring).
|
||||
|
||||
|
|
@ -0,0 +1,430 @@
|
|||
/*! The encrypted table of bridges.
|
||||
|
||||
The table consists of a number of buckets, each holding some number
|
||||
(currently up to 3) of bridges. Each bucket is individually encrypted
|
||||
with a bucket key. Users will have a credential containing a bucket
|
||||
(number, key) combination, and so will be able to read one of the
|
||||
buckets. Users will either download the whole encrypted bucket list or
|
||||
use PIR to download a piece of it, so that the bridge authority does not
|
||||
learn which bucket the user has access to. */
|
||||
|
||||
use super::cred;
|
||||
use super::IssuerPrivKey;
|
||||
use super::CMZ_B_TABLE;
|
||||
use aes_gcm::aead;
|
||||
use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead};
|
||||
use aes_gcm::Aes128Gcm;
|
||||
use curve25519_dalek::ristretto::CompressedRistretto;
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::serde_as;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::convert::TryInto;
|
||||
use subtle::ConstantTimeEq;
|
||||
|
||||
/// Each bridge information line is serialized into this many bytes
pub const BRIDGE_BYTES: usize = 300;

/// The maximum number of bridges a single bucket can hold
pub const MAX_BRIDGES_PER_BUCKET: usize = 3;

/// The minimum number of bridges in a bucket that must be reachable
/// for the bucket to get a Bucket Reachability credential that will
/// allow users of that bucket to gain trust levels (once they are
/// already at level 1)
pub const MIN_BUCKET_REACHABILITY: usize = 2;
|
||||
|
||||
/// A bridge information line
|
||||
#[serde_as]
|
||||
#[derive(Serialize, Deserialize, Copy, Clone, Hash, Eq, PartialEq, Debug)]
|
||||
pub struct BridgeLine {
|
||||
/// IPv4 or IPv6 address
|
||||
pub addr: [u8; 16],
|
||||
/// port
|
||||
pub port: u16,
|
||||
/// fingerprint
|
||||
pub uid_fingerprint: u64,
|
||||
/// other protocol information, including pluggable transport,
|
||||
/// public key, etc.
|
||||
#[serde_as(as = "[_; BRIDGE_BYTES - 26]")]
|
||||
pub info: [u8; BRIDGE_BYTES - 26],
|
||||
}
|
||||
|
||||
/// A bucket contains MAX_BRIDGES_PER_BUCKET bridges plus the
|
||||
/// information needed to construct a Bucket Reachability credential,
|
||||
/// which is a 4-byte date, and a (P,Q) MAC
|
||||
type Bucket = (
|
||||
[BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
||||
Option<cred::BucketReachability>,
|
||||
);
|
||||
|
||||
/// The size of a plaintext bucket
|
||||
pub const BUCKET_BYTES: usize = BRIDGE_BYTES * MAX_BRIDGES_PER_BUCKET + 4 + 32 + 32;
|
||||
|
||||
/// The size of an encrypted bucket
|
||||
pub const ENC_BUCKET_BYTES: usize = BUCKET_BYTES + 12 + 16;
|
||||
|
||||
impl Default for BridgeLine {
|
||||
/// An "empty" BridgeLine is represented by all zeros
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
addr: [0; 16],
|
||||
port: 0,
|
||||
uid_fingerprint: 0,
|
||||
info: [0; BRIDGE_BYTES - 26],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BridgeLine {
|
||||
/// Encode a BridgeLine to a byte array
|
||||
pub fn encode(&self) -> [u8; BRIDGE_BYTES] {
|
||||
let mut res: [u8; BRIDGE_BYTES] = [0; BRIDGE_BYTES];
|
||||
res[0..16].copy_from_slice(&self.addr);
|
||||
res[16..18].copy_from_slice(&self.port.to_be_bytes());
|
||||
res[18..26].copy_from_slice(&self.uid_fingerprint.to_be_bytes());
|
||||
res[26..].copy_from_slice(&self.info);
|
||||
res
|
||||
}
|
||||
/// Decode a BridgeLine from a byte array
|
||||
pub fn decode(data: &[u8; BRIDGE_BYTES]) -> Self {
|
||||
let mut res: Self = Default::default();
|
||||
res.addr.copy_from_slice(&data[0..16]);
|
||||
res.port = u16::from_be_bytes(data[16..18].try_into().unwrap());
|
||||
res.uid_fingerprint = u64::from_be_bytes(data[18..26].try_into().unwrap());
|
||||
res.info.copy_from_slice(&data[26..]);
|
||||
res
|
||||
}
|
||||
/// Encode a bucket to a byte array, including a Bucket Reachability
|
||||
/// credential if appropriate
|
||||
pub fn bucket_encode(
|
||||
bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET],
|
||||
reachable: &HashMap<BridgeLine, Vec<(u32, usize)>>,
|
||||
today: u32,
|
||||
bucket_attr: &Scalar,
|
||||
reachability_priv: &IssuerPrivKey,
|
||||
) -> [u8; BUCKET_BYTES] {
|
||||
let mut res: [u8; BUCKET_BYTES] = [0; BUCKET_BYTES];
|
||||
let mut pos: usize = 0;
|
||||
let mut num_reachable: usize = 0;
|
||||
for bridge in bucket {
|
||||
res[pos..pos + BRIDGE_BYTES].copy_from_slice(&bridge.encode());
|
||||
if reachable.contains_key(bridge) {
|
||||
num_reachable += 1;
|
||||
}
|
||||
pos += BRIDGE_BYTES;
|
||||
}
|
||||
if num_reachable >= MIN_BUCKET_REACHABILITY {
|
||||
// Construct a Bucket Reachability credential for this
|
||||
// bucket and today's date
|
||||
let today_attr: Scalar = today.into();
|
||||
let mut rng = rand::thread_rng();
|
||||
let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;
|
||||
let b = Scalar::random(&mut rng);
|
||||
let P = &b * Btable;
|
||||
let Q = &(b
|
||||
* (reachability_priv.x[0]
|
||||
+ reachability_priv.x[1] * today_attr
|
||||
+ reachability_priv.x[2] * bucket_attr))
|
||||
* Btable;
|
||||
res[pos..pos + 4].copy_from_slice(&today.to_le_bytes());
|
||||
res[pos + 4..pos + 36].copy_from_slice(P.compress().as_bytes());
|
||||
res[pos + 36..].copy_from_slice(Q.compress().as_bytes());
|
||||
}
|
||||
res
|
||||
}
|
||||
/// Decode a bucket from a byte array, yielding the array of
|
||||
/// BridgeLine entries and an optional Bucket Reachability
|
||||
/// credential
|
||||
fn bucket_decode(data: &[u8; BUCKET_BYTES], bucket_attr: &Scalar) -> Bucket {
|
||||
let mut pos: usize = 0;
|
||||
let mut bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET] = Default::default();
|
||||
for bridge in bridges.iter_mut().take(MAX_BRIDGES_PER_BUCKET) {
|
||||
*bridge = BridgeLine::decode(data[pos..pos + BRIDGE_BYTES].try_into().unwrap());
|
||||
pos += BRIDGE_BYTES;
|
||||
}
|
||||
// See if there's a nonzero date in the Bucket Reachability
|
||||
// Credential
|
||||
let date = u32::from_le_bytes(data[pos..pos + 4].try_into().unwrap());
|
||||
let (optP, optQ) = if date > 0 {
|
||||
(
|
||||
CompressedRistretto::from_slice(&data[pos + 4..pos + 36]).decompress(),
|
||||
CompressedRistretto::from_slice(&data[pos + 36..]).decompress(),
|
||||
)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
if let (Some(P), Some(Q)) = (optP, optQ) {
|
||||
let date_attr: Scalar = date.into();
|
||||
(
|
||||
bridges,
|
||||
Some(cred::BucketReachability {
|
||||
P,
|
||||
Q,
|
||||
date: date_attr,
|
||||
bucket: *bucket_attr,
|
||||
}),
|
||||
)
|
||||
} else {
|
||||
(bridges, None)
|
||||
}
|
||||
}
|
||||
/// Create a random BridgeLine for testing
|
||||
#[cfg(test)]
|
||||
pub fn random() -> Self {
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut res: Self = Default::default();
|
||||
// Pick a random 4-byte address
|
||||
let mut addr: [u8; 4] = [0; 4];
|
||||
rng.fill_bytes(&mut addr);
|
||||
// If the leading byte is 224 or more, that's not a valid IPv4
|
||||
// address. Choose an IPv6 address instead (but don't worry too
|
||||
// much about it being well formed).
|
||||
if addr[0] >= 224 {
|
||||
rng.fill_bytes(&mut res.addr);
|
||||
} else {
|
||||
// Store an IPv4 address as a v4-mapped IPv6 address
|
||||
res.addr[10] = 255;
|
||||
res.addr[11] = 255;
|
||||
res.addr[12..16].copy_from_slice(&addr);
|
||||
};
|
||||
let ports: [u16; 4] = [443, 4433, 8080, 43079];
|
||||
let portidx = (rng.next_u32() % 4) as usize;
|
||||
res.port = ports[portidx];
|
||||
res.uid_fingerprint = rng.next_u64();
|
||||
let mut cert: [u8; 52] = [0; 52];
|
||||
rng.fill_bytes(&mut cert);
|
||||
let infostr: String = format!(
|
||||
"obfs4 cert={}, iat-mode=0",
|
||||
base64::encode_config(cert, base64::STANDARD_NO_PAD)
|
||||
);
|
||||
res.info[..infostr.len()].copy_from_slice(infostr.as_bytes());
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
/// A BridgeTable is the internal structure holding the buckets
|
||||
/// containing the bridges, the keys used to encrypt the buckets, and
|
||||
/// the encrypted buckets. The encrypted buckets will be exposed to the
|
||||
/// users of the system, and each user credential will contain the
|
||||
/// decryption key for one bucket.
|
||||
#[serde_as]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct BridgeTable {
|
||||
pub keys: Vec<[u8; 16]>,
|
||||
pub buckets: Vec<[BridgeLine; MAX_BRIDGES_PER_BUCKET]>,
|
||||
#[serde_as(as = "Vec<[_; ENC_BUCKET_BYTES]>")]
|
||||
pub encbuckets: Vec<[u8; ENC_BUCKET_BYTES]>,
|
||||
/// Individual bridges that are reachable
|
||||
pub reachable: HashMap<BridgeLine, Vec<(u32, usize)>>,
|
||||
/// bucket ids of "hot spare" buckets. These buckets are not handed
|
||||
/// to users, nor do they have any Migration credentials pointing to
|
||||
/// them. When a new Migration credential is needed, a bucket is
|
||||
/// removed from this set and used for that purpose.
|
||||
pub spares: HashSet<u32>,
|
||||
/// bucket ids of "recyclable" buckets. These buckets have not been handed out
|
||||
/// to users, nor do they have any Migration credentials pointing to
|
||||
/// them. When a single bridge is needed and there are no more readily available bridges,
|
||||
/// bridges are taken from a bucket of hot spares, making the unallocated spare bucket empty
|
||||
/// but still useable as it has not been handed out previously.
|
||||
pub recyclable: HashSet<u32>,
|
||||
/// In some instances a single bridge may need to be added to a bucket
|
||||
/// In some instances a single bridge may need to be added to a bucket
|
||||
/// In that case, a spare bucket will be removed from the set of spare bridges. One
|
||||
pub unallocated_bridges: Vec<BridgeLine>,
|
||||
/// The date the buckets were last encrypted to make the encbucket.
|
||||
/// The encbucket must be rebuilt each day so that the Bucket
|
||||
/// Reachability credentials in the buckets can be refreshed.
|
||||
pub date_last_enc: u32,
|
||||
}
|
||||
|
||||
// Invariant: the lengths of the keys and buckets vectors are the same.
|
||||
// The encbuckets vector only gets updated when encrypt_table is called.
|
||||
|
||||
impl BridgeTable {
|
||||
/// Get the number of buckets in the bridge table
|
||||
pub fn num_buckets(&self) -> usize {
|
||||
self.buckets.len()
|
||||
}
|
||||
|
||||
pub fn recycle_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET], index: u32) {
|
||||
// Pick a random key to encrypt this bucket
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut key: [u8; 16] = [0; 16];
|
||||
rng.fill_bytes(&mut key);
|
||||
self.keys[index as usize] = key;
|
||||
self.buckets[index as usize] = *bucket;
|
||||
for (i, b) in bucket.iter().enumerate() {
|
||||
if b.port > 0 {
|
||||
if let Some(v) = self.reachable.get_mut(b) {
|
||||
v.push((index, i));
|
||||
} else {
|
||||
let v = vec![(index, i)];
|
||||
self.reachable.insert(*b, v);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Append a new bucket to the bridge table, returning its index
|
||||
pub fn new_bucket(&mut self, bucket: &[BridgeLine; MAX_BRIDGES_PER_BUCKET]) -> u32 {
|
||||
// Pick a random key to encrypt this bucket
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut key: [u8; 16] = [0; 16];
|
||||
rng.fill_bytes(&mut key);
|
||||
self.keys.push(key);
|
||||
self.buckets.push(*bucket);
|
||||
let bucketnum: u32 = (self.buckets.len() - 1).try_into().unwrap();
|
||||
// Mark the new bridges as available
|
||||
for (i, b) in bucket.iter().enumerate() {
|
||||
if b.port > 0 {
|
||||
if let Some(v) = self.reachable.get_mut(b) {
|
||||
v.push((bucketnum, i));
|
||||
} else {
|
||||
let v = vec![(bucketnum, i)];
|
||||
self.reachable.insert(*b, v);
|
||||
}
|
||||
}
|
||||
}
|
||||
bucketnum
|
||||
}
|
||||
|
||||
/// Create the vector of encrypted buckets from the keys and buckets
|
||||
/// in the BridgeTable. All of the entries will be (randomly)
|
||||
/// re-encrypted, so it will be hidden whether any individual bucket
|
||||
/// has changed (except for entirely new buckets, of course).
|
||||
/// Bucket Reachability credentials are added to the buckets when
|
||||
/// enough (at least MIN_BUCKET_REACHABILITY) bridges in the bucket
|
||||
/// are reachable.
|
||||
pub fn encrypt_table(&mut self, today: u32, reachability_priv: &IssuerPrivKey) {
|
||||
let mut rng = rand::thread_rng();
|
||||
self.encbuckets.clear();
|
||||
// We want id to be a u32, so we use .zip(0u32..) instead of
|
||||
// enumerate()
|
||||
for ((key, bucket), id) in self.keys.iter().zip(self.buckets.iter()).zip(0u32..) {
|
||||
let mut encbucket: [u8; ENC_BUCKET_BYTES] = [0; ENC_BUCKET_BYTES];
|
||||
let plainbucket: [u8; BUCKET_BYTES] = BridgeLine::bucket_encode(
|
||||
bucket,
|
||||
&self.reachable,
|
||||
today,
|
||||
&to_scalar(id, key),
|
||||
reachability_priv,
|
||||
);
|
||||
// Set the AES key
|
||||
let aeskey = GenericArray::from_slice(key);
|
||||
// Pick a random nonce
|
||||
let mut noncebytes: [u8; 12] = [0; 12];
|
||||
rng.fill_bytes(&mut noncebytes);
|
||||
let nonce = GenericArray::from_slice(&noncebytes);
|
||||
// Encrypt
|
||||
let cipher = Aes128Gcm::new(aeskey);
|
||||
let ciphertext: Vec<u8> = cipher.encrypt(nonce, plainbucket.as_ref()).unwrap();
|
||||
encbucket[0..12].copy_from_slice(&noncebytes);
|
||||
encbucket[12..].copy_from_slice(ciphertext.as_slice());
|
||||
self.encbuckets.push(encbucket);
|
||||
}
|
||||
self.date_last_enc = today;
|
||||
}
|
||||
|
||||
/// Decrypt an individual encrypted bucket, given its id, key, and
|
||||
/// the encrypted bucket itself
|
||||
pub fn decrypt_bucket(
|
||||
id: u32,
|
||||
key: &[u8; 16],
|
||||
encbucket: &[u8; ENC_BUCKET_BYTES],
|
||||
) -> Result<Bucket, aead::Error> {
|
||||
// Set the nonce and the key
|
||||
let nonce = GenericArray::from_slice(&encbucket[0..12]);
|
||||
let aeskey = GenericArray::from_slice(key);
|
||||
// Decrypt
|
||||
let cipher = Aes128Gcm::new(aeskey);
|
||||
let plaintext: Vec<u8> = cipher.decrypt(nonce, encbucket[12..].as_ref())?;
|
||||
// Convert the plaintext bytes to an array of BridgeLines
|
||||
Ok(BridgeLine::bucket_decode(
|
||||
plaintext.as_slice().try_into().unwrap(),
|
||||
&to_scalar(id, key),
|
||||
))
|
||||
}
|
||||
|
||||
/// Decrypt an individual encrypted bucket, given its id and key
|
||||
pub fn decrypt_bucket_id(&self, id: u32, key: &[u8; 16]) -> Result<Bucket, aead::Error> {
|
||||
let encbucket = self.encbuckets[id as usize];
|
||||
BridgeTable::decrypt_bucket(id, key, &encbucket)
|
||||
}
|
||||
}
|
||||
|
||||
// Unit tests that require access to the testing-only function
// BridgeLine::random()
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bridge_table() -> Result<(), aead::Error> {
        // Private key for the Bucket Reachability credentials, which
        // have two attributes (date and bucket)
        let reachability_priv = IssuerPrivKey::new(2);
        // Start from an empty bridge table
        let mut btable: BridgeTable = Default::default();
        // Add 20 buckets holding one random bridge each
        for _ in 0..20 {
            btable.new_bucket(&[
                BridgeLine::random(),
                Default::default(),
                Default::default(),
            ]);
        }
        // And 20 more holding three random bridges each
        for _ in 0..20 {
            btable.new_bucket(&[
                BridgeLine::random(),
                BridgeLine::random(),
                BridgeLine::random(),
            ]);
        }
        let today: u32 = time::OffsetDateTime::now_utc()
            .date()
            .julian_day()
            .try_into()
            .unwrap();
        // Build the encrypted bridge table
        btable.encrypt_table(today, &reachability_priv);
        // A 1-bridge bucket should decrypt with its own key
        let key7 = btable.keys[7];
        let bucket7 = btable.decrypt_bucket_id(7, &key7)?;
        println!("bucket 7 = {:?}", bucket7);
        // A 3-bridge bucket should decrypt with its own key
        let key24 = btable.keys[24];
        let bucket24 = btable.decrypt_bucket_id(24, &key24)?;
        println!("bucket 24 = {:?}", bucket24);
        // Decrypting with the wrong key must fail
        let key12 = btable.keys[12];
        let res = btable.decrypt_bucket_id(15, &key12).unwrap_err();
        println!("bucket key mismatch = {:?}", res);
        Ok(())
    }
}
|
||||
|
||||
/// Convert an id and key to a Scalar attribute
|
||||
pub fn to_scalar(id: u32, key: &[u8; 16]) -> Scalar {
|
||||
let mut b: [u8; 32] = [0; 32];
|
||||
// b is a little-endian representation of the Scalar; put the key in
|
||||
// the low 16 bytes, and the id in the next 4 bytes.
|
||||
b[0..16].copy_from_slice(key);
|
||||
b[16..20].copy_from_slice(&id.to_le_bytes());
|
||||
// This cannot fail, since we're only using the low 20 bytes of b
|
||||
Scalar::from_canonical_bytes(b).unwrap()
|
||||
}
|
||||
|
||||
/// Convert a Scalar attribute to an id and key if possible
|
||||
pub fn from_scalar(s: Scalar) -> Result<(u32, [u8; 16]), aead::Error> {
|
||||
// Check that the top 12 bytes of the Scalar are 0
|
||||
let sbytes = s.as_bytes();
|
||||
if sbytes[20..].ct_eq(&[0u8; 12]).unwrap_u8() == 0 {
|
||||
return Err(aead::Error);
|
||||
}
|
||||
let id = u32::from_le_bytes(sbytes[16..20].try_into().unwrap());
|
||||
let mut key: [u8; 16] = [0; 16];
|
||||
key.copy_from_slice(&sbytes[..16]);
|
||||
Ok((id, key))
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
/*! The various credentials used by the system.
|
||||
|
||||
In each case, (P,Q) forms the MAC on the credential. This MAC is
|
||||
verifiable only by the issuing party, or if the issuing party issues a
|
||||
zero-knowledge proof of its correctness (as it does at issuing time). */
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// A migration credential.
|
||||
///
|
||||
/// This credential authorizes the holder of the Lox credential with the
|
||||
/// given id to switch from bucket from_bucket to bucket to_bucket. The
|
||||
/// migration_type attribute is 0 for trust upgrade migrations (moving
|
||||
/// from a 1-bridge untrusted bucket to a 3-bridge trusted bucket) and 1
|
||||
/// for blockage migrations (moving buckets because the from_bucket has
|
||||
/// been blocked).
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Migration {
|
||||
pub P: RistrettoPoint,
|
||||
pub Q: RistrettoPoint,
|
||||
pub lox_id: Scalar,
|
||||
pub from_bucket: Scalar,
|
||||
pub to_bucket: Scalar,
|
||||
pub migration_type: Scalar,
|
||||
}
|
||||
|
||||
/// The main user credential in the Lox system.
|
||||
///
|
||||
/// Its id is jointly generated by the user and the BA (bridge
|
||||
/// authority), but known only to the user. The level_since date is the
|
||||
/// Julian date of when this user was changed to the current trust
|
||||
/// level.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Lox {
|
||||
pub P: RistrettoPoint,
|
||||
pub Q: RistrettoPoint,
|
||||
pub id: Scalar,
|
||||
pub bucket: Scalar,
|
||||
pub trust_level: Scalar,
|
||||
pub level_since: Scalar,
|
||||
pub invites_remaining: Scalar,
|
||||
pub blockages: Scalar,
|
||||
}
|
||||
|
||||
/// The migration key credential.
|
||||
///
|
||||
/// This credential is never actually instantiated. It is an implicit
|
||||
/// credential on attributes lox_id and from_bucket. This credential
|
||||
/// type does have an associated private and public key, however. The
|
||||
/// idea is that if a user proves (in zero knowledge) that their Lox
|
||||
/// credential entitles them to migrate from one bucket to another, the
|
||||
/// BA will issue a (blinded, so the BA will not know the values of the
|
||||
/// attributes or of Q) MAC on this implicit credential. The Q value
|
||||
/// will then be used (actually, a hash of lox_id, from_bucket, and Q)
|
||||
/// to encrypt the to_bucket, P, and Q fields of a Migration credential.
|
||||
/// That way, people entitled to migrate buckets can receive a Migration
|
||||
/// credential with their new bucket, without the BA learning either
|
||||
/// their old or new buckets.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct MigrationKey {
|
||||
pub P: RistrettoPoint,
|
||||
pub Q: RistrettoPoint,
|
||||
pub lox_id: Scalar,
|
||||
pub from_bucket: Scalar,
|
||||
}
|
||||
|
||||
/// The Bucket Reachability credential.
|
||||
///
|
||||
/// Each day, a credential of this type is put in each bucket that has
|
||||
/// at least a (configurable) threshold number of bridges that have not
|
||||
/// been blocked as of the given date. Users can present this
|
||||
/// credential (in zero knowledge) with today's date to prove that the
|
||||
/// bridges in their bucket have not been blocked, in order to gain a
|
||||
/// trust level.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct BucketReachability {
|
||||
pub P: RistrettoPoint,
|
||||
pub Q: RistrettoPoint,
|
||||
pub date: Scalar,
|
||||
pub bucket: Scalar,
|
||||
}
|
||||
|
||||
/// The Invitation credential.
|
||||
///
|
||||
/// These credentials allow a Lox user (the inviter) of sufficient trust
|
||||
/// (level 2 or higher) to invite someone else (the invitee) to join the
|
||||
/// system. The invitee ends up at trust level 1, in the _same bucket_
|
||||
/// as the inviter, and inherits the inviter's blockages count (so that
|
||||
/// you can't clear your blockages count simply by inviting yourself).
|
||||
/// Invitations expire after some amount of time.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Invitation {
|
||||
pub P: RistrettoPoint,
|
||||
pub Q: RistrettoPoint,
|
||||
pub inv_id: Scalar,
|
||||
pub date: Scalar,
|
||||
pub bucket: Scalar,
|
||||
pub blockages: Scalar,
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
/*! Filter duplicate shows of credentials and open invitations by id
|
||||
(which will typically be a Scalar).
|
||||
|
||||
This implementation just keeps the table of seen ids in memory, but a
|
||||
production one would of course use a disk-backed database. */
|
||||
|
||||
use std::cmp::Eq;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::Hash;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Each instance of DupFilter maintains its own independent table of
|
||||
/// seen ids. IdType will typically be Scalar.
|
||||
#[derive(Default, Debug, Serialize, Deserialize)]
|
||||
pub struct DupFilter<IdType: Hash + Eq + Copy + Serialize> {
|
||||
seen_table: HashMap<IdType, ()>,
|
||||
}
|
||||
|
||||
/// A return type indicating whether the item was fresh (not previously
/// seen) or previously seen
#[derive(PartialEq, Eq, Debug)]
pub enum SeenType {
    /// The id had not been seen before
    Fresh,
    /// The id was already in the table
    Seen,
}
|
||||
|
||||
impl<IdType: Hash + Eq + Copy + Serialize> DupFilter<IdType> {
|
||||
/// Check to see if the id is in the seen table, but do not add it
|
||||
/// to the seen table. Return Seen if it is already in the table,
|
||||
/// Fresh if not.
|
||||
pub fn check(&self, id: &IdType) -> SeenType {
|
||||
if self.seen_table.contains_key(id) {
|
||||
SeenType::Seen
|
||||
} else {
|
||||
SeenType::Fresh
|
||||
}
|
||||
}
|
||||
|
||||
/// As atomically as possible, check to see if the id is in the seen
|
||||
/// table, and add it if not. Return Fresh if it was not already
|
||||
/// in the table, and Seen if it was.
|
||||
pub fn filter(&mut self, id: &IdType) -> SeenType {
|
||||
match self.seen_table.insert(*id, ()) {
|
||||
None => SeenType::Fresh,
|
||||
Some(()) => SeenType::Seen,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,728 @@
|
|||
/*! Implementation of a new style of bridge authority for Tor that
|
||||
allows users to invite other users, while protecting the social graph
|
||||
from the bridge authority itself.
|
||||
|
||||
We use CMZ14 credentials (GGM version, which is more efficient, but
|
||||
makes a stronger security assumption): "Algebraic MACs and
|
||||
Keyed-Verification Anonymous Credentials" (Chase, Meiklejohn, and
|
||||
Zaverucha, CCS 2014)
|
||||
|
||||
The notation follows that of the paper "Hyphae: Social Secret Sharing"
|
||||
(Lovecruft and de Valence, 2017), Section 4. */
|
||||
|
||||
// We really want points to be capital letters and scalars to be
|
||||
// lowercase letters
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
#[macro_use]
|
||||
extern crate zkp;
|
||||
|
||||
pub mod bridge_table;
|
||||
pub mod cred;
|
||||
pub mod dup_filter;
|
||||
pub mod migration_table;
|
||||
|
||||
use sha2::Sha512;
|
||||
|
||||
use rand::rngs::OsRng;
|
||||
use rand::Rng;
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
|
||||
use curve25519_dalek::constants as dalek_constants;
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
#[cfg(test)]
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use ed25519_dalek::{Keypair, PublicKey, Signature, SignatureError, Signer, Verifier};
|
||||
use subtle::ConstantTimeEq;
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use bridge_table::{
|
||||
BridgeLine, BridgeTable, ENC_BUCKET_BYTES, MAX_BRIDGES_PER_BUCKET, MIN_BUCKET_REACHABILITY,
|
||||
};
|
||||
use migration_table::{MigrationTable, MigrationType};
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
lazy_static! {
    /// CMZ generator A, derived by hashing a fixed domain-separation
    /// string to a Ristretto point
    pub static ref CMZ_A: RistrettoPoint =
        RistrettoPoint::hash_from_bytes::<Sha512>(b"CMZ Generator A");
    /// CMZ generator B: the standard Ristretto basepoint
    pub static ref CMZ_B: RistrettoPoint = dalek_constants::RISTRETTO_BASEPOINT_POINT;
    /// Precomputed multiplication table for A
    pub static ref CMZ_A_TABLE: RistrettoBasepointTable = RistrettoBasepointTable::create(&CMZ_A);
    /// Precomputed multiplication table for B
    pub static ref CMZ_B_TABLE: RistrettoBasepointTable =
        dalek_constants::RISTRETTO_BASEPOINT_TABLE;
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct IssuerPrivKey {
|
||||
x0tilde: Scalar,
|
||||
x: Vec<Scalar>,
|
||||
}
|
||||
|
||||
impl IssuerPrivKey {
|
||||
/// Create an IssuerPrivKey for credentials with the given number of
|
||||
/// attributes.
|
||||
pub fn new(n: u16) -> IssuerPrivKey {
|
||||
let mut rng = rand::thread_rng();
|
||||
let x0tilde = Scalar::random(&mut rng);
|
||||
let mut x: Vec<Scalar> = Vec::with_capacity((n + 1) as usize);
|
||||
|
||||
// Set x to a vector of n+1 random Scalars
|
||||
x.resize_with((n + 1) as usize, || Scalar::random(&mut rng));
|
||||
|
||||
IssuerPrivKey { x0tilde, x }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct IssuerPubKey {
|
||||
X: Vec<RistrettoPoint>,
|
||||
}
|
||||
|
||||
impl IssuerPubKey {
|
||||
/// Create an IssuerPubKey from the corresponding IssuerPrivKey
|
||||
pub fn new(privkey: &IssuerPrivKey) -> IssuerPubKey {
|
||||
let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
|
||||
let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;
|
||||
let n_plus_one = privkey.x.len();
|
||||
let mut X: Vec<RistrettoPoint> = Vec::with_capacity(n_plus_one);
|
||||
|
||||
// The first element is a special case; it is
|
||||
// X[0] = x0tilde*A + x[0]*B
|
||||
X.push(&privkey.x0tilde * Atable + &privkey.x[0] * Btable);
|
||||
|
||||
// The other elements (1 through n) are X[i] = x[i]*A
|
||||
X.extend(privkey.x.iter().skip(1).map(|xi| xi * Atable));
|
||||
|
||||
IssuerPubKey { X }
|
||||
}
|
||||
}
|
||||
|
||||
/// The BridgeDb. This will typically be a singleton object. The
|
||||
/// BridgeDb's role is simply to issue signed "open invitations" to
|
||||
/// people who are not yet part of the system.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct BridgeDb {
|
||||
/// The keypair for signing open invitations
|
||||
keypair: Keypair,
|
||||
/// The public key for verifying open invitations
|
||||
pub pubkey: PublicKey,
|
||||
/// The set of open-invitation buckets
|
||||
openinv_buckets: HashSet<u32>,
|
||||
}
|
||||
|
||||
/// An open invitation is a [u8; OPENINV_LENGTH] where the first 32
|
||||
/// bytes are the serialization of a random Scalar (the invitation id),
|
||||
/// the next 4 bytes are a little-endian bucket number, and the last
|
||||
/// SIGNATURE_LENGTH bytes are the signature on the first 36 bytes.
|
||||
pub const OPENINV_LENGTH: usize = 32 // the length of the random
|
||||
// invitation id (a Scalar)
|
||||
+ 4 // the length of the u32 for the bucket number
|
||||
+ ed25519_dalek::SIGNATURE_LENGTH; // the length of the signature
|
||||
|
||||
impl BridgeDb {
    /// Create the BridgeDb.
    ///
    /// Generates a fresh Ed25519 keypair; the set of open-invitation
    /// buckets starts out empty.
    pub fn new() -> Self {
        let mut csprng = OsRng {};
        let keypair = Keypair::generate(&mut csprng);
        let pubkey = keypair.public;
        Self {
            keypair,
            pubkey,
            openinv_buckets: Default::default(),
        }
    }

    /// Insert an open-invitation bucket into the set
    pub fn insert_openinv(&mut self, bucket: u32) {
        self.openinv_buckets.insert(bucket);
    }

    /// Remove an open-invitation bucket from the set
    pub fn remove_openinv(&mut self, bucket: u32) {
        self.openinv_buckets.remove(&bucket);
    }

    /// Produce an open invitation. In this example code, we just
    /// choose a random open-invitation bucket.
    ///
    /// NOTE(review): panics if openinv_buckets is empty (gen_range
    /// over an empty range); callers must have inserted at least one
    /// open-invitation bucket first.
    pub fn invite(&self) -> [u8; OPENINV_LENGTH] {
        let mut res: [u8; OPENINV_LENGTH] = [0; OPENINV_LENGTH];
        let mut rng = rand::thread_rng();
        // Choose a random invitation id (a Scalar) and serialize it
        // into bytes 0..32
        let id = Scalar::random(&mut rng);
        res[0..32].copy_from_slice(&id.to_bytes());
        // Choose a random bucket number (from the set of open
        // invitation buckets) and serialize it (little-endian) into
        // bytes 32..36
        let openinv_vec: Vec<&u32> = self.openinv_buckets.iter().collect();
        let bucket_num = *openinv_vec[rng.gen_range(0, openinv_vec.len())];
        res[32..(32 + 4)].copy_from_slice(&bucket_num.to_le_bytes());
        // Sign the first 36 bytes and serialize the signature into the
        // remaining bytes
        let sig = self.keypair.sign(&res[0..(32 + 4)]);
        res[(32 + 4)..].copy_from_slice(&sig.to_bytes());
        res
    }

    /// Verify an open invitation. Returns the invitation id and the
    /// bucket number if the signature checked out. It is up to the
    /// caller to then check that the invitation id has not been used
    /// before.
    pub fn verify(
        invitation: [u8; OPENINV_LENGTH],
        pubkey: PublicKey,
    ) -> Result<(Scalar, u32), SignatureError> {
        // Pull out the signature (bytes 36..) and verify it over the
        // first 36 bytes
        let sig = Signature::try_from(&invitation[(32 + 4)..])?;
        pubkey.verify(&invitation[0..(32 + 4)], &sig)?;
        // The signature passed. Pull out the bucket number and then
        // the invitation id
        let bucket = u32::from_le_bytes(invitation[32..(32 + 4)].try_into().unwrap());
        match Scalar::from_canonical_bytes(invitation[0..32].try_into().unwrap()) {
            // It should never happen that there's a valid signature on
            // an invalid serialization of a Scalar, but check anyway.
            None => Err(SignatureError::new()),
            Some(s) => Ok((s, bucket)),
        }
    }
}
|
||||
|
||||
impl Default for BridgeDb {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// The bridge authority. This will typically be a singleton object.
#[derive(Debug, Serialize, Deserialize)]
pub struct BridgeAuth {
    /// The private key for the main Lox credential
    lox_priv: IssuerPrivKey,
    /// The public key for the main Lox credential
    pub lox_pub: IssuerPubKey,
    /// The private key for migration credentials
    migration_priv: IssuerPrivKey,
    /// The public key for migration credentials
    pub migration_pub: IssuerPubKey,
    /// The private key for migration key credentials
    migrationkey_priv: IssuerPrivKey,
    /// The public key for migration key credentials
    pub migrationkey_pub: IssuerPubKey,
    /// The private key for bucket reachability credentials
    reachability_priv: IssuerPrivKey,
    /// The public key for bucket reachability credentials
    pub reachability_pub: IssuerPubKey,
    /// The private key for invitation credentials
    invitation_priv: IssuerPrivKey,
    /// The public key for invitation credentials
    pub invitation_pub: IssuerPubKey,

    /// The public key of the BridgeDb issuing open invitations
    pub bridgedb_pub: PublicKey,

    /// The bridge table
    pub bridge_table: BridgeTable,

    /// The migration tables: trust upgrades (level 0 -> 1) and
    /// blockage migrations respectively
    trustup_migration_table: MigrationTable,
    blockage_migration_table: MigrationTable,

    /// Duplicate filter for open invitations
    openinv_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for Lox credential ids
    id_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for Invitation credential ids
    inv_id_filter: dup_filter::DupFilter<Scalar>,
    /// Duplicate filter for trust promotions (from untrusted level 0 to
    /// trusted level 1)
    trust_promotion_filter: dup_filter::DupFilter<Scalar>,

    /// For testing only: offset of the true time to the simulated time.
    /// Not serialized; a deserialized BridgeAuth starts with the
    /// Default offset.
    #[serde(skip)]
    time_offset: time::Duration,
}
|
||||
|
||||
impl BridgeAuth {
    /// Create a new BridgeAuth, generating fresh issuer keypairs for
    /// each credential type, given the BridgeDb's public key.
    pub fn new(bridgedb_pub: PublicKey) -> Self {
        // Create the private and public keys for each of the types of
        // credential, each with the appropriate number of attributes
        let lox_priv = IssuerPrivKey::new(6);
        let lox_pub = IssuerPubKey::new(&lox_priv);
        let migration_priv = IssuerPrivKey::new(4);
        let migration_pub = IssuerPubKey::new(&migration_priv);
        let migrationkey_priv = IssuerPrivKey::new(2);
        let migrationkey_pub = IssuerPubKey::new(&migrationkey_priv);
        let reachability_priv = IssuerPrivKey::new(2);
        let reachability_pub = IssuerPubKey::new(&reachability_priv);
        let invitation_priv = IssuerPrivKey::new(4);
        let invitation_pub = IssuerPubKey::new(&invitation_priv);
        Self {
            lox_priv,
            lox_pub,
            migration_priv,
            migration_pub,
            migrationkey_priv,
            migrationkey_pub,
            reachability_priv,
            reachability_pub,
            invitation_priv,
            invitation_pub,
            bridgedb_pub,
            bridge_table: Default::default(),
            trustup_migration_table: MigrationTable::new(MigrationType::TrustUpgrade),
            blockage_migration_table: MigrationTable::new(MigrationType::Blockage),
            openinv_filter: Default::default(),
            id_filter: Default::default(),
            inv_id_filter: Default::default(),
            trust_promotion_filter: Default::default(),
            time_offset: time::Duration::zero(),
        }
    }

    /// Insert a set of open invitation bridges.
    ///
    /// Each of the bridges will be given its own open invitation
    /// bucket, and the BridgeDb will be informed. A single bucket
    /// containing all of the bridges will also be created, with a trust
    /// upgrade migration from each of the single-bridge buckets.
    pub fn add_openinv_bridges(
        &mut self,
        bridges: [BridgeLine; MAX_BRIDGES_PER_BUCKET],
        bdb: &mut BridgeDb,
    ) {
        // Reuse a recyclable bucket number if one exists; otherwise
        // append a new bucket.
        let bnum: u32;
        if self.bridge_table.recyclable.is_empty() {
            bnum = self.bridge_table.new_bucket(&bridges);
        } else {
            bnum = *self.bridge_table.recyclable.iter().next().unwrap();
            self.bridge_table.recyclable.remove(&bnum);
            self.bridge_table.recycle_bucket(&bridges, bnum)
        }
        // One single-bridge open-invitation bucket per bridge, each
        // with a trust-upgrade migration into the combined bucket bnum.
        let mut single = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
        for b in bridges.iter() {
            single[0] = *b;
            let snum = self.bridge_table.new_bucket(&single);
            bdb.insert_openinv(snum);
            self.trustup_migration_table.table.insert(snum, bnum);
        }
    }

    /// Insert a hot spare bucket of bridges
    pub fn add_spare_bucket(&mut self, bucket: [BridgeLine; MAX_BRIDGES_PER_BUCKET]) {
        // Same reuse-or-append logic as add_openinv_bridges
        let bnum: u32;
        if self.bridge_table.recyclable.is_empty() {
            bnum = self.bridge_table.new_bucket(&bucket);
        } else {
            bnum = *self.bridge_table.recyclable.iter().next().unwrap();
            self.bridge_table.recyclable.remove(&bnum);
            self.bridge_table.recycle_bucket(&bucket, bnum)
        }
        self.bridge_table.spares.insert(bnum);
    }

    /// NOTE(review): unimplemented stub — currently a no-op; the
    /// comments below sketch the intended design.
    pub fn sync_table(&mut self) {

        // Create a hashtable (?) of bridges in the lox distributor from new resources
        // accept the hashtable and recreate the bridge table from the hash table here
        // using existing reachable bridges, other table checks and placements from existing bridge table
        // If bridges are in reachable bridges, put them in the table with their Vec
        // How to check for bridges that aren't there/are extra?
        // After going through the update, make sure bridges in the table are the same and deal with discrepancies
        // This will be the bad/annoying part
    }

    /// Drain the given bridges into the unallocated pool, then form
    /// full buckets of MAX_BRIDGES_PER_BUCKET from the pool and add
    /// each as a set of open-invitation bridges.  Any remainder
    /// (fewer than a full bucket) stays unallocated.
    pub fn allocate_bridges(
        &mut self,
        distributor_bridges: &mut Vec<BridgeLine>,
        bdb: &mut BridgeDb,
    ) {
        while let Some(bridge) = distributor_bridges.pop() {
            self.bridge_table.unallocated_bridges.push(bridge);
        }
        while self.bridge_table.unallocated_bridges.len() >= MAX_BRIDGES_PER_BUCKET {
            let mut bucket = [BridgeLine::default(); MAX_BRIDGES_PER_BUCKET];
            for i in 0..MAX_BRIDGES_PER_BUCKET {
                bucket[i] = self.bridge_table.unallocated_bridges.pop().unwrap();
            }
            self.add_openinv_bridges(bucket, bdb);
        }
    }

    // Update the details of a bridge in the bridge table. This assumes that the IP and Port
    // of a given bridge remains the same and thus can be updated.
    // First we must retrieve the list of reachable bridges, then we must search for any matching our partial key
    // which will include the IP and Port. Then we can replace the original bridge with the updated bridge
    // Returns true if the bridge has successfully updated
    pub fn bridge_update(&mut self, bridge: &BridgeLine) -> bool {
        let mut res: bool = false; //default False to assume that update failed
        //Needs to be updated since bridge will only match on some fields.

        // Clone so we can mutate self.bridge_table.reachable while
        // iterating over the (old) entries.
        let reachable_bridges = self.bridge_table.reachable.clone();
        for reachable_bridge in reachable_bridges {
            // Match on the partial key (uid_fingerprint) only
            if reachable_bridge.0.uid_fingerprint == bridge.uid_fingerprint {
                println!(
                    "Bridge from table: {:?} has same IP and Port as bridge {:?}!",
                    reachable_bridge.0, bridge
                );
                // Now we must remove the old bridge from the table and insert the new bridge in its place
                // i.e., in the same bucket and with the same permissions.
                let positions = self.bridge_table.reachable.get(&reachable_bridge.0);
                if let Some(v) = positions {
                    // Overwrite the old BridgeLine at every (bucket,
                    // offset) it occupies
                    for (bucketnum, offset) in v.iter() {
                        println!("Bucket num: {:?} and offset: {:?}", bucketnum, offset);
                        assert!(
                            self.bridge_table.buckets[*bucketnum as usize][*offset]
                                == reachable_bridge.0
                        );
                        self.bridge_table.buckets[*bucketnum as usize][*offset] = *bridge;
                        assert!(self.bridge_table.buckets[*bucketnum as usize][*offset] == *bridge);
                        assert!(
                            self.bridge_table.buckets[*bucketnum as usize][*offset]
                                != reachable_bridge.0
                        );
                    }
                    res = true;
                } else {
                    // Matched fingerprint but no positions recorded:
                    // give up (res is still false here)
                    return res;
                }
                // We must also remove the old bridge from the reachable bridges table
                // and add the new bridge
                self.bridge_table.reachable.remove(&reachable_bridge.0);
                self.bridge_table
                    .reachable
                    .insert(*bridge, reachable_bridge.1);
                // First fingerprint match wins; stop scanning
                return res;
            }
        }
        // If this is returned, we assume that the bridge wasn't found in the bridge table
        // and therefore should be treated as a "new bridge"
        res
    }

    /// Replace a (presumed gone) bridge everywhere it appears, using in
    /// order of preference: the caller-supplied available_bridge, a
    /// bridge from the unallocated pool, or one taken from a spare
    /// bucket (whose remaining bridges go to the unallocated pool and
    /// whose bucket number becomes recyclable).  Returns true on
    /// success; false if the bridge is unknown or no replacement was
    /// available.
    pub fn bridge_replace(
        &mut self,
        bridge: &BridgeLine,
        available_bridge: Option<&BridgeLine>,
    ) -> bool {
        let mut res: bool = false;
        // Clone so lookups survive the mutations below
        let reachable_bridges = &self.bridge_table.reachable.clone();
        if let Some(positions) = reachable_bridges.get(bridge) {
            if let Some(replacement) = available_bridge {
                for (bucketnum, offset) in positions.iter() {
                    assert!(self.bridge_table.buckets[*bucketnum as usize][*offset] == *bridge);
                    self.bridge_table.buckets[*bucketnum as usize][*offset] = *replacement;
                    // Remove the bridge from the reachable bridges and add new bridge
                    self.bridge_table
                        .reachable
                        .insert(*replacement, positions.clone());
                    // Remove the bridge from the bucket
                    self.bridge_table.reachable.remove(bridge);
                    res = true
                }
            } else if !self.bridge_table.unallocated_bridges.is_empty() {
                let replacement = &self.bridge_table.unallocated_bridges.pop().unwrap();
                for (bucketnum, offset) in positions.iter() {
                    self.bridge_table.buckets[*bucketnum as usize][*offset] = *replacement;
                    self.bridge_table
                        .reachable
                        .insert(*replacement, positions.clone());
                    // Remove the bridge from the bucket
                    self.bridge_table.reachable.remove(bridge);
                }
                res = true
            } else if !self.bridge_table.spares.is_empty() {
                // Get the first spare and remove it from the spares set.
                let spare = *self.bridge_table.spares.iter().next().unwrap();
                self.bridge_table.spares.remove(&spare);
                // Indicate the removed bucket as a recyclable bucket
                self.bridge_table.recyclable.insert(spare);
                // Get the actual bridges from the spare bucket
                let spare_bucket = self.bridge_table.buckets[spare as usize];
                let mut replacement: &BridgeLine = &BridgeLine::default();
                // Make the first spare the replacement bridge, add the others to the set of
                // unallocated_bridges.  (replacement.port > 0 means a
                // replacement has already been chosen; the default
                // BridgeLine has port 0.)
                for spare_bridge in spare_bucket.iter() {
                    if replacement.port > 0 {
                        self.bridge_table.unallocated_bridges.push(*spare_bridge);
                        // Mark bucket as unreachable while it is unallocated
                        self.bridge_table.reachable.remove(spare_bridge);
                    } else {
                        replacement = spare_bridge;
                    }
                }
                for (bucketnum, offset) in positions.iter() {
                    self.bridge_table.buckets[*bucketnum as usize][*offset] = *replacement;
                    self.bridge_table
                        .reachable
                        .insert(*replacement, positions.clone());
                    // Remove the bridge from the bucket
                    self.bridge_table.reachable.remove(bridge);
                }
                res = true
            }
            // If there are no available bridges that can be assigned here, the only thing
            // that can be done is return an indication that updating the gone bridge
            // didn't work.
            // In this case, we do not mark the bridge as unreachable or remove it from the
            // reachable bridges so that we can still find it when a new bridge does become available
        }
        res
    }

    /// Mark a bridge as unreachable
    ///
    /// This bridge will be removed from each of the buckets that
    /// contains it. If any of those are open-invitation buckets, the
    /// trust upgrade migration for that bucket will be removed and the
    /// BridgeDb will be informed to stop handing out that bridge. If
    /// any of those are trusted buckets where the number of reachable
    /// bridges has fallen below the threshold, a blockage migration
    /// from that bucket to a spare bucket will be added, and the spare
    /// bucket will be removed from the list of hot spares. In
    /// addition, if the blocked bucket was the _target_ of a blockage
    /// migration, change the target to the new (formerly spare) bucket.
    /// Returns true if successful, or false if it needed a hot spare but
    /// there was none available.
    pub fn bridge_unreachable(&mut self, bridge: &BridgeLine, bdb: &mut BridgeDb) -> bool {
        let mut res: bool = true;
        if self.bridge_table.unallocated_bridges.contains(bridge) {
            // Not yet in any bucket: just drop it from the pool
            let index = self
                .bridge_table
                .unallocated_bridges
                .iter()
                .position(|&b| b == *bridge)
                .unwrap();
            self.bridge_table.unallocated_bridges.remove(index);
            res = true;
        } else {
            let positions = self.bridge_table.reachable.get(bridge);
            if let Some(v) = positions {
                for (bucketnum, offset) in v.iter() {
                    // Count how many bridges in this bucket are reachable
                    // (computed _before_ the removal below)
                    let numreachable = self.bridge_table.buckets[*bucketnum as usize]
                        .iter()
                        .filter(|br| self.bridge_table.reachable.get(br).is_some())
                        .count();

                    // Remove the bridge from the bucket
                    assert!(self.bridge_table.buckets[*bucketnum as usize][*offset] == *bridge);
                    self.bridge_table.buckets[*bucketnum as usize][*offset] = BridgeLine::default();

                    // Is this bucket an open-invitation bucket?
                    if bdb.openinv_buckets.contains(bucketnum) {
                        bdb.openinv_buckets.remove(bucketnum);
                        self.trustup_migration_table.table.remove(bucketnum);
                        continue;
                    }

                    // Does this removal cause the bucket to go below the
                    // threshold?  (Act exactly when the pre-removal count
                    // equals the threshold, so the migration is added only
                    // once, as the bucket crosses it.)
                    if numreachable != MIN_BUCKET_REACHABILITY {
                        // No
                        continue;
                    }

                    // This bucket is now unreachable. Get a spare bucket
                    if self.bridge_table.spares.is_empty() {
                        // Uh, oh. No spares available. Just delete any
                        // migrations leading to this bucket.
                        res = false;
                        self.trustup_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                        self.blockage_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                    } else {
                        // Get the first spare and remove it from the spares
                        // set.
                        let spare = *self.bridge_table.spares.iter().next().unwrap();
                        self.bridge_table.spares.remove(&spare);
                        // Add a blockage migration from this bucket to the spare
                        self.blockage_migration_table
                            .table
                            .insert(*bucketnum, spare);
                        // Remove any trust upgrade migrations to this
                        // bucket
                        self.trustup_migration_table
                            .table
                            .retain(|_, &mut v| v != *bucketnum);
                        // Change any blockage migrations with this bucket
                        // as the destination to the spare
                        for (_, v) in self.blockage_migration_table.table.iter_mut() {
                            if *v == *bucketnum {
                                *v = spare;
                            }
                        }
                    }
                }
            }
            self.bridge_table.reachable.remove(bridge);
        }

        res
    }

    #[cfg(test)]
    /// For testing only: manually advance the day by 1 day
    pub fn advance_day(&mut self) {
        self.time_offset += time::Duration::days(1);
    }

    // NOTE(review): cfg(test) is commented out here, unlike
    // advance_day above, so this is compiled into non-test builds —
    // confirm whether that is intentional.
    //#[cfg(test)]
    /// For testing only: manually advance the day by the given number
    /// of days
    pub fn advance_days(&mut self, days: u16) {
        self.time_offset += time::Duration::days(days.into());
    }

    /// Get today's (real or simulated) date as a Julian day number
    pub fn today(&self) -> u32 {
        // We will not encounter negative Julian dates (~6700 years ago)
        // or ones larger than 32 bits
        (time::OffsetDateTime::now_utc().date() + self.time_offset)
            .julian_day()
            .try_into()
            .unwrap()
    }

    /// Get a reference to the encrypted bridge table.
    ///
    /// Be sure to call this function when you want the latest version
    /// of the table, since it will put fresh Bucket Reachability
    /// credentials in the buckets each day.
    pub fn enc_bridge_table(&mut self) -> &Vec<[u8; ENC_BUCKET_BYTES]> {
        // Re-encrypt at most once per (simulated) day
        let today = self.today();
        if self.bridge_table.date_last_enc != today {
            self.bridge_table
                .encrypt_table(today, &self.reachability_priv);
        }
        &self.bridge_table.encbuckets
    }

    #[cfg(test)]
    /// Verify the two MACs on a Lox credential
    pub fn verify_lox(&self, cred: &cred::Lox) -> bool {
        // A MAC with P at the identity is trivially forgeable; reject
        if cred.P.is_identity() {
            return false;
        }

        let Q = (self.lox_priv.x[0]
            + cred.id * self.lox_priv.x[1]
            + cred.bucket * self.lox_priv.x[2]
            + cred.trust_level * self.lox_priv.x[3]
            + cred.level_since * self.lox_priv.x[4]
            + cred.invites_remaining * self.lox_priv.x[5]
            + cred.blockages * self.lox_priv.x[6])
            * cred.P;

        Q == cred.Q
    }

    #[cfg(test)]
    /// Verify the MAC on a Migration credential
    ///
    /// NOTE(review): this recomputation uses only x[0..=3] and does
    /// not include a migration_type term, whereas
    /// migration_table::encrypt_cred MACs with migration_priv.x[4] *
    /// migration_type — confirm which attribute set Migration
    /// credentials are actually issued under.
    pub fn verify_migration(&self, cred: &cred::Migration) -> bool {
        if cred.P.is_identity() {
            return false;
        }

        let Q = (self.migration_priv.x[0]
            + cred.lox_id * self.migration_priv.x[1]
            + cred.from_bucket * self.migration_priv.x[2]
            + cred.to_bucket * self.migration_priv.x[3])
            * cred.P;

        Q == cred.Q
    }

    #[cfg(test)]
    /// Verify the MAC on a Bucket Reachability credential
    pub fn verify_reachability(&self, cred: &cred::BucketReachability) -> bool {
        if cred.P.is_identity() {
            return false;
        }

        let Q = (self.reachability_priv.x[0]
            + cred.date * self.reachability_priv.x[1]
            + cred.bucket * self.reachability_priv.x[2])
            * cred.P;

        Q == cred.Q
    }

    #[cfg(test)]
    /// Verify the MAC on a Invitation credential
    pub fn verify_invitation(&self, cred: &cred::Invitation) -> bool {
        if cred.P.is_identity() {
            return false;
        }

        let Q = (self.invitation_priv.x[0]
            + cred.inv_id * self.invitation_priv.x[1]
            + cred.date * self.invitation_priv.x[2]
            + cred.bucket * self.invitation_priv.x[3]
            + cred.blockages * self.invitation_priv.x[4])
            * cred.P;

        Q == cred.Q
    }
}
|
||||
|
||||
/// Try to extract a u64 from a Scalar
|
||||
pub fn scalar_u64(s: &Scalar) -> Option<u64> {
|
||||
// Check that the top 24 bytes of the Scalar are 0
|
||||
let sbytes = s.as_bytes();
|
||||
if sbytes[8..].ct_eq(&[0u8; 24]).unwrap_u8() == 0 {
|
||||
return None;
|
||||
}
|
||||
Some(u64::from_le_bytes(sbytes[..8].try_into().unwrap()))
|
||||
}
|
||||
|
||||
/// Try to extract a u32 from a Scalar
|
||||
pub fn scalar_u32(s: &Scalar) -> Option<u32> {
|
||||
// Check that the top 28 bytes of the Scalar are 0
|
||||
let sbytes = s.as_bytes();
|
||||
if sbytes[4..].ct_eq(&[0u8; 28]).unwrap_u8() == 0 {
|
||||
return None;
|
||||
}
|
||||
Some(u32::from_le_bytes(sbytes[..4].try_into().unwrap()))
|
||||
}
|
||||
|
||||
/// Double a Scalar (returns 2*s)
pub fn scalar_dbl(s: &Scalar) -> Scalar {
    s + s
}
|
||||
|
||||
/// Double a RistrettoPoint (returns 2*P)
pub fn pt_dbl(P: &RistrettoPoint) -> RistrettoPoint {
    P + P
}
|
||||
|
||||
/// The protocol modules.
///
/// Each protocol lives in a submodule. Each submodule defines structs
/// for Request (the message from the client to the bridge authority),
/// State (the state held by the client while waiting for the reply),
/// and Response (the message from the bridge authority to the client).
/// Each submodule defines functions request, which produces a (Request,
/// State) pair, and handle_response, which consumes a State and a
/// Response. It also adds a handle_* function to the BridgeAuth struct
/// that consumes a Request and produces a Result<Response, ProofError>.
pub mod proto {
    pub mod blockage_migration;
    pub mod check_blockage;
    pub mod issue_invite;
    pub mod level_up;
    pub mod migration;
    pub mod open_invite;
    pub mod redeem_invite;
    pub mod trust_promotion;
}
|
||||
|
||||
// Unit tests (in tests.rs); compiled only for test builds
#[cfg(test)]
mod tests;
|
|
@ -0,0 +1,263 @@
|
|||
/*! The migration table.
|
||||
|
||||
This is a table listing pairs of (from_bucket_id, to_bucket_id). A pair
|
||||
in this table indicates that a user with a Lox credential containing
|
||||
from_bucket_id (and possibly meeting other conditions as well) is
|
||||
entitled to exchange their credential for one with to_bucket_id. (Note
|
||||
that the credentials contain the bucket attributes, which include both
|
||||
the id and the bucket decryption key, but the table just contains the
|
||||
bucket ids.) */
|
||||
|
||||
use curve25519_dalek::ristretto::CompressedRistretto;
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
|
||||
use sha2::Digest;
|
||||
use sha2::Sha256;
|
||||
|
||||
use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead};
|
||||
use aes_gcm::Aes128Gcm;
|
||||
use rand::RngCore;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::bridge_table;
|
||||
use super::cred::Migration;
|
||||
use super::IssuerPrivKey;
|
||||
use super::CMZ_B_TABLE;
|
||||
|
||||
/// Each (plaintext) entry in the returned migration table is serialized
/// into this many bytes: 32 for to_bucket plus 32 each for the
/// compressed MAC points P and Q (see encrypt_cred)
pub const MIGRATION_BYTES: usize = 96;

/// The size of an encrypted entry in the returned migration table:
/// the plaintext plus a 12-byte AES-GCM nonce and a 16-byte tag
pub const ENC_MIGRATION_BYTES: usize = MIGRATION_BYTES + 12 + 16;
|
||||
|
||||
/// The type of migration table: TrustUpgrade is for migrations from
/// untrusted (level 0) 1-bridge buckets to trusted (level 1) 3-bridge
/// buckets. Blockage is for migrations that drop you down two levels
/// (level 3 to 1, level 4 to 2) because the bridges in your current
/// bucket were blocked.
pub enum MigrationType {
    TrustUpgrade,
    Blockage,
}
|
||||
|
||||
impl From<MigrationType> for Scalar {
|
||||
/// Convert a MigrationType into the Scalar value that represents
|
||||
/// it in the Migration credential
|
||||
fn from(m: MigrationType) -> Self {
|
||||
match m {
|
||||
MigrationType::TrustUpgrade => 0u32,
|
||||
MigrationType::Blockage => 1u32,
|
||||
}
|
||||
.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// The migration table
#[derive(Default, Debug, Serialize, Deserialize)]
pub struct MigrationTable {
    /// Maps from_bucket_id -> to_bucket_id
    pub table: HashMap<u32, u32>,
    /// The Scalar encoding of this table's MigrationType
    pub migration_type: Scalar,
}
|
||||
|
||||
/// Create an encrypted Migration credential for returning to the user
/// in the trust promotion protocol.
///
/// Given the attributes of a Migration credential, produce a serialized
/// version (containing only the to_bucket and the MAC, since the
/// receiver will already know the id and from_bucket), encrypted with
/// H2(id, from_bucket, Qk), for the Qk portion of the MAC on the
/// corresponding Migration Key credential (with fixed Pk, given as a
/// precomputed multiplication table). Return the label H1(id,
/// from_attr_i, Qk_i) and the encrypted Migration credential. H1 and
/// H2 are the first 16 bytes and the second 16 bytes respectively of
/// the SHA256 hash of the input.
pub fn encrypt_cred(
    id: &Scalar,
    from_bucket: &Scalar,
    to_bucket: &Scalar,
    migration_type: &Scalar,
    Pktable: &RistrettoBasepointTable,
    migration_priv: &IssuerPrivKey,
    migrationkey_priv: &IssuerPrivKey,
) -> ([u8; 16], [u8; ENC_MIGRATION_BYTES]) {
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    let mut rng = rand::thread_rng();

    // Compute the Migration Key credential MAC Qk
    let Qk = &(migrationkey_priv.x[0]
        + migrationkey_priv.x[1] * id
        + migrationkey_priv.x[2] * from_bucket)
        * Pktable;

    // Compute a MAC (P, Q) on the Migration credential, with fresh
    // randomness b
    let b = Scalar::random(&mut rng);
    let P = &b * Btable;
    let Q = &(b
        * (migration_priv.x[0]
            + migration_priv.x[1] * id
            + migration_priv.x[2] * from_bucket
            + migration_priv.x[3] * to_bucket
            + migration_priv.x[4] * migration_type))
        * Btable;

    // Serialize (to_bucket, P, Q): 32 bytes each = MIGRATION_BYTES
    let mut credbytes: [u8; MIGRATION_BYTES] = [0; MIGRATION_BYTES];
    credbytes[0..32].copy_from_slice(to_bucket.as_bytes());
    credbytes[32..64].copy_from_slice(P.compress().as_bytes());
    credbytes[64..].copy_from_slice(Q.compress().as_bytes());

    // Pick a random nonce
    let mut noncebytes: [u8; 12] = [0; 12];
    rng.fill_bytes(&mut noncebytes);
    let nonce = GenericArray::from_slice(&noncebytes);

    // Compute the hash of (id, from_bucket, Qk)
    let mut hasher = Sha256::new();
    hasher.update(id.as_bytes());
    hasher.update(from_bucket.as_bytes());
    hasher.update(Qk.compress().as_bytes());
    let fullhash = hasher.finalize();

    // Create the encryption key from the 2nd half of the hash
    let aeskey = GenericArray::from_slice(&fullhash[16..]);
    // Encrypt with AES-128-GCM; the nonce is prepended to the
    // ciphertext in the output
    let cipher = Aes128Gcm::new(aeskey);
    let ciphertext: Vec<u8> = cipher.encrypt(nonce, credbytes.as_ref()).unwrap();
    let mut enccredbytes: [u8; ENC_MIGRATION_BYTES] = [0; ENC_MIGRATION_BYTES];
    enccredbytes[..12].copy_from_slice(&noncebytes);
    enccredbytes[12..].copy_from_slice(ciphertext.as_slice());

    // Use the first half of the above hash as the label
    let mut label: [u8; 16] = [0; 16];
    label[..].copy_from_slice(&fullhash[..16]);

    (label, enccredbytes)
}
|
||||
|
||||
/// Create an encrypted Migration credential for returning to the user
|
||||
/// in the trust promotion protocol, given the ids of the from and to
|
||||
/// buckets, and the migration type, and using a BridgeTable to get the
|
||||
/// bucket keys.
|
||||
///
|
||||
/// Otherwise the same as encrypt_cred, above, except it returns an
|
||||
/// Option in case the passed ids were invalid.
|
||||
pub fn encrypt_cred_ids(
|
||||
id: &Scalar,
|
||||
from_id: u32,
|
||||
to_id: u32,
|
||||
migration_type: &Scalar,
|
||||
bridgetable: &bridge_table::BridgeTable,
|
||||
Pktable: &RistrettoBasepointTable,
|
||||
migration_priv: &IssuerPrivKey,
|
||||
migrationkey_priv: &IssuerPrivKey,
|
||||
) -> Option<([u8; 16], [u8; ENC_MIGRATION_BYTES])> {
|
||||
// Look up the bucket keys and form the attributes (Scalars)
|
||||
let fromkey = bridgetable.keys.get(from_id as usize)?;
|
||||
let tokey = bridgetable.keys.get(to_id as usize)?;
|
||||
Some(encrypt_cred(
|
||||
id,
|
||||
&bridge_table::to_scalar(from_id, fromkey),
|
||||
&bridge_table::to_scalar(to_id, tokey),
|
||||
migration_type,
|
||||
Pktable,
|
||||
migration_priv,
|
||||
migrationkey_priv,
|
||||
))
|
||||
}
|
||||
|
||||
impl MigrationTable {
    /// Create a MigrationTable of the given MigrationType, with an
    /// initially empty mapping
    pub fn new(table_type: MigrationType) -> Self {
        Self {
            table: Default::default(),
            migration_type: table_type.into(),
        }
    }

    /// For each entry in the MigrationTable, use encrypt_cred_ids to
    /// produce an entry in an output HashMap (from labels to encrypted
    /// Migration credentials).
    ///
    /// Entries whose bucket ids are not found in the bridge table are
    /// silently skipped (encrypt_cred_ids returns None, which
    /// filter_map drops).
    pub fn encrypt_table(
        &self,
        id: &Scalar,
        bridgetable: &bridge_table::BridgeTable,
        Pktable: &RistrettoBasepointTable,
        migration_priv: &IssuerPrivKey,
        migrationkey_priv: &IssuerPrivKey,
    ) -> HashMap<[u8; 16], [u8; ENC_MIGRATION_BYTES]> {
        self.table
            .iter()
            .filter_map(|(from_id, to_id)| {
                encrypt_cred_ids(
                    id,
                    *from_id,
                    *to_id,
                    &self.migration_type,
                    bridgetable,
                    Pktable,
                    migration_priv,
                    migrationkey_priv,
                )
            })
            .collect()
    }
}
|
||||
|
||||
/// Decrypt an encrypted Migration credential given Qk, the known
/// attributes id and from_bucket for the Migration credential as well
/// as the known migration type, and a HashMap mapping labels to
/// ciphertexts.
///
/// Returns None if the label is not present in the table or if
/// decryption/decompression fails.
pub fn decrypt_cred(
    Qk: &RistrettoPoint,
    lox_id: &Scalar,
    from_bucket: &Scalar,
    migration_type: MigrationType,
    enc_migration_table: &HashMap<[u8; 16], [u8; ENC_MIGRATION_BYTES]>,
) -> Option<Migration> {
    // Compute the hash of (id, from_bucket, Qk), mirroring
    // encrypt_cred
    let mut hasher = Sha256::new();
    hasher.update(lox_id.as_bytes());
    hasher.update(from_bucket.as_bytes());
    hasher.update(Qk.compress().as_bytes());
    let fullhash = hasher.finalize();

    // Use the first half of the above hash as the label
    let mut label: [u8; 16] = [0; 16];
    label[..].copy_from_slice(&fullhash[..16]);

    // Look up the label in the HashMap
    let ciphertext = enc_migration_table.get(&label)?;

    // Create the decryption key from the 2nd half of the hash
    let aeskey = GenericArray::from_slice(&fullhash[16..]);

    // Decrypt (the first 12 bytes of the entry are the nonce)
    let nonce = GenericArray::from_slice(&ciphertext[..12]);
    let cipher = Aes128Gcm::new(aeskey);
    let plaintext: Vec<u8> = match cipher.decrypt(nonce, ciphertext[12..].as_ref()) {
        Ok(v) => v,
        Err(_) => return None,
    };
    // Deserialize (to_bucket, P, Q) from the MIGRATION_BYTES layout
    // produced by encrypt_cred
    let plaintextbytes = plaintext.as_slice();
    let mut to_bucket_bytes: [u8; 32] = [0; 32];
    to_bucket_bytes.copy_from_slice(&plaintextbytes[..32]);
    let to_bucket = Scalar::from_bytes_mod_order(to_bucket_bytes);
    let P = CompressedRistretto::from_slice(&plaintextbytes[32..64]).decompress()?;
    let Q = CompressedRistretto::from_slice(&plaintextbytes[64..]).decompress()?;

    Some(Migration {
        P,
        Q,
        lox_id: *lox_id,
        from_bucket: *from_bucket,
        to_bucket,
        migration_type: migration_type.into(),
    })
}
|
|
@ -0,0 +1,686 @@
|
|||
/*! A module for the protocol for the user of trust level 3 or higher to
|
||||
migrate from one bucket to another because their current bucket has been
|
||||
blocked. Their trust level will go down by 2.
|
||||
|
||||
The user presents their current Lox credential:
|
||||
|
||||
- id: revealed
|
||||
- bucket: blinded
|
||||
- trust_level: revealed to be 3 or higher
|
||||
- level_since: blinded
|
||||
- invites_remaining: blinded
|
||||
- blockages: blinded
|
||||
|
||||
and a Migration credential:
|
||||
|
||||
- id: revealed as the same as the Lox credential id above
|
||||
- from_bucket: blinded, but proved in ZK that it's the same as the
|
||||
bucket in the Lox credential above
|
||||
- to_bucket: blinded
|
||||
|
||||
and a new Lox credential to be issued:
|
||||
|
||||
- id: jointly chosen by the user and BA
|
||||
- bucket: blinded, but proved in ZK that it's the same as the to_bucket
|
||||
in the Migration credential above
|
||||
- trust_level: revealed to be 2 less than the trust_level above
|
||||
- level_since: today
|
||||
- invites_remaining: revealed to be LEVEL_INVITATIONS for the new trust
|
||||
level [Actually, there's a bug in the zkp crate that's triggered when
|
||||
a public value is 0 (the identity element of the Ristretto group), so
|
||||
we treat this field as blinded, but the _server_ encrypts the value.]
|
||||
- blockages: blinded, but proved in ZK that it's one more than the
|
||||
blockages above
|
||||
|
||||
*/
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::migration_table::MigrationType;
|
||||
use super::super::scalar_u32;
|
||||
use super::super::{BridgeAuth, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
use super::check_blockage::MIN_TRUST_LEVEL;
|
||||
use super::level_up::LEVEL_INVITATIONS;
|
||||
|
||||
/// The blockage migration request message, sent by the user to the
/// Bridge Authority: a blind showing of the current Lox credential, a
/// blind showing of the Migration credential, the user-blinded pieces
/// of the Lox credential to be issued, and the combined ZKP tying them
/// together.
#[derive(Serialize, Deserialize)]
pub struct Request {
    // Fields for blind showing the Lox credential
    P_lox: RistrettoPoint,
    id: Scalar,
    CBucket: RistrettoPoint,
    trust_level: Scalar,
    CSince: RistrettoPoint,
    CInvRemain: RistrettoPoint,
    CBlockages: RistrettoPoint,
    CQ_lox: RistrettoPoint,

    // Fields for blind showing the Migration credential
    P_mig: RistrettoPoint,
    CFromBucket: RistrettoPoint,
    CToBucket: RistrettoPoint,
    CQ_mig: RistrettoPoint,

    // Fields for user blinding of the Lox credential to be issued
    D: RistrettoPoint,
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    EncBlockages: (RistrettoPoint, RistrettoPoint),

    // The combined ZKP
    piUser: CompactProof,
}
|
||||
|
||||
/// The client state saved between sending a blockage migration Request
/// and handling the Bridge Authority's Response.
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The client's ElGamal private key d and public key D = d*B
    d: Scalar,
    D: RistrettoPoint,
    // The ElGamal encryptions sent in the Request, needed again to
    // verify the issuing proof
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    EncBlockages: (RistrettoPoint, RistrettoPoint),
    // The client's component of the jointly-chosen new credential id
    id_client: Scalar,
    // The expected attributes of the credential to be issued
    to_bucket: Scalar,
    trust_level: Scalar,
    blockages: Scalar,
}
|
||||
|
||||
/// The Bridge Authority's response to a blockage migration Request:
/// the blind-issued new Lox credential and the proof of correct
/// issuing.
#[derive(Serialize, Deserialize)]
pub struct Response {
    // The new attributes; the trust_level and invites_remaining are
    // implicit
    level_since: Scalar,

    // The fields for the new Lox credential
    P: RistrettoPoint,
    EncQ: (RistrettoPoint, RistrettoPoint),
    // invites_remaining is sent encrypted to the client's key D to
    // work around the zkp-crate bug with public values of 0
    EncInvRemain: (RistrettoPoint, RistrettoPoint),
    id_server: Scalar,
    TId: RistrettoPoint,
    TBucket: RistrettoPoint,
    TInvRemain: RistrettoPoint,
    TBlockages: RistrettoPoint,

    // The ZKP
    piBlindIssue: CompactProof,
}
|
||||
|
||||
// The combined client-side ZKP for the blockage migration request:
// blind showing of the Lox credential, blind showing of the Migration
// credential (reusing the same "bucket" witness to link them), and
// correctness of the user's ElGamal blinding of the attributes of the
// Lox credential to be issued.  The first tuple lists the secret
// (witness) variables, the second the public points, and (A, B) are
// the system-wide generators.
define_proof! {
    requestproof,
    "Blockage Migration Request",
    (bucket, since, invremain, blockages, zbucket, zsince, zinvremain,
     zblockages, negzQ_lox,
     tobucket, zfrombucket, ztobucket, negzQ_mig,
     d, eid_client, ebucket, eblockages, id_client),
    (P_lox, CBucket, CSince, CInvRemain, CBlockages, V_lox, Xbucket,
     Xsince, Xinvremain, Xblockages,
     P_mig, CFromBucket, CToBucket, V_mig, Xfrombucket, Xtobucket,
     D, EncIdClient0, EncIdClient1, EncBucket0, EncBucket1,
     EncBlockages0, EncBlockages1_minus_B),
    (A, B):
    // Blind showing of the Lox credential
    CBucket = (bucket*P_lox + zbucket*A),
    CSince = (since*P_lox + zsince*A),
    CInvRemain = (invremain*P_lox + zinvremain*A),
    CBlockages = (blockages*P_lox + zblockages*A),
    V_lox = (zbucket*Xbucket + zsince*Xsince + zinvremain*Xinvremain
        + zblockages*Xblockages + negzQ_lox*A),
    // Blind showing of the Migration credential; note the use of the
    // same "bucket" secret variable
    CFromBucket = (bucket*P_mig + zfrombucket*A),
    CToBucket = (tobucket*P_mig + ztobucket*A),
    V_mig = (zfrombucket*Xfrombucket + ztobucket*Xtobucket + negzQ_mig*A),
    // User blinding of the Lox credential to be issued; note the use of
    // the same "tobucket" secret variable
    D = (d*B),
    EncIdClient0 = (eid_client*B),
    EncIdClient1 = (id_client*B + eid_client*D),
    EncBucket0 = (ebucket*B),
    EncBucket1 = (tobucket*B + ebucket*D),
    EncBlockages0 = (eblockages*B),
    // The request encrypts blockages+1 (the new blockage count), so the
    // statement is phrased on EncBlockages.1 - B, letting the witness
    // be the old "blockages" value shown above
    EncBlockages1_minus_B = (blockages*B + eblockages*D)
}
|
||||
|
||||
// The issuer-side ZKP for blind issuing of the new Lox credential:
// proves the issuer's public key corresponds to its private key, that
// P and the T* helper points are consistently formed from the MAC
// randomizer b, and that (EncQ0, EncQ1) is the correct ElGamal
// encryption of the MAC over both the visible attributes (trust level
// and level_since, via Plevel and Psince) and the blinded ones.
define_proof! {
    blindissue,
    "Blockage Migration Blind Issuing",
    (x0, x0tilde, xid, xbucket, xlevel, xsince, xinvremain, xblockages,
     s, b, tid, tbucket, tinvremain, tblockages),
    (P, EncQ0, EncQ1, X0, Xid, Xbucket, Xlevel, Xsince, Xinvremain,
     Xblockages, Plevel, Psince, TId, TBucket, TInvRemain, TBlockages,
     D, EncId0, EncId1, EncBucket0, EncBucket1, EncInvRemain0,
     EncInvRemain1, EncBlockages0, EncBlockages1),
    (A, B):
    Xid = (xid*A),
    Xlevel = (xlevel*A),
    Xbucket = (xbucket*A),
    Xsince = (xsince*A),
    Xinvremain = (xinvremain*A),
    Xblockages = (xblockages*A),
    X0 = (x0*B + x0tilde*A),
    P = (b*B),
    // Each T value is proved two ways (b*X and t*A) to show
    // t = b*x without revealing either factor
    TId = (b*Xid),
    TId = (tid*A),
    TBucket = (b*Xbucket),
    TBucket = (tbucket*A),
    TInvRemain = (b*Xinvremain),
    TInvRemain = (tinvremain*A),
    TBlockages = (b*Xblockages),
    TBlockages = (tblockages*A),
    EncQ0 = (s*B + tid*EncId0 + tbucket*EncBucket0
        + tinvremain*EncInvRemain0 + tblockages*EncBlockages0),
    EncQ1 = (s*D + tid*EncId1 + tbucket*EncBucket1
        + tinvremain*EncInvRemain1 + tblockages*EncBlockages1
        + x0*P + xlevel*Plevel + xsince*Psince)
}
|
||||
|
||||
/// Create a blockage migration request and the local State needed to
/// process the Bridge Authority's response.
///
/// Blind-shows the user's current Lox and Migration credentials and
/// user-blinds the attributes of the new Lox credential to be issued.
///
/// Returns Err(ProofError::VerificationFailure) if the two credentials
/// are inconsistent (mismatched ids, or Lox bucket differing from the
/// Migration from_bucket) or if the trust level is below
/// MIN_TRUST_LEVEL.
pub fn request(
    lox_cred: &cred::Lox,
    migration_cred: &cred::Migration,
    lox_pub: &IssuerPubKey,
    migration_pub: &IssuerPubKey,
) -> Result<(Request, State), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Ensure that the credentials can be correctly shown; that is, the
    // ids match and the Lox credential bucket matches the Migration
    // credential from_bucket
    if lox_cred.id != migration_cred.lox_id || lox_cred.bucket != migration_cred.from_bucket {
        return Err(ProofError::VerificationFailure);
    }

    // The trust level must be at least MIN_TRUST_LEVEL
    let level: u32 = match scalar_u32(&lox_cred.trust_level) {
        Some(v) => v,
        None => return Err(ProofError::VerificationFailure),
    };
    if level < MIN_TRUST_LEVEL {
        return Err(ProofError::VerificationFailure);
    }

    // Blind showing the Lox credential

    // Reblind P and Q
    let mut rng = rand::thread_rng();
    let t_lox = Scalar::random(&mut rng);
    let P_lox = t_lox * lox_cred.P;
    let Q_lox = t_lox * lox_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zbucket = Scalar::random(&mut rng);
    let zsince = Scalar::random(&mut rng);
    let zinvremain = Scalar::random(&mut rng);
    let zblockages = Scalar::random(&mut rng);
    let CBucket = lox_cred.bucket * P_lox + &zbucket * Atable;
    let CSince = lox_cred.level_since * P_lox + &zsince * Atable;
    let CInvRemain = lox_cred.invites_remaining * P_lox + &zinvremain * Atable;
    let CBlockages = lox_cred.blockages * P_lox + &zblockages * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ_lox = Scalar::random(&mut rng);
    let CQ_lox = Q_lox - &negzQ_lox * Atable;

    // Compute the "error factor"
    let V_lox = zbucket * lox_pub.X[2]
        + zsince * lox_pub.X[4]
        + zinvremain * lox_pub.X[5]
        + zblockages * lox_pub.X[6]
        + &negzQ_lox * Atable;

    // Blind showing the Migration credential

    // Reblind P and Q
    let t_mig = Scalar::random(&mut rng);
    let P_mig = t_mig * migration_cred.P;
    let Q_mig = t_mig * migration_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zfrombucket = Scalar::random(&mut rng);
    let ztobucket = Scalar::random(&mut rng);
    let CFromBucket = migration_cred.from_bucket * P_mig + &zfrombucket * Atable;
    let CToBucket = migration_cred.to_bucket * P_mig + &ztobucket * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ_mig = Scalar::random(&mut rng);
    let CQ_mig = Q_mig - &negzQ_mig * Atable;

    // Compute the "error factor"
    let V_mig =
        zfrombucket * migration_pub.X[2] + ztobucket * migration_pub.X[3] + &negzQ_mig * Atable;

    // User blinding for the Lox certificate to be issued

    // Pick an ElGamal keypair
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Pick a random client component of the id
    let id_client = Scalar::random(&mut rng);

    // Encrypt it (times the basepoint B) to the ElGamal public key D we
    // just created
    let eid_client = Scalar::random(&mut rng);
    let EncIdClient = (&eid_client * Btable, &id_client * Btable + eid_client * D);

    // Encrypt the other blinded attributes (times B) to D as well
    let ebucket = Scalar::random(&mut rng);
    let EncBucket = (
        &ebucket * Btable,
        &migration_cred.to_bucket * Btable + ebucket * D,
    );
    let eblockages = Scalar::random(&mut rng);
    // The new credential records one more blockage than the old one
    let new_blockages = lox_cred.blockages + Scalar::one();
    let EncBlockages = (
        &eblockages * Btable,
        &new_blockages * Btable + eblockages * D,
    );

    // Construct the proof
    let mut transcript = Transcript::new(b"blockage migration request");
    let piUser = requestproof::prove_compact(
        &mut transcript,
        requestproof::ProveAssignments {
            A,
            B,
            P_lox: &P_lox,
            CBucket: &CBucket,
            CSince: &CSince,
            CInvRemain: &CInvRemain,
            CBlockages: &CBlockages,
            V_lox: &V_lox,
            Xbucket: &lox_pub.X[2],
            Xsince: &lox_pub.X[4],
            Xinvremain: &lox_pub.X[5],
            Xblockages: &lox_pub.X[6],
            P_mig: &P_mig,
            CFromBucket: &CFromBucket,
            CToBucket: &CToBucket,
            V_mig: &V_mig,
            Xfrombucket: &migration_pub.X[2],
            Xtobucket: &migration_pub.X[3],
            D: &D,
            EncIdClient0: &EncIdClient.0,
            EncIdClient1: &EncIdClient.1,
            EncBucket0: &EncBucket.0,
            EncBucket1: &EncBucket.1,
            EncBlockages0: &EncBlockages.0,
            // The proof statement is phrased on EncBlockages.1 - B so
            // the witness can be the old blockages value
            EncBlockages1_minus_B: &(EncBlockages.1 - B),
            bucket: &lox_cred.bucket,
            since: &lox_cred.level_since,
            invremain: &lox_cred.invites_remaining,
            blockages: &lox_cred.blockages,
            zbucket: &zbucket,
            zsince: &zsince,
            zinvremain: &zinvremain,
            zblockages: &zblockages,
            negzQ_lox: &negzQ_lox,
            tobucket: &migration_cred.to_bucket,
            zfrombucket: &zfrombucket,
            ztobucket: &ztobucket,
            negzQ_mig: &negzQ_mig,
            d: &d,
            eid_client: &eid_client,
            ebucket: &ebucket,
            eblockages: &eblockages,
            id_client: &id_client,
        },
    )
    .0;

    Ok((
        Request {
            P_lox,
            id: lox_cred.id,
            CBucket,
            trust_level: lox_cred.trust_level,
            CSince,
            CInvRemain,
            CBlockages,
            CQ_lox,
            P_mig,
            CFromBucket,
            CToBucket,
            CQ_mig,
            D,
            EncIdClient,
            EncBucket,
            EncBlockages,
            piUser,
        },
        State {
            d,
            D,
            EncIdClient,
            EncBucket,
            EncBlockages,
            id_client,
            to_bucket: migration_cred.to_bucket,
            // level >= MIN_TRUST_LEVEL (3) was checked above, so this
            // subtraction cannot underflow
            trust_level: (level - 2).into(),
            blockages: new_blockages,
        },
    ))
}
|
||||
|
||||
impl BridgeAuth {
    /// Receive a blockage migration request.
    ///
    /// Verifies the client's combined ZKP against the issuer's private
    /// keys, enforces the minimum trust level and single-use ids, and
    /// blind-issues a new Lox credential two trust levels down, with
    /// today's date as level_since and one more blockage recorded.
    ///
    /// Returns Err(ProofError::VerificationFailure) on any check
    /// failure, including a reused credential id.
    pub fn handle_blockage_migration(&mut self, req: Request) -> Result<Response, ProofError> {
        let A: &RistrettoPoint = &CMZ_A;
        let B: &RistrettoPoint = &CMZ_B;
        let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
        let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

        // Reject identity points, which would make the blind showing
        // trivially forgeable
        if req.P_lox.is_identity() || req.P_mig.is_identity() {
            return Err(ProofError::VerificationFailure);
        }

        // The trust level must be at least MIN_TRUST_LEVEL
        let level: u32 = match scalar_u32(&req.trust_level) {
            Some(v) => v,
            None => return Err(ProofError::VerificationFailure),
        };
        if level < MIN_TRUST_LEVEL {
            return Err(ProofError::VerificationFailure);
        }

        // Recompute the "error factors" using knowledge of our own
        // (the issuer's) private key instead of knowledge of the
        // hidden attributes
        let Vprime_lox = (self.lox_priv.x[0]
            + self.lox_priv.x[1] * req.id
            + self.lox_priv.x[3] * req.trust_level)
            * req.P_lox
            + self.lox_priv.x[2] * req.CBucket
            + self.lox_priv.x[4] * req.CSince
            + self.lox_priv.x[5] * req.CInvRemain
            + self.lox_priv.x[6] * req.CBlockages
            - req.CQ_lox;

        let migration_type: Scalar = MigrationType::Blockage.into();
        let Vprime_mig = (self.migration_priv.x[0]
            + self.migration_priv.x[1] * req.id
            + self.migration_priv.x[4] * migration_type)
            * req.P_mig
            + self.migration_priv.x[2] * req.CFromBucket
            + self.migration_priv.x[3] * req.CToBucket
            - req.CQ_mig;

        // Verify the ZKP
        let mut transcript = Transcript::new(b"blockage migration request");
        requestproof::verify_compact(
            &req.piUser,
            &mut transcript,
            requestproof::VerifyAssignments {
                A: &A.compress(),
                B: &B.compress(),
                P_lox: &req.P_lox.compress(),
                CBucket: &req.CBucket.compress(),
                CSince: &req.CSince.compress(),
                CInvRemain: &req.CInvRemain.compress(),
                CBlockages: &req.CBlockages.compress(),
                V_lox: &Vprime_lox.compress(),
                Xbucket: &self.lox_pub.X[2].compress(),
                Xsince: &self.lox_pub.X[4].compress(),
                Xinvremain: &self.lox_pub.X[5].compress(),
                Xblockages: &self.lox_pub.X[6].compress(),
                P_mig: &req.P_mig.compress(),
                CFromBucket: &req.CFromBucket.compress(),
                CToBucket: &req.CToBucket.compress(),
                V_mig: &Vprime_mig.compress(),
                Xfrombucket: &self.migration_pub.X[2].compress(),
                Xtobucket: &self.migration_pub.X[3].compress(),
                D: &req.D.compress(),
                EncIdClient0: &req.EncIdClient.0.compress(),
                EncIdClient1: &req.EncIdClient.1.compress(),
                EncBucket0: &req.EncBucket.0.compress(),
                EncBucket1: &req.EncBucket.1.compress(),
                EncBlockages0: &req.EncBlockages.0.compress(),
                EncBlockages1_minus_B: &(req.EncBlockages.1 - B).compress(),
            },
        )?;

        // Ensure the id has not been seen before, and add it to the
        // seen list.
        if self.id_filter.filter(&req.id) == SeenType::Seen {
            return Err(ProofError::VerificationFailure);
        }

        // Blind issuing of the new Lox credential

        // Choose a random server id component to add to the client's
        // (blinded) id component
        let mut rng = rand::thread_rng();
        let id_server = Scalar::random(&mut rng);
        let EncId = (req.EncIdClient.0, req.EncIdClient.1 + &id_server * Btable);

        // Create the trust_level attribute (Scalar), which will be
        // 2 levels down from the one in the provided credential
        let trust_level: Scalar = (level - 2).into();

        // Create the level_since attribute (Scalar), which is today's
        // Julian date
        let level_since: Scalar = self.today().into();

        // The invites remaining is the appropriate number for the new
        // level (note that LEVEL_INVITATIONS[i] is the number of
        // invitations for moving from level i to level i+1).
        // level >= MIN_TRUST_LEVEL (3), so level - 3 cannot underflow.
        let invremain: Scalar = LEVEL_INVITATIONS[(level - 3) as usize].into();

        // Because of the bug in the zkp crate, encrypt the invites
        // remaining instead of sending it in the clear
        let sinvremain = Scalar::random(&mut rng);
        let EncInvRemain = (
            &sinvremain * Btable,
            &invremain * Btable + sinvremain * req.D,
        );

        // Compute the MAC on the visible attributes
        let b = Scalar::random(&mut rng);
        let P = &b * Btable;
        let QHc = (self.lox_priv.x[0]
            + self.lox_priv.x[3] * trust_level
            + self.lox_priv.x[4] * level_since)
            * P;

        // El Gamal encrypt it to the public key req.D
        let s = Scalar::random(&mut rng);
        let EncQHc = (&s * Btable, QHc + s * req.D);

        // Homomorphically compute the part of the MAC corresponding to
        // the blinded attributes
        let tid = self.lox_priv.x[1] * b;
        let TId = &tid * Atable;
        let EncQId = (tid * EncId.0, tid * EncId.1);
        let tbucket = self.lox_priv.x[2] * b;
        let TBucket = &tbucket * Atable;
        let EncQBucket = (tbucket * req.EncBucket.0, tbucket * req.EncBucket.1);
        let tinvremain = self.lox_priv.x[5] * b;
        let TInvRemain = &tinvremain * Atable;
        let EncQInvRemain = (tinvremain * EncInvRemain.0, tinvremain * EncInvRemain.1);
        let tblockages = self.lox_priv.x[6] * b;
        let TBlockages = &tblockages * Atable;
        let EncQBlockages = (
            tblockages * req.EncBlockages.0,
            tblockages * req.EncBlockages.1,
        );

        // Combine the encrypted MAC pieces into the full encrypted MAC
        let EncQ = (
            EncQHc.0 + EncQId.0 + EncQBucket.0 + EncQInvRemain.0 + EncQBlockages.0,
            EncQHc.1 + EncQId.1 + EncQBucket.1 + EncQInvRemain.1 + EncQBlockages.1,
        );

        // Prove correct blind issuing
        let mut transcript = Transcript::new(b"blockage migration issuing");
        let piBlindIssue = blindissue::prove_compact(
            &mut transcript,
            blindissue::ProveAssignments {
                A,
                B,
                P: &P,
                EncQ0: &EncQ.0,
                EncQ1: &EncQ.1,
                X0: &self.lox_pub.X[0],
                Xid: &self.lox_pub.X[1],
                Xbucket: &self.lox_pub.X[2],
                Xlevel: &self.lox_pub.X[3],
                Xsince: &self.lox_pub.X[4],
                Xinvremain: &self.lox_pub.X[5],
                Xblockages: &self.lox_pub.X[6],
                Plevel: &(trust_level * P),
                Psince: &(level_since * P),
                TId: &TId,
                TBucket: &TBucket,
                TInvRemain: &TInvRemain,
                TBlockages: &TBlockages,
                D: &req.D,
                EncId0: &EncId.0,
                EncId1: &EncId.1,
                EncBucket0: &req.EncBucket.0,
                EncBucket1: &req.EncBucket.1,
                EncInvRemain0: &EncInvRemain.0,
                EncInvRemain1: &EncInvRemain.1,
                EncBlockages0: &req.EncBlockages.0,
                EncBlockages1: &req.EncBlockages.1,
                x0: &self.lox_priv.x[0],
                x0tilde: &self.lox_priv.x0tilde,
                xid: &self.lox_priv.x[1],
                xbucket: &self.lox_priv.x[2],
                xlevel: &self.lox_priv.x[3],
                xsince: &self.lox_priv.x[4],
                xinvremain: &self.lox_priv.x[5],
                xblockages: &self.lox_priv.x[6],
                s: &s,
                b: &b,
                tid: &tid,
                tbucket: &tbucket,
                tinvremain: &tinvremain,
                tblockages: &tblockages,
            },
        )
        .0;

        Ok(Response {
            level_since,
            P,
            EncQ,
            EncInvRemain,
            id_server,
            TId,
            TBucket,
            TInvRemain,
            TBlockages,
            piBlindIssue,
        })
    }
}
|
||||
|
||||
/// Handle the response to the request, producing the new Lox credential
/// if successful.
///
/// Checks the BA's issuing proof, verifies that the encrypted
/// invites_remaining matches the expected value for the new trust
/// level, and decrypts the new credential's MAC with the client's
/// ElGamal key.  Returns Err(ProofError::VerificationFailure) on any
/// mismatch.
pub fn handle_response(
    state: State,
    resp: Response,
    lox_pub: &IssuerPubKey,
) -> Result<cred::Lox, ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Reject an identity P, which would make the MAC trivial
    if resp.P.is_identity() {
        return Err(ProofError::VerificationFailure);
    }

    // Add the server's contribution to the id to our own, both in plain
    // and encrypted form
    let id = state.id_client + resp.id_server;
    let EncId = (
        state.EncIdClient.0,
        state.EncIdClient.1 + &resp.id_server * Btable,
    );

    let new_level: u32 = match scalar_u32(&state.trust_level) {
        Some(v) => v,
        None => return Err(ProofError::VerificationFailure),
    };
    // new_level - 1 is used to index LEVEL_INVITATIONS below
    if new_level < 1 {
        return Err(ProofError::VerificationFailure);
    }

    // The invites remaining is the appropriate number for the new level
    // (note that LEVEL_INVITATIONS[i] is the number of invitations for
    // moving from level i to level i+1)
    let invremain: Scalar = LEVEL_INVITATIONS[(new_level - 1) as usize].into();

    // Decrypt EncInvRemain
    let recv_invremain = resp.EncInvRemain.1 - (state.d * resp.EncInvRemain.0);

    // The BA must have encrypted exactly the expected invites count
    if recv_invremain != &invremain * Btable {
        return Err(ProofError::VerificationFailure);
    }

    // Verify the proof
    let mut transcript = Transcript::new(b"blockage migration issuing");
    blindissue::verify_compact(
        &resp.piBlindIssue,
        &mut transcript,
        blindissue::VerifyAssignments {
            A: &A.compress(),
            B: &B.compress(),
            P: &resp.P.compress(),
            EncQ0: &resp.EncQ.0.compress(),
            EncQ1: &resp.EncQ.1.compress(),
            X0: &lox_pub.X[0].compress(),
            Xid: &lox_pub.X[1].compress(),
            Xbucket: &lox_pub.X[2].compress(),
            Xlevel: &lox_pub.X[3].compress(),
            Xsince: &lox_pub.X[4].compress(),
            Xinvremain: &lox_pub.X[5].compress(),
            Xblockages: &lox_pub.X[6].compress(),
            Plevel: &(state.trust_level * resp.P).compress(),
            Psince: &(resp.level_since * resp.P).compress(),
            TId: &resp.TId.compress(),
            TBucket: &resp.TBucket.compress(),
            TInvRemain: &resp.TInvRemain.compress(),
            TBlockages: &resp.TBlockages.compress(),
            D: &state.D.compress(),
            EncId0: &EncId.0.compress(),
            EncId1: &EncId.1.compress(),
            EncBucket0: &state.EncBucket.0.compress(),
            EncBucket1: &state.EncBucket.1.compress(),
            EncInvRemain0: &resp.EncInvRemain.0.compress(),
            EncInvRemain1: &resp.EncInvRemain.1.compress(),
            EncBlockages0: &state.EncBlockages.0.compress(),
            EncBlockages1: &state.EncBlockages.1.compress(),
        },
    )?;

    // Decrypt EncQ
    let Q = resp.EncQ.1 - (state.d * resp.EncQ.0);

    Ok(cred::Lox {
        P: resp.P,
        Q,
        id,
        bucket: state.to_bucket,
        trust_level: new_level.into(),
        level_since: resp.level_since,
        invites_remaining: invremain,
        blockages: state.blockages,
    })
}
|
|
@ -0,0 +1,361 @@
|
|||
/*! A module for the protocol for the user to check for the availability
|
||||
of a migration credential they can use in order to move to a new bucket
|
||||
if theirs has been blocked.
|
||||
|
||||
The user presents their current Lox credential:
|
||||
- id: revealed
|
||||
- bucket: blinded
|
||||
- trust_level: revealed to be 3 or above
|
||||
- level_since: blinded
|
||||
- invites_remaining: blinded
|
||||
- blockages: blinded
|
||||
|
||||
They are allowed to do this as long as they are level 3 or above. If
|
||||
they have too many blockages (but are level 3 or above), they will be
|
||||
allowed to perform this migration, but will not be able to advance to
|
||||
level 3 in their new bucket, so this will be their last allowed
|
||||
migration without rejoining the system either with a new invitation or
|
||||
an open invitation.
|
||||
|
||||
They will receive in return the encrypted MAC (Pk, EncQk) for their
|
||||
implicit Migration Key credential with attributes id and bucket,
|
||||
along with a HashMap of encrypted Migration credentials. For each
|
||||
(from_i, to_i) in the BA's migration list, there will be an entry in
|
||||
the HashMap with key H1(id, from_attr_i, Qk_i) and value
|
||||
Enc_{H2(id, from_attr_i, Qk_i)}(to_attr_i, P_i, Q_i). Here H1 and H2
|
||||
are the first 16 bytes and the second 16 bytes respectively of the
|
||||
SHA256 hash of the input, P_i and Q_i are a MAC on the Migration
|
||||
credential with attributes id, from_attr_i, and to_attr_i. Qk_i is the
|
||||
value EncQk would decrypt to if bucket were equal to from_attr_i. */
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::serde_as;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::migration_table;
|
||||
use super::super::scalar_u32;
|
||||
use super::super::{BridgeAuth, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
|
||||
/// The minimum trust level a Lox credential must have to be allowed to
/// perform this protocol.  (The blockage migration protocol imports and
/// enforces this same constant.)
pub const MIN_TRUST_LEVEL: u32 = 3;
|
||||
|
||||
/// The check-blockage request message: a blind showing of the user's
/// Lox credential plus the user-blinded bucket attribute for the
/// implicit Migration Key credential, tied together by a combined ZKP.
#[derive(Serialize, Deserialize)]
pub struct Request {
    // Fields for blind showing the Lox credential
    P: RistrettoPoint,
    id: Scalar,
    CBucket: RistrettoPoint,
    level: Scalar,
    CSince: RistrettoPoint,
    CInvRemain: RistrettoPoint,
    CBlockages: RistrettoPoint,
    CQ: RistrettoPoint,

    // Fields for user blinding of the Migration Key credential
    D: RistrettoPoint,
    EncBucket: (RistrettoPoint, RistrettoPoint),

    // The combined ZKP
    piUser: CompactProof,
}
|
||||
|
||||
/// The client state saved between sending a check-blockage Request and
/// handling the BA's Response.
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The client's ElGamal private key d and public key D = d*B
    d: Scalar,
    D: RistrettoPoint,
    // The ElGamal encryption of the bucket sent in the Request
    EncBucket: (RistrettoPoint, RistrettoPoint),
    // The shown Lox credential's id and bucket attributes
    id: Scalar,
    bucket: Scalar,
}
|
||||
|
||||
/// The BA's response to a check-blockage Request: the encrypted MAC on
/// the implicit Migration Key credential, plus the table of encrypted
/// Migration credentials the user can search with the decrypted Qk.
#[serde_as]
#[derive(Serialize, Deserialize, Debug)]
pub struct Response {
    // The encrypted MAC for the Migration Key credential
    Pk: RistrettoPoint,
    EncQk: (RistrettoPoint, RistrettoPoint),

    // A table of encrypted Migration credentials; the encryption keys
    // are formed from the possible values of Qk (the decrypted form of
    // EncQk)
    #[serde_as(as = "Vec<(_,[_; migration_table::ENC_MIGRATION_BYTES])>")]
    enc_migration_table: HashMap<[u8; 16], [u8; migration_table::ENC_MIGRATION_BYTES]>,
}
|
||||
|
||||
// The combined client-side ZKP for the check-blockage request: blind
// showing of the Lox credential, plus correctness of the ElGamal
// blinding of the bucket attribute for the Migration Key credential
// (note the same "bucket" witness links the two parts).
define_proof! {
    requestproof,
    "Check Blockage Request",
    (bucket, since, invremain, blockages, zbucket, zsince, zinvremain,
     zblockages, negzQ,
     d, ebucket),
    (P, CBucket, CSince, CInvRemain, CBlockages, V, Xbucket, Xsince,
     Xinvremain, Xblockages,
     D, EncBucket0, EncBucket1),
    (A, B):
    // Blind showing of the Lox credential
    CBucket = (bucket*P + zbucket*A),
    CSince = (since*P + zsince*A),
    CInvRemain = (invremain*P + zinvremain*A),
    CBlockages = (blockages*P + zblockages*A),
    V = (zbucket*Xbucket + zsince*Xsince + zinvremain*Xinvremain
        + zblockages*Xblockages + negzQ*A),
    // User blinding of the Migration Key credential
    D = (d*B),
    EncBucket0 = (ebucket*B),
    EncBucket1 = (bucket*B + ebucket*D)
}
|
||||
|
||||
/// Create a check blockage request (client side).
///
/// Blind-shows the user's Lox credential (which must have
/// `trust_level >= MIN_TRUST_LEVEL`) and prepares the user blinding
/// for the Migration Key credential to be issued.  Returns the
/// `Request` to send to the Bridge Authority and the `State` the
/// client must retain for `handle_response`.
///
/// Errors with `ProofError::VerificationFailure` if the credential
/// cannot be validly shown.
pub fn request(
    lox_cred: &cred::Lox,
    lox_pub: &IssuerPubKey,
) -> Result<(Request, State), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Ensure the credential can be correctly shown: it must be the case
    // that trust_level >= MIN_TRUST_LEVEL
    let level: u32 = match scalar_u32(&lox_cred.trust_level) {
        Some(v) => v,
        None => return Err(ProofError::VerificationFailure),
    };
    if level < MIN_TRUST_LEVEL {
        return Err(ProofError::VerificationFailure);
    }

    // Blind showing the Lox credential

    // Reblind P and Q
    let mut rng = rand::thread_rng();
    let t = Scalar::random(&mut rng);
    let P = t * lox_cred.P;
    let Q = t * lox_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zbucket = Scalar::random(&mut rng);
    let zsince = Scalar::random(&mut rng);
    let zinvremain = Scalar::random(&mut rng);
    let zblockages = Scalar::random(&mut rng);
    let CBucket = lox_cred.bucket * P + &zbucket * Atable;
    let CSince = lox_cred.level_since * P + &zsince * Atable;
    let CInvRemain = lox_cred.invites_remaining * P + &zinvremain * Atable;
    let CBlockages = lox_cred.blockages * P + &zblockages * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ = Scalar::random(&mut rng);
    let CQ = Q - &negzQ * Atable;

    // Compute the "error factor"
    // (indices 2, 4, 5, 6 are the issuer keys for the bucket,
    // level_since, invites_remaining, and blockages attributes)
    let V = zbucket * lox_pub.X[2]
        + zsince * lox_pub.X[4]
        + zinvremain * lox_pub.X[5]
        + zblockages * lox_pub.X[6]
        + &negzQ * Atable;

    // User blinding the Migration Key credential

    // Pick an ElGamal keypair
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Encrypt the attributes to be blinded (each times the
    // basepoint B) to the public key we just created
    let ebucket = Scalar::random(&mut rng);
    let EncBucket = (&ebucket * Btable, &lox_cred.bucket * Btable + ebucket * D);

    // Construct the proof
    // NOTE: the transcript label must match the one the verifier uses
    // in handle_check_blockage
    let mut transcript = Transcript::new(b"check blockage request");
    let piUser = requestproof::prove_compact(
        &mut transcript,
        requestproof::ProveAssignments {
            A,
            B,
            P: &P,
            CBucket: &CBucket,
            CSince: &CSince,
            CInvRemain: &CInvRemain,
            CBlockages: &CBlockages,
            V: &V,
            Xbucket: &lox_pub.X[2],
            Xsince: &lox_pub.X[4],
            Xinvremain: &lox_pub.X[5],
            Xblockages: &lox_pub.X[6],
            D: &D,
            EncBucket0: &EncBucket.0,
            EncBucket1: &EncBucket.1,
            bucket: &lox_cred.bucket,
            since: &lox_cred.level_since,
            invremain: &lox_cred.invites_remaining,
            blockages: &lox_cred.blockages,
            zbucket: &zbucket,
            zsince: &zsince,
            zinvremain: &zinvremain,
            zblockages: &zblockages,
            negzQ: &negzQ,
            d: &d,
            ebucket: &ebucket,
        },
    )
    .0;

    Ok((
        Request {
            P,
            id: lox_cred.id,
            CBucket,
            level: lox_cred.trust_level,
            CSince,
            CInvRemain,
            CBlockages,
            CQ,
            D,
            EncBucket,
            piUser,
        },
        // The State keeps the ElGamal secret d (needed to decrypt the
        // response) and the revealed id/bucket values
        State {
            d,
            D,
            EncBucket,
            id: lox_cred.id,
            bucket: lox_cred.bucket,
        },
    ))
}
|
||||
|
||||
impl BridgeAuth {
    /// Receive a check blockage request.
    ///
    /// Verifies the client's blind showing of its Lox credential and,
    /// if valid, returns an encrypted MAC (Pk, EncQk) for the
    /// Migration Key credential together with the encrypted blockage
    /// migration table.  Errors with
    /// `ProofError::VerificationFailure` on any invalid request.
    pub fn handle_check_blockage(&mut self, req: Request) -> Result<Response, ProofError> {
        let A: &RistrettoPoint = &CMZ_A;
        let B: &RistrettoPoint = &CMZ_B;
        let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

        // The revealed trust level must convert to a u32 and meet the
        // minimum required to run this protocol
        let level: u32 = match scalar_u32(&req.level) {
            Some(v) => v,
            None => return Err(ProofError::VerificationFailure),
        };

        // Reject a degenerate (identity) P, which would make the MAC
        // check vacuous
        if req.P.is_identity() || level < MIN_TRUST_LEVEL {
            return Err(ProofError::VerificationFailure);
        }

        // Recompute the "error factor" using knowledge of our own
        // (the issuer's) private key instead of knowledge of the
        // hidden attributes
        let Vprime =
            (self.lox_priv.x[0] + self.lox_priv.x[1] * req.id + self.lox_priv.x[3] * req.level)
                * req.P
                + self.lox_priv.x[2] * req.CBucket
                + self.lox_priv.x[4] * req.CSince
                + self.lox_priv.x[5] * req.CInvRemain
                + self.lox_priv.x[6] * req.CBlockages
                - req.CQ;

        // Verify the ZKP
        // (transcript label must match the prover's in request())
        let mut transcript = Transcript::new(b"check blockage request");
        requestproof::verify_compact(
            &req.piUser,
            &mut transcript,
            requestproof::VerifyAssignments {
                A: &A.compress(),
                B: &B.compress(),
                P: &req.P.compress(),
                CBucket: &req.CBucket.compress(),
                CSince: &req.CSince.compress(),
                CInvRemain: &req.CInvRemain.compress(),
                CBlockages: &req.CBlockages.compress(),
                V: &Vprime.compress(),
                Xbucket: &self.lox_pub.X[2].compress(),
                Xsince: &self.lox_pub.X[4].compress(),
                Xinvremain: &self.lox_pub.X[5].compress(),
                Xblockages: &self.lox_pub.X[6].compress(),
                D: &req.D.compress(),
                EncBucket0: &req.EncBucket.0.compress(),
                EncBucket1: &req.EncBucket.1.compress(),
            },
        )?;

        // Ensure the id has not been seen before in the general id
        // filter, but do not add it, so that the user can potentially
        // run this protocol multiple times.
        if self.id_filter.check(&req.id) == SeenType::Seen {
            return Err(ProofError::VerificationFailure);
        }

        // Compute the encrypted MAC (Pk, EncQk) for the Migration Key
        // credential.

        // Compute the MAC on the visible attributes
        let mut rng = rand::thread_rng();
        let b = Scalar::random(&mut rng);
        let Pk = &b * Btable;
        // Precompute a basepoint table for Pk since it is multiplied
        // repeatedly below and inside encrypt_table
        let Pktable = RistrettoBasepointTable::create(&Pk);
        let Qid = &(self.migrationkey_priv.x[0] + self.migrationkey_priv.x[1] * req.id) * &Pktable;

        // El Gamal encrypt it to the public key req.D
        let s = Scalar::random(&mut rng);
        let EncQkid = (&s * Btable, Qid + s * req.D);

        // Homomorphically compute the part of the MAC corresponding to
        // the blinded attributes
        let tbucket = self.migrationkey_priv.x[2] * b;
        let EncQkBucket = (tbucket * req.EncBucket.0, tbucket * req.EncBucket.1);

        // Combine the two ciphertext components into the full
        // encrypted MAC
        let EncQk = (EncQkid.0 + EncQkBucket.0, EncQkid.1 + EncQkBucket.1);

        Ok(Response {
            Pk,
            EncQk,
            enc_migration_table: self.blockage_migration_table.encrypt_table(
                &req.id,
                &self.bridge_table,
                &Pktable,
                &self.migration_priv,
                &self.migrationkey_priv,
            ),
        })
    }
}
|
||||
|
||||
/// Handle the response to the request, producing a Migration credential
/// if successful.
///
/// The Migration credential can then be used in the migration protocol
/// to actually change buckets.
///
/// Errors with `ProofError::VerificationFailure` if the response is
/// malformed or if no matching entry can be decrypted from the
/// migration table (i.e., the user's bucket is not blocked).
pub fn handle_response(state: State, resp: Response) -> Result<cred::Migration, ProofError> {
    // A degenerate (identity) Pk would make the MAC trivially forgeable
    if resp.Pk.is_identity() {
        return Err(ProofError::VerificationFailure);
    }

    // Decrypt the MAC on the Migration Key credential
    // (standard ElGamal decryption with our secret key d from State)
    let Qk = resp.EncQk.1 - (state.d * resp.EncQk.0);

    // Use Qk to locate and decrypt the Migration credential
    match migration_table::decrypt_cred(
        &Qk,
        &state.id,
        &state.bucket,
        migration_table::MigrationType::Blockage,
        &resp.enc_migration_table,
    ) {
        Some(m) => Ok(m),
        None => Err(ProofError::VerificationFailure),
    }
}
|
|
@ -0,0 +1,908 @@
|
|||
/*! A module for the protocol for a user to request the issuing of an
|
||||
Invitation credential they can pass to someone they know.
|
||||
|
||||
They are allowed to do this as long as their current Lox credential has
|
||||
a non-zero "invites_remaining" attribute (which will be decreased by
|
||||
one), and they have a Bucket Reachability credential for their current
|
||||
bucket and today's date. (Such credentials are placed daily in the
|
||||
encrypted bridge table.)
|
||||
|
||||
The user presents their current Lox credential:
|
||||
- id: revealed
|
||||
- bucket: blinded
|
||||
- trust_level: blinded
|
||||
- level_since: blinded
|
||||
- invites_remaining: blinded, but proved in ZK that it's not zero
|
||||
- blockages: blinded
|
||||
|
||||
and a Bucket Reachability credential:
|
||||
- date: revealed to be today
|
||||
- bucket: blinded, but proved in ZK that it's the same as in the Lox
|
||||
credential above
|
||||
|
||||
and a new Lox credential to be issued:
|
||||
|
||||
- id: jointly chosen by the user and BA
|
||||
- bucket: blinded, but proved in ZK that it's the same as in the Lox
|
||||
credential above
|
||||
- trust_level: blinded, but proved in ZK that it's the same as in the
|
||||
Lox credential above
|
||||
- level_since: blinded, but proved in ZK that it's the same as in the
|
||||
Lox credential above
|
||||
- invites_remaining: blinded, but proved in ZK that it's one less than
|
||||
the number in the Lox credential above
|
||||
- blockages: blinded, but proved in ZK that it's the same as in the
|
||||
Lox credential above
|
||||
|
||||
and a new Invitation credential to be issued:
|
||||
|
||||
- inv_id: jointly chosen by the user and BA
|
||||
- date: revealed to be today
|
||||
- bucket: blinded, but proved in ZK that it's the same as in the Lox
|
||||
credential above
|
||||
- blockages: blinded, but proved in ZK that it's the same as in the Lox
|
||||
credential above
|
||||
|
||||
*/
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::scalar_u32;
|
||||
use super::super::{BridgeAuth, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
|
||||
/// The client's request in the issue-invite protocol: a blind showing
/// of the Lox and Bucket Reachability credentials, plus the user
/// blindings for the new Lox and Invitation credentials to be issued.
///
/// NOTE: field order is significant — bincode serializes fields in
/// declaration order, so reordering fields changes the wire format.
#[derive(Serialize, Deserialize)]
pub struct Request {
    // Fields for blind showing the Lox credential
    P: RistrettoPoint,
    id: Scalar,
    CBucket: RistrettoPoint,
    CLevel: RistrettoPoint,
    CSince: RistrettoPoint,
    CInvRemain: RistrettoPoint,
    CBlockages: RistrettoPoint,
    CQ: RistrettoPoint,

    // Fields for blind showing the Bucket Reachability credential
    P_reach: RistrettoPoint,
    CBucket_reach: RistrettoPoint,
    CQ_reach: RistrettoPoint,

    // Fields for user blinding of the Lox credential to be issued
    D: RistrettoPoint,
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    EncLevel: (RistrettoPoint, RistrettoPoint),
    EncSince: (RistrettoPoint, RistrettoPoint),
    EncInvRemain: (RistrettoPoint, RistrettoPoint),
    EncBlockages: (RistrettoPoint, RistrettoPoint),

    // Fields for user blinding of the Invitation credential to be
    // issued
    EncInvIdClient: (RistrettoPoint, RistrettoPoint),
    // The bucket and blockages attributes in the Invitation credential
    // issuing protocol can just reuse the exact encryptions as for the
    // Lox credential issuing protocol above.

    // The combined ZKP
    piUser: CompactProof,
}
|
||||
|
||||
/// The state the client must retain between sending a `Request` and
/// processing the corresponding `Response`: the ElGamal secret key d,
/// the client halves of the jointly chosen ids, and the plaintext
/// attribute values for the credentials being issued.
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // ElGamal secret key; its public key D is sent in the Request
    d: Scalar,
    D: RistrettoPoint,
    // The client-side encryptions sent in the Request, kept so the
    // response handler can verify the issuing proof against them
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    EncLevel: (RistrettoPoint, RistrettoPoint),
    EncSince: (RistrettoPoint, RistrettoPoint),
    EncInvRemain: (RistrettoPoint, RistrettoPoint),
    EncBlockages: (RistrettoPoint, RistrettoPoint),
    EncInvIdClient: (RistrettoPoint, RistrettoPoint),
    // The client's component of the new Lox credential id
    id_client: Scalar,
    // Plaintext attributes of the new Lox credential; invremain is
    // already the decremented value
    bucket: Scalar,
    level: Scalar,
    since: Scalar,
    invremain: Scalar,
    blockages: Scalar,
    // The client's component of the new Invitation credential id
    inv_id_client: Scalar,
}
|
||||
|
||||
/// The Bridge Authority's response in the issue-invite protocol: the
/// blind-issued new Lox credential, the blind-issued Invitation
/// credential, and the issuing ZKP.
#[derive(Serialize, Deserialize)]
pub struct Response {
    // The fields for the new Lox credential; the new invites_remaining
    // is one less than the old value, so we don't have to include it
    // here explicitly
    P: RistrettoPoint,
    EncQ: (RistrettoPoint, RistrettoPoint),
    // The issuer's component of the jointly chosen credential id
    id_server: Scalar,
    // The T* values let the client check the issuing proof for each
    // blinded attribute
    TId: RistrettoPoint,
    TBucket: RistrettoPoint,
    TLevel: RistrettoPoint,
    TSince: RistrettoPoint,
    TInvRemain: RistrettoPoint,
    TBlockages: RistrettoPoint,

    // The fields for the new Invitation credential
    P_inv: RistrettoPoint,
    EncQ_inv: (RistrettoPoint, RistrettoPoint),
    inv_id_server: Scalar,
    TId_inv: RistrettoPoint,
    // The invitation date, revealed (it is "today" at issuing time)
    date_inv: Scalar,
    TBucket_inv: RistrettoPoint,
    TBlockages_inv: RistrettoPoint,

    // The ZKP
    piBlindIssue: CompactProof,
}
|
||||
|
||||
// The zkp `define_proof!` macro takes (secret scalars), (public
// points), (common basepoints), then the statements to be proven.
define_proof! {
    requestproof,
    "Issue Invite Request",
    // Secret scalars: blinded attributes and blinding factors for the
    // two shown credentials, the ElGamal ephemerals for the two
    // credentials being issued, and the inverse pair used for the
    // invremain != 0 proof
    (bucket, level, since, invremain, blockages, zbucket, zlevel,
    zsince, zinvremain, zblockages, negzQ,
    zbucket_reach, negzQ_reach,
    d, eid_client, ebucket, elevel, esince, einvremain, eblockages, id_client,
    inv_id_client, einv_id_client,
    invremain_inverse, zinvremain_inverse),
    // Public points
    (P, CBucket, CLevel, CSince, CInvRemain, CBlockages, V, Xbucket,
    Xlevel, Xsince, Xinvremain, Xblockages,
    P_reach, CBucket_reach, V_reach, Xbucket_reach,
    D, EncIdClient0, EncIdClient1, EncBucket0, EncBucket1,
    EncLevel0, EncLevel1, EncSince0, EncSince1,
    EncInvRemain0, EncInvRemain1_plus_B, EncBlockages0, EncBlockages1,
    EncInvIdClient0, EncInvIdClient1),
    // Common system basepoints
    (A, B):
    // Blind showing of the Lox credential
    CBucket = (bucket*P + zbucket*A),
    CLevel = (level*P + zlevel*A),
    CSince = (since*P + zsince*A),
    CInvRemain = (invremain*P + zinvremain*A),
    CBlockages = (blockages*P + zblockages*A),
    // Proof that invremain is not 0
    // (knowing an inverse of invremain implies it is nonzero)
    P = (invremain_inverse*CInvRemain + zinvremain_inverse*A),
    // Blind showing of the Bucket Reachability credential; note the
    // same bucket is used in the proof
    CBucket_reach = (bucket*P_reach + zbucket_reach*A),
    // User blinding of the Lox credential to be issued
    D = (d*B),
    EncIdClient0 = (eid_client*B),
    EncIdClient1 = (id_client*B + eid_client*D),
    EncBucket0 = (ebucket*B),
    EncBucket1 = (bucket*B + ebucket*D),
    EncLevel0 = (elevel*B),
    EncLevel1 = (level*B + elevel*D),
    EncSince0 = (esince*B),
    EncSince1 = (since*B + esince*D),
    EncInvRemain0 = (einvremain*B),
    // The encrypted value is (invremain - 1); adding B to the second
    // component lets the statement be written in terms of invremain
    EncInvRemain1_plus_B = (invremain*B + einvremain*D),
    EncBlockages0 = (eblockages*B),
    EncBlockages1 = (blockages*B + eblockages*D),
    // User blinding of the Invitation to be issued
    EncInvIdClient0 = (einv_id_client*B),
    EncInvIdClient1 = (inv_id_client*B + einv_id_client*D)
}
|
||||
|
||||
// The issuer's proof of correct blind issuing of the new Lox and
// Invitation credentials.  Duplicated statements for the same
// left-hand side (e.g. TId appearing twice) prove an equality: TId
// equals both b*Xid and tid*A, which ties tid to b times the issuer's
// private key component.
define_proof! {
    blindissue,
    "Issue Invite Issuing",
    // Secret scalars: the issuer's private key components and the
    // per-issuance randomness for both credentials
    (x0, x0tilde, xid, xbucket, xlevel, xsince, xinvremain, xblockages,
    s, b, tid, tbucket, tlevel, tsince, tinvremain, tblockages,
    x0_inv, x0tilde_inv, xid_inv, xdate_inv, xbucket_inv,
    xblockages_inv,
    s_inv, b_inv, tid_inv, tbucket_inv, tblockages_inv),
    // Public points
    (P, EncQ0, EncQ1, X0, Xid, Xbucket, Xlevel, Xsince, Xinvremain,
    Xblockages, TId, TBucket, TLevel, TSince, TInvRemain, TBlockages,
    P_inv, EncQ_inv0, EncQ_inv1, X0_inv, Xid_inv, Xdate_inv,
    Xbucket_inv, Xblockages_inv, Pdate_inv, TId_inv, TBucket_inv,
    TBlockages_inv,
    D, EncId0, EncId1, EncBucket0, EncBucket1, EncLevel0, EncLevel1,
    EncSince0, EncSince1, EncInvRemain0, EncInvRemain1,
    EncBlockages0, EncBlockages1,
    EncInvId0, EncInvId1),
    // Common system basepoints
    (A, B):
    // The issuer's public key components for the Lox credential
    Xid = (xid*A),
    Xbucket = (xbucket*A),
    Xlevel = (xlevel*A),
    Xsince = (xsince*A),
    Xinvremain = (xinvremain*A),
    Xblockages = (xblockages*A),
    X0 = (x0*B + x0tilde*A),
    P = (b*B),
    // Each pair proves t* = b * x* for the corresponding attribute
    TId = (b*Xid),
    TId = (tid*A),
    TBucket = (b*Xbucket),
    TBucket = (tbucket*A),
    TLevel = (b*Xlevel),
    TLevel = (tlevel*A),
    TSince = (b*Xsince),
    TSince = (tsince*A),
    TInvRemain = (b*Xinvremain),
    TInvRemain = (tinvremain*A),
    TBlockages = (b*Xblockages),
    TBlockages = (tblockages*A),
    // The homomorphically computed encrypted MAC on the new Lox
    // credential
    EncQ0 = (s*B + tid*EncId0 + tbucket*EncBucket0 + tlevel*EncLevel0
        + tsince*EncSince0 + tinvremain*EncInvRemain0 + tblockages*EncBlockages0),
    EncQ1 = (s*D + tid*EncId1 + tbucket*EncBucket1 + tlevel*EncLevel1
        + tsince*EncSince1 + tinvremain*EncInvRemain1 + tblockages*EncBlockages1
        + x0*P),
    // The same structure for the new Invitation credential
    Xid_inv = (xid_inv*A),
    Xdate_inv = (xdate_inv*A),
    Xbucket_inv = (xbucket_inv*A),
    Xblockages_inv = (xblockages_inv*A),
    X0_inv = (x0_inv*B + x0tilde_inv*A),
    P_inv = (b_inv*B),
    TId_inv = (b_inv*Xid_inv),
    TId_inv = (tid_inv*A),
    TBucket_inv = (b_inv*Xbucket_inv),
    TBucket_inv = (tbucket_inv*A),
    TBlockages_inv = (b_inv*Xblockages_inv),
    TBlockages_inv = (tblockages_inv*A),
    // The bucket and blockages encryptions are reused from the Lox
    // issuing above; the date is a visible attribute (via Pdate_inv)
    EncQ_inv0 = (s_inv*B + tid_inv*EncInvId0 + tbucket_inv*EncBucket0
        + tblockages_inv*EncBlockages0),
    EncQ_inv1 = (s_inv*D + tid_inv*EncInvId1 + tbucket_inv*EncBucket1
        + tblockages_inv*EncBlockages1 + x0_inv*P_inv + xdate_inv*Pdate_inv)
}
|
||||
|
||||
/// Create an issue-invite request (client side).
///
/// Blind-shows the user's Lox credential (which must have a nonzero
/// `invites_remaining`) and a Bucket Reachability credential for the
/// same bucket dated `today`, and prepares the user blindings for the
/// new Lox credential (with `invites_remaining` decremented by one)
/// and the new Invitation credential.  Returns the `Request` to send
/// to the Bridge Authority and the `State` the client must retain.
///
/// Errors with `ProofError::VerificationFailure` if the credentials
/// cannot be validly shown together.
pub fn request(
    lox_cred: &cred::Lox,
    reach_cred: &cred::BucketReachability,
    lox_pub: &IssuerPubKey,
    reach_pub: &IssuerPubKey,
    today: u32,
) -> Result<(Request, State), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Ensure the credential can be correctly shown: it must be the case
    // that invites_remaining not be 0
    if lox_cred.invites_remaining == Scalar::zero() {
        return Err(ProofError::VerificationFailure);
    }
    // The buckets in the Lox and Bucket Reachability credentials have
    // to match
    if lox_cred.bucket != reach_cred.bucket {
        return Err(ProofError::VerificationFailure);
    }
    // The Bucket Reachability credential has to be dated today
    let reach_date: u32 = match scalar_u32(&reach_cred.date) {
        Some(v) => v,
        None => return Err(ProofError::VerificationFailure),
    };
    if reach_date != today {
        return Err(ProofError::VerificationFailure);
    }
    // The new invites_remaining
    let new_invites_remaining = lox_cred.invites_remaining - Scalar::one();

    // Blind showing the Lox credential

    // Reblind P and Q
    let mut rng = rand::thread_rng();
    let t = Scalar::random(&mut rng);
    let P = t * lox_cred.P;
    let Q = t * lox_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zbucket = Scalar::random(&mut rng);
    let zlevel = Scalar::random(&mut rng);
    let zsince = Scalar::random(&mut rng);
    let zinvremain = Scalar::random(&mut rng);
    let zblockages = Scalar::random(&mut rng);
    let CBucket = lox_cred.bucket * P + &zbucket * Atable;
    let CLevel = lox_cred.trust_level * P + &zlevel * Atable;
    let CSince = lox_cred.level_since * P + &zsince * Atable;
    let CInvRemain = lox_cred.invites_remaining * P + &zinvremain * Atable;
    let CBlockages = lox_cred.blockages * P + &zblockages * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ = Scalar::random(&mut rng);
    let CQ = Q - &negzQ * Atable;

    // Compute the "error factor"
    // (indices 2..6 are the issuer keys for bucket, trust_level,
    // level_since, invites_remaining, and blockages)
    let V = zbucket * lox_pub.X[2]
        + zlevel * lox_pub.X[3]
        + zsince * lox_pub.X[4]
        + zinvremain * lox_pub.X[5]
        + zblockages * lox_pub.X[6]
        + &negzQ * Atable;

    // Blind showing the Bucket Reachability credential

    // Reblind P and Q
    let t_reach = Scalar::random(&mut rng);
    let P_reach = t_reach * reach_cred.P;
    let Q_reach = t_reach * reach_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zbucket_reach = Scalar::random(&mut rng);
    let CBucket_reach = reach_cred.bucket * P_reach + &zbucket_reach * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ_reach = Scalar::random(&mut rng);
    let CQ_reach = Q_reach - &negzQ_reach * Atable;

    // Compute the "error factor"
    let V_reach = zbucket_reach * reach_pub.X[2] + &negzQ_reach * Atable;

    // User blinding for the Lox certificate to be issued

    // Pick an ElGamal keypair
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Pick a random client component of the id
    let id_client = Scalar::random(&mut rng);

    // Encrypt it (times the basepoint B) to the ElGamal public key D we
    // just created
    let eid_client = Scalar::random(&mut rng);
    let EncIdClient = (&eid_client * Btable, &id_client * Btable + eid_client * D);

    // Encrypt the other blinded fields (times B) to D as well
    let ebucket = Scalar::random(&mut rng);
    let EncBucket = (&ebucket * Btable, &lox_cred.bucket * Btable + ebucket * D);
    let elevel = Scalar::random(&mut rng);
    let EncLevel = (
        &elevel * Btable,
        &lox_cred.trust_level * Btable + elevel * D,
    );
    let esince = Scalar::random(&mut rng);
    let EncSince = (
        &esince * Btable,
        &lox_cred.level_since * Btable + esince * D,
    );
    let einvremain = Scalar::random(&mut rng);
    // Note: the *decremented* invites_remaining value is encrypted
    let EncInvRemain = (
        &einvremain * Btable,
        &new_invites_remaining * Btable + einvremain * D,
    );
    let eblockages = Scalar::random(&mut rng);
    let EncBlockages = (
        &eblockages * Btable,
        &lox_cred.blockages * Btable + eblockages * D,
    );

    // User blinding for the Invitation certificate to be issued

    // Pick a random client component of the id
    let inv_id_client = Scalar::random(&mut rng);

    // Encrypt it (times the basepoint B) to the ElGamal public key D we
    // just created
    let einv_id_client = Scalar::random(&mut rng);
    let EncInvIdClient = (
        &einv_id_client * Btable,
        &inv_id_client * Btable + einv_id_client * D,
    );

    // The proof that invites_remaining is not zero. We prove this by
    // demonstrating that we know its inverse.
    let invremain_inverse = &lox_cred.invites_remaining.invert();

    let zinvremain_inverse = -zinvremain * invremain_inverse;

    // So now invremain_inverse * CInvRemain + zinvremain_inverse * A = P

    // Construct the proof
    // NOTE: the transcript label must match the verifier's in
    // handle_issue_invite
    let mut transcript = Transcript::new(b"issue invite request");
    let piUser = requestproof::prove_compact(
        &mut transcript,
        requestproof::ProveAssignments {
            A,
            B,
            P: &P,
            CBucket: &CBucket,
            CLevel: &CLevel,
            CSince: &CSince,
            CInvRemain: &CInvRemain,
            CBlockages: &CBlockages,
            V: &V,
            Xbucket: &lox_pub.X[2],
            Xlevel: &lox_pub.X[3],
            Xsince: &lox_pub.X[4],
            Xinvremain: &lox_pub.X[5],
            Xblockages: &lox_pub.X[6],
            P_reach: &P_reach,
            CBucket_reach: &CBucket_reach,
            V_reach: &V_reach,
            Xbucket_reach: &reach_pub.X[2],
            D: &D,
            EncIdClient0: &EncIdClient.0,
            EncIdClient1: &EncIdClient.1,
            EncBucket0: &EncBucket.0,
            EncBucket1: &EncBucket.1,
            EncLevel0: &EncLevel.0,
            EncLevel1: &EncLevel.1,
            EncSince0: &EncSince.0,
            EncSince1: &EncSince.1,
            EncInvRemain0: &EncInvRemain.0,
            // Adding B matches the proof statement written in terms of
            // the undecremented invremain
            EncInvRemain1_plus_B: &(EncInvRemain.1 + B),
            EncBlockages0: &EncBlockages.0,
            EncBlockages1: &EncBlockages.1,
            EncInvIdClient0: &EncInvIdClient.0,
            EncInvIdClient1: &EncInvIdClient.1,
            bucket: &lox_cred.bucket,
            level: &lox_cred.trust_level,
            since: &lox_cred.level_since,
            invremain: &lox_cred.invites_remaining,
            blockages: &lox_cred.blockages,
            zbucket: &zbucket,
            zlevel: &zlevel,
            zsince: &zsince,
            zinvremain: &zinvremain,
            zblockages: &zblockages,
            negzQ: &negzQ,
            zbucket_reach: &zbucket_reach,
            negzQ_reach: &negzQ_reach,
            d: &d,
            eid_client: &eid_client,
            ebucket: &ebucket,
            elevel: &elevel,
            esince: &esince,
            einvremain: &einvremain,
            eblockages: &eblockages,
            id_client: &id_client,
            inv_id_client: &inv_id_client,
            einv_id_client: &einv_id_client,
            invremain_inverse,
            zinvremain_inverse: &zinvremain_inverse,
        },
    )
    .0;

    Ok((
        Request {
            P,
            id: lox_cred.id,
            CBucket,
            CLevel,
            CSince,
            CInvRemain,
            CBlockages,
            CQ,
            P_reach,
            CBucket_reach,
            CQ_reach,
            D,
            EncIdClient,
            EncBucket,
            EncLevel,
            EncSince,
            EncInvRemain,
            EncBlockages,
            EncInvIdClient,
            piUser,
        },
        // The State keeps everything needed to verify and unblind the
        // issuer's response
        State {
            d,
            D,
            EncIdClient,
            EncBucket,
            EncLevel,
            EncSince,
            EncInvRemain,
            EncBlockages,
            EncInvIdClient,
            id_client,
            bucket: lox_cred.bucket,
            level: lox_cred.trust_level,
            since: lox_cred.level_since,
            invremain: new_invites_remaining,
            blockages: lox_cred.blockages,
            inv_id_client,
        },
    ))
}
|
||||
|
||||
impl BridgeAuth {
|
||||
/// Receive an issue invite request
|
||||
pub fn handle_issue_invite(&mut self, req: Request) -> Result<Response, ProofError> {
|
||||
let A: &RistrettoPoint = &CMZ_A;
|
||||
let B: &RistrettoPoint = &CMZ_B;
|
||||
let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
|
||||
let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;
|
||||
|
||||
if req.P.is_identity() || req.P_reach.is_identity() {
|
||||
return Err(ProofError::VerificationFailure);
|
||||
}
|
||||
|
||||
let today: Scalar = self.today().into();
|
||||
|
||||
// Recompute the "error factors" using knowledge of our own
|
||||
// (the issuer's) private key instead of knowledge of the
|
||||
// hidden attributes
|
||||
let Vprime = (self.lox_priv.x[0] + self.lox_priv.x[1] * req.id) * req.P
|
||||
+ self.lox_priv.x[2] * req.CBucket
|
||||
+ self.lox_priv.x[3] * req.CLevel
|
||||
+ self.lox_priv.x[4] * req.CSince
|
||||
+ self.lox_priv.x[5] * req.CInvRemain
|
||||
+ self.lox_priv.x[6] * req.CBlockages
|
||||
- req.CQ;
|
||||
|
||||
let Vprime_reach = (self.reachability_priv.x[0] + self.reachability_priv.x[1] * today)
|
||||
* req.P_reach
|
||||
+ self.reachability_priv.x[2] * req.CBucket_reach
|
||||
- req.CQ_reach;
|
||||
|
||||
// Verify the ZKP
|
||||
let mut transcript = Transcript::new(b"issue invite request");
|
||||
requestproof::verify_compact(
|
||||
&req.piUser,
|
||||
&mut transcript,
|
||||
requestproof::VerifyAssignments {
|
||||
A: &A.compress(),
|
||||
B: &B.compress(),
|
||||
P: &req.P.compress(),
|
||||
CBucket: &req.CBucket.compress(),
|
||||
CLevel: &req.CLevel.compress(),
|
||||
CSince: &req.CSince.compress(),
|
||||
CInvRemain: &req.CInvRemain.compress(),
|
||||
CBlockages: &req.CBlockages.compress(),
|
||||
V: &Vprime.compress(),
|
||||
Xbucket: &self.lox_pub.X[2].compress(),
|
||||
Xlevel: &self.lox_pub.X[3].compress(),
|
||||
Xsince: &self.lox_pub.X[4].compress(),
|
||||
Xinvremain: &self.lox_pub.X[5].compress(),
|
||||
Xblockages: &self.lox_pub.X[6].compress(),
|
||||
P_reach: &req.P_reach.compress(),
|
||||
CBucket_reach: &req.CBucket_reach.compress(),
|
||||
V_reach: &Vprime_reach.compress(),
|
||||
Xbucket_reach: &self.reachability_pub.X[2].compress(),
|
||||
D: &req.D.compress(),
|
||||
EncIdClient0: &req.EncIdClient.0.compress(),
|
||||
EncIdClient1: &req.EncIdClient.1.compress(),
|
||||
EncBucket0: &req.EncBucket.0.compress(),
|
||||
EncBucket1: &req.EncBucket.1.compress(),
|
||||
EncLevel0: &req.EncLevel.0.compress(),
|
||||
EncLevel1: &req.EncLevel.1.compress(),
|
||||
EncSince0: &req.EncSince.0.compress(),
|
||||
EncSince1: &req.EncSince.1.compress(),
|
||||
EncInvRemain0: &req.EncInvRemain.0.compress(),
|
||||
EncInvRemain1_plus_B: &(req.EncInvRemain.1 + B).compress(),
|
||||
EncBlockages0: &req.EncBlockages.0.compress(),
|
||||
EncBlockages1: &req.EncBlockages.1.compress(),
|
||||
EncInvIdClient0: &req.EncInvIdClient.0.compress(),
|
||||
EncInvIdClient1: &req.EncInvIdClient.1.compress(),
|
||||
},
|
||||
)?;
|
||||
|
||||
// Ensure the id has not been seen before, and add it to the
|
||||
// seen list.
|
||||
if self.id_filter.filter(&req.id) == SeenType::Seen {
|
||||
return Err(ProofError::VerificationFailure);
|
||||
}
|
||||
|
||||
// Blind issuing of the new Lox credential
|
||||
|
||||
// Choose a random server id component to add to the client's
|
||||
// (blinded) id component
|
||||
let mut rng = rand::thread_rng();
|
||||
let id_server = Scalar::random(&mut rng);
|
||||
let EncId = (req.EncIdClient.0, req.EncIdClient.1 + &id_server * Btable);
|
||||
|
||||
// Compute the MAC on the visible attributes (none here)
|
||||
let b = Scalar::random(&mut rng);
|
||||
let P = &b * Btable;
|
||||
let QHc = self.lox_priv.x[0] * P;
|
||||
|
||||
// El Gamal encrypt it to the public key req.D
|
||||
let s = Scalar::random(&mut rng);
|
||||
let EncQHc = (&s * Btable, QHc + s * req.D);
|
||||
|
||||
// Homomorphically compute the part of the MAC corresponding to
|
||||
// the blinded attributes
|
||||
let tid = self.lox_priv.x[1] * b;
|
||||
let TId = &tid * Atable;
|
||||
let EncQId = (tid * EncId.0, tid * EncId.1);
|
||||
let tbucket = self.lox_priv.x[2] * b;
|
||||
let TBucket = &tbucket * Atable;
|
||||
let EncQBucket = (tbucket * req.EncBucket.0, tbucket * req.EncBucket.1);
|
||||
let tlevel = self.lox_priv.x[3] * b;
|
||||
let TLevel = &tlevel * Atable;
|
||||
let EncQLevel = (tlevel * req.EncLevel.0, tlevel * req.EncLevel.1);
|
||||
let tsince = self.lox_priv.x[4] * b;
|
||||
let TSince = &tsince * Atable;
|
||||
let EncQSince = (tsince * req.EncSince.0, tsince * req.EncSince.1);
|
||||
let tinvremain = self.lox_priv.x[5] * b;
|
||||
let TInvRemain = &tinvremain * Atable;
|
||||
let EncQInvRemain = (
|
||||
tinvremain * req.EncInvRemain.0,
|
||||
tinvremain * req.EncInvRemain.1,
|
||||
);
|
||||
let tblockages = self.lox_priv.x[6] * b;
|
||||
let TBlockages = &tblockages * Atable;
|
||||
let EncQBlockages = (
|
||||
tblockages * req.EncBlockages.0,
|
||||
tblockages * req.EncBlockages.1,
|
||||
);
|
||||
|
||||
let EncQ = (
|
||||
EncQHc.0
|
||||
+ EncQId.0
|
||||
+ EncQBucket.0
|
||||
+ EncQLevel.0
|
||||
+ EncQSince.0
|
||||
+ EncQInvRemain.0
|
||||
+ EncQBlockages.0,
|
||||
EncQHc.1
|
||||
+ EncQId.1
|
||||
+ EncQBucket.1
|
||||
+ EncQLevel.1
|
||||
+ EncQSince.1
|
||||
+ EncQInvRemain.1
|
||||
+ EncQBlockages.1,
|
||||
);
|
||||
|
||||
// Blind issuing of the new Invitation credential
|
||||
|
||||
// Choose a random server id component to add to the client's
|
||||
// (blinded) id component
|
||||
let inv_id_server = Scalar::random(&mut rng);
|
||||
let EncInvId = (
|
||||
req.EncInvIdClient.0,
|
||||
req.EncInvIdClient.1 + &inv_id_server * Btable,
|
||||
);
|
||||
|
||||
// Compute the MAC on the visible attributes
|
||||
let b_inv = Scalar::random(&mut rng);
|
||||
let P_inv = &b_inv * Btable;
|
||||
let QHc_inv = (self.invitation_priv.x[0] + self.invitation_priv.x[2] * today) * P_inv;
|
||||
|
||||
// El Gamal encrypt it to the public key req.D
|
||||
let s_inv = Scalar::random(&mut rng);
|
||||
let EncQHc_inv = (&s_inv * Btable, QHc_inv + s_inv * req.D);
|
||||
|
||||
// Homomorphically compute the part of the MAC corresponding to
|
||||
// the blinded attributes
|
||||
let tinvid = self.invitation_priv.x[1] * b_inv;
|
||||
let TId_inv = &tinvid * Atable;
|
||||
let EncQInvId = (tinvid * EncInvId.0, tinvid * EncInvId.1);
|
||||
let tinvbucket = self.invitation_priv.x[3] * b_inv;
|
||||
let TBucket_inv = &tinvbucket * Atable;
|
||||
// The bucket and blockages encrypted attributes are reused from
|
||||
// the Lox credential
|
||||
let EncQInvBucket = (tinvbucket * req.EncBucket.0, tinvbucket * req.EncBucket.1);
|
||||
let tinvblockages = self.invitation_priv.x[4] * b_inv;
|
||||
let TBlockages_inv = &tinvblockages * Atable;
|
||||
let EncQInvBlockages = (
|
||||
tinvblockages * req.EncBlockages.0,
|
||||
tinvblockages * req.EncBlockages.1,
|
||||
);
|
||||
|
||||
let EncQ_inv = (
|
||||
EncQHc_inv.0 + EncQInvId.0 + EncQInvBucket.0 + EncQInvBlockages.0,
|
||||
EncQHc_inv.1 + EncQInvId.1 + EncQInvBucket.1 + EncQInvBlockages.1,
|
||||
);
|
||||
|
||||
let mut transcript = Transcript::new(b"issue invite issuing");
|
||||
let piBlindIssue = blindissue::prove_compact(
|
||||
&mut transcript,
|
||||
blindissue::ProveAssignments {
|
||||
A,
|
||||
B,
|
||||
P: &P,
|
||||
EncQ0: &EncQ.0,
|
||||
EncQ1: &EncQ.1,
|
||||
X0: &self.lox_pub.X[0],
|
||||
Xid: &self.lox_pub.X[1],
|
||||
Xbucket: &self.lox_pub.X[2],
|
||||
Xlevel: &self.lox_pub.X[3],
|
||||
Xsince: &self.lox_pub.X[4],
|
||||
Xinvremain: &self.lox_pub.X[5],
|
||||
Xblockages: &self.lox_pub.X[6],
|
||||
TId: &TId,
|
||||
TBucket: &TBucket,
|
||||
TLevel: &TLevel,
|
||||
TSince: &TSince,
|
||||
TInvRemain: &TInvRemain,
|
||||
TBlockages: &TBlockages,
|
||||
P_inv: &P_inv,
|
||||
EncQ_inv0: &EncQ_inv.0,
|
||||
EncQ_inv1: &EncQ_inv.1,
|
||||
X0_inv: &self.invitation_pub.X[0],
|
||||
Xid_inv: &self.invitation_pub.X[1],
|
||||
Xdate_inv: &self.invitation_pub.X[2],
|
||||
Xbucket_inv: &self.invitation_pub.X[3],
|
||||
Xblockages_inv: &self.invitation_pub.X[4],
|
||||
Pdate_inv: &(today * P_inv),
|
||||
TId_inv: &TId_inv,
|
||||
TBucket_inv: &TBucket_inv,
|
||||
TBlockages_inv: &TBlockages_inv,
|
||||
D: &req.D,
|
||||
EncId0: &EncId.0,
|
||||
EncId1: &EncId.1,
|
||||
EncBucket0: &req.EncBucket.0,
|
||||
EncBucket1: &req.EncBucket.1,
|
||||
EncLevel0: &req.EncLevel.0,
|
||||
EncLevel1: &req.EncLevel.1,
|
||||
EncSince0: &req.EncSince.0,
|
||||
EncSince1: &req.EncSince.1,
|
||||
EncInvRemain0: &req.EncInvRemain.0,
|
||||
EncInvRemain1: &req.EncInvRemain.1,
|
||||
EncBlockages0: &req.EncBlockages.0,
|
||||
EncBlockages1: &req.EncBlockages.1,
|
||||
EncInvId0: &EncInvId.0,
|
||||
EncInvId1: &EncInvId.1,
|
||||
x0: &self.lox_priv.x[0],
|
||||
x0tilde: &self.lox_priv.x0tilde,
|
||||
xid: &self.lox_priv.x[1],
|
||||
xbucket: &self.lox_priv.x[2],
|
||||
xlevel: &self.lox_priv.x[3],
|
||||
xsince: &self.lox_priv.x[4],
|
||||
xinvremain: &self.lox_priv.x[5],
|
||||
xblockages: &self.lox_priv.x[6],
|
||||
s: &s,
|
||||
b: &b,
|
||||
tid: &tid,
|
||||
tbucket: &tbucket,
|
||||
tlevel: &tlevel,
|
||||
tsince: &tsince,
|
||||
tinvremain: &tinvremain,
|
||||
tblockages: &tblockages,
|
||||
x0_inv: &self.invitation_priv.x[0],
|
||||
x0tilde_inv: &self.invitation_priv.x0tilde,
|
||||
xid_inv: &self.invitation_priv.x[1],
|
||||
xdate_inv: &self.invitation_priv.x[2],
|
||||
xbucket_inv: &self.invitation_priv.x[3],
|
||||
xblockages_inv: &self.invitation_priv.x[4],
|
||||
s_inv: &s_inv,
|
||||
b_inv: &b_inv,
|
||||
tid_inv: &tinvid,
|
||||
tbucket_inv: &tinvbucket,
|
||||
tblockages_inv: &tinvblockages,
|
||||
},
|
||||
)
|
||||
.0;
|
||||
|
||||
Ok(Response {
|
||||
P,
|
||||
EncQ,
|
||||
id_server,
|
||||
TId,
|
||||
TBucket,
|
||||
TLevel,
|
||||
TSince,
|
||||
TInvRemain,
|
||||
TBlockages,
|
||||
P_inv,
|
||||
EncQ_inv,
|
||||
inv_id_server,
|
||||
TId_inv,
|
||||
date_inv: today,
|
||||
TBucket_inv,
|
||||
TBlockages_inv,
|
||||
piBlindIssue,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle the response to the request, producing the new Lox credential
/// and Invitation credential if successful.
///
/// Combines the client's and server's id contributions (in plain and
/// encrypted form), verifies the issuer's blind-issuing ZKP, and
/// decrypts the two El Gamal-encrypted MACs with the client's private
/// key `state.d`.
pub fn handle_response(
    state: State,
    resp: Response,
    lox_pub: &IssuerPubKey,
    invitation_pub: &IssuerPubKey,
) -> Result<(cred::Lox, cred::Invitation), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Reject degenerate responses whose credential base points are the
    // identity element
    if resp.P.is_identity() || resp.P_inv.is_identity() {
        return Err(ProofError::VerificationFailure);
    }

    // Add the server's contribution to the id to our own, both in plain
    // and encrypted form and for both the Lox credential id and the
    // Invitation credential id
    let id = state.id_client + resp.id_server;
    let EncId = (
        state.EncIdClient.0,
        state.EncIdClient.1 + &resp.id_server * Btable,
    );

    let inv_id = state.inv_id_client + resp.inv_id_server;
    let EncInvId = (
        state.EncInvIdClient.0,
        state.EncInvIdClient.1 + &resp.inv_id_server * Btable,
    );

    // Verify the proof; the transcript label must match the one the
    // issuer used when creating the proof
    let mut transcript = Transcript::new(b"issue invite issuing");
    blindissue::verify_compact(
        &resp.piBlindIssue,
        &mut transcript,
        blindissue::VerifyAssignments {
            A: &A.compress(),
            B: &B.compress(),
            P: &resp.P.compress(),
            EncQ0: &resp.EncQ.0.compress(),
            EncQ1: &resp.EncQ.1.compress(),
            X0: &lox_pub.X[0].compress(),
            Xid: &lox_pub.X[1].compress(),
            Xbucket: &lox_pub.X[2].compress(),
            Xlevel: &lox_pub.X[3].compress(),
            Xsince: &lox_pub.X[4].compress(),
            Xinvremain: &lox_pub.X[5].compress(),
            Xblockages: &lox_pub.X[6].compress(),
            TId: &resp.TId.compress(),
            TBucket: &resp.TBucket.compress(),
            TLevel: &resp.TLevel.compress(),
            TSince: &resp.TSince.compress(),
            TInvRemain: &resp.TInvRemain.compress(),
            TBlockages: &resp.TBlockages.compress(),
            P_inv: &resp.P_inv.compress(),
            EncQ_inv0: &resp.EncQ_inv.0.compress(),
            EncQ_inv1: &resp.EncQ_inv.1.compress(),
            X0_inv: &invitation_pub.X[0].compress(),
            Xid_inv: &invitation_pub.X[1].compress(),
            Xdate_inv: &invitation_pub.X[2].compress(),
            Xbucket_inv: &invitation_pub.X[3].compress(),
            Xblockages_inv: &invitation_pub.X[4].compress(),
            Pdate_inv: &(resp.date_inv * resp.P_inv).compress(),
            TId_inv: &resp.TId_inv.compress(),
            TBucket_inv: &resp.TBucket_inv.compress(),
            TBlockages_inv: &resp.TBlockages_inv.compress(),
            D: &state.D.compress(),
            EncId0: &EncId.0.compress(),
            EncId1: &EncId.1.compress(),
            EncBucket0: &state.EncBucket.0.compress(),
            EncBucket1: &state.EncBucket.1.compress(),
            EncLevel0: &state.EncLevel.0.compress(),
            EncLevel1: &state.EncLevel.1.compress(),
            EncSince0: &state.EncSince.0.compress(),
            EncSince1: &state.EncSince.1.compress(),
            EncInvRemain0: &state.EncInvRemain.0.compress(),
            EncInvRemain1: &state.EncInvRemain.1.compress(),
            EncBlockages0: &state.EncBlockages.0.compress(),
            EncBlockages1: &state.EncBlockages.1.compress(),
            EncInvId0: &EncInvId.0.compress(),
            EncInvId1: &EncInvId.1.compress(),
        },
    )?;

    // Decrypt EncQ and EncQ_inv with the El Gamal private key d to
    // recover the MACs Q (Lox) and Q_inv (Invitation)
    let Q = resp.EncQ.1 - (state.d * resp.EncQ.0);
    let Q_inv = resp.EncQ_inv.1 - (state.d * resp.EncQ_inv.0);

    Ok((
        cred::Lox {
            P: resp.P,
            Q,
            id,
            bucket: state.bucket,
            trust_level: state.level,
            level_since: state.since,
            invites_remaining: state.invremain,
            blockages: state.blockages,
        },
        cred::Invitation {
            P: resp.P_inv,
            Q: Q_inv,
            inv_id,
            date: resp.date_inv,
            bucket: state.bucket,
            blockages: state.blockages,
        },
    ))
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,541 @@
|
|||
/*! A module for the protocol for the user to migrate from one bucket to
|
||||
another and change trust level from untrusted (trust level 0) to trusted
|
||||
(trust level 1).
|
||||
|
||||
The user presents their current Lox credential:
|
||||
|
||||
- id: revealed
|
||||
- bucket: blinded
|
||||
- trust_level: revealed to be 0
|
||||
- level_since: blinded
|
||||
- invites_remaining: revealed to be 0
|
||||
- blockages: revealed to be 0
|
||||
|
||||
and a Migration credential:
|
||||
|
||||
- id: revealed as the same as the Lox credential id above
|
||||
- from_bucket: blinded, but proved in ZK that it's the same as the
|
||||
bucket in the Lox credential above
|
||||
- to_bucket: blinded
|
||||
|
||||
and a new Lox credential to be issued:
|
||||
|
||||
- id: jointly chosen by the user and BA
|
||||
- bucket: blinded, but proved in ZK that it's the same as the to_bucket
|
||||
in the Migration credential above
|
||||
- trust_level: 1
|
||||
- level_since: today
|
||||
- invites_remaining: 0
|
||||
- blockages: 0
|
||||
|
||||
*/
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::{BridgeAuth, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
|
||||
/// The request message for this protocol
#[derive(Serialize, Deserialize)]
pub struct Request {
    // Fields for blind showing the Lox credential
    // We don't need to include invites_remaining or blockages,
    // since they must be 0
    P_lox: RistrettoPoint,
    id: Scalar,
    CBucket: RistrettoPoint,
    trust_level: Scalar,
    CSince: RistrettoPoint,
    CQ_lox: RistrettoPoint,

    // Fields for blind showing the Migration credential
    P_mig: RistrettoPoint,
    CFromBucket: RistrettoPoint,
    CToBucket: RistrettoPoint,
    CQ_mig: RistrettoPoint,

    // Fields for user blinding of the Lox credential to be issued
    // D is the user's ephemeral El Gamal public key; the Enc* pairs are
    // El Gamal ciphertexts under D
    D: RistrettoPoint,
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),

    // The combined ZKP
    piUser: CompactProof,
}
|
||||
|
||||
/// The client state for this protocol, kept between `request` and
/// `handle_response`
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The El Gamal private key corresponding to D
    d: Scalar,
    D: RistrettoPoint,
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    // The client's component of the jointly chosen credential id
    id_client: Scalar,
    // The destination bucket of the migration
    to_bucket: Scalar,
}
|
||||
|
||||
/// The response message for this protocol
#[derive(Serialize, Deserialize)]
pub struct Response {
    // The new attributes; trust_level = 1 is implicit
    level_since: Scalar,

    // The fields for the new Lox credential
    P: RistrettoPoint,
    // El Gamal encryption (to the client's D) of the MAC Q
    EncQ: (RistrettoPoint, RistrettoPoint),
    // The server's component of the jointly chosen credential id
    id_server: Scalar,
    TId: RistrettoPoint,
    TBucket: RistrettoPoint,

    // The ZKP
    piBlindIssue: CompactProof,
}
|
||||
|
||||
// The combined ZKP for the migration request: blind showing of the Lox
// and Migration credentials, plus user blinding of the Lox credential
// to be issued
define_proof! {
    requestproof,
    "Migration Request",
    (bucket, since, zbucket, zsince, negzQ_lox,
     tobucket, zfrombucket, ztobucket, negzQ_mig,
     d, eid_client, ebucket, id_client),
    (P_lox, CBucket, CSince, V_lox, Xbucket, Xsince,
     P_mig, CFromBucket, CToBucket, V_mig, Xfrombucket, Xtobucket,
     D, EncIdClient0, EncIdClient1, EncBucket0, EncBucket1),
    (A, B):
    // Blind showing of the Lox credential
    CBucket = (bucket*P_lox + zbucket*A),
    CSince = (since*P_lox + zsince*A),
    V_lox = (zbucket*Xbucket + zsince*Xsince + negzQ_lox*A),
    // Blind showing of the Migration credential; note the use of the
    // same "bucket" secret variable, which ties the Lox bucket to the
    // Migration from_bucket
    CFromBucket = (bucket*P_mig + zfrombucket*A),
    CToBucket = (tobucket*P_mig + ztobucket*A),
    V_mig = (zfrombucket*Xfrombucket + ztobucket*Xtobucket + negzQ_mig*A),
    // User blinding of the Lox credential to be issued; note the use of
    // the same "tobucket" secret variable, which ties the new bucket to
    // the Migration to_bucket
    D = (d*B),
    EncIdClient0 = (eid_client*B),
    EncIdClient1 = (id_client*B + eid_client*D),
    EncBucket0 = (ebucket*B),
    EncBucket1 = (tobucket*B + ebucket*D)
}
|
||||
|
||||
// The ZKP for blind issuing of the new Lox credential.  Statement
// pairs such as TId = (b*Xid) together with TId = (tid*A) tie the
// homomorphic-MAC scalars (tid, tbucket) to the issuer's keys.
define_proof! {
    blindissue,
    "Migration Blind Issuing",
    (x0, x0tilde, xid, xbucket, xlevel, xsince, s, b, tid, tbucket),
    (P, EncQ0, EncQ1, X0, Xid, Xbucket, Xlevel, Xsince, Plevel, Psince, TId, TBucket,
     D, EncId0, EncId1, EncBucket0, EncBucket1),
    (A, B):
    Xid = (xid*A),
    Xlevel = (xlevel*A),
    Xbucket = (xbucket*A),
    Xsince = (xsince*A),
    X0 = (x0*B + x0tilde*A),
    P = (b*B),
    TId = (b*Xid),
    TId = (tid*A),
    TBucket = (b*Xbucket),
    TBucket = (tbucket*A),
    EncQ0 = (s*B + tid*EncId0 + tbucket*EncBucket0),
    EncQ1 = (s*D + tid*EncId1 + tbucket*EncBucket1 + x0*P + xlevel*Plevel + xsince*Psince)
}
|
||||
|
||||
/// Create a migration request and the client state needed to process
/// the response.
///
/// Blind-shows the user's current Lox credential and Migration
/// credential, user-blinds the attributes of the Lox credential to be
/// issued, and bundles everything with the combined `requestproof` ZKP.
pub fn request(
    lox_cred: &cred::Lox,
    migration_cred: &cred::Migration,
    lox_pub: &IssuerPubKey,
    migration_pub: &IssuerPubKey,
) -> Result<(Request, State), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Ensure that the credentials can be correctly shown; that is, the
    // ids match and the Lox credential bucket matches the Migration
    // credential from_bucket
    if lox_cred.id != migration_cred.lox_id || lox_cred.bucket != migration_cred.from_bucket {
        return Err(ProofError::VerificationFailure);
    }

    // This protocol only allows migrating from trust level 0 to trust
    // level 1
    if lox_cred.trust_level != Scalar::zero() {
        return Err(ProofError::VerificationFailure);
    }

    // Blind showing the Lox credential

    // Reblind P and Q
    let mut rng = rand::thread_rng();
    let t_lox = Scalar::random(&mut rng);
    let P_lox = t_lox * lox_cred.P;
    let Q_lox = t_lox * lox_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zbucket = Scalar::random(&mut rng);
    let zsince = Scalar::random(&mut rng);
    let CBucket = lox_cred.bucket * P_lox + &zbucket * Atable;
    let CSince = lox_cred.level_since * P_lox + &zsince * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ_lox = Scalar::random(&mut rng);
    let CQ_lox = Q_lox - &negzQ_lox * Atable;

    // Compute the "error factor"
    let V_lox = zbucket * lox_pub.X[2] + zsince * lox_pub.X[4] + &negzQ_lox * Atable;

    // Blind showing the Migration credential

    // Reblind P and Q
    let t_mig = Scalar::random(&mut rng);
    let P_mig = t_mig * migration_cred.P;
    let Q_mig = t_mig * migration_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zfrombucket = Scalar::random(&mut rng);
    let ztobucket = Scalar::random(&mut rng);
    let CFromBucket = migration_cred.from_bucket * P_mig + &zfrombucket * Atable;
    let CToBucket = migration_cred.to_bucket * P_mig + &ztobucket * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ_mig = Scalar::random(&mut rng);
    let CQ_mig = Q_mig - &negzQ_mig * Atable;

    // Compute the "error factor"
    let V_mig =
        zfrombucket * migration_pub.X[2] + ztobucket * migration_pub.X[3] + &negzQ_mig * Atable;

    // User blinding for the Lox certificate to be issued

    // Pick an ElGamal keypair
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Pick a random client component of the id
    let id_client = Scalar::random(&mut rng);

    // Encrypt it (times the basepoint B) to the ElGamal public key D we
    // just created
    let eid_client = Scalar::random(&mut rng);
    let EncIdClient = (&eid_client * Btable, &id_client * Btable + eid_client * D);

    // Encrypt the bucket field (times B) to D as well
    let ebucket = Scalar::random(&mut rng);
    let EncBucket = (
        &ebucket * Btable,
        &migration_cred.to_bucket * Btable + ebucket * D,
    );

    // Construct the proof
    let mut transcript = Transcript::new(b"migration request");
    let piUser = requestproof::prove_compact(
        &mut transcript,
        requestproof::ProveAssignments {
            A,
            B,
            P_lox: &P_lox,
            CBucket: &CBucket,
            CSince: &CSince,
            V_lox: &V_lox,
            Xbucket: &lox_pub.X[2],
            Xsince: &lox_pub.X[4],
            P_mig: &P_mig,
            CFromBucket: &CFromBucket,
            CToBucket: &CToBucket,
            V_mig: &V_mig,
            Xfrombucket: &migration_pub.X[2],
            Xtobucket: &migration_pub.X[3],
            D: &D,
            EncIdClient0: &EncIdClient.0,
            EncIdClient1: &EncIdClient.1,
            EncBucket0: &EncBucket.0,
            EncBucket1: &EncBucket.1,
            bucket: &lox_cred.bucket,
            since: &lox_cred.level_since,
            zbucket: &zbucket,
            zsince: &zsince,
            negzQ_lox: &negzQ_lox,
            tobucket: &migration_cred.to_bucket,
            zfrombucket: &zfrombucket,
            ztobucket: &ztobucket,
            negzQ_mig: &negzQ_mig,
            d: &d,
            eid_client: &eid_client,
            ebucket: &ebucket,
            id_client: &id_client,
        },
    )
    .0;

    Ok((
        Request {
            P_lox,
            id: lox_cred.id,
            CBucket,
            trust_level: lox_cred.trust_level,
            CSince,
            CQ_lox,
            P_mig,
            CFromBucket,
            CToBucket,
            CQ_mig,
            D,
            EncIdClient,
            EncBucket,
            piUser,
        },
        State {
            d,
            D,
            EncIdClient,
            EncBucket,
            id_client,
            to_bucket: migration_cred.to_bucket,
        },
    ))
}
|
||||
|
||||
impl BridgeAuth {
    /// Receive a migration request.
    ///
    /// Verifies the blind showings of the Lox and Migration
    /// credentials, enforces single use of the Lox credential id, and
    /// blind-issues a new level-1 Lox credential for the destination
    /// bucket.
    pub fn handle_migration(&mut self, req: Request) -> Result<Response, ProofError> {
        let A: &RistrettoPoint = &CMZ_A;
        let B: &RistrettoPoint = &CMZ_B;
        let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
        let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

        // Reject degenerate requests whose credential base points are
        // the identity element
        if req.P_lox.is_identity() || req.P_mig.is_identity() {
            return Err(ProofError::VerificationFailure);
        }

        // We only currently support migrating from trust level 0
        if req.trust_level != Scalar::zero() {
            return Err(ProofError::VerificationFailure);
        }

        // Recompute the "error factors" using knowledge of our own
        // (the issuer's) private key instead of knowledge of the
        // hidden attributes
        let Vprime_lox = (self.lox_priv.x[0]
            + self.lox_priv.x[1] * req.id
            + self.lox_priv.x[3] * req.trust_level)
            * req.P_lox
            + self.lox_priv.x[2] * req.CBucket
            + self.lox_priv.x[4] * req.CSince
            - req.CQ_lox;

        let Vprime_mig = (self.migration_priv.x[0] + self.migration_priv.x[1] * req.id) * req.P_mig
            + self.migration_priv.x[2] * req.CFromBucket
            + self.migration_priv.x[3] * req.CToBucket
            - req.CQ_mig;

        // Verify the ZKP; the transcript label must match the one the
        // client used when creating the proof
        let mut transcript = Transcript::new(b"migration request");
        requestproof::verify_compact(
            &req.piUser,
            &mut transcript,
            requestproof::VerifyAssignments {
                A: &A.compress(),
                B: &B.compress(),
                P_lox: &req.P_lox.compress(),
                CBucket: &req.CBucket.compress(),
                CSince: &req.CSince.compress(),
                V_lox: &Vprime_lox.compress(),
                Xbucket: &self.lox_pub.X[2].compress(),
                Xsince: &self.lox_pub.X[4].compress(),
                P_mig: &req.P_mig.compress(),
                CFromBucket: &req.CFromBucket.compress(),
                CToBucket: &req.CToBucket.compress(),
                V_mig: &Vprime_mig.compress(),
                Xfrombucket: &self.migration_pub.X[2].compress(),
                Xtobucket: &self.migration_pub.X[3].compress(),
                D: &req.D.compress(),
                EncIdClient0: &req.EncIdClient.0.compress(),
                EncIdClient1: &req.EncIdClient.1.compress(),
                EncBucket0: &req.EncBucket.0.compress(),
                EncBucket1: &req.EncBucket.1.compress(),
            },
        )?;

        // Ensure the id has not been seen before, and add it to the
        // seen list.
        if self.id_filter.filter(&req.id) == SeenType::Seen {
            return Err(ProofError::VerificationFailure);
        }

        // Blind issuing of the new Lox credential

        // Choose a random server id component to add to the client's
        // (blinded) id component
        let mut rng = rand::thread_rng();
        let id_server = Scalar::random(&mut rng);
        let EncId = (req.EncIdClient.0, req.EncIdClient.1 + &id_server * Btable);

        // Create the trust_level attribute (Scalar), which will be
        // level 1
        let trust_level: Scalar = Scalar::one();

        // Create the level_since attribute (Scalar), which is today's
        // Julian date
        let level_since: Scalar = self.today().into();

        // The invitations_remaining and blockages attributes are 0 for
        // level 0 and level 1 Lox credentials, so we don't need to
        // explicitly create them.

        // Compute the MAC on the visible attributes
        let b = Scalar::random(&mut rng);
        let P = &b * Btable;
        // invites_remaining = blockages = 0
        let QHc = (self.lox_priv.x[0]
            + self.lox_priv.x[3] * trust_level
            + self.lox_priv.x[4] * level_since)
            * P;

        // El Gamal encrypt it to the public key req.D
        let s = Scalar::random(&mut rng);
        let EncQHc = (&s * Btable, QHc + s * req.D);

        // Homomorphically compute the part of the MAC corresponding to
        // the blinded attributes
        let tid = self.lox_priv.x[1] * b;
        let TId = &tid * Atable;
        let EncQId = (tid * EncId.0, tid * EncId.1);
        let tbucket = self.lox_priv.x[2] * b;
        let TBucket = &tbucket * Atable;
        let EncQBucket = (tbucket * req.EncBucket.0, tbucket * req.EncBucket.1);

        // Combine the visible and blinded parts into the encrypted MAC
        let EncQ = (
            EncQHc.0 + EncQId.0 + EncQBucket.0,
            EncQHc.1 + EncQId.1 + EncQBucket.1,
        );

        // Prove to the client that the issuing was done correctly
        let mut transcript = Transcript::new(b"migration issuing");
        let piBlindIssue = blindissue::prove_compact(
            &mut transcript,
            blindissue::ProveAssignments {
                A,
                B,
                P: &P,
                EncQ0: &EncQ.0,
                EncQ1: &EncQ.1,
                X0: &self.lox_pub.X[0],
                Xid: &self.lox_pub.X[1],
                Xbucket: &self.lox_pub.X[2],
                Xlevel: &self.lox_pub.X[3],
                Xsince: &self.lox_pub.X[4],
                Plevel: &(trust_level * P),
                Psince: &(level_since * P),
                TId: &TId,
                TBucket: &TBucket,
                D: &req.D,
                EncId0: &EncId.0,
                EncId1: &EncId.1,
                EncBucket0: &req.EncBucket.0,
                EncBucket1: &req.EncBucket.1,
                x0: &self.lox_priv.x[0],
                x0tilde: &self.lox_priv.x0tilde,
                xid: &self.lox_priv.x[1],
                xbucket: &self.lox_priv.x[2],
                xlevel: &self.lox_priv.x[3],
                xsince: &self.lox_priv.x[4],
                s: &s,
                b: &b,
                tid: &tid,
                tbucket: &tbucket,
            },
        )
        .0;

        Ok(Response {
            level_since,
            P,
            EncQ,
            id_server,
            TId,
            TBucket,
            piBlindIssue,
        })
    }
}
|
||||
|
||||
/// Handle the response to the request, producing the new Lox credential
/// if successful.
///
/// Combines the client's and server's id contributions, verifies the
/// issuer's blind-issuing ZKP, and decrypts the El Gamal-encrypted MAC
/// with the client's private key `state.d`.
pub fn handle_response(
    state: State,
    resp: Response,
    lox_pub: &IssuerPubKey,
) -> Result<cred::Lox, ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Reject a degenerate response whose credential base point is the
    // identity element
    if resp.P.is_identity() {
        return Err(ProofError::VerificationFailure);
    }

    // Add the server's contribution to the id to our own, both in plain
    // and encrypted form
    let id = state.id_client + resp.id_server;
    let EncId = (
        state.EncIdClient.0,
        state.EncIdClient.1 + &resp.id_server * Btable,
    );

    // Verify the proof; the transcript label must match the one the
    // issuer used when creating the proof
    let mut transcript = Transcript::new(b"migration issuing");
    blindissue::verify_compact(
        &resp.piBlindIssue,
        &mut transcript,
        blindissue::VerifyAssignments {
            A: &A.compress(),
            B: &B.compress(),
            P: &resp.P.compress(),
            EncQ0: &resp.EncQ.0.compress(),
            EncQ1: &resp.EncQ.1.compress(),
            X0: &lox_pub.X[0].compress(),
            Xid: &lox_pub.X[1].compress(),
            Xbucket: &lox_pub.X[2].compress(),
            Xlevel: &lox_pub.X[3].compress(),
            Xsince: &lox_pub.X[4].compress(),
            // The new trust level is 1
            Plevel: &(Scalar::one() * resp.P).compress(),
            Psince: &(resp.level_since * resp.P).compress(),
            TId: &resp.TId.compress(),
            TBucket: &resp.TBucket.compress(),
            D: &state.D.compress(),
            EncId0: &EncId.0.compress(),
            EncId1: &EncId.1.compress(),
            EncBucket0: &state.EncBucket.0.compress(),
            EncBucket1: &state.EncBucket.1.compress(),
        },
    )?;

    // Decrypt EncQ with the El Gamal private key d to recover the MAC Q
    let Q = resp.EncQ.1 - (state.d * resp.EncQ.0);

    Ok(cred::Lox {
        P: resp.P,
        Q,
        id,
        bucket: state.to_bucket,
        trust_level: Scalar::one(),
        level_since: resp.level_since,
        invites_remaining: Scalar::zero(),
        blockages: Scalar::zero(),
    })
}
|
|
@ -0,0 +1,334 @@
|
|||
/*! A module for the protocol for the user to redeem an open invitation
|
||||
with the BA (bridge authority) to receive their initial Lox
|
||||
credential.
|
||||
|
||||
The credential will have attributes:
|
||||
|
||||
- id: jointly chosen by the user and BA
|
||||
- bucket: set by the BA
|
||||
- trust_level: 0
|
||||
- level_since: today
|
||||
- invites_remaining: 0
|
||||
- blockages: 0
|
||||
|
||||
*/
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::serde_as;
|
||||
|
||||
use super::super::bridge_table;
|
||||
use super::super::bridge_table::BridgeLine;
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::OPENINV_LENGTH;
|
||||
use super::super::{BridgeAuth, BridgeDb, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
|
||||
/// The request message for this protocol
#[serde_as]
#[derive(Serialize, Deserialize)]
pub struct Request {
    // The open invitation issued by the BridgeDb
    #[serde_as(as = "[_; OPENINV_LENGTH]")]
    invite: [u8; OPENINV_LENGTH],
    // The user's ephemeral El Gamal public key
    D: RistrettoPoint,
    // El Gamal encryption (to D) of the client's id component times B
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    // Proof of correct user blinding
    piUserBlinding: CompactProof,
}
|
||||
|
||||
/// The client state for this protocol, kept between `request` and
/// the handling of the response
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The El Gamal private key corresponding to D
    d: Scalar,
    D: RistrettoPoint,
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    // The client's component of the jointly chosen credential id
    id_client: Scalar,
}
|
||||
|
||||
/// The response message for this protocol
#[derive(Serialize, Deserialize)]
pub struct Response {
    P: RistrettoPoint,
    // El Gamal encryption (to the client's D) of the MAC Q
    EncQ: (RistrettoPoint, RistrettoPoint),
    // The server's component of the jointly chosen credential id
    id_server: Scalar,
    TId: RistrettoPoint,
    // The bucket attribute assigned by the BA
    bucket: Scalar,
    // Today's Julian date, as assigned by the BA
    level_since: Scalar,
    piBlindIssue: CompactProof,
    // The first bridge line of the user's assigned bucket
    bridge_line: BridgeLine,
}
|
||||
|
||||
// The userblinding ZKP: proves that D is a well-formed El Gamal public
// key and that EncIdClient is an encryption to D of id_client times B
define_proof! {
    userblinding,
    "Open Invitation User Blinding",
    (d, eid_client, id_client),
    (D, EncIdClient0, EncIdClient1),
    (B) :
    D = (d*B),
    EncIdClient0 = (eid_client*B),
    EncIdClient1 = (id_client*B + eid_client*D)
}
|
||||
|
||||
// The issuing ZKP: proves that the encrypted MAC EncQ was computed with
// the issuer private keys consistent with the published X values.  The
// pair of statements TId = (b*Xid) and TId = (tid*A) ties tid to b*xid.
define_proof! {
    blindissue,
    "Open Invitation Blind Issuing",
    (x0, x0tilde, xid, xbucket, xsince, s, b, tid),
    (P, EncQ0, EncQ1, X0, Xid, Xbucket, Xsince, Pbucket, Psince, TId,
     D, EncId0, EncId1),
    (A, B) :
    Xid = (xid*A),
    Xbucket = (xbucket*A),
    Xsince = (xsince*A),
    X0 = (x0*B + x0tilde*A),
    P = (b*B),
    TId = (b*Xid),
    TId = (tid*A),
    EncQ0 = (s*B + tid*EncId0),
    EncQ1 = (s*D + tid*EncId1 + x0*P + xbucket*Pbucket + xsince*Psince)
}
|
||||
|
||||
/// Submit an open invitation issued by the BridgeDb to receive your
/// first Lox credential.
///
/// Picks an ephemeral El Gamal keypair and a random client id
/// component, encrypts the id component to the new public key, and
/// proves the blinding was done correctly.  Returns the request to
/// send to the BA and the state the client needs to process the
/// response.
pub fn request(invite: &[u8; OPENINV_LENGTH]) -> (Request, State) {
    let B: &RistrettoPoint = &CMZ_B;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Pick an ElGamal keypair
    let mut rng = rand::thread_rng();
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Pick a random client component of the id
    let id_client = Scalar::random(&mut rng);

    // Encrypt it (times the basepoint B) to the ElGamal public key D we
    // just created
    let eid_client = Scalar::random(&mut rng);
    let EncIdClient = (&eid_client * Btable, &id_client * Btable + eid_client * D);

    // Construct the proof of correct user blinding
    let mut transcript = Transcript::new(b"open invite user blinding");
    let piUserBlinding = userblinding::prove_compact(
        &mut transcript,
        userblinding::ProveAssignments {
            B,
            D: &D,
            EncIdClient0: &EncIdClient.0,
            EncIdClient1: &EncIdClient.1,
            d: &d,
            eid_client: &eid_client,
            id_client: &id_client,
        },
    )
    .0;
    (
        Request {
            invite: *invite,
            D,
            EncIdClient,
            piUserBlinding,
        },
        State {
            d,
            D,
            EncIdClient,
            id_client,
        },
    )
}
|
||||
|
||||
impl BridgeAuth {
    /// Receive an open invitation issued by the BridgeDb and if it is
    /// valid and fresh, issue a Lox credential at trust level 0.
    ///
    /// Verifies the BridgeDb signature and the user's blinding proof,
    /// then blind-issues the credential MAC (encrypted to the user's
    /// ephemeral key `req.D`) along with a proof of correct issuing.
    pub fn handle_open_invite(&mut self, req: Request) -> Result<Response, ProofError> {
        // Check the signature on the open_invite. We manually match
        // here because we're changing the Err type from SignatureError
        // to ProofError
        let (invite_id, bucket_id_u32) = match BridgeDb::verify(req.invite, self.bridgedb_pub) {
            Ok(res) => res,
            Err(_) => return Err(ProofError::VerificationFailure),
        };
        let bucket_id: usize = bucket_id_u32 as usize;

        // Only proceed if the invite_id is fresh
        if self.openinv_filter.filter(&invite_id) == SeenType::Seen {
            return Err(ProofError::VerificationFailure);
        }

        // And also check that the bucket id is valid
        if bucket_id >= self.bridge_table.num_buckets() {
            return Err(ProofError::VerificationFailure);
        }

        let A: &RistrettoPoint = &CMZ_A;
        let B: &RistrettoPoint = &CMZ_B;
        let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
        let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

        // Next check the proof in the request
        let mut transcript = Transcript::new(b"open invite user blinding");
        userblinding::verify_compact(
            &req.piUserBlinding,
            &mut transcript,
            userblinding::VerifyAssignments {
                B: &B.compress(),
                EncIdClient0: &req.EncIdClient.0.compress(),
                EncIdClient1: &req.EncIdClient.1.compress(),
                D: &req.D.compress(),
            },
        )?;

        // Choose a random server id component to add to the client's
        // (blinded) id component
        let mut rng = rand::thread_rng();
        let id_server = Scalar::random(&mut rng);
        let EncId = (req.EncIdClient.0, req.EncIdClient.1 + &id_server * Btable);

        // Create the bucket attribute (Scalar), which is a combination
        // of the bucket id (u32) and the bucket's decryption key ([u8; 16])
        let bucket_key = self.bridge_table.keys[bucket_id];
        let bucket: Scalar = bridge_table::to_scalar(bucket_id_u32, &bucket_key);
        // Level-0 users get the first (only usable) bridge of the bucket
        let bridge_line = self.bridge_table.buckets[bucket_id][0];

        // Create the level_since attribute (Scalar), which is today's
        // Julian date
        let level_since: Scalar = self.today().into();

        // Compute the MAC on the visible attributes
        let b = Scalar::random(&mut rng);
        let P = &b * Btable;
        // trust_level = invites_remaining = blockages = 0, so only the
        // constant (x[0]), bucket (x[2]), and level_since (x[4]) terms
        // of the MAC remain
        let QHc =
            (self.lox_priv.x[0] + self.lox_priv.x[2] * bucket + self.lox_priv.x[4] * level_since)
                * P;

        // El Gamal encrypt it to the public key req.D
        let s = Scalar::random(&mut rng);
        let EncQHc = (&s * Btable, QHc + s * req.D);

        // Homomorphically compute the part of the MAC corresponding to
        // the blinded id attribute
        let tid = self.lox_priv.x[1] * b;
        let TId = &tid * Atable;
        let EncQId = (tid * EncId.0, tid * EncId.1);

        // Combine the two encrypted MAC shares componentwise
        let EncQ = (EncQHc.0 + EncQId.0, EncQHc.1 + EncQId.1);

        // Prove to the client that the credential was issued correctly
        let mut transcript = Transcript::new(b"open invite issuing");
        let piBlindIssue = blindissue::prove_compact(
            &mut transcript,
            blindissue::ProveAssignments {
                A,
                B,
                P: &P,
                EncQ0: &EncQ.0,
                EncQ1: &EncQ.1,
                X0: &self.lox_pub.X[0],
                Xid: &self.lox_pub.X[1],
                Xbucket: &self.lox_pub.X[2],
                Xsince: &self.lox_pub.X[4],
                Pbucket: &(bucket * P),
                Psince: &(level_since * P),
                TId: &TId,
                D: &req.D,
                EncId0: &EncId.0,
                EncId1: &EncId.1,
                x0: &self.lox_priv.x[0],
                x0tilde: &self.lox_priv.x0tilde,
                xid: &self.lox_priv.x[1],
                xbucket: &self.lox_priv.x[2],
                xsince: &self.lox_priv.x[4],
                s: &s,
                b: &b,
                tid: &tid,
            },
        )
        .0;

        Ok(Response {
            P,
            EncQ,
            id_server,
            TId,
            bucket,
            level_since,
            piBlindIssue,
            bridge_line,
        })
    }
}
|
||||
|
||||
/// Handle the response to the request, producing the desired Lox
/// credential if successful.
///
/// Verifies the issuer's blind-issuing proof, decrypts the encrypted
/// MAC with the client's ElGamal private key, and assembles the
/// trust-level-0 Lox credential together with the user's BridgeLine.
pub fn handle_response(
    state: State,
    resp: Response,
    lox_pub: &IssuerPubKey,
) -> Result<(cred::Lox, BridgeLine), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // A MAC with P equal to the identity would verify against any Q;
    // reject it outright
    if resp.P.is_identity() {
        return Err(ProofError::VerificationFailure);
    }

    // Add the server's contribution to the id to our own, both in plain
    // and encrypted form
    let id = state.id_client + resp.id_server;
    let EncId = (
        state.EncIdClient.0,
        state.EncIdClient.1 + &resp.id_server * Btable,
    );

    // Verify the proof
    let mut transcript = Transcript::new(b"open invite issuing");
    blindissue::verify_compact(
        &resp.piBlindIssue,
        &mut transcript,
        blindissue::VerifyAssignments {
            A: &A.compress(),
            B: &B.compress(),
            P: &resp.P.compress(),
            EncQ0: &resp.EncQ.0.compress(),
            EncQ1: &resp.EncQ.1.compress(),
            X0: &lox_pub.X[0].compress(),
            Xid: &lox_pub.X[1].compress(),
            Xbucket: &lox_pub.X[2].compress(),
            Xsince: &lox_pub.X[4].compress(),
            Pbucket: &(resp.bucket * resp.P).compress(),
            Psince: &(resp.level_since * resp.P).compress(),
            TId: &resp.TId.compress(),
            D: &state.D.compress(),
            EncId0: &EncId.0.compress(),
            EncId1: &EncId.1.compress(),
        },
    )?;

    // Decrypt EncQ (standard ElGamal decryption with private key d)
    let Q = resp.EncQ.1 - (state.d * resp.EncQ.0);

    Ok((
        cred::Lox {
            P: resp.P,
            Q,
            id,
            bucket: resp.bucket,
            // New open-invite users always start at trust level 0 with
            // no invitations and no recorded blockages
            trust_level: Scalar::zero(),
            level_since: resp.level_since,
            invites_remaining: Scalar::zero(),
            blockages: Scalar::zero(),
        },
        resp.bridge_line,
    ))
}
|
|
@ -0,0 +1,619 @@
|
|||
/*! A module for the protocol for a new user to redeem an Invitation
|
||||
credential. The user will start at trust level 1 (instead of 0 for
|
||||
untrusted uninvited users).
|
||||
|
||||
The user presents the Invitation credential:
|
||||
- id: revealed
|
||||
- date: blinded, but proved in ZK to be at most INVITATION_EXPIRY days ago
|
||||
- bucket: blinded
|
||||
- blockages: blinded
|
||||
|
||||
and a new Lox credential to be issued:
|
||||
|
||||
- id: jointly chosen by the user and BA
|
||||
- bucket: blinded, but proved in ZK that it's the same as in the
|
||||
Invitation credential above
|
||||
- trust_level: revealed to be 1
|
||||
- level_since: today
|
||||
- invites_remaining: revealed to be 0
|
||||
- blockages: blinded, but proved in ZK that it's the same as in the
|
||||
  Invitation credential above
|
||||
|
||||
*/
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::{pt_dbl, scalar_dbl, scalar_u32};
|
||||
use super::super::{BridgeAuth, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
|
||||
/// Invitations must be used within this many days of being issued.
/// Note that if you change this number to be larger than 15, you must
/// also add bits to the zero knowledge proof.
// The range proof below encodes diffdays in 4 bits (g0..g3), so the
// expiry window must fit in 0..=15.
pub const INVITATION_EXPIRY: u32 = 15;
|
||||
|
||||
/// The request message for redeeming an Invitation credential.
///
/// Carries a blind showing of the Invitation credential, the user's
/// blinding of the Lox credential to be issued, the commitments for
/// the 4-bit expiry range proof, and the combined ZKP tying them
/// together.  Note that CG0 is deliberately omitted: the verifier
/// recomputes it from CDate, today's date, and CG1..CG3.
#[derive(Serialize, Deserialize)]
pub struct Request {
    // Fields for showing the Invitation credential
    P: RistrettoPoint,
    // The invitation id is revealed so the BA can detect reuse
    inv_id: Scalar,
    CDate: RistrettoPoint,
    CBucket: RistrettoPoint,
    CBlockages: RistrettoPoint,
    CQ: RistrettoPoint,

    // Fields for the inequality proof
    // date + INVITATION_EXPIRY >= today
    CG1: RistrettoPoint,
    CG2: RistrettoPoint,
    CG3: RistrettoPoint,
    CG0sq: RistrettoPoint,
    CG1sq: RistrettoPoint,
    CG2sq: RistrettoPoint,
    CG3sq: RistrettoPoint,

    // Fields for user blinding of the Lox credential to be issued
    D: RistrettoPoint,
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    EncBlockages: (RistrettoPoint, RistrettoPoint),

    // The combined ZKP
    piUser: CompactProof,
}
|
||||
|
||||
/// Client-side state retained between sending the redeem-invite
/// request and handling the BA's response.
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The client's ephemeral ElGamal keypair (d, D)
    d: Scalar,
    D: RistrettoPoint,
    // Ciphertexts echoed back so the response proof can be verified
    EncIdClient: (RistrettoPoint, RistrettoPoint),
    EncBucket: (RistrettoPoint, RistrettoPoint),
    EncBlockages: (RistrettoPoint, RistrettoPoint),
    // The client's share of the new credential id
    id_client: Scalar,
    // Attributes carried over from the Invitation credential
    bucket: Scalar,
    blockages: Scalar,
}
|
||||
|
||||
/// The BA's response to a redeem-invite request: the blind-issued
/// (encrypted) MAC on the new Lox credential plus a proof of correct
/// issuing.
#[derive(Serialize, Deserialize)]
pub struct Response {
    // The fields for the new Lox credential; the new trust level is 1
    // and the new invites_remaining is 0, so we don't have to include
    // them here explicitly
    P: RistrettoPoint,
    EncQ: (RistrettoPoint, RistrettoPoint),
    id_server: Scalar,
    level_since: Scalar,
    TId: RistrettoPoint,
    TBucket: RistrettoPoint,
    TBlockages: RistrettoPoint,

    // The ZKP
    piBlindIssue: CompactProof,
}
|
||||
|
||||
// The combined client-side proof for redeeming an invitation: a blind
// showing of the Invitation credential, correct user blinding of the
// new Lox credential, and a 4-bit range proof that the invitation is
// at most INVITATION_EXPIRY days old.
define_proof! {
    requestproof,
    "Redeem Invite Request",
    (date, bucket, blockages, zdate, zbucket, zblockages, negzQ,
     d, eid_client, ebucket, eblockages, id_client,
     g0, g1, g2, g3,
     zg0, zg1, zg2, zg3,
     wg0, wg1, wg2, wg3,
     yg0, yg1, yg2, yg3),
    (P, CDate, CBucket, CBlockages, V, Xdate, Xbucket, Xblockages,
     D, EncIdClient0, EncIdClient1, EncBucket0, EncBucket1,
     EncBlockages0, EncBlockages1,
     CG0, CG1, CG2, CG3,
     CG0sq, CG1sq, CG2sq, CG3sq),
    (A, B):
    // Blind showing of the Invitation credential
    CDate = (date*P + zdate*A),
    CBucket = (bucket*P + zbucket*A),
    CBlockages = (blockages*P + zblockages*A),
    // The "error factor" V binds the commitment blinding factors to
    // the issuer's public keys; without this equation V, Xdate,
    // Xbucket, Xblockages, and negzQ are unconstrained and the blind
    // showing is not tied to the credential MAC at all.  (Compare the
    // analogous V equation in the trust-promotion requestproof.)
    V = (zdate*Xdate + zbucket*Xbucket + zblockages*Xblockages + negzQ*A),
    // User blinding of the Lox credential to be issued
    D = (d*B),
    EncIdClient0 = (eid_client*B),
    EncIdClient1 = (id_client*B + eid_client*D),
    EncBucket0 = (ebucket*B),
    EncBucket1 = (bucket*B + ebucket*D),
    EncBlockages0 = (eblockages*B),
    EncBlockages1 = (blockages*B + eblockages*D),
    // Prove CDate encodes a value at most INVITATION_EXPIRY
    // days ago: first prove each of g0, ..., g3 is a bit by
    // proving that gi = gi^2
    CG0 = (g0*P + zg0*A), CG0sq = (g0*CG0 + wg0*A), CG0sq = (g0*P + yg0*A),
    CG1 = (g1*P + zg1*A), CG1sq = (g1*CG1 + wg1*A), CG1sq = (g1*P + yg1*A),
    CG2 = (g2*P + zg2*A), CG2sq = (g2*CG2 + wg2*A), CG2sq = (g2*P + yg2*A),
    CG3 = (g3*P + zg3*A), CG3sq = (g3*CG3 + wg3*A), CG3sq = (g3*P + yg3*A)
    // Then we'll check that today*P + CG0 + 2*CG1 + 4*CG2 + 8*CG3 =
    // CDate + INVITATION_EXPIRY*P by having the verifier
    // plug in CDate + INVITATION_EXPIRY*P - (today*P + 2*CG1 + 4*CG2
    // + 8*CG3) as its value of CG0.
}
|
||||
|
||||
// The issuer's proof of correct blind issuing of the new Lox
// credential: it proves knowledge of the private keys (x*) behind the
// published public keys (X*), and that (EncQ0, EncQ1) is an ElGamal
// encryption (to the user's key D) of a valid MAC over the revealed
// and blinded attributes.
define_proof! {
    blindissue,
    "Redeem Invite Issuing",
    (x0, x0tilde, xid, xbucket, xlevel, xsince, xblockages,
     s, b, tid, tbucket, tblockages),
    (P, EncQ0, EncQ1, X0, Xid, Xbucket, Xlevel, Xsince, Xblockages,
     Psince, TId, TBucket, TBlockages,
     D, EncId0, EncId1, EncBucket0, EncBucket1, EncBlockages0, EncBlockages1),
    (A, B):
    Xid = (xid*A),
    Xbucket = (xbucket*A),
    Xlevel = (xlevel*A),
    Xsince = (xsince*A),
    Xblockages = (xblockages*A),
    X0 = (x0*B + x0tilde*A),
    P = (b*B),
    // Each pair of TX equations proves tx = x*b for the corresponding
    // blinded attribute
    TId = (b*Xid),
    TId = (tid*A),
    TBucket = (b*Xbucket),
    TBucket = (tbucket*A),
    TBlockages = (b*Xblockages),
    TBlockages = (tblockages*A),
    EncQ0 = (s*B + tid*EncId0 + tbucket*EncBucket0 + tblockages*EncBlockages0),
    // level=1 (so Plevel = P) and invremain=0 (so the term is omitted)
    EncQ1 = (s*D + tid*EncId1 + tbucket*EncBucket1
        + tblockages*EncBlockages1 + x0*P + xlevel*P + xsince*Psince)
}
|
||||
|
||||
/// Create a request to redeem an Invitation credential for a new Lox
/// credential at trust level 1.
///
/// `today` is the current date in the same units used by the BA
/// (Julian date per the module-level conventions — TODO confirm at the
/// caller).  Fails with `ProofError::VerificationFailure` if the
/// invitation is expired (`date + INVITATION_EXPIRY < today`), dated
/// in the future, or its date attribute does not fit in a u32.
///
/// Returns the `Request` to send to the BA and the client `State`
/// needed to process the response.
pub fn request(
    inv_cred: &cred::Invitation,
    invitation_pub: &IssuerPubKey,
    today: u32,
) -> Result<(Request, State), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Ensure the credential can be correctly shown: it must be the case
    // that date + INVITATION_EXPIRY >= today.
    let date: u32 = match scalar_u32(&inv_cred.date) {
        Some(v) => v,
        None => return Err(ProofError::VerificationFailure),
    };
    if date + INVITATION_EXPIRY < today {
        return Err(ProofError::VerificationFailure);
    }
    let diffdays = date + INVITATION_EXPIRY - today;
    // If diffdays > 15, then since INVITATION_EXPIRY <= 15, then date
    // must be in the future. Reject.
    if diffdays > 15 {
        return Err(ProofError::VerificationFailure);
    }

    // Blind showing the Invitation credential

    // Reblind P and Q
    let mut rng = rand::thread_rng();
    let t = Scalar::random(&mut rng);
    let P = t * inv_cred.P;
    let Q = t * inv_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zdate = Scalar::random(&mut rng);
    let zbucket = Scalar::random(&mut rng);
    let zblockages = Scalar::random(&mut rng);
    let CDate = inv_cred.date * P + &zdate * Atable;
    let CBucket = inv_cred.bucket * P + &zbucket * Atable;
    let CBlockages = inv_cred.blockages * P + &zblockages * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ = Scalar::random(&mut rng);
    let CQ = Q - &negzQ * Atable;

    // Compute the "error factor"
    let V = zdate * invitation_pub.X[2]
        + zbucket * invitation_pub.X[3]
        + zblockages * invitation_pub.X[4]
        + &negzQ * Atable;

    // User blinding for the Lox certificate to be issued

    // Pick an ElGamal keypair
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Pick a random client component of the id
    let id_client = Scalar::random(&mut rng);

    // Encrypt it (times the basepoint B) to the ElGamal public key D we
    // just created
    let eid_client = Scalar::random(&mut rng);
    let EncIdClient = (&eid_client * Btable, &id_client * Btable + eid_client * D);

    // Encrypt the other blinded fields (times B) to D as well
    let ebucket = Scalar::random(&mut rng);
    let EncBucket = (&ebucket * Btable, &inv_cred.bucket * Btable + ebucket * D);
    let eblockages = Scalar::random(&mut rng);
    let EncBlockages = (
        &eblockages * Btable,
        &inv_cred.blockages * Btable + eblockages * D,
    );

    // The range proof that 0 <= diffdays <= 15

    // Extract the 4 bits from diffdays
    let g0: Scalar = (diffdays & 1).into();
    let g1: Scalar = ((diffdays >> 1) & 1).into();
    let g2: Scalar = ((diffdays >> 2) & 1).into();
    let g3: Scalar = ((diffdays >> 3) & 1).into();

    // Pick random factors for the Pedersen commitments
    // (zg0 is derived below rather than chosen at random)
    let wg0 = Scalar::random(&mut rng);
    let zg1 = Scalar::random(&mut rng);
    let wg1 = Scalar::random(&mut rng);
    let zg2 = Scalar::random(&mut rng);
    let wg2 = Scalar::random(&mut rng);
    let zg3 = Scalar::random(&mut rng);
    let wg3 = Scalar::random(&mut rng);

    // Compute zg0 to cancel things out as
    // zg0 = zdate - (2*zg1 + 4*zg2 + 8*zg3)
    // but use Horner's method
    let zg0 = zdate - scalar_dbl(&(scalar_dbl(&(scalar_dbl(&zg3) + zg2)) + zg1));

    // Blinding factors for the "squared" bit commitments; since each
    // gi is 0 or 1, gi^2 = gi, and CGisq recommits to gi
    let yg0 = wg0 + g0 * zg0;
    let yg1 = wg1 + g1 * zg1;
    let yg2 = wg2 + g2 * zg2;
    let yg3 = wg3 + g3 * zg3;

    let CG0 = g0 * P + &zg0 * Atable;
    let CG1 = g1 * P + &zg1 * Atable;
    let CG2 = g2 * P + &zg2 * Atable;
    let CG3 = g3 * P + &zg3 * Atable;

    let CG0sq = g0 * P + &yg0 * Atable;
    let CG1sq = g1 * P + &yg1 * Atable;
    let CG2sq = g2 * P + &yg2 * Atable;
    let CG3sq = g3 * P + &yg3 * Atable;

    // Construct the proof
    let mut transcript = Transcript::new(b"redeem invite request");
    let piUser = requestproof::prove_compact(
        &mut transcript,
        requestproof::ProveAssignments {
            A,
            B,
            P: &P,
            CDate: &CDate,
            CBucket: &CBucket,
            CBlockages: &CBlockages,
            V: &V,
            Xdate: &invitation_pub.X[2],
            Xbucket: &invitation_pub.X[3],
            Xblockages: &invitation_pub.X[4],
            D: &D,
            EncIdClient0: &EncIdClient.0,
            EncIdClient1: &EncIdClient.1,
            EncBucket0: &EncBucket.0,
            EncBucket1: &EncBucket.1,
            EncBlockages0: &EncBlockages.0,
            EncBlockages1: &EncBlockages.1,
            CG0: &CG0,
            CG1: &CG1,
            CG2: &CG2,
            CG3: &CG3,
            CG0sq: &CG0sq,
            CG1sq: &CG1sq,
            CG2sq: &CG2sq,
            CG3sq: &CG3sq,
            date: &inv_cred.date,
            bucket: &inv_cred.bucket,
            blockages: &inv_cred.blockages,
            zdate: &zdate,
            zbucket: &zbucket,
            zblockages: &zblockages,
            negzQ: &negzQ,
            d: &d,
            eid_client: &eid_client,
            ebucket: &ebucket,
            eblockages: &eblockages,
            id_client: &id_client,
            g0: &g0,
            g1: &g1,
            g2: &g2,
            g3: &g3,
            zg0: &zg0,
            zg1: &zg1,
            zg2: &zg2,
            zg3: &zg3,
            wg0: &wg0,
            wg1: &wg1,
            wg2: &wg2,
            wg3: &wg3,
            yg0: &yg0,
            yg1: &yg1,
            yg2: &yg2,
            yg3: &yg3,
        },
    )
    .0;

    Ok((
        // Note: CG0 is not sent; the BA recomputes it itself
        Request {
            P,
            inv_id: inv_cred.inv_id,
            CDate,
            CBucket,
            CBlockages,
            CQ,
            D,
            EncIdClient,
            EncBucket,
            EncBlockages,
            CG1,
            CG2,
            CG3,
            CG0sq,
            CG1sq,
            CG2sq,
            CG3sq,
            piUser,
        },
        State {
            d,
            D,
            EncIdClient,
            EncBucket,
            EncBlockages,
            id_client,
            bucket: inv_cred.bucket,
            blockages: inv_cred.blockages,
        },
    ))
}
|
||||
|
||||
impl BridgeAuth {
    /// Receive a redeem invite request
    ///
    /// Verifies the blind showing of the Invitation credential and the
    /// expiry range proof, checks that the invitation id has not been
    /// used before, and blind-issues a new Lox credential at trust
    /// level 1 (with 0 invitations remaining).
    pub fn handle_redeem_invite(&mut self, req: Request) -> Result<Response, ProofError> {
        let A: &RistrettoPoint = &CMZ_A;
        let B: &RistrettoPoint = &CMZ_B;
        let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
        let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

        // A showing with P equal to the identity is degenerate; reject
        if req.P.is_identity() {
            return Err(ProofError::VerificationFailure);
        }

        let today: Scalar = self.today().into();

        // Recompute the "error factor" using knowledge of our own
        // (the issuer's) private key instead of knowledge of the
        // hidden attributes
        let Vprime = (self.invitation_priv.x[0] + self.invitation_priv.x[1] * req.inv_id) * req.P
            + self.invitation_priv.x[2] * req.CDate
            + self.invitation_priv.x[3] * req.CBucket
            + self.invitation_priv.x[4] * req.CBlockages
            - req.CQ;

        // Recompute CG0 using Horner's method
        let expiry: Scalar = INVITATION_EXPIRY.into();
        let CG0prime = (expiry - today) * req.P + req.CDate
            - pt_dbl(&(pt_dbl(&(pt_dbl(&req.CG3) + req.CG2)) + req.CG1));

        // Verify the ZKP
        let mut transcript = Transcript::new(b"redeem invite request");
        requestproof::verify_compact(
            &req.piUser,
            &mut transcript,
            requestproof::VerifyAssignments {
                A: &A.compress(),
                B: &B.compress(),
                P: &req.P.compress(),
                CDate: &req.CDate.compress(),
                CBucket: &req.CBucket.compress(),
                CBlockages: &req.CBlockages.compress(),
                V: &Vprime.compress(),
                Xdate: &self.invitation_pub.X[2].compress(),
                Xbucket: &self.invitation_pub.X[3].compress(),
                Xblockages: &self.invitation_pub.X[4].compress(),
                D: &req.D.compress(),
                EncIdClient0: &req.EncIdClient.0.compress(),
                EncIdClient1: &req.EncIdClient.1.compress(),
                EncBucket0: &req.EncBucket.0.compress(),
                EncBucket1: &req.EncBucket.1.compress(),
                EncBlockages0: &req.EncBlockages.0.compress(),
                EncBlockages1: &req.EncBlockages.1.compress(),
                CG0: &CG0prime.compress(),
                CG1: &req.CG1.compress(),
                CG2: &req.CG2.compress(),
                CG3: &req.CG3.compress(),
                CG0sq: &req.CG0sq.compress(),
                CG1sq: &req.CG1sq.compress(),
                CG2sq: &req.CG2sq.compress(),
                CG3sq: &req.CG3sq.compress(),
            },
        )?;

        // Ensure the id has not been seen before, and add it to the
        // invite id seen list.
        if self.inv_id_filter.filter(&req.inv_id) == SeenType::Seen {
            return Err(ProofError::VerificationFailure);
        }

        // Blind issuing of the new Lox credential

        // Choose a random server id component to add to the client's
        // (blinded) id component
        let mut rng = rand::thread_rng();
        let id_server = Scalar::random(&mut rng);
        let EncId = (req.EncIdClient.0, req.EncIdClient.1 + &id_server * Btable);

        // The trust level for invitees is always 1
        let level = Scalar::one();

        // The invites remaining for invitees is always 0 (as
        // appropriate for trust level 1), so we don't need to actually
        // construct it

        // Compute the MAC on the visible attributes
        let b = Scalar::random(&mut rng);
        let P = &b * Btable;
        // x[3] is the trust_level key and x[4] the level_since key;
        // level_since for the new credential is today
        let QHc =
            (self.lox_priv.x[0] + self.lox_priv.x[3] * level + self.lox_priv.x[4] * today) * P;

        // El Gamal encrypt it to the public key req.D
        let s = Scalar::random(&mut rng);
        let EncQHc = (&s * Btable, QHc + s * req.D);

        // Homomorphically compute the part of the MAC corresponding to
        // the blinded attributes
        let tid = self.lox_priv.x[1] * b;
        let TId = &tid * Atable;
        let EncQId = (tid * EncId.0, tid * EncId.1);
        let tbucket = self.lox_priv.x[2] * b;
        let TBucket = &tbucket * Atable;
        let EncQBucket = (tbucket * req.EncBucket.0, tbucket * req.EncBucket.1);
        let tblockages = self.lox_priv.x[6] * b;
        let TBlockages = &tblockages * Atable;
        let EncQBlockages = (
            tblockages * req.EncBlockages.0,
            tblockages * req.EncBlockages.1,
        );

        // Combine all the encrypted MAC shares componentwise
        let EncQ = (
            EncQHc.0 + EncQId.0 + EncQBucket.0 + EncQBlockages.0,
            EncQHc.1 + EncQId.1 + EncQBucket.1 + EncQBlockages.1,
        );

        // Prove to the client that the credential was issued correctly
        let mut transcript = Transcript::new(b"redeem invite issuing");
        let piBlindIssue = blindissue::prove_compact(
            &mut transcript,
            blindissue::ProveAssignments {
                A,
                B,
                P: &P,
                EncQ0: &EncQ.0,
                EncQ1: &EncQ.1,
                X0: &self.lox_pub.X[0],
                Xid: &self.lox_pub.X[1],
                Xbucket: &self.lox_pub.X[2],
                Xlevel: &self.lox_pub.X[3],
                Xsince: &self.lox_pub.X[4],
                Xblockages: &self.lox_pub.X[6],
                Psince: &(today * P),
                TId: &TId,
                TBucket: &TBucket,
                TBlockages: &TBlockages,
                D: &req.D,
                EncId0: &EncId.0,
                EncId1: &EncId.1,
                EncBucket0: &req.EncBucket.0,
                EncBucket1: &req.EncBucket.1,
                EncBlockages0: &req.EncBlockages.0,
                EncBlockages1: &req.EncBlockages.1,
                x0: &self.lox_priv.x[0],
                x0tilde: &self.lox_priv.x0tilde,
                xid: &self.lox_priv.x[1],
                xbucket: &self.lox_priv.x[2],
                xlevel: &self.lox_priv.x[3],
                xsince: &self.lox_priv.x[4],
                xblockages: &self.lox_priv.x[6],
                s: &s,
                b: &b,
                tid: &tid,
                tbucket: &tbucket,
                tblockages: &tblockages,
            },
        )
        .0;

        Ok(Response {
            P,
            EncQ,
            id_server,
            level_since: today,
            TId,
            TBucket,
            TBlockages,
            piBlindIssue,
        })
    }
}
|
||||
|
||||
/// Handle the response to the request, producing the new Lox credential
/// if successful.
///
/// Verifies the issuer's blind-issuing proof, decrypts the encrypted
/// MAC with the client's ElGamal private key, and assembles the new
/// trust-level-1 Lox credential, carrying over the bucket and
/// blockages attributes from the redeemed Invitation credential.
pub fn handle_response(
    state: State,
    resp: Response,
    lox_pub: &IssuerPubKey,
) -> Result<cred::Lox, ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // A MAC with P equal to the identity would verify against any Q;
    // reject it
    if resp.P.is_identity() {
        return Err(ProofError::VerificationFailure);
    }

    // Add the server's contribution to the id to our own, both in plain
    // and encrypted form
    let id = state.id_client + resp.id_server;
    let EncId = (
        state.EncIdClient.0,
        state.EncIdClient.1 + &resp.id_server * Btable,
    );

    // Verify the proof
    let mut transcript = Transcript::new(b"redeem invite issuing");
    blindissue::verify_compact(
        &resp.piBlindIssue,
        &mut transcript,
        blindissue::VerifyAssignments {
            A: &A.compress(),
            B: &B.compress(),
            P: &resp.P.compress(),
            EncQ0: &resp.EncQ.0.compress(),
            EncQ1: &resp.EncQ.1.compress(),
            X0: &lox_pub.X[0].compress(),
            Xid: &lox_pub.X[1].compress(),
            Xbucket: &lox_pub.X[2].compress(),
            Xlevel: &lox_pub.X[3].compress(),
            Xsince: &lox_pub.X[4].compress(),
            Xblockages: &lox_pub.X[6].compress(),
            Psince: &(resp.level_since * resp.P).compress(),
            TId: &resp.TId.compress(),
            TBucket: &resp.TBucket.compress(),
            TBlockages: &resp.TBlockages.compress(),
            D: &state.D.compress(),
            EncId0: &EncId.0.compress(),
            EncId1: &EncId.1.compress(),
            EncBucket0: &state.EncBucket.0.compress(),
            EncBucket1: &state.EncBucket.1.compress(),
            EncBlockages0: &state.EncBlockages.0.compress(),
            EncBlockages1: &state.EncBlockages.1.compress(),
        },
    )?;

    // Decrypt EncQ (standard ElGamal decryption with private key d)
    let Q = resp.EncQ.1 - (state.d * resp.EncQ.0);

    Ok(cred::Lox {
        P: resp.P,
        Q,
        id,
        bucket: state.bucket,
        // Redeemed invitees always start at trust level 1 with no
        // invitations remaining
        trust_level: Scalar::one(),
        level_since: resp.level_since,
        invites_remaining: Scalar::zero(),
        blockages: state.blockages,
    })
}
|
|
@ -0,0 +1,561 @@
|
|||
/*! A module for the protocol for the user to get promoted from
|
||||
untrusted (trust level 0) to trusted (trust level 1).
|
||||
|
||||
They are allowed to do this as long as UNTRUSTED_INTERVAL days have
|
||||
passed since they obtained their level 0 Lox credential, and their
|
||||
bridge (level 0 users get put in a one-bridge bucket) has not been
|
||||
blocked. (Blocked bridges in one-bridge buckets will have their entries
|
||||
removed from the bridge authority's migration table.)
|
||||
|
||||
The user presents their current Lox credential:
|
||||
- id: revealed
|
||||
- bucket: blinded
|
||||
- trust_level: revealed to be 0
|
||||
- level_since: blinded, but proved in ZK that it's at least
|
||||
UNTRUSTED_INTERVAL days ago
|
||||
- invites_remaining: revealed to be 0
|
||||
- blockages: revealed to be 0
|
||||
|
||||
They will receive in return the encrypted MAC (Pk, EncQk) for their
|
||||
implicit Migration Key credential with attributes id and bucket,
|
||||
along with a HashMap of encrypted Migration credentials. For each
|
||||
(from_i, to_i) in the BA's migration list, there will be an entry in
|
||||
the HashMap with key H1(id, from_attr_i, Qk_i) and value
|
||||
Enc_{H2(id, from_attr_i, Qk_i)}(to_attr_i, P_i, Q_i). Here H1 and H2
|
||||
are the first 16 bytes and the second 16 bytes respectively of the
|
||||
SHA256 hash of the input, P_i and Q_i are a MAC on the Migration
|
||||
credential with attributes id, from_attr_i, and to_attr_i. Qk_i is the
|
||||
value EncQk would decrypt to if bucket were equal to from_attr_i. */
|
||||
|
||||
use curve25519_dalek::ristretto::RistrettoBasepointTable;
|
||||
use curve25519_dalek::ristretto::RistrettoPoint;
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::IsIdentity;
|
||||
|
||||
use zkp::CompactProof;
|
||||
use zkp::ProofError;
|
||||
use zkp::Transcript;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::serde_as;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use super::super::cred;
|
||||
use super::super::dup_filter::SeenType;
|
||||
use super::super::migration_table;
|
||||
use super::super::{pt_dbl, scalar_dbl, scalar_u32};
|
||||
use super::super::{BridgeAuth, IssuerPubKey};
|
||||
use super::super::{CMZ_A, CMZ_A_TABLE, CMZ_B, CMZ_B_TABLE};
|
||||
|
||||
/// The minimum number of days a user has to be at trust level 0
/// (untrusted) with their (single) bridge unblocked before they can
/// move to level 1.
///
/// The implementation also puts an upper bound of UNTRUSTED_INTERVAL +
/// 511 days, which is not unreasonable; we want users to be engaging
/// with the system in order to move up trust levels.
// The 511-day bound comes from the 9-bit range proof (g0..g8) below.
pub const UNTRUSTED_INTERVAL: u32 = 30;
|
||||
|
||||
/// The request message for trust promotion (level 0 to level 1).
///
/// Carries a blind showing of the Lox credential, the user's blinding
/// of the Migration Key credential, and the commitments for the 9-bit
/// range proof that level_since is at least UNTRUSTED_INTERVAL days
/// ago.  CG0 is not sent: the verifier recomputes it.
#[derive(Serialize, Deserialize)]
pub struct Request {
    // Fields for blind showing the Lox credential
    // We don't need to include trust_level, invites_remaining, or
    // blockages, since they must be 0
    P: RistrettoPoint,
    // The credential id is revealed so the BA can detect reuse
    id: Scalar,
    CBucket: RistrettoPoint,
    CSince: RistrettoPoint,
    CQ: RistrettoPoint,

    // Fields for user blinding of the Migration Key credential
    D: RistrettoPoint,
    EncBucket: (RistrettoPoint, RistrettoPoint),

    // Fields for the inequality proof (level_since +
    // UNTRUSTED_INTERVAL <= today)
    CG1: RistrettoPoint,
    CG2: RistrettoPoint,
    CG3: RistrettoPoint,
    CG4: RistrettoPoint,
    CG5: RistrettoPoint,
    CG6: RistrettoPoint,
    CG7: RistrettoPoint,
    CG8: RistrettoPoint,
    CG0sq: RistrettoPoint,
    CG1sq: RistrettoPoint,
    CG2sq: RistrettoPoint,
    CG3sq: RistrettoPoint,
    CG4sq: RistrettoPoint,
    CG5sq: RistrettoPoint,
    CG6sq: RistrettoPoint,
    CG7sq: RistrettoPoint,
    CG8sq: RistrettoPoint,

    // The combined ZKP
    piUser: CompactProof,
}
|
||||
|
||||
/// Client-side state retained between sending the trust-promotion
/// request and handling the BA's response.
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The client's ephemeral ElGamal keypair (d, D)
    d: Scalar,
    D: RistrettoPoint,
    // The encrypted bucket attribute, echoed for response processing
    EncBucket: (RistrettoPoint, RistrettoPoint),
    // The (revealed) Lox credential id and its bucket attribute
    id: Scalar,
    bucket: Scalar,
}
|
||||
|
||||
/// The BA's response to a trust-promotion request: the encrypted MAC
/// for the implicit Migration Key credential, plus the table of
/// encrypted Migration credentials described in the module docs.
#[serde_as]
#[derive(Serialize, Deserialize, Debug)]
pub struct Response {
    // The encrypted MAC for the Migration Key credential
    Pk: RistrettoPoint,
    EncQk: (RistrettoPoint, RistrettoPoint),

    // A table of encrypted Migration credentials; the encryption keys
    // are formed from the possible values of Qk (the decrypted form of
    // EncQk)
    #[serde_as(as = "Vec<(_,[_; migration_table::ENC_MIGRATION_BYTES])>")]
    enc_migration_table: HashMap<[u8; 16], [u8; migration_table::ENC_MIGRATION_BYTES]>,
}
|
||||
|
||||
// The combined client-side proof for trust promotion: a blind showing
// of the Lox credential, correct user blinding of the Migration Key
// credential, and a 9-bit range proof that level_since is at least
// UNTRUSTED_INTERVAL (and at most UNTRUSTED_INTERVAL + 511) days ago.
define_proof! {
    requestproof,
    "Trust Promotion Request",
    (bucket, since, zbucket, zsince, negzQ,
     d, ebucket,
     g0, g1, g2, g3, g4, g5, g6, g7, g8,
     zg0, zg1, zg2, zg3, zg4, zg5, zg6, zg7, zg8,
     wg0, wg1, wg2, wg3, wg4, wg5, wg6, wg7, wg8,
     yg0, yg1, yg2, yg3, yg4, yg5, yg6, yg7, yg8),
    (P, CBucket, CSince, V, Xbucket, Xsince,
     D, EncBucket0, EncBucket1,
     CG0, CG1, CG2, CG3, CG4, CG5, CG6, CG7, CG8,
     CG0sq, CG1sq, CG2sq, CG3sq, CG4sq, CG5sq, CG6sq, CG7sq, CG8sq),
    (A, B):
    // Blind showing of the Lox credential
    CBucket = (bucket*P + zbucket*A),
    CSince = (since*P + zsince*A),
    // The "error factor" binding the blinding factors to the issuer's
    // public keys
    V = (zbucket*Xbucket + zsince*Xsince + negzQ*A),
    // User blinding of the Migration Key credential
    D = (d*B),
    EncBucket0 = (ebucket*B),
    EncBucket1 = (bucket*B + ebucket*D),
    // Prove CSince encodes a value at least UNTRUSTED_INTERVAL
    // days ago (and technically at most UNTRUSTED_INTERVAL+511 days
    // ago): first prove each of g0, ..., g8 is a bit by proving that
    // gi = gi^2
    CG0 = (g0*P + zg0*A), CG0sq = (g0*CG0 + wg0*A), CG0sq = (g0*P + yg0*A),
    CG1 = (g1*P + zg1*A), CG1sq = (g1*CG1 + wg1*A), CG1sq = (g1*P + yg1*A),
    CG2 = (g2*P + zg2*A), CG2sq = (g2*CG2 + wg2*A), CG2sq = (g2*P + yg2*A),
    CG3 = (g3*P + zg3*A), CG3sq = (g3*CG3 + wg3*A), CG3sq = (g3*P + yg3*A),
    CG4 = (g4*P + zg4*A), CG4sq = (g4*CG4 + wg4*A), CG4sq = (g4*P + yg4*A),
    CG5 = (g5*P + zg5*A), CG5sq = (g5*CG5 + wg5*A), CG5sq = (g5*P + yg5*A),
    CG6 = (g6*P + zg6*A), CG6sq = (g6*CG6 + wg6*A), CG6sq = (g6*P + yg6*A),
    CG7 = (g7*P + zg7*A), CG7sq = (g7*CG7 + wg7*A), CG7sq = (g7*P + yg7*A),
    CG8 = (g8*P + zg8*A), CG8sq = (g8*CG8 + wg8*A), CG8sq = (g8*P + yg8*A)
    // Then we'll check that CSince + UNTRUSTED_INTERVAL*P + CG0 + 2*CG1
    // + 4*CG2 + 8*CG3 + ... + 256*CG8 = today*P by having the verifier
    // plug in today*P - (CSince + UNTRUSTED_INTERVAL*P + 2*CG1 + 4*CG2
    // + ... + 256*CG8) as its value of CG0.
}
|
||||
|
||||
/// Construct a trust promotion request (client side).
///
/// Blind-shows the given Lox credential, proves in zero knowledge that
/// its level_since attribute is between UNTRUSTED_INTERVAL and
/// UNTRUSTED_INTERVAL+511 days in the past, and creates an ElGamal
/// blinding of the bucket attribute so the issuer can issue a
/// Migration Key credential on the same (hidden) bucket.  Returns the
/// Request to send to the BridgeAuth and the State needed to later
/// process the Response.
pub fn request(
    lox_cred: &cred::Lox,
    lox_pub: &IssuerPubKey,
    today: u32,
) -> Result<(Request, State), ProofError> {
    let A: &RistrettoPoint = &CMZ_A;
    let B: &RistrettoPoint = &CMZ_B;
    let Atable: &RistrettoBasepointTable = &CMZ_A_TABLE;
    let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

    // Ensure the credential can be correctly shown: it must be the case
    // that level_since + UNTRUSTED_INTERVAL <= today.
    let level_since: u32 = match scalar_u32(&lox_cred.level_since) {
        Some(v) => v,
        None => return Err(ProofError::VerificationFailure),
    };
    if level_since + UNTRUSTED_INTERVAL > today {
        return Err(ProofError::VerificationFailure);
    }
    let diffdays = today - (level_since + UNTRUSTED_INTERVAL);
    // The range proof below only covers 9 bits, so diffdays must fit
    if diffdays > 511 {
        return Err(ProofError::VerificationFailure);
    }

    // Blind showing the Lox credential

    // Reblind P and Q
    let mut rng = rand::thread_rng();
    let t = Scalar::random(&mut rng);
    let P = t * lox_cred.P;
    let Q = t * lox_cred.Q;

    // Form Pedersen commitments to the blinded attributes
    let zbucket = Scalar::random(&mut rng);
    let zsince = Scalar::random(&mut rng);
    let CBucket = lox_cred.bucket * P + &zbucket * Atable;
    let CSince = lox_cred.level_since * P + &zsince * Atable;

    // Form a Pedersen commitment to the MAC Q
    // We flip the sign of zQ from that of the Hyphae paper so that
    // the ZKP has a "+" instead of a "-", as that's what the zkp
    // macro supports.
    let negzQ = Scalar::random(&mut rng);
    let CQ = Q - &negzQ * Atable;

    // Compute the "error factor"
    let V = zbucket * lox_pub.X[2] + zsince * lox_pub.X[4] + &negzQ * Atable;

    // User blinding the Migration Key credential

    // Pick an ElGamal keypair
    let d = Scalar::random(&mut rng);
    let D = &d * Btable;

    // Encrypt the attributes to be blinded (each times the
    // basepoint B) to the public key we just created
    let ebucket = Scalar::random(&mut rng);
    let EncBucket = (&ebucket * Btable, &lox_cred.bucket * Btable + ebucket * D);

    // The range proof that 0 <= diffdays <= 511

    // Extract the 9 bits from diffdays
    let g0: Scalar = (diffdays & 1).into();
    let g1: Scalar = ((diffdays >> 1) & 1).into();
    let g2: Scalar = ((diffdays >> 2) & 1).into();
    let g3: Scalar = ((diffdays >> 3) & 1).into();
    let g4: Scalar = ((diffdays >> 4) & 1).into();
    let g5: Scalar = ((diffdays >> 5) & 1).into();
    let g6: Scalar = ((diffdays >> 6) & 1).into();
    let g7: Scalar = ((diffdays >> 7) & 1).into();
    let g8: Scalar = ((diffdays >> 8) & 1).into();

    // Pick random factors for the Pedersen commitments
    // (note: zg0 is not random; it is computed below so that the
    // verifier's reconstruction of CG0 cancels out correctly)
    let wg0 = Scalar::random(&mut rng);
    let zg1 = Scalar::random(&mut rng);
    let wg1 = Scalar::random(&mut rng);
    let zg2 = Scalar::random(&mut rng);
    let wg2 = Scalar::random(&mut rng);
    let zg3 = Scalar::random(&mut rng);
    let wg3 = Scalar::random(&mut rng);
    let zg4 = Scalar::random(&mut rng);
    let wg4 = Scalar::random(&mut rng);
    let zg5 = Scalar::random(&mut rng);
    let wg5 = Scalar::random(&mut rng);
    let zg6 = Scalar::random(&mut rng);
    let wg6 = Scalar::random(&mut rng);
    let zg7 = Scalar::random(&mut rng);
    let wg7 = Scalar::random(&mut rng);
    let zg8 = Scalar::random(&mut rng);
    let wg8 = Scalar::random(&mut rng);

    // Compute zg0 to cancel things out as
    // zg0 = -(zsince + 2*zg1 + 4*zg2 + 8*zg3 + 16*zg4 + 32*zg5 + 64*zg6 + 128*zg7 + 256*zg8)
    // but use Horner's method
    let zg0 = -(scalar_dbl(
        &(scalar_dbl(
            &(scalar_dbl(
                &(scalar_dbl(
                    &(scalar_dbl(
                        &(scalar_dbl(&(scalar_dbl(&(scalar_dbl(&zg8) + zg7)) + zg6)) + zg5),
                    ) + zg4),
                ) + zg3),
            ) + zg2),
        ) + zg1),
    ) + zsince);

    // Blinding factors for the "bit squared" commitments; since each
    // gi is 0 or 1, gi^2 = gi, so CGisq commits to the same bit
    let yg0 = wg0 + g0 * zg0;
    let yg1 = wg1 + g1 * zg1;
    let yg2 = wg2 + g2 * zg2;
    let yg3 = wg3 + g3 * zg3;
    let yg4 = wg4 + g4 * zg4;
    let yg5 = wg5 + g5 * zg5;
    let yg6 = wg6 + g6 * zg6;
    let yg7 = wg7 + g7 * zg7;
    let yg8 = wg8 + g8 * zg8;

    // Pedersen commitments to each bit of diffdays
    let CG0 = g0 * P + &zg0 * Atable;
    let CG1 = g1 * P + &zg1 * Atable;
    let CG2 = g2 * P + &zg2 * Atable;
    let CG3 = g3 * P + &zg3 * Atable;
    let CG4 = g4 * P + &zg4 * Atable;
    let CG5 = g5 * P + &zg5 * Atable;
    let CG6 = g6 * P + &zg6 * Atable;
    let CG7 = g7 * P + &zg7 * Atable;
    let CG8 = g8 * P + &zg8 * Atable;

    // Commitments to each squared bit
    let CG0sq = g0 * P + &yg0 * Atable;
    let CG1sq = g1 * P + &yg1 * Atable;
    let CG2sq = g2 * P + &yg2 * Atable;
    let CG3sq = g3 * P + &yg3 * Atable;
    let CG4sq = g4 * P + &yg4 * Atable;
    let CG5sq = g5 * P + &yg5 * Atable;
    let CG6sq = g6 * P + &yg6 * Atable;
    let CG7sq = g7 * P + &yg7 * Atable;
    let CG8sq = g8 * P + &yg8 * Atable;

    // Construct the proof
    let mut transcript = Transcript::new(b"trust promotion request");
    let piUser = requestproof::prove_compact(
        &mut transcript,
        requestproof::ProveAssignments {
            A,
            B,
            P: &P,
            CBucket: &CBucket,
            CSince: &CSince,
            V: &V,
            Xbucket: &lox_pub.X[2],
            Xsince: &lox_pub.X[4],
            D: &D,
            EncBucket0: &EncBucket.0,
            EncBucket1: &EncBucket.1,
            CG0: &CG0,
            CG1: &CG1,
            CG2: &CG2,
            CG3: &CG3,
            CG4: &CG4,
            CG5: &CG5,
            CG6: &CG6,
            CG7: &CG7,
            CG8: &CG8,
            CG0sq: &CG0sq,
            CG1sq: &CG1sq,
            CG2sq: &CG2sq,
            CG3sq: &CG3sq,
            CG4sq: &CG4sq,
            CG5sq: &CG5sq,
            CG6sq: &CG6sq,
            CG7sq: &CG7sq,
            CG8sq: &CG8sq,
            bucket: &lox_cred.bucket,
            since: &lox_cred.level_since,
            zbucket: &zbucket,
            zsince: &zsince,
            negzQ: &negzQ,
            d: &d,
            ebucket: &ebucket,
            g0: &g0,
            g1: &g1,
            g2: &g2,
            g3: &g3,
            g4: &g4,
            g5: &g5,
            g6: &g6,
            g7: &g7,
            g8: &g8,
            zg0: &zg0,
            zg1: &zg1,
            zg2: &zg2,
            zg3: &zg3,
            zg4: &zg4,
            zg5: &zg5,
            zg6: &zg6,
            zg7: &zg7,
            zg8: &zg8,
            wg0: &wg0,
            wg1: &wg1,
            wg2: &wg2,
            wg3: &wg3,
            wg4: &wg4,
            wg5: &wg5,
            wg6: &wg6,
            wg7: &wg7,
            wg8: &wg8,
            yg0: &yg0,
            yg1: &yg1,
            yg2: &yg2,
            yg3: &yg3,
            yg4: &yg4,
            yg5: &yg5,
            yg6: &yg6,
            yg7: &yg7,
            yg8: &yg8,
        },
    )
    .0;

    // Note that CG0 is not included in the Request; the verifier
    // recomputes it from the other commitments and today's date.
    Ok((
        Request {
            P,
            id: lox_cred.id,
            CBucket,
            CSince,
            CQ,
            D,
            EncBucket,
            CG1,
            CG2,
            CG3,
            CG4,
            CG5,
            CG6,
            CG7,
            CG8,
            CG0sq,
            CG1sq,
            CG2sq,
            CG3sq,
            CG4sq,
            CG5sq,
            CG6sq,
            CG7sq,
            CG8sq,
            piUser,
        },
        State {
            d,
            D,
            EncBucket,
            id: lox_cred.id,
            bucket: lox_cred.bucket,
        },
    ))
}
|
||||
|
||||
impl BridgeAuth {
    /// Receive a trust promotion request.
    ///
    /// Verifies the blind showing of the Lox credential and the range
    /// proof, checks the id against the duplicate filters, and if all
    /// is well issues a blinded Migration Key credential MAC along
    /// with the encrypted trust-upgrade migration table.
    pub fn handle_trust_promotion(&mut self, req: Request) -> Result<Response, ProofError> {
        let A: &RistrettoPoint = &CMZ_A;
        let B: &RistrettoPoint = &CMZ_B;
        let Btable: &RistrettoBasepointTable = &CMZ_B_TABLE;

        // An identity P would make the MAC showing trivially valid
        if req.P.is_identity() {
            return Err(ProofError::VerificationFailure);
        }

        // Recompute the "error factor" using knowledge of our own
        // (the issuer's) private key instead of knowledge of the
        // hidden attributes
        let Vprime = (self.lox_priv.x[0] + self.lox_priv.x[1] * req.id) * req.P
            + self.lox_priv.x[2] * req.CBucket
            + self.lox_priv.x[4] * req.CSince
            - req.CQ;

        // Recompute CG0 using Horner's method
        let today: Scalar = self.today().into();
        let unt: Scalar = UNTRUSTED_INTERVAL.into();
        let CG0prime = (today - unt) * req.P
            - req.CSince
            - pt_dbl(
                &(pt_dbl(
                    &(pt_dbl(
                        &(pt_dbl(
                            &(pt_dbl(
                                &(pt_dbl(&(pt_dbl(&(pt_dbl(&req.CG8) + req.CG7)) + req.CG6))
                                    + req.CG5),
                            ) + req.CG4),
                        ) + req.CG3),
                    ) + req.CG2),
                ) + req.CG1),
            );

        // Verify the ZKP
        let mut transcript = Transcript::new(b"trust promotion request");
        requestproof::verify_compact(
            &req.piUser,
            &mut transcript,
            requestproof::VerifyAssignments {
                A: &A.compress(),
                B: &B.compress(),
                P: &req.P.compress(),
                CBucket: &req.CBucket.compress(),
                CSince: &req.CSince.compress(),
                V: &Vprime.compress(),
                Xbucket: &self.lox_pub.X[2].compress(),
                Xsince: &self.lox_pub.X[4].compress(),
                D: &req.D.compress(),
                EncBucket0: &req.EncBucket.0.compress(),
                EncBucket1: &req.EncBucket.1.compress(),
                // Plugging in our recomputed CG0 here is what enforces
                // the "at least UNTRUSTED_INTERVAL days" relation
                CG0: &CG0prime.compress(),
                CG1: &req.CG1.compress(),
                CG2: &req.CG2.compress(),
                CG3: &req.CG3.compress(),
                CG4: &req.CG4.compress(),
                CG5: &req.CG5.compress(),
                CG6: &req.CG6.compress(),
                CG7: &req.CG7.compress(),
                CG8: &req.CG8.compress(),
                CG0sq: &req.CG0sq.compress(),
                CG1sq: &req.CG1sq.compress(),
                CG2sq: &req.CG2sq.compress(),
                CG3sq: &req.CG3sq.compress(),
                CG4sq: &req.CG4sq.compress(),
                CG5sq: &req.CG5sq.compress(),
                CG6sq: &req.CG6sq.compress(),
                CG7sq: &req.CG7sq.compress(),
                CG8sq: &req.CG8sq.compress(),
            },
        )?;

        // Ensure the id has not been seen before, either in the general
        // id filter, or the filter specifically for trust promotion.
        // Add the id to the latter, but not the former.
        if self.id_filter.check(&req.id) == SeenType::Seen
            || self.trust_promotion_filter.filter(&req.id) == SeenType::Seen
        {
            return Err(ProofError::VerificationFailure);
        }

        // Compute the encrypted MAC (Pk, EncQk) for the Migration Key
        // credential.

        // Compute the MAC on the visible attributes
        let mut rng = rand::thread_rng();
        let b = Scalar::random(&mut rng);
        let Pk = &b * Btable;
        let Pktable = RistrettoBasepointTable::create(&Pk);
        let Qid = &(self.migrationkey_priv.x[0] + self.migrationkey_priv.x[1] * req.id) * &Pktable;

        // El Gamal encrypt it to the public key req.D
        let s = Scalar::random(&mut rng);
        let EncQkid = (&s * Btable, Qid + s * req.D);

        // Homomorphically compute the part of the MAC corresponding to
        // the blinded attributes
        let tbucket = self.migrationkey_priv.x[2] * b;
        let EncQkBucket = (tbucket * req.EncBucket.0, tbucket * req.EncBucket.1);

        // Combine the two halves of the encrypted MAC
        let EncQk = (EncQkid.0 + EncQkBucket.0, EncQkid.1 + EncQkBucket.1);

        Ok(Response {
            Pk,
            EncQk,
            enc_migration_table: self.trustup_migration_table.encrypt_table(
                &req.id,
                &self.bridge_table,
                &Pktable,
                &self.migration_priv,
                &self.migrationkey_priv,
            ),
        })
    }
}
|
||||
|
||||
/// Handle the response to the request, producing a Migration credential
|
||||
/// if successful.
|
||||
///
|
||||
/// The Migration credential can then be used in the migration protocol
|
||||
/// to actually upgrade to trust level 1.
|
||||
pub fn handle_response(state: State, resp: Response) -> Result<cred::Migration, ProofError> {
|
||||
if resp.Pk.is_identity() {
|
||||
return Err(ProofError::VerificationFailure);
|
||||
}
|
||||
|
||||
// Decrypt the MAC on the Migration Key credential
|
||||
let Qk = resp.EncQk.1 - (state.d * resp.EncQk.0);
|
||||
|
||||
// Use Qk to locate and decrypt the Migration credential
|
||||
match migration_table::decrypt_cred(
|
||||
&Qk,
|
||||
&state.id,
|
||||
&state.bucket,
|
||||
migration_table::MigrationType::TrustUpgrade,
|
||||
&resp.enc_migration_table,
|
||||
) {
|
||||
Some(m) => Ok(m),
|
||||
None => Err(ProofError::VerificationFailure),
|
||||
}
|
||||
}
|
|
@ -0,0 +1,995 @@
|
|||
/*! Unit tests that require access to the testing-only function
|
||||
BridgeLine::random() or private fields */
|
||||
|
||||
use super::bridge_table::{BridgeLine, BRIDGE_BYTES};
|
||||
use super::proto::*;
|
||||
use super::*;
|
||||
use rand::Rng;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Performance metrics gathered for one protocol round trip in a test.
struct PerfStat {
    // Report performance metrics for each test
    // Serialized request size in bytes
    req_len: usize,
    // Serialized response size in bytes
    resp_len: usize,
    // Time to build and serialize the request
    req_t: Duration,
    // Time for the BridgeAuth to handle the request and serialize the response
    resp_t: Duration,
    // Time for the client to handle the response
    resp_handle_t: Duration,
}
|
||||
|
||||
/// A bridge database plus bridge authority, pre-populated with
/// buckets, for exercising the protocols in tests.
struct TestHarness {
    bdb: BridgeDb,
    pub ba: BridgeAuth,
}
|
||||
|
||||
impl TestHarness {
    /// Create a harness with 5 open-invitation buckets and 5 hot
    /// spare buckets.
    fn new() -> Self {
        TestHarness::new_buckets(5, 5)
    }

    /// Create a harness with the given number of open-invitation and
    /// hot spare buckets (3 bridges per bucket).
    fn new_buckets(num_buckets: u16, hot_spare: u16) -> Self {
        // Create a BridgeDb
        let mut bdb = BridgeDb::new();
        // Create a BridgeAuth
        let mut ba = BridgeAuth::new(bdb.pubkey);

        // Make 3 x num_buckets open invitation bridges, in sets of 3
        for _ in 0..num_buckets {
            let bucket = [
                BridgeLine::random(),
                BridgeLine::random(),
                BridgeLine::random(),
            ];
            ba.add_openinv_bridges(bucket, &mut bdb);
        }
        // Add hot_spare more hot spare buckets
        for _ in 0..hot_spare {
            let bucket = [
                BridgeLine::random(),
                BridgeLine::random(),
                BridgeLine::random(),
            ];
            ba.add_spare_bucket(bucket);
        }
        // Create the encrypted bridge table
        ba.enc_bridge_table();

        Self { bdb, ba }
    }

    /// Advance the bridge authority's clock by the given number of days.
    fn advance_days(&mut self, days: u16) {
        self.ba.advance_days(days);
    }

    /// Run the open-invitation protocol end to end (request,
    /// handle, response), measuring sizes and times of each stage.
    /// Each of the protocol helpers below follows the same pattern:
    /// build+serialize the request, deserialize+handle it at the
    /// BridgeAuth, then deserialize+handle the response at the client,
    /// timing each leg through the bincode round trips.
    fn open_invite(&mut self) -> (PerfStat, (cred::Lox, bridge_table::BridgeLine)) {
        // Issue an open invitation
        let inv = self.bdb.invite();

        let req_start = Instant::now();
        // Use it to get a Lox credential
        let (req, state) = open_invite::request(&inv);
        let encoded: Vec<u8> = bincode::serialize(&req).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let resp = self.ba.handle_open_invite(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&resp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp = bincode::deserialize(&encoded_resp[..]).unwrap();
        let (cred, bridgeline) =
            open_invite::handle_response(state, decode_resp, &self.ba.lox_pub).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            (cred, bridgeline),
        )
    }

    /// Run the trust promotion protocol, yielding a Migration
    /// credential for the level 0 -> 1 upgrade.
    fn trust_promotion(&mut self, cred: &cred::Lox) -> (PerfStat, cred::Migration) {
        let req_start = Instant::now();
        let (promreq, promstate) =
            trust_promotion::request(cred, &self.ba.lox_pub, self.ba.today()).unwrap();
        let encoded: Vec<u8> = bincode::serialize(&promreq).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let promresp = self.ba.handle_trust_promotion(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&promresp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp = bincode::deserialize(&encoded_resp[..]).unwrap();
        let migcred = trust_promotion::handle_response(promstate, decode_resp).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            migcred,
        )
    }

    /// Run the migration protocol to exchange a level-0 Lox credential
    /// plus a Migration credential for a level-1 Lox credential.
    fn level0_migration(
        &mut self,
        loxcred: &cred::Lox,
        migcred: &cred::Migration,
    ) -> (PerfStat, cred::Lox) {
        let req_start = Instant::now();
        let (migreq, migstate) =
            migration::request(loxcred, migcred, &self.ba.lox_pub, &self.ba.migration_pub).unwrap();
        let encoded: Vec<u8> = bincode::serialize(&migreq).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let migresp = self.ba.handle_migration(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&migresp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp: migration::Response = bincode::deserialize(&encoded_resp[..]).unwrap();
        let cred = migration::handle_response(migstate, decode_resp, &self.ba.lox_pub).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            cred,
        )
    }

    /// Run the level-up protocol (trust level >= 1), using today's
    /// Bucket Reachability credential from the credential's bucket.
    fn level_up(&mut self, cred: &cred::Lox) -> (PerfStat, cred::Lox) {
        // Read the bucket in the credential to get today's Bucket
        // Reachability credential

        let (id, key) = bridge_table::from_scalar(cred.bucket).unwrap();
        let encbuckets = self.ba.enc_bridge_table();
        let bucket =
            bridge_table::BridgeTable::decrypt_bucket(id, &key, &encbuckets[id as usize]).unwrap();
        let reachcred = bucket.1.unwrap();

        // Use the Bucket Reachability credential to advance to the next
        // level
        let req_start = Instant::now();
        let (req, state) = level_up::request(
            cred,
            &reachcred,
            &self.ba.lox_pub,
            &self.ba.reachability_pub,
            self.ba.today(),
        )
        .unwrap();
        let encoded: Vec<u8> = bincode::serialize(&req).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let resp = self.ba.handle_level_up(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&resp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp = bincode::deserialize(&encoded_resp[..]).unwrap();
        let cred = level_up::handle_response(state, decode_resp, &self.ba.lox_pub).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            cred,
        )
    }

    /// Run the issue-invitation protocol, yielding an updated Lox
    /// credential (with one fewer invitation) and an Invitation.
    fn issue_invite(&mut self, cred: &cred::Lox) -> (PerfStat, (cred::Lox, cred::Invitation)) {
        // Read the bucket in the credential to get today's Bucket
        // Reachability credential
        let (id, key) = bridge_table::from_scalar(cred.bucket).unwrap();
        let encbuckets = self.ba.enc_bridge_table();
        let bucket =
            bridge_table::BridgeTable::decrypt_bucket(id, &key, &encbuckets[id as usize]).unwrap();
        let reachcred = bucket.1.unwrap();

        let req_start = Instant::now();
        let (req, state) = issue_invite::request(
            cred,
            &reachcred,
            &self.ba.lox_pub,
            &self.ba.reachability_pub,
            self.ba.today(),
        )
        .unwrap();
        let encoded: Vec<u8> = bincode::serialize(&req).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let resp = self.ba.handle_issue_invite(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&resp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp = bincode::deserialize(&encoded_resp[..]).unwrap();
        let (cred, invite) = issue_invite::handle_response(
            state,
            decode_resp,
            &self.ba.lox_pub,
            &self.ba.invitation_pub,
        )
        .unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            (cred, invite),
        )
    }

    /// Run the redeem-invitation protocol: a new user trades an
    /// Invitation for their own (level 1) Lox credential.
    fn redeem_invite(&mut self, inv: &cred::Invitation) -> (PerfStat, cred::Lox) {
        let req_start = Instant::now();
        let (req, state) =
            redeem_invite::request(inv, &self.ba.invitation_pub, self.ba.today()).unwrap();
        let encoded: Vec<u8> = bincode::serialize(&req).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let resp = self.ba.handle_redeem_invite(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&resp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp = bincode::deserialize(&encoded_resp[..]).unwrap();
        let cred = redeem_invite::handle_response(state, decode_resp, &self.ba.lox_pub).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            cred,
        )
    }

    /// Run the check-blockage protocol, yielding a Migration
    /// credential to move away from a blocked bucket.
    fn check_blockage(&mut self, cred: &cred::Lox) -> (PerfStat, cred::Migration) {
        let req_start = Instant::now();
        let (req, state) = check_blockage::request(cred, &self.ba.lox_pub).unwrap();
        let encoded: Vec<u8> = bincode::serialize(&req).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let resp = self.ba.handle_check_blockage(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&resp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp = bincode::deserialize(&encoded_resp[..]).unwrap();
        let migcred = check_blockage::handle_response(state, decode_resp).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            migcred,
        )
    }

    /// Run the blockage-migration protocol to move to a new bucket
    /// using a Migration credential from check_blockage.
    fn blockage_migration(
        &mut self,
        cred: &cred::Lox,
        mig: &cred::Migration,
    ) -> (PerfStat, cred::Lox) {
        let req_start = Instant::now();
        let (req, state) =
            blockage_migration::request(cred, mig, &self.ba.lox_pub, &self.ba.migration_pub)
                .unwrap();
        let encoded: Vec<u8> = bincode::serialize(&req).unwrap();
        let req_t = req_start.elapsed();
        let req_len = encoded.len();

        let resp_start = Instant::now();
        let decoded = bincode::deserialize(&encoded[..]).unwrap();
        let resp = self.ba.handle_blockage_migration(decoded).unwrap();
        let encoded_resp: Vec<u8> = bincode::serialize(&resp).unwrap();
        let resp_t = resp_start.elapsed();
        let resp_len = encoded_resp.len();

        let resp_handle_start = Instant::now();
        let decode_resp: blockage_migration::Response =
            bincode::deserialize(&encoded_resp[..]).unwrap();
        let cred =
            blockage_migration::handle_response(state, decode_resp, &self.ba.lox_pub).unwrap();
        let resp_handle_t = resp_handle_start.elapsed();

        (
            PerfStat {
                req_len,
                resp_len,
                req_t,
                resp_t,
                resp_handle_t,
            },
            cred,
        )
    }
}
|
||||
|
||||
/// Join an untrusted (level 0) user via an open invitation and verify
/// that the resulting Lox credential opens its assigned bucket.
#[test]
fn test_open_invite() {
    let mut th = TestHarness::new();

    // Join an untrusted user
    let (stats, (cred, bridgeline)) = th.open_invite();

    // Check that we can use the credential to read a bucket
    let (bucket_id, bucket_key) = bridge_table::from_scalar(cred.bucket).unwrap();
    let encbuckets = th.ba.enc_bridge_table();
    let bucket = bridge_table::BridgeTable::decrypt_bucket(
        bucket_id,
        &bucket_key,
        &encbuckets[bucket_id as usize],
    )
    .unwrap();
    print_test_results(stats);
    println!("cred = {:?}", cred);
    println!("bucket = {:?}", bucket);
    println!("bridgeline = {:?}", bridgeline);
    // An untrusted bucket has no Bucket Reachability credential
    assert!(bucket.1.is_none());
    assert!(th.ba.verify_lox(&cred));
    // The bridge we were handed is the first one in our bucket
    assert!(bridgeline == bucket.0[0]);
}
|
||||
|
||||
/// Promote a level-0 user after enough days have passed and check the
/// resulting Migration credential.
#[test]
fn test_trust_promotion() {
    let mut th = TestHarness::new();

    let (_, (cred, _)) = th.open_invite();
    assert!(th.ba.verify_lox(&cred));

    // Time passes
    th.advance_days(47);

    let (stats, migcred) = th.trust_promotion(&cred);
    assert!(th.ba.verify_migration(&migcred));

    // Check that we can use the to_bucket in the Migration credential
    // to read a bucket
    let (bucket_id, bucket_key) = bridge_table::from_scalar(migcred.to_bucket).unwrap();
    let encbuckets = th.ba.enc_bridge_table();
    let bucket = bridge_table::BridgeTable::decrypt_bucket(
        bucket_id,
        &bucket_key,
        &encbuckets[bucket_id as usize],
    )
    .unwrap();
    print_test_results(stats);
    println!("bucket = {:?}", bucket);
    // Trusted buckets carry a Bucket Reachability credential
    assert!(th.ba.verify_reachability(&bucket.1.unwrap()));
}
|
||||
|
||||
/// Do the full level 0 -> 1 upgrade: trust promotion followed by the
/// migration protocol, then verify the new credential opens a bucket.
#[test]
fn test_level0_migration() {
    let mut th = TestHarness::new();

    let (_, (cred, _)) = th.open_invite();
    assert!(th.ba.verify_lox(&cred));

    // Time passes
    th.advance_days(47);

    let (promo_stats, migcred) = th.trust_promotion(&cred);
    assert!(th.ba.verify_migration(&migcred));
    println!("--Trust Promotion to 1--\n");
    print_test_results(promo_stats);

    let (mig_stats, newloxcred) = th.level0_migration(&cred, &migcred);

    println!("--Level 0 migration--\n");
    print_test_results(mig_stats);

    assert!(th.ba.verify_lox(&newloxcred));
    println!("newloxcred = {:?}", newloxcred);
    // Check that we can use the credential to read a bucket
    let (bucket_id, bucket_key) = bridge_table::from_scalar(newloxcred.bucket).unwrap();
    let encbuckets = th.ba.enc_bridge_table();
    let bucket = bridge_table::BridgeTable::decrypt_bucket(
        bucket_id,
        &bucket_key,
        &encbuckets[bucket_id as usize],
    )
    .unwrap();
    println!("bucket = {:?}", bucket);
    assert!(th.ba.verify_reachability(&bucket.1.unwrap()));
}
|
||||
|
||||
/// Walk a user up through trust levels 1..4, checking the trust_level
/// attribute and credential validity after each upgrade.
#[test]
fn test_level_up() {
    let mut th = TestHarness::new();

    // Join an untrusted user
    let cred = th.open_invite().1 .0;

    // Time passes
    th.advance_days(47);

    // Go up to level 1
    let (perf_stat, migcred) = th.trust_promotion(&cred);

    println!("--Trust Promotion to 1--\n");
    print_test_results(perf_stat);

    let (mperf_stat, cred1) = th.level0_migration(&cred, &migcred);

    println!("--New Level 1 Credential--\n");
    print_test_results(mperf_stat);

    assert!(scalar_u32(&cred1.trust_level).unwrap() == 1);

    // Time passes
    th.advance_days(20);

    let (two_perf_stat, cred2) = th.level_up(&cred1);
    assert!(scalar_u32(&cred2.trust_level).unwrap() == 2);

    println!("--Upgrade to Level 2--\n");
    print_test_results(two_perf_stat);
    println!("cred2 = {:?}", cred2);
    assert!(th.ba.verify_lox(&cred2));

    // Time passes
    th.advance_days(30);

    let (three_perf_stat, cred3) = th.level_up(&cred2);
    assert!(scalar_u32(&cred3.trust_level).unwrap() == 3);
    println!("--Upgrade to Level 3--\n");
    print_test_results(three_perf_stat);
    println!("cred3 = {:?}", cred3);
    assert!(th.ba.verify_lox(&cred3));

    // Time passes
    th.advance_days(60);

    let (four_perf_stat, cred4) = th.level_up(&cred3);
    // BUGFIX: this previously re-asserted cred3's level (== 3), so the
    // level-4 upgrade's trust_level was never actually checked; assert
    // the newly issued credential instead, matching the pattern of the
    // earlier upgrades.
    assert!(scalar_u32(&cred4.trust_level).unwrap() == 4);
    println!("--Upgrade to Level 4--\n");
    print_test_results(four_perf_stat);
    println!("cred4 = {:?}", cred4);
    assert!(th.ba.verify_lox(&cred4));
}
|
||||
|
||||
/// Bring a user up to trust level 2, then have them issue an
/// invitation; verify both the updated credential and the Invitation.
#[test]
fn test_issue_invite() {
    let mut th = TestHarness::new();

    // Join an untrusted user
    let (_, (cred, _)) = th.open_invite();

    // Time passes
    th.advance_days(47);

    // Go up to level 1
    let (promo_stats, migcred) = th.trust_promotion(&cred);
    println!("--Trust Promotion to 1--\n");
    print_test_results(promo_stats);
    let (mig_stats, cred1) = th.level0_migration(&cred, &migcred);
    println!("--New Level 1 Credential--\n");
    print_test_results(mig_stats);
    assert!(scalar_u32(&cred1.trust_level).unwrap() == 1);

    // Time passes
    th.advance_days(20);

    // Go up to level 2
    let (lvl2_stats, cred2) = th.level_up(&cred1);
    println!("--Upgrade to Level 2--\n");
    print_test_results(lvl2_stats);
    assert!(scalar_u32(&cred2.trust_level).unwrap() == 2);
    println!("cred2 = {:?}", cred2);
    assert!(th.ba.verify_lox(&cred2));

    // Issue an invitation
    let (invite_stats, (cred2a, invite)) = th.issue_invite(&cred2);
    println!("--Issue Invitation--\n");
    print_test_results(invite_stats);
    assert!(th.ba.verify_lox(&cred2a));
    assert!(th.ba.verify_invitation(&invite));
    println!("cred2a = {:?}", cred2a);
    println!("invite = {:?}", invite);
}
|
||||
|
||||
// End-to-end invitation flow: Alice levels up far enough to issue an
// invitation, and Bob redeems it to receive a Lox credential of his own.
#[test]
fn test_redeem_invite() {
    let mut th = TestHarness::new();

    // Join an untrusted user
    let cred = th.open_invite().1 .0;

    // Time passes
    th.advance_days(47);

    // Go up to level 1
    let (perf_stat, migcred) = th.trust_promotion(&cred);
    println!("--Trust Promotion to 1--\n");
    print_test_results(perf_stat);
    let (mperf_stat, cred1) = th.level0_migration(&cred, &migcred);
    println!("--New Level 1 Credential--\n");
    print_test_results(mperf_stat);
    assert!(scalar_u32(&cred1.trust_level).unwrap() == 1);

    // Time passes
    th.advance_days(20);

    // Go up to level 2
    let (two_perf_stat, cred2) = th.level_up(&cred1);
    println!("--Upgrade to Level 2--\n");
    print_test_results(two_perf_stat);
    assert!(scalar_u32(&cred2.trust_level).unwrap() == 2);
    println!("cred2 = {:?}", cred2);
    assert!(th.ba.verify_lox(&cred2));

    // Issue an invitation to Bob; the issuer gets back an updated Lox
    // credential (cred2a) alongside the Invitation credential.
    let (invite_perf_stat, (cred2a, bob_invite)) = th.issue_invite(&cred2);
    println!("--Issue Invitation--\n");
    print_test_results(invite_perf_stat);
    assert!(th.ba.verify_lox(&cred2a));
    assert!(th.ba.verify_invitation(&bob_invite));
    println!("cred2a = {:?}", cred2a);
    println!("bob_invite = {:?}", bob_invite);

    // Time passes
    th.advance_days(12);

    // Bob joins the system by redeeming the invitation; the resulting
    // credential must verify against the bridge authority.
    let (bob_perf_stat, bob_cred) = th.redeem_invite(&bob_invite);
    println!("--Bob joins the system--\n");
    print_test_results(bob_perf_stat);
    assert!(th.ba.verify_lox(&bob_cred));
    println!("bob_cred = {:?}", bob_cred);
}
|
||||
|
||||
// Checks BridgeAuth::allocate_bridges: complete groups of bridges are
// allocated into new buckets (growing the bridge table) and leftover
// bridges are parked in `unallocated_bridges`.
#[test]
fn test_allocate_bridges() {
    let mut th = TestHarness::new();
    // Own the vector directly (the original bound `&mut Vec::new()` to a
    // temporary); pass `&mut` at the call sites that need it.
    let mut distributor_bridges: Vec<BridgeLine> = Vec::new();
    let table_size = th.ba.bridge_table.buckets.len();
    for _ in 0..3 {
        distributor_bridges.push(BridgeLine::random());
    }
    assert!(
        !distributor_bridges.is_empty(),
        "No BridgeLines in distributor_bridges"
    );
    th.ba.allocate_bridges(&mut distributor_bridges, &mut th.bdb);
    // All three bridges must have been drained out of the input vector
    // and the bridge table must have grown to hold them.
    assert!(
        distributor_bridges.is_empty(),
        "BridgeLines in distributor_bridges were not allocated"
    );
    assert!(
        th.ba.bridge_table.buckets.len() > table_size,
        "Size of bridge table did not increase"
    );
    // Second round: two new distributor bridges plus two pre-existing
    // unallocated bridges.  After allocation exactly one bridge should
    // remain unallocated.
    let table_size = th.ba.bridge_table.buckets.len();
    for _ in 0..2 {
        distributor_bridges.push(BridgeLine::random());
        th.ba
            .bridge_table
            .unallocated_bridges
            .push(BridgeLine::random());
    }
    assert!(
        !th.ba.bridge_table.unallocated_bridges.is_empty(),
        "No BridgeLines in unallocated bridges"
    );
    assert!(
        !distributor_bridges.is_empty(),
        "No BridgeLines in distributor_bridges"
    );
    th.ba.allocate_bridges(&mut distributor_bridges, &mut th.bdb);
    // assert_eq! gives the actual count on failure, unlike assert!(== 1).
    assert_eq!(
        th.ba.bridge_table.unallocated_bridges.len(),
        1,
        "Incorrect number of bridges remain unallocated"
    );
    assert!(
        distributor_bridges.is_empty(),
        "BridgeLines in distributor_bridges were not allocated"
    );
    assert!(
        th.ba.bridge_table.buckets.len() > table_size,
        "Size of bridge table did not increase"
    );
}
|
||||
|
||||
// Verifies BridgeAuth::bridge_update: a BridgeLine with the same addr,
// port and uid_fingerprint but new info bytes replaces the stored entry,
// and the reachable map afterwards holds the updated entry, not the old
// one.
#[test]
fn test_update_bridge() {
    let mut th = TestHarness::new();
    // Add new bridge to table with known values,
    // check that I can find and update the values and that everything else stays the same

    // Create 3 bridges to test harness
    let bucket = [
        BridgeLine::random(),
        BridgeLine::random(),
        BridgeLine::random(),
    ];
    // Store first bridgeline to update later
    let bridgeline_to_update = bucket[0];
    // Create changed info for bridgeline to be updated to
    // NOTE(review): the second format argument below is `{}` — an empty
    // block expression evaluating to `()`, which `{:?}` renders as "()".
    // Confirm this placeholder for blocked_in is intentional.
    let infostr: String = format!(
        "type={} blocked_in={:?} protocol={} distribution={}",
        "obfs2 test bridge".to_string(),
        {},
        "obfs2".to_string(),
        "moat".to_string(),
    );
    // Fixed-size info field; the slice copy below panics if infostr is
    // longer than BRIDGE_BYTES - 26, so the format string must stay short.
    let mut updated_info_bytes: [u8; BRIDGE_BYTES - 26] = [0; BRIDGE_BYTES - 26];

    updated_info_bytes[..infostr.len()].copy_from_slice(infostr.as_bytes());

    // Same identity fields (addr/port/fingerprint), new info bytes.
    let updated_bridgeline = BridgeLine {
        addr: bridgeline_to_update.addr,
        port: bridgeline_to_update.port,
        uid_fingerprint: bridgeline_to_update.uid_fingerprint,
        info: updated_info_bytes,
    };

    // Sanity checks before the update: identical fingerprint, different
    // info, hence the two BridgeLines compare unequal.
    assert!(
        updated_bridgeline.uid_fingerprint == bridgeline_to_update.uid_fingerprint,
        "Bridge entering the bridgepool {:?} did not have the same fingerprint as the updating bridge {:?}",
        bridgeline_to_update,
        updated_bridgeline.uid_fingerprint
    );
    assert!(updated_bridgeline.info != bridgeline_to_update.info);
    println!(
        "Bridge entering the bridgepool {:?} has different info than the updating bridge {:?}",
        bridgeline_to_update.info, updated_bridgeline.info
    );
    assert!(updated_bridgeline != bridgeline_to_update);
    println!("The two bridgelines are not equal before the update");

    // Add 3 bridges to test harness
    th.ba.add_openinv_bridges(bucket, &mut th.bdb);

    // Dump the auxiliary tables before the update for manual comparison
    // against the post-update dump below.
    println!("Before update spares = {:?}", th.ba.bridge_table.spares);
    println!(
        "Before update tmig = {:?}",
        th.ba.trustup_migration_table.table
    );
    println!(
        "Before update bmig = {:?}",
        th.ba.blockage_migration_table.table
    );
    println!("Before update openinv = {:?}\n", th.bdb.openinv_buckets);

    // Update the info of a bridge with matching IP and Port to a bridge in the bridge table
    let result = th.ba.bridge_update(&updated_bridgeline);
    assert!(result, "Bridge failed to update successfully!!");
    // The reachable map must now hold the updated entry, not the old one.
    let found_bridge = th
        .ba
        .bridge_table
        .reachable
        .get_key_value(&updated_bridgeline);
    assert!(*found_bridge.unwrap().0 != bridgeline_to_update);
    assert!(*found_bridge.unwrap().0 == updated_bridgeline);
    println!("After update spares = {:?}", th.ba.bridge_table.spares);
    println!(
        "After update tmig = {:?}",
        th.ba.trustup_migration_table.table
    );
    println!(
        "After update bmig = {:?}",
        th.ba.blockage_migration_table.table
    );
    println!("After update openinv = {:?}\n", th.bdb.openinv_buckets);
}
|
||||
|
||||
// Exercises BridgeAuth::bridge_replace under four scenarios, keyed by a
// string case label: the replacement comes from (1) an explicitly passed
// available bridge, (2) the unallocated-bridges pool, (3) a spare bucket,
// or (4) nowhere — no replacement source exists, so the call must fail
// gracefully.
#[test]
fn test_bridge_replace() {
    // Create 3 open invitation buckets and 3 spare buckets
    let cases = vec!["available", "unallocated", "spare", "failed"];
    for case in cases {
        // "failed" needs a harness with no spare buckets so every
        // replacement source is exhausted; the other cases use the
        // default harness.
        let mut th: TestHarness;
        if case != "failed" {
            th = TestHarness::new();
        } else {
            th = TestHarness::new_buckets(5, 0);
        }
        // Randomly select a bridge to replace
        // NOTE(review): rand 0.7's gen_range(low, high) samples the
        // half-open range [0, table_size - 1), so the last bucket can
        // never be chosen — confirm the "- 1" is intended.
        let table_size = th.ba.bridge_table.buckets.len();
        let num = rand::thread_rng().gen_range(0, table_size - 1);
        let replaceable_bucket = th.ba.bridge_table.buckets.get(num).unwrap().clone();
        let replacement_bridge = &replaceable_bucket[0];
        assert!(
            th.ba
                .bridge_table
                .reachable
                .contains_key(replacement_bridge),
            "Random bridge to replace not in reachable bridges"
        );
        match case {
            "available" => {
                // Case one: available_bridge != null
                let random_bridgeline = &BridgeLine::random();
                let unallocated_bridgeline = &BridgeLine::random();
                // Seed the unallocated pool so we can later check it was
                // NOT consumed when an available bridge is passed in.
                th.ba
                    .bridge_table
                    .unallocated_bridges
                    .push(*unallocated_bridgeline);
                assert!(
                    th.ba
                        .bridge_table
                        .reachable
                        .get(random_bridgeline)
                        .is_none(),
                    "Random bridge already in table"
                );
                assert!(
                    th.ba
                        .bridge_replace(replacement_bridge, Some(random_bridgeline)),
                    "Bridge was not replaced with available bridge"
                );
                assert!(
                    th.ba
                        .bridge_table
                        .reachable
                        .get(random_bridgeline)
                        .is_some(),
                    "Replacement bridge not added to reachable bridges"
                );
                assert!(
                    table_size == th.ba.bridge_table.buckets.len(),
                    "Number of buckets changed size"
                );
                // The seeded unallocated bridge must still be there.
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 1,
                    "Extra bridge added to unallocated bridges"
                );
                println!("Successfully added passed bridgeline");
            }
            // Case two: available_bridge == null and unallocated_bridges !=null
            "unallocated" => {
                let unallocated_bridgeline = &BridgeLine::random();
                th.ba
                    .bridge_table
                    .unallocated_bridges
                    .push(*unallocated_bridgeline);
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 1,
                    "Not enough bridges in unallocated bridges"
                );
                assert!(
                    th.ba
                        .bridge_table
                        .reachable
                        .get(unallocated_bridgeline)
                        .is_none(),
                    "Unallocated bridge already marked as reachable"
                );
                // With no available bridge passed, the replacement must be
                // drawn from the unallocated pool.
                assert!(
                    th.ba.bridge_replace(replacement_bridge, None),
                    "Bridge was not replaced with available bridge"
                );
                assert!(
                    th.ba
                        .bridge_table
                        .reachable
                        .get(unallocated_bridgeline)
                        .is_some(),
                    "Replacement bridge not added to reachable bridges"
                );
                assert!(
                    table_size == th.ba.bridge_table.buckets.len(),
                    "Number of buckets changed size"
                );
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 0,
                    "Allocated bridge still in unallocated bridges"
                );

                println!("Successfully added unallocated bridgeline");
            }
            "spare" => {
                // Case three: available_bridge == null and unallocated_bridges ==null
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 0,
                    "Unallocated bridges should have a length of 0"
                );
                // Replacement must come from a spare bucket.
                assert!(
                    th.ba.bridge_replace(replacement_bridge, None),
                    "Bridge was not replaced with available spare bridge"
                );
                assert!(
                    th.ba
                        .bridge_table
                        .reachable
                        .get(replacement_bridge)
                        .is_none(),
                    "Replacement bridge still marked as reachable"
                );
                assert!(
                    table_size == th.ba.bridge_table.buckets.len(),
                    "Number of buckets changed size"
                );
                // The spare bucket's remaining bridges end up unallocated.
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 2,
                    "Extra spare bridges not added to unallocated bridges"
                );

                println!("Successfully added unallocated bridgeline");
            }
            "failed" => {
                // Case four: available_bridge == None and unallocated_bridges == None and spare buckets == None
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 0,
                    "Unallocated bridges should have a length of 0"
                );
                // No source of replacement bridges: the call must return
                // false and leave the table untouched.
                assert!(
                    !th.ba.bridge_replace(replacement_bridge, None),
                    "Bridge was somehow marked as replaced despite no replaceable bridges"
                );
                assert!(
                    th.ba
                        .bridge_table
                        .reachable
                        .get(replacement_bridge)
                        .is_some(),
                    "Replacement bridge marked as unreachable despite not being replaced"
                );
                assert!(
                    table_size == th.ba.bridge_table.buckets.len(),
                    "Number of buckets changed size"
                );
                assert!(
                    th.ba.bridge_table.unallocated_bridges.len() == 0,
                    "Unallocated bridges changed size"
                );
                println!("No bridges available to replace bridge so replacement gracefully failed");
            }
            _ => {}
        }
    }
}
|
||||
|
||||
// Marks bridges unreachable step by step and dumps the bridge
// authority's tables after each step so the induced trust-up and
// blockage migrations can be inspected in the test output.
#[test]
fn test_mark_unreachable() {
    // Print the tables that bridge_unreachable() can affect.
    fn dump_tables(th: &TestHarness) {
        println!("spares = {:?}", th.ba.bridge_table.spares);
        println!("tmig = {:?}", th.ba.trustup_migration_table.table);
        println!("bmig = {:?}", th.ba.blockage_migration_table.table);
        println!("openinv = {:?}\n", th.bdb.openinv_buckets);
    }

    let mut th = TestHarness::new();

    dump_tables(&th);

    // Mark a bridge in an untrusted bucket as unreachable.
    let untrusted_bridge = th.ba.bridge_table.buckets[6][0];
    th.ba.bridge_unreachable(&untrusted_bridge, &mut th.bdb);

    dump_tables(&th);

    // Mark another bridge grouped to the same trusted bucket as
    // unreachable.
    let sibling_bridge = th.ba.bridge_table.buckets[7][0];
    th.ba.bridge_unreachable(&sibling_bridge, &mut th.bdb);

    dump_tables(&th);

    // That will have introduced a blockage migration; read out the
    // target bucket id of its first entry.
    let (_, &target) = th
        .ba
        .blockage_migration_table
        .table
        .iter()
        .next()
        .unwrap();

    // Block two of the bridges in that target bucket.
    let first_blocked = th.ba.bridge_table.buckets[target as usize][1];
    let second_blocked = th.ba.bridge_table.buckets[target as usize][2];
    th.ba.bridge_unreachable(&first_blocked, &mut th.bdb);
    th.ba.bridge_unreachable(&second_blocked, &mut th.bdb);

    dump_tables(&th);
}
|
||||
|
||||
// Full blockage-migration flow: reach trust level 3, decrypt the bucket
// of bridges belonging to the credential, have two of those bridges
// blocked, then obtain a Migration credential and migrate to a new
// bucket.
#[test]
fn test_blockage_migration() {
    let mut th = TestHarness::new();

    // Join an untrusted user
    let cred = th.open_invite().1 .0;

    // Time passes
    th.advance_days(47);

    // Go up to level 1
    let (_mperf_stat, migcred) = th.trust_promotion(&cred);
    let (_perf_stat, cred1) = th.level0_migration(&cred, &migcred);
    assert!(scalar_u32(&cred1.trust_level).unwrap() == 1);

    // Time passes
    th.advance_days(20);

    // Go up to level 2
    let (_two_perf_stat, cred2) = th.level_up(&cred1);
    assert!(scalar_u32(&cred2.trust_level).unwrap() == 2);
    println!("cred2 = {:?}", cred2);
    assert!(th.ba.verify_lox(&cred2));

    // Time passes
    th.advance_days(29);

    // Go up to level 3
    let (_three_perf_stat, cred3) = th.level_up(&cred2);
    assert!(scalar_u32(&cred3.trust_level).unwrap() == 3);
    println!("cred3 = {:?}", cred3);
    assert!(th.ba.verify_lox(&cred3));

    // Get our bridges: recover the bucket id and key from the
    // credential's bucket attribute and decrypt our bucket out of the
    // encrypted bridge table.
    let (id, key) = bridge_table::from_scalar(cred3.bucket).unwrap();
    let encbuckets = th.ba.enc_bridge_table();
    let bucket =
        bridge_table::BridgeTable::decrypt_bucket(id, &key, &encbuckets[id as usize]).unwrap();
    // We should have a Bridge Reachability credential
    assert!(bucket.1.is_some());

    // Oh, no!  Two of our bridges are blocked!
    th.ba.bridge_unreachable(&bucket.0[0], &mut th.bdb);
    th.ba.bridge_unreachable(&bucket.0[2], &mut th.bdb);

    println!("spares = {:?}", th.ba.bridge_table.spares);
    println!("tmig = {:?}", th.ba.trustup_migration_table.table);
    println!("bmig = {:?}", th.ba.blockage_migration_table.table);
    println!("openinv = {:?}\n", th.bdb.openinv_buckets);

    // Time passes
    th.advance_days(1);

    // Re-decrypt the same bucket after the daily update.
    let encbuckets2 = th.ba.enc_bridge_table();
    let bucket2 =
        bridge_table::BridgeTable::decrypt_bucket(id, &key, &encbuckets2[id as usize]).unwrap();
    // We should no longer have a Bridge Reachability credential
    assert!(bucket2.1.is_none());

    // See about getting a Migration credential for the blockage
    let (_block_perf_stat, migration) = th.check_blockage(&cred3);

    println!("migration = {:?}", migration);

    // Migrate; the resulting credential must verify.
    let (_four_perf_stat, cred4) = th.blockage_migration(&cred3, &migration);

    println!("cred4 = {:?}", cred4);
    assert!(th.ba.verify_lox(&cred4));
}
|
||||
|
||||
fn print_test_results(perf_stat: PerfStat) {
|
||||
println!("Request size = {:?} bytes", perf_stat.req_len);
|
||||
println!("Request time = {:?}", perf_stat.req_t);
|
||||
println!("Response size = {:?} bytes", perf_stat.resp_len);
|
||||
println!("Response time = {:?}", perf_stat.resp_t);
|
||||
println!("Response handle time = {:?}", perf_stat.resp_handle_t);
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
use lox::dup_filter;
|
||||
use lox::dup_filter::SeenType::{Fresh, Seen};
|
||||
use lox::BridgeDb;
|
||||
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
|
||||
// Builds a BridgeDb with a handful of open-invitation bucket ids, hands
// out one invitation, and runs it through signature verification,
// printing both for inspection.
#[test]
fn test_bridgedb() {
    let mut bdb = BridgeDb::new();
    for &bucket in [1u32, 5, 7, 12, 19, 20, 22].iter() {
        bdb.insert_openinv(bucket);
    }
    let invitation = bdb.invite();
    println!("{:?}", invitation);
    let verification = BridgeDb::verify(invitation, bdb.pubkey);
    println!("{:?}", verification);
}
|
||||
|
||||
// Exercises DupFilter semantics as observable here: `check` reports
// Fresh/Seen without recording the id (s1 is still Fresh to `filter`
// right after a Fresh `check`), while `filter` reports AND records it,
// after which both calls report Seen.
#[test]
fn test_dup_filter() {
    // Two independent filters over Scalar ids.
    let mut df1: dup_filter::DupFilter<Scalar> = Default::default();
    let mut df2: dup_filter::DupFilter<Scalar> = Default::default();
    let mut rng = rand::thread_rng();
    // Five random ids (distinct with overwhelming probability).
    let s1 = Scalar::random(&mut rng);
    let s2 = Scalar::random(&mut rng);
    let s3 = Scalar::random(&mut rng);
    let s4 = Scalar::random(&mut rng);
    let s5 = Scalar::random(&mut rng);
    // Check basic behaviour
    assert_eq!(df1.check(&s1), Fresh);
    assert_eq!(df1.filter(&s1), Fresh);
    assert_eq!(df1.check(&s1), Seen);
    assert_eq!(df1.filter(&s1), Seen);
    // Ensure different instances of DupFilter have different tables
    assert_eq!(df2.check(&s1), Fresh);
    assert_eq!(df2.filter(&s1), Fresh);
    assert_eq!(df2.filter(&s1), Seen);
    assert_eq!(df2.check(&s1), Seen);
    // Check multiple ids
    assert_eq!(df1.check(&s2), Fresh);
    assert_eq!(df1.filter(&s3), Fresh);
    assert_eq!(df1.filter(&s4), Fresh);
    assert_eq!(df1.filter(&s3), Seen);
    assert_eq!(df1.check(&s1), Seen);
    assert_eq!(df1.filter(&s1), Seen);
    assert_eq!(df1.filter(&s5), Fresh);
    println!("df1 = {:?}", df1);
    println!("df2 = {:?}", df2);
}
|
Loading…
Reference in New Issue