Begin implementing Troll Patrol handler
parent 503c026964
commit 6a0ff0d8a2
@@ -36,6 +36,10 @@ prometheus = "0.13.3"
 sled = "0.34.7"
 prometheus-client = "0.22.0"
 thiserror = "1"
+curve25519-dalek = { version = "4", default-features = false, features = ["serde", "rand_core", "digest"] }
+troll-patrol = { git = "https://git-crysp.uwaterloo.ca/vvecna/troll-patrol", version = "0.1.0" }
+array-bytes = "6.2.0"
+sha1 = "0.10"
 
 [dependencies.chrono]
 version = "0.4.31"
@@ -4,6 +4,7 @@
 
     },
    "lox_authority_port": 8001,
+   "troll_patrol_port": 8002,
    "metrics_port": 5222,
    "bridge_config": {
        "watched_blockages": [
@@ -5,7 +5,9 @@ use crate::{lox_context, DbConfig};
 use chrono::{naive::Days, DateTime, Local, NaiveDateTime, Utc};
 use lox_library::{BridgeAuth, BridgeDb};
 use sled::IVec;
+use std::collections::HashMap;
 use thiserror::Error;
+use troll_patrol::bridge_info::BridgeInfo as TPBridgeInfo;
 
 #[derive(Error, Debug)]
 pub enum LoxDBError {
@@ -78,6 +80,9 @@ impl DB {
             ba: Arc::new(Mutex::new(new_ba)),
             extra_bridges: Arc::new(Mutex::new(Vec::new())),
             to_be_replaced_bridges: Arc::new(Mutex::new(Vec::new())),
+            tp_bridge_infos: Arc::new(Mutex::new(
+                HashMap::<[u8; 20], TPBridgeInfo>::new(),
+            )),
             metrics,
         };
     }
@@ -169,6 +174,7 @@ mod tests {
 
     #[test]
     fn test_write_context() {
+        // TODO: Fix db_test_file.json
         env::set_var("TEST_FILE_PATH", "db_test_file.json");
         let (mut lox_db, _context) =
             DB::open_new_or_existing_db(DbConfig::default(), None, Metrics::default()).unwrap();
@@ -1,9 +1,13 @@
+// Allow points to be capital letters
+#![allow(non_snake_case)]
+
+use curve25519_dalek::{ristretto::RistrettoBasepointTable, Scalar};
 use hyper::{body::Bytes, header::HeaderValue, Body, Response};
 use lox_library::{
     bridge_table::{BridgeLine, EncryptedBucket, MAX_BRIDGES_PER_BUCKET},
     proto::{
         blockage_migration, check_blockage, issue_invite, level_up, migration, open_invite,
-        redeem_invite, trust_promotion,
+        positive_report, redeem_invite, trust_promotion,
     },
     BridgeAuth, BridgeDb, IssuerPubKey, OpenInvitationError,
 };
@@ -14,10 +18,15 @@ use lox_zkp::ProofError;
 use serde_json::json;
 use std::{
     cmp::Ordering,
-    collections::HashMap,
+    collections::{BTreeMap, HashMap},
+    ops::DerefMut,
     sync::{Arc, Mutex},
 };
 
+use troll_patrol::{
+    self, bridge_info::BridgeInfo as TPBridgeInfo, negative_report::*, positive_report::*,
+};
+
 use crate::metrics::Metrics;
 use crate::resource_parser::{parse_into_bridgelines, sort_for_parsing};
 
@@ -27,6 +36,8 @@ pub struct LoxServerContext {
     pub ba: Arc<Mutex<BridgeAuth>>,
     pub extra_bridges: Arc<Mutex<Vec<BridgeLine>>>,
     pub to_be_replaced_bridges: Arc<Mutex<Vec<BridgeLine>>>,
+    // Map of bridge fingerprint to values needed to verify TP reports
+    pub tp_bridge_infos: Arc<Mutex<HashMap<[u8; 20], TPBridgeInfo>>>,
     #[serde(skip)]
     pub metrics: Metrics,
 }
@@ -735,6 +746,113 @@ impl LoxServerContext {
         self.advance_days_test(req);
         self.send_today()
     }
+
+    // Troll Patrol-related tasks
+
+    // Verify one negative report, return true if verification succeeds
+    pub fn verify_negative_report(&self, report: NegativeReport) -> bool {
+        match self
+            .tp_bridge_infos
+            .lock()
+            .unwrap()
+            .get(&report.fingerprint)
+        {
+            Some(bridge_info) => report.verify(&bridge_info),
+            None => false,
+        }
+    }
+
+    // Verify multiple negative reports, return count of verified reports
+    // Note: This should be called on reports with the same (bridge, country, date)
+    pub fn verify_negative_reports(self, request: Bytes) -> Response<Body> {
+        let mut reports: BTreeMap<String, u32> = match serde_json::from_slice(&request) {
+            Ok(req) => req,
+            Err(e) => {
+                let response = json!({"error": e.to_string()});
+                let val = serde_json::to_string(&response).unwrap();
+                return prepare_header(val);
+            }
+        };
+        let mut count: u32 = 0;
+        while let Some((serializable_report, num_reports)) = reports.pop_first() {
+            let val = {
+                match NegativeReport::from_json(serializable_report) {
+                    Ok(report) => {
+                        let fingerprint = array_bytes::bytes2hex("", &report.fingerprint);
+                        if self.verify_negative_report(report) {
+                            println!(
+                                "Report with fingerprint {} passed verification",
+                                fingerprint
+                            );
+                            1
+                        } else {
+                            println!(
+                                "Report with fingerprint {} failed verification",
+                                fingerprint
+                            );
+                            0
+                        }
+                    }
+                    Err(_) => 0,
+                }
+            };
+            count += val * num_reports;
+        }
+        prepare_header(count.to_string())
+    }
+
+    // Verify one positive report, return true if verification succeeds
+    pub fn verify_positive_report(
+        &self,
+        report: PositiveReport,
+        Htable: &RistrettoBasepointTable,
+    ) -> bool {
+        let mut binding = self.ba.lock().unwrap();
+        let la = binding.deref_mut();
+        match self
+            .tp_bridge_infos
+            .lock()
+            .unwrap()
+            .get(&report.fingerprint)
+        {
+            Some(bridge_info) => report.verify(la, &bridge_info, &Htable),
+            None => false,
+        }
+    }
+
+    pub fn verify_positive_reports(
+        self,
+        request: Bytes,
+        Htables: &mut HashMap<u32, RistrettoBasepointTable>,
+    ) -> Response<Body> {
+        let mut reports: Vec<SerializablePositiveReport> = match serde_json::from_slice(&request) {
+            Ok(req) => req,
+            Err(e) => {
+                let response = json!({"error": e.to_string()});
+                let val = serde_json::to_string(&response).unwrap();
+                return prepare_header(val);
+            }
+        };
+        let mut count: u32 = 0;
+        while let Some(serializable_report) = reports.pop() {
+            let report = serializable_report.to_report();
+            if report.is_ok() {
+                let report = report.unwrap();
+                let Htable = if Htables.contains_key(&report.date) {
+                    Htables.get(&report.date).unwrap().clone()
+                } else {
+                    let H = positive_report::compute_H(report.date);
+                    let Htable: RistrettoBasepointTable = RistrettoBasepointTable::create(&H);
+                    Htables.insert(report.date, Htable.clone());
+                    Htable
+                };
+                if self.verify_positive_report(report, &Htable) {
+                    count += 1;
+                }
+            }
+        }
+        prepare_header(count.to_string())
+    }
 }
 
 // Prepare HTTP Response for successful Server Request
@@ -1,4 +1,8 @@
+// Allow points to be capital letters
+#![allow(non_snake_case)]
+
 use clap::Parser;
+use curve25519_dalek::ristretto::RistrettoBasepointTable;
 use futures::future;
 use hyper::{
     server::conn::AddrStream,
@@ -11,6 +15,7 @@ use rdsys_backend::{proto::ResourceState, request_resources};
 use serde::Deserialize;
 
 use std::{
+    collections::HashMap,
     convert::Infallible,
     fs::File,
     io::BufReader,
@@ -28,6 +33,8 @@ mod request_handler;
 use request_handler::handle;
 mod resource_parser;
 use resource_parser::{parse_into_bridgelines, parse_into_buckets};
+mod troll_patrol_handler;
+use troll_patrol_handler::handle as tp_handle;
 
 use tokio::{
     signal, spawn,
@@ -64,6 +71,7 @@ struct Config {
     db: DbConfig,
     metrics_port: u16,
     lox_authority_port: u16,
+    troll_patrol_port: u16,
     bridge_config: BridgeConfig,
     rtype: ResourceInfo,
 }
@@ -182,12 +190,13 @@ async fn create_context_manager(
     db_config: DbConfig,
     bridge_config: BridgeConfig,
     roll_back_date: Option<String>,
+    Htables: HashMap<u32, RistrettoBasepointTable>,
     metrics: Metrics,
     context_rx: mpsc::Receiver<Command>,
     mut kill: broadcast::Receiver<()>,
 ) {
     tokio::select! {
-        create_context = context_manager(db_config, bridge_config, roll_back_date, metrics, context_rx) => create_context,
+        create_context = context_manager(db_config, bridge_config, roll_back_date, Htables, metrics, context_rx) => create_context,
         _ = kill.recv() => {println!("Shut down context_manager");},
     }
 }
@@ -199,6 +208,7 @@ async fn context_manager(
     db_config: DbConfig,
     bridge_config: BridgeConfig,
     roll_back_date: Option<String>,
+    mut Htables: HashMap<u32, RistrettoBasepointTable>,
     metrics: Metrics,
     mut context_rx: mpsc::Receiver<Command>,
 ) {
@@ -253,6 +263,14 @@ async fn context_manager(
                 lox_db.write_context(context.clone());
                 sleep(Duration::from_millis(1)).await;
             }
+            TpRequest { req, sender } => {
+                let response = tp_handle(context.clone(), &mut Htables, req).await;
+                if let Err(e) = sender.send(response) {
+                    eprintln!("Server Response Error: {:?}", e);
+                };
+                lox_db.write_context(context.clone());
+                sleep(Duration::from_millis(1)).await;
+            }
             Shutdown { shutdown_sig } => {
                 lox_db.write_context(context.clone());
                 println!("Sending Shutdown Signal, all threads should shutdown.");
@@ -273,6 +291,10 @@ enum Command {
         req: Request<Body>,
         sender: oneshot::Sender<Result<Response<Body>, Infallible>>,
     },
+    TpRequest {
+        req: Request<Body>,
+        sender: oneshot::Sender<Result<Response<Body>, Infallible>>,
+    },
     Shutdown {
         shutdown_sig: broadcast::Sender<()>,
     },
@@ -289,6 +311,7 @@ async fn main() {
 
     let (rdsys_tx, context_rx) = mpsc::channel(32);
     let request_tx = rdsys_tx.clone();
+    let tp_request_tx = rdsys_tx.clone();
     let shutdown_cmd_tx = rdsys_tx.clone();
 
     // create the shutdown broadcast channel and clone for every thread
@@ -320,11 +343,17 @@ async fn main() {
     let metrics_handler =
         spawn(async move { start_metrics_collector(metrics_addr, registry, kill_metrics).await });
 
+    // Store a basepoint table for each day's H for TP positive reports.
+    // RistrettoBasepointTables are not serializable, so don't save these to disk.
+    // Just keep a map during each run and recompute when we restart.
+    let Htables = HashMap::<u32, RistrettoBasepointTable>::new();
+
     let context_manager = spawn(async move {
         create_context_manager(
             config.db,
             config.bridge_config,
             args.roll_back_date,
+            Htables,
             metrics,
             context_rx,
             kill_context,
@@ -356,12 +385,39 @@ async fn main() {
         async move { Ok::<_, Infallible>(service) }
     });
 
-    let addr = SocketAddr::from(([127, 0, 0, 1], config.lox_authority_port));
-    let server = Server::bind(&addr).serve(make_service);
+    let tp_make_service = make_service_fn(move |_conn: &AddrStream| {
+        let request_tx = tp_request_tx.clone();
+        let service = service_fn(move |req| {
+            let request_tx = request_tx.clone();
+            let (response_tx, response_rx) = oneshot::channel();
+            let cmd = Command::TpRequest {
+                req,
+                sender: response_tx,
+            };
+            async move {
+                request_tx.send(cmd).await.unwrap();
+                response_rx.await.unwrap()
+            }
+        });
+        async move { Ok::<_, Infallible>(service) }
+    });
+
+    // Public address
+    let pub_addr = SocketAddr::from(([127, 0, 0, 1], config.lox_authority_port));
+    let server = Server::bind(&pub_addr).serve(make_service);
     let graceful = server.with_graceful_shutdown(shutdown_signal());
-    println!("Listening on {}", addr);
-    if let Err(e) = graceful.await {
-        eprintln!("server error: {}", e);
+    // Address for connections from Troll Patrol
+    let tp_addr = SocketAddr::from(([127, 0, 0, 1], config.troll_patrol_port));
+    let tp_server = Server::bind(&tp_addr).serve(tp_make_service);
+    let tp_graceful = tp_server.with_graceful_shutdown(shutdown_signal());
+    println!("Listening on {}", pub_addr);
+    println!("Listening on {}", tp_addr);
+    let (a, b) = future::join(graceful, tp_graceful).await;
+    if a.is_err() {
+        eprintln!("server error: {}", a.unwrap_err());
+    }
+    if b.is_err() {
+        eprintln!("server error: {}", b.unwrap_err());
     }
     future::join_all([
         metrics_handler,
@@ -247,6 +247,10 @@ mod tests {
             ba: Arc::new(Mutex::new(lox_auth)),
             extra_bridges: Arc::new(Mutex::new(Vec::new())),
             to_be_replaced_bridges: Arc::new(Mutex::new(Vec::new())),
+            tp_bridge_infos: Arc::new(Mutex::new(std::collections::HashMap::<
+                [u8; 20],
+                troll_patrol::bridge_info::BridgeInfo,
+            >::new())),
             metrics: Metrics::default(),
         };
         Self { context }
@@ -307,6 +311,7 @@ mod tests {
         let portidx = (rng.next_u32() % 4) as usize;
         res.port = ports[portidx];
         res.uid_fingerprint = rng.next_u64();
+        rng.fill_bytes(&mut res.fingerprint);
         let mut cert: [u8; 52] = [0; 52];
         rng.fill_bytes(&mut cert);
         let infostr: String = format!(
@@ -19,11 +19,9 @@ pub fn parse_into_bridgelines(
         let resource_uid = resource
             .get_uid()
             .expect("Unable to get Fingerprint UID of resource");
-        let fingerprint: [u8; 20] = array_bytes::hex2array(&resource.fingerprint).expect("Failed to parse bridge fingerprint");
-        let infostr: String = format!(
-            "type={} params={:?}",
-            resource.r#type, resource.params,
-        );
+        let fingerprint: [u8; 20] = array_bytes::hex2array(&resource.fingerprint)
+            .expect("Failed to parse bridge fingerprint");
+        let infostr: String = format!("type={} params={:?}", resource.r#type, resource.params,);
         let mut info_bytes: [u8; BRIDGE_INFO_BYTES] = [0; BRIDGE_INFO_BYTES];
 
         info_bytes[..infostr.len()].copy_from_slice(infostr.as_bytes());
@@ -0,0 +1,343 @@
+use crate::lox_context::LoxServerContext;
+use curve25519_dalek::ristretto::RistrettoBasepointTable;
+use hyper::{body, header::HeaderValue, Body, Method, Request, Response, StatusCode};
+use std::{collections::HashMap, convert::Infallible};
+
+// Handle for each Troll Patrol request/protocol
+pub async fn handle(
+    cloned_context: LoxServerContext,
+    Htables: &mut HashMap<u32, RistrettoBasepointTable>,
+    req: Request<Body>,
+) -> Result<Response<Body>, Infallible> {
+    match req.method() {
+        &Method::OPTIONS => Ok(Response::builder()
+            .header("Access-Control-Allow-Origin", HeaderValue::from_static("*"))
+            .header("Access-Control-Allow-Headers", "accept, content-type")
+            .header("Access-Control-Allow-Methods", "POST")
+            .status(200)
+            .body(Body::from("Allow POST"))
+            .unwrap()),
+        _ => match (req.method(), req.uri().path()) {
+            (&Method::POST, "/verifynegative") => Ok::<_, Infallible>({
+                let bytes = body::to_bytes(req.into_body()).await.unwrap();
+                cloned_context.verify_negative_reports(bytes)
+            }),
+            (&Method::POST, "/verifypositive") => Ok::<_, Infallible>({
+                let bytes = body::to_bytes(req.into_body()).await.unwrap();
+                cloned_context.verify_positive_reports(bytes, Htables)
+            }),
+            _ => {
+                // Return 404 not found response.
+                Ok(Response::builder()
+                    .status(StatusCode::NOT_FOUND)
+                    .body(Body::from("Not found"))
+                    .unwrap())
+            }
+        },
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::lox_context;
+    use crate::metrics::Metrics;
+    use base64::{engine::general_purpose, Engine as _};
+    use curve25519_dalek::Scalar;
+    use lox_library::{
+        bridge_table::{self, BridgeLine, BridgeTable},
+        cred::Lox,
+        proto::*,
+        scalar_u32, BridgeAuth, BridgeDb,
+    };
+    use rand::RngCore;
+    use sha1::{Digest, Sha1};
+    use std::{
+        collections::{BTreeMap, HashSet},
+        sync::{Arc, Mutex},
+    };
+    use troll_patrol::{
+        bridge_info::BridgeInfo,
+        negative_report::{NegativeReport, SerializableNegativeReport},
+        positive_report::{PositiveReport, SerializablePositiveReport},
+        BridgeDistributor,
+    };
+
+    use super::*;
+
+    trait TpClient {
+        fn verifynegative(&self, reports: BTreeMap<String, u32>) -> Request<Body>;
+        fn verifypositive(&self, reports: Vec<SerializablePositiveReport>) -> Request<Body>;
+    }
+
+    struct TpClientMock {}
+
+    impl TpClient for TpClientMock {
+        fn verifynegative(&self, reports: BTreeMap<String, u32>) -> Request<Body> {
+            let req = serde_json::to_string(&reports).unwrap();
+            Request::builder()
+                .method("POST")
+                .uri("http://localhost/verifynegative")
+                .body(Body::from(req))
+                .unwrap()
+        }
+
+        fn verifypositive(&self, reports: Vec<SerializablePositiveReport>) -> Request<Body> {
+            let req = serde_json::to_string(&reports).unwrap();
+            Request::builder()
+                .method("POST")
+                .uri("http://localhost/verifypositive")
+                .body(Body::from(req))
+                .unwrap()
+        }
+    }
+
+    struct TestHarness {
+        context: LoxServerContext,
+    }
+
+    impl TestHarness {
+        fn new() -> Self {
+            let mut bridgedb = BridgeDb::new();
+            let mut lox_auth = BridgeAuth::new(bridgedb.pubkey);
+
+            // Make 3 x num_buckets open invitation bridges, in sets of 3
+            for _ in 0..5 {
+                let bucket = [random(), random(), random()];
+                let _ = lox_auth.add_openinv_bridges(bucket, &mut bridgedb);
+            }
+
+            // Add hot_spare more hot spare buckets
+            for _ in 0..5 {
+                let bucket = [random(), random(), random()];
+                let _ = lox_auth.add_spare_bucket(bucket, &mut bridgedb);
+            }
+            // Create the encrypted bridge table
+            lox_auth.enc_bridge_table();
+
+            let context = lox_context::LoxServerContext {
+                db: Arc::new(Mutex::new(bridgedb)),
+                ba: Arc::new(Mutex::new(lox_auth)),
+                extra_bridges: Arc::new(Mutex::new(Vec::new())),
+                to_be_replaced_bridges: Arc::new(Mutex::new(Vec::new())),
+                tp_bridge_infos: Arc::new(Mutex::new(HashMap::<[u8; 20], BridgeInfo>::new())),
+                metrics: Metrics::default(),
+            };
+            Self { context }
+        }
+
+        pub fn generate_bridge_infos(&self) {
+            // We want to ignore empty bridgelines
+            let mut hasher = Sha1::new();
+            hasher.update([0; 20]);
+            let empty_bridgeline_fingerprint: [u8; 20] = hasher.finalize().into();
+
+            let mut lox_auth = self.context.ba.lock().unwrap();
+
+            // Recompute table
+            let mut tp_bridge_infos = self.context.tp_bridge_infos.lock().unwrap();
+            tp_bridge_infos.clear();
+
+            // Go through all buckets and all bridges in buckets, map bridge to
+            // buckets containing it. Note that a bridge may be contained within
+            // multiple buckets (open invitation buckets and invite-only buckets).
+            let buckets = &lox_auth.bridge_table.buckets;
+            for id in buckets.keys() {
+                let bridges = buckets.get(id).unwrap();
+                let key = lox_auth.bridge_table.keys.get(id).unwrap();
+                let bucket = bridge_table::to_scalar(*id, key);
+                for bridge in bridges {
+                    // Get hashed fingerprint
+                    let mut hasher = Sha1::new();
+                    hasher.update(&bridge.fingerprint);
+                    let fingerprint: [u8; 20] = hasher.finalize().into();
+
+                    if fingerprint != empty_bridgeline_fingerprint {
+                        // Add new entry or add bucket to existing entry
+                        if tp_bridge_infos.contains_key(&fingerprint) {
+                            tp_bridge_infos
+                                .get_mut(&fingerprint)
+                                .unwrap()
+                                .buckets
+                                .insert(bucket);
+                        } else {
+                            let mut buckets = HashSet::<Scalar>::new();
+                            buckets.insert(bucket);
+                            tp_bridge_infos.insert(
+                                fingerprint,
+                                BridgeInfo {
+                                    bridge_line: *bridge,
+                                    buckets: buckets,
+                                    pubkey: None, // TODO: add pubkey for signed bridge tokens
+                                },
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn random() -> BridgeLine {
+        let mut rng = rand::thread_rng();
+        let mut res: BridgeLine = BridgeLine::default();
+        // Pick a random 4-byte address
+        let mut addr: [u8; 4] = [0; 4];
+        rng.fill_bytes(&mut addr);
+        // If the leading byte is 224 or more, that's not a valid IPv4
+        // address. Choose an IPv6 address instead (but don't worry too
+        // much about it being well formed).
+        if addr[0] >= 224 {
+            rng.fill_bytes(&mut res.addr);
+        } else {
+            // Store an IPv4 address as a v4-mapped IPv6 address
+            res.addr[10] = 255;
+            res.addr[11] = 255;
+            res.addr[12..16].copy_from_slice(&addr);
+        };
+        let ports: [u16; 4] = [443, 4433, 8080, 43079];
+        let portidx = (rng.next_u32() % 4) as usize;
+        res.port = ports[portidx];
+        res.uid_fingerprint = rng.next_u64();
+        rng.fill_bytes(&mut res.fingerprint);
+        let mut cert: [u8; 52] = [0; 52];
+        rng.fill_bytes(&mut cert);
+        let infostr: String = format!(
+            "obfs4 cert={}, iat-mode=0",
+            general_purpose::STANDARD_NO_PAD.encode(cert)
+        );
+        res.info[..infostr.len()].copy_from_slice(infostr.as_bytes());
+        res
+    }
+
+    async fn body_to_string(res: Response<Body>) -> String {
+        let body_bytes = hyper::body::to_bytes(res.into_body()).await.unwrap();
+        String::from_utf8(body_bytes.to_vec()).unwrap()
+    }
+
+    async fn get_new_credential(th: &mut TestHarness) -> Lox {
+        let inv = th.context.db.lock().unwrap().invite().unwrap();
+        let (req, state) = open_invite::request(&inv);
+        let resp = th
+            .context
+            .ba
+            .lock()
+            .unwrap()
+            .handle_open_invite(req)
+            .unwrap();
+        let (cred, _bridgeline) =
+            open_invite::handle_response(state, resp, &th.context.ba.lock().unwrap().lox_pub)
+                .unwrap();
+        cred
+    }
+
+    async fn level_up(th: &mut TestHarness, cred: &Lox) -> Lox {
+        let current_level = scalar_u32(&cred.trust_level).unwrap();
+        if current_level == 0 {
+            th.context
+                .advance_days_test(trust_promotion::UNTRUSTED_INTERVAL.try_into().unwrap());
+            let mut ba = th.context.ba.lock().unwrap();
+            let (promreq, promstate) =
+                trust_promotion::request(cred, &ba.lox_pub, ba.today()).unwrap();
+            let promresp = ba.handle_trust_promotion(promreq).unwrap();
+            let migcred = trust_promotion::handle_response(promstate, promresp).unwrap();
+            let (migreq, migstate) =
+                migration::request(cred, &migcred, &ba.lox_pub, &ba.migration_pub).unwrap();
+            let migresp = ba.handle_migration(migreq).unwrap();
+            let new_cred = migration::handle_response(migstate, migresp, &ba.lox_pub).unwrap();
+            new_cred
+        } else {
+            th.context.advance_days_test(
+                level_up::LEVEL_INTERVAL[usize::try_from(current_level).unwrap()]
+                    .try_into()
+                    .unwrap(),
+            );
+            let mut ba = th.context.ba.lock().unwrap();
+            let (id, key) = bridge_table::from_scalar(cred.bucket).unwrap();
+            let encbuckets = ba.enc_bridge_table();
+            let bucket =
+                bridge_table::BridgeTable::decrypt_bucket(id, &key, encbuckets.get(&id).unwrap())
+                    .unwrap();
+            let reachcred = bucket.1.unwrap();
+            let (lvreq, lvstate) = level_up::request(
+                cred,
+                &reachcred,
+                &ba.lox_pub,
+                &ba.reachability_pub,
+                ba.today(),
+            )
+            .unwrap();
+            let lvresp = ba.handle_level_up(lvreq).unwrap();
+            let new_cred = level_up::handle_response(lvstate, lvresp, &ba.lox_pub).unwrap();
+            new_cred
+        }
+    }
+
+    #[tokio::test]
+    async fn test_negative_reports() {
+        let mut th = TestHarness::new();
+        th.generate_bridge_infos();
+        let tpc = TpClientMock {};
+        let mut Htables = HashMap::<u32, RistrettoBasepointTable>::new();
+
+        // Get new level 1 credential
+        let cred = get_new_credential(&mut th).await;
+        let cred = level_up(&mut th, &cred).await;
+
+        th.generate_bridge_infos();
+
+        let mut ba = th.context.ba.lock().unwrap();
+
+        // Get bucket
+        let (id, key) = bridge_table::from_scalar(cred.bucket).unwrap();
+        let encbuckets = ba.enc_bridge_table();
+        let bucket =
+            bridge_table::BridgeTable::decrypt_bucket(id, &key, encbuckets.get(&id).unwrap())
+                .unwrap();
+        let bridges = bucket.0;
+
+        // Create random number of negative reports for each bridge in bucket
+        let mut rng = rand::thread_rng();
+        let num_report_1 = rng.next_u32() % 4 + 1;
+        let num_report_2 = rng.next_u32() % 4 + 1;
+        let num_report_3 = rng.next_u32() % 4 + 1;
+        let mut reports = BTreeMap::<String, u32>::new();
+
+        let report_1 =
+            NegativeReport::from_bridgeline(bridges[0], "ru".to_string(), BridgeDistributor::Lox);
+        println!(
+            "report_1: {}, count: {}",
+            array_bytes::bytes2hex("", report_1.fingerprint),
+            num_report_1
+        );
+        reports.insert(report_1.to_json(), num_report_1);
+
+        let report_2 =
+            NegativeReport::from_lox_bucket(bridges[1].fingerprint, cred.bucket, "ru".to_string());
+        println!(
+            "report_2: {}, count: {}",
+            array_bytes::bytes2hex("", report_2.fingerprint),
+            num_report_2
+        );
+        reports.insert(report_2.to_json(), num_report_2);
+
+        let report_3 =
+            NegativeReport::from_lox_credential(bridges[2].fingerprint, cred, "ru".to_string());
+        println!(
+            "report_3: {}, count: {}",
+            array_bytes::bytes2hex("", report_3.fingerprint),
+            num_report_3
+        );
+        reports.insert(report_3.to_json(), num_report_3);
+
+        // TODO: Check reports with invalid fields
+        // TODO: Check well-formed reports with incorrect bridge data
+
+        let request = tpc.verifynegative(reports);
+        let response = handle(th.context.clone(), &mut Htables, request)
+            .await
+            .unwrap();
+        assert_eq!(response.status(), StatusCode::OK);
+        let count: u32 = body_to_string(response).await.parse().unwrap();
+        assert_eq!(num_report_1 + num_report_2 + num_report_3, count);
+    }
+}
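For reference, the new handler accepts POST requests on the troll_patrol_port (8002 in the sample config): /verifynegative takes a JSON map of serialized negative reports to occurrence counts, /verifypositive takes a JSON array of serializable positive reports, and each replies with the number of reports that verified. The sketch below is illustrative only and not part of this commit; it assumes hyper's "client" feature is available and a distributor listening locally on port 8002, and shows how a Troll Patrol instance might submit a batch of negative reports.

// Illustrative sketch (not part of this commit): submit negative reports to
// the new /verifynegative endpoint and read back the verified-report count.
// Assumes hyper's "client" feature and a distributor on troll_patrol_port 8002.
use hyper::{Body, Client, Method, Request};
use std::collections::BTreeMap;

async fn submit_negative_reports(
    reports: BTreeMap<String, u32>, // serialized negative report -> times received
) -> Result<u32, Box<dyn std::error::Error>> {
    let req = Request::builder()
        .method(Method::POST)
        .uri("http://127.0.0.1:8002/verifynegative")
        .body(Body::from(serde_json::to_string(&reports)?))?;
    let resp = Client::new().request(req).await?;
    let body = hyper::body::to_bytes(resp.into_body()).await?;
    // The handler replies with the count of reports that passed verification.
    Ok(String::from_utf8(body.to_vec())?.parse()?)
}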