Update rdsys-api-backend and lox-distributor to match rdsys

onyinyang 2023-09-27 18:45:40 -04:00
parent 8ebc4abc19
commit b6ff0c60e2
GPG Key ID: 156A6435430C2036
5 changed files with 147 additions and 86 deletions

View File

@@ -8,6 +8,7 @@ use lox_library::{
     },
     BridgeAuth, BridgeDb, IssuerPubKey,
 };
+use rdsys_backend::proto::ResourceState;
 use serde::{Deserialize, Serialize};
 use std::{
@@ -16,6 +17,8 @@ use std::{
 };
 use zkp::ProofError;
+
+use crate::resource_parser::{parse_into_bridgelines, sort_for_parsing};

 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct LoxServerContext {
     pub db: Arc<Mutex<BridgeDb>>,
@@ -68,25 +71,45 @@ impl LoxServerContext {
     */

     // Sync resources received from rdsys with the Lox bridgetable
-    pub fn sync_with_bridgetable(&self, functional: Vec<BridgeLine>, failing: Vec<BridgeLine>) {
+    pub fn sync_with_bridgetable(&self, resources: ResourceState) {
         // Check if the resource is already in the Lox bridgetable. If it is, it's probably fine
-        // to replace the existing resource with the incoming one to account for changes,
-        // unless we want to track the number of changes on the lox side?
-        for bridge in functional {
+        // to replace the existing resource with the incoming one to account for changes
+        let mut unaccounted_for_bridges: Vec<u64> = Vec::new();
+        let mut accounted_for_bridges: Vec<u64> = Vec::new();
+        if let Some(working_resources) = resources.working {
+            // ensure all working resources are updated and accounted for
+            // save a list of unaccounted for bridges and deal with them in the next block
+            let bridgelines = parse_into_bridgelines(working_resources);
+            for bridge in bridgelines {
                 let res = self.update_bridge(bridge);
                 if res {
                     println!(
                         "BridgeLine {:?} successfully updated.",
                         bridge.uid_fingerprint
                     );
+                    accounted_for_bridges.push(bridge.uid_fingerprint);
                 // Assume non-failing bridges that are not found in the bridge table are new bridges and save them for later
                 } else {
                     println!("BridgeLine: {:?} not found in Lox's Bridgetable. Save it as a new resource for now!", bridge.uid_fingerprint);
                     self.append_extra_bridges(bridge);
                 }
             }
-        // Next, handle the failing bridges. If resource last passed tests 3 hours ago, it should be replaced
-        // with a working resource and be removed from the bridgetable.
+        }
+        if let Some(not_working_resources) = resources.not_working {
+            let (grace_period, failing) = sort_for_parsing(not_working_resources);
+            // Update bridges in the bridge table that are failing but within the grace period
+            for bridge in grace_period {
+                let res = self.update_bridge(bridge);
+                if res {
+                    println!(
+                        "BridgeLine {:?} successfully updated.",
+                        bridge.uid_fingerprint
+                    );
+                    accounted_for_bridges.push(bridge.uid_fingerprint);
+                }
+            }
+            // Next, handle the failing bridges. If resource last passed tests >=ACCEPTED_HOURS_OF_FAILURE ago,
+            // it should be replaced with a working resource and be removed from the bridgetable.
             for bridge in failing {
                 let res = self.replace_with_new(bridge);
                 if res == lox_library::ReplaceSuccess::Replaced {
@@ -94,6 +117,7 @@ impl LoxServerContext {
                         "BridgeLine {:?} successfully replaced.",
                         bridge.uid_fingerprint
                     );
+                    accounted_for_bridges.push(bridge.uid_fingerprint);
                 } else if res == lox_library::ReplaceSuccess::NotReplaced {
                     // Add the bridge to the list of to_be_replaced bridges in the Lox context and try
                     // again to replace at the next update (nothing changes in the Lox Authority)
@@ -102,6 +126,7 @@ impl LoxServerContext {
                         bridge.uid_fingerprint
                     );
                     self.new_to_be_replaced_bridge(bridge);
+                    accounted_for_bridges.push(bridge.uid_fingerprint);
                 } else {
                     // NotFound
                     assert!(
@@ -114,6 +139,14 @@ impl LoxServerContext {
                     );
                 }
             }
+            // Make sure that all bridges are accounted for
+            let ba_clone = self.ba.lock().unwrap();
+            let total_reachable = ba_clone.bridge_table.reachable.len();
+            if total_reachable > accounted_for_bridges.len() {
+                // Search for extra fingerprints, assume those bridges are gone and remove?
+            } else if total_reachable < accounted_for_bridges.len() {
+                println!("Something unexpected occurred: The number of reachable bridges should not be less than those updated from rdsys");
+            }
             // Finally, assign any extra_bridges to new buckets if there are enough
             while self.extra_bridges.lock().unwrap().len() >= MAX_BRIDGES_PER_BUCKET {
                 let bucket = self.remove_extra_bridges();
@@ -122,6 +155,7 @@ impl LoxServerContext {
                 self.add_spare_bucket(bucket);
             }
         }
+    }

     pub fn append_extra_bridges(&self, bridge: BridgeLine) {
         let mut extra_bridges = self.extra_bridges.lock().unwrap();
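The accounting step added above compares the reachable bridges already in the Lox bridge table against the fingerprints rdsys just reported, and leaves a TODO for dealing with any extras. A minimal sketch of that comparison, outside the distributor (find_unaccounted_for and the plain HashSet<u64> input are illustrative stand-ins, not the lox_context API, whose reachable table is keyed differently):

use std::collections::HashSet;

// Illustrative only: collect fingerprints that are reachable in the bridge table
// but were not mentioned in the latest ResourceState from rdsys.
fn find_unaccounted_for(reachable: &HashSet<u64>, accounted_for: &[u64]) -> Vec<u64> {
    let accounted: HashSet<u64> = accounted_for.iter().copied().collect();
    reachable
        .iter()
        .filter(|fingerprint| !accounted.contains(*fingerprint))
        .copied()
        .collect()
}

fn main() {
    let reachable: HashSet<u64> = [1, 2, 3].into_iter().collect();
    let accounted_for = vec![1, 3];
    // Bridge 2 is still in the table but was not updated from rdsys this round.
    assert_eq!(find_unaccounted_for(&reachable, &accounted_for), vec![2]);
}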

View File

@@ -6,7 +6,7 @@ use hyper::{
     Body, Request, Response, Server,
 };

-use rdsys_backend::{proto::Resource, request_resources};
+use rdsys_backend::{proto::ResourceState, request_resources};
 use serde::Deserialize;
 use std::{
@@ -27,8 +27,6 @@ use tokio::{
     time::{interval, sleep},
 };

-use crate::resource_parser::sort_for_parsing;
-
 async fn shutdown_signal() {
     tokio::signal::ctrl_c()
         .await
@@ -98,7 +96,7 @@ struct ResourceInfo {
 // TODO: ensure this stream gracefully shutdowns on the ctrl_c command.
 async fn rdsys_stream(
     rtype: ResourceInfo,
-    tx: mpsc::Sender<Vec<Resource>>,
+    tx: mpsc::Sender<ResourceState>,
     mut kill: broadcast::Receiver<()>,
 ) {
     tokio::select! {
@@ -108,7 +106,7 @@ async fn rdsys_stream(
     }
 }

-async fn rdsys_request(rtype: ResourceInfo, tx: mpsc::Sender<Vec<Resource>>) {
+async fn rdsys_request(rtype: ResourceInfo, tx: mpsc::Sender<ResourceState>) {
     let mut interval = interval(Duration::from_secs(30));
     loop {
         interval.tick().await;
@@ -126,7 +124,7 @@ async fn rdsys_request(rtype: ResourceInfo, tx: mpsc::Sender<Vec<Resource>>) {
 async fn rdsys_bridge_parser(
     rdsys_tx: mpsc::Sender<Command>,
-    rx: mpsc::Receiver<Vec<Resource>>,
+    rx: mpsc::Receiver<ResourceState>,
     mut kill: broadcast::Receiver<()>,
 ) {
     tokio::select! {
@@ -137,7 +135,7 @@ async fn rdsys_bridge_parser(
 // Parse Bridges receives a Vec<Resource> from rdsys_sender and sends it to the
 // Context Manager to be parsed and added to the BridgeDB
-async fn parse_bridges(rdsys_tx: mpsc::Sender<Command>, mut rx: mpsc::Receiver<Vec<Resource>>) {
+async fn parse_bridges(rdsys_tx: mpsc::Sender<Command>, mut rx: mpsc::Receiver<ResourceState>) {
     loop {
         let resources = rx.recv().await.unwrap();
         let cmd = Command::Rdsys { resources };
@@ -180,22 +178,24 @@ async fn context_manager(
         match cmd {
             Rdsys { resources } => {
                 // If the bridgetable is not being loaded from an existing database, we will populate the
-                // bridgetable with all of the bridges received from rdsys
+                // bridgetable with all of the working bridges received from rdsys.
                 if context.bridgetable_is_empty() {
-                    let bridgelines = parse_into_bridgelines(resources);
+                    if let Some(working_resources) = resources.working {
+                        let bridgelines = parse_into_bridgelines(working_resources);
                         let (buckets, leftovers) = parse_into_buckets(bridgelines);
                         for leftover in leftovers {
                             context.append_extra_bridges(leftover);
                         }
                         context.populate_bridgetable(buckets, bridge_config.percent_spares);
                         // otherwise, we need to sync the existing bridgetable with the resources we receive from
                         // rdsys and ensure that all functioning bridges are correctly placed in the bridgetable
                         // those that have changed are updated and those that have been failing tests for an extended
                         // period of time are removed.
                         // If bridges are labelled as blocked_in, we should also handle blocking behaviour.
+                    }
                 } else {
-                    let (functional, failing) = sort_for_parsing(resources);
-                    context.sync_with_bridgetable(functional, failing);
+                    context.sync_with_bridgetable(resources);
                 }
                 // Handle any bridges that are leftover in the bridge authority from the sync
                 context.allocate_leftover_bridges();
@@ -225,7 +225,7 @@
 #[derive(Debug)]
 enum Command {
     Rdsys {
-        resources: Vec<Resource>,
+        resources: ResourceState,
     },
     Request {
         req: Request<Body>,
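The type threaded through these functions is the payload of an ordinary tokio mpsc channel: rdsys_request now sends one ResourceState per 30-second poll instead of a Vec<Resource>, and parse_bridges wraps whatever it receives in Command::Rdsys. A stripped-down sketch of that hand-off (LocalState is a stand-in for rdsys_backend::proto::ResourceState; assumes tokio's macros and sync features, which lox-distributor already uses):

use tokio::sync::mpsc;

// Stand-in for rdsys_backend::proto::ResourceState, just to show the channel typing.
#[derive(Debug)]
struct LocalState {
    working: Option<Vec<String>>,
    not_working: Option<Vec<String>>,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<LocalState>(32);

    // In the distributor this send happens inside rdsys_request after each fetch.
    tx.send(LocalState {
        working: Some(vec!["obfs4 bridge A".to_string()]),
        not_working: None,
    })
    .await
    .unwrap();
    drop(tx);

    // parse_bridges-style receiver: forward each state on for syncing.
    while let Some(state) = rx.recv().await {
        println!("received resource state: {:?}", state);
    }
}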

View File

@@ -77,24 +77,28 @@ pub fn parse_into_buckets(
 // they were passing tests. Before passing them back to the calling function, they are parsed into
 // BridgeLines
 pub fn sort_for_parsing(resources: Vec<Resource>) -> (Vec<BridgeLine>, Vec<BridgeLine>) {
-    let mut functional: Vec<Resource> = Vec::new();
+    let mut grace_period: Vec<Resource> = Vec::new();
     let mut failing: Vec<Resource> = Vec::new();
     for resource in resources {
-        if resource.last_passed + Duration::hours(ACCEPTED_HOURS_OF_FAILURE) >= Utc::now() {
-            functional.push(resource);
+        // TODO: Maybe filter for untested resources first if last_passed alone would skew
+        // the filter in an unintended direction
+        if resource.test_result.last_passed + Duration::hours(ACCEPTED_HOURS_OF_FAILURE)
+            >= Utc::now()
+        {
+            grace_period.push(resource);
         } else {
             failing.push(resource);
         }
     }
-    let functional_bridgelines = parse_into_bridgelines(functional);
+    let grace_period_bridgelines = parse_into_bridgelines(grace_period);
     let failing_bridgelines = parse_into_bridgelines(failing);
-    (functional_bridgelines, failing_bridgelines)
+    (grace_period_bridgelines, failing_bridgelines)
 }

 #[cfg(test)]
 mod tests {
-    use rdsys_backend::proto::Resource;
+    use rdsys_backend::proto::{Resource, TestResults};
     use std::collections::HashMap;

     use chrono::{Duration, Utc};
@@ -119,7 +123,9 @@ mod tests {
         Resource {
             r#type: String::from(rtype),
             blocked_in: HashMap::new(),
+            test_result: TestResults {
                 last_passed: Utc::now() - Duration::hours(last_passed),
+            },
             protocol: String::from("tcp"),
             address: String::from(address),
             port: port,
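The split above hinges on one cutoff: a not-working resource whose test_result.last_passed is recent enough stays in the grace period and is merely updated, otherwise it is queued for replacement. A self-contained sketch of that predicate (within_grace_period is an illustrative name; the value 3 is assumed from the old "3 hours" comment, the real ACCEPTED_HOURS_OF_FAILURE constant lives elsewhere in lox-distributor):

use chrono::{DateTime, Duration, Utc};

// Assumed value, mirroring the lox-distributor constant referenced in the diff.
const ACCEPTED_HOURS_OF_FAILURE: i64 = 3;

// A resource that last passed its tests within the accepted window is still updated
// in place; anything older is handed to replace_with_new.
fn within_grace_period(last_passed: DateTime<Utc>) -> bool {
    last_passed + Duration::hours(ACCEPTED_HOURS_OF_FAILURE) >= Utc::now()
}

fn main() {
    let fresh = Utc::now() - Duration::hours(1);
    let stale = Utc::now() - Duration::hours(12);
    assert!(within_grace_period(fresh));
    assert!(!within_grace_period(stale));
}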

View File

@@ -267,8 +267,8 @@ pub async fn request_resources(
     name: String,
     token: String,
     resource_types: Vec<String>,
-) -> Result<Vec<proto::Resource>, Error> {
-    let fetched_resources: Result<Vec<proto::Resource>, Error>;
+) -> Result<proto::ResourceState, Error> {
+    let fetched_resources: Result<proto::ResourceState, Error>;
     let req = proto::ResourceRequest {
         request_origin: name,
         resource_types,
@@ -288,7 +288,7 @@ pub async fn request_resources(
         .unwrap();
     match response.status() {
         reqwest::StatusCode::OK => {
-            fetched_resources = match response.json::<Vec<proto::Resource>>().await {
+            fetched_resources = match response.json::<proto::ResourceState>().await {
                 Ok(fetched_resources) => Ok(fetched_resources),
                 Err(e) => Err(Error::Reqwest(e)),
             };
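With this change, callers of request_resources get back a single proto::ResourceState rather than a Vec<proto::Resource>, and each of its two lists is optional. A small sketch of consuming that result on the caller's side (summarize is an illustrative helper, not part of either crate):

use rdsys_backend::proto::ResourceState;

// Illustrative helper: both lists are Option<Vec<Resource>>, so a missing list
// is treated as empty rather than as an error.
fn summarize(state: &ResourceState) -> (usize, usize) {
    (
        state.working.as_ref().map_or(0, |w| w.len()),
        state.not_working.as_ref().map_or(0, |n| n.len()),
    )
}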

View File

@@ -10,12 +10,18 @@ pub struct ResourceRequest {
     pub resource_types: Vec<String>,
 }

+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct TestResults {
+    pub last_passed: DateTime<Utc>
+}
+
 /// Representation of a bridge resource
 #[derive(Deserialize, PartialEq, Eq, Debug)]
 pub struct Resource {
     pub r#type: String,
     pub blocked_in: HashMap<String, bool>,
-    pub last_passed: DateTime<Utc>,
+    pub test_result: TestResults,
     pub protocol: String,
     pub address: String,
     pub port: u16,
@@ -51,6 +57,13 @@ impl Resource {
     }
 }

+/// A ResourceState holds information about new, changed, or pruned resources
+#[derive(Deserialize, PartialEq, Eq, Debug)]
+pub struct ResourceState {
+    pub working: Option<Vec<Resource>>,
+    pub not_working: Option<Vec<Resource>>,
+}
+
 /// A ResourceDiff holds information about new, changed, or pruned resources
 #[derive(Deserialize, PartialEq, Eq, Debug)]
 pub struct ResourceDiff {
@@ -92,7 +105,7 @@ mod tests {
         let bridge = Resource {
             r#type: String::from("scramblesuit"),
             blocked_in: HashMap::new(),
-            last_passed: "2023-05-30T14:20:28Z".parse::<DateTime<Utc>>().unwrap(),
+            test_result: TestResults { last_passed: "2023-05-30T14:20:28Z".parse::<DateTime<Utc>>().unwrap() },
             protocol: String::from("tcp"),
             address: String::from("216.117.3.62"),
             port: 63174,
@@ -107,7 +120,9 @@ mod tests {
         {
             "type": "scramblesuit",
             "blocked_in": {},
-            "last_passed": "2023-05-30T14:20:28.000+00:00",
+            "test_result" : {
+                "last_passed": "2023-05-30T14:20:28.000+00:00"
+            },
             "protocol": "tcp",
             "address": "216.117.3.62",
             "port": 63174,
@@ -135,7 +150,9 @@ mod tests {
         {
             "type": "obfs2",
             "blocked_in": {},
-            "last_passed": "2023-05-30T11:42:28.000+07:00",
+            "test_result" : {
+                "last_passed": "2023-05-30T11:42:28.000+07:00"
+            },
             "Location": null,
             "protocol": "tcp",
             "address": "176.247.216.207",
@@ -153,7 +170,9 @@ mod tests {
         {
             "type": "obfs2",
             "blocked_in": {},
-            "last_passed": "2023-05-30T12:20:28.000+07:00",
+            "test_result" : {
+                "last_passed": "2023-05-30T12:20:28.000+07:00"
+            },
             "protocol": "tcp",
             "address": "133.69.16.145",
             "port": 58314,
@@ -172,7 +191,9 @@ mod tests {
         {
             "type": "scramblesuit",
             "blocked_in": {},
-            "last_passed": "2023-05-30T14:20:28.000+07:00",
+            "test_result" : {
+                "last_passed": "2023-05-30T14:20:28.000+07:00"
+            },
             "protocol": "tcp",
             "address": "216.117.3.62",
             "port": 63174,