Process all closed uTP streams in UtpListener and handle the payload in overlay service
ogenev committed May 12, 2022
1 parent e7ece03 commit 52115d4
Showing 4 changed files with 103 additions and 41 deletions.
1 change: 1 addition & 0 deletions newsfragments/325.added.md
@@ -0,0 +1 @@
+Process all closed uTP streams in UtpListener and pass the payload to the overlay service.
45 changes: 44 additions & 1 deletion trin-core/src/portalnet/overlay_service.rs
@@ -19,6 +19,7 @@ use crate::{
utp::stream::UtpListenerRequest,
};

+use crate::utp::{stream::UtpPayload, trin_helpers::UtpStreamId};
use delay_map::HashSetDelay;
use discv5::{
enr::NodeId,
@@ -45,6 +46,10 @@ pub const FIND_CONTENT_MAX_NODES: usize = 32;
/// With even distribution assumptions, 2**17 is enough to put each node (estimating 100k nodes,
/// which is more than 10x the ethereum mainnet node count) into a unique bucket by the 17th bucket index.
const EXPECTED_NON_EMPTY_BUCKETS: usize = 17;
+/// Bucket refresh lookup interval in seconds
+const BUCKET_REFRESH_INTERVAL: u64 = 60;
+/// Interval (in milliseconds) at which closed uTP streams are processed
+const PROCESS_UTP_STREAMS_INTERVAL: u64 = 20;

/// An overlay request error.
#[derive(Clone, Error, Debug)]
@@ -377,7 +382,10 @@ impl<TContentKey: OverlayContentKey + Send, TMetric: Metric + Send>
/// Bucket maintenance: Maintain the routing table (more info documented above function).
async fn start(&mut self) {
// Construct bucket refresh interval
-        let mut bucket_refresh_interval = tokio::time::interval(Duration::from_secs(60));
+        let mut bucket_refresh_interval =
+            tokio::time::interval(Duration::from_secs(BUCKET_REFRESH_INTERVAL));
+        let mut process_utp_streams_interval =
+            tokio::time::interval(Duration::from_millis(PROCESS_UTP_STREAMS_INTERVAL));

loop {
tokio::select! {
@@ -408,6 +416,22 @@ impl<TContentKey: OverlayContentKey + Send, TMetric: Metric + Send>
self.peers_to_ping.insert(node_id);
}
}
+                _ = process_utp_streams_interval.tick() => {
+                    let (tx, rx) = tokio::sync::oneshot::channel::<Vec<(UtpPayload, UtpStreamId)>>();
+
+                    // Ask the uTP listener to process all closed uTP streams and wait for the response
+                    if let Err(err) = self.utp_listener_tx.send(UtpListenerRequest::ProcessClosedStreams(tx)) {
+                        error!("Unable to send ProcessClosedStreams request to uTP listener: {err}");
+                        continue
+                    }
+
+                    match rx.await {
+                        Ok(streams) => {
+                            self.handle_utp_payload(streams);
+                        }
+                        Err(err) => error!("Unable to receive ProcessClosedStreams response from uTP listener: {err}")
+                    }
+                }
_ = OverlayService::<TContentKey, TMetric>::bucket_maintenance_poll(self.protocol.clone(), &self.kbuckets) => {}
_ = bucket_refresh_interval.tick() => {
debug!("[{:?}] Overlay bucket refresh lookup", self.protocol);
@@ -703,6 +727,19 @@ impl<TContentKey: OverlayContentKey + Send, TMetric: Metric + Send>
Ok(accept)
}

+    /// Handle all closed uTP streams. Currently only AcceptStream payloads are processed here;
+    /// FindContent payloads are processed explicitly when we send a FindContent request.
+    fn handle_utp_payload(&self, streams: Vec<(UtpPayload, UtpStreamId)>) {
+        for stream in streams {
+            match stream {
+                (payload, UtpStreamId::AcceptStream(content_keys)) => {
+                    self.process_accept_utp_payload(content_keys, payload);
+                }
+                _ => {}
+            }
+        }
+    }

/// Sends a TALK request via Discovery v5 to some destination node.
fn send_talk_req(&self, request: Request, request_id: OverlayRequestId, destination: Enr) {
let discovery = Arc::clone(&self.discovery);
@@ -734,6 +771,12 @@ impl<TContentKey: OverlayContentKey + Send, TMetric: Metric + Send>
});
}

+    /// Process the accepted uTP payload of an OFFER/ACCEPT stream
+    fn process_accept_utp_payload(&self, content_keys: Vec<Vec<u8>>, payload: UtpPayload) {
+        // TODO: Verify the payload, store the content and propagate gossip.
+        debug!("Processing content keys: {content_keys:?}, with payload: {payload:?}");
+    }

/// Processes an incoming request from some source node.
fn process_incoming_request(&mut self, request: Request, _id: RequestId, source: NodeId) {
// Look up the node in the routing table.
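The new select! branch above is a plain oneshot request/response handshake: the overlay service hands a oneshot sender to the listener over its mpsc channel, then awaits the reply. A minimal, self-contained sketch of that pattern (illustrative names only, not the trin API):

use tokio::sync::{mpsc, oneshot};
use tokio::time::{interval, Duration};

type UtpPayload = Vec<u8>;

enum ListenerRequest {
    ProcessClosedStreams(oneshot::Sender<Vec<UtpPayload>>),
}

#[tokio::main]
async fn main() {
    let (listener_tx, mut listener_rx) = mpsc::unbounded_channel::<ListenerRequest>();

    // Listener side: answer each request with whatever payloads are ready.
    tokio::spawn(async move {
        while let Some(ListenerRequest::ProcessClosedStreams(resp)) = listener_rx.recv().await {
            let _ = resp.send(vec![b"payload".to_vec()]);
        }
    });

    // Overlay side: poll on a fixed interval, mirroring the select! branch.
    let mut tick = interval(Duration::from_millis(20));
    for _ in 0..3 {
        tick.tick().await;
        let (tx, rx) = oneshot::channel();
        if listener_tx.send(ListenerRequest::ProcessClosedStreams(tx)).is_err() {
            eprintln!("uTP listener task has shut down");
            break;
        }
        match rx.await {
            Ok(streams) => println!("received {} closed stream payload(s)", streams.len()),
            Err(err) => eprintln!("no response from listener: {err}"),
        }
    }
}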
91 changes: 55 additions & 36 deletions trin-core/src/utp/stream.rs
@@ -25,7 +25,7 @@ use crate::{
utp::{
packets::{ExtensionType, Packet, PacketType, HEADER_SIZE},
time::{now_microseconds, Delay, Timestamp},
-        trin_helpers::{UtpMessage, UtpMessageId},
+        trin_helpers::{UtpMessage, UtpStreamId},
util::{abs_diff, ewma, generate_sequential_identifiers},
},
};
@@ -62,6 +62,9 @@ const DISCV5_SOCKET_TIMEOUT: u64 = 25;
/// uTP connection id
type ConnId = u16;

+/// uTP payload data
+pub type UtpPayload = Vec<u8>;

pub fn rand() -> u16 {
rand::thread_rng().gen()
}
@@ -111,6 +114,8 @@ pub enum UtpListenerRequest {
FindContentData(ConnId, ByteList),
/// Request to listen for FindContent stream
FindContentStream(ConnId),
+    /// Process all streams where the uTP socket state is "Closed"
+    ProcessClosedStreams(oneshot::Sender<Vec<(UtpPayload, UtpStreamId)>>),
/// Request to listen for Offer stream
OfferStream(ConnId),
}
@@ -122,7 +127,7 @@ pub struct UtpListener {
/// Store all active connections
utp_connections: HashMap<ConnectionKey, UtpSocket>,
/// uTP connection ids to listen for
-    listening: HashMap<ConnId, UtpMessageId>,
+    listening: HashMap<ConnId, UtpStreamId>,
/// Receiver for uTP events sent from the main portal event handler
utp_event_rx: UnboundedReceiver<TalkRequest>,
/// Receiver for uTP requests sent from the overlay layer
@@ -221,9 +226,9 @@ impl UtpListener {
// TODO: Probably there is a better way with lifetimes to pass the HashMap value to a
// different thread without removing the key and re-adding it.
self.listening
-                .insert(conn.sender_connection_id, UtpMessageId::FindContentStream);
+                .insert(conn.sender_connection_id, UtpStreamId::FindContentStream);

-            if let Some(UtpMessageId::FindContentData(Content(content_data))) =
+            if let Some(UtpStreamId::FindContentData(Content(content_data))) =
utp_message_id
{
// We want to send uTP data only if the content is Content(ByteList)
@@ -264,12 +269,17 @@ impl UtpListener {
return;
}

+                    let mut result = Vec::new();
+
let mut buf = [0; BUF_SIZE];
-                    if let Err(msg) = conn.recv(&mut buf).await {
-                        error!("Unable to receive uTP DATA packet: {msg}")
-                    } else {
-                        conn.recv_data_stream
-                            .append(&mut Vec::from(packet.payload()));
+                    match conn.recv(&mut buf).await {
+                        Ok(bytes_read) => {
+                            if let Some(bytes) = bytes_read {
+                                result.extend_from_slice(&buf[..bytes]);
+                                conn.recv_data_stream.append(&mut result);
+                            }
+                        }
+                        Err(err) => error!("Unable to receive uTP DATA packet: {err}"),
}
}
}
@@ -314,24 +324,29 @@ impl UtpListener {
match request {
UtpListenerRequest::FindContentStream(conn_id) => {
self.listening
-                    .insert(conn_id, UtpMessageId::FindContentStream);
+                    .insert(conn_id, UtpStreamId::FindContentStream);
}
UtpListenerRequest::Connect(conn_id, node_id, tx) => {
let conn = self.connect(conn_id, node_id).await;
if tx.send(conn).is_err() {
-                    warn!("Unable to send uTP socket to requester")
+                    error!("Unable to send uTP socket to requester")
};
}
UtpListenerRequest::OfferStream(conn_id) => {
-                self.listening.insert(conn_id, UtpMessageId::OfferStream);
+                self.listening.insert(conn_id, UtpStreamId::OfferStream);
}
UtpListenerRequest::FindContentData(conn_id, content) => {
self.listening
-                    .insert(conn_id, UtpMessageId::FindContentData(Content(content)));
+                    .insert(conn_id, UtpStreamId::FindContentData(Content(content)));
}
UtpListenerRequest::AcceptStream(conn_id, accepted_keys) => {
self.listening
-                    .insert(conn_id, UtpMessageId::AcceptStream(accepted_keys));
+                    .insert(conn_id, UtpStreamId::AcceptStream(accepted_keys));
}
+                UtpListenerRequest::ProcessClosedStreams(tx) => {
+                    if tx.send(self.process_closed_streams()).is_err() {
+                        error!("Unable to send closed uTP streams to requester")
+                    };
+                }
}
}
@@ -355,28 +370,32 @@ impl UtpListener {
}
}

-    // https://github.com/ethereum/portal-network-specs/pull/98\
-    // Currently the way to handle data over uTP isn't finalized yet, so we are going to use the
-    // handle data on connection closed method, as that seems to be the accepted method for now.
-    pub async fn process_utp_byte_stream(&mut self) {
-        let mut utp_connections = self.utp_connections.clone();
-        for (conn_key, conn) in self.utp_connections.iter_mut() {
-            if conn.state == SocketState::Closed {
-                let received_stream = conn.recv_data_stream.clone();
-                debug!("Received data: with len: {}", received_stream.len());
-
-                match self.listening.get(&conn.receiver_connection_id) {
-                    Some(message_type) => {
-                        if let UtpMessageId::AcceptStream(content_keys) = message_type {
-                            // TODO: Implement this with overlay store and decode receiver stream if multiple content values are sent
-                            debug!("Store {content_keys:?}, {received_stream:?}");
-                        }
-                    }
-                    _ => warn!("uTP listening HashMap doesn't have uTP stream message type"),
-                }
-                utp_connections.remove(conn_key);
-            }
-        }
+    /// Return and clean up all active uTP streams where the socket state is "Closed"
+    pub fn process_closed_streams(&mut self) -> Vec<(UtpPayload, UtpStreamId)> {
+        // This appears to be a hot loop. We may need to optimise it and find a better way to
+        // filter by closed connections without cloning all records, for example by applying
+        // data-oriented design principles (Struct of Arrays vs. Array of Structs).
+        self.utp_connections
+            .clone()
+            .iter()
+            .filter(|conn| conn.1.state == SocketState::Closed)
+            .map(|conn| {
+                // Remove the closed connections from active connections
+                let receiver_stream_id = self
+                    .listening
+                    .remove(&conn.1.receiver_connection_id)
+                    .expect("Receiver connection id should match active listening connections.");
+                self.listening
+                    .remove(&conn.1.sender_connection_id)
+                    .expect("Sender connection id should match active listening connections.");
+                let utp_socket = self
+                    .utp_connections
+                    .remove(conn.0)
+                    .expect("uTP socket should match active uTP connections.");
+
+                (utp_socket.recv_data_stream, receiver_stream_id)
+            })
+            .collect()
}
}

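The comment in process_closed_streams flags the clone of the whole connection map as a potential hot spot. One clone-free alternative, sketched here on toy types (hypothetical names, not the trin data structures), is to collect the keys of closed sockets first and then remove them:

use std::collections::HashMap;

#[derive(PartialEq)]
enum SocketState {
    Connected,
    Closed,
}

struct Socket {
    state: SocketState,
    recv_data_stream: Vec<u8>,
}

// Collect the keys of closed sockets first, then remove them one by one,
// so only a small key list is copied rather than every connection record.
fn drain_closed(conns: &mut HashMap<u16, Socket>) -> Vec<Vec<u8>> {
    let closed: Vec<u16> = conns
        .iter()
        .filter(|(_, socket)| socket.state == SocketState::Closed)
        .map(|(key, _)| *key)
        .collect();
    closed
        .into_iter()
        .map(|key| {
            conns
                .remove(&key)
                .expect("key was collected above")
                .recv_data_stream
        })
        .collect()
}

fn main() {
    let mut conns = HashMap::new();
    conns.insert(1, Socket { state: SocketState::Closed, recv_data_stream: b"abc".to_vec() });
    conns.insert(2, Socket { state: SocketState::Connected, recv_data_stream: Vec::new() });

    assert_eq!(drain_closed(&mut conns).len(), 1);
    assert_eq!(conns.len(), 1);
}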
7 changes: 3 additions & 4 deletions trin-core/src/utp/trin_helpers.rs
@@ -51,10 +51,9 @@ pub struct UtpAccept {
pub message: Vec<(Vec<u8>, Vec<u8>)>,
}

-// This is not in a spec, this is just for internally tracking for what portal message
-// negotiated the uTP stream
-#[derive(Debug, Clone)]
-pub enum UtpMessageId {
+/// Used to track which overlay request each uTP stream corresponds to
+#[derive(Debug, Clone, PartialEq)]
+pub enum UtpStreamId {
FindContentStream,
FindContentData(Content),
OfferStream,
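The renamed enum is what ties a drained payload back to the request that opened the stream. A toy dispatch mirroring handle_utp_payload in overlay_service.rs (standalone types, not the trin API):

// Only ACCEPT streams are processed when a closed stream is drained;
// FindContent payloads are handled at the request site instead.
#[derive(Debug, Clone, PartialEq)]
enum UtpStreamId {
    FindContentStream,
    FindContentData(Vec<u8>),
    OfferStream,
    AcceptStream(Vec<Vec<u8>>),
}

fn handle_utp_payload(streams: Vec<(Vec<u8>, UtpStreamId)>) {
    for (payload, id) in streams {
        match id {
            UtpStreamId::AcceptStream(keys) => {
                println!("store {} key(s) from {} payload byte(s)", keys.len(), payload.len())
            }
            _ => {}
        }
    }
}

fn main() {
    handle_utp_payload(vec![
        (b"content".to_vec(), UtpStreamId::AcceptStream(vec![vec![0x01]])),
        (Vec::new(), UtpStreamId::OfferStream),
    ]);
}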
