@@ -3,7 +3,7 @@ use crate::{
     Connection, Error,
 };
 use futures_util::StreamExt;
-use nostr_rs_client::{Error as ClientError, Pool, Url};
+use nostr_rs_client::{pool::subscription::PoolSubscriptionId, Pool, Url};
 use nostr_rs_storage_base::Storage;
 use nostr_rs_subscription_manager::SubscriptionManager;
 use nostr_rs_types::{
@@ -18,10 +18,12 @@ use std::{
 };
 use tokio::{
     net::{TcpListener, TcpStream},
-    sync::mpsc::{channel, Receiver, Sender},
-};
-use tokio::{
-    sync::{mpsc, RwLock},
+    sync::{
+        mpsc::{
+            self, {channel, Receiver, Sender},
+        },
+        RwLock,
+    },
     task::JoinHandle,
 };

@@ -47,27 +49,23 @@ pub struct Relayer<T: Storage + Send + Sync + 'static> {
     /// otherwise all messages are ephemeral, making this relayer just a dumb
     /// proxy (which can be useful for privacy), but it won't be able to perform
     /// optimizations such as prefetching content while offline
-    storage: Option<T>,
+    storage: Arc<Option<T>>,
     /// Subscription manager
     subscription_manager: Arc<SubscriptionManager<RelayerSubscriptionId, ()>>,
     /// List of all active connections
-    connections: RwLock<HashMap<ConnectionId, Connection>>,
+    connections: Arc<RwLock<HashMap<ConnectionId, Connection>>>,
     /// This Sender can be used to send requests from anywhere to the relayer.
     send_to_relayer: Sender<(ConnectionId, Request)>,
     /// This Receiver is the way the relayer receives messages
     relayer_receiver: Option<Receiver<(ConnectionId, Request)>>,
+
     /// Client pool
     ///
-    /// A relayer can optionally be connected to a pool of clients to get foreign events.
-    client_pool: Option<(Pool, JoinHandle<()>)>,
-}
-
-impl<T: Storage + Send + Sync + 'static> Drop for Relayer<T> {
-    fn drop(&mut self) {
-        if let Some((_, handle)) = self.client_pool.take() {
-            handle.abort();
-        }
-    }
+    /// A relayer can optionally be connected to a pool of clients to get
+    /// foreign events.
+    client_pool: Option<Pool>,
+    client_pool_receiver: Option<Receiver<(Response, Url)>>,
+    client_pool_subscriptions: RwLock<HashMap<PoolSubscriptionId, (SubscriptionId, ConnectionId)>>,
 }
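
The move from owned fields to `Arc`-wrapped ones is what lets the `broadcast_and_forget` helper (added further down in this diff) hand cheap clones of the shared state to a spawned task instead of borrowing `self`. A minimal sketch of that pattern, using simplified stand-in types rather than the crate's real ones:

```rust
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio::sync::RwLock;

// Stand-in for the relayer: shared state behind Arc, so clones can move
// into background tasks without borrowing `self`.
struct Shared {
    connections: Arc<RwLock<HashMap<u64, String>>>,
}

impl Shared {
    // Fire-and-forget: cloning the Arc is a refcount bump; the spawned
    // task then owns its handle for as long as it needs.
    fn notify_all(&self, msg: String) {
        let connections = self.connections.clone();
        tokio::spawn(async move {
            for (id, name) in connections.read().await.iter() {
                println!("would send {msg:?} to connection {id} ({name})");
            }
        });
    }
}

#[tokio::main]
async fn main() {
    let shared = Shared {
        connections: Arc::new(RwLock::new(HashMap::from([(1, "alice".to_string())]))),
    };
    shared.notify_all("hello".to_string());
    tokio::time::sleep(Duration::from_millis(10)).await; // let the task run
}
```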

 impl<T: Storage + Send + Sync + 'static> Relayer<T> {
@@ -80,25 +78,36 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
     /// and create a network of relayers, reposting events to them and
     /// subscribing to their events.
     pub fn new(storage: Option<T>, client_pool: Option<Pool>) -> Result<Self, Error> {
-        let (sender, receiver) = channel(100_000);
+        let (relayer_sender, relayer_receiver) = channel(100_000);
+
+        let (client_pool_receiver, client_pool) = if let Some(client_pool) = client_pool {
+            let result = client_pool.split()?;
+            (result.0, Some(result.1))
+        } else {
+            let (_, receiver) = mpsc::channel(1);
+            (receiver, None)
+        };
+
         Ok(Self {
-            storage,
+            storage: Arc::new(storage),
             subscription_manager: Default::default(),
-            send_to_relayer: sender.clone(),
-            relayer_receiver: Some(receiver),
+            send_to_relayer: relayer_sender,
+            relayer_receiver: Some(relayer_receiver),
             connections: Default::default(),
-            client_pool: if let Some(client_pool) = client_pool {
-                Some(Self::handle_client_pool(client_pool, sender)?)
-            } else {
-                None
-            },
+            client_pool_receiver: Some(client_pool_receiver),
+            client_pool,
+            client_pool_subscriptions: Default::default(),
         })
     }
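
The `else` arm above relies on a small trick: when no client pool is configured, a placeholder channel is created and its sender is dropped immediately, so the receiver yields `None` right away and the corresponding `tokio::select!` branch in `main` is simply disabled. A self-contained illustration, assuming nothing beyond tokio:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // The sender is bound to `_`, so it is dropped at once and the channel
    // closes; recv() then resolves to None without blocking.
    let (_, mut rx) = mpsc::channel::<u32>(1);
    assert!(rx.recv().await.is_none());

    // Inside tokio::select!, a `Some(v) = rx.recv()` branch whose future
    // yields None fails its pattern and is disabled, leaving the other
    // branches to run as usual.
}
```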

     /// Connects to the relayer pool
     pub async fn connect_to_relayer(&self, url: Url) -> Result<(), Error> {
-        let (client_pool, _) = self.client_pool.as_ref().ok_or(Error::NoClient)?;
-        let _ = client_pool.connect_to(url).await?;
+        let _ = self
+            .client_pool
+            .as_ref()
+            .ok_or(Error::NoClient)?
+            .connect_to(url)
+            .await?;
         Ok(())
     }
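
A hedged usage sketch of the resulting API, using only items that appear in this file (`Relayer`, `connect_to_relayer`, `Error::NoClient`); the helper itself is hypothetical, not part of the crate:

```rust
// Hypothetical helper: join a network of relayers at startup. Fails with
// Error::NoClient if the relayer was constructed without a client pool.
async fn join_network<T: Storage + Send + Sync + 'static>(
    relayer: &Relayer<T>,
    peers: Vec<Url>,
) -> Result<(), Error> {
    for peer in peers {
        relayer.connect_to_relayer(peer).await?;
    }
    Ok(())
}
```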

@@ -117,7 +126,12 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
     ///
     /// This function consumes the object and takes ownership. The returned
     /// JoinHandle can be used to stop the main loop
-    pub fn main(self, server: TcpListener) -> Result<(Arc<Self>, JoinHandle<()>), Error> {
+    pub fn main(mut self, server: TcpListener) -> Result<(Arc<Self>, JoinHandle<()>), Error> {
+        let mut client_pool_receiver = self
+            .client_pool_receiver
+            .take()
+            .ok_or(Error::AlreadySplitted)?;
+
         let (this, mut receiver) = self.split()?;
         let _self = Arc::new(this);
         let this = _self.clone();
@@ -126,22 +140,36 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
         loop {
             tokio::select! {
                 Ok((stream, _)) = server.accept() => {
-                    // accept new external connections
+                    // accept new connections
                    let _ = this.add_connection(None, stream).await;
                 },
-                Some((conn_id, request)) = receiver.recv() => {
-                    // receive messages from the connection pool
-                    if conn_id.is_empty() {
-                        // message received from client pool
-                        if let Request::Event(event) = request {
+                Some((response, _)) = client_pool_receiver.recv() => {
+                    // process messages from another relayer: broadcast and store them
+                    match response {
+                        Response::Event(event) => {
+                            // we received a message from the client pool, store it locally
+                            // and re-broadcast it.
                             let _ = this.broadcast(event.deref()).await;
-                            if let Some(storage) = this.storage.as_ref() {
-                                let _ = storage.store_local_event(&event).await;
-                            }
                         }
-                        continue;
+                        Response::EndOfStoredEvents(sub) => {
+                            let connections = this.connections.read().await;
+                            let (sub_id, connection) = if let Some((sub_id, conn_id)) = this.client_pool_subscriptions.write().await.remove(&(sub.deref().into())) {
+                                if let Some(connection) = connections.get(&conn_id) {
+                                    (sub_id, connection)
+                                } else {
+                                    continue;
+                                }
+                            } else {
+                                continue;
+                            };
+
+                            let _ = connection.send(Response::EndOfStoredEvents(sub_id.into()));
+                        }
+                        _ => {}
                     }
-
+                }
+                Some((conn_id, request)) = receiver.recv() => {
+                    // receive messages from our clients
                     let connections = this.connections.read().await;
                     let connection = if let Some(connection) = connections.get(&conn_id) {
                         connection
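
The main loop now multiplexes three sources directly, instead of funneling pool traffic through the relayer's own channel with an empty `ConnectionId`: new TCP connections, responses from the client pool, and requests from local clients. A self-contained sketch of that shape, in plain tokio with illustrative `String` payloads standing in for the crate's types:

```rust
use std::time::Duration;
use tokio::{net::TcpListener, sync::mpsc};

async fn event_loop(
    server: TcpListener,
    mut pool_rx: mpsc::Receiver<String>,   // stands in for (Response, Url)
    mut client_rx: mpsc::Receiver<String>, // stands in for (ConnectionId, Request)
) {
    loop {
        tokio::select! {
            Ok((_socket, addr)) = server.accept() => {
                println!("new connection from {addr}");
            }
            Some(resp) = pool_rx.recv() => {
                println!("from the pool of foreign relayers: {resp}");
            }
            Some(req) = client_rx.recv() => {
                println!("from a local client: {req}");
            }
            else => break, // every message source is closed
        }
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let server = TcpListener::bind("127.0.0.1:0").await?;
    let (pool_tx, pool_rx) = mpsc::channel(8);
    let (client_tx, client_rx) = mpsc::channel(8);
    pool_tx.send("EVENT from pool".to_string()).await.unwrap();
    client_tx.send("REQ from client".to_string()).await.unwrap();
    // run the loop briefly; a real relayer would run it forever
    let _ = tokio::time::timeout(
        Duration::from_millis(50),
        event_loop(server, pool_rx, client_rx),
    )
    .await;
    Ok(())
}
```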
@@ -162,37 +190,6 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
         Ok((_self, handle))
     }

-    /// Handle the client pool
-    ///
-    /// Main loop to consume messages from the client pool and broadcast them to the local subscribers
-    fn handle_client_pool(
-        client_pool: Pool,
-        send_message_to_relayer: Sender<(ConnectionId, Request)>,
-    ) -> Result<(Pool, JoinHandle<()>), ClientError> {
-        let (mut receiver, client_pool) = client_pool.split()?;
-
-        let handle = tokio::spawn(async move {
-            loop {
-                if let Some((response, _)) = receiver.recv().await {
-                    match response {
-                        Response::Event(event) => {
-                            let _ = send_message_to_relayer.try_send((
-                                ConnectionId::new_empty(),
-                                Request::Event(event.event.into()),
-                            ));
-                        }
-                        Response::EndOfStoredEvents(_) => {}
-                        x => {
-                            println!("x => {:?}", x);
-                        }
-                    }
-                }
-            }
-        });
-
-        Ok((client_pool, handle))
-    }
-
     /// Returns a reference to the internal database
     pub fn get_db(&self) -> &Option<T> {
         &self.storage
@@ -269,7 +266,7 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
             let _ = storage.store_local_event(&event).await;
         }

-        if let Some((client_pool, _)) = self.client_pool.as_ref() {
+        if let Some(client_pool) = self.client_pool.as_ref() {
             // pass the event to the pool of clients, so this relayer can relay
             // their local events to the clients in the network of relayers
             let _ = client_pool.post(event).await;
@@ -284,11 +281,19 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
                 )?;
             }
             Request::Request(request) => {
-                let foreign_subscription = if let Some((client_pool, _)) = self.client_pool.as_ref()
-                {
+                let foreign_subscription = if let Some(client_pool) = self.client_pool.as_ref() {
                     // pass the subscription request to the pool of clients, so this relayer
                     // can relay any unknown event to the clients through their subscriptions
-                    Some(client_pool.subscribe(request.clone()).await)
+                    let foreign_sub_id = client_pool
+                        .subscribe(request.filters.clone().into())
+                        .await?;
+
+                    self.client_pool_subscriptions.write().await.insert(
+                        foreign_sub_id.clone(),
+                        (request.subscription_id.clone(), connection.get_conn_id()),
+                    );
+
+                    Some(foreign_sub_id)
                 } else {
                     None
                 };
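
Because the pool's EndOfStoredEvents for a foreign subscription arrives later and asynchronously, the relayer has to remember which local subscription and connection originated it; that is the bookkeeping `client_pool_subscriptions` carries. A simplified model of it, with plain `String`/`u64` keys instead of the crate's newtypes:

```rust
use std::collections::HashMap;
use tokio::sync::RwLock;

// foreign subscription id -> (local subscription id, connection id)
struct SubRouter {
    map: RwLock<HashMap<String, (String, u64)>>,
}

impl SubRouter {
    async fn register(&self, foreign: String, local: String, conn: u64) {
        self.map.write().await.insert(foreign, (local, conn));
    }

    // Removing on lookup makes EOSE forwarding one-shot per subscription,
    // mirroring the `remove` in the main loop above.
    async fn route_eose(&self, foreign: &str) -> Option<(String, u64)> {
        self.map.write().await.remove(foreign)
    }
}

#[tokio::main]
async fn main() {
    let router = SubRouter { map: RwLock::new(HashMap::new()) };
    router.register("pool-sub-1".into(), "local-sub-9".into(), 42).await;
    assert_eq!(
        router.route_eose("pool-sub-1").await,
        Some(("local-sub-9".into(), 42))
    );
    assert!(router.route_eose("pool-sub-1").await.is_none()); // one-shot
}
```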
@@ -315,8 +320,10 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
                 }
             }

-            let _ = connection
-                .send(relayer::EndOfStoredEvents(request.subscription_id.clone()).into());
+            if foreign_subscription.is_none() {
+                let _ = connection
+                    .send(relayer::EndOfStoredEvents(request.subscription_id.clone()).into());
+            }

             connection
                 .subscribe(
@@ -343,6 +350,38 @@ impl<T: Storage + Send + Sync + 'static> Relayer<T> {
     }

     #[inline]
+    /// A non-blocking version of broadcast
+    #[allow(dead_code)]
+    fn broadcast_and_forget(&self, event: Event) {
+        let storage = self.storage.clone();
+        let connections = self.connections.clone();
+        let subscription_manager = self.subscription_manager.clone();
+
+        tokio::spawn(async move {
+            if let Some(storage) = storage.as_ref() {
+                if !storage.store(&event).await.unwrap_or_default() {
+                    return;
+                }
+            }
+
+            let connections = connections.read().await;
+            for RelayerSubscriptionId((sub_id, conn_id)) in
+                subscription_manager.get_subscribers(&event).await
+            {
+                if let Some(connection) = connections.get(&conn_id) {
+                    let _ = connection.send(
+                        relayer::Event {
+                            subscription_id: sub_id,
+                            event: event.clone(),
+                        }
+                        .into(),
+                    );
+                }
+            }
+        });
+    }
+
+    #[inline]
     /// Broadcast a given event to all local subscribers
     pub async fn broadcast(&self, event: &Event) -> Result<bool, Error> {
         if let Some(storage) = self.storage.as_ref() {
@@ -1012,18 +1051,19 @@ mod test {
         let (main_client, _main_client_inscope) =
             Pool::new_with_clients(vec![main_relayer]).expect("valid pool");

-        let _sub = reader_client.subscribe(Default::default()).await;
+        let _sub = reader_client
+            .subscribe(Default::default())
+            .await
+            .expect("valid subscription");

         sleep(Duration::from_millis(20)).await;

-        for _ in 0..3 {
-            assert!(reader_client
-                .try_recv()
-                .map(|(r, _)| r)
-                .expect("valid message")
-                .as_end_of_stored_events()
-                .is_some());
-        }
+        assert!(reader_client
+            .try_recv()
+            .map(|(r, _)| r)
+            .expect("valid message")
+            .as_end_of_stored_events()
+            .is_some());
         assert!(reader_client.try_recv().is_none());

         let account1 = Account::default();
@@ -1083,7 +1123,7 @@ mod test {
         // connected to the main relayer
         let (mut main_client, _in_scope) =
             Pool::new_with_clients(vec![main_relayer]).expect("valid client");
-        let _sub = main_client.subscribe(Default::default()).await;
+        let _sub = main_client.subscribe(Default::default()).await.expect("valid subscription");

         sleep(Duration::from_millis(10)).await;
         assert!(main_client