@@ -21,19 +21,6 @@ use crate::{
/// Number of seconds between refreshing the Kademlia DHT.
const PEER_REFRESH_INTERVAL_SECS: u64 = 30;

- /// **Dria Compute Node**
- ///
- /// Internally, the node will create a new P2P client with the given secret key.
- /// This P2P client, although created synchronously, requires a tokio runtime.
- ///
- /// ### Example
- ///
- /// ```rs
- /// let config = DriaComputeNodeConfig::new();
- /// let mut node = DriaComputeNode::new(config, CancellationToken::new())?;
- /// node.check_services().await?;
- /// node.launch().await?;
- /// ```
pub struct DriaComputeNode {
    pub config: DriaComputeNodeConfig,
    pub p2p: DriaP2PCommander,
@@ -44,10 +31,13 @@ pub struct DriaComputeNode {
}

impl DriaComputeNode {
+     /// Creates a new `DriaComputeNode` with the given configuration and cancellation token.
+     ///
+     /// Returns the node instance together with the p2p client; the p2p client MUST be run in a separate task before this node is used at all.
    pub async fn new(
        config: DriaComputeNodeConfig,
        cancellation: CancellationToken,
-     ) -> Result<(Self, DriaP2PClient)> {
+     ) -> Result<(DriaComputeNode, DriaP2PClient)> {
        // create the keypair from secret key
        let keypair = secret_to_keypair(&config.secret_key);

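To make the new constructor contract concrete, here is a minimal usage sketch. It only uses names that appear elsewhere in this diff (`DriaComputeNodeConfig::default`, `p2p.run`, `node.launch`); the error handling, and the assumption that `launch` ends with the unsubscribe/shutdown sequence shown further below, are illustrative rather than taken verbatim from the crate.

```rs
// Minimal sketch of the new (node, p2p client) pair; names follow the test
// added at the bottom of this diff, everything else is illustrative.
let cancellation = CancellationToken::new();
let (mut node, p2p) =
    DriaComputeNode::new(DriaComputeNodeConfig::default(), cancellation.clone()).await?;

// the p2p client MUST be driven in its own task before the node is used
let p2p_task = tokio::spawn(async move { p2p.run().await });

// the node's main loop runs until the cancellation token fires
node.launch().await?;

// once the node has shut its channels down, the p2p task can be joined
p2p_task.await?;
```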
@@ -139,16 +129,11 @@ impl DriaComputeNode {
        Ok(())
    }

-     /// Returns the list of connected peers.
-     // #[inline(always)]
-     // pub fn peers(
-     //     &self,
-     // ) -> Vec<(
-     //     &dkn_p2p::libp2p_identity::PeerId,
-     //     Vec<&gossipsub::TopicHash>,
-     // )> {
-     //     self.p2p.peers()
-     // }
+     /// Returns the lists of connected peers: `mesh` and `all`.
+     #[inline]
+     pub async fn peers(&self) -> Result<(Vec<PeerId>, Vec<PeerId>)> {
+         self.p2p.peers().await
+     }

    /// Launches the main loop of the compute node.
    /// This method is not expected to return until cancellation occurs.
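For completeness, a small sketch of how the new `peers` accessor might be consumed. Only the `(mesh, all)` return shape comes from the signature above; the interpretation of the two lists and the log line are assumptions.

```rs
// Sketch: the first list is the gossipsub mesh, the second all connected peers (assumed meaning).
let (mesh, all) = node.peers().await?;
log::info!("{} mesh peers / {} connected peers", mesh.len(), all.len());
```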
@@ -285,16 +270,24 @@ impl DriaComputeNode {
        self.unsubscribe(WorkflowHandler::LISTEN_TOPIC).await?;
        self.unsubscribe(WorkflowHandler::RESPONSE_TOPIC).await?;

+         // shutdown channels
+         self.shutdown().await?;
+
+         Ok(())
+     }
+
+     /// Shuts down the channels between the p2p client and the node itself.
+     pub async fn shutdown(&mut self) -> Result<()> {
        // send shutdown signal
+         log::debug!("Sending shutdown command to p2p client.");
        self.p2p.shutdown().await?;

-         // close msg channel
+         // close message channel
        log::debug!("Closing message channel.");
        self.msg_rx.close();

        Ok(())
    }
-
    /// Parses a given raw Gossipsub message to a prepared P2PMessage object.
    /// This prepared message includes the topic, payload, version and timestamp.
    ///
@@ -322,34 +315,47 @@ impl DriaComputeNode {

#[cfg(test)]
mod tests {
-     // use super::*;
-     // use std::env;
-
-     // #[tokio::test]
-     // #[ignore = "run this manually"]
-     // async fn test_publish_message() {
-     //     env::set_var("RUST_LOG", "info");
-     //     let _ = env_logger::try_init();
-
-     //     // create node
-     //     let cancellation = CancellationToken::new();
-     //     let mut node = DriaComputeNode::new(DriaComputeNodeConfig::default(), cancellation.clone())
-     //         .await
-     //         .expect("should create node");
-
-     //     // launch & wait for a while for connections
-     //     log::info!("Waiting a bit for peer setup.");
-     //     tokio::select! {
-     //         _ = node.launch() => (),
-     //         _ = tokio::time::sleep(tokio::time::Duration::from_secs(20)) => cancellation.cancel(),
-     //     }
-     //     log::info!("Connected Peers:\n{:#?}", node.peers());
-
-     //     // publish a dummy message
-     //     let topic = "foo";
-     //     let message = DKNMessage::new("hello from the other side", topic);
-     //     node.subscribe(topic).expect("should subscribe");
-     //     node.publish(message).expect("should publish");
-     //     node.unsubscribe(topic).expect("should unsubscribe");
-     // }
+     use super::*;
+     use std::env;
+
+     #[tokio::test]
+     #[ignore = "run this manually"]
+     async fn test_publish_message() -> eyre::Result<()> {
+         env::set_var("RUST_LOG", "none,dkn_compute=debug,dkn_p2p=debug");
+         let _ = env_logger::builder().is_test(true).try_init();
+
+         // create node
+         let cancellation = CancellationToken::new();
+         let (mut node, p2p) =
+             DriaComputeNode::new(DriaComputeNodeConfig::default(), cancellation.clone())
+                 .await
+                 .expect("should create node");
+
+         // spawn p2p task
+         let p2p_task = tokio::spawn(async move { p2p.run().await });
+
+         // launch & wait for a while for connections
+         log::info!("Waiting a bit for peer setup.");
+         tokio::select! {
+             _ = node.launch() => (),
+             _ = tokio::time::sleep(tokio::time::Duration::from_secs(20)) => cancellation.cancel(),
+         }
+         log::info!("Connected Peers:\n{:#?}", node.peers().await?);
+
+         // publish a dummy message
+         let topic = "foo";
+         let message = DKNMessage::new("hello from the other side", topic);
+         node.subscribe(topic).await.expect("should subscribe");
+         node.publish(message).await.expect("should publish");
+         node.unsubscribe(topic).await.expect("should unsubscribe");
+
+         // close everything
+         log::info!("Shutting down node.");
+         node.p2p.shutdown().await?;
+
+         // wait for task handle
+         p2p_task.await?;
+
+         Ok(())
+     }
}