@@ -4,7 +4,7 @@ use std::{str::FromStr, time::Duration};
use tokio_util::sync::CancellationToken;

use crate::{
-    config::DriaComputeNodeConfig,
+    config::*,
    handlers::*,
    p2p::P2PClient,
    utils::{crypto::secret_to_keypair, AvailableNodes, DKNMessage},
@@ -13,6 +13,19 @@ use crate::{
/// Number of seconds between refreshing the Admin RPC PeerIDs from Dria server.
const RPC_PEER_ID_REFRESH_INTERVAL_SECS: u64 = 30;

+/// **Dria Compute Node**
+///
+/// Internally, the node will create a new P2P client with the given secret key.
+/// This P2P client, although created synchronously, requires a tokio runtime.
+///
+/// ### Example
+///
+/// ```rs
+/// let config = DriaComputeNodeConfig::new();
+/// let mut node = DriaComputeNode::new(config, CancellationToken::new())?;
+/// node.check_services().await?;
+/// node.launch().await?;
+/// ```
pub struct DriaComputeNode {
    pub config: DriaComputeNodeConfig,
    pub p2p: P2PClient,
@@ -22,19 +35,6 @@ pub struct DriaComputeNode {
}

impl DriaComputeNode {
-    /// Create a new compute node with the given configuration and cancellation token.
-    ///
-    /// Internally, the node will create a new P2P client with the given secret key.
-    /// This P2P client, although created synchronously, requires a tokio runtime.
-    ///
-    /// ### Example
-    ///
-    /// ```rs
-    /// let config = DriaComputeNodeConfig::new();
-    /// let mut node = DriaComputeNode::new(config, CancellationToken::new())?;
-    /// node.check_services().await?;
-    /// node.launch().await?;
-    /// ```
    pub async fn new(
        config: DriaComputeNodeConfig,
        cancellation: CancellationToken,
@@ -106,10 +106,10 @@ impl DriaComputeNode {
    /// This method is not expected to return until cancellation occurs.
    pub async fn launch(&mut self) -> Result<()> {
        // subscribe to topics
-        self.subscribe(PINGPONG_LISTEN_TOPIC)?;
-        self.subscribe(PINGPONG_RESPONSE_TOPIC)?;
-        self.subscribe(WORKFLOW_LISTEN_TOPIC)?;
-        self.subscribe(WORKFLOW_RESPONSE_TOPIC)?;
+        self.subscribe(PingpongHandler::LISTEN_TOPIC)?;
+        self.subscribe(PingpongHandler::RESPONSE_TOPIC)?;
+        self.subscribe(WorkflowHandler::LISTEN_TOPIC)?;
+        self.subscribe(WorkflowHandler::RESPONSE_TOPIC)?;

        // main loop, listens for message events in particular
        // the underlying p2p client is expected to handle the rest within its own loop
@@ -127,7 +127,7 @@ impl DriaComputeNode {
            let topic_str = topic.as_str();

            // handle message w.r.t topic
-            if std::matches!(topic_str, PINGPONG_LISTEN_TOPIC | WORKFLOW_LISTEN_TOPIC) {
+            if std::matches!(topic_str, PingpongHandler::LISTEN_TOPIC | WorkflowHandler::LISTEN_TOPIC) {
                // ensure that the message is from a valid source (origin)
                let source_peer_id = match message.source {
                    Some(peer) => peer,
@@ -159,7 +159,7 @@ impl DriaComputeNode {
                let message = match self.parse_message_to_prepared_message(message.clone()) {
                    Ok(message) => message,
                    Err(e) => {
-                        log::error!("Error parsing message: {}", e);
+                        log::error!("Error parsing message: {:?}", e);
                        log::debug!("Message: {}", String::from_utf8_lossy(&message.data));
                        self.p2p.validate_message(&message_id, &peer_id, gossipsub::MessageAcceptance::Ignore)?;
                        continue;
@@ -168,11 +168,11 @@ impl DriaComputeNode {

                // then handle the prepared message
                let handler_result = match topic_str {
-                    WORKFLOW_LISTEN_TOPIC => {
-                        WorkflowHandler::handle_compute(self, message, WORKFLOW_RESPONSE_TOPIC).await
+                    WorkflowHandler::LISTEN_TOPIC => {
+                        WorkflowHandler::handle_compute(self, message).await
                    }
-                    PINGPONG_LISTEN_TOPIC => {
-                        PingpongHandler::handle_compute(self, message, PINGPONG_RESPONSE_TOPIC).await
+                    PingpongHandler::LISTEN_TOPIC => {
+                        PingpongHandler::handle_compute(self, message).await
                    }
                    // TODO: can we do this in a nicer way?
                    // TODO: yes, cast to enum above and let type-casting do the work
@@ -185,11 +185,11 @@ impl DriaComputeNode {
                        self.p2p.validate_message(&message_id, &peer_id, acceptance)?;
                    },
                    Err(err) => {
-                        log::error!("Error handling {} message: {}", topic_str, err);
+                        log::error!("Error handling {} message: {:?}", topic_str, err);
                        self.p2p.validate_message(&message_id, &peer_id, gossipsub::MessageAcceptance::Ignore)?;
                    }
                }
-            } else if std::matches!(topic_str, PINGPONG_RESPONSE_TOPIC | WORKFLOW_RESPONSE_TOPIC) {
+            } else if std::matches!(topic_str, PingpongHandler::RESPONSE_TOPIC | WorkflowHandler::RESPONSE_TOPIC) {
                // since we are responding to these topics, we might receive messages from other compute nodes
                // we can gracefully ignore them and propagate it to others
                log::debug!("Ignoring message for topic: {}", topic_str);
@@ -205,10 +205,10 @@ impl DriaComputeNode {
        }

        // unsubscribe from topics
-        self.unsubscribe(PINGPONG_LISTEN_TOPIC)?;
-        self.unsubscribe(PINGPONG_RESPONSE_TOPIC)?;
-        self.unsubscribe(WORKFLOW_LISTEN_TOPIC)?;
-        self.unsubscribe(WORKFLOW_RESPONSE_TOPIC)?;
+        self.unsubscribe(PingpongHandler::LISTEN_TOPIC)?;
+        self.unsubscribe(PingpongHandler::RESPONSE_TOPIC)?;
+        self.unsubscribe(WorkflowHandler::LISTEN_TOPIC)?;
+        self.unsubscribe(WorkflowHandler::RESPONSE_TOPIC)?;

        Ok(())
    }
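
The diff replaces the free-standing `PINGPONG_*`/`WORKFLOW_*` topic constants with topics exposed on the handler types themselves, and `handle_compute` no longer takes a response topic argument. Below is a minimal, self-contained sketch of that pattern; the `ComputeHandler` trait name and the topic strings are illustrative assumptions, not the crate's actual API.

```rust
// Hypothetical sketch: each handler exposes its gossipsub topics as
// associated constants, so call sites write `PingpongHandler::LISTEN_TOPIC`
// instead of a free constant, and a handler can publish to its own
// RESPONSE_TOPIC without being handed it as an argument.
trait ComputeHandler {
    const LISTEN_TOPIC: &'static str;
    const RESPONSE_TOPIC: &'static str;
}

struct PingpongHandler;
struct WorkflowHandler;

impl ComputeHandler for PingpongHandler {
    // topic names here are placeholders for illustration
    const LISTEN_TOPIC: &'static str = "ping";
    const RESPONSE_TOPIC: &'static str = "pong";
}

impl ComputeHandler for WorkflowHandler {
    const LISTEN_TOPIC: &'static str = "task";
    const RESPONSE_TOPIC: &'static str = "results";
}

fn main() {
    // subscription code can enumerate topics per handler type
    let topics = [
        PingpongHandler::LISTEN_TOPIC,
        PingpongHandler::RESPONSE_TOPIC,
        WorkflowHandler::LISTEN_TOPIC,
        WorkflowHandler::RESPONSE_TOPIC,
    ];
    for topic in topics {
        println!("subscribing to {topic}");
    }
}
```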