-
Notifications
You must be signed in to change notification settings - Fork 301
feat(fortuna): Explorer apis #2649
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 7 commits
f581b27
962350b
b25800b
94320d8
ad30b6b
55af6c1
fcc9f9d
94eb034
7b2cdaf
bff8fb8
7a133fc
ee868a9
e3f51d3
31428b6
57b291a
94ec16f
7e89634
78ba9da
6fb0e3d
2127a0f
28ff807
c463043
bc8fb42
e908da0
967f7bb
47abafb
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,3 +1,8 @@ | ||
| use chrono::DateTime; | ||
| use ethers::types::TxHash; | ||
| use serde::Serialize; | ||
| use std::collections::BTreeMap; | ||
| use utoipa::ToSchema; | ||
| use { | ||
| crate::{ | ||
| chain::reader::{BlockNumber, BlockStatus, EntropyReader}, | ||
|
|
@@ -21,9 +26,10 @@ use { | |
| tokio::sync::RwLock, | ||
| url::Url, | ||
| }; | ||
| pub use {chain_ids::*, index::*, live::*, metrics::*, ready::*, revelation::*}; | ||
| pub use {chain_ids::*, explorer::*, index::*, live::*, metrics::*, ready::*, revelation::*}; | ||
|
|
||
| mod chain_ids; | ||
| mod explorer; | ||
| mod index; | ||
| mod live; | ||
| mod metrics; | ||
|
|
@@ -41,10 +47,159 @@ pub struct ApiMetrics { | |
| pub http_requests: Family<RequestLabel, Counter>, | ||
| } | ||
|
|
||
| #[derive(Clone, Debug, Serialize)] | ||
| pub enum JournalLog { | ||
| Observed { tx_hash: TxHash }, | ||
| FailedToReveal { reason: String }, | ||
| Revealed { tx_hash: TxHash }, | ||
| Landed { block_number: BlockNumber }, | ||
| } | ||
|
|
||
| impl JournalLog { | ||
| pub fn get_tx_hash(&self) -> Option<TxHash> { | ||
| match self { | ||
| JournalLog::Observed { tx_hash } => Some(*tx_hash), | ||
| JournalLog::FailedToReveal { .. } => None, | ||
| JournalLog::Revealed { tx_hash } => Some(*tx_hash), | ||
| JournalLog::Landed { .. } => None, | ||
| } | ||
| } | ||
| } | ||
|
|
||
| #[derive(Clone, Debug, Serialize)] | ||
| pub struct TimedJournalLog { | ||
| pub timestamp: DateTime<chrono::Utc>, | ||
| pub log: JournalLog, | ||
| } | ||
|
|
||
| impl TimedJournalLog { | ||
| pub fn with_current_time(log: JournalLog) -> Self { | ||
| TimedJournalLog { | ||
| timestamp: chrono::Utc::now(), | ||
| log, | ||
| } | ||
| } | ||
| } | ||
|
|
||
| #[derive(Clone, Debug, Serialize, ToSchema)] | ||
| pub struct RequestJournal { | ||
| pub chain_id: ChainId, | ||
| pub sequence: u64, | ||
| pub journal: Vec<TimedJournalLog>, | ||
| } | ||
|
|
||
| type RequestKey = (ChainId, u64); | ||
|
|
||
| #[derive(Default)] | ||
| pub struct History { | ||
| pub by_hash: HashMap<TxHash, Vec<RequestKey>>, | ||
| pub by_chain_and_time: BTreeMap<(ChainId, DateTime<chrono::Utc>), RequestKey>, | ||
| pub by_time: BTreeMap<DateTime<chrono::Utc>, RequestKey>, | ||
| pub by_request_key: HashMap<RequestKey, RequestJournal>, | ||
|
||
| } | ||
|
|
||
| impl History { | ||
| const MAX_HISTORY: usize = 1_000_000; | ||
| pub fn new() -> Self { | ||
| Self::default() | ||
| } | ||
|
|
||
| pub fn add(&mut self, (chain_id, sequence): RequestKey, request_journal_log: TimedJournalLog) { | ||
| let mut new_entry = false; | ||
| let entry = self | ||
| .by_request_key | ||
| .entry((chain_id.clone(), sequence)) | ||
| .or_insert_with(|| { | ||
| new_entry = true; | ||
| RequestJournal { | ||
| chain_id: chain_id.clone(), | ||
| sequence, | ||
| journal: vec![], | ||
| } | ||
| }); | ||
| if let Some(tx_hash) = request_journal_log.log.get_tx_hash() { | ||
| self.by_hash | ||
| .entry(tx_hash) | ||
| .or_default() | ||
| .push((chain_id.clone(), sequence)); | ||
| } | ||
| entry.journal.push(request_journal_log); | ||
| if new_entry { | ||
| let current_time = chrono::Utc::now(); | ||
| self.by_chain_and_time.insert( | ||
| (chain_id.clone(), current_time), | ||
| (chain_id.clone(), sequence), | ||
| ); | ||
| self.by_time | ||
| .insert(current_time, (chain_id.clone(), sequence)); | ||
|
|
||
| if self.by_time.len() > Self::MAX_HISTORY { | ||
|
||
| // TODO | ||
| } | ||
| } | ||
| } | ||
|
|
||
| pub fn get_request_logs(&self, request_key: &RequestKey) -> Option<RequestJournal> { | ||
| self.by_request_key.get(request_key).cloned() | ||
| } | ||
|
|
||
| pub fn get_request_logs_by_tx_hash(&self, tx_hash: TxHash) -> Vec<RequestJournal> { | ||
| self.by_hash | ||
| .get(&tx_hash) | ||
| .map(|request_keys| { | ||
| request_keys | ||
| .iter() | ||
| .map(|request_key| self.by_request_key.get(request_key).unwrap().clone()) | ||
| .collect() | ||
| }) | ||
| .unwrap_or_default() | ||
| } | ||
|
|
||
| pub fn get_latest_requests( | ||
| &self, | ||
| chain_id: Option<&ChainId>, | ||
| limit: u64, | ||
| min_timestamp: Option<DateTime<chrono::Utc>>, | ||
| max_timestamp: Option<DateTime<chrono::Utc>>, | ||
| ) -> Vec<RequestJournal> { | ||
| match chain_id { | ||
| Some(chain_id) => { | ||
| let range = self.by_chain_and_time.range( | ||
| ( | ||
| chain_id.clone(), | ||
| min_timestamp.unwrap_or(DateTime::<chrono::Utc>::MIN_UTC), | ||
| ) | ||
| ..( | ||
| chain_id.clone(), | ||
| max_timestamp.unwrap_or(DateTime::<chrono::Utc>::MAX_UTC), | ||
| ), | ||
| ); | ||
| range | ||
| .rev() | ||
| .take(limit as usize) | ||
| .map(|(_, request_key)| self.by_request_key.get(request_key).unwrap().clone()) | ||
| .collect() | ||
| } | ||
| None => self | ||
| .by_time | ||
| .range( | ||
| min_timestamp.unwrap_or(DateTime::<chrono::Utc>::MIN_UTC) | ||
| ..max_timestamp.unwrap_or(DateTime::<chrono::Utc>::MAX_UTC), | ||
| ) | ||
| .rev() | ||
| .take(limit as usize) | ||
| .map(|(_time, request_key)| self.by_request_key.get(request_key).unwrap().clone()) | ||
| .collect::<Vec<_>>(), | ||
| } | ||
| } | ||
| } | ||
|
|
||
| #[derive(Clone)] | ||
| pub struct ApiState { | ||
| pub chains: Arc<RwLock<HashMap<ChainId, ApiBlockChainState>>>, | ||
|
|
||
| pub history: Arc<RwLock<History>>, | ||
|
|
||
| pub metrics_registry: Arc<RwLock<Registry>>, | ||
|
|
||
| /// Prometheus metrics | ||
|
|
@@ -55,6 +210,7 @@ impl ApiState { | |
| pub async fn new( | ||
| chains: Arc<RwLock<HashMap<ChainId, ApiBlockChainState>>>, | ||
| metrics_registry: Arc<RwLock<Registry>>, | ||
| history: Arc<RwLock<History>>, | ||
| ) -> ApiState { | ||
| let metrics = ApiMetrics { | ||
| http_requests: Family::default(), | ||
|
|
@@ -70,6 +226,7 @@ impl ApiState { | |
| ApiState { | ||
| chains, | ||
| metrics: Arc::new(metrics), | ||
| history, | ||
| metrics_registry, | ||
| } | ||
| } | ||
|
|
@@ -114,6 +271,7 @@ pub enum RestError { | |
| /// The server cannot currently communicate with the blockchain, so is not able to verify | ||
| /// which random values have been requested. | ||
| TemporarilyUnavailable, | ||
| BadFilterParameters(String), | ||
| /// The server is not able to process the request because the blockchain initialization | ||
| /// has not been completed yet. | ||
| Uninitialized, | ||
|
|
@@ -156,6 +314,11 @@ impl IntoResponse for RestError { | |
| "An unknown error occurred processing the request", | ||
| ) | ||
| .into_response(), | ||
| RestError::BadFilterParameters(message) => ( | ||
| StatusCode::BAD_REQUEST, | ||
| format!("Invalid filter parameters: {}", message), | ||
| ) | ||
| .into_response(), | ||
| } | ||
| } | ||
| } | ||
|
|
@@ -167,6 +330,7 @@ pub fn routes(state: ApiState) -> Router<(), Body> { | |
| .route("/metrics", get(metrics)) | ||
| .route("/ready", get(ready)) | ||
| .route("/v1/chains", get(chain_ids)) | ||
| .route("/v1/explorer", get(explorer)) | ||
|
||
| .route( | ||
| "/v1/chains/:chain_id/revelations/:sequence", | ||
| get(revelation), | ||
|
|
@@ -252,7 +416,12 @@ mod test { | |
| ApiBlockChainState::Initialized(avax_state), | ||
| ); | ||
|
|
||
| let api_state = ApiState::new(Arc::new(RwLock::new(chains)), metrics_registry).await; | ||
| let api_state = ApiState::new( | ||
| Arc::new(RwLock::new(chains)), | ||
| metrics_registry, | ||
| Default::default(), | ||
| ) | ||
| .await; | ||
|
|
||
| let app = api::routes(api_state); | ||
| (TestServer::new(app).unwrap(), eth_read, avax_read) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,77 @@ | ||
| use crate::api::{ChainId, RequestJournal, RestError}; | ||
| use axum::extract::{Query, State}; | ||
| use axum::Json; | ||
| use ethers::types::TxHash; | ||
| use utoipa::{IntoParams, ToSchema}; | ||
|
|
||
| #[derive(Debug, serde::Serialize, serde::Deserialize, IntoParams)] | ||
| #[into_params(parameter_in=Query)] | ||
| pub struct ExplorerQueryParams { | ||
| pub mode: ExplorerQueryParamsMode, | ||
|
|
||
| pub min_timestamp: Option<u64>, | ||
| pub max_timestamp: Option<u64>, | ||
| pub sequence_id: Option<u64>, | ||
| #[param(value_type = Option<String>)] | ||
| pub tx_hash: Option<TxHash>, | ||
| #[param(value_type = Option<String>)] | ||
| pub chain_id: Option<ChainId>, | ||
| } | ||
| #[derive(Debug, serde::Serialize, serde::Deserialize, ToSchema)] | ||
| #[serde(rename_all = "kebab-case")] | ||
| pub enum ExplorerQueryParamsMode { | ||
| TxHash, | ||
| ChainAndSequence, | ||
| ChainAndTimestamp, | ||
| Timestamp, | ||
| } | ||
|
||
|
|
||
| #[utoipa::path( | ||
| get, | ||
| path = "/v1/explorer", | ||
| responses( | ||
| (status = 200, description = "Random value successfully retrieved", body = Vec<RequestJournal>) | ||
| ), | ||
| params(ExplorerQueryParams) | ||
| )] | ||
| pub async fn explorer( | ||
| State(state): State<crate::api::ApiState>, | ||
| Query(query_params): Query<ExplorerQueryParams>, | ||
| ) -> anyhow::Result<Json<Vec<RequestJournal>>, RestError> { | ||
| let result = match query_params.mode { | ||
| ExplorerQueryParamsMode::TxHash => { | ||
| let tx_hash = query_params.tx_hash.ok_or(RestError::BadFilterParameters( | ||
| "tx_hash is required when mode=tx-hash".to_string(), | ||
| ))?; | ||
| state | ||
| .history | ||
| .read() | ||
| .await | ||
| .get_request_logs_by_tx_hash(tx_hash) | ||
| } | ||
| ExplorerQueryParamsMode::ChainAndSequence => { | ||
| let chain_id = query_params.chain_id.ok_or(RestError::BadFilterParameters( | ||
| "chain_id is required when mode=chain-and-sequence".to_string(), | ||
| ))?; | ||
| let sequence_id = query_params | ||
| .sequence_id | ||
| .ok_or(RestError::BadFilterParameters( | ||
| "sequence_id is required when mode=chain-and-sequence".to_string(), | ||
| ))?; | ||
| state | ||
| .history | ||
| .read() | ||
| .await | ||
| .get_request_logs(&(chain_id, sequence_id)) | ||
| .into_iter() | ||
| .collect() | ||
| } | ||
| ExplorerQueryParamsMode::ChainAndTimestamp => { | ||
| vec![] | ||
| } | ||
| ExplorerQueryParamsMode::Timestamp => { | ||
| vec![] | ||
| } | ||
| }; | ||
| Ok(Json(result)) | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't understand why we need so many different types of log entries. I think at the end of the day (after we upgrade the contracts), requests can be in literally 2 states: pending (meaning we've seen it but haven't sent the callback) or complete. If it's complete, the result may or may not be an error.
The representation you are using for those two states (a vector of these log entries) has a much larger state space. I don't see why that's necessary.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
also I presume you're going to add a bunch of additional fields to these log entries? E.g., all the stuff emitted in the request event, all the stuff emitted in the callback
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, more fields will be added. I think more verbosity is helpful for internal debugging and for calculating more metrics, such as our landing latency, observation latency, etc. Many things can go wrong between each of these steps, and knowing the latest state is very powerful.