25 changes: 17 additions & 8 deletions apps/fortuna/src/api/explorer.rs
@@ -35,20 +35,26 @@ pub struct ExplorerQueryParams {
pub offset: Option<u64>,
}

#[derive(Debug, serde::Serialize, utoipa::ToSchema)]
pub struct ExplorerResponse {
pub requests: Vec<RequestStatus>,
pub total_results: u64,
}

/// Returns the logs of all requests captured by the keeper.
///
/// This endpoint allows you to filter the logs by a specific network ID, a query string (which can be a transaction hash, sender address, or sequence number), and a time range.
/// This is useful for debugging and monitoring the requests made to the Entropy contracts on various chains.
#[utoipa::path(
get,
path = "/v1/logs",
responses((status = 200, description = "A list of Entropy request logs", body = Vec<RequestStatus>)),
responses((status = 200, description = "A list of Entropy request logs", body = ExplorerResponse)),
params(ExplorerQueryParams)
)]
pub async fn explorer(
State(state): State<crate::api::ApiState>,
Query(query_params): Query<ExplorerQueryParams>,
) -> anyhow::Result<Json<Vec<RequestStatus>>, RestError> {
) -> anyhow::Result<Json<ExplorerResponse>, RestError> {
if let Some(network_id) = &query_params.network_id {
if !state
.chains
@@ -89,10 +95,13 @@ pub async fn explorer(
if let Some(max_timestamp) = query_params.max_timestamp {
query = query.max_timestamp(max_timestamp);
}
Ok(Json(
query
.execute()
.await
.map_err(|_| RestError::TemporarilyUnavailable)?,
))

let (requests, total_results) = tokio::join!(query.execute(), query.count_results());
let requests = requests.map_err(|_| RestError::TemporarilyUnavailable)?;
let total_results = total_results.map_err(|_| RestError::TemporarilyUnavailable)?;

Ok(Json(ExplorerResponse {
requests,
total_results,
}))
}
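
The handler now returns an ExplorerResponse that wraps the page of requests together with the total number of matching rows. Below is a minimal sketch of how a client might consume the paginated /v1/logs body; the struct is a hypothetical mirror of ExplorerResponse limited to the two fields shown in this diff, and each request entry is kept as opaque JSON because the RequestStatus fields are not part of the diff.

```rust
use serde::Deserialize;

// Hypothetical client-side mirror of ExplorerResponse; request entries are kept
// as opaque JSON values because RequestStatus is not shown in this diff.
#[derive(Debug, Deserialize)]
struct ExplorerPage {
    requests: Vec<serde_json::Value>,
    total_results: u64,
}

fn main() -> Result<(), serde_json::Error> {
    let body = r#"{"requests": [], "total_results": 42}"#;
    let page: ExplorerPage = serde_json::from_str(body)?;
    println!(
        "{} of {} matching requests on this page",
        page.requests.len(),
        page.total_results
    );
    Ok(())
}
```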
1 change: 1 addition & 0 deletions apps/fortuna/src/command/run.rs
@@ -45,6 +45,7 @@ pub async fn run_api(
crate::api::Blob,
crate::api::BinaryEncoding,
crate::api::StateTag,
crate::api::ExplorerResponse,
)
),
tags(
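This one-line change registers the new response type in the OpenAPI components. As a rough illustration, assuming Fortuna assembles its spec with utoipa's derive macros (which the surrounding attribute syntax suggests), a schema referenced from a #[utoipa::path] response only appears in the generated document when it is also listed under components(schemas(...)); the struct below is a reduced stand-in, not the real crate::api::ExplorerResponse.

```rust
use utoipa::{OpenApi, ToSchema};

// Reduced stand-in for crate::api::ExplorerResponse (illustration only).
#[derive(ToSchema)]
#[allow(dead_code)]
struct ExplorerResponse {
    total_results: u64,
}

#[derive(OpenApi)]
#[openapi(components(schemas(ExplorerResponse)))]
struct ApiDoc;

fn main() {
    // ExplorerResponse shows up in the output only because it is registered above.
    println!("{}", ApiDoc::openapi().to_pretty_json().unwrap());
}
```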
81 changes: 64 additions & 17 deletions apps/fortuna/src/history.rs
@@ -427,8 +427,34 @@ impl<'a> RequestQueryBuilder<'a> {
}

pub async fn execute(&self) -> Result<Vec<RequestStatus>> {
let mut query_builder =
QueryBuilder::new("SELECT * FROM request WHERE created_at BETWEEN ");
let mut query_builder = self.build_query("*");
query_builder.push(" LIMIT ");
query_builder.push_bind(self.limit);
query_builder.push(" OFFSET ");
query_builder.push_bind(self.offset);

let result: sqlx::Result<Vec<RequestRow>> =
query_builder.build_query_as().fetch_all(self.pool).await;

if let Err(e) = &result {
tracing::error!("Failed to fetch request: {}", e);
}

Ok(result?.into_iter().filter_map(|row| row.into()).collect())
}

pub async fn count_results(&self) -> Result<u64> {
self.build_query("COUNT(*) AS count")
.build_query_scalar::<u64>()
.fetch_one(self.pool)
.await
.map_err(|err| err.into())
}

fn build_query(&self, columns: &str) -> QueryBuilder<Sqlite> {
let mut query_builder = QueryBuilder::new(format!(
"SELECT {columns} FROM request WHERE created_at BETWEEN "
));
query_builder.push_bind(self.min_timestamp);
query_builder.push(" AND ");
query_builder.push_bind(self.max_timestamp);
@@ -464,21 +490,8 @@ impl<'a> RequestQueryBuilder<'a> {
query_builder.push_bind(state);
}

query_builder.push(" ORDER BY created_at DESC LIMIT ");
query_builder.push_bind(self.limit);
query_builder.push(" OFFSET ");
query_builder.push_bind(self.offset);

let rows = query_builder
.build_query_as::<RequestRow>()
.fetch_all(self.pool)
.await;

if let Err(e) = &rows {
tracing::error!("Failed to fetch request by time: {}", e);
}

Ok(rows?.into_iter().filter_map(|row| row.into()).collect())
query_builder.push(" ORDER BY created_at DESC");
query_builder
}
}
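
With this refactor, execute() and count_results() share build_query(), which assembles the SELECT clause, the WHERE filters, and the trailing ORDER BY; only execute() appends LIMIT/OFFSET, so the count always covers every matching row. As a sketch, assuming only the created_at range is bound (each optional filter would append another AND clause), the two generated statements have roughly these shapes. Note that the count query also carries the trailing ORDER BY because build_query() appends it unconditionally, which is redundant but harmless for a single-row aggregate.

```rust
// Sketch of the SQL shapes built above, using SQLite-style `?` placeholders.
// Assumption: only the created_at range filter is set.
const PAGE_SQL: &str = "SELECT * FROM request WHERE created_at BETWEEN ? AND ? \
                        ORDER BY created_at DESC LIMIT ? OFFSET ?";
const COUNT_SQL: &str = "SELECT COUNT(*) AS count FROM request WHERE created_at BETWEEN ? AND ? \
                         ORDER BY created_at DESC";

fn main() {
    println!("page:  {PAGE_SQL}");
    println!("count: {COUNT_SQL}");
}
```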

@@ -928,4 +941,38 @@ mod test {
.unwrap();
assert_eq!(logs, vec![status]);
}

#[tokio::test]
async fn test_count_results() {
let history = History::new_in_memory().await.unwrap();
History::update_request_status(&history.pool, get_random_request_status()).await;
History::update_request_status(&history.pool, get_random_request_status()).await;
let mut failed_status = get_random_request_status();
History::update_request_status(&history.pool, failed_status.clone()).await;
failed_status.state = RequestEntryState::Failed {
reason: "Failed".to_string(),
provider_random_number: None,
};
History::update_request_status(&history.pool, failed_status.clone()).await;

let results = history.query().count_results().await.unwrap();
assert_eq!(results, 3);

let results = history
.query()
.limit(1)
.unwrap()
.count_results()
.await
.unwrap();
assert_eq!(results, 3);

let results = history
.query()
.state(StateTag::Pending)
.count_results()
.await
.unwrap();
assert_eq!(results, 2);
}
}
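
The new test pins down the pagination contract: count_results() respects the filters (3 rows total, 2 still Pending after one is marked Failed) but ignores limit() and offset(). A hypothetical client-side helper built on that contract is sketched below; page_count is not part of this PR, it only illustrates how total_results combines with a page size.

```rust
// Hypothetical helper: how many pages a UI would render for a given page size,
// derived from the total_results field returned next to each page.
fn page_count(total_results: u64, limit: u64) -> u64 {
    total_results.div_ceil(limit.max(1))
}

fn main() {
    // Mirrors the test above: 3 matching rows queried with limit(1) still report total_results = 3.
    assert_eq!(page_count(3, 1), 3);
    assert_eq!(page_count(3, 25), 1);
}
```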