
Commit b72fe1b

refactor: use router builder
Signed-off-by: Gustavo Inacio <[email protected]>
1 parent a9d7a71 commit b72fe1b

File tree: 13 files changed (+685, -620 lines)


Cargo.lock

Lines changed: 24 additions & 2 deletions
(Generated file; diff not rendered.)

Cargo.toml

Lines changed: 5 additions & 1 deletion
@@ -29,7 +29,11 @@ alloy = { version = "=0.5.4", features = [
 ], default-features = false }
 clap = "4.4.3"
 lazy_static = "1.4.0"
-axum = { version = "0.7.7", default-features = false }
+axum = { version = "0.7.9", default-features = false, features = [
+    "tokio",
+    "http1",
+    "http2",
+] }
 tokio = "1.40"
 prometheus = "0.13.3"
 anyhow = { version = "1.0.72" }

crates/service/Cargo.toml

Lines changed: 3 additions & 1 deletion
@@ -39,7 +39,8 @@ graphql = { git = "https://github.com/edgeandnode/toolshed", tag = "graphql-v0.3
 tap_core.workspace = true
 uuid.workspace = true
 alloy.workspace = true
-tower_governor = "0.4.0"
+tower_governor = "0.4.3"
+governor = "0.6.0"
 tower-http = { version = "0.6.2", features = [
     "auth",
     "cors",
@@ -55,6 +56,7 @@ cost-model = { git = "https://github.com/graphprotocol/agora", rev = "3ed34ca" }
 bip39.workspace = true
 tower = "0.5.1"
 pin-project = "1.1.7"
+typed-builder = "0.20.0"
 
 [dev-dependencies]
 hex-literal = "0.4.1"

crates/service/src/database/mod.rs

Lines changed: 5 additions & 2 deletions
@@ -9,12 +9,15 @@ use std::time::Duration;
 use sqlx::{postgres::PgPoolOptions, PgPool};
 use tracing::debug;
 
+const DATABASE_TIMEOUT: Duration = Duration::from_secs(30);
+const DATABASE_MAX_CONNECTIONS: u32 = 50;
+
 pub async fn connect(url: &str) -> PgPool {
     debug!("Connecting to database");
 
     PgPoolOptions::new()
-        .max_connections(50)
-        .acquire_timeout(Duration::from_secs(3))
+        .max_connections(DATABASE_MAX_CONNECTIONS)
+        .acquire_timeout(DATABASE_TIMEOUT)
         .connect(url)
         .await
         .expect("Should be able to connect to the database")
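
For orientation, a minimal, hypothetical call site for the `connect` helper; the connection URL and function name below are placeholders, not something defined in this commit:

    use sqlx::PgPool;

    // Hypothetical usage inside the service crate; the real URL comes from configuration.
    async fn init_db() -> PgPool {
        crate::database::connect("postgres://indexer@localhost:5432/indexer_components").await
    }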

crates/service/src/metrics.rs

Lines changed: 43 additions & 1 deletion
@@ -1,8 +1,16 @@
 // Copyright 2023-, Edge & Node, GraphOps, and Semiotic Labs.
 // SPDX-License-Identifier: Apache-2.0
 
+use std::net::SocketAddr;
+
+use axum::{routing::get, serve, Router};
 use lazy_static::lazy_static;
-use prometheus::{register_counter_vec, register_histogram_vec, CounterVec, HistogramVec};
+use prometheus::{
+    register_counter_vec, register_histogram_vec, CounterVec, HistogramVec, TextEncoder,
+};
+use reqwest::StatusCode;
+use tokio::net::TcpListener;
+use tracing::{error, info};
 
 lazy_static! {
     /// Metric registered in global registry for
@@ -37,3 +45,37 @@ lazy_static! {
     .unwrap();
 
 }
+
+pub fn serve_metrics(host_and_port: SocketAddr) {
+    info!(address = %host_and_port, "Serving prometheus metrics");
+
+    tokio::spawn(async move {
+        let router = Router::new().route(
+            "/metrics",
+            get(|| async {
+                let metric_families = prometheus::gather();
+                let encoder = TextEncoder::new();
+
+                match encoder.encode_to_string(&metric_families) {
+                    Ok(s) => (StatusCode::OK, s),
+                    Err(e) => {
+                        error!("Error encoding metrics: {}", e);
+                        (
+                            StatusCode::INTERNAL_SERVER_ERROR,
+                            format!("Error encoding metrics: {}", e),
+                        )
+                    }
+                }
+            }),
+        );
+
+        serve(
+            TcpListener::bind(host_and_port)
+                .await
+                .expect("Failed to bind to metrics port"),
+            router.into_make_service(),
+        )
+        .await
+        .expect("Failed to serve metrics")
+    });
+}
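
A hedged sketch of how the new `serve_metrics` helper might be invoked; the wrapper function name and port are assumptions for illustration, not part of this diff:

    use std::net::SocketAddr;

    use crate::metrics::serve_metrics;

    /// Hypothetical startup hook inside the service crate; names and port are illustrative.
    pub fn start_metrics_server() {
        // Placeholder address; in practice this comes from configuration.
        let metrics_addr: SocketAddr = "0.0.0.0:7300".parse().expect("valid socket address");

        // serve_metrics spawns its own Tokio task, so this returns immediately;
        // it must be called from within a running Tokio runtime.
        serve_metrics(metrics_addr);
    }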

crates/service/src/routes/cost.rs

Lines changed: 7 additions & 19 deletions
@@ -5,19 +5,16 @@ use std::str::FromStr;
 
 use crate::database::cost_model::{self, CostModel};
 use async_graphql::{Context, EmptyMutation, EmptySubscription, Object, Schema, SimpleObject};
-use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
-use axum::extract::State;
 use lazy_static::lazy_static;
 use prometheus::{
     register_counter, register_counter_vec, register_histogram, register_histogram_vec, Counter,
     CounterVec, Histogram, HistogramVec,
 };
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
+use sqlx::PgPool;
 use thegraph_core::DeploymentId;
 
-use crate::service::SubgraphServiceState;
-
 lazy_static! {
     pub static ref COST_MODEL_METRIC: HistogramVec = register_histogram_vec!(
         "indexer_cost_model_seconds",
@@ -126,7 +123,7 @@ impl Query {
         ctx: &Context<'_>,
         deployment_ids: Vec<DeploymentId>,
     ) -> Result<Vec<GraphQlCostModel>, anyhow::Error> {
-        let pool = &ctx.data_unchecked::<SubgraphServiceState>().database;
+        let pool = &ctx.data_unchecked::<PgPool>();
         let cost_models = cost_model::cost_models(pool, &deployment_ids).await?;
         Ok(cost_models.into_iter().map(|m| m.into()).collect())
     }
@@ -136,7 +133,7 @@ impl Query {
         ctx: &Context<'_>,
         deployment_id: DeploymentId,
     ) -> Result<Option<GraphQlCostModel>, anyhow::Error> {
-        let pool = &ctx.data_unchecked::<SubgraphServiceState>().database;
+        let pool = &ctx.data_unchecked::<PgPool>();
         cost_model::cost_model(pool, &deployment_id)
             .await
             .map(|model_opt| model_opt.map(GraphQlCostModel::from))
@@ -145,17 +142,8 @@ impl Query {
 
 pub type CostSchema = Schema<Query, EmptyMutation, EmptySubscription>;
 
-pub async fn build_schema() -> CostSchema {
-    Schema::build(Query, EmptyMutation, EmptySubscription).finish()
-}
-
-pub async fn cost(
-    State(state): State<SubgraphServiceState>,
-    req: GraphQLRequest,
-) -> GraphQLResponse {
-    state
-        .cost_schema
-        .execute(req.into_inner().data(state.clone()))
-        .await
-        .into()
+pub async fn build_schema(data: PgPool) -> CostSchema {
+    Schema::build(Query, EmptyMutation, EmptySubscription)
+        .data(data)
+        .finish()
 }
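
Since the `cost` axum handler is removed here, the schema presumably gets mounted by the router builder this commit introduces elsewhere. A hedged sketch of one way the new `build_schema(PgPool)` could be wired up with `async-graphql-axum`; the route path, handler, and function names are assumptions, not taken from this commit:

    use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
    use axum::{routing::post, Extension, Router};
    use sqlx::PgPool;

    use crate::routes::cost::{build_schema, CostSchema};

    // Hypothetical handler: the schema now carries the PgPool as schema data,
    // so no application state needs to be threaded through the request.
    async fn cost_handler(
        Extension(schema): Extension<CostSchema>,
        req: GraphQLRequest,
    ) -> GraphQLResponse {
        schema.execute(req.into_inner()).await.into()
    }

    // Hypothetical router assembly.
    async fn cost_router(pool: PgPool) -> Router {
        let schema = build_schema(pool).await;
        Router::new()
            .route("/cost", post(cost_handler))
            .layer(Extension(schema))
    }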

crates/service/src/routes/dips.rs

Lines changed: 39 additions & 1 deletion
@@ -5,18 +5,56 @@ use std::time::Duration;
 use std::{str::FromStr, sync::Arc};
 
 use anyhow::bail;
-use async_graphql::{Context, FieldResult, Object, SimpleObject};
+use async_graphql::{Context, EmptySubscription, FieldResult, Object, Schema, SimpleObject};
 use base64::{engine::general_purpose::STANDARD, Engine};
+use indexer_config::{BlockchainConfig, DipsConfig};
 use indexer_dips::alloy::dyn_abi::Eip712Domain;
 use indexer_dips::SignedCancellationRequest;
 use indexer_dips::{
     alloy::core::primitives::Address, alloy_rlp::Decodable, SignedIndexingAgreementVoucher,
     SubgraphIndexingVoucherMetadata,
 };
+use thegraph_core::attestation::eip712_domain;
 use uuid::Uuid;
 
 use crate::database::dips::AgreementStore;
 
+pub type DipsSchema = Schema<AgreementQuery, AgreementMutation, EmptySubscription>;
+pub type DipsStore = Arc<dyn AgreementStore>;
+
+pub fn build_schema(
+    indexer_address: Address,
+    DipsConfig {
+        allowed_payers,
+        cancellation_time_tolerance,
+    }: &DipsConfig,
+    BlockchainConfig {
+        chain_id,
+        receipts_verifier_address,
+    }: &BlockchainConfig,
+    agreement_store: DipsStore,
+    prices: Vec<Price>,
+) -> DipsSchema {
+    Schema::build(
+        AgreementQuery {},
+        AgreementMutation {
+            expected_payee: indexer_address,
+            allowed_payers: allowed_payers.clone(),
+            domain: eip712_domain(
+                // 42161, // arbitrum
+                *chain_id as u64,
+                *receipts_verifier_address,
+            ),
+            cancel_voucher_time_tolerance: cancellation_time_tolerance
+                .unwrap_or(Duration::from_secs(5)),
+        },
+        EmptySubscription,
+    )
+    .data(agreement_store)
+    .data(prices)
+    .finish()
+}
+
 pub enum NetworkProtocol {
     ArbitrumMainnet,
 }

crates/service/src/routes/request_handler.rs

Lines changed: 2 additions & 4 deletions
@@ -1,9 +1,7 @@
 // Copyright 2023-, Edge & Node, GraphOps, and Semiotic Labs.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::{
-    error::SubgraphServiceError, middleware::AttestationInput, service::SubgraphServiceState,
-};
+use crate::{error::SubgraphServiceError, middleware::AttestationInput, service::GraphNodeState};
 use axum::{
     extract::{Path, State},
     http::{HeaderValue, Response},
@@ -17,7 +15,7 @@ const GRAPH_ATTESTABLE: &str = "graph-attestable";
 
 pub async fn request_handler(
     Path(deployment): Path<DeploymentId>,
-    State(state): State<SubgraphServiceState>,
+    State(state): State<GraphNodeState>,
     req: String,
 ) -> Result<impl IntoResponse, SubgraphServiceError> {
     trace!("Handling request for deployment `{deployment}`");

crates/service/src/routes/status.rs

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@ use thegraph_graphql_http::{
     http_client::{ReqwestExt, ResponseError},
 };
 
-use crate::{error::SubgraphServiceError, service::SubgraphServiceState};
+use crate::{error::SubgraphServiceError, service::GraphNodeState};
 
 lazy_static::lazy_static! {
     static ref SUPPORTED_ROOT_FIELDS: HashSet<&'static str> =
@@ -57,7 +57,7 @@ impl IntoRequestParameters for WrappedGraphQLRequest {
 
 // Custom middleware function to process the request before reaching the main handler
 pub async fn status(
-    State(state): State<SubgraphServiceState>,
+    State(state): State<GraphNodeState>,
     request: GraphQLRequest,
 ) -> Result<impl IntoResponse, SubgraphServiceError> {
     let request = request.into_inner();
