
Commit 1f2732c

Revert "Do not repeat a rollup after restart in some corner cases (#5675)"
This reverts commits 22bca4e, 22f805d, and 207e31f.

Parent: b7851fc

7 files changed (+7, −102 lines)

graph/src/blockchain/types.rs

Lines changed: 0 additions & 6 deletions

```diff
@@ -435,9 +435,3 @@ impl ToSql<Timestamptz, Pg> for BlockTime {
         <Timestamp as ToSql<Timestamptz, Pg>>::to_sql(&self.0, out)
     }
 }
-
-impl FromSql<Timestamptz, Pg> for BlockTime {
-    fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result<Self> {
-        <Timestamp as FromSql<Timestamptz, Pg>>::from_sql(bytes).map(|ts| Self(ts))
-    }
-}
```
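The `FromSql` impl removed here is what let diesel read a `timestamptz` column back into a `BlockTime`; it existed to support the `last_rollup` query that this commit also deletes from `rollup.rs` below. A minimal sketch of the pattern it enabled, assuming a hypothetical aggregation table `stats_hour` (the `Row` struct mirrors the `BlockTimeRes` struct removed further down):

```rust
use diesel::sql_types::{Nullable, Timestamptz};
use diesel::{sql_query, PgConnection, QueryableByName, RunQueryDsl};
use graph::blockchain::BlockTime;

#[derive(QueryableByName)]
struct Row {
    // Deserializing this field is what required
    // `FromSql<Timestamptz, Pg> for BlockTime` -- the impl this revert removes.
    #[diesel(sql_type = Nullable<Timestamptz>)]
    last_rollup: Option<BlockTime>,
}

// Hypothetical helper: read the newest bucket timestamp from one table.
fn max_timestamp(conn: &mut PgConnection) -> diesel::QueryResult<Option<BlockTime>> {
    sql_query("select max(timestamp) as last_rollup from stats_hour")
        .get_result::<Row>(conn)
        .map(|row| row.last_rollup)
}
```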

graph/src/data/store/scalar/timestamp.rs

Lines changed: 2 additions & 3 deletions

```diff
@@ -1,6 +1,5 @@
 use chrono::{DateTime, Utc};
 use diesel::serialize::ToSql;
-use diesel::sql_types::Timestamptz;
 use serde::{self, Deserialize, Serialize};
 use stable_hash::StableHash;
 
@@ -94,12 +93,12 @@ impl Display for Timestamp {
     }
 }
 
-impl ToSql<Timestamptz, diesel::pg::Pg> for Timestamp {
+impl ToSql<diesel::sql_types::Timestamptz, diesel::pg::Pg> for Timestamp {
     fn to_sql<'b>(
         &'b self,
         out: &mut diesel::serialize::Output<'b, '_, diesel::pg::Pg>,
    ) -> diesel::serialize::Result {
-        <_ as ToSql<Timestamptz, _>>::to_sql(&self.0, &mut out.reborrow())
+        <_ as ToSql<diesel::sql_types::Timestamptz, _>>::to_sql(&self.0, &mut out.reborrow())
     }
 }
 
```
graph/src/env/store.rs

Lines changed: 0 additions & 11 deletions

```diff
@@ -120,14 +120,6 @@ pub struct EnvVarsStore {
     pub use_brin_for_all_query_types: bool,
     /// Temporary env var to disable certain lookups in the chain store
     pub disable_block_cache_for_lookup: bool,
-    /// Temporary env var to fall back to the old broken way of determining
-    /// the time of the last rollup from the POI table instead of the new
-    /// way that fixes
-    /// https://github.com/graphprotocol/graph-node/issues/5530 Remove this
-    /// and all code that is dead as a consequence once this has been vetted
-    /// sufficiently, probably after 2024-12-01
-    /// Defaults to `false`, i.e. using the new fixed behavior
-    pub last_rollup_from_poi: bool,
 }
 
 // This does not print any values avoid accidentally leaking any sensitive env vars
@@ -176,7 +168,6 @@ impl From<InnerStore> for EnvVarsStore {
             create_gin_indexes: x.create_gin_indexes,
             use_brin_for_all_query_types: x.use_brin_for_all_query_types,
             disable_block_cache_for_lookup: x.disable_block_cache_for_lookup,
-            last_rollup_from_poi: x.last_rollup_from_poi,
         }
     }
 }
@@ -238,8 +229,6 @@ pub struct InnerStore {
     use_brin_for_all_query_types: bool,
     #[envconfig(from = "GRAPH_STORE_DISABLE_BLOCK_CACHE_FOR_LOOKUP", default = "false")]
     disable_block_cache_for_lookup: bool,
-    #[envconfig(from = "GRAPH_STORE_LAST_ROLLUP_FROM_POI", default = "false")]
-    last_rollup_from_poi: bool,
 }
 
 #[derive(Clone, Copy, Debug)]
```
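For reference, the removed flag followed the same `envconfig` pattern as the surviving fields above. A minimal, self-contained sketch of that pattern (the `Flags` struct here is illustrative, not graph-node's actual type):

```rust
use envconfig::Envconfig;

#[derive(Envconfig)]
struct Flags {
    // Parsed from the process environment; with the variable unset the
    // default applies, i.e. the new fixed rollup behavior.
    #[envconfig(from = "GRAPH_STORE_LAST_ROLLUP_FROM_POI", default = "false")]
    last_rollup_from_poi: bool,
}

fn main() {
    let flags = Flags::init_from_env().expect("env parsing failed");
    println!("last_rollup_from_poi = {}", flags.last_rollup_from_poi);
}
```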

server/graphman/tests/deployment_query.rs

Lines changed: 3 additions & 16 deletions

```diff
@@ -1,14 +1,10 @@
 pub mod util;
 
-use graph::components::store::{QueryStoreManager, SubgraphStore};
 use graph::data::subgraph::DeploymentHash;
-use graph::prelude::QueryTarget;
-
 use serde_json::json;
 use test_store::store::create_test_subgraph;
 use test_store::store::NETWORK_NAME;
-use test_store::STORE;
-use test_store::SUBGRAPH_STORE;
+use test_store::store::NODE_ID;
 
 use self::util::client::send_graphql_request;
 use self::util::run_test;
@@ -58,15 +54,6 @@ fn graphql_returns_deployment_info() {
         .await;
 
     let namespace = format!("sgd{}", locator.id);
-    let node = SUBGRAPH_STORE.assigned_node(&locator).unwrap().unwrap();
-    let qs = STORE
-        .query_store(
-            QueryTarget::Deployment(locator.hash.clone(), Default::default()),
-            false,
-        )
-        .await
-        .expect("could get a query store");
-    let shard = qs.shard();
 
     let expected_resp = json!({
         "data": {
@@ -76,8 +63,8 @@ fn graphql_returns_deployment_info() {
             "hash": "subgraph_1",
             "namespace": namespace,
             "name": "subgraph_1",
-            "nodeId": node.to_string(),
-            "shard": shard,
+            "nodeId": NODE_ID.to_string(),
+            "shard": "primary",
             "chain": NETWORK_NAME,
             "versionStatus": "current",
             "isActive": true,
```

store/postgres/src/deployment_store.rs

Lines changed: 1 addition & 5 deletions

```diff
@@ -910,11 +910,7 @@ impl DeploymentStore {
 
         let mut conn = self.get_conn()?;
         let layout = store.layout(&mut conn, site.cheap_clone())?;
-        if ENV_VARS.store.last_rollup_from_poi {
-            layout.block_time(&mut conn, block)
-        } else {
-            layout.last_rollup(&mut conn)
-        }
+        layout.block_time(&mut conn, block)
     }
 
     pub(crate) async fn supports_proof_of_indexing<'a>(
```

store/postgres/src/relational.rs

Lines changed: 0 additions & 14 deletions

```diff
@@ -1012,20 +1012,6 @@ impl Layout {
         Ok(block_time)
     }
 
-    /// Find the time of the last rollup for the subgraph. We do this by
-    /// looking for the maximum timestamp in any aggregation table and
-    /// adding a little bit more than the corresponding interval to it. This
-    /// method crucially depends on the fact that we always write the rollup
-    /// for all aggregations, meaning that if some aggregations do not have
-    /// an entry with the maximum timestamp that there was just no data for
-    /// that interval, but we did try to aggregate at that time.
-    pub(crate) fn last_rollup(
-        &self,
-        conn: &mut PgConnection,
-    ) -> Result<Option<BlockTime>, StoreError> {
-        Rollup::last_rollup(&self.rollups, conn)
-    }
-
     /// Construct `Rolllup` for each of the aggregation mappings
     /// `schema.agg_mappings()` and return them in the same order as the
     /// aggregation mappings
```
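The deleted doc comment captures the heuristic being reverted: take the maximum bucket timestamp across all aggregation tables and add slightly more than the aggregation interval. A worked example of that arithmetic, assuming an hourly aggregation whose newest bucket starts at 10:00:00 UTC:

```rust
use chrono::{DateTime, Duration, Utc};

fn main() {
    // Newest bucket start found in the aggregation table (assumed value).
    let bucket_start: DateTime<Utc> = "2024-08-01T10:00:00Z".parse().unwrap();
    // The rollup that wrote this bucket can only have run once the bucket
    // closed, i.e. at or after 11:00:00; adding one extra second lands
    // safely inside the following bucket.
    let interval = Duration::hours(1);
    let last_rollup = bucket_start + interval + Duration::seconds(1);
    assert_eq!(last_rollup.to_rfc3339(), "2024-08-01T11:00:01+00:00");
}
```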

store/postgres/src/relational/rollup.rs

Lines changed: 1 addition & 47 deletions

```diff
@@ -60,7 +60,7 @@ use std::sync::Arc;
 
 use diesel::{sql_query, PgConnection, RunQueryDsl as _};
 
-use diesel::sql_types::{Integer, Nullable, Timestamptz};
+use diesel::sql_types::{Integer, Timestamptz};
 use graph::blockchain::BlockTime;
 use graph::components::store::{BlockNumber, StoreError};
 use graph::constraint_violation;
@@ -70,7 +70,6 @@ use graph::schema::{
 };
 use graph::sqlparser::ast as p;
 use graph::sqlparser::parser::ParserError;
-use itertools::Itertools;
 
 use crate::relational::Table;
 
@@ -230,10 +229,6 @@ pub(crate) struct Rollup {
     #[allow(dead_code)]
     agg_table: Arc<Table>,
     insert_sql: String,
-    /// A query that determines the last time a rollup was done. The query
-    /// finds the latest timestamp in the aggregation table and adds the
-    /// length of the aggregation interval to deduce the last rollup time
-    last_rollup_sql: String,
 }
 
 impl Rollup {
@@ -261,12 +256,10 @@ impl Rollup {
         );
         let mut insert_sql = String::new();
         sql.insert(&mut insert_sql)?;
-        let last_rollup_sql = sql.last_rollup();
         Ok(Self {
             interval,
             agg_table,
             insert_sql,
-            last_rollup_sql,
         })
     }
 
@@ -282,32 +275,6 @@ impl Rollup {
             .bind::<Integer, _>(block);
         query.execute(conn)
     }
-
-    pub(crate) fn last_rollup(
-        rollups: &[Rollup],
-        conn: &mut PgConnection,
-    ) -> Result<Option<BlockTime>, StoreError> {
-        #[derive(QueryableByName)]
-        #[diesel(check_for_backend(diesel::pg::Pg))]
-        struct BlockTimeRes {
-            #[diesel(sql_type = Nullable<Timestamptz>)]
-            last_rollup: Option<BlockTime>,
-        }
-
-        if rollups.is_empty() {
-            return Ok(None);
-        }
-
-        let union_all = rollups
-            .iter()
-            .map(|rollup| &rollup.last_rollup_sql)
-            .join(" union all ");
-        let query = format!("select max(last_rollup) as last_rollup from ({union_all}) as a");
-        let last_rollup = sql_query(&query)
-            .get_result::<BlockTimeRes>(conn)
-            .map(|res| res.last_rollup)?;
-        Ok(last_rollup)
-    }
 }
 
 struct RollupSql<'a> {
@@ -512,19 +479,6 @@ impl<'a> RollupSql<'a> {
             self.insert_bucket(w)
         }
     }
-
-    /// Generate a query that selects the timestamp of the last rollup
-    fn last_rollup(&self) -> String {
-        // The timestamp column contains the timestamp of the start of the
-        // last bucket. The last rollup was therefore at least
-        // `self.interval` after that. We add 1 second to make sure we are
-        // well within the next bucket
-        let secs = self.interval.as_duration().as_secs() + 1;
-        format!(
-            "select max(timestamp) + '{} s'::interval as last_rollup from {}",
-            secs, self.agg_table.qualified_name
-        )
-    }
 }
 
 /// Write the elements in `list` separated by commas into `w`. The list
```