
Commit a4ca0df

feat: Blob filestores (#18971)
Added `FileStoreBlobClient`, which enables reading and writing blobs to cloud storage (S3, GCS) or the local filesystem. This provides an alternative blob source alongside the existing blob sink server, L1 consensus layer, and archive (e.g., Blobscan).

## Blob Download Logic

The download strategy varies based on sync state:

- **Historical sync:** Blob sink → FileStore → L1 consensus → Archive
- **Near-tip sync:** Blob sink → FileStore → L1 consensus → FileStore (with retries) → Archive

Near-tip sync retries the filestore with exponential backoff, on the assumption that filestores should already contain the data by the time blobs are queried.

## Blob Upload Logic

Blobs are uploaded only if the node has write access to a filestore. Uploads occur in three scenarios:

1. `SequencerPublisher` publishes blobs when posting a checkpoint to L1
2. The archiver fetches blobs from any source
3. A validator receives a valid block proposal

## Configuration

The following environment variables are introduced:

| Variable | Description |
|----------|-------------|
| `BLOB_FILE_STORE_URLS` | Comma-separated URLs for reading blobs (`s3://`, `gs://`, `file://`, `https://`) |
| `BLOB_FILE_STORE_UPLOAD_URL` | URL for uploading blobs (`s3://`, `gs://`, `file://`) |

`BLOB_FILE_STORE_URLS` can also be defined in the network configuration, enabling centralized management of these URLs across nodes.
2 parents c2ed84e + aa4a3ae commit a4ca0df
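To make the near-tip download order concrete, here is a minimal sketch of an ordered-fallback fetch with exponential backoff. The `BlobSource` interface, the function name `fetchBlobsNearTip`, and the retry count and delay values are illustrative assumptions, not the PR's actual implementation (which lives inside the blob sink client).

```ts
// Hypothetical sketch of the near-tip fallback order described in the commit message.
// The BlobSource type and the retry/backoff parameters are assumptions for illustration.
interface BlobSource {
  name: string;
  fetch(blockHash: string, blobHashes: Buffer[]): Promise<Buffer[]>;
}

async function fetchBlobsNearTip(
  sources: { blobSink: BlobSource; fileStore: BlobSource; l1Consensus: BlobSource; archive: BlobSource },
  blockHash: string,
  blobHashes: Buffer[],
): Promise<Buffer[]> {
  // Try a single source; an empty result or an error means "not found here".
  const tryOnce = async (source: BlobSource) => {
    const blobs = await source.fetch(blockHash, blobHashes).catch(() => []);
    return blobs.length > 0 ? blobs : undefined;
  };

  // Near-tip order: blob sink → filestore → L1 consensus.
  for (const source of [sources.blobSink, sources.fileStore, sources.l1Consensus]) {
    const blobs = await tryOnce(source);
    if (blobs) {
      return blobs;
    }
  }

  // Retry the filestore with exponential backoff: near the tip it should already
  // hold the blobs, so give a pending upload a little time to land.
  let delayMs = 500;
  for (let attempt = 0; attempt < 4; attempt++) {
    const blobs = await tryOnce(sources.fileStore);
    if (blobs) {
      return blobs;
    }
    await new Promise(resolve => setTimeout(resolve, delayMs));
    delayMs *= 2;
  }

  // Last resort: the archive (e.g. Blobscan).
  return (await tryOnce(sources.archive)) ?? [];
}
```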


25 files changed: +1702 -50 lines


yarn-project/archiver/src/archiver/archiver.ts

Lines changed: 1 addition & 0 deletions

@@ -970,6 +970,7 @@ export class Archiver
  this.l1Addresses,
  this.instrumentation,
  this.log,
+ !this.initialSyncComplete, // isHistoricalSync
  );

  if (retrievedCheckpoints.length === 0) {

yarn-project/archiver/src/archiver/l1/data_retrieval.ts

Lines changed: 21 additions & 6 deletions

@@ -137,13 +137,17 @@ export async function retrievedToPublishedCheckpoint({

  /**
   * Fetches new checkpoints.
+  * @param rollup - The rollup contract instance.
   * @param publicClient - The viem public client to use for transaction retrieval.
   * @param debugClient - The viem debug client to use for trace/debug RPC methods (optional).
-  * @param rollupAddress - The address of the rollup contract.
+  * @param blobSinkClient - The blob sink client for fetching blob data.
   * @param searchStartBlock - The block number to use for starting the search.
   * @param searchEndBlock - The highest block number that we should search up to.
-  * @param expectedNextL2BlockNum - The next L2 block number that we expect to find.
-  * @returns An array of block; as well as the next eth block to search from.
+  * @param contractAddresses - The contract addresses (governanceProposerAddress, slashFactoryAddress, slashingProposerAddress).
+  * @param instrumentation - The archiver instrumentation instance.
+  * @param logger - The logger instance.
+  * @param isHistoricalSync - Whether this is a historical sync.
+  * @returns An array of retrieved checkpoints.
   */
  export async function retrieveCheckpointsFromRollup(
  rollup: GetContractReturnType<typeof RollupAbi, ViemPublicClient>,

@@ -159,6 +163,7 @@
  },
  instrumentation: ArchiverInstrumentation,
  logger: Logger = createLogger('archiver'),
+ isHistoricalSync: boolean = false,
  ): Promise<RetrievedCheckpoint[]> {
  const retrievedCheckpoints: RetrievedCheckpoint[] = [];

@@ -210,6 +215,7 @@
  contractAddresses,
  instrumentation,
  logger,
+ isHistoricalSync,
  );
  retrievedCheckpoints.push(...newCheckpoints);
  searchStartBlock = lastLog.blockNumber! + 1n;

@@ -221,11 +227,17 @@

  /**
   * Processes newly received CheckpointProposed logs.
-  * @param rollup - The rollup contract
+  * @param rollup - The rollup contract instance.
   * @param publicClient - The viem public client to use for transaction retrieval.
   * @param debugClient - The viem debug client to use for trace/debug RPC methods (optional).
+  * @param blobSinkClient - The blob sink client for fetching blob data.
   * @param logs - CheckpointProposed logs.
-  * @returns - An array of checkpoints.
+  * @param rollupConstants - The rollup constants (chainId, version, targetCommitteeSize).
+  * @param contractAddresses - The contract addresses (governanceProposerAddress, slashFactoryAddress, slashingProposerAddress).
+  * @param instrumentation - The archiver instrumentation instance.
+  * @param logger - The logger instance.
+  * @param isHistoricalSync - Whether this is a historical sync.
+  * @returns An array of retrieved checkpoints.
   */
  async function processCheckpointProposedLogs(
  rollup: GetContractReturnType<typeof RollupAbi, ViemPublicClient>,

@@ -241,6 +253,7 @@
  },
  instrumentation: ArchiverInstrumentation,
  logger: Logger,
+ isHistoricalSync: boolean,
  ): Promise<RetrievedCheckpoint[]> {
  const retrievedCheckpoints: RetrievedCheckpoint[] = [];
  const calldataRetriever = new CalldataRetriever(

@@ -278,6 +291,7 @@
  blobHashes,
  checkpointNumber,
  logger,
+ isHistoricalSync,
  );

  const l1 = new L1PublishedData(

@@ -315,8 +329,9 @@ export async function getCheckpointBlobDataFromBlobs(
  blobHashes: Buffer<ArrayBufferLike>[],
  checkpointNumber: CheckpointNumber,
  logger: Logger,
+ isHistoricalSync: boolean,
  ): Promise<CheckpointBlobData> {
- const blobBodies = await blobSinkClient.getBlobSidecar(blockHash, blobHashes);
+ const blobBodies = await blobSinkClient.getBlobSidecar(blockHash, blobHashes, undefined, { isHistoricalSync });
  if (blobBodies.length === 0) {
  throw new NoBlobBodiesFoundError(checkpointNumber);
  }

yarn-project/aztec-node/src/aztec-node/server.ts

Lines changed: 25 additions & 2 deletions

@@ -1,6 +1,11 @@
  import { Archiver, createArchiver } from '@aztec/archiver';
  import { BBCircuitVerifier, QueuedIVCVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
  import { type BlobSinkClientInterface, createBlobSinkClient } from '@aztec/blob-sink/client';
+ import {
+   type BlobFileStoreMetadata,
+   createReadOnlyFileStoreBlobClients,
+   createWritableFileStoreBlobClient,
+ } from '@aztec/blob-sink/filestore';
  import {
  ARCHIVE_HEIGHT,
  INITIAL_L2_BLOCK_NUM,

@@ -197,8 +202,6 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
  const packageVersion = getPackageVersion() ?? '';
  const telemetry = deps.telemetry ?? getTelemetryClient();
  const dateProvider = deps.dateProvider ?? new DateProvider();
- const blobSinkClient =
-   deps.blobSinkClient ?? createBlobSinkClient(config, { logger: createLogger('node:blob-sink:client') });
  const ethereumChain = createEthereumChain(config.l1RpcUrls, config.l1ChainId);

  // Build a key store from file if given or from environment otherwise

@@ -266,6 +269,25 @@
  );
  }

+ const blobFileStoreMetadata: BlobFileStoreMetadata = {
+   l1ChainId: config.l1ChainId,
+   rollupVersion: config.rollupVersion,
+   rollupAddress: config.l1Contracts.rollupAddress.toString(),
+ };
+
+ const [fileStoreClients, fileStoreUploadClient] = await Promise.all([
+   createReadOnlyFileStoreBlobClients(config.blobFileStoreUrls, blobFileStoreMetadata, log),
+   createWritableFileStoreBlobClient(config.blobFileStoreUploadUrl, blobFileStoreMetadata, log),
+ ]);
+
+ const blobSinkClient =
+   deps.blobSinkClient ??
+   createBlobSinkClient(config, {
+     logger: createLogger('node:blob-sink:client'),
+     fileStoreClients,
+     fileStoreUploadClient,
+   });
+
  // attempt snapshot sync if possible
  await trySnapshotSync(config, log);

@@ -331,6 +353,7 @@
  blockSource: archiver,
  l1ToL2MessageSource: archiver,
  keyStoreManager,
+ fileStoreBlobUploadClient: fileStoreUploadClient,
  });

  // If we have a validator client, register it as a source of offenses for the slasher,

yarn-project/blob-sink/package.json

Lines changed: 2 additions & 1 deletion

@@ -11,7 +11,8 @@
  "./client": "./dest/client/index.js",
  "./client/config": "./dest/client/config.js",
  "./encoding": "./dest/encoding/index.js",
- "./types": "./dest/types/index.js"
+ "./types": "./dest/types/index.js",
+ "./filestore": "./dest/filestore/index.js"
  },
  "inherits": [
  "../package.common.json"

yarn-project/blob-sink/src/client/config.ts

Lines changed: 29 additions & 1 deletion

@@ -45,6 +45,16 @@ export interface BlobSinkConfig extends BlobSinkArchiveApiConfig {
   * Whether to allow having no blob sources configured during startup
   */
  blobAllowEmptySources?: boolean;
+
+ /**
+  * URLs for reading blobs from filestore (s3://, gs://, file://, https://). Tried in order until blobs are found.
+  */
+ blobFileStoreUrls?: string[];
+
+ /**
+  * URL for uploading blobs to filestore (s3://, gs://, file://)
+  */
+ blobFileStoreUploadUrl?: string;
  }

  export const blobSinkConfigMapping: ConfigMappingsType<BlobSinkConfig> = {

@@ -84,6 +94,19 @@
  description: 'Whether to allow having no blob sources configured during startup',
  ...booleanConfigHelper(false),
  },
+ blobFileStoreUrls: {
+   env: 'BLOB_FILE_STORE_URLS',
+   description: 'URLs for filestore blob archive, comma-separated. Tried in order until blobs are found.',
+   parseEnv: (val: string) =>
+     val
+       .split(',')
+       .map(url => url.trim())
+       .filter(url => url.length > 0),
+ },
+ blobFileStoreUploadUrl: {
+   env: 'BLOB_FILE_STORE_UPLOAD_URL',
+   description: 'URL for uploading blobs to filestore (s3://, gs://, file://)',
+ },
  ...blobSinkArchiveApiConfigMappings,
  };

@@ -99,5 +122,10 @@ export function getBlobSinkConfigFromEnv(): BlobSinkConfig {
   * Returns whether the given blob sink config has any remote sources defined.
   */
  export function hasRemoteBlobSinkSources(config: BlobSinkConfig = {}): boolean {
- return !!(config.blobSinkUrl || config.l1ConsensusHostUrls?.length || config.archiveApiUrl);
+ return !!(
+   config.blobSinkUrl ||
+   config.l1ConsensusHostUrls?.length ||
+   config.archiveApiUrl ||
+   config.blobFileStoreUrls?.length
+ );
  }
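A short usage sketch of the new config fields follows, assuming the `./client/config` export path added in package.json above maps to this module; the bucket and host names are placeholders, equivalent to setting `BLOB_FILE_STORE_URLS` and `BLOB_FILE_STORE_UPLOAD_URL` in the environment.

```ts
// Illustrative values only; equivalent to setting the environment variables
// BLOB_FILE_STORE_URLS and BLOB_FILE_STORE_UPLOAD_URL.
import { type BlobSinkConfig, hasRemoteBlobSinkSources } from '@aztec/blob-sink/client/config';

const config: BlobSinkConfig = {
  blobFileStoreUrls: ['s3://my-bucket/blobs', 'https://blobs.example.com'],
  blobFileStoreUploadUrl: 'gs://my-bucket/blobs',
};

// Filestore URLs now count as a remote blob source, so createBlobSinkClient
// would pick the HTTP client rather than the local one.
console.log(hasRemoteBlobSinkSources(config)); // true
```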

yarn-project/blob-sink/src/client/factory.ts

Lines changed: 20 additions & 2 deletions

@@ -1,12 +1,24 @@
  import { type Logger, createLogger } from '@aztec/foundation/log';

  import { MemoryBlobStore } from '../blobstore/memory_blob_store.js';
+ import type { FileStoreBlobClient } from '../filestore/filestore_blob_client.js';
  import { type BlobSinkConfig, hasRemoteBlobSinkSources } from './config.js';
  import { HttpBlobSinkClient } from './http.js';
  import type { BlobSinkClientInterface } from './interface.js';
  import { LocalBlobSinkClient } from './local.js';

- export function createBlobSinkClient(config?: BlobSinkConfig, deps?: { logger: Logger }): BlobSinkClientInterface {
+ export interface CreateBlobSinkClientDeps {
+   logger?: Logger;
+   /** FileStore clients for reading blobs */
+   fileStoreClients?: FileStoreBlobClient[];
+   /** FileStore client for uploading blobs */
+   fileStoreUploadClient?: FileStoreBlobClient;
+ }
+
+ export function createBlobSinkClient(
+   config?: BlobSinkConfig,
+   deps?: CreateBlobSinkClientDeps,
+ ): BlobSinkClientInterface {
  const log = deps?.logger ?? createLogger('blob-sink:client');
  if (!hasRemoteBlobSinkSources(config)) {
  log.info(`Creating local blob sink client.`);

@@ -18,6 +30,12 @@ export function createBlobSinkClient(config?: BlobSinkConfig, deps?: { logger: L
  blobSinkUrl: config?.blobSinkUrl,
  l1ConsensusHostUrls: config?.l1ConsensusHostUrls,
  archiveApiUrl: config?.archiveApiUrl,
+ fileStoreCount: deps?.fileStoreClients?.length ?? 0,
+ hasFileStoreUpload: !!deps?.fileStoreUploadClient,
+ });
+ return new HttpBlobSinkClient(config, {
+   logger: log,
+   fileStoreClients: deps?.fileStoreClients,
+   fileStoreUploadClient: deps?.fileStoreUploadClient,
  });
- return new HttpBlobSinkClient(config, deps);
  }
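For reference, here is a wiring sketch that mirrors how the AztecNodeService diff above passes filestore clients into this factory. The function name `buildBlobSinkClientWithFileStores`, the metadata values, the example URLs, and the numeric field types are illustrative assumptions, not taken from the PR.

```ts
// Sketch mirroring the AztecNodeService wiring above; values are placeholders.
import { createBlobSinkClient } from '@aztec/blob-sink/client';
import {
  type BlobFileStoreMetadata,
  createReadOnlyFileStoreBlobClients,
  createWritableFileStoreBlobClient,
} from '@aztec/blob-sink/filestore';
import { createLogger } from '@aztec/foundation/log';

async function buildBlobSinkClientWithFileStores() {
  const log = createLogger('example:blob-sink:client');

  // Field types and values assumed from the server.ts usage above.
  const metadata: BlobFileStoreMetadata = {
    l1ChainId: 11155111, // placeholder chain id
    rollupVersion: 1, // placeholder rollup version
    rollupAddress: '0x0000000000000000000000000000000000000000', // placeholder address
  };

  // createReadOnlyFileStoreBlobClients returns the read clients for the given URLs;
  // the writable client is separate and only needed when uploads are enabled.
  const [fileStoreClients, fileStoreUploadClient] = await Promise.all([
    createReadOnlyFileStoreBlobClients(['s3://my-bucket/blobs'], metadata, log),
    createWritableFileStoreBlobClient('file:///var/lib/aztec/blobs', metadata, log),
  ]);

  return createBlobSinkClient(
    { blobFileStoreUrls: ['s3://my-bucket/blobs'] },
    { logger: log, fileStoreClients, fileStoreUploadClient },
  );
}
```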
