@@ -75,6 +75,9 @@ pub struct Batcher {
     s3_client: S3Client,
     s3_bucket_name: String,
     download_endpoint: String,
+    s3_client_secondary: Option<S3Client>,
+    s3_bucket_name_secondary: Option<String>,
+    download_endpoint_secondary: Option<String>,
     eth_ws_url: String,
     eth_ws_url_fallback: String,
     batcher_signer: Arc<SignerMiddlewareT>,
@@ -106,15 +109,40 @@ impl Batcher {
         dotenv().ok();

         // https://docs.aws.amazon.com/sdk-for-rust/latest/dg/localstack.html
-        let upload_endpoint = env::var("UPLOAD_ENDPOINT").ok();
+        // Primary S3 configuration
+        let s3_config_primary = s3::S3Config {
+            access_key_id: env::var("AWS_ACCESS_KEY_ID").ok(),
+            secret_access_key: env::var("AWS_SECRET_ACCESS_KEY").ok(),
+            region: env::var("AWS_REGION").ok(),
+            endpoint_url: env::var("UPLOAD_ENDPOINT").ok(),
+        };

         let s3_bucket_name =
             env::var("AWS_BUCKET_NAME").expect("AWS_BUCKET_NAME not found in environment");

         let download_endpoint =
             env::var("DOWNLOAD_ENDPOINT").expect("DOWNLOAD_ENDPOINT not found in environment");

-        let s3_client = s3::create_client(upload_endpoint).await;
+        let s3_client = s3::create_client(s3_config_primary).await;
+
+        // Secondary S3 configuration (optional)
+        let s3_bucket_name_secondary = env::var("AWS_BUCKET_NAME_SECONDARY").ok();
+        let download_endpoint_secondary = env::var("DOWNLOAD_ENDPOINT_SECONDARY").ok();
+
+        let s3_client_secondary = if s3_bucket_name_secondary.is_some()
+            && download_endpoint_secondary.is_some()
+        {
+            let s3_config_secondary = s3::S3Config {
+                access_key_id: env::var("AWS_ACCESS_KEY_ID_SECONDARY").ok(),
+                secret_access_key: env::var("AWS_SECRET_ACCESS_KEY_SECONDARY").ok(),
+                region: env::var("AWS_REGION_SECONDARY").ok(),
+                endpoint_url: env::var("UPLOAD_ENDPOINT_SECONDARY").ok(),
+            };
+            Some(s3::create_client(s3_config_secondary).await)
+        } else {
+            info!("Secondary S3 configuration not found or incomplete. Operating with primary S3 only.");
+            None
+        };

         let config = ConfigFromYaml::new(config_file);
         // Ensure max_batch_bytes_size can at least hold one proof of max_proof_size,
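For context, the hunk above builds an `s3::S3Config` from environment variables and hands it to `s3::create_client`. The project's `s3` module is not part of this diff; the following is only a hypothetical sketch, based on how the struct and constructor are used above, of what they could look like with the `aws-sdk-s3` crate, assuming every field is optional.

// Hypothetical sketch only: mirrors how `s3::S3Config` and `s3::create_client`
// are used in the diff above; the real module in the repository may differ.
use aws_config::BehaviorVersion;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::Client as S3Client;

pub struct S3Config {
    pub access_key_id: Option<String>,
    pub secret_access_key: Option<String>,
    pub region: Option<String>,
    pub endpoint_url: Option<String>,
}

pub async fn create_client(config: S3Config) -> S3Client {
    let mut loader = aws_config::defaults(BehaviorVersion::latest());
    // Fall back to the default credential/region providers when a field is unset.
    if let (Some(key), Some(secret)) = (config.access_key_id, config.secret_access_key) {
        loader = loader.credentials_provider(Credentials::new(key, secret, None, None, "static"));
    }
    if let Some(region) = config.region {
        loader = loader.region(Region::new(region));
    }
    if let Some(endpoint) = config.endpoint_url {
        // e.g. a LocalStack endpoint for local development
        loader = loader.endpoint_url(endpoint);
    }
    S3Client::new(&loader.load().await)
}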
@@ -252,6 +280,9 @@ impl Batcher {
             s3_client,
             s3_bucket_name,
             download_endpoint,
+            s3_client_secondary,
+            s3_bucket_name_secondary,
+            download_endpoint_secondary,
             eth_ws_url: config.eth_ws_url,
             eth_ws_url_fallback: config.eth_ws_url_fallback,
             batcher_signer,
@@ -1541,7 +1572,18 @@ impl Batcher {
         let batch_merkle_root_hex = hex::encode(batch_merkle_root);
         info!("Batch merkle root: 0x{}", batch_merkle_root_hex);
         let file_name = batch_merkle_root_hex.clone() + ".json";
-        let batch_data_pointer: String = "".to_owned() + &self.download_endpoint + "/" + &file_name;
+
+        let batch_data_pointer = self
+            .upload_batch_to_multiple_s3(batch_bytes, &file_name)
+            .await?;
+        if let Err(e) = self
+            .telemetry
+            .task_uploaded_to_s3(&batch_merkle_root_hex)
+            .await
+        {
+            warn!("Failed to send task status to telemetry: {:?}", e);
+        };
+        info!("Batch uploaded to: {}", batch_data_pointer);

         let num_proofs_in_batch = leaves.len();
         let gas_per_proof = (self.constant_gas_cost()
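Note that `upload_batch_to_multiple_s3` (added further down in this diff) joins the successful download URLs with commas, so `batch_data_pointer` may now carry more than one URL. A minimal, hypothetical sketch of how a consumer of the pointer could split it; the helper name and URLs below are illustrative only and not part of this PR:

// Hypothetical helper: splits a multi-URL batch data pointer into its parts.
fn batch_data_urls(batch_data_pointer: &str) -> Vec<&str> {
    batch_data_pointer
        .split(',')
        .filter(|url| !url.is_empty())
        .collect()
}

fn main() {
    // Illustrative values; real pointers are "<download_endpoint>/<merkle_root>.json".
    let pointer = "https://primary.example/abc.json,https://secondary.example/abc.json";
    assert_eq!(batch_data_urls(pointer).len(), 2);
}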
@@ -1577,16 +1619,6 @@ impl Batcher {
             .gas_price_used_on_latest_batch
             .set(gas_price.as_u64() as i64);

-        info!("Uploading batch to S3...");
-        self.upload_batch_to_s3(batch_bytes, &file_name).await?;
-        if let Err(e) = self
-            .telemetry
-            .task_uploaded_to_s3(&batch_merkle_root_hex)
-            .await
-        {
-            warn!("Failed to send task status to telemetry: {:?}", e);
-        };
-        info!("Batch sent to S3 with name: {}", file_name);
         if let Err(e) = self
             .telemetry
             .task_created(
@@ -1857,22 +1889,99 @@ impl Batcher {
         unlocked
     }

+    /// Uploads the batch to the primary S3 bucket and, if configured, to the secondary one,
+    /// returning the comma-separated URLs of the successful uploads.
+    /// Returns an error only if every upload fails.
+    async fn upload_batch_to_multiple_s3(
+        &self,
+        batch_bytes: &[u8],
+        file_name: &str,
+    ) -> Result<String, BatcherError> {
+        // Upload to the configured S3 buckets and collect the successful URLs
+        let mut successful_urls = Vec::new();
+
+        // Try primary S3 upload
+        if self
+            .upload_batch_to_s3(
+                &self.s3_client,
+                batch_bytes,
+                file_name,
+                &self.s3_bucket_name,
+            )
+            .await
+            .is_ok()
+        {
+            let primary_url = format!("{}/{}", self.download_endpoint, file_name);
+            successful_urls.push(primary_url.clone());
+            info!("Successfully uploaded batch to primary S3: {}", primary_url);
+        } else {
+            warn!("Failed to upload batch to primary S3");
+        }
+
+        // Try secondary S3 upload (if configured)
+        if let (
+            Some(s3_client_secondary),
+            Some(s3_bucket_name_secondary),
+            Some(download_endpoint_secondary),
+        ) = (
+            &self.s3_client_secondary,
+            &self.s3_bucket_name_secondary,
+            &self.download_endpoint_secondary,
+        ) {
+            if self
+                .upload_batch_to_s3(
+                    s3_client_secondary,
+                    batch_bytes,
+                    file_name,
+                    s3_bucket_name_secondary,
+                )
+                .await
+                .is_ok()
+            {
+                let secondary_url = format!("{}/{}", download_endpoint_secondary, file_name);
+                successful_urls.push(secondary_url.clone());
+                info!(
+                    "Successfully uploaded batch to secondary S3: {}",
+                    secondary_url
+                );
+            } else {
+                warn!("Failed to upload batch to secondary S3");
+            }
+        }
+
+        // Update metrics with the number of available data services
+        self.metrics
+            .available_data_services
+            .set(successful_urls.len() as i64);
+
+        // If no upload succeeded, return an error
+        if successful_urls.is_empty() {
+            error!("Failed to upload batch to any configured S3 bucket");
+            return Err(BatcherError::BatchUploadError(
+                "Failed to upload to any S3 bucket".to_string(),
+            ));
+        }
+
+        Ok(successful_urls.join(","))
+    }
+
     /// Uploads the batch to S3.
     /// Retries on recoverable errors using exponential backoff up to `ETHEREUM_CALL_MAX_RETRIES` times:
     /// (0.5 secs - 1 secs - 2 secs - 4 secs - 8 secs).
     async fn upload_batch_to_s3(
         &self,
+        s3_client: &S3Client,
         batch_bytes: &[u8],
         file_name: &str,
+        bucket_name: &str,
     ) -> Result<(), BatcherError> {
         let start = Instant::now();
         let result = retry_function(
             || {
                 Self::upload_batch_to_s3_retryable(
                     batch_bytes,
                     file_name,
-                    self.s3_client.clone(),
-                    &self.s3_bucket_name,
+                    s3_client.clone(),
+                    bucket_name,
                 )
             },
             ETHEREUM_CALL_MIN_RETRY_DELAY,
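The doc comment above describes an exponential backoff schedule of roughly 0.5 s, 1 s, 2 s, 4 s and 8 s across `ETHEREUM_CALL_MAX_RETRIES` attempts. The project's `retry_function` helper is not shown in this hunk; the sketch below only illustrates how such a schedule can be derived from a minimum delay, under the assumption that each retry doubles the previous wait.

use std::time::Duration;

// Illustrative only: reproduces the documented 0.5 s / 1 s / 2 s / 4 s / 8 s schedule
// from a 500 ms minimum delay; the real `retry_function` may compute this differently.
fn backoff_schedule(min_delay_ms: u64, max_retries: u32) -> Vec<Duration> {
    (0..max_retries)
        .map(|attempt| Duration::from_millis(min_delay_ms * 2u64.pow(attempt)))
        .collect()
}

fn main() {
    for (i, delay) in backoff_schedule(500, 5).iter().enumerate() {
        println!("retry {} after {:?}", i + 1, delay);
    }
}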