diff --git a/Cargo.toml b/Cargo.toml index 861b70ea958..3305683c2e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,6 +119,10 @@ parking_lot = "0.12.4" sqlparser-latest = { version = "0.57.0", package = "sqlparser", features = ["visitor"] } tokio-util = "0.7.15" +[workspace.lints.clippy] +too_many_arguments = "allow" +type_complexity = "allow" + # Incremental compilation on Rust 1.58 causes an ICE on build. As soon as graph node builds again, these can be removed. [profile.test] incremental = false diff --git a/chain/common/Cargo.toml b/chain/common/Cargo.toml index eef11ed85a3..1f945ea984e 100644 --- a/chain/common/Cargo.toml +++ b/chain/common/Cargo.toml @@ -10,3 +10,6 @@ protobuf = "3.0.2" protobuf-parse = "3.7.2" anyhow = "1" heck = "0.5" + +[lints] +workspace = true diff --git a/chain/common/tests/test-acme.rs b/chain/common/tests/test-acme.rs index 554e4ecbd5c..cd0b32b8582 100644 --- a/chain/common/tests/test-acme.rs +++ b/chain/common/tests/test-acme.rs @@ -63,7 +63,7 @@ fn required_ok() { !f.required, "Transaction.events field should NOT be required!" ), - _ => assert!(false, "Unexpected message field [{}]!", f.name), + _ => panic!("Unexpected message field [{}]!", f.name), }; }); } diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index f3780ddd224..17eafbeaaa0 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -29,3 +29,6 @@ base64 = "0" [build-dependencies] tonic-build = { workspace = true } + +[lints] +workspace = true diff --git a/chain/ethereum/examples/firehose.rs b/chain/ethereum/examples/firehose.rs index 5a70794dfe2..281a3eb3053 100644 --- a/chain/ethereum/examples/firehose.rs +++ b/chain/ethereum/examples/firehose.rs @@ -9,6 +9,7 @@ use graph::{ use graph_chain_ethereum::codec; use hex::ToHex; use prost::Message; +use std::slice; use std::sync::Arc; use tonic::Streaming; @@ -25,7 +26,7 @@ async fn main() -> Result<(), Error> { let host = "https://api.streamingfast.io:443".to_string(); let metrics = Arc::new(EndpointMetrics::new( logger, - &[host.clone()], + slice::from_ref(&host), Arc::new(MetricsRegistry::mock()), )); diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index efadb95c089..50b0d743fd9 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -71,7 +71,7 @@ impl EventSignatureWithTopics { /// If self.address is None, it's considered a wildcard match. /// Otherwise, it must match the provided address. /// It must also match the topics if they are Some - pub fn matches(&self, address: Option<&H160>, sig: H256, topics: &Vec) -> bool { + pub fn matches(&self, address: Option<&H160>, sig: H256, topics: &[H256]) -> bool { // If self.address is None, it's considered a wildcard match. Otherwise, it must match the provided address. 
let address_matches = match self.address { Some(ref self_addr) => address == Some(self_addr), @@ -80,15 +80,18 @@ impl EventSignatureWithTopics { address_matches && self.signature == sig - && self.topic1.as_ref().map_or(true, |t1| { - topics.get(1).map_or(false, |topic| t1.contains(topic)) - }) - && self.topic2.as_ref().map_or(true, |t2| { - topics.get(2).map_or(false, |topic| t2.contains(topic)) - }) - && self.topic3.as_ref().map_or(true, |t3| { - topics.get(3).map_or(false, |topic| t3.contains(topic)) - }) + && self + .topic1 + .as_ref() + .is_none_or(|t1| topics.get(1).is_some_and(|topic| t1.contains(topic))) + && self + .topic2 + .as_ref() + .is_none_or(|t2| topics.get(2).is_some_and(|topic| t2.contains(topic))) + && self + .topic3 + .as_ref() + .is_none_or(|t3| topics.get(3).is_some_and(|topic| t3.contains(topic))) } } @@ -390,7 +393,7 @@ impl EthereumLogFilter { &self, event_signature: &H256, contract_address: Option<&Address>, - topics: &Vec, + topics: &[H256], ) -> bool { // Check for wildcard events first. if self.wildcard_events.get(event_signature) == Some(&true) { @@ -536,10 +539,8 @@ impl EthereumLogFilter { // Handle events with topic filters. filters.extend( self.events_with_topic_filters - .into_iter() - .map(|(event_with_topics, _)| { - EthGetLogsFilter::from_event_with_topics(event_with_topics) - }), + .into_keys() + .map(EthGetLogsFilter::from_event_with_topics), ); // The current algorithm is to repeatedly find the maximum cardinality vertex and turn all @@ -614,16 +615,16 @@ pub struct EthereumCallFilter { pub wildcard_signatures: HashSet, } -impl Into> for EthereumCallFilter { - fn into(self) -> Vec { - if self.is_empty() { +impl From for Vec { + fn from(val: EthereumCallFilter) -> Self { + if val.is_empty() { return Vec::new(); } let EthereumCallFilter { contract_addresses_function_signatures, wildcard_signatures, - } = self; + } = val; let mut filters: Vec = contract_addresses_function_signatures .into_iter() @@ -771,14 +772,13 @@ impl FromIterator<(BlockNumber, Address, FunctionSelector)> for EthereumCallFilt .for_each(|(start_block, address, function_signature)| { lookup .entry(address) + .and_modify(|set| { + if set.0 > start_block { + set.0 = start_block + } + set.1.insert(function_signature); + }) .or_insert((start_block, HashSet::default())); - lookup.get_mut(&address).map(|set| { - if set.0 > start_block { - set.0 = start_block - } - set.1.insert(function_signature); - set - }); }); EthereumCallFilter { contract_addresses_function_signatures: lookup, @@ -810,9 +810,9 @@ pub struct EthereumBlockFilter { pub trigger_every_block: bool, } -impl Into> for EthereumBlockFilter { - fn into(self) -> Vec { - self.contract_addresses +impl From for Vec { + fn from(val: EthereumBlockFilter) -> Self { + val.contract_addresses .into_iter() .map(|(_, addr)| addr) .sorted() @@ -847,9 +847,8 @@ impl EthereumBlockFilter { .block_handlers .clone() .into_iter() - .any(|block_handler| match block_handler.filter { - Some(BlockHandlerFilter::Call) => true, - _ => false, + .any(|block_handler| { + matches!(block_handler.filter, Some(BlockHandlerFilter::Call)) }); let has_block_handler_without_filter = data_source @@ -1411,7 +1410,7 @@ mod tests { filter.event_signatures.sort(); } assert_eq!(expected_log_filters, actual_log_filters); - assert_eq!(true, actual_send_all_block_headers); + assert!(actual_send_all_block_headers); } #[test] @@ -1478,7 +1477,7 @@ mod tests { } assert_eq!(expected_log_filters, actual_log_filters); - assert_eq!(true, actual_send_all_block_headers); + 
assert!(actual_send_all_block_headers); } #[test] @@ -1505,76 +1504,63 @@ mod tests { wildcard_signatures: HashSet::from_iter(vec![[11u8; 4]]), }; - assert_eq!( - false, - filter.matches(&call(address(2), vec![])), + assert!( + !filter.matches(&call(address(2), vec![])), "call with empty bytes are always ignore, whatever the condition" ); - assert_eq!( - false, - filter.matches(&call(address(4), vec![1; 36])), + assert!( + !filter.matches(&call(address(4), vec![1; 36])), "call with incorrect address should be ignored" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(1), vec![1; 36])), "call with correct address & signature should match" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(1), vec![1; 32])), "call with correct address & signature, but with incorrect input size should match" ); - assert_eq!( - false, - filter.matches(&call(address(1), vec![4u8; 36])), + assert!( + !filter.matches(&call(address(1), vec![4u8; 36])), "call with correct address but incorrect signature for a specific contract filter (i.e. matches some signatures) should be ignored" ); - assert_eq!( - false, - filter.matches(&call(address(0), vec![11u8; 36])), + assert!( + !filter.matches(&call(address(0), vec![11u8; 36])), "this signature should not match filter1, this avoid false passes if someone changes the code" ); - assert_eq!( - false, - filter2.matches(&call(address(1), vec![10u8; 36])), + assert!( + !filter2.matches(&call(address(1), vec![10u8; 36])), "this signature should not match filter2 because the address is not the expected one" ); - assert_eq!( - true, + assert!( filter2.matches(&call(address(0), vec![10u8; 36])), "this signature should match filter2 on the non wildcard clause" ); - assert_eq!( - true, + assert!( filter2.matches(&call(address(0), vec![11u8; 36])), "this signature should match filter2 on the wildcard clause" ); // extend filter1 and test the filter 2 stuff again filter.extend(filter2); - assert_eq!( - true, + assert!( filter.matches(&call(address(0), vec![11u8; 36])), "this signature should not match filter1, this avoid false passes if someone changes the code" ); - assert_eq!( - false, - filter.matches(&call(address(1), vec![10u8; 36])), + assert!( + !filter.matches(&call(address(1), vec![10u8; 36])), "this signature should not match filter2 because the address is not the expected one" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(0), vec![10u8; 36])), "this signature should match filter2 on the non wildcard clause" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(0), vec![11u8; 36])), "this signature should match filter2 on the wildcard clause" ); @@ -1674,7 +1660,7 @@ mod tests { base.extend(extension); - assert_eq!(true, base.trigger_every_block); + assert!(base.trigger_every_block); } #[test] @@ -1694,7 +1680,7 @@ mod tests { base.extend(extension); - assert_eq!(true, base.trigger_every_block); + assert!(base.trigger_every_block); assert_eq!( HashSet::from_iter(vec![(10, address(2))]), base.contract_addresses, @@ -1718,7 +1704,7 @@ mod tests { base.extend(extension); - assert_eq!(true, base.trigger_every_block); + assert!(base.trigger_every_block); assert_eq!( HashSet::from_iter(vec![(10, address(2)), (10, address(1))]), base.contract_addresses, diff --git a/chain/ethereum/src/buffered_call_cache.rs b/chain/ethereum/src/buffered_call_cache.rs index c6e0040b570..2ec0bfa40e7 100644 --- a/chain/ethereum/src/buffered_call_cache.rs +++ b/chain/ethereum/src/buffered_call_cache.rs @@ -61,7 +61,7 @@ impl EthereumCallCache 
for BufferedCallCache { return Ok(Some(value)); } - let result = self.call_cache.get_call(&call, block).await?; + let result = self.call_cache.get_call(call, block).await?; let mut buffer = self.buffer.lock().unwrap(); if let Some(call::Response { diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 11ca025e0e2..c13f0bfdde8 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -760,7 +760,7 @@ async fn fetch_unique_blocks_from_cache( // Load blocks from the cache let blocks_map = chain_store .cheap_clone() - .block_ptrs_by_numbers(block_numbers.iter().map(|&b| b.into()).collect::>()) + .block_ptrs_by_numbers(block_numbers.iter().copied().collect::>()) .await .map_err(|e| { error!(logger, "Error accessing block cache {}", e); diff --git a/chain/ethereum/src/codec.rs b/chain/ethereum/src/codec.rs index 114982607ec..935b294599b 100644 --- a/chain/ethereum/src/codec.rs +++ b/chain/ethereum/src/codec.rs @@ -499,6 +499,21 @@ impl BlockchainBlock for HeaderOnlyBlock { } } +fn get_to_address(trace: &TransactionTrace) -> Result, Error> { + // Try to detect contract creation transactions, which have no 'to' address + let is_contract_creation = trace.to.is_empty() + || trace + .calls + .first() + .is_some_and(|call| CallType::try_from(call.call_type) == Ok(CallType::Create)); + + if is_contract_creation { + Ok(None) + } else { + Ok(Some(trace.to.try_decode_proto("transaction to address")?)) + } +} + #[cfg(test)] mod test { use graph::{blockchain::Block as _, prelude::chrono::Utc}; @@ -511,14 +526,19 @@ mod test { #[test] fn ensure_block_serialization() { let now = Utc::now().timestamp(); - let mut block = Block::default(); - let mut header = BlockHeader::default(); - header.timestamp = Some(Timestamp { - seconds: now, - nanos: 0, - }); - block.header = Some(header); + let header = BlockHeader { + timestamp: Some(Timestamp { + seconds: now, + nanos: 0, + }), + ..Default::default() + }; + + let block = Block { + header: Some(header.clone()), + ..Default::default() + }; let str_block = block.data().unwrap().to_string(); @@ -529,18 +549,3 @@ mod test { ); } } - -fn get_to_address(trace: &TransactionTrace) -> Result, Error> { - // Try to detect contract creation transactions, which have no 'to' address - let is_contract_creation = trace.to.len() == 0 - || trace.calls.get(0).map_or(false, |call| { - CallType::try_from(call.call_type) - .map_or(false, |call_type| call_type == CallType::Create) - }); - - if is_contract_creation { - Ok(None) - } else { - Ok(Some(trace.to.try_decode_proto("transaction to address")?)) - } -} diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index e314b5a158f..5be627baf25 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -97,7 +97,7 @@ impl blockchain::DataSource for DataSource { // Obtain the address from the parameters let string = params - .get(0) + .first() .with_context(|| { format!( "Failed to create data source from template `{}`: address parameter is missing", @@ -467,7 +467,7 @@ impl DataSource { self.mapping .event_handlers .iter() - .filter(|handler| handler.matches(&log)) + .filter(|handler| handler.matches(log)) .cloned() .collect::>() } @@ -513,8 +513,7 @@ impl DataSource { .find(move |handler| match handler.filter { Some(BlockHandlerFilter::Polling { every }) => { let start_block = self.start_block; - let should_trigger = (block - start_block) % every.get() as i32 == 0; - should_trigger + (block - start_block) % every.get() as i32 == 0 } None => 
true, _ => false, @@ -651,7 +650,7 @@ impl DataSource { // `address,uint256,bool) arguments.push(')'); // `operation(address,uint256,bool)` - let actual_signature = vec![function.name.clone(), arguments].join("("); + let actual_signature = [function.name.clone(), arguments].join("("); target_signature == actual_signature }) } @@ -1012,7 +1011,10 @@ impl DecoderHook { // We don't have time measurements for each call (though that would be nice) // Use the average time of all calls that we want to observe as the time for // each call - let to_observe = results.iter().map(|(_, source)| source.observe()).count() as f64; + let to_observe = results + .iter() + .filter(|(_, source)| source.observe()) + .count() as f64; let elapsed = start.elapsed().as_secs_f64() / to_observe; results @@ -1385,11 +1387,9 @@ impl UnresolvedMapping { // resolve each abi abis.into_iter() .map(|unresolved_abi| async { - Result::<_, Error>::Ok( - unresolved_abi - .resolve(deployment_hash, resolver, logger) - .await?, - ) + unresolved_abi + .resolve(deployment_hash, resolver, logger) + .await }) .collect::>() .try_collect::>(), @@ -1416,7 +1416,7 @@ impl UnresolvedMapping { ) })?; - unresolved_handler.resolve(abi_json, &spec_version) + unresolved_handler.resolve(abi_json, spec_version) }) .collect::, anyhow::Error>>()?; @@ -1562,14 +1562,14 @@ impl MappingEventHandler { pub fn matches(&self, log: &Log) -> bool { let matches_topic = |index: usize, topic_opt: &Option>| -> bool { - topic_opt.as_ref().map_or(true, |topic_vec| { + topic_opt.as_ref().is_none_or(|topic_vec| { log.topics .get(index) - .map_or(false, |log_topic| topic_vec.contains(log_topic)) + .is_some_and(|log_topic| topic_vec.contains(log_topic)) }) }; - if let Some(topic0) = log.topics.get(0) { + if let Some(topic0) = log.topics.first() { return self.topic0() == *topic0 && matches_topic(1, &self.topic1) && matches_topic(2, &self.topic2) @@ -1581,9 +1581,9 @@ impl MappingEventHandler { } pub fn has_additional_topics(&self) -> bool { - self.topic1.as_ref().map_or(false, |v| !v.is_empty()) - || self.topic2.as_ref().map_or(false, |v| !v.is_empty()) - || self.topic3.as_ref().map_or(false, |v| !v.is_empty()) + self.topic1.as_ref().is_some_and(|v| !v.is_empty()) + || self.topic2.as_ref().is_some_and(|v| !v.is_empty()) + || self.topic3.as_ref().is_some_and(|v| !v.is_empty()) } } diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index c4dd377fa58..b1ee277c0d3 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -169,7 +169,7 @@ impl EthereumAdapter { .trace() .filter(trace_filter) .await - .map(move |traces| { + .inspect(|traces| { if !traces.is_empty() { if to == from { debug!( @@ -188,7 +188,6 @@ impl EthereumAdapter { ); } } - traces }) .map_err(Error::from); @@ -499,17 +498,14 @@ impl EthereumAdapter { block_ptr: BlockPtr, ) -> Result { let web3 = self.web3.clone(); - let logger = Logger::new(&logger, o!("provider" => self.provider.clone())); + let logger = Logger::new(logger, o!("provider" => self.provider.clone())); let block_id = self.block_ptr_to_id(&block_ptr); let retry_log_message = format!("eth_getCode RPC call for block {}", block_ptr); retry(retry_log_message, &logger) .redact_log_urls(true) - .when(|result| match result { - Ok(_) => false, - Err(_) => true, - }) + .when(|result| result.is_err()) .limit(ENV_VARS.request_retries) .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) .run(move || { @@ -534,17 +530,14 @@ impl EthereumAdapter { block_ptr: BlockPtr, ) -> 
Result { let web3 = self.web3.clone(); - let logger = Logger::new(&logger, o!("provider" => self.provider.clone())); + let logger = Logger::new(logger, o!("provider" => self.provider.clone())); let block_id = self.block_ptr_to_id(&block_ptr); let retry_log_message = format!("eth_getBalance RPC call for block {}", block_ptr); retry(retry_log_message, &logger) .redact_log_urls(true) - .when(|result| match result { - Ok(_) => false, - Err(_) => true, - }) + .when(|result| result.is_err()) .limit(ENV_VARS.request_retries) .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) .run(move || { @@ -590,7 +583,7 @@ impl EthereumAdapter { async move { let req = CallRequest { to: Some(call_data.address), - gas: gas.map(|val| web3::types::U256::from(val)), + gas: gas.map(web3::types::U256::from), data: Some(Bytes::from(call_data.encoded_call.to_vec())), from: None, gas_price: None, @@ -736,7 +729,7 @@ impl EthereumAdapter { .await?; if let Err(e) = cache .set_call( - &logger, + logger, req.cheap_clone(), call.block_ptr.cheap_clone(), result.clone(), @@ -1034,7 +1027,7 @@ impl EthereumAdapter { let blocks_matching_polling_filter = self.load_ptrs_for_blocks( logger.clone(), - matching_blocks.iter().map(|(k, _)| *k).collect_vec(), + matching_blocks.keys().cloned().collect_vec(), ); let block_futures = blocks_matching_polling_filter.map(move |ptrs| { @@ -1092,7 +1085,7 @@ impl EthereumAdapter { // all the traces for the block, we need to ensure that the // block hash for the traces is equal to the desired block hash. // Assume all traces are for the same block. - if traces.iter().nth(0).unwrap().block_hash != block_hash { + if traces.first().unwrap().block_hash != block_hash { return Err(anyhow!( "Trace stream returned traces for an unexpected block: \ number = `{}`, hash = `{}`", @@ -1217,9 +1210,8 @@ impl EthereumAdapterTrait for EthereumAdapter { ENV_VARS.genesis_block_number.into(), ))) .await - .map_err(|e| { + .inspect_err(|_| { metrics.set_status(ProviderStatus::GenesisFail, &provider); - e })? .and_then(|gen_block| gen_block.hash.map(BlockHash::from)) .ok_or_else(|| anyhow!("Ethereum node could not find genesis block")) @@ -2759,6 +2751,7 @@ mod tests { // transport.set_response(block_json); // transport.add_response(json_value); + #[allow(clippy::arc_with_non_send_sync)] let web3 = Arc::new(Web3::new(transport.clone())); let result = check_block_receipt_support( web3.clone(), diff --git a/chain/ethereum/src/ingestor.rs b/chain/ethereum/src/ingestor.rs index 47cae0b93c5..e7821248d90 100644 --- a/chain/ethereum/src/ingestor.rs +++ b/chain/ethereum/src/ingestor.rs @@ -140,7 +140,7 @@ impl PollingBlockIngestor { // ingest_blocks will return a (potentially incomplete) list of blocks that are // missing. let mut missing_block_hash = self - .ingest_block(&logger, ð_adapter, &latest_block.hash) + .ingest_block(logger, ð_adapter, &latest_block.hash) .await?; // Repeatedly fetch missing parent blocks, and ingest them. @@ -162,7 +162,7 @@ impl PollingBlockIngestor { // iteration will have at most block number N-1. // - Therefore, the loop will iterate at most ancestor_count times. while let Some(hash) = missing_block_hash { - missing_block_hash = self.ingest_block(&logger, ð_adapter, &hash).await?; + missing_block_hash = self.ingest_block(logger, ð_adapter, &hash).await?; } Ok(()) } @@ -181,7 +181,7 @@ impl PollingBlockIngestor { .block_by_hash(logger, block_hash) .await? 
.ok_or(IngestorError::BlockUnavailable(block_hash))?; - let ethereum_block = eth_adapter.load_full_block(&logger, block).await?; + let ethereum_block = eth_adapter.load_full_block(logger, block).await?; // We need something that implements `Block` to store the block; the // store does not care whether the block is final or not @@ -212,7 +212,7 @@ impl PollingBlockIngestor { eth_adapter: &Arc, ) -> Result { eth_adapter - .latest_block_header(&logger) + .latest_block_header(logger) .await .map(|block| block.into()) } @@ -252,12 +252,8 @@ impl BlockIngestor for PollingBlockIngestor { .logger .new(o!("provider" => eth_adapter.provider().to_string())); - match self.do_poll(&logger, eth_adapter).await { - // Some polls will fail due to transient issues - Err(err) => { - error!(logger, "Trying again after block polling failed: {}", err); - } - Ok(()) => (), + if let Err(err) = self.do_poll(&logger, eth_adapter).await { + error!(logger, "Trying again after block polling failed: {}", err); } if ENV_VARS.cleanup_blocks { diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index ca45411cdc2..536f7a8a54d 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -197,7 +197,7 @@ impl EthereumNetworkAdapters { required_capabilities: &NodeCapabilities, retest_percent: f64, ) -> Result, Error> { - let retest_rng: f64 = (&mut rand::rng()).random(); + let retest_rng: f64 = rand::rng().random(); let cheapest = input.into_iter().choose_multiple(&mut rand::rng(), 3); let cheapest = cheapest.iter(); @@ -329,6 +329,7 @@ mod tests { use super::{EthereumNetworkAdapter, EthereumNetworkAdapters, NodeCapabilities}; #[test] + #[allow(clippy::neg_cmp_op_on_partial_ord)] fn ethereum_capabilities_comparison() { let archive = NodeCapabilities { archive: true, @@ -352,35 +353,35 @@ mod tests { }; // Test all real combinations of capability comparisons - assert_eq!(false, &full >= &archive); - assert_eq!(false, &full >= &traces); - assert_eq!(false, &full >= &archive_traces); - assert_eq!(true, &full >= &full); - assert_eq!(false, &full >= &full_traces); - - assert_eq!(true, &archive >= &archive); - assert_eq!(false, &archive >= &traces); - assert_eq!(false, &archive >= &archive_traces); - assert_eq!(true, &archive >= &full); - assert_eq!(false, &archive >= &full_traces); - - assert_eq!(false, &traces >= &archive); - assert_eq!(true, &traces >= &traces); - assert_eq!(false, &traces >= &archive_traces); - assert_eq!(true, &traces >= &full); - assert_eq!(true, &traces >= &full_traces); - - assert_eq!(true, &archive_traces >= &archive); - assert_eq!(true, &archive_traces >= &traces); - assert_eq!(true, &archive_traces >= &archive_traces); - assert_eq!(true, &archive_traces >= &full); - assert_eq!(true, &archive_traces >= &full_traces); - - assert_eq!(false, &full_traces >= &archive); - assert_eq!(true, &full_traces >= &traces); - assert_eq!(false, &full_traces >= &archive_traces); - assert_eq!(true, &full_traces >= &full); - assert_eq!(true, &full_traces >= &full_traces); + assert!(!(full >= archive)); + assert!(!(full >= traces)); + assert!(!(full >= archive_traces)); + assert!(full >= full); + assert!(!(full >= full_traces)); + + assert!(archive >= archive); + assert!(!(archive >= traces)); + assert!(!(archive >= archive_traces)); + assert!(archive >= full); + assert!(!(archive >= full_traces)); + + assert!(!(traces >= archive)); + assert!(traces >= traces); + assert!(!(traces >= archive_traces)); + assert!(traces >= full); + assert!(traces >= full_traces); + + 
assert!(archive_traces >= archive); + assert!(archive_traces >= traces); + assert!(archive_traces >= archive_traces); + assert!(archive_traces >= full); + assert!(archive_traces >= full_traces); + + assert!(!(full_traces >= archive)); + assert!(full_traces >= traces); + assert!(!(full_traces >= archive_traces)); + assert!(full_traces >= full); + assert!(full_traces >= full_traces); } #[graph::test] @@ -463,17 +464,14 @@ mod tests { }) .await .unwrap(); - assert_eq!(adapter.is_call_only(), false); + assert!(!adapter.is_call_only()); } // Check limits { let adapter = adapters.call_or_cheapest(None).unwrap(); assert!(adapter.is_call_only()); - assert_eq!( - adapters.call_or_cheapest(None).unwrap().is_call_only(), - false - ); + assert!(!adapters.call_or_cheapest(None).unwrap().is_call_only()); } // Check empty falls back to call only @@ -485,7 +483,7 @@ mod tests { traces: false, })) .unwrap(); - assert_eq!(adapter.is_call_only(), false); + assert!(!adapter.is_call_only()); } } @@ -553,11 +551,11 @@ mod tests { // verify that after all call_only were exhausted, we can still // get normal adapters - let keep: Vec> = vec![0; 10] + let keep: Vec> = [0; 10] .iter() .map(|_| adapters.call_or_cheapest(None).unwrap()) .collect(); - assert_eq!(keep.iter().any(|a| !a.is_call_only()), false); + assert!(!keep.iter().any(|a| !a.is_call_only())); } #[graph::test] @@ -621,10 +619,7 @@ mod tests { // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_call_adapter), 2); assert_eq!(Arc::strong_count(ð_adapter), 2); - assert_eq!( - adapters.call_or_cheapest(None).unwrap().is_call_only(), - false - ); + assert!(!adapters.call_or_cheapest(None).unwrap().is_call_only()); } #[graph::test] @@ -667,10 +662,7 @@ mod tests { .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_adapter), 2); - assert_eq!( - adapters.call_or_cheapest(None).unwrap().is_call_only(), - false - ); + assert!(!adapters.call_or_cheapest(None).unwrap().is_call_only()); } #[graph::test] @@ -690,19 +682,19 @@ mod tests { let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let chain_id: Word = "chain_id".into(); - let adapters = vec![ + let adapters = [ fake_adapter( &logger, - &unavailable_provider, + unavailable_provider, &provider_metrics, &metrics, false, ) .await, - fake_adapter(&logger, &error_provider, &provider_metrics, &metrics, false).await, + fake_adapter(&logger, error_provider, &provider_metrics, &metrics, false).await, fake_adapter( &logger, - &no_error_provider, + no_error_provider, &provider_metrics, &metrics, false, @@ -813,7 +805,7 @@ mod tests { archive: true, traces: false, }, - adapter: fake_adapter(&logger, &error_provider, &provider_metrics, &metrics, false) + adapter: fake_adapter(&logger, error_provider, &provider_metrics, &metrics, false) .await, limit: SubgraphLimit::Unlimited, }); @@ -827,7 +819,7 @@ mod tests { }, adapter: fake_adapter( &logger, - &no_error_provider, + no_error_provider, &provider_metrics, &metrics, false, @@ -891,7 +883,7 @@ mod tests { }, adapter: fake_adapter( &logger, - &no_error_provider, + no_error_provider, &provider_metrics, &metrics, false, @@ -901,11 +893,7 @@ mod tests { }); let manager = ProviderManager::new( logger, - vec![( - chain_id.clone(), - no_available_adapter.iter().cloned().collect(), - )] - .into_iter(), + vec![(chain_id.clone(), no_available_adapter.to_vec())].into_iter(), ProviderCheckStrategy::MarkAsValid, ); @@ -927,7 +915,7 @@ mod tests { call_only: bool, ) -> Arc { let 
transport = Transport::new_rpc( - Url::parse(&"http://127.0.0.1").unwrap(), + Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new(), endpoint_metrics.clone(), "", diff --git a/chain/ethereum/src/polling_block_stream.rs b/chain/ethereum/src/polling_block_stream.rs index 9802f7b7d5d..063ada95f38 100644 --- a/chain/ethereum/src/polling_block_stream.rs +++ b/chain/ethereum/src/polling_block_stream.rs @@ -5,7 +5,6 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; -use tokio; use graph::blockchain::block_stream::{ BlockStream, BlockStreamError, BlockStreamEvent, BlockWithTriggers, ChainHeadUpdateStream, @@ -39,7 +38,7 @@ enum BlockStreamState { /// store up to date with the chain store. /// /// Valid next states: BeginReconciliation - YieldingBlocks(Box>>), + YieldingBlocks(VecDeque>), /// The BlockStream experienced an error and is pausing before attempting to produce /// blocks again. @@ -407,6 +406,7 @@ impl PollingBlockStreamContext { // block number, and checking to see if the block we found matches the // subgraph_ptr. + #[allow(clippy::unnecessary_unwrap)] let subgraph_ptr = subgraph_ptr.expect("subgraph block pointer should not be `None` here"); @@ -523,8 +523,7 @@ impl Stream for PollingBlockStream { } // Switch to yielding state until next_blocks is depleted - self.state = - BlockStreamState::YieldingBlocks(Box::new(next_blocks)); + self.state = BlockStreamState::YieldingBlocks(next_blocks); // Yield the first block in next_blocks continue; diff --git a/chain/ethereum/src/runtime/abi.rs b/chain/ethereum/src/runtime/abi.rs index a716c4ea3a8..7a772caccec 100644 --- a/chain/ethereum/src/runtime/abi.rs +++ b/chain/ethereum/src/runtime/abi.rs @@ -438,7 +438,7 @@ impl<'a> ToAscObj for EthereumBlockData<'a> { gas: &GasCounter, ) -> Result { let size = match self.size() { - Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(&size), gas).await?, + Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(size), gas).await?, None => AscPtr::null(), }; @@ -474,11 +474,11 @@ impl<'a> ToAscObj for EthereumBlockData<'a> { gas: &GasCounter, ) -> Result { let size = match self.size() { - Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(&size), gas).await?, + Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(size), gas).await?, None => AscPtr::null(), }; let base_fee_per_block = match self.base_fee_per_gas() { - Some(base_fee) => asc_new(heap, &BigInt::from_unsigned_u256(&base_fee), gas).await?, + Some(base_fee) => asc_new(heap, &BigInt::from_unsigned_u256(base_fee), gas).await?, None => AscPtr::null(), }; diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index acbf41c62a3..2d06b28733e 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -315,7 +315,7 @@ async fn eth_call( fn result_as_string(result: &Result>, HostExportError>) -> String { match result { - Ok(Some(tokens)) => format!("({})", tokens_as_string(&tokens)), + Ok(Some(tokens)) => format!("({})", tokens_as_string(tokens)), Ok(None) => "none".to_string(), Err(_) => "error".to_string(), } diff --git a/chain/ethereum/src/tests.rs b/chain/ethereum/src/tests.rs index 00873f8ea87..6ed05265cf3 100644 --- a/chain/ethereum/src/tests.rs +++ b/chain/ethereum/src/tests.rs @@ -26,24 +26,31 @@ fn test_trigger_ordering() { EthereumBlockTriggerType::WithCallTo(Address::random()), ); - let mut call1 = EthereumCall::default(); - call1.transaction_index = 1; + let call1 = 
EthereumCall { + transaction_index: 1, + ..Default::default() + }; let call1 = EthereumTrigger::Call(Arc::new(call1)); - let mut call2 = EthereumCall::default(); - call2.transaction_index = 2; - call2.input = Bytes(vec![0]); + let call2 = EthereumCall { + transaction_index: 2, + input: Bytes(vec![0]), + ..Default::default() + }; let call2 = EthereumTrigger::Call(Arc::new(call2)); - let mut call3 = EthereumCall::default(); - call3.transaction_index = 3; + let call3 = EthereumCall { + transaction_index: 3, + ..Default::default() + }; let call3 = EthereumTrigger::Call(Arc::new(call3)); // Call with the same tx index as call2 - let mut call4 = EthereumCall::default(); - call4.transaction_index = 2; - // different than call2 so they don't get mistaken as the same - call4.input = Bytes(vec![1]); + let call4 = EthereumCall { + transaction_index: 2, + input: Bytes(vec![1]), + ..Default::default() + }; let call4 = EthereumTrigger::Call(Arc::new(call4)); fn create_log(tx_index: u64, log_index: u64) -> Arc { @@ -92,13 +99,14 @@ fn test_trigger_ordering() { let logger = Logger::root(slog::Discard, o!()); - let mut b: LightEthereumBlock = Default::default(); - - // This is necessary because inside of BlockWithTriggers::new - // there's a log for both fields. So just using Default above - // gives None on them. - b.number = Some(Default::default()); - b.hash = Some(Default::default()); + // The field initializers are necessary because inside of + // BlockWithTriggers::new there's a log for both fields. So just using + // Default above gives None on them. + let b: LightEthereumBlock = LightEthereumBlock { + number: Some(Default::default()), + hash: Some(Default::default()), + ..Default::default() + }; // Test that `BlockWithTriggers` sorts the triggers. let block_with_triggers = BlockWithTriggers::::new( @@ -109,7 +117,7 @@ fn test_trigger_ordering() { let expected = vec![log1, log2, call1, log3, call2, call4, call3, block2, block1] .into_iter() - .map(|t| Trigger::Chain(t)) + .map(Trigger::Chain) .collect::>(); assert_eq!(block_with_triggers.trigger_data, expected); @@ -130,21 +138,29 @@ fn test_trigger_dedup() { // duplicate block2 let block3 = block2.clone(); - let mut call1 = EthereumCall::default(); - call1.transaction_index = 1; + let call1 = EthereumCall { + transaction_index: 1, + ..Default::default() + }; let call1 = EthereumTrigger::Call(Arc::new(call1)); - let mut call2 = EthereumCall::default(); - call2.transaction_index = 2; + let call2 = EthereumCall { + transaction_index: 2, + ..Default::default() + }; let call2 = EthereumTrigger::Call(Arc::new(call2)); - let mut call3 = EthereumCall::default(); - call3.transaction_index = 3; + let call3 = EthereumCall { + transaction_index: 3, + ..Default::default() + }; let call3 = EthereumTrigger::Call(Arc::new(call3)); // duplicate call2 - let mut call4 = EthereumCall::default(); - call4.transaction_index = 2; + let call4 = EthereumCall { + transaction_index: 2, + ..Default::default() + }; let call4 = EthereumTrigger::Call(Arc::new(call4)); fn create_log(tx_index: u64, log_index: u64) -> Arc { @@ -190,13 +206,14 @@ fn test_trigger_dedup() { let logger = Logger::root(slog::Discard, o!()); - let mut b: LightEthereumBlock = Default::default(); - - // This is necessary because inside of BlockWithTriggers::new - // there's a log for both fields. So just using Default above - // gives None on them. 
- b.number = Some(Default::default()); - b.hash = Some(Default::default()); + // The field initializers are necessary because inside of + // BlockWithTriggers::new there's a log for both fields. So just using + // Default above gives None on them. + let b: LightEthereumBlock = LightEthereumBlock { + number: Some(Default::default()), + hash: Some(Default::default()), + ..Default::default() + }; // Test that `BlockWithTriggers` sorts the triggers. let block_with_triggers = BlockWithTriggers::::new( @@ -207,7 +224,7 @@ fn test_trigger_dedup() { let expected = vec![log1, log2, call1, log3, call2, call3, block2, block1] .into_iter() - .map(|t| Trigger::Chain(t)) + .map(Trigger::Chain) .collect::>(); assert_eq!(block_with_triggers.trigger_data, expected); diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index bbbaa69a8d2..c9225cc3ce9 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -359,7 +359,7 @@ impl EthereumTrigger { Some(address) } EthereumTrigger::Call(call) => Some(&call.to), - EthereumTrigger::Log(log_ref) => Some(&log_ref.address()), + EthereumTrigger::Log(log_ref) => Some(log_ref.address()), // Unfiltered block triggers match any data source address. EthereumTrigger::Block(_, EthereumBlockTriggerType::End) => None, EthereumTrigger::Block(_, EthereumBlockTriggerType::Start) => None, diff --git a/chain/near/src/adapter.rs b/chain/near/src/adapter.rs index 4d6151aa5ca..86bfbc0c018 100644 --- a/chain/near/src/adapter.rs +++ b/chain/near/src/adapter.rs @@ -340,7 +340,7 @@ mod test { let firehose_filter = decode_filter(filter); assert_eq!(firehose_filter.accounts, vec![String::from("acc1"),],); - let expected_pairs = vec![ + let expected_pairs = [ PrefixSuffixPair { prefix: "acc3".to_string(), suffix: "acc4".to_string(), @@ -357,8 +357,7 @@ mod test { let pairs = firehose_filter.prefix_and_suffix_pairs; assert_eq!(pairs.len(), 3); - assert_eq!( - true, + assert!( expected_pairs.iter().all(|x| pairs.contains(x)), "{:?}", pairs @@ -468,10 +467,9 @@ mod test { receipt_filter: case.input, }; let param = tf.to_module_params(); - let filter = NearFilter::try_from(param.as_str()).expect(&format!( - "case: {}, the filter to parse params correctly", - case.name - )); + let filter = NearFilter::try_from(param.as_str()).unwrap_or_else(|_| { + panic!("case: {}, the filter to parse params correctly", case.name) + }); assert_eq!( filter, case.expected, diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 7bf2b50a6a8..cc40eb61e5c 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -73,20 +73,14 @@ impl BlockStreamBuilder for NearStreamBuilder { }); let mut package = Package::decode(SUBSTREAMS_TRIGGER_FILTER_BYTES.to_vec().as_ref()).unwrap(); - match package.modules.as_mut() { - Some(modules) => modules + if let Some(Some(module)) = package.modules.as_mut().map(|modules| { + modules .modules .iter_mut() - .find(|module| module.name == NEAR_FILTER_MODULE_NAME) - .map(|module| { - graph::substreams::patch_module_params( - mapper.filter.to_module_params(), - module, - ); - module - }), - None => None, - }; + .find(|m| m.name == NEAR_FILTER_MODULE_NAME) + }) { + graph::substreams::patch_module_params(mapper.filter.to_module_params(), module); + } let logger = chain .logger_factory @@ -657,12 +651,11 @@ mod test { .collect(); assert_eq!(errs.len(), 2, "{:?}", ds); - let expected_errors = vec![ + let expected_errors = [ "partial account prefixes can't have empty values".to_string(), "partial account suffixes can't have 
empty values".to_string(), ]; - assert_eq!( - true, + assert!( expected_errors.iter().all(|err| errs.contains(err)), "{:?}", errs @@ -748,8 +741,7 @@ mod test { case.name, receipt.partial_accounts, ); - assert_eq!( - true, + assert!( case.expected .iter() .all(|x| receipt.partial_accounts.contains(x)), @@ -1000,7 +992,7 @@ mod test { .collect() } - fn new_success_block(height: u64, receiver_id: &String) -> codec::Block { + fn new_success_block(height: u64, receiver_id: &str) -> codec::Block { codec::Block { header: Some(BlockHeader { height, @@ -1012,12 +1004,12 @@ mod test { receipt: Some(crate::codec::Receipt { receipt: Some(receipt::Receipt::Action(ReceiptAction { output_data_receivers: vec![DataReceiver { - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }], ..Default::default() })), - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }), execution_outcome: Some(ExecutionOutcomeWithId { @@ -1067,7 +1059,7 @@ mod test { } } - fn new_receipt_with_outcome(receiver_id: &String, block: Arc) -> ReceiptWithOutcome { + fn new_receipt_with_outcome(receiver_id: &str, block: Arc) -> ReceiptWithOutcome { ReceiptWithOutcome { outcome: ExecutionOutcomeWithId { outcome: Some(ExecutionOutcome { @@ -1082,12 +1074,12 @@ mod test { receipt: codec::Receipt { receipt: Some(receipt::Receipt::Action(ReceiptAction { output_data_receivers: vec![DataReceiver { - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }], ..Default::default() })), - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }, block, diff --git a/chain/near/src/runtime/generated.rs b/chain/near/src/runtime/generated.rs index 153eb8b5ab5..d8fe2937f43 100644 --- a/chain/near/src/runtime/generated.rs +++ b/chain/near/src/runtime/generated.rs @@ -227,20 +227,15 @@ impl AscIndexId for AscSignature { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscAccessKeyPermissionKind { + #[default] FunctionCall, FullAccess, } impl AscValue for AscAccessKeyPermissionKind {} -impl Default for AscAccessKeyPermissionKind { - fn default() -> Self { - Self::FunctionCall - } -} - #[repr(C)] #[derive(AscType)] pub(crate) struct AscFunctionCallPermission { @@ -293,8 +288,9 @@ impl AscIndexId for AscDataReceiver { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscActionKind { + #[default] CreateAccount, DeployContract, FunctionCall, @@ -307,12 +303,6 @@ pub(crate) enum AscActionKind { impl AscValue for AscActionKind {} -impl Default for AscActionKind { - fn default() -> Self { - Self::CreateAccount - } -} - #[repr(C)] #[derive(AscType)] pub(crate) struct AscCreateAccountAction {} @@ -424,20 +414,15 @@ impl AscIndexId for AscActionReceipt { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscSuccessStatusKind { + #[default] Value, ReceiptId, } impl AscValue for AscSuccessStatusKind {} -impl Default for AscSuccessStatusKind { - fn default() -> Self { - Self::Value - } -} - pub struct AscSuccessStatusEnum(pub(crate) AscEnum); impl AscType for AscSuccessStatusEnum { @@ -458,20 +443,15 @@ impl AscIndexId for AscSuccessStatusEnum { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscDirection { + #[default] Left, Right, } impl AscValue for AscDirection {} -impl Default 
for AscDirection { - fn default() -> Self { - Self::Left - } -} - #[repr(C)] #[derive(AscType)] pub(crate) struct AscMerklePathItem { diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index d604f97bc14..c929d7caa19 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -152,8 +152,6 @@ pub struct ReceiptWithOutcome { #[cfg(test)] mod tests { - use std::convert::TryFrom; - use super::*; use graph::{ @@ -407,8 +405,7 @@ mod tests { } fn big_int(input: u64) -> Option { - let value = - BigInt::try_from(input).unwrap_or_else(|_| panic!("Invalid BigInt value {}", input)); + let value = BigInt::from(input); let bytes = value.to_signed_bytes_le(); Some(codec::BigInt { bytes }) diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index d2277580c37..c6b2b4893d7 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -8,8 +8,8 @@ use graph::prelude::{info, tokio, DeploymentHash, MetricsRegistry, Registry}; use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; use graph_chain_substreams::mapper::Mapper; use prost::Message; -use std::env; use std::sync::Arc; +use std::{env, slice}; use tokio_stream::StreamExt; #[tokio::main] @@ -44,7 +44,7 @@ async fn main() -> Result<(), Error> { let endpoint_metrics = EndpointMetrics::new( logger.clone(), - &[endpoint.clone()], + slice::from_ref(&endpoint), Arc::new(MetricsRegistry::mock()), ); diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index daad94bae20..ef812dda6fc 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -24,6 +24,12 @@ use crate::{ pub struct BlockStreamBuilder {} +impl Default for BlockStreamBuilder { + fn default() -> Self { + Self::new() + } +} + impl BlockStreamBuilder { pub fn new() -> Self { Self {} diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index a30d92173c5..3bc471c0cae 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -114,7 +114,7 @@ impl blockchain::DataSource for DataSource { fn validate(&self, _: &semver::Version) -> Vec { let mut errs = vec![]; - if &self.kind != SUBSTREAMS_KIND { + if self.kind != SUBSTREAMS_KIND { errs.push(anyhow!( "data source has invalid `kind`, expected {} but found {}", SUBSTREAMS_KIND, @@ -205,16 +205,19 @@ impl blockchain::UnresolvedDataSource for UnresolvedDataSource { let mut package = graph::substreams::Package::decode(content.as_ref())?; let module = match package.modules.as_mut() { - Some(modules) => modules - .modules - .iter_mut() - .find(|module| module.name == self.source.package.module_name) - .map(|module| { + Some(modules) => { + let mut module = modules + .modules + .iter_mut() + .find(|module| module.name == self.source.package.module_name); + + if let Some(ref mut module) = module { if let Some(params) = self.source.package.params { graph::substreams::patch_module_params(params, module); } - module - }), + } + module + } None => None, }; @@ -481,13 +484,13 @@ mod test { async fn data_source_conversion_override_params() { let mut package = gen_package(); let mut modules = package.modules.unwrap(); - modules.modules.get_mut(0).map(|module| { + if let Some(module) = modules.modules.get_mut(0) { module.inputs = vec![graph::substreams::module::Input { input: Some(Input::Params(Params { value: "x\ny\n123\n".into(), })), }] - }); + } package.modules = Some(modules); let ds: 
UnresolvedDataSource = @@ -526,10 +529,10 @@ mod test { #[test] fn data_source_validation() { let mut ds = gen_data_source(); - assert_eq!(true, ds.validate(LATEST_VERSION).is_empty()); + assert!(ds.validate(LATEST_VERSION).is_empty()); ds.network = None; - assert_eq!(true, ds.validate(LATEST_VERSION).is_empty()); + assert!(ds.validate(LATEST_VERSION).is_empty()); ds.kind = "asdasd".into(); ds.name = "".into(); diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 78788186795..41b930b72de 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -137,7 +137,7 @@ impl BlockStreamMapper for Mapper { block: Block, ) -> Result, BlockStreamError> { let mut triggers = vec![]; - if block.changes.entity_changes.len() >= 1 { + if !block.changes.entity_changes.is_empty() { triggers.push(TriggerData {}); } @@ -211,7 +211,7 @@ fn parse_changes( let new_value: &crate::codec::value::Typed = match &field.new_value { Some(crate::codec::Value { typed: Some(new_value), - }) => &new_value, + }) => new_value, _ => continue, }; @@ -242,11 +242,9 @@ fn decode_value(value: &crate::codec::value::Typed) -> anyhow::Result { Typed::Bigdecimal(new_value) => BigDecimal::from_str(new_value) .map(Value::BigDecimal) - .map_err(|err| anyhow::Error::from(err)), + .map_err(anyhow::Error::from), - Typed::Bigint(new_value) => BigInt::from_str(new_value) - .map(Value::BigInt) - .map_err(|err| anyhow::Error::from(err)), + Typed::Bigint(new_value) => BigInt::from_str(new_value).map(Value::BigInt), Typed::String(new_value) => { let mut string = new_value.clone(); @@ -261,13 +259,13 @@ fn decode_value(value: &crate::codec::value::Typed) -> anyhow::Result { Typed::Bytes(new_value) => BASE64_STANDARD .decode(new_value) .map(|bs| Value::Bytes(Bytes::from(bs))) - .map_err(|err| anyhow::Error::from(err)), + .map_err(anyhow::Error::from), Typed::Bool(new_value) => Ok(Value::Bool(*new_value)), Typed::Timestamp(new_value) => Timestamp::from_microseconds_since_epoch(*new_value) .map(Value::Timestamp) - .map_err(|err| anyhow::Error::from(err)), + .map_err(anyhow::Error::from), Typed::Array(arr) => arr .value diff --git a/core/Cargo.toml b/core/Cargo.toml index 07c01a94d05..a3087b4084b 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -35,3 +35,6 @@ tokio-util.workspace = true [dev-dependencies] tower-test = { git = "https://github.com/tower-rs/tower.git" } wiremock = "0.6.5" + +[lints] +workspace = true diff --git a/core/graphman/src/commands/deployment/info.rs b/core/graphman/src/commands/deployment/info.rs index 7cf0e87c758..55e00e917ca 100644 --- a/core/graphman/src/commands/deployment/info.rs +++ b/core/graphman/src/commands/deployment/info.rs @@ -33,7 +33,7 @@ pub async fn load_deployments( ) -> Result, GraphmanError> { let mut primary_conn = primary_pool.get().await?; - crate::deployment::load_deployments(&mut primary_conn, &deployment, &version).await + crate::deployment::load_deployments(&mut primary_conn, deployment, version).await } pub async fn load_deployment_statuses( @@ -56,7 +56,7 @@ pub async fn load_deployment_statuses( let chain = status .chains - .get(0) + .first() .ok_or_else(|| { GraphmanError::Store(anyhow!( "deployment status has no chains on deployment '{id}'" diff --git a/core/graphman/src/commands/deployment/reassign.rs b/core/graphman/src/commands/deployment/reassign.rs index 5d8e282f306..b1ead37cd12 100644 --- a/core/graphman/src/commands/deployment/reassign.rs +++ b/core/graphman/src/commands/deployment/reassign.rs @@ -100,17 +100,17 @@ pub async fn 
reassign_deployment( let mut catalog_conn = catalog::Connection::new(primary_conn); let changes: Vec = match &curr_node { Some(curr) => { - if &curr == &node { + if curr == node { vec![] } else { catalog_conn - .reassign_subgraph(&deployment.site, &node) + .reassign_subgraph(&deployment.site, node) .await .map_err(GraphmanError::from)? } } None => catalog_conn - .assign_subgraph(&deployment.site, &node) + .assign_subgraph(&deployment.site, node) .await .map_err(GraphmanError::from)?, }; @@ -129,7 +129,7 @@ pub async fn reassign_deployment( let mirror = catalog::Mirror::primary_only(primary_pool); let count = mirror - .assignments(&node) + .assignments(node) .await .map_err(GraphmanError::from)? .len(); diff --git a/core/src/amp_subgraph/runner/data_processing.rs b/core/src/amp_subgraph/runner/data_processing.rs index 83b113922e1..8c403de2b7f 100644 --- a/core/src/amp_subgraph/runner/data_processing.rs +++ b/core/src/amp_subgraph/runner/data_processing.rs @@ -258,7 +258,7 @@ fn decode_block_timestamp(record_batches: &[StreamRecordBatch]) -> Result { return decoder .decode(0) - .map_err(|e| Error::Deterministic(e))? + .map_err(Error::Deterministic)? .ok_or_else(|| Error::Deterministic(anyhow!("block timestamp is empty"))); } Err(e) => { diff --git a/core/src/amp_subgraph/runner/data_stream.rs b/core/src/amp_subgraph/runner/data_stream.rs index 7f3636a5af9..ad6d6d471f8 100644 --- a/core/src/amp_subgraph/runner/data_stream.rs +++ b/core/src/amp_subgraph/runner/data_stream.rs @@ -43,7 +43,7 @@ where ); loop { - let next_block_ranges = next_block_ranges(&cx, latest_queried_block, latest_block); + let next_block_ranges = next_block_ranges(cx, latest_queried_block, latest_block); if next_block_ranges.is_empty() { if data_streams.is_empty() { diff --git a/core/src/amp_subgraph/runner/latest_blocks.rs b/core/src/amp_subgraph/runner/latest_blocks.rs index 559aef963cd..cb62f2e3a42 100644 --- a/core/src/amp_subgraph/runner/latest_blocks.rs +++ b/core/src/amp_subgraph/runner/latest_blocks.rs @@ -31,7 +31,7 @@ impl LatestBlocks { .data_sources .iter() .enumerate() - .map(|(i, data_source)| { + .flat_map(|(i, data_source)| { data_source .source .tables @@ -39,10 +39,9 @@ impl LatestBlocks { .enumerate() .map(move |(j, table)| ((i, j), &data_source.source.dataset, table)) }) - .flatten() .unique_by(|(_, dataset, table)| (dataset.to_string(), table.to_string())) .map(|(table_ptr, dataset, table)| { - latest_block(&cx, dataset, table) + latest_block(cx, dataset, table) .map_ok(move |latest_block| (table_ptr, latest_block)) .map_err(move |e| { e.context(format!( @@ -100,7 +99,7 @@ impl LatestBlocks { let dataset = &source.dataset; let table = &source.tables[j]; - latest_block_changed(&cx, dataset, table, latest_block).map_err(move |e| { + latest_block_changed(cx, dataset, table, latest_block).map_err(move |e| { e.context(format!( "failed to check if the latest block changed in '{dataset}.{table}'" )) @@ -136,9 +135,9 @@ where let record_batch = read_once(stream).await?; let latest_block = block_number_decoder(&record_batch, 0) - .map_err(|e| Error::Deterministic(e))? + .map_err(Error::Deterministic)? .decode(0) - .map_err(|e| Error::Deterministic(e))? + .map_err(Error::Deterministic)? 
.ok_or_else(|| Error::NonDeterministic(anyhow!("table is empty")))?; Ok(latest_block) diff --git a/core/src/amp_subgraph/runner/reorg_handler.rs b/core/src/amp_subgraph/runner/reorg_handler.rs index 911c4ebf818..03130b19625 100644 --- a/core/src/amp_subgraph/runner/reorg_handler.rs +++ b/core/src/amp_subgraph/runner/reorg_handler.rs @@ -103,7 +103,7 @@ where let table = &data_source.source.tables[*j]; detect_reorg( - &cx, + cx, network, dataset, table, diff --git a/core/src/subgraph/context/instance/mod.rs b/core/src/subgraph/context/instance/mod.rs index 0d14ae8d758..fa6e83fef47 100644 --- a/core/src/subgraph/context/instance/mod.rs +++ b/core/src/subgraph/context/instance/mod.rs @@ -241,7 +241,7 @@ where .matches_by_address(trigger.address_match()), TriggerData::Offchain(trigger) => self .offchain_hosts - .matches_by_address(trigger.source.address().as_ref().map(|a| a.as_slice())), + .matches_by_address(trigger.source.address().as_deref()), TriggerData::Subgraph(trigger) => self .subgraph_hosts .matches_by_address(Some(trigger.source.to_bytes().as_slice())), diff --git a/core/src/subgraph/error.rs b/core/src/subgraph/error.rs index c50712c08db..502a28dbc66 100644 --- a/core/src/subgraph/error.rs +++ b/core/src/subgraph/error.rs @@ -50,7 +50,7 @@ impl ProcessingError { /// call the method `detail` to avoid ambiguity with anyhow's `context` /// method pub trait DetailHelper { - fn detail(self: Self, ctx: &str) -> Result; + fn detail(self, ctx: &str) -> Result; } impl DetailHelper for Result { @@ -61,12 +61,12 @@ impl DetailHelper for Result { /// Implement this for errors that are always non-deterministic. pub(crate) trait NonDeterministicErrorHelper { - fn non_deterministic(self: Self) -> Result; + fn non_deterministic(self) -> Result; } impl NonDeterministicErrorHelper for Result { fn non_deterministic(self) -> Result { - self.map_err(|e| ProcessingError::Unknown(e)) + self.map_err(ProcessingError::Unknown) } } @@ -79,7 +79,7 @@ impl NonDeterministicErrorHelper for Result { /// Implement this for errors where it depends on the details whether they /// are deterministic or not. 
pub(crate) trait ClassifyErrorHelper { - fn classify(self: Self) -> Result; + fn classify(self) -> Result; } impl ClassifyErrorHelper for Result { @@ -88,12 +88,10 @@ impl ClassifyErrorHelper for Result { if ENV_VARS.mappings.store_errors_are_nondeterministic { // Old behavior, just in case the new behavior causes issues ProcessingError::Unknown(Error::from(e)) + } else if e.is_deterministic() { + ProcessingError::Deterministic(Box::new(e)) } else { - if e.is_deterministic() { - ProcessingError::Deterministic(Box::new(e)) - } else { - ProcessingError::Unknown(Error::from(e)) - } + ProcessingError::Unknown(Error::from(e)) } }) } diff --git a/core/src/subgraph/inputs.rs b/core/src/subgraph/inputs.rs index 91bbdd131f4..88e89de4ff4 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -61,8 +61,8 @@ impl IndexingInputs { start_blocks: start_blocks.clone(), end_blocks: end_blocks.clone(), source_subgraph_stores: source_subgraph_stores.clone(), - stop_block: stop_block.clone(), - max_end_block: max_end_block.clone(), + stop_block: *stop_block, + max_end_block: *max_end_block, store, debug_fork: debug_fork.clone(), triggers_adapter: triggers_adapter.clone(), diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 7706410a33b..0b7b0ee59db 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -232,7 +232,7 @@ impl SubgraphInstanceManager { .await? .ok_or_else(|| anyhow!("no active deployment for hash {}", hash))?; - let sourceable_store = subgraph_store.clone().sourceable(loc.id.clone()).await?; + let sourceable_store = subgraph_store.clone().sourceable(loc.id).await?; sourceable_stores.push(sourceable_store); } @@ -407,11 +407,7 @@ impl SubgraphInstanceManager { let end_blocks: BTreeSet = manifest .data_sources .iter() - .filter_map(|d| { - d.as_onchain() - .map(|d: &C::DataSource| d.end_block()) - .flatten() - }) + .filter_map(|d| d.as_onchain().and_then(|d: &C::DataSource| d.end_block())) .collect(); // We can set `max_end_block` to the maximum of `end_blocks` and stop the subgraph diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 234d43a35ae..e80cb23ee43 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -150,7 +150,7 @@ where let logger = self.logger.new(o!("subgraph_id" => deployment.hash.to_string(), "node_id" => self.node_id.to_string())); if let Some((assigned, is_paused)) = assigned { - if &assigned == &self.node_id { + if assigned == self.node_id { if is_paused { // Subgraph is paused, so we don't start it debug!(logger, "Deployment assignee is this node"; "assigned_to" => assigned, "paused" => is_paused, "action" => "ignore"); @@ -432,7 +432,7 @@ async fn resolve_start_block( 0 => Ok(None), min_start_block => Retry::spawn(retry_strategy(Some(2), RETRY_DEFAULT_LIMIT), move || { chain - .block_pointer_from_number(&logger, min_start_block - 1) + .block_pointer_from_number(logger, min_start_block - 1) .inspect_err(move |e| warn!(&logger, "Failed to get block number: {}", e)) }) .await diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index cca0e59e22b..7e95530942f 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -145,7 +145,7 @@ where // Filter out data sources that have reached their end block let end_block_filter = |ds: &&C::DataSource| match current_ptr.as_ref() { // We filter out datasources for which the current block is at or past their end block. 
- Some(block) => ds.end_block().map_or(true, |end| block.number < end), + Some(block) => ds.end_block().is_none_or(|end| block.number < end), // If there is no current block, we keep all datasources. None => true, }; @@ -309,11 +309,10 @@ where // This will require some code refactor in how the BlockStream is created let block_start = Instant::now(); - let action = self.handle_stream_event(event).await.map(|res| { + let action = self.handle_stream_event(event).await.inspect(|res| { self.metrics .subgraph .observe_block_processed(block_start.elapsed(), res.block_finished()); - res })?; self.update_deployment_synced_metric(); @@ -514,7 +513,7 @@ where .observe(elapsed); block_state_metrics - .flush_metrics_to_store(&logger, block_ptr, self.inputs.deployment.id) + .flush_metrics_to_store(logger, block_ptr, self.inputs.deployment.id) .non_deterministic()?; if has_errors { @@ -538,7 +537,7 @@ where // Use `Canceled` to avoiding setting the subgraph health to failed, an error was // just transacted so it will be already be set to unhealthy. - Err(ProcessingError::Canceled.into()) + Err(ProcessingError::Canceled) } else { Ok(()) } @@ -562,8 +561,8 @@ where self.ctx .decoder .match_and_decode_many( - &logger, - &block, + logger, + block, triggers, hosts_filter, &self.metrics.subgraph, @@ -1028,11 +1027,11 @@ where } } - return Ok(action); + Ok(action) } Err(ProcessingError::Canceled) => { debug!(self.logger, "Subgraph block stream shut down cleanly"); - return Ok(Action::Stop); + Ok(Action::Stop) } // Handle unexpected stream errors by marking the subgraph as failed. @@ -1069,7 +1068,7 @@ where .await .context("Failed to set subgraph status to `failed`")?; - return Err(err); + Err(err) } false => { // Shouldn't fail subgraph if it's already failed for non-deterministic @@ -1104,7 +1103,7 @@ where self.state.should_try_unfail_non_deterministic = true; // And restart the subgraph. - return Ok(Action::Restart); + Ok(Action::Restart) } } } @@ -1146,12 +1145,12 @@ where // it so that we are up to date when checking if synced. let cached_head_ptr = self.state.cached_head_ptr.cheap_clone(); if cached_head_ptr.is_none() - || close_to_chain_head(&block_ptr, &cached_head_ptr, CAUGHT_UP_DISTANCE) + || close_to_chain_head(block_ptr, &cached_head_ptr, CAUGHT_UP_DISTANCE) { self.state.cached_head_ptr = self.inputs.chain.chain_head_ptr().await?; } let is_caught_up = - close_to_chain_head(&block_ptr, &self.state.cached_head_ptr, CAUGHT_UP_DISTANCE); + close_to_chain_head(block_ptr, &self.state.cached_head_ptr, CAUGHT_UP_DISTANCE); if is_caught_up { // Stop recording time-to-sync metrics. self.metrics.stream.stopwatch.disable(); @@ -1288,7 +1287,7 @@ where // This propagates any deterministic error as a non-deterministic one. Which might make // sense considering offchain data sources are non-deterministic. if let Some(err) = block_state.deterministic_errors.into_iter().next() { - return Err(anyhow!("{}", err.to_string())); + return Err(anyhow!("{}", err)); } mods.extend( @@ -1377,7 +1376,7 @@ where Ok(block_state) => block_state, // Some form of unknown or non-deterministic error ocurred. 
- Err(MappingError::Unknown(e)) => return Err(ProcessingError::Unknown(e).into()), + Err(MappingError::Unknown(e)) => return Err(ProcessingError::Unknown(e)), Err(MappingError::PossibleReorg(e)) => { info!(logger, "Possible reorg detected, retrying"; @@ -1591,7 +1590,7 @@ async fn update_proof_of_indexing( (digest_name, Value::from(digest)), ]; if entity_cache.schema.has_aggregations() { - let block_time = Value::Int8(block_time.as_secs_since_epoch() as i64); + let block_time = Value::Int8(block_time.as_secs_since_epoch()); data.push((entity_cache.schema.poi_block_time(), block_time)); } let poi = entity_cache.make_entity(data)?; diff --git a/core/src/subgraph_provider.rs b/core/src/subgraph_provider.rs index cbfb60a5e11..7e479dd9495 100644 --- a/core/src/subgraph_provider.rs +++ b/core/src/subgraph_provider.rs @@ -230,6 +230,7 @@ enum Error { /// /// Before starting a subgraph, its processing kind is determined from the subgraph manifest. /// Then, the appropriate instance manager is loaded from this mapping. +#[derive(Default)] pub struct SubgraphInstanceManagers( HashMap>, ); @@ -287,8 +288,7 @@ impl SubgraphProcessingKind { .filter_map(Value::as_mapping) .filter_map(|map| map.get("kind")) .filter_map(Value::as_str) - .filter(|kind| *kind == amp::manifest::DataSource::KIND) - .next() + .find(|kind| *kind == amp::manifest::DataSource::KIND) }) .is_some(); diff --git a/gnd/src/main.rs b/gnd/src/main.rs index 3e5936824cd..fc79c707310 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -166,7 +166,7 @@ async fn run_graph_node( let (prometheus_registry, metrics_registry) = launcher::setup_metrics(logger); - let ipfs_client = graph::ipfs::new_ipfs_client(&opt.ipfs, &metrics_registry, &logger) + let ipfs_client = graph::ipfs::new_ipfs_client(&opt.ipfs, &metrics_registry, logger) .await .unwrap_or_else(|err| panic!("Failed to create IPFS client: {err:#}")); diff --git a/gnd/src/watcher.rs b/gnd/src/watcher.rs index 3171f240128..63e920ddbd8 100644 --- a/gnd/src/watcher.rs +++ b/gnd/src/watcher.rs @@ -231,7 +231,7 @@ async fn process_file_events( logger: &Logger, rx: mpsc::Receiver>, exclusion_set: &GlobSet, - manifests_paths: &Vec, + manifests_paths: &[PathBuf], source_subgraph_aliases: &HashMap, sender: Sender<(DeploymentHash, SubgraphName)>, ) -> Result<()> { @@ -289,13 +289,12 @@ fn is_relevant_event(event: &Event, watched_dirs: Vec, exclusion_set: & /// Redeploys all subgraphs in the order it appears in the manifests_paths pub async fn deploy_all_subgraphs( logger: &Logger, - manifests_paths: &Vec, + manifests_paths: &[PathBuf], source_subgraph_aliases: &HashMap, sender: &Sender<(DeploymentHash, SubgraphName)>, ) -> Result<()> { info!(logger, "File change detected, redeploying all subgraphs"); - let mut count = 0; - for manifest_path in manifests_paths { + for (count, manifest_path) in manifests_paths.iter().enumerate() { let alias_name = source_subgraph_aliases .iter() .find(|(_, path)| path == &manifest_path) @@ -312,7 +311,6 @@ pub async fn deploy_all_subgraphs( .map_err(|_| anyhow!("Failed to create subgraph name"))?, )) .await; - count += 1; } Ok(()) } diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 58cfba024c2..64939877874 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -112,3 +112,6 @@ wiremock = "0.6.5" [build-dependencies] tonic-build = { workspace = true } + +[lints] +workspace = true diff --git a/graph/derive/src/lib.rs b/graph/derive/src/lib.rs index ce13fc9faf9..be7e5364032 100644 --- a/graph/derive/src/lib.rs +++ b/graph/derive/src/lib.rs @@ -33,7 +33,7 @@ fn 
impl_cheap_clone(input: TokenStream2) -> TokenStream2 { fn cheap_clone_body(data: Data) -> TokenStream2 { match data { Data::Struct(st) => match &st.fields { - Fields::Unit => return quote! { Self }, + Fields::Unit => quote! { Self }, Fields::Unnamed(fields) => { let mut field_clones = Vec::new(); for (num, _) in fields.unnamed.iter().enumerate() { @@ -105,7 +105,7 @@ fn impl_cheap_clone(input: TokenStream2) -> TokenStream2 { let input = match syn::parse2::(input) { Ok(input) => input, Err(e) => { - return e.to_compile_error().into(); + return e.to_compile_error(); } }; let DeriveInput { @@ -275,7 +275,7 @@ pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { if input.sig.asyncness.is_none() { let msg = "the `async` keyword is missing from the function declaration"; - return syn::Error::new_spanned(&input.sig.fn_token, msg) + return syn::Error::new_spanned(input.sig.fn_token, msg) .to_compile_error() .into(); } diff --git a/graph/examples/append_row.rs b/graph/examples/append_row.rs index 59f6fc3a5f2..f272c07cf82 100644 --- a/graph/examples/append_row.rs +++ b/graph/examples/append_row.rs @@ -83,7 +83,7 @@ pub fn main() -> anyhow::Result<()> { let id = &ids[pos]; let data = vec![ (Word::from("id"), Value::String(id.to_string())), - (Word::from("count"), Value::Int(block as i32)), + (Word::from("count"), Value::Int(block)), ]; let data = Arc::new(SCHEMA.make_entity(data).unwrap()); let md = if existing.contains(id) { diff --git a/graph/examples/stress.rs b/graph/examples/stress.rs index 5534f2263b3..7a50df521d4 100644 --- a/graph/examples/stress.rs +++ b/graph/examples/stress.rs @@ -48,13 +48,12 @@ static mut PRINT_SAMPLES: bool = false; /// number of entries divided by `NODE_FILL`, and the number of /// interior nodes can be determined by dividing the number of nodes /// at the child level by `NODE_FILL` - +/// /// The other difficulty is that the structs with which `BTreeMap` /// represents internal and leaf nodes are not public, so we can't /// get their size with `std::mem::size_of`; instead, we base our /// estimates of their size on the current `std` code, assuming that /// these structs will not change - mod btree { use std::mem; use std::{mem::MaybeUninit, ptr::NonNull}; diff --git a/graph/examples/validate.rs b/graph/examples/validate.rs index ed57feb1bec..a5a2159cff4 100644 --- a/graph/examples/validate.rs +++ b/graph/examples/validate.rs @@ -238,8 +238,8 @@ impl Sizer { .map_err(Into::into) })?; let (input_size, input_schema) = - self.size(|| InputSchema::parse_latest(raw, id.clone()).map_err(Into::into))?; - let (api_size, api) = self.size(|| input_schema.api_schema().map_err(Into::into))?; + self.size(|| InputSchema::parse_latest(raw, id.clone()))?; + let (api_size, api) = self.size(|| input_schema.api_schema())?; let api_text = api.document().to_string().len(); Ok(Sizes { gql: gql_size, diff --git a/graph/proto/substreams-rpc.proto b/graph/proto/substreams-rpc.proto index 28298458480..48624615cdb 100644 --- a/graph/proto/substreams-rpc.proto +++ b/graph/proto/substreams-rpc.proto @@ -32,14 +32,14 @@ message Request { // By default, the engine runs in developer mode, with richer and deeper // output. Differences between production and development modes include: // * Forward parallel execution is enabled in production mode and disabled in - // development mode + // development mode // * The time required to reach the first byte in development mode is faster - // than in production mode. + // than in production mode. 
// // Specific attributes of development mode include: // * The client will receive all of the executed module's logs. // * It's possible to request specific store snapshots in the execution tree - // (via `debug_initial_store_snapshot_for_modules`). + // (via `debug_initial_store_snapshot_for_modules`). // * Multiple module's output is possible. // // With production mode`, however, you trade off functionality for high speed diff --git a/graph/src/amp/codec/array_decoder.rs b/graph/src/amp/codec/array_decoder.rs index e74a777cb12..f7a480f38d0 100644 --- a/graph/src/amp/codec/array_decoder.rs +++ b/graph/src/amp/codec/array_decoder.rs @@ -470,7 +470,7 @@ impl Decoder>> for ArrayDecoder<'_, TimestampNanosecondArra } } -fn downcast_ref<'a, T>(array: &'a dyn Array) -> Result<&'a T> +fn downcast_ref(array: &dyn Array) -> Result<&T> where T: Array + 'static, { diff --git a/graph/src/amp/codec/mapping_decoder.rs b/graph/src/amp/codec/mapping_decoder.rs index b0c85e9d2e6..19a81cb6dcb 100644 --- a/graph/src/amp/codec/mapping_decoder.rs +++ b/graph/src/amp/codec/mapping_decoder.rs @@ -27,6 +27,6 @@ where fn decode(&self, row_index: usize) -> Result { let value = self.decoder.decode(row_index)?; - Ok((&self.mapping)(value)) + Ok((self.mapping)(value)) } } diff --git a/graph/src/amp/codec/test_fixtures.rs b/graph/src/amp/codec/test_fixtures.rs index a55001439b2..a8a6882ff88 100644 --- a/graph/src/amp/codec/test_fixtures.rs +++ b/graph/src/amp/codec/test_fixtures.rs @@ -35,9 +35,8 @@ pub static RECORD_BATCH: LazyLock = LazyLock::new(|| { let columns = record_batches .into_iter() - .map(|record_batch| record_batch.columns()) - .flatten() - .map(|column| column.clone()) + .flat_map(|record_batch| record_batch.columns()) + .cloned() .collect::>(); RecordBatch::try_new(Schema::try_merge(schemas).unwrap().into(), columns).unwrap() diff --git a/graph/src/amp/codec/utils.rs b/graph/src/amp/codec/utils.rs index 4f6ba4ff0b1..9811f661f7b 100644 --- a/graph/src/amp/codec/utils.rs +++ b/graph/src/amp/codec/utils.rs @@ -92,16 +92,16 @@ where } } - return None; + None } -pub fn column_decoder<'a, T: 'static, U>( +pub fn column_decoder<'a, T, U>( record_batch: &'a RecordBatch, column_index: usize, nullable: bool, ) -> Result> + 'a>> where - T: Array, + T: Array + 'static, ArrayDecoder<'a, T>: Decoder>, { if column_index >= record_batch.num_columns() { diff --git a/graph/src/amp/manifest/data_source/raw.rs b/graph/src/amp/manifest/data_source/raw.rs index 15e2ceb9237..3369eebc54d 100644 --- a/graph/src/amp/manifest/data_source/raw.rs +++ b/graph/src/amp/manifest/data_source/raw.rs @@ -382,7 +382,7 @@ impl RawAbi { let file_bytes = link_resolver .cat( - &LinkResolverContext::new(&DeploymentHash::default(), &logger), + &LinkResolverContext::new(&DeploymentHash::default(), logger), &(file.into()), ) .await @@ -559,7 +559,7 @@ impl RawTable { let record_batch = RecordBatch::new_empty(schema.into()); let (block_number_column, _) = - auto_block_number_decoder(&record_batch).map_err(|e| Error::InvalidQuery(e))?; + auto_block_number_decoder(&record_batch).map_err(Error::InvalidQuery)?; let need_block_hash_column = auto_block_hash_decoder(&record_batch).is_err(); let need_block_timestamp_column = input_schema diff --git a/graph/src/amp/schema/generator/entity.rs b/graph/src/amp/schema/generator/entity.rs index 7e3fa5b8f6c..88745bce51b 100644 --- a/graph/src/amp/schema/generator/entity.rs +++ b/graph/src/amp/schema/generator/entity.rs @@ -45,9 +45,9 @@ impl SchemaEntity { impl fmt::Display for SchemaEntity { fn fmt(&self, f: 
&mut fmt::Formatter<'_>) -> fmt::Result { write! {f, "type {} @entity(immutable: true)", self.name.to_pascal_case()}?; - write! {f, " {{\n"}?; + writeln! {f, " {{"}?; for field in &self.fields { - write! {f, "\t{field}\n"}?; + writeln! {f, "\t{field}"}?; } write! {f, "}}"} } diff --git a/graph/src/amp/sql/query_builder/block_range_query.rs b/graph/src/amp/sql/query_builder/block_range_query.rs index 6a89eb02ce7..dde44d803ad 100644 --- a/graph/src/amp/sql/query_builder/block_range_query.rs +++ b/graph/src/amp/sql/query_builder/block_range_query.rs @@ -17,7 +17,7 @@ use super::{extract_tables, parse_query, TableReference}; /// All the table references in the original SQL query are replaced with the created CTE names. /// /// The output is ordered by block numbers. -pub(super) fn new_block_range_query<'a>( +pub(super) fn new_block_range_query( query: &ast::Query, block_number_column: &str, block_range: &RangeInclusive, diff --git a/graph/src/amp/sql/query_builder/event_signature_resolver.rs b/graph/src/amp/sql/query_builder/event_signature_resolver.rs index 89ab8a31a51..ac37068ea3d 100644 --- a/graph/src/amp/sql/query_builder/event_signature_resolver.rs +++ b/graph/src/amp/sql/query_builder/event_signature_resolver.rs @@ -56,7 +56,7 @@ fn visit_expr(expr: &mut ast::Expr, abis: &[(&str, &JsonAbi)]) -> Result<()> { Ok(()) } -fn get_args<'a>(function: &'a ast::Function) -> Option<(&'a str, &'a str)> { +fn get_args(function: &ast::Function) -> Option<(&str, &str)> { let ast::FunctionArguments::List(args) = &function.args else { return None; }; @@ -71,7 +71,7 @@ fn get_args<'a>(function: &'a ast::Function) -> Option<(&'a str, &'a str)> { } } -fn get_arg<'a>(arg: &'a ast::FunctionArg) -> Option<&'a str> { +fn get_arg(arg: &ast::FunctionArg) -> Option<&str> { let ast::FunctionArg::Unnamed(ast::FunctionArgExpr::Expr(expr)) = arg else { return None; }; @@ -92,10 +92,8 @@ fn get_event<'a>( ) -> Option<&'a alloy::json_abi::Event> { abis.iter() .filter(|(name, _)| *name == contract_name) - .map(|(_, contract)| contract.event(event_name)) - .flatten() - .map(|events| events.first()) - .flatten() + .filter_map(|(_, contract)| contract.event(event_name)) + .filter_map(|events| events.first()) .next() } diff --git a/graph/src/amp/stream_aggregator/record_batch/aggregator.rs b/graph/src/amp/stream_aggregator/record_batch/aggregator.rs index f513a2752ed..f2c9cff13e0 100644 --- a/graph/src/amp/stream_aggregator/record_batch/aggregator.rs +++ b/graph/src/amp/stream_aggregator/record_batch/aggregator.rs @@ -107,7 +107,7 @@ impl Aggregator { return iter.next(); } - iter.skip(1).next() + iter.nth(1) } /// Returns `true` if this aggregator contains completed groups. 
@@ -220,8 +220,8 @@ impl Aggregator { if block_number == max_block_number && block_hash != max_block_hash { bail!( "received block hash '0x{}' after '0x{}' for block number {block_number}", - hex::encode(&block_hash), - hex::encode(&max_block_hash) + hex::encode(block_hash), + hex::encode(max_block_hash) ); } diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index e3568345803..01e4444aaf9 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -583,8 +583,8 @@ impl TriggersAdapterWrapper { let ptrs = futures03::future::try_join_all( self.source_subgraph_stores - .iter() - .map(|(_, store)| store.block_ptr()), + .values() + .map(|store| store.block_ptr()), ) .await?; @@ -765,7 +765,7 @@ pub trait BlockStreamMapper: Send + Sync { let cursor = FirehoseCursor::from(cursor); let event = self - .handle_substreams_block(&logger, clock, cursor, value) + .handle_substreams_block(logger, clock, cursor, value) .await?; Ok(Some(event)) diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 4ec6e17c83f..90e985ff44d 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -64,10 +64,10 @@ impl FirehoseBlockStreamMetrics { fn observe_successful_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "true"]) + .with_label_values(&[self.deployment.as_str(), provider, "true"]) .inc(); self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp @@ -76,10 +76,10 @@ impl FirehoseBlockStreamMetrics { fn observe_failed_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "false"]) + .with_label_values(&[self.deployment.as_str(), provider, "false"]) .inc(); self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp @@ -88,10 +88,10 @@ impl FirehoseBlockStreamMetrics { fn observe_response(&self, kind: &str, time: &mut Instant, provider: &str) { self.time_between_responses - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .observe(time.elapsed().as_secs_f64()); self.responses - .with_label_values(&[self.deployment.as_str(), &provider, kind]) + .with_label_values(&[self.deployment.as_str(), provider, kind]) .inc(); // Reset last response timestamp @@ -366,10 +366,12 @@ async fn process_firehose_response>( let previous_block_ptr = block.parent_ptr(); if previous_block_ptr.is_some() && previous_block_ptr.as_ref() != subgraph_current_block { + #[allow(clippy::unnecessary_unwrap)] + let firehose_start_block = previous_block_ptr.unwrap(); warn!(&logger, "Firehose selected first streamed block's parent should match subgraph start block, reverting to last know final chain segment"; "subgraph_current_block" => &subgraph_current_block.unwrap(), - "firehose_start_block" => &previous_block_ptr.unwrap(), + "firehose_start_block" => &firehose_start_block, ); let mut revert_to = mapper @@ -463,48 +465,64 @@ mod tests { // Nothing - assert_eq!( - must_check_subgraph_continuity(&logger, &no_current_block, &no_cursor, 
10), - false, - ); + assert!(!must_check_subgraph_continuity( + &logger, + &no_current_block, + &no_cursor, + 10 + ),); // No cursor, subgraph current block ptr <, ==, > than manifest start block num - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(9), &no_cursor, 10), - false, - ); - - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(10), &no_cursor, 10), - true, - ); - - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(11), &no_cursor, 10), - true, - ); + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(9), + &no_cursor, + 10 + ),); + + assert!(must_check_subgraph_continuity( + &logger, + &some_current_block(10), + &no_cursor, + 10 + ),); + + assert!(must_check_subgraph_continuity( + &logger, + &some_current_block(11), + &no_cursor, + 10 + ),); // Some cursor, subgraph current block ptr <, ==, > than manifest start block num - assert_eq!( - must_check_subgraph_continuity(&logger, &no_current_block, &some_cursor, 10), - false, - ); - - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(9), &some_cursor, 10), - false, - ); - - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(10), &some_cursor, 10), - false, - ); - - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(11), &some_cursor, 10), - false, - ); + assert!(!must_check_subgraph_continuity( + &logger, + &no_current_block, + &some_cursor, + 10 + ),); + + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(9), + &some_cursor, + 10 + ),); + + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(10), + &some_cursor, + 10 + ),); + + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(11), + &some_cursor, + 10 + ),); } } diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index 577b5fbc816..a4a411d8e5c 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -203,8 +203,8 @@ impl UnresolvedDataSource for MockUnresolvedDataSource { #[derive(Debug, Clone)] pub struct MockDataSourceTemplate; -impl Into for MockDataSourceTemplate { - fn into(self) -> DataSourceTemplateInfo { +impl From for DataSourceTemplateInfo { + fn from(_val: MockDataSourceTemplate) -> Self { todo!() } } diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 5066f38ac54..09bb9a1a4be 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -361,8 +361,7 @@ pub trait DataSource: 'static + Sized + Send + Sync + Clone { fn validate(&self, spec_version: &semver::Version) -> Vec; fn has_expired(&self, block: BlockNumber) -> bool { - self.end_block() - .map_or(false, |end_block| block > end_block) + self.end_block().is_some_and(|end_block| block > end_block) } fn has_declared_calls(&self) -> bool { @@ -609,11 +608,11 @@ impl BlockchainKind { // // Split by `/` to, for example, read 'ethereum' in 'ethereum/contracts'. 
manifest - .get(&Value::String("dataSources".to_owned())) + .get(Value::String("dataSources".to_owned())) .and_then(|ds| ds.as_sequence()) .and_then(|ds| ds.first()) .and_then(|ds| ds.as_mapping()) - .and_then(|ds| ds.get(&Value::String("kind".to_owned()))) + .and_then(|ds| ds.get(Value::String("kind".to_owned()))) .and_then(|kind| kind.as_str()) .and_then(|kind| kind.split('/').next()) .context("invalid manifest") diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index c359ec1a504..6f5a49b12b9 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -65,10 +65,10 @@ impl SubstreamsBlockStreamMetrics { fn observe_successful_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "true"]) + .with_label_values(&[self.deployment.as_str(), provider, "true"]) .inc(); self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp @@ -77,10 +77,10 @@ impl SubstreamsBlockStreamMetrics { fn observe_failed_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "false"]) + .with_label_values(&[self.deployment.as_str(), provider, "false"]) .inc(); self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp @@ -89,10 +89,10 @@ impl SubstreamsBlockStreamMetrics { fn observe_response(&self, kind: &str, time: &mut Instant, provider: &str) { self.time_between_responses - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .observe(time.elapsed().as_secs_f64()); self.responses - .with_label_values(&[self.deployment.as_str(), &provider, kind]) + .with_label_values(&[self.deployment.as_str(), provider, kind]) .inc(); // Reset last response timestamp @@ -320,8 +320,7 @@ async fn process_substreams_response>( match mapper .to_block_stream_event(logger, response.message, log_data) - .await - .map_err(BlockStreamError::from)? + .await? 
{ Some(event) => { let cursor = match &event { @@ -331,7 +330,7 @@ async fn process_substreams_response>( } .to_string(); - return Ok(Some(BlockResponse::Proceed(event, cursor))); + Ok(Some(BlockResponse::Proceed(event, cursor))) } None => Ok(None), // some progress responses are ignored within to_block_stream_event } @@ -378,7 +377,7 @@ impl SubstreamsLogData { let mut stages_str = "".to_string(); for i in (0..len).rev() { let stage = &progress.stages[i]; - let range = if stage.completed_ranges.len() > 0 { + let range = if !stage.completed_ranges.is_empty() { let b = stage.completed_ranges.iter().map(|x| x.end_block).min(); format!(" up to {}", b.unwrap_or(0)) } else { diff --git a/graph/src/blockchain/types.rs b/graph/src/blockchain/types.rs index c64da4f4f7a..34b44f2723e 100644 --- a/graph/src/blockchain/types.rs +++ b/graph/src/blockchain/types.rs @@ -642,7 +642,7 @@ impl ToSql for BlockTime { impl FromSql for BlockTime { fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result { - >::from_sql(bytes).map(|ts| Self(ts)) + >::from_sql(bytes).map(Self) } } diff --git a/graph/src/cheap_clone.rs b/graph/src/cheap_clone.rs index fc9c98ab7d1..adcb823c303 100644 --- a/graph/src/cheap_clone.rs +++ b/graph/src/cheap_clone.rs @@ -39,14 +39,14 @@ impl CheapClone for Arc { } } -impl CheapClone for Box { +impl CheapClone for Box { #[inline] fn cheap_clone(&self) -> Self { self.clone() } } -impl CheapClone for std::pin::Pin { +impl CheapClone for std::pin::Pin { #[inline] fn cheap_clone(&self) -> Self { self.clone() diff --git a/graph/src/components/link_resolver/file.rs b/graph/src/components/link_resolver/file.rs index 14b7438642e..593dccc641b 100644 --- a/graph/src/components/link_resolver/file.rs +++ b/graph/src/components/link_resolver/file.rs @@ -33,7 +33,7 @@ impl FileLinkResolver { /// All paths are treated as absolute paths. pub fn new(base_dir: Option, aliases: HashMap) -> Self { Self { - base_dir: base_dir, + base_dir, timeout: Duration::from_secs(30), aliases, } @@ -76,25 +76,24 @@ impl FileLinkResolver { // Create a path to the manifest based on the current resolver's // base directory or default to using the deployment string as path // If the deployment string is an alias, use the aliased path - let manifest_path = if let Some(aliased) = self.aliases.get(&manifest_path_str.to_string()) - { + let manifest_path = if let Some(aliased) = self.aliases.get(manifest_path_str) { aliased.clone() } else { match &resolver.base_dir { - Some(dir) => dir.join(&manifest_path_str), + Some(dir) => dir.join(manifest_path_str), None => PathBuf::from(manifest_path_str), } }; let canonical_manifest_path = manifest_path .canonicalize() - .map_err(|e| Error::from(anyhow!("Failed to canonicalize manifest path: {}", e)))?; + .map_err(|e| anyhow!("Failed to canonicalize manifest path: {}", e))?; // The manifest path is the path of the subgraph manifest file in the build directory // We use the parent directory as the base directory for the new resolver let base_dir = canonical_manifest_path .parent() - .ok_or_else(|| Error::from(anyhow!("Manifest path has no parent directory")))? + .ok_or_else(|| anyhow!("Manifest path has no parent directory"))? .to_path_buf(); resolver.base_dir = Some(base_dir); @@ -104,11 +103,7 @@ impl FileLinkResolver { pub fn remove_prefix(link: &str) -> &str { const IPFS: &str = "/ipfs/"; - if link.starts_with(IPFS) { - &link[IPFS.len()..] 
- } else { - link - } + link.strip_prefix(IPFS).unwrap_or(link) } #[async_trait] @@ -125,7 +120,7 @@ impl LinkResolverTrait for FileLinkResolver { async fn cat(&self, ctx: &LinkResolverContext, link: &Link) -> Result, Error> { let link = remove_prefix(&link.link); - let path = self.resolve_path(&link); + let path = self.resolve_path(link); slog::debug!(ctx.logger, "File resolver: reading file"; "path" => path.to_string_lossy().to_string()); @@ -136,7 +131,7 @@ impl LinkResolverTrait for FileLinkResolver { slog::error!(ctx.logger, "Failed to read file"; "path" => path.to_string_lossy().to_string(), "error" => e.to_string()); - Err(anyhow!("Failed to read file {}: {}", path.display(), e).into()) + Err(anyhow!("Failed to read file {}: {}", path.display(), e)) } } } @@ -146,7 +141,7 @@ impl LinkResolverTrait for FileLinkResolver { } async fn get_block(&self, _ctx: &LinkResolverContext, _link: &Link) -> Result, Error> { - Err(anyhow!("get_block is not implemented for FileLinkResolver").into()) + Err(anyhow!("get_block is not implemented for FileLinkResolver")) } async fn json_stream( @@ -154,7 +149,9 @@ impl LinkResolverTrait for FileLinkResolver { _ctx: &LinkResolverContext, _link: &Link, ) -> Result { - Err(anyhow!("json_stream is not implemented for FileLinkResolver").into()) + Err(anyhow!( + "json_stream is not implemented for FileLinkResolver" + )) } } diff --git a/graph/src/components/link_resolver/ipfs.rs b/graph/src/components/link_resolver/ipfs.rs index 59a9f8027d7..37e22c12994 100644 --- a/graph/src/components/link_resolver/ipfs.rs +++ b/graph/src/components/link_resolver/ipfs.rs @@ -224,7 +224,7 @@ impl LinkResolver for IpfsResolver { // run through the loop. match try_ready!(stream.poll().map_err(|e| anyhow::anyhow!("{}", e))) { Some(b) => buf.extend_from_slice(&b), - None if !buf.is_empty() => buf.extend_from_slice(&[b'\n']), + None if !buf.is_empty() => buf.extend_from_slice(b"\n"), None => return Ok(Async::Ready(None)), } } diff --git a/graph/src/components/metrics/block_state.rs b/graph/src/components/metrics/block_state.rs index 87984d46647..2a6ffb3fc64 100644 --- a/graph/src/components/metrics/block_state.rs +++ b/graph/src/components/metrics/block_state.rs @@ -37,6 +37,12 @@ impl From<&str> for CounterKey { } } +impl Default for BlockStateMetrics { + fn default() -> Self { + Self::new() + } +} + impl BlockStateMetrics { pub fn new() -> Self { BlockStateMetrics { @@ -101,7 +107,7 @@ impl BlockStateMetrics { let data_bytes = data.into_bytes(); let bucket = - Url::parse(&bucket).map_err(|e| anyhow!("Failed to parse bucket url: {}", e))?; + Url::parse(bucket).map_err(|e| anyhow!("Failed to parse bucket url: {}", e))?; let store = GoogleCloudStorageBuilder::from_env() .with_url(bucket) .build()?; diff --git a/graph/src/components/metrics/registry.rs b/graph/src/components/metrics/registry.rs index b41f27bc785..cb210040952 100644 --- a/graph/src/components/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -395,7 +395,7 @@ impl MetricsRegistry { variable_labels: &[&str], ) -> Result, PrometheusError> { let opts = Opts::new(name, help); - let counters = Box::new(IntCounterVec::new(opts, &variable_labels)?); + let counters = Box::new(IntCounterVec::new(opts, variable_labels)?); self.register(name, counters.clone()); Ok(counters) } diff --git a/graph/src/components/metrics/stopwatch.rs b/graph/src/components/metrics/stopwatch.rs index a9236c5d10a..f9eb5ff78dc 100644 --- a/graph/src/components/metrics/stopwatch.rs +++ b/graph/src/components/metrics/stopwatch.rs @@ 
-229,6 +229,7 @@ impl StopwatchInner { .write(true) .append(false) .create(true) + .truncate(true) .open(section_map) .expect("can open file"); serde_json::to_writer(&file, &entries).expect("can write json"); diff --git a/graph/src/components/network_provider/chain_identifier_validator.rs b/graph/src/components/network_provider/chain_identifier_validator.rs index d64eb0a401d..c275e4e31bc 100644 --- a/graph/src/components/network_provider/chain_identifier_validator.rs +++ b/graph/src/components/network_provider/chain_identifier_validator.rs @@ -78,7 +78,7 @@ impl ChainIdentifierValidator for ChainIdentifierStore { .store .chain_identifier(chain_name) .await - .map_err(|err| ChainIdentifierValidationError::Store(err))?; + .map_err(ChainIdentifierValidationError::Store)?; if store_identifier.is_default() { return Err(ChainIdentifierValidationError::IdentifierNotSet( @@ -120,6 +120,6 @@ impl ChainIdentifierValidator for ChainIdentifierStore { self.store .set_chain_identifier(chain_name, chain_identifier) .await - .map_err(|err| ChainIdentifierValidationError::Store(err)) + .map_err(ChainIdentifierValidationError::Store) } } diff --git a/graph/src/components/network_provider/genesis_hash_check.rs b/graph/src/components/network_provider/genesis_hash_check.rs index 26c8f91bab1..b92d4602315 100644 --- a/graph/src/components/network_provider/genesis_hash_check.rs +++ b/graph/src/components/network_provider/genesis_hash_check.rs @@ -185,8 +185,8 @@ mod tests { update_identifier_calls, } = self; - assert!(validate_identifier_calls.lock().unwrap().is_empty()); - assert!(update_identifier_calls.lock().unwrap().is_empty()); + assert!(validate_identifier_calls.get_mut().unwrap().is_empty()); + assert!(update_identifier_calls.get_mut().unwrap().is_empty()); } } @@ -226,7 +226,7 @@ mod tests { chain_identifier_calls, } = self; - assert!(chain_identifier_calls.lock().unwrap().is_empty()); + assert!(chain_identifier_calls.get_mut().unwrap().is_empty()); } } diff --git a/graph/src/components/network_provider/provider_manager.rs b/graph/src/components/network_provider/provider_manager.rs index 510b053b6ff..54454df40f6 100644 --- a/graph/src/components/network_provider/provider_manager.rs +++ b/graph/src/components/network_provider/provider_manager.rs @@ -126,7 +126,7 @@ impl ProviderManager { }; let mut validations: Vec = Vec::new(); - let adapters = Self::adapters_by_chain_names(adapters, &mut validations, &enabled_checks); + let adapters = Self::adapters_by_chain_names(adapters, &mut validations, enabled_checks); let inner = Inner { logger, @@ -428,7 +428,7 @@ mod tests { provider_name_calls, } = self; - assert!(provider_name_calls.lock().unwrap().is_empty()); + assert!(provider_name_calls.get_mut().unwrap().is_empty()); } } @@ -566,6 +566,7 @@ mod tests { let manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -583,6 +584,7 @@ mod tests { let manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -675,6 +677,7 @@ mod tests { let manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone(), adapter_2.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -736,6 +739,7 @@ mod tests { let mut manager: 
ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -766,6 +770,7 @@ mod tests { let manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -793,6 +798,7 @@ mod tests { let manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -822,6 +828,7 @@ mod tests { let mut manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -850,6 +857,7 @@ mod tests { let mut manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -889,6 +897,7 @@ mod tests { chain_name(), vec![adapter_1.clone(), adapter_2.clone(), adapter_3.clone()], )], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); @@ -933,6 +942,7 @@ mod tests { let manager: ProviderManager> = ProviderManager::new( discard(), [(chain_name(), vec![adapter_1.clone()])], + #[allow(clippy::cloned_ref_to_slice_refs)] ProviderCheckStrategy::RequireAll(&[check_1.clone()]), ); diff --git a/graph/src/components/server/mod.rs b/graph/src/components/server/mod.rs index 89323b9c8b1..a2dbcadb65f 100644 --- a/graph/src/components/server/mod.rs +++ b/graph/src/components/server/mod.rs @@ -4,4 +4,5 @@ pub mod query; /// Component for the index node server. pub mod index_node; +#[allow(clippy::module_inception)] pub mod server; diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 11748415444..4993339831a 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -220,13 +220,13 @@ impl EntityCache { if let Some(op) = self.updates.get(key).cloned() { entity = op - .apply_to(&mut entity) + .apply_to(&entity) .map_err(|e| key.unknown_attribute(e))? .map(Arc::new); } if let Some(op) = self.handler_updates.get(key).cloned() { entity = op - .apply_to(&mut entity) + .apply_to(&entity) .map_err(|e| key.unknown_attribute(e))? 
.map(Arc::new); } @@ -241,7 +241,7 @@ impl EntityCache { let query = DerivedEntityQuery { entity_type, - entity_field: field.name.clone().into(), + entity_field: field.name.clone(), value: eref.entity_id.clone(), causality_region: eref.causality_region, }; @@ -250,7 +250,7 @@ impl EntityCache { for (key, entity) in entity_map.iter() { // Only insert to the cache if it's not already there - if !self.current.contains_key(&key) { + if !self.current.contains_key(key) { self.current .insert(key.clone(), Some(Arc::new(entity.clone()))); } diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index d59a835d57b..cbf500884df 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -108,7 +108,7 @@ impl Clone for StoreError { } Self::InvalidIdentifier(arg0) => Self::InvalidIdentifier(arg0.clone()), Self::DuplicateBlockProcessing(arg0, arg1) => { - Self::DuplicateBlockProcessing(arg0.clone(), arg1.clone()) + Self::DuplicateBlockProcessing(arg0.clone(), *arg1) } Self::InternalError(arg0) => Self::InternalError(arg0.clone()), Self::DeploymentNotFound(arg0) => Self::DeploymentNotFound(arg0.clone()), @@ -121,14 +121,14 @@ impl Clone for StoreError { Self::Poisoned => Self::Poisoned, Self::WriterPanic(arg0) => Self::Unknown(anyhow!("writer panic: {}", arg0)), Self::UnsupportedDeploymentSchemaVersion(arg0) => { - Self::UnsupportedDeploymentSchemaVersion(arg0.clone()) + Self::UnsupportedDeploymentSchemaVersion(*arg0) } Self::PruneFailure(arg0) => Self::PruneFailure(arg0.clone()), Self::UnsupportedFilter(arg0, arg1) => { Self::UnsupportedFilter(arg0.clone(), arg1.clone()) } Self::WriteFailure(arg0, arg1, arg2, arg3) => { - Self::WriteFailure(arg0.clone(), arg1.clone(), arg2.clone(), arg3.clone()) + Self::WriteFailure(arg0.clone(), *arg1, arg2.clone(), arg3.clone()) } Self::StatementTimeout => Self::StatementTimeout, Self::ConstraintViolation(arg0) => Self::ConstraintViolation(arg0.clone()), @@ -244,7 +244,7 @@ impl From for StoreError { impl From for StoreError { fn from(e: std::fmt::Error) -> Self { - StoreError::Unknown(anyhow!("{}", e.to_string())) + StoreError::Unknown(anyhow!("{}", e)) } } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 818718a5f74..77675967c25 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -427,13 +427,14 @@ impl EntityCollection { /// be enough for everybody pub type BlockNumber = i32; -pub const BLOCK_NUMBER_MAX: BlockNumber = std::i32::MAX; +pub const BLOCK_NUMBER_MAX: BlockNumber = i32::MAX; /// A query for entities in a store. /// /// Details of how query generation for `EntityQuery` works can be found /// at https://github.com/graphprotocol/rfcs/blob/master/engineering-plans/0001-graphql-query-prefetching.md #[derive(Clone, Debug)] +#[non_exhaustive] pub struct EntityQuery { /// ID of the subgraph. 
pub subgraph_id: DeploymentHash, @@ -464,8 +465,6 @@ pub struct EntityQuery { pub query_id: Option, pub trace: bool, - - _force_use_of_new: (), } impl EntityQuery { @@ -484,7 +483,6 @@ impl EntityQuery { logger: None, query_id: None, trace: false, - _force_use_of_new: (), } } @@ -1017,12 +1015,12 @@ impl PruneRequest { ) -> Result { let rebuild_threshold = ENV_VARS.store.rebuild_threshold; let delete_threshold = ENV_VARS.store.delete_threshold; - if rebuild_threshold < 0.0 || rebuild_threshold > 1.0 { + if !(0.0..=1.0).contains(&rebuild_threshold) { return Err(internal_error!( "the copy threshold must be between 0 and 1 but is {rebuild_threshold}" )); } - if delete_threshold < 0.0 || delete_threshold > 1.0 { + if !(0.0..=1.0).contains(&delete_threshold) { return Err(internal_error!( "the delete threshold must be between 0 and 1 but is {delete_threshold}" )); diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 1c464be487e..fb123b9a012 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -106,7 +106,7 @@ impl<'a> TryFrom<&'a EntityModification> for EntityWrite<'a> { end, } => Ok(EntityWrite { id: &key.entity_id, - entity: &data, + entity: data, causality_region: key.causality_region, block: *block, end: *end, @@ -207,7 +207,7 @@ impl EntityModification { } /// Turn an `Overwrite` into an `Insert`, return an error if this is a `Remove` - fn as_insert(self, entity_type: &EntityType) -> Result { + fn into_insert(self, entity_type: &EntityType) -> Result { use EntityModification::*; match self { @@ -223,13 +223,11 @@ impl EntityModification { block, end, }), - Remove { key, .. } => { - return Err(internal_error!( - "a remove for {}[{}] can not be converted into an insert", - entity_type, - key.entity_id - )) - } + Remove { key, .. } => Err(internal_error!( + "a remove for {}[{}] can not be converted into an insert", + entity_type, + key.entity_id + )), } } @@ -512,7 +510,7 @@ impl RowGroup { Overwrite { block, .. }, ) => { prev_row.clamp(*block)?; - let row = row.as_insert(&self.entity_type)?; + let row = row.into_insert(&self.entity_type)?; self.push_row(row); } (Insert { end: None, .. } | Overwrite { end: None, .. }, Remove { block, .. 
}) => { @@ -839,8 +837,7 @@ impl Batch { .entries .iter() .filter(move |(ptr, _)| ptr.number <= at) - .map(|(_, ds)| ds) - .flatten() + .flat_map(|(_, ds)| ds) .filter(|ds| { !self .offchain_to_remove @@ -850,7 +847,7 @@ impl Batch { }) } - pub fn groups<'a>(&'a self) -> impl Iterator { + pub fn groups(&self) -> impl Iterator { self.mods.groups.iter() } @@ -937,7 +934,6 @@ impl<'a> WriteChunk<'a> { /// Return a vector of `WriteChunk`s each containing a single write pub fn as_single_writes(&self) -> Vec { (0..self.len()) - .into_iter() .map(|position| WriteChunk { group: self.group, chunk_size: 1, @@ -981,7 +977,7 @@ impl<'a> Iterator for WriteChunkIter<'a> { return insert; } } - return None; + None } } @@ -1046,7 +1042,7 @@ mod test { }) .collect::>(); let exp = Vec::from_iter( - exp.into_iter() + exp.iter() .map(|(block, values)| (*block, Vec::from_iter(values.iter().map(as_id)))), ); assert_eq!(exp, act); @@ -1158,7 +1154,7 @@ mod test { impl PartialEq<&[Mod]> for Group { fn eq(&self, mods: &&[Mod]) -> bool { - let mods: Vec<_> = mods.iter().map(|m| EntityModification::from(m)).collect(); + let mods: Vec<_> = mods.iter().map(EntityModification::from).collect(); self.group.rows == mods } } diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index f43c6aa3c00..26332698bb7 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -32,9 +32,9 @@ impl From for MappingError { impl From for MappingError { fn from(value: HostExportError) -> MappingError { match value { - HostExportError::PossibleReorg(e) => MappingError::PossibleReorg(e.into()), + HostExportError::PossibleReorg(e) => MappingError::PossibleReorg(e), HostExportError::Deterministic(e) | HostExportError::Unknown(e) => { - MappingError::Unknown(e.into()) + MappingError::Unknown(e) } } } diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index c6d3f0c7e85..6ee720a10c0 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -140,10 +140,7 @@ impl BlockState { assert!(!self.in_handler); self.created_data_sources .iter() - .any(|ds| match ds.template { - InstanceDSTemplate::Onchain(_) => true, - _ => false, - }) + .any(|ds| matches!(ds.template, InstanceDSTemplate::Onchain(_))) } pub fn drain_created_data_sources(&mut self) -> Vec { diff --git a/graph/src/components/subgraph/proof_of_indexing/mod.rs b/graph/src/components/subgraph/proof_of_indexing/mod.rs index 718a3a5cecd..b3861c0cea6 100644 --- a/graph/src/components/subgraph/proof_of_indexing/mod.rs +++ b/graph/src/components/subgraph/proof_of_indexing/mod.rs @@ -148,11 +148,10 @@ mod tests { // Create a database which stores intermediate PoIs let mut db = HashMap::>::new(); - let mut block_count = 1; - for causality_region in case.data.causality_regions.values() { - block_count = causality_region.blocks.len(); - break; - } + let block_count = match case.data.causality_regions.values().next() { + Some(causality_region) => causality_region.blocks.len(), + None => 1, + }; for block_i in 0..block_count { let mut stream = ProofOfIndexing::new(block_i.try_into().unwrap(), version); diff --git a/graph/src/components/subgraph/settings.rs b/graph/src/components/subgraph/settings.rs index a7512614583..f1ad9459b37 100644 --- a/graph/src/components/subgraph/settings.rs +++ b/graph/src/components/subgraph/settings.rs @@ -45,7 +45,7 @@ impl Settings { Self::from_str(&read_to_string(path)?) 
} - pub fn from_str(toml: &str) -> Result { + fn from_str(toml: &str) -> Result { toml::from_str::(toml).map_err(anyhow::Error::from) } @@ -77,18 +77,12 @@ mod test { let section = Settings::from_str(content).unwrap(); assert_eq!(section.settings.len(), 3); - let rule1 = match §ion.settings[0].pred { - Predicate::Name(name) => name, - }; + let Predicate::Name(rule1) = §ion.settings[0].pred; assert_eq!(rule1.as_str(), ".*"); - let rule2 = match §ion.settings[1].pred { - Predicate::Name(name) => name, - }; + let Predicate::Name(rule2) = §ion.settings[1].pred; assert_eq!(rule2.as_str(), "xxxxx"); - let rule1 = match §ion.settings[2].pred { - Predicate::Name(name) => name, - }; + let Predicate::Name(rule1) = §ion.settings[2].pred; assert_eq!(rule1.as_str(), ".*!$"); } } diff --git a/graph/src/data/graphql/ext.rs b/graph/src/data/graphql/ext.rs index 271ace79237..8cdc312f72b 100644 --- a/graph/src/data/graphql/ext.rs +++ b/graph/src/data/graphql/ext.rs @@ -237,10 +237,7 @@ impl TypeExt for Type { // Returns true if the given type is a non-null type. fn is_non_null(&self) -> bool { - match self { - Type::NonNullType(_) => true, - _ => false, - } + matches!(self, Type::NonNullType(_)) } } @@ -391,7 +388,7 @@ impl FieldExt for Field { } fn argument(&self, name: &str) -> Option<&s::InputValue> { - self.arguments.iter().find(|iv| &iv.name == name) + self.arguments.iter().find(|iv| iv.name == name) } } diff --git a/graph/src/data/graphql/load_manager.rs b/graph/src/data/graphql/load_manager.rs index 12fa565d321..b8bdb4a63d0 100644 --- a/graph/src/data/graphql/load_manager.rs +++ b/graph/src/data/graphql/load_manager.rs @@ -310,9 +310,9 @@ impl LoadManager { .map(GenericCounter::inc); if !ENV_VARS.load_management_is_disabled() { let qref = QueryRef::new(deployment, shape_hash); - self.effort - .get(shard) - .map(|effort| effort.add(shard, qref, duration, &self.effort_gauge)); + if let Some(effort) = self.effort.get(shard) { + effort.add(shard, qref, duration, &self.effort_gauge) + } } } @@ -438,8 +438,7 @@ impl LoadManager { // Kill random queries in case we have no queries, or not enough queries // that cause at least 20% of the effort let kill_rate = self.update_kill_rate(shard, kill_rate, last_update, overloaded, wait_ms); - let decline = - rng().random_bool((kill_rate * query_effort / total_effort).min(1.0).max(0.0)); + let decline = rng().random_bool((kill_rate * query_effort / total_effort).clamp(0.0, 1.0)); if decline { if ENV_VARS.load_simulate { debug!(self.logger, "Declining query"; diff --git a/graph/src/data/graphql/object_or_interface.rs b/graph/src/data/graphql/object_or_interface.rs index 625965f2ba1..b86764bbdc5 100644 --- a/graph/src/data/graphql/object_or_interface.rs +++ b/graph/src/data/graphql/object_or_interface.rs @@ -100,7 +100,7 @@ impl<'a> ObjectOrInterface<'a> { } pub fn field(&self, name: &str) -> Option<&s::Field> { - self.fields().iter().find(|field| &field.name == name) + self.fields().iter().find(|field| field.name == name) } pub fn object_types(self, schema: &'a Schema) -> Option> { diff --git a/graph/src/data/query/cache_status.rs b/graph/src/data/query/cache_status.rs index b5ff2db3ae1..0a713e81a46 100644 --- a/graph/src/data/query/cache_status.rs +++ b/graph/src/data/query/cache_status.rs @@ -6,7 +6,7 @@ use serde::Serialize; use crate::derive::CacheWeight; /// Used for checking if a response hit the cache. 
-#[derive(Copy, Clone, CacheWeight, Debug, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, CacheWeight, Debug, PartialEq, Eq, Hash, Default)] pub enum CacheStatus { /// Hit is a hit in the generational cache. Hit, @@ -18,15 +18,10 @@ pub enum CacheStatus { Insert, /// A miss is none of the above. + #[default] Miss, } -impl Default for CacheStatus { - fn default() -> Self { - CacheStatus::Miss - } -} - impl fmt::Display for CacheStatus { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.as_str()) diff --git a/graph/src/data/query/mod.rs b/graph/src/data/query/mod.rs index 407c2218525..b278160a01d 100644 --- a/graph/src/data/query/mod.rs +++ b/graph/src/data/query/mod.rs @@ -1,5 +1,6 @@ mod cache_status; mod error; +#[allow(clippy::module_inception)] mod query; mod result; mod trace; diff --git a/graph/src/data/query/query.rs b/graph/src/data/query/query.rs index 5bb64a8a134..0520be3be78 100644 --- a/graph/src/data/query/query.rs +++ b/graph/src/data/query/query.rs @@ -127,6 +127,7 @@ impl QueryTarget { /// A GraphQL query as submitted by a client, either directly or through a subscription. #[derive(Clone, Debug)] +#[non_exhaustive] pub struct Query { pub document: q::Document, pub variables: Option, @@ -134,7 +135,6 @@ pub struct Query { pub query_text: Arc, pub variables_text: Arc, pub trace: bool, - _force_use_of_new: (), } impl Query { @@ -162,7 +162,6 @@ impl Query { query_text: Arc::new(query_text), variables_text: Arc::new(variables_text), trace, - _force_use_of_new: (), } } } diff --git a/graph/src/data/query/trace.rs b/graph/src/data/query/trace.rs index 256c9cdeaf6..c6682274100 100644 --- a/graph/src/data/query/trace.rs +++ b/graph/src/data/query/trace.rs @@ -35,8 +35,9 @@ impl HttpTrace { } } -#[derive(Debug, CacheWeight)] +#[derive(Debug, CacheWeight, Default)] pub enum Trace { + #[default] None, Root { query: Arc, @@ -77,12 +78,6 @@ pub enum Trace { }, } -impl Default for Trace { - fn default() -> Self { - Self::None - } -} - impl Trace { pub fn root( query: &Arc, diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs index 9726141e2d6..222a11fefdf 100644 --- a/graph/src/data/store/id.rs +++ b/graph/src/data/store/id.rs @@ -89,12 +89,12 @@ impl IdType { } } -impl<'a> TryFrom<&s::ObjectType> for IdType { +impl TryFrom<&s::ObjectType> for IdType { type Error = Error; fn try_from(obj_type: &s::ObjectType) -> Result { let base_type = obj_type - .field(&*ID) + .field(&ID) .ok_or_else(|| anyhow!("Type {} does not have an `id` field", obj_type.name))? 
.field_type .get_base_type(); @@ -484,7 +484,7 @@ impl IdList { } pub fn first(&self) -> Option> { - if self.len() > 0 { + if !self.is_empty() { Some(self.index(0)) } else { None @@ -566,7 +566,7 @@ mod tests { assert_eq!(exp, id); let id = IdType::Int8.generate_id(3, 2).unwrap(); - let exp = Id::Int8(0x0000_0003__0000_0002); + let exp = Id::Int8(0x0000_0003_0000_0002); assert_eq!(exp, id); // Should be id + 1 diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index d56ae785cf3..f113c5248c8 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -3,8 +3,7 @@ use crate::{ prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, schema::{input::VID_FIELD, EntityKey}, - util::intern::{self, AtomPool}, - util::intern::{Error as InternError, NullValue, Object}, + util::intern::{self, AtomPool, Error as InternError, NullValue, Object}, }; use anyhow::{anyhow, Error}; use itertools::Itertools; @@ -38,12 +37,14 @@ pub mod sql; pub struct NodeId(String); impl NodeId { - pub fn new(s: impl Into) -> Result { + /// Create a new NodeId. The name `s` must be between 1 and 63 + /// characters long. If it is not, `Err(s)` is returned + pub fn new(s: impl Into) -> Result { let s = s.into(); // Enforce minimum and maximum length limit if s.len() > 63 || s.is_empty() { - return Err(()); + return Err(s); } Ok(NodeId(s)) @@ -77,8 +78,8 @@ impl<'de> de::Deserialize<'de> for NodeId { D: de::Deserializer<'de>, { let s: String = de::Deserialize::deserialize(deserializer)?; - NodeId::new(s.clone()) - .map_err(|()| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid node ID")) + NodeId::new(s) + .map_err(|s| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid node ID")) } } @@ -358,7 +359,7 @@ impl Value { })?), BIG_DECIMAL_SCALAR => Value::BigDecimal(scalar::BigDecimal::from_str(s)?), INT8_SCALAR => Value::Int8(s.parse::().map_err(|_| { - QueryExecutionError::ValueParseError("Int8".to_string(), format!("{}", s)) + QueryExecutionError::ValueParseError("Int8".to_string(), s.to_string()) })?), TIMESTAMP_SCALAR => { Value::Timestamp(scalar::Timestamp::parse_timestamp(s).map_err(|_| { @@ -644,7 +645,7 @@ impl From for Value { impl From for Value { fn from(value: i64) -> Value { - Value::Int8(value.into()) + Value::Int8(value) } } @@ -712,7 +713,117 @@ pub trait TryIntoEntityIterator: IntoIterator impl>> TryIntoEntityIterator for T {} #[derive(Debug, Error, PartialEq, Eq, Clone)] -pub enum EntityValidationError { +pub struct EntityValidationError(Box); + +impl fmt::Display for EntityValidationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl EntityValidationError { + pub fn unknown_entity_type(key: &EntityKey) -> Self { + let entity = key.entity_type.to_string(); + let id = key.entity_id.to_string(); + + EntityValidationError(Box::new(EntityValidationErrorInner::UnknownEntityType { + entity, + id, + })) + } + + pub fn mismatched_element_type_in_list( + key: &EntityKey, + field: &crate::schema::Field, + value: &Value, + elt: &Value, + index: usize, + ) -> Self { + let entity = key.entity_type.to_string(); + let entity_id = key.entity_id.to_string(); + let expected_type = field.field_type.to_string(); + let field = field.name.to_string(); + let value = value.to_string(); + let actual_type = elt.type_name(); + EntityValidationError(Box::new( + EntityValidationErrorInner::MismatchedElementTypeInList { + entity, + entity_id, + field, + expected_type, + value, + 
actual_type, + index, + }, + )) + } + + pub fn invalid_field_type( + key: &EntityKey, + field: &crate::schema::Field, + value: &Value, + ) -> Self { + let entity = key.entity_type.to_string(); + let entity_id = key.entity_id.to_string(); + let expected_type = field.field_type.to_string(); + let field = field.name.to_string(); + let actual_type = value.type_name(); + let value = value.to_string(); + + EntityValidationError(Box::new(EntityValidationErrorInner::InvalidFieldType { + entity, + entity_id, + value, + field, + expected_type, + actual_type, + })) + } + + fn missing_value_for_non_nullable_field(key: &EntityKey, field: &crate::schema::Field) -> Self { + let entity = key.entity_type.to_string(); + let entity_id = key.entity_id.to_string(); + let field = field.name.to_string(); + EntityValidationError(Box::new( + EntityValidationErrorInner::MissingValueForNonNullableField { + entity, + entity_id, + field, + }, + )) + } + + fn cannot_set_derived_field(key: &EntityKey, field: &crate::schema::Field) -> Self { + EntityValidationError(Box::new( + EntityValidationErrorInner::CannotSetDerivedField { + entity: key.entity_type.to_string(), + entity_id: key.entity_id.to_string(), + field: field.name.to_string(), + }, + )) + } + + fn unknown_key(not_interned: String) -> Self { + EntityValidationError(Box::new(EntityValidationErrorInner::UnknownKey( + not_interned, + ))) + } + + fn missing_id_attribute(entity: String) -> EntityValidationError { + EntityValidationError(Box::new(EntityValidationErrorInner::MissingIDAttribute { + entity, + })) + } + + fn unsupported_type_for_id_attribute() -> EntityValidationError { + EntityValidationError(Box::new( + EntityValidationErrorInner::UnsupportedTypeForIDAttribute, + )) + } +} + +#[derive(Debug, Error, PartialEq, Eq, Clone)] +pub enum EntityValidationErrorInner { #[error("Entity {entity}[{id}]: unknown entity type `{entity}`")] UnknownEntityType { entity: String, id: String }, @@ -781,10 +892,9 @@ pub enum EntityValidationError { macro_rules! 
entity { ($schema:expr => $($name:ident: $value:expr,)*) => { { - let mut result = Vec::new(); - $( - result.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); - )* + let result = vec![$( + ($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value)), + )*]; $schema.make_entity(result).unwrap() } }; @@ -801,7 +911,7 @@ impl Entity { let mut obj = Object::new(pool); for (key, value) in iter { obj.insert(key, value) - .map_err(|e| EntityValidationError::UnknownKey(e.not_interned()))?; + .map_err(|e| EntityValidationError::unknown_key(e.not_interned()))?; } let entity = Entity(obj); entity.check_id()?; @@ -858,11 +968,12 @@ impl Entity { fn check_id(&self) -> Result<(), EntityValidationError> { match self.get("id") { - None => Err(EntityValidationError::MissingIDAttribute { - entity: format!("{:?}", self.0), - }), + None => Err(EntityValidationError::missing_id_attribute(format!( + "{:?}", + self.0 + ))), Some(Value::String(_)) | Some(Value::Bytes(_)) | Some(Value::Int8(_)) => Ok(()), - _ => Err(EntityValidationError::UnsupportedTypeForIDAttribute), + _ => Err(EntityValidationError::unsupported_type_for_id_attribute()), } } @@ -943,12 +1054,10 @@ impl Entity { return Ok(()); } - let object_type = key.entity_type.object_type().map_err(|_| { - EntityValidationError::UnknownEntityType { - entity: key.entity_type.to_string(), - id: key.entity_id.to_string(), - } - })?; + let object_type = key + .entity_type + .object_type() + .map_err(|_| EntityValidationError::unknown_entity_type(key))?; for field in object_type.fields.iter() { match (self.get(&field.name), field.is_derived()) { @@ -960,48 +1069,29 @@ impl Entity { // assigning a scalar to a list will be caught below if let Value::List(elts) = value { for (index, elt) in elts.iter().enumerate() { - if !elt.is_assignable(&scalar_type, false) { + if !elt.is_assignable(scalar_type, false) { return Err( - EntityValidationError::MismatchedElementTypeInList { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - field: field.name.to_string(), - expected_type: field.field_type.to_string(), - value: value.to_string(), - actual_type: elt.type_name().to_string(), - index, - }, + EntityValidationError::mismatched_element_type_in_list( + key, field, value, elt, index, + ), ); } } } } - if !value.is_assignable(&scalar_type, field.field_type.is_list()) { - return Err(EntityValidationError::InvalidFieldType { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - value: value.to_string(), - field: field.name.to_string(), - expected_type: field.field_type.to_string(), - actual_type: value.type_name().to_string(), - }); + if !value.is_assignable(scalar_type, field.field_type.is_list()) { + return Err(EntityValidationError::invalid_field_type(key, field, value)); } } (None, false) => { if field.field_type.is_non_null() { - return Err(EntityValidationError::MissingValueForNonNullableField { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - field: field.name.to_string(), - }); + return Err(EntityValidationError::missing_value_for_non_nullable_field( + key, field, + )); } } (Some(_), true) => { - return Err(EntityValidationError::CannotSetDerivedField { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - field: field.name.to_string(), - }); + return Err(EntityValidationError::cannot_set_derived_field(key, field)); } (None, true) => { // derived fields should not be set @@ 
-1258,7 +1348,7 @@ fn entity_hidden_vid() { // get returns nothing... assert_eq!(entity.get(VID_FIELD), None); - assert_eq!(entity.contains_key(VID_FIELD), false); + assert!(!entity.contains_key(VID_FIELD)); // ...while vid is present assert_eq!(entity.vid(), 3i64); diff --git a/graph/src/data/store/scalar/bigdecimal.rs b/graph/src/data/store/scalar/bigdecimal.rs index 65738563a67..baba57a2d4e 100644 --- a/graph/src/data/store/scalar/bigdecimal.rs +++ b/graph/src/data/store/scalar/bigdecimal.rs @@ -631,7 +631,7 @@ mod test { #[test] fn big_decimal_stable() { - let cases = vec![ + let cases = [ ( "28b09c9c3f3e2fe037631b7fbccdf65c37594073016d8bf4bb0708b3fda8066a", "0.1", diff --git a/graph/src/data/store/scalar/bigint.rs b/graph/src/data/store/scalar/bigint.rs index 195d26a5cb5..696a1fd49ec 100644 --- a/graph/src/data/store/scalar/bigint.rs +++ b/graph/src/data/store/scalar/bigint.rs @@ -73,7 +73,7 @@ mod big_int { } pub fn bits(&self) -> usize { - self.0.bits() as usize + self.0.bits() } pub(in super::super) fn inner(self) -> num_bigint::BigInt { diff --git a/graph/src/data/store/scalar/bytes.rs b/graph/src/data/store/scalar/bytes.rs index 585b548f931..8c5f4f1fe08 100644 --- a/graph/src/data/store/scalar/bytes.rs +++ b/graph/src/data/store/scalar/bytes.rs @@ -52,7 +52,7 @@ impl FromStr for Bytes { } } -impl<'a> From<&'a [u8]> for Bytes { +impl From<&[u8]> for Bytes { fn from(array: &[u8]) -> Self { Bytes(array.into()) } diff --git a/graph/src/data/store/scalar/timestamp.rs b/graph/src/data/store/scalar/timestamp.rs index 58b2ef10cb8..1d9026d5a72 100644 --- a/graph/src/data/store/scalar/timestamp.rs +++ b/graph/src/data/store/scalar/timestamp.rs @@ -58,7 +58,7 @@ impl Timestamp { } pub fn since_epoch(secs: i64, nanos: u32) -> Option { - DateTime::from_timestamp(secs, nanos).map(|dt| Timestamp(dt)) + DateTime::from_timestamp(secs, nanos).map(Timestamp) } pub fn as_secs_since_epoch(&self) -> i64 { diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index f8047c0a807..9c2c29d0f07 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -194,7 +194,14 @@ impl TryFromValue for DeploymentHash { pub struct SubgraphName(String); impl SubgraphName { - pub fn new(s: impl Into) -> Result { + /// Construct a new `SubgraphName`, validating the name according to the rules: + /// - Length between 1 and 255 characters + /// - Contains only alphanumeric characters, dashes (`-`), underscores (`_`), and slashes (`/`) + /// - Each part (separated by `/`) must be non-empty, start and end with an alphanumeric character, + /// contain at least one alphabetic character, and not be equal to "graphql" + /// + /// If the name is invalid, return s (as a `String`) as the error + pub fn new(s: impl Into) -> Result { let s = s.into(); // Note: these validation rules must be kept consistent with the validation rules @@ -202,7 +209,7 @@ impl SubgraphName { // Enforce length limits if s.is_empty() || s.len() > 255 { - return Err(()); + return Err(s); } // Check that the name contains only allowed characters. 
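// A quick sketch of the naming rules documented on `SubgraphName::new` above,
// assuming the `Result<SubgraphName, String>` signature introduced in this change;
// the example names are illustrative only.
fn subgraph_name_examples() {
    assert!(SubgraphName::new("org/my-subgraph").is_ok());
    assert!(SubgraphName::new("org/graphql").is_err()); // "graphql" is a reserved segment
    assert!(SubgraphName::new("org//nested").is_err()); // empty segment between slashes
    assert!(SubgraphName::new("-starts-with-dash").is_err()); // segments must start alphanumeric
    assert!(SubgraphName::new("1234").is_err()); // each segment needs at least one letter
}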
@@ -210,19 +217,19 @@ impl SubgraphName { .chars() .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '/') { - return Err(()); + return Err(s); } // Parse into components and validate each for part in s.split('/') { // Each part must be non-empty if part.is_empty() { - return Err(()); + return Err(s); } // To keep URLs unambiguous, reserve the token "graphql" if part == "graphql" { - return Err(()); + return Err(s); } // Part should not start or end with a special character. @@ -232,7 +239,7 @@ impl SubgraphName { || !last_char.is_ascii_alphanumeric() || !part.chars().any(|c| c.is_ascii_alphabetic()) { - return Err(()); + return Err(s); } } @@ -272,7 +279,7 @@ impl<'de> de::Deserialize<'de> for SubgraphName { { let s: String = de::Deserialize::deserialize(deserializer)?; SubgraphName::new(s.clone()) - .map_err(|()| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid subgraph name")) + .map_err(|s| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid subgraph name")) } } @@ -382,6 +389,12 @@ pub enum SubgraphManifestResolveError { #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct DataSourceContext(HashMap); +impl Default for DataSourceContext { + fn default() -> Self { + Self::new() + } +} + impl DataSourceContext { pub fn new() -> Self { Self(HashMap::new()) @@ -410,7 +423,7 @@ pub struct Link { /// Custom deserializer for Link /// This handles both formats: /// 1. Simple string: "schema.graphql" or "subgraph.yaml" which is used in [`FileLinkResolver`] -/// FileLinkResolver is used in local development environments +/// FileLinkResolver is used in local development environments /// 2. IPLD format: { "/": "Qm..." } which is used in [`IpfsLinkResolver`] impl<'de> de::Deserialize<'de> for Link { fn deserialize(deserializer: D) -> Result @@ -741,20 +754,19 @@ impl UnvalidatedSubgraphManifest { let mut errors = Vec::new(); // Check spec version support for subgraph datasources - if *spec_version < SPEC_VERSION_1_3_0 { - if data_sources + if *spec_version < SPEC_VERSION_1_3_0 + && data_sources .iter() .any(|ds| matches!(ds, DataSource::Subgraph(_))) - { - errors.push(SubgraphManifestValidationError::DataSourceValidation( - "subgraph".to_string(), - anyhow!( - "Subgraph datasources are not supported prior to spec version {}", - SPEC_VERSION_1_3_0 - ), - )); - return errors; - } + { + errors.push(SubgraphManifestValidationError::DataSourceValidation( + "subgraph".to_string(), + anyhow!( + "Subgraph datasources are not supported prior to spec version {}", + SPEC_VERSION_1_3_0 + ), + )); + return errors; } let subgraph_ds_count = data_sources @@ -1014,15 +1026,12 @@ impl SubgraphManifest { .map(|s| s.to_string()) .collect_vec(); - let api_version = unified_api_version - .map(|v| v.version().map(|v| v.to_string())) - .flatten(); + let api_version = unified_api_version.and_then(|v| v.version().map(|v| v.to_string())); let handler_kinds = self .data_sources .iter() - .map(|ds| ds.handler_kinds()) - .flatten() + .flat_map(|ds| ds.handler_kinds()) .collect::>(); let features: Vec = self @@ -1185,16 +1194,13 @@ impl UnresolvedSubgraphManifest { let schema = match schema { Some(schema) => schema, None if amp_data_sources.len() == data_sources.len() => { - let table_schemas = amp_data_sources - .iter() - .map(|data_source| { - data_source - .transformer - .tables - .iter() - .map(|table| (table.name.clone(), table.schema.clone())) - }) - .flatten(); + let table_schemas = amp_data_sources.iter().flat_map(|data_source| { + data_source + .transformer + .tables 
+ .iter() + .map(|table| (table.name.clone(), table.schema.clone())) + }); amp::schema::generate_subgraph_schema(&id, table_schemas)? } @@ -1364,7 +1370,7 @@ impl DeploymentState { /// `block` pub fn has_deterministic_errors(&self, block: &BlockPtr) -> bool { self.first_error_block - .map_or(false, |first_error_block| first_error_block <= block.number) + .is_some_and(|first_error_block| first_error_block <= block.number) } } diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index af2629a1f18..7abf8fec47e 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -350,7 +350,7 @@ impl Value { ("BigDecimal", Value::Int(i)) => Ok(Value::String(i.to_string())), ("BigDecimal", Value::String(s)) => Ok(Value::String(s)), ("Int", Value::Int(num)) => { - if i32::min_value() as i64 <= num && num <= i32::max_value() as i64 { + if i32::MIN as i64 <= num && num <= i32::MAX as i64 { Ok(Value::Int(num)) } else { Err(Value::Int(num)) @@ -409,7 +409,7 @@ impl std::fmt::Display for Value { write!(f, "}}") } Value::Timestamp(ref ts) => { - write!(f, "\"{}\"", ts.as_microseconds_since_epoch().to_string()) + write!(f, "\"{}\"", ts.as_microseconds_since_epoch()) } } } @@ -433,7 +433,7 @@ impl Serialize for Value { seq.end() } Value::Timestamp(ts) => { - serializer.serialize_str(&ts.as_microseconds_since_epoch().to_string().as_str()) + serializer.serialize_str(ts.as_microseconds_since_epoch().to_string().as_str()) } Value::Null => serializer.serialize_none(), Value::String(s) => serializer.serialize_str(s), diff --git a/graph/src/data_source/common.rs b/graph/src/data_source/common.rs index f89739c6ab7..511d18f3de7 100644 --- a/graph/src/data_source/common.rs +++ b/graph/src/data_source/common.rs @@ -387,20 +387,20 @@ impl UnresolvedMappingABI { /// `event.params.`. Each entry under `calls` gets turned into a /// `CallDcl` #[derive(Clone, CheapClone, Debug, Default, Hash, Eq, PartialEq)] +#[non_exhaustive] pub struct CallDecls { pub decls: Arc>, - readonly: (), } /// A single call declaration, like `myCall1: /// Contract[address].function(arg1, arg2, ...)` #[derive(Clone, Debug, Hash, Eq, PartialEq)] +#[non_exhaustive] pub struct CallDecl { /// A user-defined label pub label: String, /// The call expression pub expr: CallExpr, - readonly: (), } impl CallDecl { @@ -420,7 +420,7 @@ impl CallDecl { EthereumArg::Param(name) => { let value = params .iter() - .find(|param| ¶m.name == name.as_str()) + .find(|param| param.name == name.as_str()) .ok_or_else(|| { anyhow!( "In declarative call '{}': unknown param {}", @@ -441,7 +441,7 @@ impl CallDecl { EthereumArg::StructField(param_name, field_accesses) => { let param = params .iter() - .find(|param| ¶m.name == param_name.as_str()) + .find(|param| param.name == param_name.as_str()) .ok_or_else(|| { anyhow!( "In declarative call '{}': unknown param {}", @@ -486,7 +486,7 @@ impl CallDecl { EthereumArg::Param(name) => { let value = params .iter() - .find(|param| ¶m.name == name.as_str()) + .find(|param| param.name == name.as_str()) .ok_or_else(|| anyhow!("In declarative call '{}': unknown param {}", self.label, name))? 
.value .clone(); @@ -495,7 +495,7 @@ impl CallDecl { EthereumArg::StructField(param_name, field_accesses) => { let param = params .iter() - .find(|param| ¶m.name == param_name.as_str()) + .find(|param| param.name == param_name.as_str()) .ok_or_else(|| anyhow!("In declarative call '{}': unknown param {}", self.label, param_name))?; Self::extract_nested_struct_field( @@ -578,7 +578,7 @@ impl CallDecl { self.expr .args .iter() - .zip(param_types.into_iter()) + .zip(param_types) .map(|(arg, expected_type)| { self.process_entity_handler_arg(arg, &expected_type, entity) }) @@ -763,9 +763,9 @@ impl CallDecl { /// Unresolved representation of declared calls stored as raw strings /// Used during initial manifest parsing before ABI context is available #[derive(Clone, CheapClone, Debug, Default, Eq, PartialEq)] +#[non_exhaustive] pub struct UnresolvedCallDecls { pub raw_decls: Arc>, - readonly: (), } impl UnresolvedCallDecls { @@ -784,7 +784,6 @@ impl UnresolvedCallDecls { .map(|expr| CallDecl { label: label.clone(), expr, - readonly: (), }) .with_context(|| format!("Error in declared call '{}':", label)) }) @@ -792,7 +791,6 @@ impl UnresolvedCallDecls { Ok(CallDecls { decls: Arc::new(decls?), - readonly: (), }) } @@ -811,18 +809,17 @@ impl<'de> de::Deserialize<'de> for UnresolvedCallDecls { de::Deserialize::deserialize(deserializer)?; Ok(UnresolvedCallDecls { raw_decls: Arc::new(raw_decls), - readonly: (), }) } } #[derive(Clone, Debug, Hash, Eq, PartialEq)] +#[non_exhaustive] pub struct CallExpr { pub abi: Word, pub address: CallArg, pub func: Word, pub args: Vec, - readonly: (), } impl CallExpr { @@ -962,7 +959,6 @@ impl CallExpr { address, func: Word::from(func), args, - readonly: (), }; expr.validate_args().with_context(|| { @@ -1163,7 +1159,7 @@ impl CallArg { .into_iter() .map(|part| part.parse::()) .collect::, _>>() - .with_context(|| format!("Failed to parse numeric field indices"))? + .with_context(|| "Failed to parse numeric field indices".to_string())? 
}; Ok(CallArg::Ethereum(EthereumArg::StructField( Word::from(param), @@ -1235,7 +1231,7 @@ impl DeclaredCall { .context(format!( "Failed to parse arguments for call to function \"{}\" of contract \"{}\"", decl.expr.func.as_str(), - decl.expr.abi.to_string() + decl.expr.abi ))?, )) }) @@ -1655,7 +1651,6 @@ mod tests { let call_decl = CallDecl { label: "myTokenCall".to_string(), expr: parser.ok("ERC20[event.params.asset.1].name()"), - readonly: (), }; // Test scenario 1: Unknown parameter @@ -1719,7 +1714,6 @@ mod tests { let call_decl_with_args = CallDecl { label: "transferCall".to_string(), expr, - readonly: (), }; // Create a structure where base has only 2 fields instead of 3 diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index d33c6e41560..cd34ca62857 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -457,7 +457,7 @@ impl DataSourceTemplate { pub fn name(&self) -> &str { match self { - Self::Onchain(ds) => &ds.name(), + Self::Onchain(ds) => ds.name(), Self::Offchain(ds) => &ds.name, Self::Subgraph(ds) => &ds.name, } @@ -708,7 +708,7 @@ impl<'de, C: Blockchain> Deserialize<'de> for UnresolvedDataSource { amp::manifest::data_source::RawDataSource::deserialize(map.into_deserializer()) .map(UnresolvedDataSource::Amp) .map_err(serde::de::Error::custom) - } else if (&C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { + } else if (C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { C::UnresolvedDataSource::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map(UnresolvedDataSource::Onchain) @@ -742,7 +742,7 @@ impl<'de, C: Blockchain> Deserialize<'de> for UnresolvedDataSourceTemplate { subgraph::UnresolvedDataSourceTemplate::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map(UnresolvedDataSourceTemplate::Subgraph) - } else if (&C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { + } else if (C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { C::UnresolvedDataSourceTemplate::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map(UnresolvedDataSourceTemplate::Onchain) diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index 70459a86692..35e62a3cdcc 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -63,21 +63,17 @@ impl OffchainDataSourceKind { } } -impl ToString for OffchainDataSourceKind { - fn to_string(&self) -> String { +impl fmt::Display for OffchainDataSourceKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // This is less performant than hardcoding the values but makes it more difficult // to be used incorrectly, since this map is quite small it should be fine. 
- OFFCHAIN_KINDS + let label = OFFCHAIN_KINDS .iter() - .find_map(|(str, kind)| { - if kind.eq(self) { - Some(str.to_string()) - } else { - None - } - }) + .find_map(|(str, kind)| if kind.eq(self) { Some(*str) } else { None }) // the kind is validated based on OFFCHAIN_KINDS so it's guaranteed to exist - .unwrap() + .unwrap_or(""); + + write!(f, "{}", label) } } @@ -336,9 +332,9 @@ impl Source { } } -impl Into for Source { - fn into(self) -> Bytes { - match self { +impl From for Bytes { + fn from(val: Source) -> Self { + match val { Source::Ipfs(ref path) => Bytes::from(path.to_string().as_bytes().to_vec()), Source::Arweave(ref base64) => Bytes::from(base64.as_bytes()), } @@ -435,15 +431,15 @@ pub struct DataSourceTemplate { pub mapping: Mapping, } -impl Into for DataSourceTemplate { - fn into(self) -> DataSourceTemplateInfo { +impl From for DataSourceTemplateInfo { + fn from(val: DataSourceTemplate) -> Self { let DataSourceTemplate { kind, network: _, name, manifest_idx, mapping, - } = self; + } = val; DataSourceTemplateInfo { api_version: mapping.api_version.clone(), diff --git a/graph/src/data_source/subgraph.rs b/graph/src/data_source/subgraph.rs index c9f01cf4890..b2a1d34f4e6 100644 --- a/graph/src/data_source/subgraph.rs +++ b/graph/src/data_source/subgraph.rs @@ -250,7 +250,7 @@ impl UnresolvedDataSource { source_manifest: &SubgraphManifest, ) -> Result<(), Error> { for entity in mapping_entities { - let type_kind = source_manifest.schema.kind_of_declared_type(&entity); + let type_kind = source_manifest.schema.kind_of_declared_type(entity); match type_kind { Some(TypeKind::Interface) => { @@ -584,15 +584,15 @@ pub struct DataSourceTemplate { pub mapping: Mapping, } -impl Into for DataSourceTemplate { - fn into(self) -> DataSourceTemplateInfo { +impl From for DataSourceTemplateInfo { + fn from(val: DataSourceTemplate) -> Self { let DataSourceTemplate { kind, network: _, name, manifest_idx, mapping, - } = self; + } = val; DataSourceTemplateInfo { api_version: mapping.api_version.clone(), diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index a9fdd99a98c..4edd1195647 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -35,9 +35,9 @@ pub enum ConnectionType { Rpc, } -impl Into<&str> for &ConnectionType { - fn into(self) -> &'static str { - match self { +impl From<&ConnectionType> for &str { + fn from(val: &ConnectionType) -> Self { + match val { ConnectionType::Firehose => "firehose", ConnectionType::Substreams => "substreams", ConnectionType::Rpc => "rpc", diff --git a/graph/src/env/amp.rs b/graph/src/env/amp.rs index ef4fff7c1dc..a6a02b194c3 100644 --- a/graph/src/env/amp.rs +++ b/graph/src/env/amp.rs @@ -50,11 +50,11 @@ impl AmpEnv { .unwrap_or(Self::DEFAULT_MAX_BUFFER_SIZE), max_block_range: raw_env .amp_max_block_range - .and_then(|mut value| { + .map(|mut value| { if value == 0 { value = usize::MAX; } - Some(value) + value }) .unwrap_or(Self::DEFAULT_MAX_BLOCK_RANGE), query_retry_min_delay: raw_env diff --git a/graph/src/env/mappings.rs b/graph/src/env/mappings.rs index 27bc5720e9b..66a01d6cb6f 100644 --- a/graph/src/env/mappings.rs +++ b/graph/src/env/mappings.rs @@ -116,7 +116,7 @@ impl TryFrom for EnvVarsMapping { max_ipfs_file_bytes: x.max_ipfs_file_bytes.0, ipfs_request_limit: x.ipfs_request_limit, ipfs_max_attempts: x.ipfs_max_attempts, - ipfs_cache_location: ipfs_cache_location, + ipfs_cache_location, allow_non_deterministic_ipfs: x.allow_non_deterministic_ipfs.0, disable_declared_calls: x.disable_declared_calls.0, store_errors_are_nondeterministic: 
x.store_errors_are_nondeterministic.0, diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 6d51b04ba34..efec3d693ae 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -336,7 +336,7 @@ impl FromStr for ZeroToOneF64 { fn from_str(s: &str) -> Result { let f = s.parse::()?; - if f < 0.0 || f > 1.0 { + if !(0.0..=1.0).contains(&f) { bail!("invalid value: {s} must be between 0 and 1"); } else { Ok(ZeroToOneF64(f)) diff --git a/graph/src/ext/mod.rs b/graph/src/ext/mod.rs index 4e9773f7bd3..375e8f62340 100644 --- a/graph/src/ext/mod.rs +++ b/graph/src/ext/mod.rs @@ -1,2 +1,2 @@ -///! Extension traits for external types. +//! Extension traits for external types. pub mod futures; diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index b05390154ed..9539b46ebaa 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -1,1056 +1,1060 @@ -use crate::firehose::codec::InfoRequest; -use crate::firehose::fetch_client::FetchClient; -use crate::firehose::interceptors::AuthInterceptor; -use crate::{ - blockchain::{ - block_stream::FirehoseCursor, Block as BlockchainBlock, BlockPtr, ChainIdentifier, - }, - cheap_clone::CheapClone, - components::store::BlockNumber, - endpoint::{ConnectionType, EndpointMetrics, RequestLabels}, - env::ENV_VARS, - firehose::decode_firehose_block, - prelude::{anyhow, debug, DeploymentHash}, - substreams_rpc, -}; -use anyhow::Context; -use async_trait::async_trait; -use futures03::{StreamExt, TryStreamExt}; -use http::uri::{Scheme, Uri}; -use itertools::Itertools; -use slog::{error, info, trace, Logger}; -use std::{collections::HashMap, fmt::Display, ops::ControlFlow, sync::Arc, time::Duration}; -use tokio::sync::OnceCell; -use tonic::codegen::InterceptedService; -use tonic::{ - codegen::CompressionEncoding, - metadata::{Ascii, MetadataKey, MetadataValue}, - transport::{Channel, ClientTlsConfig}, - Request, -}; - -use super::{codec as firehose, interceptors::MetricsInterceptor, stream_client::StreamClient}; -use crate::components::network_provider::ChainName; -use crate::components::network_provider::NetworkDetails; -use crate::components::network_provider::ProviderCheckStrategy; -use crate::components::network_provider::ProviderManager; -use crate::components::network_provider::ProviderName; -use crate::prelude::retry; - -/// This is constant because we found this magic number of connections after -/// which the grpc connections start to hang. -/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879 -pub const SUBGRAPHS_PER_CONN: usize = 100; - -const LOW_VALUE_THRESHOLD: usize = 10; -const LOW_VALUE_USED_PERCENTAGE: usize = 50; -const HIGH_VALUE_USED_PERCENTAGE: usize = 80; - -#[derive(Debug)] -pub struct FirehoseEndpoint { - pub provider: ProviderName, - pub auth: AuthInterceptor, - pub filters_enabled: bool, - pub compression_enabled: bool, - pub subgraph_limit: SubgraphLimit, - is_substreams: bool, - endpoint_metrics: Arc, - channel: Channel, - - /// The endpoint info is not intended to change very often, as it only contains the - /// endpoint's metadata, so caching it avoids sending unnecessary network requests. 
- info_response: OnceCell, -} - -#[derive(Debug)] -pub struct ConnectionHeaders(HashMap, MetadataValue>); - -#[async_trait] -impl NetworkDetails for Arc { - fn provider_name(&self) -> ProviderName { - self.provider.clone() - } - - async fn chain_identifier(&self) -> anyhow::Result { - let genesis_block_ptr = self.clone().info().await?.genesis_block_ptr()?; - - Ok(ChainIdentifier { - net_version: "0".to_string(), - genesis_block_hash: genesis_block_ptr.hash, - }) - } - - async fn provides_extended_blocks(&self) -> anyhow::Result { - let info = self.clone().info().await?; - let pred = if info.chain_name.contains("arbitrum-one") - || info.chain_name.contains("optimism-mainnet") - { - |x: &String| x.starts_with("extended") || x == "hybrid" - } else { - |x: &String| x == "extended" - }; - - Ok(info.block_features.iter().any(pred)) - } -} - -impl ConnectionHeaders { - pub fn new() -> Self { - Self(HashMap::new()) - } - pub fn with_deployment(mut self, deployment: DeploymentHash) -> Self { - if let Ok(deployment) = deployment.parse() { - self.0 - .insert("x-deployment-id".parse().unwrap(), deployment); - } - self - } - pub fn add_to_request(&self, request: T) -> Request { - let mut request = Request::new(request); - self.0.iter().for_each(|(k, v)| { - request.metadata_mut().insert(k, v.clone()); - }); - request - } -} - -#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] -pub enum AvailableCapacity { - Unavailable, - Low, - High, -} - -// TODO: Find a new home for this type. -#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] -pub enum SubgraphLimit { - Disabled, - Limit(usize), - Unlimited, -} - -impl SubgraphLimit { - pub fn get_capacity(&self, current: usize) -> AvailableCapacity { - match self { - // Limit(0) should probably be Disabled but just in case - SubgraphLimit::Disabled | SubgraphLimit::Limit(0) => AvailableCapacity::Unavailable, - SubgraphLimit::Limit(total) => { - let total = *total; - if current >= total { - return AvailableCapacity::Unavailable; - } - - let used_percent = current * 100 / total; - - // If total is low it can vary very quickly so we can consider 50% as the low threshold - // to make selection more reliable - let threshold_percent = if total <= LOW_VALUE_THRESHOLD { - LOW_VALUE_USED_PERCENTAGE - } else { - HIGH_VALUE_USED_PERCENTAGE - }; - - if used_percent < threshold_percent { - return AvailableCapacity::High; - } - - AvailableCapacity::Low - } - _ => AvailableCapacity::High, - } - } - - pub fn has_capacity(&self, current: usize) -> bool { - match self { - SubgraphLimit::Unlimited => true, - SubgraphLimit::Limit(limit) => limit > ¤t, - SubgraphLimit::Disabled => false, - } - } -} - -impl Display for FirehoseEndpoint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(self.provider.as_str(), f) - } -} - -impl FirehoseEndpoint { - pub fn new>( - provider: S, - url: S, - token: Option, - key: Option, - filters_enabled: bool, - compression_enabled: bool, - subgraph_limit: SubgraphLimit, - endpoint_metrics: Arc, - is_substreams_endpoint: bool, - ) -> Self { - let uri = url - .as_ref() - .parse::() - .expect("the url should have been validated by now, so it is a valid Uri"); - - let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() { - "http" => Channel::builder(uri), - "https" => { - let mut tls = ClientTlsConfig::new(); - tls = tls.with_native_roots(); - - Channel::builder(uri) - .tls_config(tls) - .expect("TLS config on this host is invalid") - } - _ => panic!("invalid uri scheme for firehose 
endpoint"), - }; - - // These tokens come from the config so they have to be ascii. - let token: Option> = token - .map_or(Ok(None), |token| { - let bearer_token = format!("bearer {}", token); - bearer_token.parse::>().map(Some) - }) - .expect("Firehose token is invalid"); - - let key: Option> = key - .map_or(Ok(None), |key| { - key.parse::>().map(Some) - }) - .expect("Firehose key is invalid"); - - // Note on the connection window size: We run multiple block streams on a same connection, - // and a problematic subgraph with a stalled block stream might consume the entire window - // capacity for its http2 stream and never release it. If there are enough stalled block - // streams to consume all the capacity on the http2 connection, then _all_ subgraphs using - // this same http2 connection will stall. At a default stream window size of 2^16, setting - // the connection window size to the maximum of 2^31 allows for 2^15 streams without any - // contention, which is effectively unlimited for normal graph node operation. - // - // Note: Do not set `http2_keep_alive_interval` or `http2_adaptive_window`, as these will - // send ping frames, and many cloud load balancers will drop connections that frequently - // send pings. - let endpoint = endpoint_builder - .initial_connection_window_size(Some((1 << 31) - 1)) - .connect_timeout(Duration::from_secs(10)) - .tcp_keepalive(Some(Duration::from_secs(15))) - // Timeout on each request, so the timeout to estabilish each 'Blocks' stream. - .timeout(Duration::from_secs(120)); - - let subgraph_limit = match subgraph_limit { - // See the comment on the constant - SubgraphLimit::Unlimited => SubgraphLimit::Limit(SUBGRAPHS_PER_CONN), - // This is checked when parsing from config but doesn't hurt to be defensive. - SubgraphLimit::Limit(limit) => SubgraphLimit::Limit(limit.min(SUBGRAPHS_PER_CONN)), - l => l, - }; - - FirehoseEndpoint { - provider: provider.as_ref().into(), - channel: endpoint.connect_lazy(), - auth: AuthInterceptor { token, key }, - filters_enabled, - compression_enabled, - subgraph_limit, - endpoint_metrics, - info_response: OnceCell::new(), - is_substreams: is_substreams_endpoint, - } - } - - pub fn current_error_count(&self) -> u64 { - self.endpoint_metrics.get_count(&self.provider) - } - - // we need to -1 because there will always be a reference - // inside FirehoseEndpoints that is not used (is always cloned). 
- pub fn get_capacity(self: &Arc) -> AvailableCapacity { - self.subgraph_limit - .get_capacity(Arc::strong_count(self).saturating_sub(1)) - } - - fn metrics_interceptor(&self) -> MetricsInterceptor { - MetricsInterceptor { - metrics: self.endpoint_metrics.cheap_clone(), - service: self.channel.cheap_clone(), - labels: RequestLabels { - provider: self.provider.clone().into(), - req_type: "unknown".into(), - conn_type: ConnectionType::Firehose, - }, - } - } - - fn max_message_size(&self) -> usize { - 1024 * 1024 * ENV_VARS.firehose_grpc_max_decode_size_mb - } - - fn new_fetch_client( - &self, - ) -> FetchClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = FetchClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - fn new_stream_client( - &self, - ) -> StreamClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = StreamClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - fn new_firehose_info_client(&self) -> crate::firehose::endpoint_info::Client { - let metrics = self.metrics_interceptor(); - let auth = self.auth.clone(); - - let mut client = crate::firehose::endpoint_info::Client::new(metrics, auth); - - if self.compression_enabled { - client = client.with_compression(); - } - - client = client.with_max_message_size(self.max_message_size()); - client - } - - fn new_substreams_info_client( - &self, - ) -> crate::substreams_rpc::endpoint_info_client::EndpointInfoClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = - crate::substreams_rpc::endpoint_info_client::EndpointInfoClient::with_interceptor( - metrics, - self.auth.clone(), - ) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - fn new_substreams_streaming_client( - &self, - ) -> substreams_rpc::stream_client::StreamClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = substreams_rpc::stream_client::StreamClient::with_interceptor( - metrics, - self.auth.clone(), - ) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - pub async fn get_block( - &self, - cursor: FirehoseCursor, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for cursor {}", cursor; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some(firehose::single_block_request::Reference::Cursor( - 
firehose::single_block_request::Cursor { - cursor: cursor.to_string(), - }, - )), - }; - - let mut client = self.new_fetch_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async fn get_block_by_ptr( - &self, - ptr: &BlockPtr, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for ptr {}", ptr; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some( - firehose::single_block_request::Reference::BlockHashAndNumber( - firehose::single_block_request::BlockHashAndNumber { - hash: ptr.hash.to_string(), - num: ptr.number as u64, - }, - ), - ), - }; - - let mut client = self.new_fetch_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async fn get_block_by_ptr_with_retry( - self: Arc, - ptr: &BlockPtr, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - let retry_log_message = format!("get_block_by_ptr for block {}", ptr); - let endpoint = self.cheap_clone(); - let logger = logger.cheap_clone(); - let ptr_for_retry = ptr.clone(); - - retry(retry_log_message, &logger) - .limit(ENV_VARS.firehose_block_fetch_retry_limit) - .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) - .run(move || { - let endpoint = endpoint.cheap_clone(); - let logger = logger.cheap_clone(); - let ptr = ptr_for_retry.clone(); - async move { - endpoint - .get_block_by_ptr::(&ptr, &logger) - .await - .context(format!( - "Failed to fetch block by ptr {} from firehose", - ptr - )) - } - }) - .await - .map_err(move |e| { - anyhow::anyhow!("Failed to fetch block by ptr {} from firehose: {}", ptr, e) - }) - } - - async fn get_block_by_number(&self, number: u64, logger: &Logger) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - trace!( - logger, - "Connecting to firehose to retrieve block for number {}", number; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some(firehose::single_block_request::Reference::BlockNumber( - firehose::single_block_request::BlockNumber { num: number }, - )), - }; - - let mut client = self.new_fetch_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async fn get_block_by_number_with_retry( - self: Arc, - number: u64, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - let retry_log_message = format!("get_block_by_number for block {}", number); - let endpoint = self.cheap_clone(); - let logger = logger.cheap_clone(); - - retry(retry_log_message, &logger) - .limit(ENV_VARS.firehose_block_fetch_retry_limit) - .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) - .run(move || { - let endpoint = endpoint.cheap_clone(); - let logger = logger.cheap_clone(); - async move { - endpoint - .get_block_by_number::(number, &logger) - .await - .context(format!( - "Failed to fetch block by number {} from firehose", - number 
- )) - } - }) - .await - .map_err(|e| { - anyhow::anyhow!( - "Failed to fetch block by number {} from firehose: {}", - number, - e - ) - }) - } - - pub async fn load_blocks_by_numbers( - self: Arc, - numbers: Vec, - logger: &Logger, - ) -> Result, anyhow::Error> - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - let logger = logger.clone(); - let logger_for_error = logger.clone(); - - let blocks_stream = futures03::stream::iter(numbers) - .map(move |number| { - let e = self.cheap_clone(); - let l = logger.clone(); - async move { e.get_block_by_number_with_retry::(number, &l).await } - }) - .buffered(ENV_VARS.firehose_block_batch_size); - - let blocks = blocks_stream.try_collect::>().await.map_err(|e| { - error!( - logger_for_error, - "Failed to load blocks from firehose: {}", e; - ); - anyhow::format_err!("failed to load blocks from firehose: {}", e) - })?; - - Ok(blocks) - } - - pub async fn genesis_block_ptr(&self, logger: &Logger) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - info!(logger, "Requesting genesis block from firehose"; - "provider" => self.provider.as_str()); - - // We use 0 here to mean the genesis block of the chain. Firehose - // when seeing start block number 0 will always return the genesis - // block of the chain, even if the chain's start block number is - // not starting at block #0. - self.block_ptr_for_number::(logger, 0).await - } - - pub async fn block_ptr_for_number( - &self, - logger: &Logger, - number: BlockNumber, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for number {}", number; - "provider" => self.provider.as_str(), - ); - - let mut client = self.new_stream_client(); - - // The trick is the following. - // - // Firehose `start_block_num` and `stop_block_num` are both inclusive, so we specify - // the block we are looking for in both. - // - // Now, the remaining question is how the block from the canonical chain is picked. We - // leverage the fact that Firehose will always send the block in the longuest chain as the - // last message of this request. - // - // That way, we either get the final block if the block is now in a final segment of the - // chain (or probabilisticly if not finality concept exists for the chain). Or we get the - // block that is in the longuest chain according to Firehose. - let response_stream = client - .blocks(firehose::Request { - start_block_num: number as i64, - stop_block_num: number as u64, - final_blocks_only: false, - ..Default::default() - }) - .await?; - - let mut block_stream = response_stream.into_inner(); - - debug!(logger, "Retrieving block(s) from firehose"; - "provider" => self.provider.as_str()); - - let mut latest_received_block: Option = None; - while let Some(message) = block_stream.next().await { - match message { - Ok(v) => { - let block = decode_firehose_block::(&v)?.ptr(); - - match latest_received_block { - None => { - latest_received_block = Some(block); - } - Some(ref actual_ptr) => { - // We want to receive all events related to a specific block number, - // however, in some circumstances, it seems Firehose would not stop sending - // blocks (`start_block_num: 0 and stop_block_num: 0` on NEAR seems to trigger - // this). 
- // - // To prevent looping infinitely, we stop as soon as a new received block's - // number is higher than the latest received block's number, in which case it - // means it's an event for a block we are not interested in. - if block.number > actual_ptr.number { - break; - } - - latest_received_block = Some(block); - } - } - } - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - }; - } - - match latest_received_block { - Some(block_ptr) => Ok(block_ptr), - None => Err(anyhow::format_err!( - "Firehose should have returned at least one block for request" - )), - } - } - - pub async fn stream_blocks( - self: Arc, - request: firehose::Request, - headers: &ConnectionHeaders, - ) -> Result, anyhow::Error> { - let mut client = self.new_stream_client(); - let request = headers.add_to_request(request); - let response_stream = client.blocks(request).await?; - let block_stream = response_stream.into_inner(); - - Ok(block_stream) - } - - pub async fn substreams( - self: Arc, - request: substreams_rpc::Request, - headers: &ConnectionHeaders, - ) -> Result, anyhow::Error> { - let mut client = self.new_substreams_streaming_client(); - let request = headers.add_to_request(request); - let response_stream = client.blocks(request).await?; - let block_stream = response_stream.into_inner(); - - Ok(block_stream) - } - - pub async fn info( - self: Arc, - ) -> Result { - let endpoint = self.cheap_clone(); - - self.info_response - .get_or_try_init(move || async move { - if endpoint.is_substreams { - let mut client = endpoint.new_substreams_info_client(); - - client - .info(InfoRequest {}) - .await - .map(|r| r.into_inner()) - .map_err(anyhow::Error::from) - .and_then(|e| e.try_into()) - } else { - let mut client = endpoint.new_firehose_info_client(); - - client.info().await - } - }) - .await - .map(ToOwned::to_owned) - } -} - -#[derive(Debug)] -pub struct FirehoseEndpoints(ChainName, ProviderManager>); - -impl FirehoseEndpoints { - pub fn for_testing(adapters: Vec>) -> Self { - let chain_name: ChainName = "testing".into(); - - Self( - chain_name.clone(), - ProviderManager::new( - crate::log::discard(), - [(chain_name, adapters)], - ProviderCheckStrategy::MarkAsValid, - ), - ) - } - - pub fn new( - chain_name: ChainName, - provider_manager: ProviderManager>, - ) -> Self { - Self(chain_name, provider_manager) - } - - pub fn len(&self) -> usize { - self.1.len(&self.0) - } - - /// This function will attempt to grab an endpoint based on the Lowest error count - // with high capacity available. If an adapter cannot be found `endpoint` will - // return an error. - pub async fn endpoint(&self) -> anyhow::Result> { - let endpoint = self - .1 - .providers(&self.0) - .await? - .sorted_by_key(|x| x.current_error_count()) - .try_fold(None, |acc, adapter| { - match adapter.get_capacity() { - AvailableCapacity::Unavailable => ControlFlow::Continue(acc), - AvailableCapacity::Low => match acc { - Some(_) => ControlFlow::Continue(acc), - None => ControlFlow::Continue(Some(adapter)), - }, - // This means that if all adapters with low/no errors are low capacity - // we will retry the high capacity that has errors, at this point - // any other available with no errors are almost at their limit. 
- AvailableCapacity::High => ControlFlow::Break(Some(adapter)), - } - }); - - match endpoint { - ControlFlow::Continue(adapter) - | ControlFlow::Break(adapter) => - adapter.cloned().ok_or(anyhow!("unable to get a connection, increase the firehose conn_pool_size or limit for the node")) - } - } -} - -#[cfg(test)] -mod test { - use std::{mem, sync::Arc}; - - use slog::{o, Discard, Logger}; - - use super::*; - use crate::components::metrics::MetricsRegistry; - use crate::endpoint::EndpointMetrics; - use crate::firehose::SubgraphLimit; - - #[crate::test] - async fn firehose_endpoint_errors() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - Arc::new(EndpointMetrics::mock()), - false, - ))]; - - let endpoints = FirehoseEndpoints::for_testing(endpoint); - - let mut keep = vec![]; - for _i in 0..SUBGRAPHS_PER_CONN { - keep.push(endpoints.endpoint().await.unwrap()); - } - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - mem::drop(keep); - endpoints.endpoint().await.unwrap(); - - let endpoints = FirehoseEndpoints::for_testing(vec![]); - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("unable to get a connection")); - } - - #[crate::test] - async fn firehose_endpoint_with_limit() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Limit(2), - Arc::new(EndpointMetrics::mock()), - false, - ))]; - - let endpoints = FirehoseEndpoints::for_testing(endpoint); - - let mut keep = vec![]; - for _ in 0..2 { - keep.push(endpoints.endpoint().await.unwrap()); - } - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - mem::drop(keep); - endpoints.endpoint().await.unwrap(); - } - - #[crate::test] - async fn firehose_endpoint_no_traffic() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Disabled, - Arc::new(EndpointMetrics::mock()), - false, - ))]; - - let endpoints = FirehoseEndpoints::for_testing(endpoint); - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - } - - #[crate::test] - async fn firehose_endpoint_selection() { - let logger = Logger::root(Discard, o!()); - let endpoint_metrics = Arc::new(EndpointMetrics::new( - logger, - &["high_error", "low availability", "high availability"], - Arc::new(MetricsRegistry::mock()), - )); - - let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( - "high_error".to_string(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - false, - )); - let high_error_adapter2 = Arc::new(FirehoseEndpoint::new( - "high_error".to_string(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - false, - )); - let low_availability = Arc::new(FirehoseEndpoint::new( - "low availability".to_string(), - "http://127.0.0.2".to_string(), - None, - None, - false, - false, - SubgraphLimit::Limit(2), - endpoint_metrics.clone(), - false, - )); - let high_availability = Arc::new(FirehoseEndpoint::new( - "high availability".to_string(), - "http://127.0.0.3".to_string(), - None, - None, - false, - false, - 
SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - false, - )); - - endpoint_metrics.report_for_test(&high_error_adapter1.provider, false); - - let endpoints = FirehoseEndpoints::for_testing(vec![ - high_error_adapter1.clone(), - high_error_adapter2.clone(), - low_availability.clone(), - high_availability.clone(), - ]); - - let res = endpoints.endpoint().await.unwrap(); - assert_eq!(res.provider, high_availability.provider); - mem::drop(endpoints); - - // Removing high availability without errors should fallback to low availability - let endpoints = FirehoseEndpoints::for_testing( - vec![ - high_error_adapter1.clone(), - high_error_adapter2, - low_availability.clone(), - high_availability.clone(), - ] - .into_iter() - .filter(|a| a.provider_name() != high_availability.provider) - .collect(), - ); - - // Ensure we're in a low capacity situation - assert_eq!(low_availability.get_capacity(), AvailableCapacity::Low); - - // In the scenario where the only high level adapter has errors we keep trying that - // because the others will be low or unavailable - let res = endpoints.endpoint().await.unwrap(); - // This will match both high error adapters - assert_eq!(res.provider, high_error_adapter1.provider); - } - - #[test] - fn subgraph_limit_calculates_availability() { - #[derive(Debug)] - struct Case { - limit: SubgraphLimit, - current: usize, - capacity: AvailableCapacity, - } - - let cases = vec![ - Case { - limit: SubgraphLimit::Disabled, - current: 20, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(0), - current: 20, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(0), - current: 0, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 80, - capacity: AvailableCapacity::Low, - }, - Case { - limit: SubgraphLimit::Limit(2), - current: 1, - capacity: AvailableCapacity::Low, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 19, - capacity: AvailableCapacity::High, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 100, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 99, - capacity: AvailableCapacity::Low, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 101, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Unlimited, - current: 1000, - capacity: AvailableCapacity::High, - }, - Case { - limit: SubgraphLimit::Unlimited, - current: 0, - capacity: AvailableCapacity::High, - }, - ]; - - for c in cases { - let res = c.limit.get_capacity(c.current); - assert_eq!(res, c.capacity, "{:#?}", c); - } - } - - #[test] - fn available_capacity_ordering() { - assert_eq!( - AvailableCapacity::Unavailable < AvailableCapacity::Low, - true - ); - assert_eq!( - AvailableCapacity::Unavailable < AvailableCapacity::High, - true - ); - assert_eq!(AvailableCapacity::Low < AvailableCapacity::High, true); - } -} +use crate::firehose::codec::InfoRequest; +use crate::firehose::fetch_client::FetchClient; +use crate::firehose::interceptors::AuthInterceptor; +use crate::{ + blockchain::{ + block_stream::FirehoseCursor, Block as BlockchainBlock, BlockPtr, ChainIdentifier, + }, + cheap_clone::CheapClone, + components::store::BlockNumber, + endpoint::{ConnectionType, EndpointMetrics, RequestLabels}, + env::ENV_VARS, + firehose::decode_firehose_block, + prelude::{anyhow, debug, DeploymentHash}, + substreams_rpc, +}; +use anyhow::Context; +use async_trait::async_trait; +use 
futures03::{StreamExt, TryStreamExt}; +use http::uri::{Scheme, Uri}; +use itertools::Itertools; +use slog::{error, info, trace, Logger}; +use std::{collections::HashMap, fmt::Display, ops::ControlFlow, sync::Arc, time::Duration}; +use tokio::sync::OnceCell; +use tonic::codegen::InterceptedService; +use tonic::{ + codegen::CompressionEncoding, + metadata::{Ascii, MetadataKey, MetadataValue}, + transport::{Channel, ClientTlsConfig}, + Request, +}; + +use super::{codec as firehose, interceptors::MetricsInterceptor, stream_client::StreamClient}; +use crate::components::network_provider::ChainName; +use crate::components::network_provider::NetworkDetails; +use crate::components::network_provider::ProviderCheckStrategy; +use crate::components::network_provider::ProviderManager; +use crate::components::network_provider::ProviderName; +use crate::prelude::retry; + +/// This is constant because we found this magic number of connections after +/// which the grpc connections start to hang. +/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879 +pub const SUBGRAPHS_PER_CONN: usize = 100; + +const LOW_VALUE_THRESHOLD: usize = 10; +const LOW_VALUE_USED_PERCENTAGE: usize = 50; +const HIGH_VALUE_USED_PERCENTAGE: usize = 80; + +#[derive(Debug)] +pub struct FirehoseEndpoint { + pub provider: ProviderName, + pub auth: AuthInterceptor, + pub filters_enabled: bool, + pub compression_enabled: bool, + pub subgraph_limit: SubgraphLimit, + is_substreams: bool, + endpoint_metrics: Arc, + channel: Channel, + + /// The endpoint info is not intended to change very often, as it only contains the + /// endpoint's metadata, so caching it avoids sending unnecessary network requests. + info_response: OnceCell, +} + +#[derive(Debug)] +pub struct ConnectionHeaders(HashMap, MetadataValue>); + +#[async_trait] +impl NetworkDetails for Arc { + fn provider_name(&self) -> ProviderName { + self.provider.clone() + } + + async fn chain_identifier(&self) -> anyhow::Result { + let genesis_block_ptr = self.clone().info().await?.genesis_block_ptr()?; + + Ok(ChainIdentifier { + net_version: "0".to_string(), + genesis_block_hash: genesis_block_ptr.hash, + }) + } + + async fn provides_extended_blocks(&self) -> anyhow::Result { + let info = self.clone().info().await?; + let pred = if info.chain_name.contains("arbitrum-one") + || info.chain_name.contains("optimism-mainnet") + { + |x: &String| x.starts_with("extended") || x == "hybrid" + } else { + |x: &String| x == "extended" + }; + + Ok(info.block_features.iter().any(pred)) + } +} + +impl Default for ConnectionHeaders { + fn default() -> Self { + Self::new() + } +} + +impl ConnectionHeaders { + pub fn new() -> Self { + Self(HashMap::new()) + } + pub fn with_deployment(mut self, deployment: DeploymentHash) -> Self { + if let Ok(deployment) = deployment.parse() { + self.0 + .insert("x-deployment-id".parse().unwrap(), deployment); + } + self + } + pub fn add_to_request(&self, request: T) -> Request { + let mut request = Request::new(request); + self.0.iter().for_each(|(k, v)| { + request.metadata_mut().insert(k, v.clone()); + }); + request + } +} + +#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] +pub enum AvailableCapacity { + Unavailable, + Low, + High, +} + +// TODO: Find a new home for this type. 
+#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] +pub enum SubgraphLimit { + Disabled, + Limit(usize), + Unlimited, +} + +impl SubgraphLimit { + pub fn get_capacity(&self, current: usize) -> AvailableCapacity { + match self { + // Limit(0) should probably be Disabled but just in case + SubgraphLimit::Disabled | SubgraphLimit::Limit(0) => AvailableCapacity::Unavailable, + SubgraphLimit::Limit(total) => { + let total = *total; + if current >= total { + return AvailableCapacity::Unavailable; + } + + let used_percent = current * 100 / total; + + // If total is low it can vary very quickly so we can consider 50% as the low threshold + // to make selection more reliable + let threshold_percent = if total <= LOW_VALUE_THRESHOLD { + LOW_VALUE_USED_PERCENTAGE + } else { + HIGH_VALUE_USED_PERCENTAGE + }; + + if used_percent < threshold_percent { + return AvailableCapacity::High; + } + + AvailableCapacity::Low + } + _ => AvailableCapacity::High, + } + } + + pub fn has_capacity(&self, current: usize) -> bool { + match self { + SubgraphLimit::Unlimited => true, + SubgraphLimit::Limit(limit) => limit > ¤t, + SubgraphLimit::Disabled => false, + } + } +} + +impl Display for FirehoseEndpoint { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(self.provider.as_str(), f) + } +} + +impl FirehoseEndpoint { + pub fn new>( + provider: S, + url: S, + token: Option, + key: Option, + filters_enabled: bool, + compression_enabled: bool, + subgraph_limit: SubgraphLimit, + endpoint_metrics: Arc, + is_substreams_endpoint: bool, + ) -> Self { + let uri = url + .as_ref() + .parse::() + .expect("the url should have been validated by now, so it is a valid Uri"); + + let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() { + "http" => Channel::builder(uri), + "https" => { + let mut tls = ClientTlsConfig::new(); + tls = tls.with_native_roots(); + + Channel::builder(uri) + .tls_config(tls) + .expect("TLS config on this host is invalid") + } + _ => panic!("invalid uri scheme for firehose endpoint"), + }; + + // These tokens come from the config so they have to be ascii. + let token: Option> = token + .map_or(Ok(None), |token| { + let bearer_token = format!("bearer {}", token); + bearer_token.parse::>().map(Some) + }) + .expect("Firehose token is invalid"); + + let key: Option> = key + .map_or(Ok(None), |key| { + key.parse::>().map(Some) + }) + .expect("Firehose key is invalid"); + + // Note on the connection window size: We run multiple block streams on a same connection, + // and a problematic subgraph with a stalled block stream might consume the entire window + // capacity for its http2 stream and never release it. If there are enough stalled block + // streams to consume all the capacity on the http2 connection, then _all_ subgraphs using + // this same http2 connection will stall. At a default stream window size of 2^16, setting + // the connection window size to the maximum of 2^31 allows for 2^15 streams without any + // contention, which is effectively unlimited for normal graph node operation. + // + // Note: Do not set `http2_keep_alive_interval` or `http2_adaptive_window`, as these will + // send ping frames, and many cloud load balancers will drop connections that frequently + // send pings. + let endpoint = endpoint_builder + .initial_connection_window_size(Some((1 << 31) - 1)) + .connect_timeout(Duration::from_secs(10)) + .tcp_keepalive(Some(Duration::from_secs(15))) + // Timeout on each request, so the timeout to estabilish each 'Blocks' stream. 
+ .timeout(Duration::from_secs(120)); + + let subgraph_limit = match subgraph_limit { + // See the comment on the constant + SubgraphLimit::Unlimited => SubgraphLimit::Limit(SUBGRAPHS_PER_CONN), + // This is checked when parsing from config but doesn't hurt to be defensive. + SubgraphLimit::Limit(limit) => SubgraphLimit::Limit(limit.min(SUBGRAPHS_PER_CONN)), + l => l, + }; + + FirehoseEndpoint { + provider: provider.as_ref().into(), + channel: endpoint.connect_lazy(), + auth: AuthInterceptor { token, key }, + filters_enabled, + compression_enabled, + subgraph_limit, + endpoint_metrics, + info_response: OnceCell::new(), + is_substreams: is_substreams_endpoint, + } + } + + pub fn current_error_count(&self) -> u64 { + self.endpoint_metrics.get_count(&self.provider) + } + + // we need to -1 because there will always be a reference + // inside FirehoseEndpoints that is not used (is always cloned). + pub fn get_capacity(self: &Arc) -> AvailableCapacity { + self.subgraph_limit + .get_capacity(Arc::strong_count(self).saturating_sub(1)) + } + + fn metrics_interceptor(&self) -> MetricsInterceptor { + MetricsInterceptor { + metrics: self.endpoint_metrics.cheap_clone(), + service: self.channel.cheap_clone(), + labels: RequestLabels { + provider: self.provider.clone(), + req_type: "unknown".into(), + conn_type: ConnectionType::Firehose, + }, + } + } + + fn max_message_size(&self) -> usize { + 1024 * 1024 * ENV_VARS.firehose_grpc_max_decode_size_mb + } + + fn new_fetch_client( + &self, + ) -> FetchClient< + InterceptedService, impl tonic::service::Interceptor>, + > { + let metrics = self.metrics_interceptor(); + + let mut client = FetchClient::with_interceptor(metrics, self.auth.clone()) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + client = client.max_decoding_message_size(self.max_message_size()); + + client + } + + fn new_stream_client( + &self, + ) -> StreamClient< + InterceptedService, impl tonic::service::Interceptor>, + > { + let metrics = self.metrics_interceptor(); + + let mut client = StreamClient::with_interceptor(metrics, self.auth.clone()) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + client = client.max_decoding_message_size(self.max_message_size()); + + client + } + + fn new_firehose_info_client(&self) -> crate::firehose::endpoint_info::Client { + let metrics = self.metrics_interceptor(); + let auth = self.auth.clone(); + + let mut client = crate::firehose::endpoint_info::Client::new(metrics, auth); + + if self.compression_enabled { + client = client.with_compression(); + } + + client = client.with_max_message_size(self.max_message_size()); + client + } + + fn new_substreams_info_client( + &self, + ) -> crate::substreams_rpc::endpoint_info_client::EndpointInfoClient< + InterceptedService, impl tonic::service::Interceptor>, + > { + let metrics = self.metrics_interceptor(); + + let mut client = + crate::substreams_rpc::endpoint_info_client::EndpointInfoClient::with_interceptor( + metrics, + self.auth.clone(), + ) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + client = client.max_decoding_message_size(self.max_message_size()); + + client + } + + fn new_substreams_streaming_client( + &self, + ) -> substreams_rpc::stream_client::StreamClient< + InterceptedService, impl 
tonic::service::Interceptor>, + > { + let metrics = self.metrics_interceptor(); + + let mut client = substreams_rpc::stream_client::StreamClient::with_interceptor( + metrics, + self.auth.clone(), + ) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + client = client.max_decoding_message_size(self.max_message_size()); + + client + } + + pub async fn get_block( + &self, + cursor: FirehoseCursor, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for cursor {}", cursor; + "provider" => self.provider.as_str(), + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference: Some(firehose::single_block_request::Reference::Cursor( + firehose::single_block_request::Cursor { + cursor: cursor.to_string(), + }, + )), + }; + + let mut client = self.new_fetch_client(); + match client.block(req).await { + Ok(v) => Ok(M::decode( + v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => Err(anyhow::format_err!("firehose error {}", e)), + } + } + + pub async fn get_block_by_ptr( + &self, + ptr: &BlockPtr, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for ptr {}", ptr; + "provider" => self.provider.as_str(), + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference: Some( + firehose::single_block_request::Reference::BlockHashAndNumber( + firehose::single_block_request::BlockHashAndNumber { + hash: ptr.hash.to_string(), + num: ptr.number as u64, + }, + ), + ), + }; + + let mut client = self.new_fetch_client(); + match client.block(req).await { + Ok(v) => Ok(M::decode( + v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => Err(anyhow::format_err!("firehose error {}", e)), + } + } + + pub async fn get_block_by_ptr_with_retry( + self: Arc, + ptr: &BlockPtr, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + let retry_log_message = format!("get_block_by_ptr for block {}", ptr); + let endpoint = self.cheap_clone(); + let logger = logger.cheap_clone(); + let ptr_for_retry = ptr.clone(); + + retry(retry_log_message, &logger) + .limit(ENV_VARS.firehose_block_fetch_retry_limit) + .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) + .run(move || { + let endpoint = endpoint.cheap_clone(); + let logger = logger.cheap_clone(); + let ptr = ptr_for_retry.clone(); + async move { + endpoint + .get_block_by_ptr::(&ptr, &logger) + .await + .context(format!( + "Failed to fetch block by ptr {} from firehose", + ptr + )) + } + }) + .await + .map_err(move |e| { + anyhow::anyhow!("Failed to fetch block by ptr {} from firehose: {}", ptr, e) + }) + } + + async fn get_block_by_number(&self, number: u64, logger: &Logger) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + trace!( + logger, + "Connecting to firehose to retrieve block for number {}", number; + "provider" => self.provider.as_str(), + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference: Some(firehose::single_block_request::Reference::BlockNumber( + firehose::single_block_request::BlockNumber { num: number }, + )), + }; + + let mut client = self.new_fetch_client(); + match client.block(req).await { + Ok(v) => Ok(M::decode( + 
v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => Err(anyhow::format_err!("firehose error {}", e)), + } + } + + pub async fn get_block_by_number_with_retry( + self: Arc, + number: u64, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + let retry_log_message = format!("get_block_by_number for block {}", number); + let endpoint = self.cheap_clone(); + let logger = logger.cheap_clone(); + + retry(retry_log_message, &logger) + .limit(ENV_VARS.firehose_block_fetch_retry_limit) + .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) + .run(move || { + let endpoint = endpoint.cheap_clone(); + let logger = logger.cheap_clone(); + async move { + endpoint + .get_block_by_number::(number, &logger) + .await + .context(format!( + "Failed to fetch block by number {} from firehose", + number + )) + } + }) + .await + .map_err(|e| { + anyhow::anyhow!( + "Failed to fetch block by number {} from firehose: {}", + number, + e + ) + }) + } + + pub async fn load_blocks_by_numbers( + self: Arc, + numbers: Vec, + logger: &Logger, + ) -> Result, anyhow::Error> + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + let logger = logger.clone(); + let logger_for_error = logger.clone(); + + let blocks_stream = futures03::stream::iter(numbers) + .map(move |number| { + let e = self.cheap_clone(); + let l = logger.clone(); + async move { e.get_block_by_number_with_retry::(number, &l).await } + }) + .buffered(ENV_VARS.firehose_block_batch_size); + + let blocks = blocks_stream.try_collect::>().await.map_err(|e| { + error!( + logger_for_error, + "Failed to load blocks from firehose: {}", e; + ); + anyhow::format_err!("failed to load blocks from firehose: {}", e) + })?; + + Ok(blocks) + } + + pub async fn genesis_block_ptr(&self, logger: &Logger) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + info!(logger, "Requesting genesis block from firehose"; + "provider" => self.provider.as_str()); + + // We use 0 here to mean the genesis block of the chain. Firehose + // when seeing start block number 0 will always return the genesis + // block of the chain, even if the chain's start block number is + // not starting at block #0. + self.block_ptr_for_number::(logger, 0).await + } + + pub async fn block_ptr_for_number( + &self, + logger: &Logger, + number: BlockNumber, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for number {}", number; + "provider" => self.provider.as_str(), + ); + + let mut client = self.new_stream_client(); + + // The trick is the following. + // + // Firehose `start_block_num` and `stop_block_num` are both inclusive, so we specify + // the block we are looking for in both. + // + // Now, the remaining question is how the block from the canonical chain is picked. We + // leverage the fact that Firehose will always send the block in the longest chain as the + // last message of this request. + // + // That way, we either get the final block if the block is now in a final segment of the + // chain (or probabilistically if no finality concept exists for the chain). Or we get the + // block that is in the longest chain according to Firehose.
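As an aside on `load_blocks_by_numbers` above: the `stream::iter(..).map(..).buffered(n)` combination keeps at most `n` fetches in flight while preserving input order, and `try_collect` aborts on the first failed block. A self-contained sketch of that pattern, assuming the `futures` crate (imported as `futures03` in this codebase), `tokio`, and `anyhow`, with a stand-in `fetch` function; the firehose request for `block_ptr_for_number` itself continues just below:

use futures::{stream, StreamExt, TryStreamExt};

// Stand-in for `get_block_by_number_with_retry`: pretend a "block" is just its number doubled.
async fn fetch(number: u64) -> Result<u64, anyhow::Error> {
    Ok(number * 2)
}

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    let numbers = vec![1u64, 2, 3, 4, 5];
    let blocks: Vec<u64> = stream::iter(numbers)
        .map(fetch)      // one pending request per block number
        .buffered(3)     // at most 3 requests in flight, output order preserved
        .try_collect()   // stop at the first failed fetch
        .await?;
    assert_eq!(blocks, vec![2, 4, 6, 8, 10]);
    Ok(())
}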
+ let response_stream = client + .blocks(firehose::Request { + start_block_num: number as i64, + stop_block_num: number as u64, + final_blocks_only: false, + ..Default::default() + }) + .await?; + + let mut block_stream = response_stream.into_inner(); + + debug!(logger, "Retrieving block(s) from firehose"; + "provider" => self.provider.as_str()); + + let mut latest_received_block: Option = None; + while let Some(message) = block_stream.next().await { + match message { + Ok(v) => { + let block = decode_firehose_block::(&v)?.ptr(); + + match latest_received_block { + None => { + latest_received_block = Some(block); + } + Some(ref actual_ptr) => { + // We want to receive all events related to a specific block number, + // however, in some circumstances, it seems Firehose would not stop sending + // blocks (`start_block_num: 0 and stop_block_num: 0` on NEAR seems to trigger + // this). + // + // To prevent looping infinitely, we stop as soon as a new received block's + // number is higher than the latest received block's number, in which case it + // means it's an event for a block we are not interested in. + if block.number > actual_ptr.number { + break; + } + + latest_received_block = Some(block); + } + } + } + Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + }; + } + + match latest_received_block { + Some(block_ptr) => Ok(block_ptr), + None => Err(anyhow::format_err!( + "Firehose should have returned at least one block for request" + )), + } + } + + pub async fn stream_blocks( + self: Arc, + request: firehose::Request, + headers: &ConnectionHeaders, + ) -> Result, anyhow::Error> { + let mut client = self.new_stream_client(); + let request = headers.add_to_request(request); + let response_stream = client.blocks(request).await?; + let block_stream = response_stream.into_inner(); + + Ok(block_stream) + } + + pub async fn substreams( + self: Arc, + request: substreams_rpc::Request, + headers: &ConnectionHeaders, + ) -> Result, anyhow::Error> { + let mut client = self.new_substreams_streaming_client(); + let request = headers.add_to_request(request); + let response_stream = client.blocks(request).await?; + let block_stream = response_stream.into_inner(); + + Ok(block_stream) + } + + pub async fn info( + self: Arc, + ) -> Result { + let endpoint = self.cheap_clone(); + + self.info_response + .get_or_try_init(move || async move { + if endpoint.is_substreams { + let mut client = endpoint.new_substreams_info_client(); + + client + .info(InfoRequest {}) + .await + .map(|r| r.into_inner()) + .map_err(anyhow::Error::from) + .and_then(|e| e.try_into()) + } else { + let mut client = endpoint.new_firehose_info_client(); + + client.info().await + } + }) + .await + .map(ToOwned::to_owned) + } +} + +#[derive(Debug)] +pub struct FirehoseEndpoints(ChainName, ProviderManager>); + +impl FirehoseEndpoints { + pub fn for_testing(adapters: Vec>) -> Self { + let chain_name: ChainName = "testing".into(); + + Self( + chain_name.clone(), + ProviderManager::new( + crate::log::discard(), + [(chain_name, adapters)], + ProviderCheckStrategy::MarkAsValid, + ), + ) + } + + pub fn new( + chain_name: ChainName, + provider_manager: ProviderManager>, + ) -> Self { + Self(chain_name, provider_manager) + } + + pub fn len(&self) -> usize { + self.1.len(&self.0) + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// This function will attempt to grab an endpoint based on the Lowest error count + // with high capacity available. If an adapter cannot be found `endpoint` will + // return an error. 
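The `endpoint` implementation that follows expresses that strategy with `Iterator::try_fold` over `ControlFlow`: a provider with `High` capacity short-circuits the scan, while the first usable `Low` one is carried along as a fallback. A small sketch of the same fold, using only the standard library and plain integers in place of `AvailableCapacity` (2 for High, 1 for Low, 0 for Unavailable):

use std::ops::ControlFlow;

// Pick the first high-capacity item; otherwise remember the first low-capacity one.
fn pick(capacities: &[u8]) -> Option<u8> {
    let res = capacities.iter().copied().try_fold(None, |acc, c| match c {
        2 => ControlFlow::Break(Some(c)),                     // High: stop immediately
        1 if acc.is_none() => ControlFlow::Continue(Some(c)), // first Low: keep as fallback
        _ => ControlFlow::Continue(acc),                      // Unavailable or later Low: keep fallback
    });
    match res {
        ControlFlow::Continue(x) | ControlFlow::Break(x) => x,
    }
}

fn main() {
    assert_eq!(pick(&[0, 1, 2, 1]), Some(2)); // a High endpoint short-circuits the scan
    assert_eq!(pick(&[0, 1, 1]), Some(1));    // otherwise fall back to a Low endpoint
    assert_eq!(pick(&[0, 0]), None);          // nothing usable at all
}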
+ pub async fn endpoint(&self) -> anyhow::Result> { + let endpoint = self + .1 + .providers(&self.0) + .await? + .sorted_by_key(|x| x.current_error_count()) + .try_fold(None, |acc, adapter| { + match adapter.get_capacity() { + AvailableCapacity::Unavailable => ControlFlow::Continue(acc), + AvailableCapacity::Low => match acc { + Some(_) => ControlFlow::Continue(acc), + None => ControlFlow::Continue(Some(adapter)), + }, + // This means that if all adapters with low/no errors are low capacity + // we will retry the high capacity that has errors, at this point + // any other available with no errors are almost at their limit. + AvailableCapacity::High => ControlFlow::Break(Some(adapter)), + } + }); + + match endpoint { + ControlFlow::Continue(adapter) + | ControlFlow::Break(adapter) => + adapter.cloned().ok_or(anyhow!("unable to get a connection, increase the firehose conn_pool_size or limit for the node")) + } + } +} + +#[cfg(test)] +mod test { + use std::{mem, sync::Arc}; + + use slog::{o, Discard, Logger}; + + use super::*; + use crate::components::metrics::MetricsRegistry; + use crate::endpoint::EndpointMetrics; + use crate::firehose::SubgraphLimit; + + #[crate::test] + async fn firehose_endpoint_errors() { + let endpoint = vec![Arc::new(FirehoseEndpoint::new( + String::new(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + Arc::new(EndpointMetrics::mock()), + false, + ))]; + + let endpoints = FirehoseEndpoints::for_testing(endpoint); + + let mut keep = vec![]; + for _i in 0..SUBGRAPHS_PER_CONN { + keep.push(endpoints.endpoint().await.unwrap()); + } + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("conn_pool_size")); + + mem::drop(keep); + endpoints.endpoint().await.unwrap(); + + let endpoints = FirehoseEndpoints::for_testing(vec![]); + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("unable to get a connection")); + } + + #[crate::test] + async fn firehose_endpoint_with_limit() { + let endpoint = vec![Arc::new(FirehoseEndpoint::new( + String::new(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Limit(2), + Arc::new(EndpointMetrics::mock()), + false, + ))]; + + let endpoints = FirehoseEndpoints::for_testing(endpoint); + + let mut keep = vec![]; + for _ in 0..2 { + keep.push(endpoints.endpoint().await.unwrap()); + } + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("conn_pool_size")); + + mem::drop(keep); + endpoints.endpoint().await.unwrap(); + } + + #[crate::test] + async fn firehose_endpoint_no_traffic() { + let endpoint = vec![Arc::new(FirehoseEndpoint::new( + String::new(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Disabled, + Arc::new(EndpointMetrics::mock()), + false, + ))]; + + let endpoints = FirehoseEndpoints::for_testing(endpoint); + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("conn_pool_size")); + } + + #[crate::test] + async fn firehose_endpoint_selection() { + let logger = Logger::root(Discard, o!()); + let endpoint_metrics = Arc::new(EndpointMetrics::new( + logger, + &["high_error", "low availability", "high availability"], + Arc::new(MetricsRegistry::mock()), + )); + + let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( + "high_error".to_string(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + 
false, + )); + let high_error_adapter2 = Arc::new(FirehoseEndpoint::new( + "high_error".to_string(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + false, + )); + let low_availability = Arc::new(FirehoseEndpoint::new( + "low availability".to_string(), + "http://127.0.0.2".to_string(), + None, + None, + false, + false, + SubgraphLimit::Limit(2), + endpoint_metrics.clone(), + false, + )); + let high_availability = Arc::new(FirehoseEndpoint::new( + "high availability".to_string(), + "http://127.0.0.3".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + false, + )); + + endpoint_metrics.report_for_test(&high_error_adapter1.provider, false); + + let endpoints = FirehoseEndpoints::for_testing(vec![ + high_error_adapter1.clone(), + high_error_adapter2.clone(), + low_availability.clone(), + high_availability.clone(), + ]); + + let res = endpoints.endpoint().await.unwrap(); + assert_eq!(res.provider, high_availability.provider); + mem::drop(endpoints); + + // Removing high availability without errors should fallback to low availability + let endpoints = FirehoseEndpoints::for_testing( + vec![ + high_error_adapter1.clone(), + high_error_adapter2, + low_availability.clone(), + high_availability.clone(), + ] + .into_iter() + .filter(|a| a.provider_name() != high_availability.provider) + .collect(), + ); + + // Ensure we're in a low capacity situation + assert_eq!(low_availability.get_capacity(), AvailableCapacity::Low); + + // In the scenario where the only high level adapter has errors we keep trying that + // because the others will be low or unavailable + let res = endpoints.endpoint().await.unwrap(); + // This will match both high error adapters + assert_eq!(res.provider, high_error_adapter1.provider); + } + + #[test] + fn subgraph_limit_calculates_availability() { + #[derive(Debug)] + struct Case { + limit: SubgraphLimit, + current: usize, + capacity: AvailableCapacity, + } + + let cases = vec![ + Case { + limit: SubgraphLimit::Disabled, + current: 20, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(0), + current: 20, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(0), + current: 0, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 80, + capacity: AvailableCapacity::Low, + }, + Case { + limit: SubgraphLimit::Limit(2), + current: 1, + capacity: AvailableCapacity::Low, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 19, + capacity: AvailableCapacity::High, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 100, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 99, + capacity: AvailableCapacity::Low, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 101, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Unlimited, + current: 1000, + capacity: AvailableCapacity::High, + }, + Case { + limit: SubgraphLimit::Unlimited, + current: 0, + capacity: AvailableCapacity::High, + }, + ]; + + for c in cases { + let res = c.limit.get_capacity(c.current); + assert_eq!(res, c.capacity, "{:#?}", c); + } + } + + #[test] + fn available_capacity_ordering() { + assert!(AvailableCapacity::Unavailable < AvailableCapacity::Low); + assert!(AvailableCapacity::Unavailable < AvailableCapacity::High); + assert!(AvailableCapacity::Low < 
AvailableCapacity::High); + } +} diff --git a/graph/src/ipfs/cache.rs b/graph/src/ipfs/cache.rs index e0e256a7c22..2e2c3253f55 100644 --- a/graph/src/ipfs/cache.rs +++ b/graph/src/ipfs/cache.rs @@ -174,9 +174,7 @@ impl Cache { async fn insert(&self, logger: &Logger, path: ContentPath, data: Bytes) { match self { - Cache::Memory { max_entry_size, .. } if data.len() > *max_entry_size => { - return; - } + Cache::Memory { max_entry_size, .. } if data.len() > *max_entry_size => {} Cache::Memory { cache, .. } => { let mut cache = cache.lock().unwrap(); diff --git a/graph/src/ipfs/gateway_client.rs b/graph/src/ipfs/gateway_client.rs index 862a46656af..0a7d3ac34ad 100644 --- a/graph/src/ipfs/gateway_client.rs +++ b/graph/src/ipfs/gateway_client.rs @@ -323,7 +323,7 @@ mod tests { ) .await; - assert!(matches!(result, Err(_))); + assert!(result.is_err()); } #[crate::test] diff --git a/graph/src/ipfs/mod.rs b/graph/src/ipfs/mod.rs index 403cbf614cd..d8231d2074c 100644 --- a/graph/src/ipfs/mod.rs +++ b/graph/src/ipfs/mod.rs @@ -75,7 +75,7 @@ where input: "".to_owned(), source: anyhow!("at least one server address is required"), }), - 1 => Ok(clients.pop().unwrap().into()), + 1 => Ok(clients.pop().unwrap()), n => { info!(logger, "Creating a pool of {} IPFS clients", n); diff --git a/graph/src/ipfs/rpc_client.rs b/graph/src/ipfs/rpc_client.rs index e5efcc122d0..92e9e787ec8 100644 --- a/graph/src/ipfs/rpc_client.rs +++ b/graph/src/ipfs/rpc_client.rs @@ -283,7 +283,7 @@ mod tests { ) .await; - assert!(matches!(result, Err(_))); + assert!(result.is_err()); } #[crate::test] diff --git a/graph/src/ipfs/test_utils.rs b/graph/src/ipfs/test_utils.rs index decd9724a78..405d46539ea 100644 --- a/graph/src/ipfs/test_utils.rs +++ b/graph/src/ipfs/test_utils.rs @@ -18,7 +18,7 @@ impl From> for IpfsAddFile { fn from(content: Vec) -> Self { Self { path: Default::default(), - content: content.into(), + content, } } } diff --git a/graph/src/runtime/asc_heap.rs b/graph/src/runtime/asc_heap.rs index 4f2f5c41a87..1301ea017f5 100644 --- a/graph/src/runtime/asc_heap.rs +++ b/graph/src/runtime/asc_heap.rs @@ -47,14 +47,14 @@ pub trait AscHeap: Send { /// /// This operation is expensive as it requires a call to `raw_new` for every /// nested object. -pub async fn asc_new( +pub async fn asc_new( heap: &mut H, rust_obj: &T, gas: &GasCounter, ) -> Result, HostExportError> where C: AscType + AscIndexId, - T: ToAscObj, + T: ToAscObj + ?Sized, { let obj = rust_obj.to_asc_obj(heap, gas).await?; AscPtr::alloc_obj(obj, heap, gas).await diff --git a/graph/src/runtime/asc_ptr.rs b/graph/src/runtime/asc_ptr.rs index 7a51805269e..3325d4b7f02 100644 --- a/graph/src/runtime/asc_ptr.rs +++ b/graph/src/runtime/asc_ptr.rs @@ -19,7 +19,7 @@ impl Copy for AscPtr {} impl Clone for AscPtr { fn clone(&self) -> Self { - AscPtr(self.0, PhantomData) + *self } } @@ -69,7 +69,7 @@ impl AscPtr { let using_buffer = |buffer: &mut [MaybeUninit]| { let buffer = heap.read(self.0, buffer, gas)?; - C::from_asc_bytes(buffer, &heap.api_version()) + C::from_asc_bytes(buffer, heap.api_version()) }; let len = len as usize; @@ -103,7 +103,7 @@ impl AscPtr { let aligned_len = padding_to_16(bytes.len()); // Since AssemblyScript keeps all allocated objects with a 16 byte alignment, // we need to do the same when we allocate ourselves. 
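The body of `padding_to_16` is not part of this diff, but a plausible version of such a helper, together with the `std::iter::repeat_n` form the padding switches to just below (equivalent to `iter::repeat(0).take(n)`, without building an unbounded iterator first), might look like this sketch:

use std::iter;

// Hypothetical helper in the spirit of `padding_to_16`: number of filler bytes
// needed so that `len` bytes end on a 16-byte boundary (0 when already aligned).
fn padding_to_16(len: usize) -> usize {
    (16 - len % 16) % 16
}

fn main() {
    assert_eq!(padding_to_16(20), 12); // a 20-byte payload needs 12 bytes of padding
    assert_eq!(padding_to_16(32), 0);  // already aligned

    let mut bytes = vec![1u8; 20];
    bytes.extend(iter::repeat_n(0u8, padding_to_16(20)));
    assert_eq!(bytes.len(), 32); // content now ends on a 16-byte boundary
}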
- bytes.extend(std::iter::repeat(0).take(aligned_len)); + bytes.extend(std::iter::repeat_n(0, aligned_len)); let header = Self::generate_header( heap, @@ -140,6 +140,7 @@ impl AscPtr { /// - gc_info2: usize -> second GC info (we don't free memory so it's irrelevant) /// - rt_id: u32 -> identifier for the class being allocated /// - rt_size: u32 -> content size + /// /// Only used for version >= 0.0.5. async fn generate_header( heap: &mut H, @@ -169,12 +170,14 @@ impl AscPtr { } /// Helper to read the length from the header. + /// /// An AssemblyScript header has 20 bytes, and it's right before the content, and composed by: /// - mm_info: usize /// - gc_info: usize /// - gc_info2: usize /// - rt_id: u32 /// - rt_size: u32 + /// /// This function returns the `rt_size`. /// Only used for version >= 0.0.5. pub fn read_len( diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 7fe29806a3f..86b13a9f3f2 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -188,11 +188,13 @@ impl ApiSchema { match t { s::Type::NamedType(name) => { let named_type = self.get_named_type(name); - named_type.map_or(false, |type_def| match type_def { - s::TypeDefinition::Scalar(_) - | s::TypeDefinition::Enum(_) - | s::TypeDefinition::InputObject(_) => true, - _ => false, + named_type.is_some_and(|type_def| { + matches!( + type_def, + s::TypeDefinition::Scalar(_) + | s::TypeDefinition::Enum(_) + | s::TypeDefinition::InputObject(_) + ) }) } s::Type::ListType(inner) => self.is_input_type(inner), @@ -916,7 +918,7 @@ fn field_scalar_filter_input_values( set: FilterOpsSet<'_>, ) -> Vec { field_filter_ops(set) - .into_iter() + .iter() .map(|filter_type| { let field_type = s::Type::NamedType(set.type_name().to_string()); let value_type = match *filter_type { @@ -1317,7 +1319,7 @@ mod tests { TypeDefinition::Object(t) => ast::get_field(t, name), _ => None, } - .expect(&format!("Schema should contain a field named `{}`", name)) + .unwrap_or_else(|| panic!("Schema should contain a field named `{}`", name)) } #[test] @@ -1776,10 +1778,7 @@ mod tests { let change_block_filter = user_filter_type .fields .iter() - .find(move |p| match p.name.as_str() { - "_change_block" => true, - _ => false, - }) + .find(|p| p.name == "_change_block") .expect("_change_block field is missing in User_filter"); match &change_block_filter.value_type { @@ -1902,10 +1901,7 @@ mod tests { let change_block_filter = user_filter_type .fields .iter() - .find(move |p| match p.name.as_str() { - "_change_block" => true, - _ => false, - }) + .find(|p| p.name == "_change_block") .expect("_change_block field is missing in User_filter"); match &change_block_filter.value_type { @@ -2289,7 +2285,7 @@ type Gravatar @entity { TypeDefinition::Object(t) => ast::get_field(t, name), _ => None, } - .expect(&format!("Schema should contain a field named `{}`", name)) + .unwrap_or_else(|| panic!("Schema should contain a field named `{}`", name)) } const SCHEMA: &str = r#" diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 841f7568ad7..2152ed25723 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -74,12 +74,12 @@ pub fn parse_field_as_filter(key: &str) -> (String, FilterOp) { _ => ("", FilterOp::Equal), }; - return match op { + match op { FilterOp::And => (key.to_owned(), op), FilterOp::Or => (key.to_owned(), op), // Strip the operator suffix to get the attribute. _ => (key.trim_end_matches(suffix).to_owned(), op), - }; + } } /// An `ObjectType` with `Hash` and `Eq` derived from the name. 
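Several hunks above and below replace `Option::map_or(false, ..)` with the clearer `is_some_and`; its dual for `map_or(true, ..)` is `is_none_or`. The forms are equivalent, as this small standard-library-only sketch shows:

fn main() {
    let some: Option<i32> = Some(3);
    let none: Option<i32> = None;

    // `is_some_and(p)`: there is a value and it satisfies the predicate.
    assert_eq!(some.is_some_and(|n| n > 2), some.map_or(false, |n| n > 2));
    assert!(!none.is_some_and(|n| n > 2));

    // `is_none_or(p)`: either no value at all, or a value that satisfies it.
    assert_eq!(some.is_none_or(|n| n > 2), some.map_or(true, |n| n > 2));
    assert!(none.is_none_or(|n| n > 2));
}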
@@ -94,7 +94,7 @@ impl Ord for ObjectType { impl PartialOrd for ObjectType { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.0.name.cmp(&other.0.name)) + Some(self.cmp(other)) } } @@ -303,10 +303,7 @@ pub fn get_object_type_directive( // Returns true if the given type is a non-null type. pub fn is_non_null_type(t: &s::Type) -> bool { - match t { - s::Type::NonNullType(_) => true, - _ => false, - } + matches!(t, s::Type::NonNullType(_)) } /// Returns true if the given type is an input type. @@ -317,11 +314,13 @@ pub fn is_input_type(schema: &s::Document, t: &s::Type) -> bool { match t { s::Type::NamedType(name) => { let named_type = schema.get_named_type(name); - named_type.map_or(false, |type_def| match type_def { - s::TypeDefinition::Scalar(_) - | s::TypeDefinition::Enum(_) - | s::TypeDefinition::InputObject(_) => true, - _ => false, + named_type.is_some_and(|type_def| { + matches!( + type_def, + s::TypeDefinition::Scalar(_) + | s::TypeDefinition::Enum(_) + | s::TypeDefinition::InputObject(_) + ) }) } s::Type::ListType(inner) => is_input_type(schema, inner), @@ -333,7 +332,7 @@ pub fn is_entity_type(schema: &s::Document, t: &s::Type) -> bool { match t { s::Type::NamedType(name) => schema .get_named_type(name) - .map_or(false, is_entity_type_definition), + .is_some_and(is_entity_type_definition), s::Type::ListType(inner_type) => is_entity_type(schema, inner_type), s::Type::NonNullType(inner_type) => is_entity_type(schema, inner_type), } @@ -359,10 +358,7 @@ pub fn is_entity_type_definition(type_def: &s::TypeDefinition) -> bool { pub fn is_list_or_non_null_list_field(field: &s::Field) -> bool { match &field.field_type { s::Type::ListType(_) => true, - s::Type::NonNullType(inner_type) => match inner_type.deref() { - s::Type::ListType(_) => true, - _ => false, - }, + s::Type::NonNullType(inner_type) => matches!(inner_type.deref(), s::Type::ListType(_)), _ => false, } } diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index d560351f71e..520d3d6320a 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -10,6 +10,7 @@ use crate::util::intern; /// Key by which an individual entity in the store can be accessed. Stores /// only the entity type and id. The deployment must be known from context. #[derive(Clone, CacheWeight, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[non_exhaustive] pub struct EntityKey { /// Name of the entity type. pub entity_type: EntityType, @@ -23,8 +24,6 @@ pub struct EntityKey { /// doing the lookup. So if the entity exists but was created on a different causality region, /// the lookup will return empty. 
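The `#[non_exhaustive]` attribute added to `EntityKey` above does the same job as the private `_force_use_of_new: ()` field removed just below: outside the defining crate the struct can no longer be built with literal syntax, so callers must go through a constructor. A minimal sketch of the effect, with a hypothetical `Key` type:

// In the defining crate:
#[non_exhaustive]
pub struct Key {
    pub entity_type: String,
    pub entity_id: String,
}

impl Key {
    pub fn new(entity_type: String, entity_id: String) -> Self {
        // Literal construction still works inside the defining crate.
        Key { entity_type, entity_id }
    }
}

fn main() {
    let k = Key::new("User".into(), "1".into());
    assert_eq!(k.entity_type, "User");
    // In a downstream crate this would fail to compile, forcing use of `Key::new`:
    // let k = Key { entity_type: "User".into(), entity_id: "1".into() };
    // error: cannot create non-exhaustive struct using struct expression
}

Compared with the dummy field, `#[non_exhaustive]` states the intent directly and does not have to be threaded through every constructor inside the crate.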
pub causality_region: CausalityRegion, - - _force_use_of_new: (), } impl EntityKey { @@ -43,7 +42,6 @@ impl EntityKey { entity_type, entity_id, causality_region, - _force_use_of_new: (), } } diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 098b48362b9..deb3cb3d8ef 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -190,7 +190,7 @@ impl Eq for EntityType {} impl PartialOrd for EntityType { fn partial_cmp(&self, other: &Self) -> Option { - self.as_str().partial_cmp(other.as_str()) + Some(self.cmp(other)) } } diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index ac7a4284175..bd6aa2d3017 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -201,7 +201,7 @@ impl TypeInfo { } fn for_aggregation(schema: &Schema, pool: &AtomPool, agg_type: &s::ObjectType) -> Self { - let agg_type = Aggregation::new(&schema, &pool, agg_type); + let agg_type = Aggregation::new(schema, pool, agg_type); TypeInfo::Aggregation(agg_type) } @@ -230,7 +230,7 @@ impl Field { field_type: &s::Type, derived_from: Option, ) -> Self { - let value_type = Self::scalar_value_type(&schema, field_type); + let value_type = Self::scalar_value_type(schema, field_type); Self { name: Word::from(name), field_type: field_type.clone(), @@ -245,7 +245,7 @@ impl Field { s::Type::NamedType(name) => name.parse::().unwrap_or_else(|_| { match schema.document.get_named_type(name) { Some(t::Object(obj_type)) => { - let id = obj_type.field(&*ID).expect("all object types have an id"); + let id = obj_type.field(&ID).expect("all object types have an id"); Self::scalar_value_type(schema, &id.field_type) } Some(t::Interface(intf)) => { @@ -265,7 +265,7 @@ impl Field { ValueType::String } Some(obj_type) => { - let id = obj_type.field(&*ID).expect("all object types have an id"); + let id = obj_type.field(&ID).expect("all object types have an id"); Self::scalar_value_type(schema, &id.field_type) } } @@ -290,7 +290,7 @@ impl Field { let derived_from = self.derived_from.as_ref()?; let name = schema .pool() - .lookup(&self.field_type.get_base_type()) + .lookup(self.field_type.get_base_type()) .unwrap(); schema.field(name, derived_from) } @@ -309,11 +309,9 @@ pub enum ObjectOrInterface<'a> { impl<'a> CheapClone for ObjectOrInterface<'a> { fn cheap_clone(&self) -> Self { match self { - ObjectOrInterface::Object(schema, object) => { - ObjectOrInterface::Object(*schema, *object) - } + ObjectOrInterface::Object(schema, object) => ObjectOrInterface::Object(schema, object), ObjectOrInterface::Interface(schema, interface) => { - ObjectOrInterface::Interface(*schema, *interface) + ObjectOrInterface::Interface(schema, interface) } } } @@ -357,7 +355,7 @@ impl<'a> ObjectOrInterface<'a> { let object_type = match self { ObjectOrInterface::Object(_, object_type) => Some(*object_type), ObjectOrInterface::Interface(schema, interface) => { - schema.implementers(&interface).next() + schema.implementers(interface).next() } }; object_type.and_then(|object_type| object_type.field(name)) @@ -431,7 +429,7 @@ impl ObjectType { .fields .iter() .map(|field| { - let derived_from = field.derived_from().map(|name| Word::from(name)); + let derived_from = field.derived_from().map(Word::from); Field::new(schema, &field.name, &field.field_type, derived_from) }) .collect(); @@ -535,7 +533,7 @@ impl InterfaceType { // since the API schema does not contain certain filters for // derived fields on interfaces that it would for // non-derived fields - let derived_from = 
field.derived_from().map(|name| Word::from(name)); + let derived_from = field.derived_from().map(Word::from); Field::new(schema, &field.name, &field.field_type, derived_from) }) .collect(); @@ -932,7 +930,7 @@ impl Aggregation { pub fn dimensions(&self) -> impl Iterator { self.fields .iter() - .filter(|field| &field.name != &*ID && field.name != kw::TIMESTAMP) + .filter(|field| field.name != *ID && field.name != kw::TIMESTAMP) } fn object_type(&self, interval: AggregationInterval) -> Option<&ObjectType> { @@ -969,7 +967,7 @@ impl InputSchema { .iter() .enumerate() .filter_map(|(idx, ti)| ti.aggregation().map(|agg_type| (idx, agg_type))) - .map(|(aggregation, agg_type)| { + .flat_map(|(aggregation, agg_type)| { agg_type .intervals .iter() @@ -980,7 +978,6 @@ impl InputSchema { agg_type, }) }) - .flatten() .collect(); mappings.sort(); mappings.into_boxed_slice() @@ -1408,12 +1405,7 @@ impl InputSchema { .filter(|directive| match directive.argument("include") { Some(Value::List(includes)) if !includes.is_empty() => { includes.iter().any(|include| match include { - Value::Object(include) => match include.get("entity") { - Some(Value::String(fulltext_entity)) if fulltext_entity == entity => { - true - } - _ => false, - }, + Value::Object(include) => matches!(include.get("entity"), Some(Value::String(fulltext_entity)) if fulltext_entity == entity), _ => false, }) } @@ -1611,7 +1603,7 @@ impl InputSchema { fn atom_pool(document: &s::Document) -> AtomPool { let mut pool = AtomPool::new(); - pool.intern(&*ID); + pool.intern(&ID); // Name and attributes of PoI entity type pool.intern(POI_OBJECT); pool.intern(POI_DIGEST); @@ -1676,7 +1668,7 @@ fn atom_pool(document: &s::Document) -> AtomPool { } for object_type in document.get_object_type_definitions() { - for defn in InputSchema::fulltext_definitions(&document, &object_type.name).unwrap() { + for defn in InputSchema::fulltext_definitions(document, &object_type.name).unwrap() { pool.intern(defn.name.as_str()); } } @@ -1785,9 +1777,7 @@ mod validations { if subgraph_schema_type .directives .iter() - .filter(|directive| !directive.name.eq("fulltext")) - .next() - .is_some() + .any(|directive| !directive.name.eq("fulltext")) { Some(SchemaValidationError::InvalidSchemaTypeDirectives) } else { @@ -2002,7 +1992,7 @@ mod validations { /// type `Int8` and that the `id` field has type `Int8` fn validate_entity_directives(&self) -> Vec { fn id_type_is_int8(object_type: &s::ObjectType) -> Option { - let field = match object_type.field(&*ID) { + let field = match object_type.field(&ID) { Some(field) => field, None => { return Some(Err::IdFieldMissing(object_type.name.to_owned())); @@ -2066,7 +2056,7 @@ mod validations { self.entity_types .iter() .fold(vec![], |mut errors, object_type| { - match object_type.field(&*ID) { + match object_type.field(&ID) { None => errors.push(SchemaValidationError::IdFieldMissing( object_type.name.clone(), )), @@ -2281,7 +2271,7 @@ mod validations { // when we query, and just assume that that's ok. 
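Two of the iterator cleanups above are pure equivalences: `.map(f).flatten()` is `.flat_map(f)`, and `.filter(p).next().is_some()` is `.any(p)`, which also stops at the first match. A compact sketch using only the standard library:

fn main() {
    let words = ["one two", "three"];

    // .map(..).flatten() and .flat_map(..) produce the same sequence.
    let a: Vec<&str> = words.iter().map(|w| w.split(' ')).flatten().collect();
    let b: Vec<&str> = words.iter().flat_map(|w| w.split(' ')).collect();
    assert_eq!(a, b);

    // .filter(p).next().is_some() and .any(p) answer the same question,
    // but `any` reads as a predicate and short-circuits.
    let nums = [1, 2, 3, 4];
    assert_eq!(
        nums.iter().filter(|n| **n % 2 == 0).next().is_some(),
        nums.iter().any(|n| n % 2 == 0)
    );
}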
let target_field_type = target_field.field_type.get_base_type(); if target_field_type != object_type.name - && &target_field.name != ID.as_str() + && target_field.name != ID.as_str() && !interface_types .iter() .any(|iface| target_field_type.eq(iface.as_str())) @@ -2320,7 +2310,7 @@ mod validations { let id_types: HashSet<&str> = HashSet::from_iter( obj_types .iter() - .filter_map(|obj_type| obj_type.field(&*ID)) + .filter_map(|obj_type| obj_type.field(&ID)) .map(|f| f.field_type.get_base_type()) .map(|name| if name == "ID" { "String" } else { name }), ); @@ -2405,13 +2395,9 @@ mod validations { )), Err(_) => { if is_first_last - && schema - .entity_types - .iter() - .find(|entity_type| { - entity_type.name.eq(field.field_type.get_base_type()) - }) - .is_some() + && schema.entity_types.iter().any(|entity_type| { + entity_type.name.eq(field.field_type.get_base_type()) + }) { return Ok(()); } @@ -2430,10 +2416,10 @@ mod validations { /// * `source` is an existing timeseries type /// * all non-aggregate fields are also fields on the `source` - /// type and have the same type + /// type and have the same type /// * `arg` for each `@aggregate` is a numeric type in the - /// timeseries, coercible to the type of the field (e.g. `Int -> - /// BigDecimal`, but not `BigInt -> Int8`) + /// timeseries, coercible to the type of the field (e.g. `Int + /// -> BigDecimal`, but not `BigInt -> Int8`) fn aggregate_directive( schema: &Schema, agg_type: &s::ObjectType, @@ -2691,7 +2677,7 @@ mod validations { return; } for interval in intervals { - if let Err(_) = interval.parse::() { + if interval.parse::().is_err() { errors.push(Err::AggregationInvalidInterval( agg_type.name.to_owned(), interval.to_owned(), @@ -2807,9 +2793,9 @@ mod validations { BaseSchema::parse(&schema, DeploymentHash::new("dummy").unwrap()).unwrap(); let res = validate(&schema); if ok { - assert!(matches!(res, Ok(_))); + assert!(res.is_ok()); } else { - assert!(matches!(res, Err(_))); + assert!(res.is_err()); assert!(matches!( res.unwrap_err()[0], SchemaValidationError::InterfaceImplementorsMixId(_, _) @@ -3108,7 +3094,6 @@ type Gravatar @entity { let files = { let mut files = std::fs::read_dir(dir) .unwrap() - .into_iter() .filter_map(|entry| entry.ok()) .map(|entry| entry.path()) .filter(|path| path.extension() == Some(OsString::from("graphql").as_os_str())) diff --git a/graph/src/schema/input/sqlexpr.rs b/graph/src/schema/input/sqlexpr.rs index bc5705810bb..24d373a930a 100644 --- a/graph/src/schema/input/sqlexpr.rs +++ b/graph/src/schema/input/sqlexpr.rs @@ -31,12 +31,17 @@ pub(crate) fn parse( /// `store/postgres/src/relational/rollup.rs`. Note that the visitor can /// mutate both itself (e.g., to store errors) and the expression it is /// visiting. +/// +/// The error type is `()`, as the visitor is expected to record any errors +/// internally pub trait ExprVisitor { /// Visit an identifier (column name). Must return `Err` if the /// identifier is not allowed + #[allow(clippy::result_unit_err)] fn visit_ident(&mut self, ident: &mut p::Ident) -> Result<(), ()>; /// Visit a function name. 
Must return `Err` if the function is not /// allowed + #[allow(clippy::result_unit_err)] fn visit_func_name(&mut self, func: &mut p::ObjectNamePart) -> Result<(), ()>; /// Called when we encounter a construct that is not supported like a /// subquery @@ -46,7 +51,7 @@ pub trait ExprVisitor { } pub struct VisitExpr<'a> { - visitor: Box<&'a mut dyn ExprVisitor>, + visitor: &'a mut dyn ExprVisitor, } impl<'a> VisitExpr<'a> { @@ -69,6 +74,7 @@ impl<'a> VisitExpr<'a> { /// return `Err(())`. The visitor will know the details of the error /// since this can only happen if `visit_ident` or `visit_func_name` /// returned an error, or `parse_error` or `not_supported` was called. + #[allow(clippy::result_unit_err)] pub fn visit(sql: &str, visitor: &'a mut dyn ExprVisitor) -> Result { let dialect = PostgreSqlDialect {}; @@ -78,9 +84,7 @@ impl<'a> VisitExpr<'a> { .tokenize_with_location() .unwrap(); parser = parser.with_tokens_with_locations(tokens); - let mut visit = VisitExpr { - visitor: Box::new(visitor), - }; + let mut visit = VisitExpr { visitor }; let mut expr = match parser.parse_expr() { Ok(expr) => expr, Err(e) => { @@ -360,7 +364,7 @@ struct Validator { errors: Vec, } -const FN_WHITELIST: [&'static str; 14] = [ +const FN_WHITELIST: [&str; 14] = [ // Clearly deterministic functions from // https://www.postgresql.org/docs/current/functions-math.html, Table // 9.5. We could also add trig functions (Table 9.7 and 9.8), but under diff --git a/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs b/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs index ff69b343d29..b933e183edf 100644 --- a/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs +++ b/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs @@ -22,14 +22,14 @@ pub struct Request { /// By default, the engine runs in developer mode, with richer and deeper /// output. Differences between production and development modes include: /// * Forward parallel execution is enabled in production mode and disabled in - /// development mode + /// development mode /// * The time required to reach the first byte in development mode is faster - /// than in production mode. + /// than in production mode. /// /// Specific attributes of development mode include: /// * The client will receive all of the executed module's logs. /// * It's possible to request specific store snapshots in the execution tree - /// (via `debug_initial_store_snapshot_for_modules`). + /// (via `debug_initial_store_snapshot_for_modules`). /// * Multiple module's output is possible. 
/// /// With production mode`, however, you trade off functionality for high speed diff --git a/graph/src/util/bounded_queue.rs b/graph/src/util/bounded_queue.rs index f618c7eca7d..6129817eabe 100644 --- a/graph/src/util/bounded_queue.rs +++ b/graph/src/util/bounded_queue.rs @@ -169,6 +169,6 @@ impl BoundedQueue { /// Clear the queue by popping entries until there are none left pub fn clear(&self) { - while let Some(_) = self.try_pop() {} + while self.try_pop().is_some() {} } } diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index 3c1bf1bec10..077db9a51ce 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -265,13 +265,12 @@ fn derive_cache_weight() { /// number of entries divided by `NODE_FILL`, and the number of /// interior nodes can be determined by dividing the number of nodes /// at the child level by `NODE_FILL` - +/// /// The other difficulty is that the structs with which `BTreeMap` /// represents internal and leaf nodes are not public, so we can't /// get their size with `std::mem::size_of`; instead, we base our /// estimates of their size on the current `std` code, assuming that /// these structs will not change - pub mod btree { use std::collections::BTreeMap; use std::mem; diff --git a/graph/src/util/futures.rs b/graph/src/util/futures.rs index b4da90c8a1c..7dff592b342 100644 --- a/graph/src/util/futures.rs +++ b/graph/src/util/futures.rs @@ -114,8 +114,8 @@ where /// Never log failed attempts. /// May still log at `trace` logging level. pub fn no_logging(mut self) -> Self { - self.log_after = u64::max_value(); - self.warn_after = u64::max_value(); + self.log_after = u64::MAX; + self.warn_after = u64::MAX; self } diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 62ff3b4618f..884e4cb7e3d 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -64,7 +64,7 @@ impl Error { } } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Default)] /// A pool of interned strings. Pools can be organized hierarchically with /// lookups in child pools also considering the parent pool. The chain of /// pools from a pool through all its ancestors act as one big pool to the @@ -103,7 +103,7 @@ impl AtomPool { /// pool or any of its ancestors. pub fn get(&self, atom: Atom) -> Option<&str> { if atom.0 < self.base_sym { - self.base.as_ref().map(|base| base.get(atom)).flatten() + self.base.as_ref().and_then(|base| base.get(atom)) } else { self.atoms .get((atom.0 - self.base_sym) as usize) @@ -211,6 +211,10 @@ impl Object { .count() } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + /// Find the value for `key` in the object. Return `None` if the key is /// not present. 
pub fn get(&self, key: &str) -> Option<&V> { @@ -271,7 +275,7 @@ impl Object { pub(crate) fn contains_key(&self, key: &str) -> bool { self.entries .iter() - .any(|entry| self.pool.get(entry.key).map_or(false, |k| key == k)) + .any(|entry| self.pool.get(entry.key) == Some(key)) } pub fn merge(&mut self, other: Object) { @@ -337,12 +341,12 @@ impl Object { if self.same_pool(other) { self.entries .iter() - .filter(|e| e.key != TOMBSTONE_KEY && ignore.map_or(true, |ig| e.key != ig)) - .all(|Entry { key, value }| other.get_by_atom(key).map_or(false, |o| o == value)) + .filter(|e| e.key != TOMBSTONE_KEY && (ignore != Some(e.key))) + .all(|Entry { key, value }| other.get_by_atom(key) == Some(value)) } else { self.iter() .filter(|(key, _)| *key != ignore_key) - .all(|(key, value)| other.get(key).map_or(false, |o| o == value)) + .all(|(key, value)| other.get(key) == Some(value)) } } } @@ -385,7 +389,7 @@ impl<'a, V> Iterator for ObjectIter<'a, V> { type Item = (&'a str, &'a V); fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if entry.key != TOMBSTONE_KEY { // unwrap: we only add entries that are backed by the pool let key = self.pool.get(entry.key).unwrap(); @@ -424,7 +428,7 @@ impl Iterator for ObjectOwningIter { type Item = (Word, V); fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if entry.key != TOMBSTONE_KEY { // unwrap: we only add entries that are backed by the pool let key = self.pool.get(entry.key).unwrap(); @@ -451,7 +455,7 @@ impl<'a, V> Iterator for AtomIter<'a, V> { type Item = Atom; fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if entry.key != TOMBSTONE_KEY { return Some(entry.key); } @@ -498,10 +502,10 @@ impl PartialEq for Object { self.entries .iter() .filter(|e| e.key != TOMBSTONE_KEY) - .all(|Entry { key, value }| other.get_by_atom(key).map_or(false, |o| o == value)) + .all(|Entry { key, value }| other.get_by_atom(key) == Some(value)) } else { self.iter() - .all(|(key, value)| other.get(key).map_or(false, |o| o == value)) + .all(|(key, value)| other.get(key) == Some(value)) } } } diff --git a/graph/src/util/jobs.rs b/graph/src/util/jobs.rs index 4abed5e2a56..438ff72004c 100644 --- a/graph/src/util/jobs.rs +++ b/graph/src/util/jobs.rs @@ -142,7 +142,7 @@ mod tests { break; } if start.elapsed() > Duration::from_secs(2) { - assert!(false, "Counting to 10 took longer than 2 seconds"); + panic!("Counting to 10 took longer than 2 seconds"); } } diff --git a/graph/src/util/lfu_cache.rs b/graph/src/util/lfu_cache.rs index 06ec6a475db..12712350a01 100644 --- a/graph/src/util/lfu_cache.rs +++ b/graph/src/util/lfu_cache.rs @@ -179,7 +179,7 @@ impl }) } - pub fn iter<'a>(&'a self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.queue .iter() .map(|entry| (&entry.0.key, &entry.0.value)) @@ -194,7 +194,7 @@ impl // the absolute minimum and popping. 
let key_entry = CacheEntry::cache_key(key.clone()); self.queue - .change_priority(&key_entry, (true, Reverse(u64::min_value()))) + .change_priority(&key_entry, (true, Reverse(u64::MIN))) .and_then(|_| { self.queue.pop().map(|(e, _)| { assert_eq!(e.key, key_entry.key); diff --git a/graph/src/util/ogive.rs b/graph/src/util/ogive.rs index 29938b03b17..a90d414b038 100644 --- a/graph/src/util/ogive.rs +++ b/graph/src/util/ogive.rs @@ -155,7 +155,7 @@ impl Ogive { // rewritten to be more friendly to lossy calculations with f64 let offset = (value as f64).rem_euclid(self.bin_size) * (b - a) as f64; let x = a + (offset / self.bin_size) as i64; - Ok(x as i64) + Ok(x) } fn check_in_range(&self, point: i64) -> Result<(), StoreError> { @@ -192,17 +192,17 @@ mod tests { } // Check that the ogive is correct - assert_eq!(ogive.bin_size, 700 as f64 / 5 as f64); + assert_eq!(ogive.bin_size, 700_f64 / 5_f64); assert_eq!(ogive.range, 10..=60); // Test value method - for point in vec![20, 30, 45, 50, 60] { + for point in [20, 30, 45, 50, 60] { assert_eq!(ogive.value(point).unwrap(), f(point), "value for {}", point); } // Test next_point method - for step in vec![50, 140, 200] { - for value in vec![10, 20, 30, 35, 45, 50, 60] { + for step in [50, 140, 200] { + for value in [10, 20, 30, 35, 45, 50, 60] { assert_eq!( ogive.next_point(value, step).unwrap(), g(f(value) + step as i64).min(60), @@ -240,17 +240,17 @@ mod tests { } // Check that the ogive is correct - assert_eq!(ogive.bin_size, 700 as f64 / 1 as f64); + assert_eq!(ogive.bin_size, 700_f64 / 1_f64); assert_eq!(ogive.range, 10..=20); // Test value method - for point in vec![10, 15, 20] { + for point in [10, 15, 20] { assert_eq!(ogive.value(point).unwrap(), f(point), "value for {}", point); } // Test next_point method - for step in vec![50, 140, 200] { - for value in vec![10, 15, 20] { + for step in [50, 140, 200] { + for value in [10, 15, 20] { assert_eq!( ogive.next_point(value, step).unwrap(), g(f(value) + step as i64).min(20), diff --git a/graph/src/util/timed_cache.rs b/graph/src/util/timed_cache.rs index 20ac7ba49fd..8f64c844630 100644 --- a/graph/src/util/timed_cache.rs +++ b/graph/src/util/timed_cache.rs @@ -36,18 +36,18 @@ impl TimedCache { /// return `None` otherwise. Note that expired entries stay in the cache /// as it is assumed that, after returning `None`, the caller will /// immediately overwrite that entry with a call to `set` - pub fn get(&self, key: &Q) -> Option> + pub fn get(&self, key: &Q) -> Option> where K: Borrow + Eq + Hash, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { self.get_at(key, Instant::now()) } - fn get_at(&self, key: &Q, now: Instant) -> Option> + fn get_at(&self, key: &Q, now: Instant) -> Option> where K: Borrow + Eq + Hash, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { match self.entries.read().unwrap().get(key) { Some(CacheEntry { value, expires }) if expires >= &now => Some(value.clone()), @@ -94,10 +94,10 @@ impl TimedCache { /// Remove an entry from the cache. 
If there was an entry for `key`, /// return the value associated with it and whether the entry is still /// live - pub fn remove(&self, key: &Q) -> Option<(Arc, bool)> + pub fn remove(&self, key: &Q) -> Option<(Arc, bool)> where K: Borrow + Eq + Hash, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { self.entries .write() diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index d9cb14684f6..898bfc15f9b 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -14,3 +14,6 @@ stable-hash_legacy = { git = "https://github.com/graphprotocol/stable-hash", bra parking_lot = "0.12" anyhow = "1.0" async-recursion = "1.1.1" + +[lints] +workspace = true diff --git a/graphql/src/execution/ast.rs b/graphql/src/execution/ast.rs index 0f20845e5d5..65bdb6298d1 100644 --- a/graphql/src/execution/ast.rs +++ b/graphql/src/execution/ast.rs @@ -315,7 +315,7 @@ impl Field { // can find the field type entity_type .field(&field.name) - .map_or(false, |field| !field.is_derived()) + .is_some_and(|field| !field.is_derived()) }) .filter_map(|field| { if field.name.starts_with("__") { @@ -351,13 +351,13 @@ impl Field { .map(|value| match value { r::Value::Enum(interval) => interval.parse::().map_err(|_| { QueryExecutionError::InvalidArgumentError( - self.position.clone(), + self.position, kw::INTERVAL.to_string(), q::Value::from(value.clone()), ) }), _ => Err(QueryExecutionError::InvalidArgumentError( - self.position.clone(), + self.position, kw::INTERVAL.to_string(), q::Value::from(value.clone()), )), diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 8173f00f2bf..48477d3eb5f 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -258,8 +258,8 @@ where resolver: introspection_resolver, query: self.query.cheap_clone(), deadline: self.deadline, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, // `cache_status` is a dead value for the introspection context. cache_status: AtomicCell::new(CacheStatus::Miss), @@ -309,7 +309,7 @@ pub(crate) async fn execute_root_selection_set_uncached( let ictx = ctx.as_introspection_context(); values.append( - execute_selection_set_to_map(&ictx, &intro_set, &*INTROSPECTION_QUERY_TYPE, None) + execute_selection_set_to_map(&ictx, &intro_set, &INTROSPECTION_QUERY_TYPE, None) .await?, ); } @@ -629,7 +629,6 @@ async fn resolve_field_value( s::Type::ListType(inner_type) => { resolve_field_value_for_list_type( ctx, - object_type, field_value, field, field_definition, @@ -692,7 +691,6 @@ async fn resolve_field_value_for_named_type( #[async_recursion] async fn resolve_field_value_for_list_type( ctx: &ExecutionContext, - object_type: &s::ObjectType, field_value: Option, field: &a::Field, field_definition: &s::Field, @@ -700,15 +698,8 @@ async fn resolve_field_value_for_list_type( ) -> Result> { match inner_type { s::Type::NonNullType(inner_type) => { - resolve_field_value_for_list_type( - ctx, - object_type, - field_value, - field, - field_definition, - inner_type, - ) - .await + resolve_field_value_for_list_type(ctx, field_value, field, field_definition, inner_type) + .await } s::Type::NamedType(ref type_name) => { @@ -898,8 +889,8 @@ async fn complete_value( } /// Resolves an abstract type (interface, union) into an object type based on the given value. 
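The `Q: Hash + Eq + ?Sized` bounds added to `TimedCache::get`, `get_at`, and `remove` above let a cache keyed by `String` be queried with a plain `&str`, mirroring the bounds on `HashMap::get`. A standalone sketch of the same relaxation with a hypothetical `Cache` wrapper:

use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;

struct Cache<K, V> {
    entries: HashMap<K, V>,
}

impl<K: Eq + Hash, V> Cache<K, V> {
    fn get<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized, // without `?Sized`, `Q = str` would be rejected
    {
        self.entries.get(key)
    }
}

fn main() {
    let mut entries = HashMap::new();
    entries.insert("id".to_string(), 42);
    let cache = Cache { entries };

    // `Q` is inferred as the unsized `str`, so no temporary `String` is needed.
    assert_eq!(cache.get("id"), Some(&42));
}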
-fn resolve_abstract_type<'a>( - ctx: &'a ExecutionContext, +fn resolve_abstract_type( + ctx: &ExecutionContext, abstract_type: &s::TypeDefinition, object_value: &r::Value, ) -> Result> { diff --git a/graphql/src/execution/mod.rs b/graphql/src/execution/mod.rs index 8e409d66770..9cd5db531df 100644 --- a/graphql/src/execution/mod.rs +++ b/graphql/src/execution/mod.rs @@ -1,5 +1,6 @@ mod cache; /// Implementation of the GraphQL execution algorithm. +#[allow(clippy::module_inception)] mod execution; mod query; /// Common trait for field resolvers used in the execution. diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index e8593f27fba..1eb377acafb 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -602,9 +602,7 @@ impl<'s> RawQuery<'s> { Ok(complexity) => Ok(complexity), Err(ComplexityError::Invalid) => Ok(0), Err(ComplexityError::TooDeep) => Err(QueryExecutionError::TooDeep(max_depth)), - Err(ComplexityError::Overflow) => { - Err(QueryExecutionError::TooComplex(u64::max_value(), 0)) - } + Err(ComplexityError::Overflow) => Err(QueryExecutionError::TooComplex(u64::MAX, 0)), Err(ComplexityError::CyclicalFragment(name)) => { Err(QueryExecutionError::CyclicalFragment(name)) } @@ -730,21 +728,17 @@ impl Transform { } /// Interpolate variable references in the arguments `args` - fn interpolate_arguments( - &self, - args: Vec<(String, q::Value)>, - pos: &q::Pos, - ) -> Vec<(String, r::Value)> { + fn interpolate_arguments(&self, args: Vec<(String, q::Value)>) -> Vec<(String, r::Value)> { args.into_iter() .map(|(name, val)| { - let val = self.interpolate_value(val, pos); + let val = self.interpolate_value(val); (name, val) }) .collect() } /// Turn `value` into an `r::Value` by resolving variable references - fn interpolate_value(&self, value: q::Value, pos: &q::Pos) -> r::Value { + fn interpolate_value(&self, value: q::Value) -> r::Value { match value { q::Value::Variable(var) => self.variable(&var), q::Value::Int(ref num) => { @@ -758,14 +752,14 @@ impl Transform { q::Value::List(vals) => { let vals = vals .into_iter() - .map(|val| self.interpolate_value(val, pos)) + .map(|val| self.interpolate_value(val)) .collect(); r::Value::List(vals) } q::Value::Object(map) => { let mut rmap = BTreeMap::new(); for (key, value) in map.into_iter() { - let value = self.interpolate_value(value, pos); + let value = self.interpolate_value(value); rmap.insert(key.into(), value); } r::Value::object(rmap) @@ -788,7 +782,7 @@ impl Transform { position, arguments, } = dir; - let arguments = self.interpolate_arguments(arguments, &position); + let arguments = self.interpolate_arguments(arguments); a::Directive { name, position, @@ -905,7 +899,7 @@ impl Transform { return Ok(None); } - let mut arguments = self.interpolate_arguments(arguments, &position); + let mut arguments = self.interpolate_arguments(arguments); self.coerce_argument_values(&mut arguments, parent_type, &name)?; let is_leaf_type = self.schema.document().is_leaf_type(&field_type.field_type); diff --git a/graphql/src/query/ext.rs b/graphql/src/query/ext.rs index 44d7eb5306a..4abca7a4b6c 100644 --- a/graphql/src/query/ext.rs +++ b/graphql/src/query/ext.rs @@ -54,22 +54,17 @@ impl ValueExt for q::Value { } } -#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, Default)] pub enum BlockConstraint { Hash(BlockHash), Number(BlockNumber), /// Execute the query on the latest block only if the the subgraph has progressed to or past the /// given block number. 
Min(BlockNumber), + #[default] Latest, } -impl Default for BlockConstraint { - fn default() -> Self { - BlockConstraint::Latest - } -} - impl BlockConstraint { /// Return the `Some(hash)` if this constraint constrains by hash, /// otherwise return `None` diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 6723c2802ae..293dcaa111b 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -288,10 +288,7 @@ where .to_result()?; let query_start = Instant::now(); - let result = store - .execute_sql(&req.query) - .await - .map_err(|e| QueryExecutionError::from(e)); + let result = store.execute_sql(&req.query).await; self.load_manager.record_work( store.shard(), diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 451c4d19422..ce43dee97a9 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -219,7 +219,7 @@ fn build_list_filter_from_value( } /// build a filter which has list of nested filters -fn build_list_filter_from_object<'a>( +fn build_list_filter_from_object( entity: &ObjectOrInterface, object: &Object, schema: &InputSchema, @@ -235,13 +235,13 @@ fn build_list_filter_from_object<'a>( } /// Parses a GraphQL input object into an EntityFilter, if present. -fn build_filter_from_object<'a>( +fn build_filter_from_object( entity: &ObjectOrInterface, object: &Object, schema: &InputSchema, ) -> Result, QueryExecutionError> { // Check if we have both column filters and 'or' operator at the same level - if let Some(_) = object.get("or") { + if object.get("or").is_some() { let column_filters: Vec = object .iter() .filter_map(|(key, _)| { @@ -738,9 +738,9 @@ mod tests { &object, BLOCK_NUMBER_MAX, field, - std::u32::MAX, - std::u32::MAX, - &*&INPUT_SCHEMA, + u32::MAX, + u32::MAX, + &INPUT_SCHEMA, ) .unwrap() } @@ -1042,9 +1042,9 @@ mod tests { &object, BLOCK_NUMBER_MAX, &query_field, - std::u32::MAX, - std::u32::MAX, - &*INPUT_SCHEMA, + u32::MAX, + u32::MAX, + &INPUT_SCHEMA, ); assert!(result.is_err()); @@ -1100,9 +1100,9 @@ mod tests { &object, BLOCK_NUMBER_MAX, &query_field, - std::u32::MAX, - std::u32::MAX, - &*INPUT_SCHEMA, + u32::MAX, + u32::MAX, + &INPUT_SCHEMA, ); assert!(result.is_err()); @@ -1194,9 +1194,9 @@ mod tests { &object, BLOCK_NUMBER_MAX, &query_field, - std::u32::MAX, - std::u32::MAX, - &*INPUT_SCHEMA, + u32::MAX, + u32::MAX, + &INPUT_SCHEMA, ); assert!(result.is_err()); diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index 500964ea7a2..426e921f2c6 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -164,8 +164,7 @@ impl StoreResolver { let Some(block) = field .selection_set .fields() - .map(|(_, iter)| iter) - .flatten() + .flat_map(|(_, iter)| iter) .find(|f| f.name == BLOCK) else { return false; @@ -173,8 +172,7 @@ impl StoreResolver { block .selection_set .fields() - .map(|(_, iter)| iter) - .flatten() + .flat_map(|(_, iter)| iter) .any(|f| f.name == TIMESTAMP || f.name == PARENT_HASH) } @@ -248,7 +246,7 @@ impl StoreResolver { "__typename".into(), r::Value::String(META_FIELD_TYPE.to_string()), ); - return Ok(r::Value::object(map)); + Ok(r::Value::object(map)) } } @@ -299,7 +297,7 @@ impl Resolver for StoreResolver { fn child_id(child: &r::Value) -> String { match child { r::Value::Object(child) => child - .get(&*ID) + .get(&ID) .map(|id| id.to_string()) .unwrap_or("(no id)".to_string()), _ => "(no child object)".to_string(), diff --git a/graphql/src/values/coercion.rs b/graphql/src/values/coercion.rs index b0365e7f335..9d85059b33c 100644 --- 
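The `BlockConstraint` change above (and the later `PoolSize`, `Transport`, and `*ValueKind` enums in this diff) replaces a hand-written `impl Default` with `#[derive(Default)]` plus a `#[default]` marker on one variant, which is what clippy's `derivable_impls` suggests. A minimal sketch of the idiom with an illustrative enum:

```rust
// `#[derive(Default)]` on an enum requires marking exactly one unit
// variant as the default (stable since Rust 1.62).
#[derive(Debug, PartialEq, Default)]
enum Constraint {
    Number(u64),
    #[default]
    Latest,
}

fn main() {
    assert_eq!(Constraint::default(), Constraint::Latest);
}
```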
a/graphql/src/values/coercion.rs +++ b/graphql/src/values/coercion.rs @@ -37,7 +37,7 @@ impl MaybeCoercible for q::Value { ("BigDecimal", q::Value::String(s)) => Ok(r::Value::String(s)), ("Int", q::Value::Int(num)) => { let n = num.as_i64().ok_or_else(|| q::Value::Int(num.clone()))?; - if i32::min_value() as i64 <= n && n <= i32::max_value() as i64 { + if i32::MIN as i64 <= n && n <= i32::MAX as i64 { Ok(r::Value::Int((n as i32).into())) } else { Err(q::Value::Int(num)) @@ -88,7 +88,7 @@ fn coerce_to_definition<'a>( let def = t .fields .iter() - .find(|f| f.name == &*name) + .find(|f| f.name == *name) .ok_or_else(|| object_for_error.clone())?; coerced_object.insert( name.clone(), diff --git a/justfile b/justfile index 32ae928faa3..de74a789498 100644 --- a/justfile +++ b/justfile @@ -7,8 +7,8 @@ format *EXTRA_FLAGS: cargo fmt --all {{EXTRA_FLAGS}} # Run Clippy linting (cargo clippy) -lint: - cargo clippy --no-deps -- --allow warnings +lint *EXTRA_FLAGS: + cargo clippy --all-targets {{EXTRA_FLAGS}} # Check Rust code (cargo check) check *EXTRA_FLAGS: diff --git a/node/Cargo.toml b/node/Cargo.toml index 63723442423..b1d6d05f741 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -44,3 +44,6 @@ json-structural-diff = { version = "0.2", features = ["colorize"] } # Dependencies related to Amp subgraphs tokio-util.workspace = true + +[lints] +workspace = true diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index f8c28079f13..792df8853c9 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -860,11 +860,12 @@ pub enum CheckBlockMethod { impl From for config::Opt { fn from(opt: Opt) -> Self { - let mut config_opt = config::Opt::default(); - config_opt.config = Some(opt.config); - config_opt.store_connection_pool_size = 5; - config_opt.node_id = opt.node_id; - config_opt + config::Opt { + config: Some(opt.config), + store_connection_pool_size: 5, + node_id: opt.node_id, + ..Default::default() + } } } @@ -1120,8 +1121,8 @@ async fn main() -> anyhow::Result<()> { } let node = match NodeId::new(&opt.node_id) { - Err(()) => { - eprintln!("invalid node id: {}", opt.node_id); + Err(node_id) => { + eprintln!("invalid node id: {}", node_id); std::process::exit(1); } Ok(node) => node, diff --git a/node/src/chain.rs b/node/src/chain.rs index 543a0cd5cfb..6e3c65848d9 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -11,8 +11,7 @@ use ethereum::ProviderEthRpcMetrics; use graph::anyhow::bail; use graph::blockchain::client::ChainClient; use graph::blockchain::{ - BasicBlockchainBuilder, Blockchain, BlockchainBuilder as _, BlockchainKind, BlockchainMap, - ChainIdentifier, + BasicBlockchainBuilder, BlockchainBuilder as _, BlockchainKind, BlockchainMap, ChainIdentifier, }; use graph::cheap_clone::CheapClone; use graph::components::network_provider::ChainName; @@ -110,7 +109,7 @@ pub fn create_substreams_networks( let parsed_networks = networks_by_kind .entry((chain.protocol, name.clone())) - .or_insert_with(Vec::new); + .or_default(); for _ in 0..firehose.conn_pool_size { parsed_networks.push(Arc::new(FirehoseEndpoint::new( @@ -137,7 +136,7 @@ pub fn create_substreams_networks( AdapterConfiguration::Substreams(FirehoseAdapterConfig { chain_id, kind, - adapters: endpoints.into(), + adapters: endpoints, }) }) .collect() @@ -179,7 +178,7 @@ pub fn create_firehose_networks( let parsed_networks = networks_by_kind .entry((chain.protocol, name.clone())) - .or_insert_with(Vec::new); + .or_default(); // Create n FirehoseEndpoints where n is the size of the pool. 
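The chain.rs hunks above swap `.or_insert_with(Vec::new)` for `.or_default()`, and manager.rs now builds `config::Opt` with struct-update syntax (`..Default::default()`) instead of mutating a default value field by field. A small sketch of the map-entry idiom, using std types only and made-up data:

```rust
use std::collections::HashMap;

fn main() {
    let mut by_chain: HashMap<String, Vec<u32>> = HashMap::new();

    // `.or_default()` is shorthand for `.or_insert_with(Default::default)`,
    // so for a `Vec` value it inserts an empty vector on first access.
    by_chain.entry("mainnet".to_string()).or_default().push(1);
    by_chain.entry("mainnet".to_string()).or_default().push(2);

    assert_eq!(by_chain["mainnet"], vec![1, 2]);
}
```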
If a // subgraph limit is defined for this endpoint then each endpoint @@ -212,7 +211,7 @@ pub fn create_firehose_networks( AdapterConfiguration::Firehose(FirehoseAdapterConfig { chain_id, kind, - adapters: endpoints.into(), + adapters: endpoints, }) }) .collect() @@ -244,7 +243,7 @@ pub async fn create_ethereum_networks( ) }); - Ok(try_join_all(eth_networks_futures).await?) + try_join_all(eth_networks_futures).await } /// Parses a single Ethereum connection string and returns its network name and `EthereumAdapter`. @@ -345,7 +344,7 @@ pub async fn create_ethereum_networks_for_chain( /// Deep integration chains (explicitly defined on the graph-node like Ethereum, Near, etc): /// - These can have adapter of any type. Adapters of firehose and rpc types are used by the Chain implementation, aka deep integration /// - The substreams adapters will trigger the creation of a Substreams chain, the priority for the block ingestor setup depends on the chain, if enabled at all. -/// Substreams Chain(chains the graph-node knows nothing about and are only accessible through substreams): +/// Substreams Chain(chains the graph-node knows nothing about and are only accessible through substreams): /// - This chain type is more generic and can only have adapters of substreams type. /// - Substreams chain are created as a "secondary" chain for deep integrations but in that case the block ingestor should be run by the main/deep integration chain. /// - These chains will use SubstreamsBlockIngestor by default. @@ -383,7 +382,7 @@ pub async fn networks_as_chains( None => { let ident = match timeout( config.genesis_validation_timeout, - networks.chain_identifier(&logger, chain_id), + networks.chain_identifier(logger, chain_id), ) .await { @@ -400,7 +399,7 @@ pub async fn networks_as_chains( } }; - async fn add_substreams( + async fn add_substreams( networks: &Networks, config: &Arc, chain_id: ChainName, @@ -410,7 +409,7 @@ pub async fn networks_as_chains( metrics_registry: Arc, ) { let substreams_endpoints = networks.substreams_endpoints(chain_id.clone()); - if substreams_endpoints.len() == 0 { + if substreams_endpoints.is_empty() { return; } @@ -442,7 +441,7 @@ pub async fn networks_as_chains( let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); let eth_adapters = networks.ethereum_rpcs(chain_id.clone()); - let cc = if firehose_endpoints.len() > 0 { + let cc = if !firehose_endpoints.is_empty() { ChainClient::::new_firehose(firehose_endpoints) } else { ChainClient::::new_rpc(eth_adapters.clone()) @@ -481,7 +480,7 @@ pub async fn networks_as_chains( blockchain_map .insert::(chain_id.clone(), Arc::new(chain)); - add_substreams::( + add_substreams( networks, config, chain_id.clone(), @@ -509,7 +508,7 @@ pub async fn networks_as_chains( ), ); - add_substreams::( + add_substreams( networks, config, chain_id.clone(), diff --git a/node/src/config.rs b/node/src/config.rs index db2a5c203e9..2f7db735e99 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -181,7 +181,7 @@ impl Config { pub fn from_str(config: &str, node: &str) -> Result { let mut config: Config = toml::from_str(config)?; - config.node = NodeId::new(node).map_err(|()| anyhow!("invalid node id {}", node))?; + config.node = NodeId::new(node).map_err(|node| anyhow!("invalid node id {}", node))?; config.validate()?; Ok(config) } @@ -191,7 +191,7 @@ impl Config { let mut stores = BTreeMap::new(); let chains = ChainSection::from_opt(opt)?; let node = NodeId::new(opt.node_id.to_string()) - .map_err(|()| anyhow!("invalid node id {}", 
opt.node_id))?; + .map_err(|node| anyhow!("invalid node id {}", node))?; stores.insert(PRIMARY_SHARD.to_string(), Shard::from_opt(true, opt)?); Ok(Config { node, @@ -306,7 +306,7 @@ impl Shard { let mut url = Url::parse(shellexpand::env(&self.connection)?.as_ref())?; // Put the PGAPPNAME into the URL since tokio-postgres ignores this // environment variable - if let Some(app_name) = std::env::var("PGAPPNAME").ok() { + if let Ok(app_name) = std::env::var("PGAPPNAME") { let query = match url.query() { Some(query) => { format!("{query}&application_name={app_name}") @@ -322,20 +322,15 @@ impl Shard { } } -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, Default)] #[serde(untagged)] pub enum PoolSize { + #[default] None, Fixed(u32), Rule(Vec), } -impl Default for PoolSize { - fn default() -> Self { - Self::None - } -} - impl PoolSize { fn five() -> Self { Self::Fixed(5) @@ -431,7 +426,7 @@ pub struct ChainSection { impl ChainSection { fn validate(&mut self) -> Result<()> { NodeId::new(&self.ingestor) - .map_err(|()| anyhow!("invalid node id for ingestor {}", &self.ingestor))?; + .map_err(|node| anyhow!("invalid node id for ingestor {}", node))?; for (_, chain) in self.chains.iter_mut() { chain.validate()? } @@ -469,7 +464,7 @@ impl ChainSection { fn parse_networks( chains: &mut BTreeMap, transport: Transport, - args: &Vec, + args: &[String], ) -> Result<()> { for (nr, arg) in args.iter().enumerate() { if arg.starts_with("wss://") @@ -507,7 +502,7 @@ impl ChainSection { })?; let (features, url_str) = rest.split_at(colon); - let (url, features) = if vec!["http", "https", "ws", "wss"].contains(&features) { + let (url, features) = if ["http", "https", "ws", "wss"].contains(&features) { (rest, DEFAULT_PROVIDER_FEATURES.to_vec()) } else { (&url_str[1..], features.split(',').collect()) @@ -953,9 +948,10 @@ enum ProviderField { Headers, } -#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq)] +#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Default)] pub enum Transport { #[serde(rename = "rpc")] + #[default] Rpc, #[serde(rename = "ws")] Ws, @@ -963,12 +959,6 @@ pub enum Transport { Ipc, } -impl Default for Transport { - fn default() -> Self { - Self::Rpc - } -} - impl std::fmt::Display for Transport { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use Transport::*; @@ -1032,8 +1022,7 @@ impl DeploymentPlacer for Deployment { .indexers .iter() .map(|idx| { - NodeId::new(idx.clone()) - .map_err(|()| format!("{} is not a valid node name", idx)) + NodeId::new(idx).map_err(|idx| format!("{} is not a valid node name", idx)) }) .collect::, _>>()?; Some((shards, indexers)) @@ -1080,7 +1069,7 @@ impl Rule { return Err(anyhow!("useless rule without indexers")); } for indexer in &self.indexers { - NodeId::new(indexer).map_err(|()| anyhow!("invalid node id {}", &indexer))?; + NodeId::new(indexer).map_err(|indexer| anyhow!("invalid node id {}", indexer))?; } self.shard_names().map_err(Error::from)?; Ok(()) @@ -1368,9 +1357,9 @@ mod tests { "#, ); - assert_eq!(true, actual.is_err()); + assert!(actual.is_err()); let err_str = actual.unwrap_err().to_string(); - assert_eq!(err_str.contains("missing field `url`"), true, "{}", err_str); + assert!(err_str.contains("missing field `url`"), "{}", err_str); } #[test] @@ -1383,14 +1372,9 @@ mod tests { "#, ); - assert_eq!(true, actual.is_err()); + assert!(actual.is_err()); let err_str = actual.unwrap_err().to_string(); - assert_eq!( - err_str.contains("missing field `features`"), - 
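The `map_err(|()| ...)` to `map_err(|node| ...)` changes above imply that the failing constructor now hands the rejected input back in its `Err`, so call sites no longer need to keep a copy just to report it. A hedged sketch of that error shape with a hypothetical `Name` type and validation rule; graph's actual `NodeId` definition is not shown in this diff:

```rust
// Hypothetical validating constructor: on failure it returns the rejected
// input in the error, so the caller can quote it without cloning up front.
#[derive(Debug)]
struct Name(String);

impl Name {
    fn new(raw: impl Into<String>) -> Result<Name, String> {
        let raw = raw.into();
        if !raw.is_empty() && raw.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') {
            Ok(Name(raw))
        } else {
            Err(raw)
        }
    }
}

fn main() {
    let err = Name::new("bad id!").unwrap_err();
    println!("invalid node id {}", err);
    assert!(Name::new("node_1").is_ok());
}
```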
true, - "{}", - err_str - ); + assert!(err_str.contains("missing field `features`"), "{}", err_str); } #[test] @@ -1500,9 +1484,9 @@ mod tests { "#, ); - assert_eq!(true, actual.is_err()); + assert!(actual.is_err()); let err_str = actual.unwrap_err().to_string(); - assert_eq!(err_str.contains("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified"),true, "{}", err_str); + assert!(err_str.contains("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified"), "{}", err_str); } #[test] @@ -1774,15 +1758,12 @@ mod tests { details = { type = "firehose", url = "http://localhost:9000", features = ["bananas"]} "#, ).unwrap().validate(); - assert_eq!(true, actual.is_err(), "{:?}", actual); + assert!(actual.is_err(), "{:?}", actual); if let Err(error) = actual { - assert_eq!( - true, - error - .to_string() - .starts_with("supported firehose endpoint filters are:") - ) + assert!(error + .to_string() + .starts_with("supported firehose endpoint filters are:")) } } @@ -1881,14 +1862,9 @@ mod tests { .unwrap(); let err = actual.validate(); - assert_eq!(true, err.is_err()); + assert!(err.is_err()); let err = err.unwrap_err(); - assert_eq!( - true, - err.to_string().contains("unique"), - "result: {:?}", - err - ); + assert!(err.to_string().contains("unique"), "result: {:?}", err); } #[test] @@ -1913,7 +1889,7 @@ mod tests { .unwrap(); let result = actual.validate(); - assert_eq!(true, result.is_ok(), "error: {:?}", result.unwrap_err()); + assert!(result.is_ok(), "error: {:?}", result.unwrap_err()); } #[test] diff --git a/node/src/helpers.rs b/node/src/helpers.rs index fd59d6d8d15..7b2e81335f8 100644 --- a/node/src/helpers.rs +++ b/node/src/helpers.rs @@ -51,9 +51,8 @@ async fn deploy_subgraph( true, ) .await - .and_then(|locator| { + .inspect(|locator| { info!(logger, "Subgraph deployed"; "name" => name.to_string(), "id" => subgraph_id.to_string(), "locator" => locator.to_string()); - Ok(locator) }) } diff --git a/node/src/launcher.rs b/node/src/launcher.rs index 944b80d4530..9c0bef19e44 100644 --- a/node/src/launcher.rs +++ b/node/src/launcher.rs @@ -119,7 +119,7 @@ async fn build_blockchain_map( let network_adapters = Networks::from_config( logger.cheap_clone(), - &config, + config, metrics_registry.cheap_clone(), endpoint_metrics, &provider_checks, @@ -129,10 +129,10 @@ async fn build_blockchain_map( let blockchain_map = network_adapters .blockchain_map( - &env_vars, - &logger, + env_vars, + logger, block_store, - &logger_factory, + logger_factory, metrics_registry.cheap_clone(), chain_head_update_listener, ) @@ -184,7 +184,7 @@ async fn spawn_block_ingestor( metrics_registry: &Arc, ) { let logger = logger.clone(); - let ingestors = Networks::block_ingestors(&logger, &blockchain_map) + let ingestors = Networks::block_ingestors(&logger, blockchain_map) .await .expect("unable to start block ingestors"); @@ -295,7 +295,7 @@ where if let Some(amp_client) = amp_client.cheap_clone() { let amp_instance_manager = graph_core::amp_subgraph::Manager::new( - &logger_factory, + logger_factory, metrics_registry.cheap_clone(), env_vars.cheap_clone(), &cancel_token, @@ -311,7 +311,7 @@ where } let subgraph_instance_manager = graph_core::subgraph::SubgraphInstanceManager::new( - &logger_factory, + logger_factory, env_vars.cheap_clone(), network_store.subgraph_store(), blockchain_map.cheap_clone(), @@ -330,7 +330,7 @@ where ); let subgraph_provider = graph_core::subgraph_provider::SubgraphProvider::new( - 
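The helpers.rs hunk above replaces an `and_then` closure that only logged and re-wrapped the value with `Result::inspect` (stable since Rust 1.76), which runs a side effect on the `Ok` value and passes the result through unchanged. A minimal sketch; `deploy` and the message are illustrative:

```rust
fn deploy() -> Result<u32, String> {
    Ok(42)
}

fn main() {
    // `.inspect` borrows the `Ok` value for the closure and returns the
    // original `Result`, so no `.and_then(|v| { ...; Ok(v) })` is needed.
    let out = deploy()
        .inspect(|locator| println!("subgraph deployed, locator = {locator}"))
        .unwrap();
    assert_eq!(out, 42);
}
```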
&logger_factory, + logger_factory, sg_count.cheap_clone(), network_store.subgraph_store(), link_resolver.cheap_clone(), @@ -342,8 +342,9 @@ where let version_switching_mode = ENV_VARS.subgraph_version_switching_mode; // Create named subgraph provider for resolving subgraph name->ID mappings - let subgraph_registrar = Arc::new(graph_core::subgraph::SubgraphRegistrar::new( - &logger_factory, + + Arc::new(graph_core::subgraph::SubgraphRegistrar::new( + logger_factory, link_resolver, Arc::new(subgraph_provider), network_store.subgraph_store(), @@ -353,9 +354,7 @@ where node_id.clone(), version_switching_mode, Arc::new(subgraph_settings), - )); - - subgraph_registrar + )) } fn build_graphql_server( @@ -368,20 +367,19 @@ fn build_graphql_server( ) -> GraphQLQueryServer> { let shards: Vec<_> = config.stores.keys().cloned().collect(); let load_manager = Arc::new(LoadManager::new( - &logger, + logger, shards, expensive_queries, metrics_registry.clone(), )); let graphql_runner = Arc::new(GraphQlRunner::new( - &logger, + logger, network_store.clone(), load_manager, metrics_registry, )); - let graphql_server = GraphQLQueryServer::new(&logger_factory, graphql_runner.clone()); - graphql_server + GraphQLQueryServer::new(logger_factory, graphql_runner.clone()) } /// Runs the Graph Node by initializing all components and starting all required services diff --git a/node/src/manager/color.rs b/node/src/manager/color.rs index cf10d2e22d4..5c89789f90c 100644 --- a/node/src/manager/color.rs +++ b/node/src/manager/color.rs @@ -15,6 +15,12 @@ pub struct Terminal { spec: ColorSpec, } +impl Default for Terminal { + fn default() -> Self { + Self::new() + } +} + impl Terminal { pub fn set_color_preference(pref: &str) { let choice = match pref { @@ -78,10 +84,10 @@ impl Terminal { F: FnOnce(&mut Self) -> io::Result, { self.spec.set_fg(Some(color)); - self.out.set_color(&self.spec).map_err(io::Error::from)?; + self.out.set_color(&self.spec)?; let res = f(self); self.spec = ColorSpec::new(); - self.out.set_color(&self.spec).map_err(io::Error::from)?; + self.out.set_color(&self.spec)?; res } } diff --git a/node/src/manager/commands/assign.rs b/node/src/manager/commands/assign.rs index 971d8a4687f..c5f451281be 100644 --- a/node/src/manager/commands/assign.rs +++ b/node/src/manager/commands/assign.rs @@ -35,7 +35,7 @@ pub async fn reassign( search: &DeploymentSearch, node: String, ) -> Result<(), Error> { - let node = NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; + let node = NodeId::new(node).map_err(|node| anyhow!("illegal node id `{}`", node))?; let locator = search.locate_unique(&primary).await?; let pconn = primary.get_permitted().await?; diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 8b6d36e9afa..08633ae586d 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -72,7 +72,7 @@ pub fn pools(config: &Config, nodes: Vec, shard: bool) -> Result<(), Err .into_iter() .map(|name| { NodeId::new(name.replace('-', "_")) - .map_err(|()| anyhow!("illegal node name `{}`", name)) + .map_err(|name| anyhow!("illegal node name `{}`", name)) }) .collect::>()?; // node -> shard_name -> size @@ -139,7 +139,7 @@ pub async fn provider( let metrics = Arc::new(EndpointMetrics::mock()); let caps = caps_from_features(features)?; - let networks = Networks::from_config(logger, &config, registry, metrics, &[]).await?; + let networks = Networks::from_config(logger, config, registry, metrics, &[]).await?; let network: ChainName = 
network.into(); let adapters = networks.ethereum_rpcs(network.clone()); @@ -157,7 +157,7 @@ pub async fn provider( } pub fn setting(name: &str) -> Result<(), Error> { - let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + let name = SubgraphName::new(name).map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; let env_vars = EnvVars::from_env().unwrap(); if let Some(path) = &env_vars.subgraph_settings { let settings = Settings::from_file(path) diff --git a/node/src/manager/commands/copy.rs b/node/src/manager/commands/copy.rs index c3fa4cca993..cafe82dacbb 100644 --- a/node/src/manager/commands/copy.rs +++ b/node/src/manager/commands/copy.rs @@ -148,10 +148,10 @@ async fn create_inner( ) } let shard = Shard::new(shard)?; - let node = NodeId::new(node.clone()).map_err(|()| anyhow!("invalid node id `{}`", node))?; + let node = NodeId::new(node).map_err(|node| anyhow!("invalid node id `{}`", node))?; let dst = subgraph_store - .copy_deployment(&src, shard, node, base_ptr, on_sync) + .copy_deployment(src, shard, node, base_ptr, on_sync) .await?; println!("created deployment {} as copy of {}", dst, src); diff --git a/node/src/manager/commands/create.rs b/node/src/manager/commands/create.rs index cfaa62aa958..db75e28d5c1 100644 --- a/node/src/manager/commands/create.rs +++ b/node/src/manager/commands/create.rs @@ -5,7 +5,7 @@ use graph_store_postgres::SubgraphStore; pub async fn run(store: Arc, name: String) -> Result<(), Error> { let name = SubgraphName::new(name.clone()) - .map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + .map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; println!("creating subgraph {}", name); store.create_subgraph(name).await?; diff --git a/node/src/manager/commands/deployment/info.rs b/node/src/manager/commands/deployment/info.rs index 4d121d3692a..08e8f1c517d 100644 --- a/node/src/manager/commands/deployment/info.rs +++ b/node/src/manager/commands/deployment/info.rs @@ -148,26 +148,23 @@ fn render( } table.push_row(["Node ID", &optional(deployment.node_id.as_ref())]); table.push_row(["Active", &deployment.is_active.to_string()]); - if let Some((_, status)) = deployments.get(0) { - if let Some(status) = status { - table.push_row(["Paused", &optional(status.is_paused)]); - table.push_row(["Synced", &status.is_synced.to_string()]); - table.push_row(["Health", status.health.as_str()]); - - let earliest = status.earliest_block_number; - let latest = status.latest_block.as_ref().map(|x| x.number); - let chain_head = status.chain_head_block.as_ref().map(|x| x.number); - let behind = match (latest, chain_head) { - (Some(latest), Some(chain_head)) => Some(chain_head - latest), - _ => None, - }; - - table.push_row(["Earliest Block", &earliest.to_string()]); - table.push_row(["Latest Block", &number(latest)]); - table.push_row(["Chain Head Block", &number(chain_head)]); - if let Some(behind) = behind { - table.push_row([" Blocks behind", &behind.to_string()]); - } + if let Some((_, Some(status))) = deployments.first() { + table.push_row(["Paused", &optional(status.is_paused)]); + table.push_row(["Synced", &status.is_synced.to_string()]); + table.push_row(["Health", status.health.as_str()]); + let earliest = status.earliest_block_number; + let latest = status.latest_block.as_ref().map(|x| x.number); + let chain_head = status.chain_head_block.as_ref().map(|x| x.number); + let behind = match (latest, chain_head) { + (Some(latest), Some(chain_head)) => Some(chain_head - latest), + _ => None, + }; + + 
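The deployment-info rendering above folds `deployments.get(0)` plus a nested `if let Some(status)` into a single pattern on `.first()`, matching both levels of `Option` at once. A small sketch of the same shape with made-up data:

```rust
fn main() {
    // (id, optional status) pairs, like the deployment listing above.
    let deployments: Vec<(u32, Option<&str>)> = vec![(1, Some("healthy"))];

    // One pattern walks both levels -- the slice's leading element and the
    // inner `Option` -- so no second nested `if let` is required.
    if let Some((_, Some(status))) = deployments.first() {
        assert_eq!(*status, "healthy");
    } else {
        unreachable!("a deployment with a status is present");
    }
}
```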
table.push_row(["Earliest Block", &earliest.to_string()]); + table.push_row(["Latest Block", &number(latest)]); + table.push_row(["Chain Head Block", &number(chain_head)]); + if let Some(behind) = behind { + table.push_row([" Blocks behind", &behind.to_string()]); } } } diff --git a/node/src/manager/commands/listen.rs b/node/src/manager/commands/listen.rs index d53dfaae455..0f61f8a5b2e 100644 --- a/node/src/manager/commands/listen.rs +++ b/node/src/manager/commands/listen.rs @@ -16,7 +16,7 @@ async fn listen(mgr: Arc) -> Result<(), Error> { .for_each(move |event| { serde_json::to_writer_pretty(std::io::stdout(), &event) .expect("event can be serialized to JSON"); - writeln!(std::io::stdout()).unwrap(); + println!(); std::io::stdout().flush().unwrap(); future::ready(()) }) diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 415eccbf984..5078ab37d02 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -86,7 +86,7 @@ impl PruneReporter for Progress { fn start_analyze(&mut self) { if !self.initial_analyze { - println!(""); + println!(); } print!("Analyze tables"); self.analyze_start = Instant::now(); @@ -105,7 +105,7 @@ impl PruneReporter for Progress { let stats: Vec<_> = stats .iter() .filter(|stat| self.initial_analyze || analyzed.contains(&stat.tablename.as_str())) - .map(|stats| stats.clone()) + .cloned() .collect(); println!( "\rAnalyzed {} tables in {}s{: ^30}", @@ -424,7 +424,7 @@ pub async fn status( let table_name = fmt::abbreviate(&table_name, 30); let rows = rows.map_or_null(|rows| rows.to_string()); let batch_size = batch_size.map_or_null(|b| b.to_string()); - let duration = started_at.map_or_null(|s| fmt::duration(&s, &finished_at)); + let duration = started_at.map_or_null(|s| fmt::duration(s, &finished_at)); let phase = phase.as_str(); writeln!(term, "{table_name:<30} | {:<15} {complete:>6} | {rows:>8} | {batch_size:>11} | {duration:>8}", diff --git a/node/src/manager/commands/query.rs b/node/src/manager/commands/query.rs index 6339b7bf9cc..04400bfb923 100644 --- a/node/src/manager/commands/query.rs +++ b/node/src/manager/commands/query.rs @@ -30,7 +30,7 @@ pub async fn run( QueryTarget::Deployment(id, Default::default()) } else { let name = SubgraphName::new(target.clone()) - .map_err(|()| anyhow!("illegal subgraph name `{}`", target))?; + .map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; QueryTarget::Name(name, Default::default()) }; diff --git a/node/src/manager/commands/remove.rs b/node/src/manager/commands/remove.rs index 3d03bdf6148..bcf9417569a 100644 --- a/node/src/manager/commands/remove.rs +++ b/node/src/manager/commands/remove.rs @@ -4,7 +4,7 @@ use graph::prelude::{anyhow, Error, SubgraphName, SubgraphStore as _}; use graph_store_postgres::SubgraphStore; pub async fn run(store: Arc, name: &str) -> Result<(), Error> { - let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + let name = SubgraphName::new(name).map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; println!("Removing subgraph {}", name); store.remove_subgraph(name).await?; diff --git a/node/src/manager/commands/rewind.rs b/node/src/manager/commands/rewind.rs index b407089d32a..0ca0c9d2bd2 100644 --- a/node/src/manager/commands/rewind.rs +++ b/node/src/manager/commands/rewind.rs @@ -16,7 +16,7 @@ use graph_store_postgres::{ConnectionPool, Store}; async fn block_ptr( store: BlockStore, locators: &HashSet<(String, DeploymentLocator)>, - searches: &Vec, + searches: 
&[DeploymentSearch], hash: &str, number: BlockNumber, force: bool, @@ -147,7 +147,7 @@ pub async fn run( println!("Pausing deployments"); for (_, locator) in &locators { - pause_or_resume(primary.clone(), &sender, &locator, true).await?; + pause_or_resume(primary.clone(), sender, locator, true).await?; } // There's no good way to tell that a subgraph has in fact stopped @@ -193,7 +193,7 @@ pub async fn run( println!("Resuming deployments"); for (_, locator) in &locators { - pause_or_resume(primary.clone(), &sender, locator, false).await?; + pause_or_resume(primary.clone(), sender, locator, false).await?; } Ok(()) } diff --git a/node/src/manager/display.rs b/node/src/manager/display.rs index 7d27b8269cb..cda62f6ea74 100644 --- a/node/src/manager/display.rs +++ b/node/src/manager/display.rs @@ -58,6 +58,7 @@ impl List { /// A more general list of columns than `List`. In practical terms, this is /// a very simple table with two columns, where both columns are /// left-aligned +#[derive(Default)] pub struct Columns { widths: Vec, rows: Vec, @@ -84,15 +85,6 @@ impl Columns { } } -impl Default for Columns { - fn default() -> Self { - Self { - widths: Vec::new(), - rows: Vec::new(), - } - } -} - pub enum Row { Cells(Vec), Separator, @@ -122,11 +114,7 @@ impl Row { } Row::Separator => { let total_width = widths.iter().sum::(); - let extra_width = if total_width >= LINE_WIDTH { - 0 - } else { - LINE_WIDTH - total_width - }; + let extra_width = LINE_WIDTH.saturating_sub(total_width); for (idx, width) in widths.iter().enumerate() { if idx > 0 { write!(out, "-+-")?; diff --git a/node/src/manager/fmt.rs b/node/src/manager/fmt.rs index 6aaa12192a7..45807fecbf7 100644 --- a/node/src/manager/fmt.rs +++ b/node/src/manager/fmt.rs @@ -24,9 +24,7 @@ impl MapOrNull for Option { where F: FnOnce(&T) -> String, { - self.as_ref() - .map(|value| f(value)) - .unwrap_or_else(|| NULL.to_string()) + self.as_ref().map(f).unwrap_or_else(|| NULL.to_string()) } } diff --git a/node/src/network_setup.rs b/node/src/network_setup.rs index 63cfe8097b4..f9cc723b181 100644 --- a/node/src/network_setup.rs +++ b/node/src/network_setup.rs @@ -117,17 +117,17 @@ impl Networks { adapters: vec![], rpc_provider_manager: ProviderManager::new( Logger::root(Discard, o!()), - vec![].into_iter(), + vec![], ProviderCheckStrategy::MarkAsValid, ), firehose_provider_manager: ProviderManager::new( Logger::root(Discard, o!()), - vec![].into_iter(), + vec![], ProviderCheckStrategy::MarkAsValid, ), substreams_provider_manager: ProviderManager::new( Logger::root(Discard, o!()), - vec![].into_iter(), + vec![], ProviderCheckStrategy::MarkAsValid, ), } @@ -198,20 +198,20 @@ impl Networks { let eth = create_ethereum_networks( logger.cheap_clone(), registry, - &config, + config, endpoint_metrics.cheap_clone(), chain_filter, ) .await?; let firehose = create_firehose_networks( logger.cheap_clone(), - &config, + config, endpoint_metrics.cheap_clone(), chain_filter, ); let substreams = create_substreams_networks( logger.cheap_clone(), - &config, + config, endpoint_metrics, chain_filter, ); @@ -320,16 +320,12 @@ impl Networks { ), firehose_provider_manager: ProviderManager::new( logger.clone(), - firehose_adapters - .into_iter() - .map(|(chain_id, endpoints)| (chain_id, endpoints)), + firehose_adapters, ProviderCheckStrategy::RequireAll(provider_checks), ), substreams_provider_manager: ProviderManager::new( logger.clone(), - substreams_adapters - .into_iter() - .map(|(chain_id, endpoints)| (chain_id, endpoints)), + substreams_adapters, 
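The separator-width computation in display.rs above now uses `saturating_sub`, which clamps unsigned subtraction at zero instead of branching (or overflowing in debug builds). A tiny sketch with an illustrative width constant:

```rust
fn main() {
    const LINE_WIDTH: usize = 80;

    for total_width in [30usize, 80, 200] {
        // Unsigned subtraction that floors at 0 instead of panicking on underflow.
        let extra = LINE_WIDTH.saturating_sub(total_width);
        assert_eq!(
            extra,
            if total_width >= LINE_WIDTH { 0 } else { LINE_WIDTH - total_width }
        );
    }
}
```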
ProviderCheckStrategy::RequireAll(provider_checks), ), }; @@ -389,7 +385,7 @@ impl Networks { for ((_, id), chain) in blockchain_map .iter() - .filter(|((kind, id), _)| BlockchainKind::Substreams.eq(&kind) && !visited.contains(id)) + .filter(|((kind, id), _)| BlockchainKind::Substreams.eq(kind) && !visited.contains(id)) { block_ingestor::(logger, id, chain, &mut res).await? } diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index b0ec8018db2..cb5493fbe01 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -30,7 +30,7 @@ fn mock_host_exports( store: Arc, api_version: Version, ) -> HostExports { - let templates = vec![data_source::DataSourceTemplate::Onchain::( + let templates = [data_source::DataSourceTemplate::Onchain::( DataSourceTemplate { kind: String::from("ethereum/contract"), name: String::from("example template"), diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index f8ee8c38116..8bbf5719f7d 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -190,7 +190,7 @@ impl WasmInstanceExt for WasmInstance { async fn invoke_export0_void(&mut self, f: &str) -> Result<(), Error> { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); func.call_async(&mut self.store.as_context_mut(), ()).await @@ -199,7 +199,7 @@ impl WasmInstanceExt for WasmInstance { async fn invoke_export0(&mut self, f: &str) -> AscPtr { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr: u32 = func @@ -212,7 +212,7 @@ impl WasmInstanceExt for WasmInstance { async fn takes_ptr_returns_ptr(&mut self, f: &str, arg: AscPtr) -> AscPtr { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr: u32 = func @@ -229,7 +229,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr = self.asc_new(arg).await.unwrap(); @@ -268,7 +268,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let arg0 = self.asc_new(arg0).await.unwrap(); @@ -297,7 +297,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let arg0 = self.asc_new(arg0).await.unwrap(); @@ -312,7 +312,7 @@ impl WasmInstanceExt for WasmInstance { async fn invoke_export0_val(&mut self, func: &str) -> V { let func = self .get_func(func) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); func.call_async(&mut self.store.as_context_mut(), ()) @@ -327,7 +327,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(func) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr = self.asc_new(v).await.unwrap(); @@ -339,7 +339,7 @@ impl WasmInstanceExt for WasmInstance { async fn takes_val_returns_ptr

(&mut self, fn_name: &str, val: impl SyncWasmTy) -> AscPtr
{ let func = self .get_func(fn_name) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr: u32 = func @@ -848,7 +848,7 @@ async fn test_abort(api_version: Version, error_msg: &str) { .await; let res: Result<(), _> = instance .get_func("abort") - .typed(&instance.store.as_context()) + .typed(instance.store.as_context()) .unwrap() .call_async(&mut instance.store.as_context_mut(), ()) .await; @@ -1081,7 +1081,7 @@ async fn test_entity_store(api_version: Version) { assert_eq!(Some(&Value::from("steve")), data.get("id")); assert_eq!(Some(&Value::from("Steve-O")), data.get("name")); } - _ => assert!(false, "expected Overwrite modification"), + _ => panic!("expected Overwrite modification"), } // Load, set, save cycle for a new entity with fulltext API @@ -1104,7 +1104,7 @@ async fn test_entity_store(api_version: Version) { assert_eq!(Some(&Value::from("herobrine")), data.get("id")); assert_eq!(Some(&Value::from("Brine-O")), data.get("name")); } - _ => assert!(false, "expected Insert modification"), + _ => panic!("expected Insert modification"), }; } @@ -1123,22 +1123,16 @@ fn test_detect_contract_calls(api_version: Version) { &wasm_file_path("abi_store_value.wasm", api_version.clone()), api_version.clone(), ); - assert_eq!( - data_source_without_calls - .mapping - .requires_archive() - .unwrap(), - false - ); + assert!(!data_source_without_calls + .mapping + .requires_archive() + .unwrap()); let data_source_with_calls = mock_data_source( &wasm_file_path("contract_calls.wasm", api_version.clone()), api_version, ); - assert_eq!( - data_source_with_calls.mapping.requires_archive().unwrap(), - true - ); + assert!(data_source_with_calls.mapping.requires_archive().unwrap()); } #[graph::test] @@ -1647,8 +1641,8 @@ async fn generate_id() { "bin2", IdType::Bytes.parse("0x0000000c00000003".into()).unwrap(), ), - ("int1", Id::Int8(0x0000_000c__0000_0000)), - ("int2", Id::Int8(0x0000_000c__0000_0001)), + ("int1", Id::Int8(0x0000_000c_0000_0000)), + ("int2", Id::Int8(0x0000_000c_0000_0001)), ] .into_iter(), ); @@ -1789,7 +1783,7 @@ async fn test_yaml_parsing(api_version: Version, gas_used: u64) { test("{a: 1, - b: 2}", "error").await; // Test size limit; - test(&"x".repeat(10_000_0001), "error").await; + test(&"x".repeat(100_000_001), "error").await; // Test nulls; test("null", "(0) null").await; diff --git a/runtime/test/src/test/abi.rs b/runtime/test/src/test/abi.rs index 886626a2871..ba9048f6040 100644 --- a/runtime/test/src/test/abi.rs +++ b/runtime/test/src/test/abi.rs @@ -20,7 +20,7 @@ async fn test_unbounded_loop(api_version: Version) { .0; let res: Result<(), _> = instance .get_func("loop") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .call_async(&mut instance.store.as_context_mut(), ()) .await; @@ -54,7 +54,7 @@ async fn test_unbounded_recursion(api_version: Version) { .await; let res: Result<(), _> = instance .get_func("rabbit_hole") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .call_async(&mut instance.store.as_context_mut(), ()) .await; @@ -251,7 +251,7 @@ async fn test_abi_ethabi_token_identity(api_version: Version) { let token_bool_ptr = instance.asc_new(&token_bool).await.unwrap(); let func = instance .get_func("token_to_bool") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); let boolean: i32 = func @@ -324,7 +324,7 @@ async fn test_abi_store_value(api_version: Version) { // 
Value::Null let func = instance .get_func("value_null") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); let ptr: u32 = func @@ -342,13 +342,13 @@ async fn test_abi_store_value(api_version: Version) { assert_eq!(new_value, Value::from(string)); // Value::Int - let int = i32::min_value(); + let int = i32::MIN; let new_value_ptr = instance.takes_val_returns_ptr("value_from_int", int).await; let new_value: Value = instance.asc_get(new_value_ptr).unwrap(); assert_eq!(new_value, Value::Int(int)); // Value::Int8 - let int8 = i64::min_value(); + let int8 = i64::MIN; let new_value_ptr = instance .takes_val_returns_ptr("value_from_int8", int8) .await; @@ -381,7 +381,7 @@ async fn test_abi_store_value(api_version: Version) { // Value::List let func = instance .get_func("array_from_values") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); @@ -578,7 +578,7 @@ async fn test_invalid_discriminant(api_version: Version) { let func = instance .get_func("invalid_discriminant") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); let ptr: u32 = func diff --git a/runtime/test/src/test_padding.rs b/runtime/test/src/test_padding.rs index ef750674178..93e1a642724 100644 --- a/runtime/test/src/test_padding.rs +++ b/runtime/test/src/test_padding.rs @@ -185,7 +185,7 @@ async fn manual_padding_should_fail(api_version: semver::Version) { let func = instance .get_func("test_padding_manual") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); @@ -220,7 +220,7 @@ async fn manual_padding_manualy_fixed_ok(api_version: semver::Version) { let func = instance .get_func("test_padding_manual") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index e2260a7bb59..c934cc943be 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -21,3 +21,6 @@ wasm-instrument = { version = "0.2.0", features = ["std", "sign_ext"] } parity-wasm = { version = "0.45", features = ["std", "sign_ext"] } serde_yaml = { workspace = true } + +[lints] +workspace = true diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index 4fe5b3192cd..0fac865ab0e 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -1,3 +1,5 @@ +//! Rust types that have with a direct correspondence to an Asc class, +//! with their `AscType` implementations. use async_trait::async_trait; use ethabi; @@ -18,9 +20,6 @@ use graph_runtime_derive::AscType; use crate::asc_abi::{v0_0_4, v0_0_5}; use semver::Version; -///! Rust types that have with a direct correspondence to an Asc class, -///! with their `AscType` implementations. - /// Wrapper of ArrayBuffer for multiple AssemblyScript versions. /// It just delegates its method calls to the correct mappings apiVersion. 
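The class.rs hunk above moves the module description from `///!`-prefixed lines (which are outer doc comments and attach to whatever item happens to follow) to proper `//!` inner doc comments at the top of the file. A minimal sketch of the distinction; the struct name is illustrative:

```rust
//! Inner doc comment: documents the enclosing module (or crate) itself,
//! so it belongs at the top of the file, before any items.

/// Outer doc comment: documents the single item that follows it.
pub struct ArrayBufferLike;

fn main() {
    let _ = ArrayBufferLike;
}
```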
pub enum ArrayBuffer { @@ -528,8 +527,9 @@ impl AscIndexId for AscEnum { pub type AscEnumArray = AscPtr>>>; #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub enum EthereumValueKind { + #[default] Address, FixedBytes, Bytes, @@ -559,22 +559,17 @@ impl EthereumValueKind { } } -impl Default for EthereumValueKind { - fn default() -> Self { - EthereumValueKind::Address - } -} - impl AscValue for EthereumValueKind {} #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub enum StoreValueKind { String, Int, BigDecimal, Bool, Array, + #[default] Null, Bytes, BigInt, @@ -601,12 +596,6 @@ impl StoreValueKind { } } -impl Default for StoreValueKind { - fn default() -> Self { - StoreValueKind::Null - } -} - impl AscValue for StoreValueKind {} /// Big ints are represented using signed number representation. Note: This differs @@ -670,8 +659,9 @@ pub type AscEntity = AscTypedMap>; pub(crate) type AscJson = AscTypedMap>; #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub enum JsonValueKind { + #[default] Null, Bool, Number, @@ -680,12 +670,6 @@ pub enum JsonValueKind { Object, } -impl Default for JsonValueKind { - fn default() -> Self { - JsonValueKind::Null - } -} - impl AscValue for JsonValueKind {} impl JsonValueKind { @@ -780,8 +764,9 @@ impl AscIndexId for AscWrapped>> { } #[repr(u32)] -#[derive(AscType, Clone, Copy)] +#[derive(AscType, Clone, Copy, Default)] pub enum YamlValueKind { + #[default] Null, Bool, Number, @@ -791,12 +776,6 @@ pub enum YamlValueKind { Tagged, } -impl Default for YamlValueKind { - fn default() -> Self { - YamlValueKind::Null - } -} - impl AscValue for YamlValueKind {} impl YamlValueKind { diff --git a/runtime/wasm/src/asc_abi/v0_0_4.rs b/runtime/wasm/src/asc_abi/v0_0_4.rs index c4098ac0889..3eb796434e6 100644 --- a/runtime/wasm/src/asc_abi/v0_0_4.rs +++ b/runtime/wasm/src/asc_abi/v0_0_4.rs @@ -12,7 +12,7 @@ use graph_runtime_derive::AscType; use crate::asc_abi::class; /// Module related to AssemblyScript version v0.6. - +/// /// Asc std ArrayBuffer: "a generic, fixed-length raw binary data buffer". /// See https://github.com/AssemblyScript/assemblyscript/wiki/Memory-Layout-&-Management/86447e88be5aa8ec633eaf5fe364651136d136ab#arrays pub struct ArrayBuffer { @@ -35,7 +35,7 @@ impl ArrayBuffer { content.extend(&asc_bytes); } - if content.len() > u32::max_value() as usize { + if content.len() > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow::anyhow!( "slice cannot fit in WASM memory" ))); @@ -62,7 +62,7 @@ impl ArrayBuffer { self.content[byte_offset..] .chunks(size_of::()) .take(length) - .map(|asc_obj| T::from_asc_bytes(asc_obj, &api_version)) + .map(|asc_obj| T::from_asc_bytes(asc_obj, api_version)) .collect() // TODO: This code is preferred as it validates the length of the array. 
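The size checks above (`content.len() > u32::MAX as usize`) and the earlier test values use the associated constants `i32::MIN`, `i64::MIN`, and `u32::MAX` in place of the older `min_value()`/`max_value()` functions, which are soft-deprecated. A small sketch of the equivalent range check:

```rust
fn main() {
    // Associated constants replace the legacy `min_value()` / `max_value()`
    // functions; the values are identical.
    assert_eq!(i32::MIN, -2_147_483_648);
    assert_eq!(u32::MAX, 4_294_967_295);

    // Typical range check before narrowing an i64 into an i32.
    let n: i64 = 42;
    assert!(i32::MIN as i64 <= n && n <= i32::MAX as i64);
}
```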
@@ -96,7 +96,7 @@ impl AscType for ArrayBuffer { let total_size = self.byte_length as usize + header_size; let total_capacity = total_size.next_power_of_two(); let extra_capacity = total_capacity - total_size; - asc_layout.extend(std::iter::repeat(0).take(extra_capacity)); + asc_layout.extend(std::iter::repeat_n(0, extra_capacity)); assert_eq!(asc_layout.len(), total_capacity); Ok(asc_layout) @@ -194,7 +194,7 @@ pub struct AscString { impl AscString { pub fn new(content: &[u16]) -> Result { - if size_of_val(content) > u32::max_value() as usize { + if size_of_val(content) > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow!( "string cannot fit in WASM memory" ))); @@ -249,7 +249,7 @@ impl AscType for AscString { } // Prevents panic when accessing offset + 1 in the loop - if asc_obj.len() % 2 != 0 { + if !asc_obj.len().is_multiple_of(2) { return Err(DeterministicHostError::from(anyhow::anyhow!( "Invalid string length" ))); diff --git a/runtime/wasm/src/asc_abi/v0_0_5.rs b/runtime/wasm/src/asc_abi/v0_0_5.rs index 906f6ff1cf6..3c769845b2b 100644 --- a/runtime/wasm/src/asc_abi/v0_0_5.rs +++ b/runtime/wasm/src/asc_abi/v0_0_5.rs @@ -16,7 +16,7 @@ use crate::asc_abi::class; /// All `to_asc_bytes`/`from_asc_bytes` only consider the #data/content/payload /// not the #header, that's handled on `AscPtr`. /// Header in question: https://www.assemblyscript.org/memory.html#common-header-layout - +/// /// Similar as JS ArrayBuffer, "a generic, fixed-length raw binary data buffer". /// See https://www.assemblyscript.org/memory.html#arraybuffer-layout pub struct ArrayBuffer { @@ -34,7 +34,7 @@ impl ArrayBuffer { content.extend(&asc_bytes); } - if content.len() > u32::max_value() as usize { + if content.len() > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow::anyhow!( "slice cannot fit in WASM memory" ))); @@ -75,7 +75,7 @@ impl AscType for ArrayBuffer { let total_size = self.byte_length as usize + HEADER_SIZE; let total_capacity = total_size.next_power_of_two(); let extra_capacity = total_capacity - total_size; - asc_layout.extend(std::iter::repeat(0).take(extra_capacity)); + asc_layout.extend(std::iter::repeat_n(0, extra_capacity)); Ok(asc_layout) } @@ -176,7 +176,7 @@ pub struct AscString { impl AscString { pub fn new(content: &[u16]) -> Result { - if size_of_val(content) > u32::max_value() as usize { + if size_of_val(content) > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow!( "string cannot fit in WASM memory" ))); @@ -205,7 +205,7 @@ impl AscType for AscString { let total_size = (self.byte_length as usize * 2) + header_size; let total_capacity = total_size.next_power_of_two(); let extra_capacity = total_capacity - total_size; - content.extend(std::iter::repeat(0).take(extra_capacity)); + content.extend(std::iter::repeat_n(0, extra_capacity)); Ok(content) } diff --git a/runtime/wasm/src/host.rs b/runtime/wasm/src/host.rs index aa079381a94..9f9f3645a55 100644 --- a/runtime/wasm/src/host.rs +++ b/runtime/wasm/src/host.rs @@ -190,7 +190,7 @@ where proof_of_indexing, host_fns: self.host_fns.cheap_clone(), debug_fork: debug_fork.cheap_clone(), - mapping_logger: Logger::new(&logger, o!("component" => "UserMapping")), + mapping_logger: Logger::new(logger, o!("component" => "UserMapping")), instrument, }, trigger, @@ -257,7 +257,7 @@ where proof_of_indexing, host_fns: self.host_fns.cheap_clone(), debug_fork: debug_fork.cheap_clone(), - mapping_logger: Logger::new(&logger, o!("component" => "UserBlockMapping")), + mapping_logger: Logger::new(logger, 
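The padding and string-length code above switches to `std::iter::repeat_n` (stable since Rust 1.82) in place of `repeat(x).take(n)`, and to `usize::is_multiple_of` (stable since Rust 1.87) in place of `% 2 != 0`. A small sketch of both:

```rust
fn main() {
    // `repeat_n` yields exactly `n` copies, replacing `repeat(x).take(n)`.
    let padding: Vec<u8> = std::iter::repeat_n(0u8, 4).collect();
    assert_eq!(padding, vec![0, 0, 0, 0]);

    // `is_multiple_of` reads better than a manual modulo check for the
    // UTF-16 byte-length validation.
    let byte_len: usize = 10;
    assert!(byte_len.is_multiple_of(2));
    assert!(!11usize.is_multiple_of(2));
}
```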
o!("component" => "UserBlockMapping")), instrument, }, handler.clone(), diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 43e235c6299..bc2ba76572d 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -175,7 +175,7 @@ impl HostExports { !state .entity_cache .schema - .has_field_with_name(entity_type, &field_name) + .has_field_with_name(entity_type, field_name) }); if has_invalid_fields { @@ -185,7 +185,7 @@ impl HostExports { if !state .entity_cache .schema - .has_field_with_name(entity_type, &field_name) + .has_field_with_name(entity_type, field_name) { Some(field_name.clone()) } else { @@ -326,7 +326,7 @@ impl HostExports { let poi_section = stopwatch.start_section("host_export_store_set__proof_of_indexing"); proof_of_indexing.write_event( &ProofOfIndexingEvent::SetEntity { - entity_type: &key.entity_type.typename(), + entity_type: key.entity_type.typename(), id: &key.entity_id.to_string(), data: &entity, }, @@ -385,9 +385,9 @@ impl HostExports { Ok(()) } - pub(crate) async fn store_get<'a>( + pub(crate) async fn store_get( &self, - state: &'a mut BlockState, + state: &mut BlockState, entity_type: String, entity_id: String, gas: &GasCounter, @@ -412,7 +412,7 @@ impl HostExports { )?; if let Some(ref entity) = result { - state.metrics.track_entity_read(&entity_type, &entity) + state.metrics.track_entity_read(&entity_type, entity) } Ok(result) @@ -585,7 +585,7 @@ impl HostExports { } Ok(v) }; - result.map_err(move |e: Error| anyhow::anyhow!("{}: {}", errmsg, e.to_string())) + result.map_err(move |e: Error| anyhow::anyhow!("{}: {}", errmsg, e)) } /// Expects a decimal string. @@ -1164,9 +1164,10 @@ impl HostExports { )?; if bytes.len() > MAX_JSON_SIZE { - return Err(DeterministicHostError::Other( - anyhow!("JSON size exceeds max size of {}", MAX_JSON_SIZE).into(), - )); + return Err(DeterministicHostError::Other(anyhow!( + "JSON size exceeds max size of {}", + MAX_JSON_SIZE + ))); } serde_json::from_slice(bytes.as_slice()) @@ -1264,13 +1265,10 @@ impl HostExports { )?; if bytes.len() > YAML_MAX_SIZE_BYTES { - return Err(DeterministicHostError::Other( - anyhow!( - "YAML size exceeds max size of {} bytes", - YAML_MAX_SIZE_BYTES - ) - .into(), - )); + return Err(DeterministicHostError::Other(anyhow!( + "YAML size exceeds max size of {} bytes", + YAML_MAX_SIZE_BYTES + ))); } serde_yaml::from_slice(bytes) diff --git a/runtime/wasm/src/lib.rs b/runtime/wasm/src/lib.rs index a9b28f872f1..7c543a4c128 100644 --- a/runtime/wasm/src/lib.rs +++ b/runtime/wasm/src/lib.rs @@ -4,7 +4,7 @@ mod host; pub mod to_from; /// Public interface of the crate, receives triggers to be processed. - +/// /// Pre-processes modules and manages their threads. Serves as an interface from `host` to `module`. pub mod mapping; diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index 0e06c125c1a..d8f03058b0c 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -346,7 +346,6 @@ impl ValidModule { let mut epoch_counter_abort_handle = None; if let Some(timeout) = timeout { - let timeout = timeout.clone(); let engine = engine.clone(); // The epoch counter task will perpetually increment the epoch every `timeout` seconds. 
diff --git a/runtime/wasm/src/module/context.rs b/runtime/wasm/src/module/context.rs index 9ecb04782ef..490a2414c6b 100644 --- a/runtime/wasm/src/module/context.rs +++ b/runtime/wasm/src/module/context.rs @@ -531,7 +531,7 @@ impl WasmInstanceContext<'_> { let ipfs_res = host_exports.ipfs_cat(&logger, link).await; let logger = self.as_ref().ctx.logger.cheap_clone(); match ipfs_res { - Ok(bytes) => asc_new(self, &*bytes, gas).await.map_err(Into::into), + Ok(bytes) => asc_new(self, &*bytes, gas).await, // Return null in case of error. Err(e) => { @@ -568,7 +568,7 @@ impl WasmInstanceContext<'_> { .ipfs_get_block(&self.as_ref().ctx.logger, link) .await; match ipfs_res { - Ok(bytes) => asc_new(self, &*bytes, gas).await.map_err(Into::into), + Ok(bytes) => asc_new(self, &*bytes, gas).await, // Return null in case of error. Err(e) => { @@ -1118,7 +1118,7 @@ impl WasmInstanceContext<'_> { // map `None` to `null`, and `Some(s)` to a runtime string match name { - Some(name) => asc_new(self, &*name, gas).await.map_err(Into::into), + Some(name) => asc_new(self, &*name, gas).await, None => Ok(AscPtr::null()), } } diff --git a/runtime/wasm/src/module/instance.rs b/runtime/wasm/src/module/instance.rs index 21560bb4fe5..e0a9bc87067 100644 --- a/runtime/wasm/src/module/instance.rs +++ b/runtime/wasm/src/module/instance.rs @@ -59,13 +59,10 @@ mod impl_for_tests { asc_get(&ctx, asc_ptr, &self.gas) } - pub async fn asc_new( - &mut self, - rust_obj: &T, - ) -> Result, HostExportError> + pub async fn asc_new(&mut self, rust_obj: &T) -> Result, HostExportError> where P: AscType + AscIndexId, - T: ToAscObj

,
+ ?Sized, { let mut ctx = WasmInstanceContext::new(&mut self.store); asc_new(&mut ctx, rust_obj, &self.gas).await @@ -190,13 +187,13 @@ impl WasmInstance { .await { Ok(()) => { - assert!(self.instance_ctx().as_ref().possible_reorg == false); - assert!(self.instance_ctx().as_ref().deterministic_host_trap == false); + assert!(!self.instance_ctx().as_ref().possible_reorg); + assert!(!self.instance_ctx().as_ref().deterministic_host_trap); None } Err(trap) if self.instance_ctx().as_ref().possible_reorg => { self.instance_ctx().as_mut().ctx.state.exit_handler(); - return Err(MappingError::PossibleReorg(trap.into())); + return Err(MappingError::PossibleReorg(trap)); } // Treat timeouts anywhere in the error chain as a special case to have a better error @@ -208,7 +205,7 @@ impl WasmInstance { .any(|e| e.downcast_ref::() == Some(&Trap::Interrupt)) => { self.instance_ctx().as_mut().ctx.state.exit_handler(); - return Err(MappingError::Unknown(Error::from(trap).context(format!( + return Err(MappingError::Unknown(trap.context(format!( "Handler '{}' hit the timeout of '{}' seconds", handler, self.instance_ctx().as_ref().valid_module.timeout.unwrap().as_secs() @@ -628,7 +625,7 @@ impl WasmInstance { // we cannot execute anything that requires access to the heap before it's created. if let Some(start_func) = valid_module.start_function.as_ref() { instance - .get_func(store.as_context_mut(), &start_func) + .get_func(store.as_context_mut(), start_func) .context(format!("`{start_func}` function not found"))? .typed::<(), ()>(store.as_context_mut())? .call_async(store.as_context_mut(), ()) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 86bf4055e5a..02914abc519 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -35,7 +35,7 @@ mod instance; mod into_wasm_ret; // Convenience for a 'top-level' asc_get, with depth 0. -fn asc_get( +fn asc_get( heap: &H, ptr: AscPtr, gas: &GasCounter, @@ -246,7 +246,7 @@ impl AscHeapCtx { fn host_export_error_from_trap(trap: Error, context: String) -> HostExportError { let trap_is_deterministic = is_trap_deterministic(&trap); - let e = Error::from(trap).context(context); + let e = trap.context(context); match trap_is_deterministic { true => HostExportError::Deterministic(e), false => HostExportError::Unknown(e), diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index 4edb688caf8..d3dc07c9afe 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -1,3 +1,5 @@ +//! Implementations of `ToAscObj` and `FromAscObj` for Rust types. +//! Standard Rust types go in `mod.rs` and external types in `external.rs`. use anyhow::anyhow; use async_trait::async_trait; use std::collections::HashMap; @@ -14,8 +16,6 @@ use graph::{ use crate::asc_abi::class::*; -///! Implementations of `ToAscObj` and `FromAscObj` for Rust types. -///! Standard Rust types go in `mod.rs` and external types in `external.rs`. 
mod external; #[async_trait] @@ -223,6 +223,6 @@ where depth: usize, ) -> Result { let entries: Vec<(T, U)> = asc_get(heap, asc_map.entries, gas, depth)?; - Ok(HashMap::from_iter(entries.into_iter())) + Ok(HashMap::from_iter(entries)) } } diff --git a/server/graphman/src/resolvers/deployment_mutation.rs b/server/graphman/src/resolvers/deployment_mutation.rs index aa716c286d0..c4a9b483fa9 100644 --- a/server/graphman/src/resolvers/deployment_mutation.rs +++ b/server/graphman/src/resolvers/deployment_mutation.rs @@ -118,7 +118,7 @@ impl DeploymentMutation { ) -> Result { let ctx = GraphmanContext::new(ctx)?; let deployment = deployment.try_into()?; - let node = NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; + let node = NodeId::new(node).map_err(|node| anyhow!("illegal node id `{}`", node))?; let reassign_result = reassign::run(&ctx, &deployment, &node).await?; match reassign_result { ReassignResult::CompletedWithWarnings(warnings) => Ok( diff --git a/server/graphman/src/resolvers/deployment_mutation/reassign.rs b/server/graphman/src/resolvers/deployment_mutation/reassign.rs index 8a1d3459479..00d32bc0a7b 100644 --- a/server/graphman/src/resolvers/deployment_mutation/reassign.rs +++ b/server/graphman/src/resolvers/deployment_mutation/reassign.rs @@ -20,7 +20,7 @@ pub async fn run( ctx.primary_pool.clone(), ctx.notification_sender.clone(), &deployment, - &node, + node, curr_node, ) .await?; diff --git a/server/graphman/tests/deployment_mutation.rs b/server/graphman/tests/deployment_mutation.rs index b9da4672c90..fd2020ee740 100644 --- a/server/graphman/tests/deployment_mutation.rs +++ b/server/graphman/tests/deployment_mutation.rs @@ -464,7 +464,7 @@ fn graphql_can_unassign_deployments() { let is_node_null = subgraph_node_id["data"]["deployment"]["info"][0]["nodeId"].is_null(); assert_eq!(unassign_req, expected_resp); - assert_eq!(is_node_null, true); + assert!(is_node_null); }); } diff --git a/server/http/src/request.rs b/server/http/src/request.rs index c13d46af440..4b7fdacdf59 100644 --- a/server/http/src/request.rs +++ b/server/http/src/request.rs @@ -154,19 +154,16 @@ mod tests { let query = request.expect("Should accept valid queries"); let expected_query = q::parse_query("{ user { name } }").unwrap().into_static(); - let expected_variables = QueryVariables::new(HashMap::from_iter( - vec![ - (String::from("string"), r::Value::String(String::from("s"))), - ( - String::from("map"), - r::Value::Object(Object::from_iter( - vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(), - )), - ), - (String::from("int"), r::Value::Int(5)), - ] - .into_iter(), - )); + let expected_variables = QueryVariables::new(HashMap::from_iter(vec![ + (String::from("string"), r::Value::String(String::from("s"))), + ( + String::from("map"), + r::Value::Object(Object::from_iter( + vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(), + )), + ), + (String::from("int"), r::Value::Int(5)), + ])); assert_eq!(query.document, expected_query); assert_eq!(query.variables, Some(expected_variables)); diff --git a/server/http/src/service.rs b/server/http/src/service.rs index 875a7d6b0eb..6eee41ce59a 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -130,9 +130,8 @@ where request: Request, ) -> ServerResult { let version = self.resolve_api_version(&request)?; - let subgraph_name = SubgraphName::new(subgraph_name.as_str()).map_err(|()| { - ServerError::ClientError(format!("Invalid subgraph name {:?}", subgraph_name)) - })?; + let 
subgraph_name = SubgraphName::new(subgraph_name.as_str()) + .map_err(|name| ServerError::ClientError(format!("Invalid subgraph name `{name}`")))?; self.handle_graphql_query(QueryTarget::Name(subgraph_name, version), request) .await @@ -164,7 +163,7 @@ where .get("X-GraphTraceQuery") .map(|v| { v.to_str() - .map(|s| s == &ENV_VARS.graphql.query_trace_token) + .map(|s| s == ENV_VARS.graphql.query_trace_token) .unwrap_or(false) }) .unwrap_or(false) @@ -342,7 +341,7 @@ where segments .iter() .filter(|&&segment| !segment.is_empty()) - .map(|&segment| segment) + .copied() .collect::>() .join("/") } @@ -355,7 +354,7 @@ where .find(|(key, _)| key == "query") .map(|(_, value)| value.into_owned()) }) - .unwrap_or_else(|| String::new()) + .unwrap_or_default() .trim() .to_lowercase() .starts_with("mutation"); @@ -525,7 +524,7 @@ mod tests { .body(Full::from("{}")) .unwrap(); - let response = service.call(request.into()).await; + let response = service.call(request).await; let content_type_header = response.status(); assert_eq!(content_type_header, StatusCode::OK); diff --git a/server/http/tests/response.rs b/server/http/tests/response.rs index 63e94509aca..cd5041260cf 100644 --- a/server/http/tests/response.rs +++ b/server/http/tests/response.rs @@ -71,7 +71,12 @@ fn canonical_serialization() { ); // Value::Float - assert_resp!(r#"{"data":{"float":3.14159}}"#, object! { float: 3.14159 }); + #[allow(clippy::approx_constant)] + let almost_pi = 3.14159_f64; + assert_resp!( + r#"{"data":{"float":3.14159}}"#, + object! { float: almost_pi } + ); // Value::String assert_resp!( diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 76dd25414d0..08ff1b63ac8 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -1,5 +1,4 @@ use std::collections::BTreeMap; -use std::convert::TryInto; use async_trait::async_trait; use graph::data::query::Trace; @@ -147,7 +146,7 @@ where .collect(), _ => unreachable!(), }) - .unwrap_or_else(Vec::new); + .unwrap_or_default(); let infos = self .store @@ -360,9 +359,7 @@ where let block_number: i32 = field .get_required::("blockNumber") - .expect("Valid blockNumber required") - .try_into() - .unwrap(); + .expect("Valid blockNumber required"); let block_hash = field .get_required::("blockHash") @@ -756,7 +753,7 @@ fn entity_changes_to_graphql(entity_changes: Vec) -> r::Value { r::Value::object( e.sorted() .into_iter() - .map(|(name, value)| (name.into(), value.into())) + .map(|(name, value)| (name, value.into())) .collect(), ) }) diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index 22f29f37731..09ddfd29038 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -150,8 +150,8 @@ where let options = QueryExecutionOptions { resolver, deadline: None, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, trace: false, }; let (result, _) = execute_query(query_clone.cheap_clone(), None, None, options).await; @@ -432,19 +432,16 @@ mod tests { let query = request.expect("Should accept valid queries"); let expected_query = q::parse_query("{ user { name } }").unwrap().into_static(); - let expected_variables = QueryVariables::new(HashMap::from_iter( - vec![ - (String::from("string"), r::Value::String(String::from("s"))), - ( - String::from("map"), - r::Value::Object(Object::from_iter( - vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(), - )), - ), - (String::from("int"), r::Value::Int(5)), - ] - 
.into_iter(), - )); + let expected_variables = QueryVariables::new(HashMap::from_iter(vec![ + (String::from("string"), r::Value::String(String::from("s"))), + ( + String::from("map"), + r::Value::Object(Object::from_iter( + vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(), + )), + ), + (String::from("int"), r::Value::Int(5)), + ])); assert_eq!(query.document, expected_query); assert_eq!(query.variables, Some(expected_variables)); diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 2e577af94c3..a6b98d71cdc 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -42,3 +42,6 @@ serde_yaml.workspace = true [dev-dependencies] clap.workspace = true graphql-parser = "0.4.1" + +[lints] +workspace = true diff --git a/store/postgres/src/advisory_lock.rs b/store/postgres/src/advisory_lock.rs index e012f08e82a..6b0fc671ffa 100644 --- a/store/postgres/src/advisory_lock.rs +++ b/store/postgres/src/advisory_lock.rs @@ -9,10 +9,8 @@ //! * 1: to synchronize on migratons //! //! We use the following 2x 32-bit locks -//! * 1, n: to lock copying of the deployment with id n in the destination -//! shard -//! * 2, n: to lock the deployment with id n to make sure only one write -//! happens to it +//! * 1, n: to lock copying of the deployment with id n in the destination shard +//! * 2, n: to lock the deployment with id n to make sure only one write happens to it use diesel::sql_query; use diesel::sql_types::Bool; diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index d6044c644ad..4383ab80d7e 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -1,8 +1,8 @@ +//! Utilities to deal with block numbers and block ranges use derive_more::Constructor; use diesel::pg::Pg; use diesel::query_builder::{AstPass, QueryFragment}; use diesel::result::QueryResult; -///! 
Utilities to deal with block numbers and block ranges use diesel::serialize::{Output, ToSql}; use diesel::sql_types::{Integer, Range}; use graph::env::ENV_VARS; @@ -31,6 +31,7 @@ pub(crate) const BLOCK_RANGE_CURRENT: &str = "block_range @> 2147483647"; /// - any CRUD operation modifies such an entity in place /// - queries by a block number consider such an entity as present for /// any block number +/// /// We therefore mark such entities with a block range `[-1,\infinity)`; we /// use `-1` as the lower bound to make it easier to identify such entities /// for troubleshooting/debugging diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index c07e9bdc732..674c274ac5c 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -303,19 +303,16 @@ impl BlockStore { // For each configured chain, add a chain store for (chain_name, shard) in chains { - match existing_chains + if let Some(chain) = existing_chains .iter() .find(|chain| chain.name == chain_name) { - Some(chain) => { - let status = if chain_ingestible(&block_store.logger, chain, &shard) { - ChainStatus::Ingestible - } else { - ChainStatus::ReadOnly - }; - block_store.add_chain_store(chain, status, false).await?; - } - None => {} + let status = if chain_ingestible(&block_store.logger, chain, &shard) { + ChainStatus::Ingestible + } else { + ChainStatus::ReadOnly + }; + block_store.add_chain_store(chain, status, false).await?; }; } @@ -345,7 +342,7 @@ impl BlockStore { pub async fn allocate_chain( conn: &mut AsyncPgConnection, - name: &String, + name: &str, shard: &Shard, ident: &ChainIdentifier, ) -> Result { @@ -369,7 +366,7 @@ impl BlockStore { let chain = Chain { id: next_val as i32, - name: name.clone(), + name: name.to_string(), shard: shard.clone(), net_version: ident.net_version.clone(), genesis_block: ident.genesis_block_hash.hash_hex(), @@ -544,11 +541,11 @@ impl BlockStore { eth_rpc_only_nets: Vec, ) -> Result<(), StoreError> { for store in self.stores() { - if !eth_rpc_only_nets.contains(&&store.chain) { + if !eth_rpc_only_nets.contains(&store.chain) { continue; }; - if let Some(head_block) = store.remove_cursor(&&store.chain).await? { + if let Some(head_block) = store.remove_cursor(&store.chain).await? 
{ let lower_bound = head_block.saturating_sub(ENV_VARS.reorg_threshold() * 2); info!(&self.logger, "Removed cursor for non-firehose chain, now cleaning shallow blocks"; "network" => &store.chain, "lower_bound" => lower_bound); store.cleanup_shallow_blocks(lower_bound).await?; @@ -601,11 +598,8 @@ impl BlockStore { network: &str, ident: ChainIdentifier, ) -> anyhow::Result> { - match self.store(network).await { - Some(chain_store) => { - return Ok(chain_store); - } - None => {} + if let Some(chain_store) = self.store(network).await { + return Ok(chain_store); } let mut conn = self.mirror.primary().get().await?; @@ -620,7 +614,7 @@ impl BlockStore { } }) .ok_or_else(|| anyhow!("unable to find shard for network {}", network))?; - let chain = primary::add_chain(&mut conn, &network, &shard, ident).await?; + let chain = primary::add_chain(&mut conn, network, shard, ident).await?; self.add_chain_store(&chain, ChainStatus::Ingestible, true) .await .map_err(anyhow::Error::from) @@ -643,7 +637,7 @@ impl ChainIdStore for BlockStore { chain_name: &ChainName, ) -> Result { let chain_store = self - .chain_store(&chain_name) + .chain_store(chain_name) .await .ok_or_else(|| anyhow!("unable to get store for chain '{chain_name}'"))?; @@ -659,7 +653,7 @@ impl ChainIdStore for BlockStore { // Update the block shard first since that contains a copy from the primary let chain_store = self - .chain_store(&chain_name) + .chain_store(chain_name) .await .ok_or_else(|| anyhow!("unable to get store for chain '{chain_name}'"))?; diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index 0f7dc065733..455fb6fe29b 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -825,7 +825,7 @@ pub async fn create_cross_shard_view( ) -> Result { let mut query = String::new(); write!(query, "create view \"{}\".\"{}\" as ", dst_nsp, table_name)?; - for (idx, (name, nsp)) in shard_nsps.into_iter().enumerate() { + for (idx, (name, nsp)) in shard_nsps.iter().enumerate() { if idx > 0 { write!(query, " union all ")?; } diff --git a/store/postgres/src/chain_head_listener.rs b/store/postgres/src/chain_head_listener.rs index b9faa164f0b..035e10f377a 100644 --- a/store/postgres/src/chain_head_listener.rs +++ b/store/postgres/src/chain_head_listener.rs @@ -168,8 +168,7 @@ impl ChainHeadUpdateListener { if let Some(watcher) = watchers .try_read() .as_ref() - .map(|w| w.get(&update.network_name)) - .flatten() + .and_then(|w| w.get(&update.network_name)) { watcher.send(); } diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index eadde677f96..7469cfec1ba 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -324,6 +324,7 @@ mod data { #[derive(Clone, Debug, AsExpression, FromSqlRow)] #[diesel(sql_type = Text)] + #[allow(clippy::large_enum_variant)] /// Storage for a chain. The underlying namespace (database schema) is either /// `public` or of the form `chain[0-9]+`. 
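Note on the `#[allow(clippy::large_enum_variant)]` added just above: the lint fires when one variant is much larger than the rest, because every value of the enum reserves space for the largest variant. This patch uses both standard responses: allowing the lint here for `Storage`, and boxing the oversized variant for `WorkerResult` in the copy.rs hunk further down (`Ok(Box<CopyTableWorker>)`). A rough sketch of the boxing approach, with invented types:

    // Boxing the large payload keeps the enum itself small; the trade-off is
    // one heap allocation and a pointer indirection per value.
    struct BigPayload {
        buf: [u8; 1024],
    }

    enum Message {
        Ping,
        Data(Box<BigPayload>), // a pointer instead of 1 KiB inline
    }

    fn main() {
        let messages = vec![
            Message::Ping,
            Message::Data(Box::new(BigPayload { buf: [0u8; 1024] })),
        ];
        println!("enum size: {} bytes", std::mem::size_of::<Message>());
        for msg in &messages {
            match msg {
                Message::Ping => println!("ping"),
                Message::Data(payload) => println!("payload of {} bytes", payload.buf.len()),
            }
        }
    }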
pub enum Storage { @@ -500,29 +501,22 @@ mod data { chain: &str, ) -> Result, StoreError> { use diesel::dsl::not; - use public::ethereum_networks::dsl::*; + use public::ethereum_networks as n; - match update( - ethereum_networks - .filter(name.eq(chain)) - .filter(not(head_block_cursor.is_null())), + let head_block_number = update( + n::table + .filter(n::name.eq(chain)) + .filter(not(n::head_block_cursor.is_null())), ) - .set(head_block_cursor.eq(None as Option)) - .returning(head_block_number) + .set(n::head_block_cursor.eq(None as Option)) + .returning(n::head_block_number) .get_result::>(conn) .await - .optional() - { - Ok(res) => match res { - Some(opt_num) => match opt_num { - Some(num) => Ok(Some(num as i32)), - None => Ok(None), - }, - None => Ok(None), - }, - Err(e) => Err(e), - } - .map_err(Into::into) + .optional()? + .flatten() + .map(|num| num as i32); + + Ok(head_block_number) } /// Insert a block. If the table already contains a block with the @@ -731,7 +725,6 @@ mod data { .into_iter() .map(|h| h.parse()) .collect::, _>>() - .map_err(Error::from) } Storage::Private(Schema { blocks, .. }) => Ok(blocks .table() @@ -835,7 +828,7 @@ mod data { Ok(Some(( number, crate::chain_store::try_parse_timestamp(ts)?, - parent_hash.map(|h| BlockHash::from(h)), + parent_hash.map(BlockHash::from), ))) } } @@ -2505,7 +2498,7 @@ impl ChainStoreTrait for ChainStore { match res { Ok(blocks) => { - for (_, blocks_for_num) in &blocks { + for blocks_for_num in blocks.values() { if blocks.len() == 1 { self.recent_blocks_cache .insert_block(blocks_for_num[0].clone()); @@ -2528,9 +2521,7 @@ impl ChainStoreTrait for ChainStore { let mut result = cached_map; for (num, blocks) in stored { - if !result.contains_key(&num) { - result.insert(num, blocks); - } + result.entry(num).or_insert(blocks); } result @@ -2564,7 +2555,7 @@ impl ChainStoreTrait for ChainStore { let stored = if cached.len() < hashes.len() { let hashes = hashes .iter() - .filter(|hash| cached.iter().find(|(ptr, _)| &ptr.hash == *hash).is_none()) + .filter(|hash| !cached.iter().any(|(ptr, _)| &ptr.hash == *hash)) .cloned() .collect::>(); // We key this off the entire list of hashes, which means @@ -2600,7 +2591,8 @@ impl ChainStoreTrait for ChainStore { // since it depends on a lot of the details of how the // `HerdCache` is implemented let res = Arc::try_unwrap(res).unwrap_or_else(|arc| (*arc).clone()); - let stored = match res { + + match res { Ok(blocks) => { for block in &blocks { self.recent_blocks_cache.insert_block(block.clone()); @@ -2610,8 +2602,7 @@ impl ChainStoreTrait for ChainStore { Err(e) => { return Err(e.into()); } - }; - stored + } } else { Vec::new() }; @@ -3140,7 +3131,7 @@ impl EthereumCallCache for ChainStore { } let ids: Vec<_> = reqs - .into_iter() + .iter() .map(|req| contract_call_id(req, &block)) .collect(); let id_refs: Vec<_> = ids.iter().map(|id| id.as_slice()).collect(); @@ -3170,9 +3161,9 @@ impl EthereumCallCache for ChainStore { resps.push(resp); } let calls = reqs - .into_iter() + .iter() .enumerate() - .filter(|(idx, _)| !found.contains(&idx)) + .filter(|(idx, _)| !found.contains(idx)) .map(|(_, call)| call.cheap_clone()) .collect(); Ok((resps, calls)) diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index 80830b3e61b..54c1a03a896 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -31,7 +31,6 @@ use diesel_async::{ AsyncConnection, }; use diesel_async::{RunQueryDsl, SimpleAsyncConnection}; -use tokio; use graph::{ futures03::{ @@ -214,7 +213,7 @@ impl CopyState 
{ let mut unfinished = Vec::new(); for dst_table in dst.tables.values() { - if let Some(src_table) = src.table_for_entity(&dst_table.object).ok() { + if let Ok(src_table) = src.table_for_entity(&dst_table.object) { unfinished.push( TableState::init( conn, @@ -386,7 +385,7 @@ impl TableState { e ) }) - .map(|table| table.clone()) + .cloned() } let mut states = Vec::new(); @@ -670,7 +669,7 @@ impl CopyProgress { } enum WorkerResult { - Ok(CopyTableWorker), + Ok(Box), Err(StoreError), Wake, } @@ -678,7 +677,7 @@ enum WorkerResult { impl From> for WorkerResult { fn from(result: Result) -> Self { match result { - Ok(worker) => WorkerResult::Ok(worker), + Ok(worker) => WorkerResult::Ok(Box::new(worker)), Err(e) => WorkerResult::Err(e), } } @@ -1061,9 +1060,7 @@ impl Connection { state: &mut CopyState, progress: &Arc, ) -> Option { - let Some(conn) = self.conn.take() else { - return None; - }; + let conn = self.conn.take()?; let Some(table) = state.unfinished.pop() else { self.conn = Some(conn); return None; @@ -1085,16 +1082,11 @@ impl Connection { ) -> Option { // It's important that we get the connection before the table since // we remove the table from the state and could drop it otherwise - let Some(conn) = self + let conn = self .pool .try_get_fdw(&self.logger, ENV_VARS.store.batch_worker_wait) - .await - else { - return None; - }; - let Some(table) = state.unfinished.pop() else { - return None; - }; + .await?; + let table = state.unfinished.pop()?; let conn = LockTrackingConnection::new(conn); let worker = CopyTableWorker::new(conn, table); diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 07ed4d13b6f..239ccdf61b3 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -110,7 +110,7 @@ impl OnSync { } } - pub fn to_str(&self) -> &str { + pub fn to_str(&self) -> &'static str { match self { OnSync::None => "none", OnSync::Activate => "activate", @@ -118,7 +118,7 @@ impl OnSync { } } - fn to_sql(&self) -> Option<&str> { + fn to_sql(self) -> Option<&'static str> { match self { OnSync::None => None, OnSync::Activate | OnSync::Replace => Some(self.to_str()), @@ -481,7 +481,7 @@ pub async fn transact_block( // Performance note: This costs us an extra DB query on every update. We used to put this in the // `where` clause of the `update` statement, but that caused Postgres to use bitmap scans instead // of a simple primary key lookup. So a separate query it is. - let block_ptr = block_ptr(conn, &site).await?; + let block_ptr = block_ptr(conn, site).await?; if let Some(block_ptr_from) = block_ptr { if block_ptr_from.number >= ptr.number { return Err(StoreError::DuplicateBlockProcessing( @@ -561,7 +561,7 @@ pub async fn forward_block_ptr( // No matching rows were found. This is an error. By the filter conditions, this can only be // due to a missing deployment (which `block_ptr` catches) or duplicate block processing. - 0 => match block_ptr(conn, &site).await? { + 0 => match block_ptr(conn, site).await? { Some(block_ptr_from) if block_ptr_from.number >= ptr.number => Err( StoreError::DuplicateBlockProcessing(site.deployment.clone(), ptr.number), ), @@ -1113,7 +1113,7 @@ pub(crate) async fn revert_subgraph_errors( // The result will be the same at `reverted_block` or `reverted_block - 1` since the errors at // `reverted_block` were just deleted, but semantically we care about `reverted_block - 1` which // is the block being reverted to. 
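Note on the copy.rs hunk above: the explicit `let Some(x) = ... else { return None; }` blocks become `self.conn.take()?` and `state.unfinished.pop()?`, which works because `?` applied to an `Option` inside a function that itself returns `Option` early-returns `None`. A standalone sketch of that behaviour, using made-up names:

    // `?` on an Option replaces the explicit `let ... else { return None }` form.
    fn first_even(values: &[i32]) -> Option<i32> {
        let first = values.first()?; // returns None if the slice is empty
        if first % 2 == 0 {
            Some(*first)
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(first_even(&[2, 3]), Some(2));
        assert_eq!(first_even(&[]), None);
        println!("ok");
    }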
- check_health(&logger, conn, id, reverted_block - 1).await?; + check_health(logger, conn, id, reverted_block - 1).await?; // If the deployment is failed in both `failed` and `status` columns, // update both values respectively to `false` and `healthy`. Basically diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index f8abf4d93e4..b8c34b64e81 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -727,7 +727,6 @@ impl DeploymentStore { sql_query(drop_index_sql).execute(&mut conn).await?; Err(StoreError::Canceled) } - .map_err(Into::into) } /// Returns a list of all existing indexes for the specified Entity table. @@ -745,8 +744,7 @@ impl DeploymentStore { let table_name = &table.name; let indexes = catalog::indexes_for_table(&mut conn, schema_name.as_str(), table_name.as_str()) - .await - .map_err(StoreError::from)?; + .await?; Ok(indexes.into_iter().map(CreateIndex::parse).collect()) } @@ -858,13 +856,13 @@ impl DeploymentStore { ) -> Result, StoreError> { async fn do_prune( store: Arc, - mut conn: &mut AsyncPgConnection, + conn: &mut AsyncPgConnection, site: Arc, req: PruneRequest, mut reporter: Box, ) -> Result, StoreError> { - let layout = store.layout(&mut conn, site.clone()).await?; - let state = deployment::state(&mut conn, &site).await?; + let layout = store.layout(conn, site.clone()).await?; + let state = deployment::state(conn, &site).await?; if state.latest_block.number <= req.history_blocks { // We haven't accumulated enough history yet, nothing to prune @@ -884,7 +882,7 @@ impl DeploymentStore { .await?; layout - .prune(&store.logger, reporter.as_mut(), &mut conn, &req) + .prune(&store.logger, reporter.as_mut(), conn, &req) .await?; Ok(reporter) } @@ -1083,7 +1081,7 @@ impl DeploymentStore { site: Arc, derived_query: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: &Vec, + excluded_keys: &[EntityKey], ) -> Result, StoreError> { let mut conn = self.pool.get_permitted().await?; let layout = self.layout(&mut conn, site).await?; @@ -1293,7 +1291,7 @@ impl DeploymentStore { .await } - if !prune_in_progress(&self, &site)? { + if !prune_in_progress(self, &site)? { let req = PruneRequest::new( &site.as_ref().into(), history_blocks, @@ -1303,7 +1301,7 @@ impl DeploymentStore { )?; let deployment_id = site.id; - let logger = Logger::new(&logger, o!("component" => "Prune")); + let logger = Logger::new(logger, o!("component" => "Prune")); let handle = graph::spawn(run(logger, self.clone(), site, req)); self.prune_handles .lock() @@ -1733,7 +1731,7 @@ impl DeploymentStore { // We reset the firehose cursor. That way, on resume, Firehose will start from // the block_ptr instead (with sanity checks to ensure it's resuming at the // correct block). - let _ = self.revert_block_operations(site.clone(), parent_ptr.clone(), &FirehoseCursor::None).await?; + self.revert_block_operations(site.clone(), parent_ptr.clone(), &FirehoseCursor::None).await?; // Unfail the deployment. 
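Note on the signature changes in this area (`excluded_keys: &[EntityKey]` above, with several more of the same shape later in the patch): taking `&[T]` instead of `&Vec<T>` is the usual fix for clippy's `ptr_arg` lint, since a slice parameter accepts a `Vec`, an array, or a sub-slice without forcing the caller to own a `Vec`. A standalone sketch, unrelated to the real store types:

    // The slice form is strictly more general than `&Vec<i64>`.
    fn total(values: &[i64]) -> i64 {
        values.iter().sum()
    }

    fn main() {
        let owned: Vec<i64> = vec![1, 2, 3];
        assert_eq!(total(&owned), 6);      // &Vec<i64> coerces to &[i64]
        assert_eq!(total(&[4, 5]), 9);     // arrays work too
        assert_eq!(total(&owned[1..]), 5); // and so do sub-slices
        println!("ok");
    }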
deployment::update_deployment_status(conn, deployment_id, prev_health, None,None).await?; @@ -1946,7 +1944,7 @@ pub fn generate_index_creation_sql( after: Option, ) -> Result<(String, String), StoreError> { let schema_name = layout.site.namespace.clone(); - let table = resolve_table_name(&layout, &entity_name)?; + let table = resolve_table_name(&layout, entity_name)?; let (column_names, index_exprs) = resolve_column_names_and_index_exprs(table, &field_names)?; let column_names_sep_by_underscores = column_names.join("_"); diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index 7dcac2f3bd8..74a6d546a4a 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -133,7 +133,7 @@ impl From<(Deployment, Head)> for DeploymentDetail { synced_at, synced_at_block_number, block_hash: block_hash.clone(), - block_number: block_number.clone(), + block_number, entity_count: entity_count as usize, } } @@ -546,7 +546,7 @@ struct StoredSubgraphManifest { } impl StoredSubgraphManifest { - fn as_manifest(self, schema: &InputSchema) -> SubgraphManifestEntity { + fn into_manifest_entity(self, schema: &InputSchema) -> SubgraphManifestEntity { let e: Vec<_> = self .entities_with_causality_region .into_iter() @@ -568,7 +568,7 @@ impl StoredSubgraphManifest { struct StoredDeploymentEntity(crate::detail::DeploymentDetail, StoredSubgraphManifest); impl StoredDeploymentEntity { - fn as_subgraph_deployment( + fn into_subgraph_deployment_entity( self, schema: &InputSchema, ) -> Result { @@ -578,7 +578,7 @@ impl StoredDeploymentEntity { &detail.subgraph, "start_block", manifest.start_block_hash.clone(), - manifest.start_block_number.map(|n| n.into()), + manifest.start_block_number, )? .map(|block| block.to_ptr()); @@ -611,7 +611,7 @@ impl StoredDeploymentEntity { .map_err(|b| internal_error!("invalid debug fork `{}`", b))?; Ok(SubgraphDeploymentEntity { - manifest: manifest.as_manifest(schema), + manifest: manifest.into_manifest_entity(schema), failed: detail.failed, health: detail.health.into(), synced_at: detail.synced_at, @@ -653,7 +653,7 @@ pub async fn deployment_entity( .await .map(DeploymentDetail::from)?; - StoredDeploymentEntity(detail, manifest).as_subgraph_deployment(schema) + StoredDeploymentEntity(detail, manifest).into_subgraph_deployment_entity(schema) } #[derive(Queryable, Identifiable, Insertable)] diff --git a/store/postgres/src/dynds/private.rs b/store/postgres/src/dynds/private.rs index 874db77e788..793a3ffe9fc 100644 --- a/store/postgres/src/dynds/private.rs +++ b/store/postgres/src/dynds/private.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, i32, ops::Bound}; +use std::{collections::HashMap, ops::Bound}; use diesel::{ pg::{sql_types, Pg}, diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index 8835f449c35..3562e4584f2 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -117,7 +117,7 @@ pub(super) async fn insert( let dds: Vec<_> = data_sources .entries .iter() - .map(|(block_ptr, dds)| { + .flat_map(|(block_ptr, dds)| { dds.iter().map(|ds| { let StoredDynamicDataSource { manifest_idx: _, @@ -160,7 +160,6 @@ pub(super) async fn insert( )) }) }) - .flatten() .collect::>()?; insert_into(decds::table) diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 6c9c340342c..c8602e0a519 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -209,7 +209,7 @@ query Query ($id: String) {{ e )) })?; - map.insert(Word::from(f.name.clone()), value); + 
map.insert(f.name.clone(), value); } map }; diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index 4f3864fc4f8..7f0e5eb51ba 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -347,8 +347,7 @@ impl JsonNotification { anyhow!("Invalid notification ID, not compatible with i64: {}", n) })?; - if payload_id < (i32::min_value() as i64) || payload_id > (i32::max_value() as i64) - { + if payload_id < (i32::MIN as i64) || payload_id > (i32::MAX as i64) { Err(anyhow!( "Invalid notification ID, value exceeds i32: {}", payload_id @@ -368,10 +367,10 @@ impl JsonNotification { ) })?; - if payload_rows.is_empty() || payload_rows.get(0).is_none() { + if payload_rows.is_empty() || payload_rows.is_empty() { return Err(anyhow!("No payload found for notification {}", payload_id))?; } - let payload: String = payload_rows.get(0).unwrap().get(0); + let payload: String = payload_rows.first().unwrap().get(0); Ok(JsonNotification { payload: serde_json::from_str(&payload)?, diff --git a/store/postgres/src/pool/coordinator.rs b/store/postgres/src/pool/coordinator.rs index c16ba4d4b7a..fb0b05a1ac0 100644 --- a/store/postgres/src/pool/coordinator.rs +++ b/store/postgres/src/pool/coordinator.rs @@ -96,7 +96,7 @@ impl PoolCoordinator { if count.had_migrations() { let server = self.server(&pool.shard)?; for pool in self.pools() { - let remap_res = pool.remap(&server).await; + let remap_res = pool.remap(server).await; if let Err(e) = remap_res { error!(pool.logger, "Failed to map imports from {}", server.shard; "error" => e.to_string()); return Err(e); @@ -130,7 +130,7 @@ impl PoolCoordinator { fn primary(&self) -> Result, StoreError> { let map = self.pools.lock().unwrap(); - let pool_state = map.get(&*&PRIMARY_SHARD).ok_or_else(|| { + let pool_state = map.get(&PRIMARY_SHARD).ok_or_else(|| { internal_error!("internal error: primary shard not found in pool coordinator") })?; @@ -297,7 +297,7 @@ impl PoolCoordinator { let migrated = migrate(&states, self.servers.as_ref()).await?; - let propagated = propagate(&self, migrated).await?; + let propagated = propagate(self, migrated).await?; primary.create_cross_shard_views(&self.servers).await?; diff --git a/store/postgres/src/pool/foreign_server.rs b/store/postgres/src/pool/foreign_server.rs index 9f9f9f60791..78cd83116db 100644 --- a/store/postgres/src/pool/foreign_server.rs +++ b/store/postgres/src/pool/foreign_server.rs @@ -49,7 +49,7 @@ impl ForeignServer { if shard == current { "subgraphs".to_string() } else { - Self::metadata_schema(&shard) + Self::metadata_schema(shard) } } @@ -67,7 +67,7 @@ impl ForeignServer { ), }; - let host = match config.get_hosts().get(0) { + let host = match config.get_hosts().first() { Some(Host::Tcp(host)) => host.to_string(), _ => bail!("can not find host name in `{}`", SafeDisplay(postgres_url)), }; @@ -226,7 +226,7 @@ impl ForeignServer { existing != needed } - if &self.shard == &*PRIMARY_SHARD { + if self.shard == *PRIMARY_SHARD { let existing = catalog::foreign_tables(conn, PRIMARY_PUBLIC).await?; let needed = PRIMARY_TABLES .into_iter() diff --git a/store/postgres/src/pool/manager.rs b/store/postgres/src/pool/manager.rs index fdca61d2ca6..4677ea6276b 100644 --- a/store/postgres/src/pool/manager.rs +++ b/store/postgres/src/pool/manager.rs @@ -274,7 +274,9 @@ pub(crate) fn spawn_connection_reaper( if last_used.elapsed() > CHECK_INTERVAL { // Reset wait time if there was no activity recently so that // we don't report stale wait 
times - wait_gauge.as_ref().map(|wait_gauge| wait_gauge.set(0.0)); + if let Some(wait_gauge) = wait_gauge.as_ref() { + wait_gauge.set(0.0) + } } tokio::time::sleep(CHECK_INTERVAL).await; } diff --git a/store/postgres/src/pool/mod.rs b/store/postgres/src/pool/mod.rs index cd44b32463e..20d332616a2 100644 --- a/store/postgres/src/pool/mod.rs +++ b/store/postgres/src/pool/mod.rs @@ -67,14 +67,14 @@ impl DerefMut for PermittedConnection { /// The namespace under which the `PRIMARY_TABLES` are mapped into each /// shard -pub(crate) const PRIMARY_PUBLIC: &'static str = "primary_public"; +pub(crate) const PRIMARY_PUBLIC: &str = "primary_public"; /// Tables that we map from the primary into `primary_public` in each shard const PRIMARY_TABLES: [&str; 3] = ["deployment_schemas", "chains", "active_copies"]; /// The namespace under which we create views in the primary that union all /// the `SHARDED_TABLES` -pub(crate) const CROSS_SHARD_NSP: &'static str = "sharded"; +pub(crate) const CROSS_SHARD_NSP: &str = "sharded"; /// Tables that we map from each shard into each other shard into the /// `shard__subgraphs` namespace @@ -206,7 +206,7 @@ impl PoolState { // we didn't have an error, it means the database is not available if self.needs_setup() { error!(self.logger, "Database is not available, setup did not work"); - return Err(StoreError::DatabaseUnavailable); + Err(StoreError::DatabaseUnavailable) } else { Ok(pool) } @@ -596,17 +596,17 @@ impl PoolInner { match res { Ok(conn) => { self.state_tracker.mark_available(); - return Ok(conn); + Ok(conn) } Err(PoolError::Closed) | Err(PoolError::Backend(_)) => { self.state_tracker.mark_unavailable(Duration::from_nanos(0)); - return Err(StoreError::DatabaseUnavailable); + Err(StoreError::DatabaseUnavailable) } Err(PoolError::Timeout(_)) => { if !self.state_tracker.timeout_is_ignored() { self.state_tracker.mark_unavailable(elapsed); } - return Err(StoreError::StatementTimeout); + Err(StoreError::StatementTimeout) } Err(PoolError::NoRuntimeSpecified) | Err(PoolError::PostCreateHook(_)) => { let e = res.err().unwrap(); @@ -650,7 +650,7 @@ impl PoolInner { { let pool = self.fdw_pool(logger)?; loop { - match self.get_from_pool(&pool, None, Duration::ZERO).await { + match self.get_from_pool(pool, None, Duration::ZERO).await { Ok(conn) => return Ok(conn), Err(e) => { if timeout() { @@ -701,20 +701,18 @@ impl PoolInner { async fn locale_check(&self, logger: &Logger) -> Result<(), StoreError> { let mut conn = self.get().await?; - Ok( - if let Err(msg) = catalog::Locale::load(&mut conn).await?.suitable() { - if &self.shard == &*PRIMARY_SHARD && primary::is_empty(&mut conn).await? { - const MSG: &str = - "Database does not use C locale. \ - Please check the graph-node documentation for how to set up the database locale"; - - crit!(logger, "{}: {}", MSG, msg); - panic!("{}: {}", MSG, msg); - } else { - warn!(logger, "{}.\nPlease check the graph-node documentation for how to set up the database locale", msg); - } - }, - ) + let _: () = if let Err(msg) = catalog::Locale::load(&mut conn).await?.suitable() { + if self.shard == *PRIMARY_SHARD && primary::is_empty(&mut conn).await? { + const MSG: &str = "Database does not use C locale. 
\ + Please check the graph-node documentation for how to set up the database locale"; + + crit!(logger, "{}: {}", MSG, msg); + panic!("{}: {}", MSG, msg); + } else { + warn!(logger, "{}.\nPlease check the graph-node documentation for how to set up the database locale", msg); + } + }; + Ok(()) } pub(crate) async fn query_permit(&self) -> OwnedSemaphorePermit { @@ -834,7 +832,7 @@ impl PoolInner { servers: &'a [ForeignServer], ) -> Vec<(&'a str, String)> { servers - .into_iter() + .iter() .map(|server| { let nsp = if &server.shard == current { local_nsp.to_string() @@ -900,13 +898,13 @@ impl PoolInner { /// need to remap anything that we are importing via fdw to make sure we /// are using this updated schema pub async fn remap(&self, server: &ForeignServer) -> Result<(), StoreError> { - if &server.shard == &*PRIMARY_SHARD { + if server.shard == *PRIMARY_SHARD { info!(&self.logger, "Mapping primary"); let mut conn = self.get().await?; conn.transaction(|conn| ForeignServer::map_primary(conn, &self.shard).scope_boxed()) .await?; } - if &server.shard != &self.shard { + if server.shard != self.shard { info!( &self.logger, "Mapping metadata from {}", @@ -920,7 +918,7 @@ impl PoolInner { } pub async fn needs_remap(&self, server: &ForeignServer) -> Result { - if &server.shard == &self.shard { + if server.shard == self.shard { return Ok(false); } diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 5a1840b39e3..d7f506ff024 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -681,7 +681,7 @@ mod queries { .await .optional()? .map(|node| { - NodeId::new(&node).map_err(|()| { + NodeId::new(node).map_err(|node| { internal_error!( "invalid node id `{}` in assignment for `{}`", node, @@ -707,7 +707,7 @@ mod queries { .await .optional()? .map(|(node, ts)| { - let node_id = NodeId::new(&node).map_err(|()| { + let node_id = NodeId::new(node).map_err(|node| { internal_error!( "invalid node id `{}` in assignment for `{}`", node, @@ -1265,7 +1265,7 @@ impl Connection { features, data_source_kinds: data_sources, handler_kinds: handlers, - network: network, + network, has_declared_calls, has_bytes_as_ids, has_aggregations, @@ -1634,7 +1634,7 @@ impl Connection { .map(|(node, count)| (node.as_str(), *count)) .chain(missing) .min_by_key(|(_, count)| *count) - .map(|(node, _)| NodeId::new(node).map_err(|()| node)) + .map(|(node, _)| NodeId::new(node)) .transpose() // This can't really happen since we filtered by valid NodeId's .map_err(|node| { @@ -1808,7 +1808,7 @@ impl Connection { &detail.subgraph, "latest_ethereum_block", detail.block_hash.clone(), - detail.block_number.clone(), + detail.block_number, )? 
.map(|b| b.to_ptr()) .map(|ptr| (Some(Vec::from(ptr.hash_slice())), Some(ptr.number))) @@ -1822,7 +1822,7 @@ impl Connection { u::latest_ethereum_block_number.eq(latest_number), u::failed.eq(detail.failed), u::synced_at.eq(detail.synced_at), - u::synced_at_block_number.eq(detail.synced_at_block_number.clone()), + u::synced_at_block_number.eq(detail.synced_at_block_number), )) .execute(&mut self.conn) .await?; @@ -2201,7 +2201,7 @@ impl Mirror { } pub async fn active_assignments(&self, node: &NodeId) -> Result, StoreError> { - self.read_async(|conn| queries::active_assignments(conn, &node).scope_boxed()) + self.read_async(|conn| queries::active_assignments(conn, node).scope_boxed()) .await } diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index b2dbbcc8a26..404daa42b8d 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -27,7 +27,6 @@ use diesel::sql_types::Text; use diesel::{debug_query, sql_query, OptionalExtension, QueryDsl, QueryResult}; use diesel_async::scoped_futures::ScopedFutureExt; use diesel_async::{AsyncConnection, RunQueryDsl, SimpleAsyncConnection}; -use tokio; use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation}; use graph::blockchain::BlockTime; @@ -206,7 +205,7 @@ impl PartialEq for SqlName { impl FromSql for SqlName { fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result { - >::from_sql(bytes).map(|s| SqlName::verbatim(s)) + >::from_sql(bytes).map(SqlName::verbatim) } } @@ -282,7 +281,7 @@ impl Layout { if catalog.use_poi { tables.push(Self::make_poi_table( - &schema, + schema, &catalog, has_ts_tables, tables.len(), @@ -296,7 +295,7 @@ impl Layout { tables }); - let rollups = Self::rollups(&tables, &schema)?; + let rollups = Self::rollups(&tables, schema)?; Ok(Layout { site, @@ -506,6 +505,7 @@ impl Layout { let key = entity_type.key_in(entity_data.id(), CausalityRegion::from_entity(&entity_data)); + #[allow(clippy::map_entry)] if entities.contains_key(&key) { return Err(internal_error!( "duplicate entity {}[{}] in result set, block = {}", @@ -652,7 +652,7 @@ impl Layout { } // sort the elements in each blocks bucket by vid - for (_, vec) in &mut entities { + for vec in entities.values_mut() { vec.sort_by(|a, b| a.vid.cmp(&b.vid)); } @@ -664,7 +664,7 @@ impl Layout { conn: &mut AsyncPgConnection, derived_query: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: &Vec, + excluded_keys: &[EntityKey], ) -> Result, StoreError> { let table = self.table_for_entity(&derived_query.entity_type)?; let ids = excluded_keys.iter().map(|key| &key.entity_id).cloned(); @@ -1025,7 +1025,7 @@ impl Layout { // FIXME: we clone all the ids here let chunk = IdList::try_from_iter( group.entity_type.id_type()?, - chunk.into_iter().map(|id| (*id).to_owned()), + chunk.iter().map(|id| (*id).to_owned()), )?; count += ClampRangeQuery::new(table, &chunk, block)? 
.execute(conn) @@ -1045,7 +1045,7 @@ impl Layout { pub async fn truncate_tables(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> { for table in self.tables.values() { - sql_query(&format!("TRUNCATE TABLE {}", table.qualified_name)) + sql_query(format!("TRUNCATE TABLE {}", table.qualified_name)) .execute(conn) .await?; } @@ -1302,7 +1302,7 @@ impl Layout { break; } Some(bucket) => { - rollup.insert(conn, &bucket, *block).await?; + rollup.insert(conn, bucket, *block).await?; } } } @@ -1391,7 +1391,7 @@ impl ColumnType { if let Some(id_type) = schema .entity_type(name) .ok() - .and_then(|entity_type| Some(entity_type.id_type())) + .map(|entity_type| entity_type.id_type()) .transpose()? { return Ok(id_type.into()); @@ -1488,7 +1488,7 @@ impl Column { let sql_name = SqlName::from(&*field.name); - let is_reference = schema.is_reference(&field.field_type.get_base_type()); + let is_reference = schema.is_reference(field.field_type.get_base_type()); let column_type = if sql_name.as_str() == PRIMARY_KEY_COLUMN { IdType::try_from(&field.field_type)?.into() @@ -1566,13 +1566,7 @@ impl Column { } pub fn is_nullable(&self) -> bool { - fn is_nullable(field_type: &q::Type) -> bool { - match field_type { - q::Type::NonNullType(_) => false, - _ => true, - } - } - is_nullable(&self.field_type) + !matches!(&self.field_type, q::Type::NonNullType(_)) } pub fn is_list(&self) -> bool { @@ -1736,10 +1730,7 @@ impl Table { pub fn column(&self, name: &SqlName) -> Option<&Column> { self.columns .iter() - .filter(|column| match column.column_type { - ColumnType::TSVector(_) => false, - _ => true, - }) + .filter(|column| !matches!(column.column_type, ColumnType::TSVector(_))) .find(|column| &column.name == name) } diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index a3c4ed6885e..cdb162978b6 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -406,16 +406,10 @@ impl Table { self.create_table(out)?; self.create_time_travel_indexes(catalog, out)?; if index_def.is_some() && ENV_VARS.postpone_attribute_index_creation { + #[allow(clippy::unnecessary_unwrap)] let arr = index_def .unwrap() - .indexes_for_table( - &self.nsp, - &self.name.to_string(), - &self, - false, - false, - false, - ) + .indexes_for_table(&self.nsp, &self.name.to_string(), self, false, false, false) .map_err(|_| fmt::Error)?; for (_, sql) in arr { writeln!(out, "{};", sql).expect("properly formated index statements") diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index 6a9a2fdfaee..901b4daa1e5 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -415,14 +415,7 @@ fn postponed_indexes_with_block_column() { let dst_nsp = Namespace::new("sgd2".to_string()).unwrap(); let arr = index_list() - .indexes_for_table( - &dst_nsp, - &table.name.to_string(), - &table, - true, - false, - false, - ) + .indexes_for_table(&dst_nsp, &table.name.to_string(), table, true, false, false) .unwrap(); assert_eq!(1, arr.len()); assert!(!arr[0].1.contains(BLOCK_IDX)); @@ -432,7 +425,7 @@ fn postponed_indexes_with_block_column() { .indexes_for_table( &dst_nsp, &table.name.to_string(), - &table, + table, false, false, false, diff --git a/store/postgres/src/relational/dsl.rs b/store/postgres/src/relational/dsl.rs index 13cab9dd9d0..8620fe7c1fa 100644 --- a/store/postgres/src/relational/dsl.rs +++ b/store/postgres/src/relational/dsl.rs @@ -86,7 +86,7 @@ pub struct ChildAliasStr { 
impl ChildAliasStr { fn new(idx: u8) -> Self { - let c = 'i' as u8; + let c = b'i'; let alias = if idx == 0 { [c, 0, 0, 0] } else if idx < 10 { @@ -187,9 +187,9 @@ impl<'a> Table<'a> { self.meta .columns .iter() - .chain(META_COLS.into_iter()) + .chain(*META_COLS) .find(|c| &c.name == name) - .map(|c| Column::new(self.clone(), c)) + .map(|c| Column::new(*self, c)) } pub fn name(&self) -> &str { @@ -266,7 +266,7 @@ impl<'a> Table<'a> { .collect(); names.sort(); for name in names { - let column = self.meta.column_for_field(&name)?; + let column = self.meta.column_for_field(name)?; cols.push(column); } } diff --git a/store/postgres/src/relational/index.rs b/store/postgres/src/relational/index.rs index c72d832ba7a..1465b52838a 100644 --- a/store/postgres/src/relational/index.rs +++ b/store/postgres/src/relational/index.rs @@ -49,7 +49,7 @@ impl Display for Method { impl Method { fn parse(method: String) -> Self { - method.parse().unwrap_or_else(|()| Method::Unknown(method)) + method.parse().unwrap_or(Method::Unknown(method)) } } @@ -194,7 +194,7 @@ impl Expr { /// Here we check if all the columns expressions of the two indexes are "kind of same". /// We ignore the operator class of the expression by checking if the string of the /// original expression is a prexif of the string of the current one. - fn is_same_kind_columns(current: &Vec, orig: &Vec) -> bool { + fn is_same_kind_columns(current: &[Expr], orig: &[Expr]) -> bool { if orig.len() != current.len() { return false; } @@ -607,10 +607,8 @@ impl CreateIndex { match self { CreateIndex::Unknown { .. } => (), CreateIndex::Parsed { columns, .. } => { - if columns.len() == 1 { - if columns[0].is_id() { - return true; - } + if columns.len() == 1 && columns[0].is_id() { + return true; } } } @@ -648,15 +646,12 @@ impl CreateIndex { } } - pub fn fields_exist_in_dest<'a>(&self, dest_table: &'a Table) -> bool { + pub fn fields_exist_in_dest(&self, dest_table: &Table) -> bool { fn column_exists<'a>(it: &mut impl Iterator, column_name: &str) -> bool { it.any(|c| *c == *column_name) } - fn some_column_contained<'a>( - expr: &String, - it: &mut impl Iterator, - ) -> bool { + fn some_column_contained<'a>(expr: &str, it: &mut impl Iterator) -> bool { it.any(|c| expr.contains(c)) } @@ -774,7 +769,7 @@ impl IndexList { }; let schema_name = site.namespace.clone(); let layout = store.layout(conn, site).await?; - for (_, table) in &layout.tables { + for table in layout.tables.values() { let indexes = load_indexes_from_table(conn, table, schema_name.as_str()).await?; list.indexes.insert(table.name.to_string(), indexes); } @@ -855,17 +850,13 @@ impl IndexList { .get_results::(conn) .await? 
.into_iter() - .map(|ii| ii.into()) .collect::>(); assert!(ii_vec.len() <= 1); - if ii_vec.len() == 0 || !ii_vec[0].isvalid { + if ii_vec.is_empty() || !ii_vec[0].isvalid { // if a bad index exist lets first drop it - if ii_vec.len() > 0 { - let drop_query = sql_query(format!( - "DROP INDEX {}.{};", - namespace.to_string(), - index_name - )); + if !ii_vec.is_empty() { + let drop_query = + sql_query(format!("DROP INDEX {}.{};", namespace, index_name)); drop_query.execute(conn).await?; } sql_query(create_query).execute(conn).await?; diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 7b9bc0b8e41..4154eb5110a 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -468,9 +468,9 @@ impl Layout { PruningStrategy::Delete => { // Delete all entity versions whose range was closed // before `req.earliest_block` - let range = VidRange::for_prune(conn, &table, 0, req.earliest_block).await?; + let range = VidRange::for_prune(conn, table, 0, req.earliest_block).await?; let mut batcher = - VidBatcher::load(conn, &self.site.namespace, &table, range).await?; + VidBatcher::load(conn, &self.site.namespace, table, range).await?; tracker.start_delete(conn, table, range, &batcher).await?; while !batcher.finished() { @@ -665,7 +665,7 @@ mod status { } impl Phase { - pub fn from_str(phase: &str) -> Self { + fn from_str(phase: &str) -> Self { use Phase::*; match phase { "queued" => Queued, diff --git a/store/postgres/src/relational/rollup.rs b/store/postgres/src/relational/rollup.rs index 82f59e59dbf..c2929f6ca05 100644 --- a/store/postgres/src/relational/rollup.rs +++ b/store/postgres/src/relational/rollup.rs @@ -385,7 +385,7 @@ impl<'a> RollupSql<'a> { .aggregates .iter() .flat_map(|agg| &agg.src_columns) - .map(|col| *col) + .copied() .filter(|&col| col != "id" && col != "timestamp") .collect(); agg_srcs.sort(); @@ -403,7 +403,7 @@ impl<'a> RollupSql<'a> { " order by {src_table}.timestamp) data group by timestamp", src_table = self.src_table )?; - Ok(write_dims(self.dimensions, w)?) 
+ write_dims(self.dimensions, w) } fn select(&self, w: &mut dyn fmt::Write) -> fmt::Result { diff --git a/store/postgres/src/relational/value.rs b/store/postgres/src/relational/value.rs index fadcfdcfbca..a59dcfe511c 100644 --- a/store/postgres/src/relational/value.rs +++ b/store/postgres/src/relational/value.rs @@ -50,22 +50,22 @@ pub enum OidValue { impl FromSql for OidValue { fn from_sql(value: diesel::pg::PgValue) -> diesel::deserialize::Result { - const VARCHAR_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1043) }; - const VARCHAR_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1015) }; - const TEXT_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(25) }; - const TEXT_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1009) }; - const BYTEA_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(17) }; - const BYTEA_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1001) }; - const BOOL_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(16) }; - const BOOL_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1000) }; - const INTEGER_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(23) }; - const INTEGER_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1007) }; - const INT8_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(20) }; - const INT8_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1016) }; - const NUMERIC_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1700) }; - const NUMERIC_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1231) }; - const TIMESTAMPTZ_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1184) }; - const TIMESTAMPTZ_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1185) }; + const VARCHAR_OID: NonZeroU32 = NonZeroU32::new(1043).unwrap(); + const VARCHAR_ARY_OID: NonZeroU32 = NonZeroU32::new(1015).unwrap(); + const TEXT_OID: NonZeroU32 = NonZeroU32::new(25).unwrap(); + const TEXT_ARY_OID: NonZeroU32 = NonZeroU32::new(1009).unwrap(); + const BYTEA_OID: NonZeroU32 = NonZeroU32::new(17).unwrap(); + const BYTEA_ARY_OID: NonZeroU32 = NonZeroU32::new(1001).unwrap(); + const BOOL_OID: NonZeroU32 = NonZeroU32::new(16).unwrap(); + const BOOL_ARY_OID: NonZeroU32 = NonZeroU32::new(1000).unwrap(); + const INTEGER_OID: NonZeroU32 = NonZeroU32::new(23).unwrap(); + const INTEGER_ARY_OID: NonZeroU32 = NonZeroU32::new(1007).unwrap(); + const INT8_OID: NonZeroU32 = NonZeroU32::new(20).unwrap(); + const INT8_ARY_OID: NonZeroU32 = NonZeroU32::new(1016).unwrap(); + const NUMERIC_OID: NonZeroU32 = NonZeroU32::new(1700).unwrap(); + const NUMERIC_ARY_OID: NonZeroU32 = NonZeroU32::new(1231).unwrap(); + const TIMESTAMPTZ_OID: NonZeroU32 = NonZeroU32::new(1184).unwrap(); + const TIMESTAMPTZ_ARY_OID: NonZeroU32 = NonZeroU32::new(1185).unwrap(); match value.get_oid() { VARCHAR_OID | TEXT_OID => { @@ -230,7 +230,7 @@ impl FromOidRow for Entity { .filter(|(value, _)| !matches!(value, OidValue::Null)) .map(|(value, column)| { graph::prelude::Value::from_oid_value(value, &column.column_type) - .map(|value| (Word::from(column.field.clone()), value)) + .map(|value| (column.field.clone(), value)) }); schema.try_make_entity(x).map_err(StoreError::from) } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index eb77976924b..ef066b208c8 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1,10 +1,10 @@ -///! This module contains the gory details of using Diesel to query -///! 
a database schema that is not known at compile time. The code in this -///! module is mostly concerned with constructing SQL queries and some -///! helpers for serializing and deserializing entities. -///! -///! Code in this module works very hard to minimize the number of allocations -///! that it performs +//! This module contains the gory details of using Diesel to query +//! a database schema that is not known at compile time. The code in this +//! module is mostly concerned with constructing SQL queries and some +//! helpers for serializing and deserializing entities. +//! +//! Code in this module works very hard to minimize the number of allocations +//! that it performs use diesel::pg::Pg; use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId}; use diesel::query_dsl::RunQueryDsl; @@ -335,7 +335,7 @@ impl FromColumnValue for r::Value { fn from_timestamp(i: &str) -> Result { scalar::Timestamp::from_rfc3339(i) - .map(|v| r::Value::Timestamp(v)) + .map(r::Value::Timestamp) .map_err(|e| { StoreError::Unknown(anyhow!("failed to convert {} to Timestamp: {}", i, e)) }) @@ -645,7 +645,7 @@ impl<'a> SqlValue<'a> { BigDecimal(d) => { S::Numeric(d.to_string()) } - Timestamp(ts) => S::Timestamp(ts.clone()), + Timestamp(ts) => S::Timestamp(*ts), Bool(b) => S::Bool(*b), List(values) => { match column_type { @@ -685,7 +685,7 @@ impl std::fmt::Display for SqlValue<'_> { S::Int(i) => write!(f, "{}", i), S::Int8(i) => write!(f, "{}", i), S::Numeric(s) => write!(f, "{}", s), - S::Timestamp(ts) => write!(f, "{}", ts.as_microseconds_since_epoch().to_string()), + S::Timestamp(ts) => write!(f, "{}", ts.as_microseconds_since_epoch()), S::Numerics(values) => write!(f, "{:?}", values), S::Bool(b) => write!(f, "{}", b), S::List(values) => write!(f, "{:?}", values), @@ -710,7 +710,7 @@ impl<'a> QueryValue<'a> { Ok(Self { value, column_type }) } - fn many(values: &'a Vec, column_type: &'a ColumnType) -> QueryResult> { + fn many(values: &'a [Value], column_type: &'a ColumnType) -> QueryResult> { values .iter() .map(|value| Self::new(value, column_type)) @@ -718,10 +718,7 @@ impl<'a> QueryValue<'a> { } fn is_null(&self) -> bool { - match &self.value { - SqlValue::Null => true, - _ => false, - } + matches!(&self.value, SqlValue::Null) } } @@ -817,7 +814,7 @@ impl<'a> QueryFragment for QueryValue<'a> { } fn process_vec_ast<'a, T: diesel::serialize::ToSql>( - values: &'a Vec, + values: &'a [T], out: &mut AstPass<'_, 'a, Pg>, sql_language: &str, ) -> Result<(), DieselError> { @@ -891,12 +888,10 @@ impl Comparison { | Comparison::Greater, Value::Bool(_) | Value::List(_) | Value::Null, ) - | (Comparison::Match, _) => { - return Err(StoreError::UnsupportedFilter( - self.to_string(), - value.to_string(), - )); - } + | (Comparison::Match, _) => Err(StoreError::UnsupportedFilter( + self.to_string(), + value.to_string(), + )), } } } @@ -1009,7 +1004,7 @@ impl PrefixType { } } -fn is_large_string(s: &String) -> Result { +fn is_large_string(s: &str) -> Result { let len = if s.starts_with("0x") { (s.len() - 2) / 2 } else { @@ -1278,19 +1273,17 @@ impl<'a> QueryFragment for QueryChild<'a> { out.push_sql(" = "); child_column.walk_ast(out.reborrow())?; } + } else if parent_column.is_list() { + // Type C: i.id = any(c.child_ids) + child_column.walk_ast(out.reborrow())?; + out.push_sql(" = any("); + parent_column.walk_ast(out.reborrow())?; + out.push_sql(")"); } else { - if parent_column.is_list() { - // Type C: i.id = any(c.child_ids) - child_column.walk_ast(out.reborrow())?; - out.push_sql(" = any("); - 
parent_column.walk_ast(out.reborrow())?; - out.push_sql(")"); - } else { - // Type D: i.id = c.child_id - child_column.walk_ast(out.reborrow())?; - out.push_sql(" = "); - parent_column.walk_ast(out.reborrow())?; - } + // Type D: i.id = c.child_id + child_column.walk_ast(out.reborrow())?; + out.push_sql(" = "); + parent_column.walk_ast(out.reborrow())?; } out.push_sql(" and "); @@ -1375,7 +1368,7 @@ impl<'a> Filter<'a> { ) -> Result { fn column_and_value<'v>( table: dsl::Table<'v>, - attr: &String, + attr: &str, value: &'v Value, ) -> Result<(dsl::Column<'v>, QueryValue<'v>), StoreError> { let column = table.column_for_field(attr)?; @@ -1386,7 +1379,7 @@ impl<'a> Filter<'a> { fn starts_or_ends_with<'s>( table: dsl::Table<'s>, - attr: &String, + attr: &str, value: &Value, op: &'static str, starts_with: bool, @@ -1414,18 +1407,16 @@ impl<'a> Filter<'a> { | Value::Int(_) | Value::Int8(_) | Value::List(_) - | Value::Null => { - return Err(StoreError::UnsupportedFilter( - op.to_owned(), - value.to_string(), - )); - } + | Value::Null => Err(StoreError::UnsupportedFilter( + op.to_owned(), + value.to_string(), + )), } } fn cmp<'s>( table: dsl::Table<'s>, - attr: &String, + attr: &str, op: Comparison, value: &'s Value, ) -> Result, StoreError> { @@ -1435,8 +1426,7 @@ impl<'a> Filter<'a> { if column.use_prefix_comparison() && !value.is_null() { let column_type = column.column_type(); - PrefixComparison::new(op, column, column_type, value) - .map(|pc| Filter::PrefixCmp(pc)) + PrefixComparison::new(op, column, column_type, value).map(Filter::PrefixCmp) } else { let value = QueryValue::new(value, column.column_type())?; Ok(Filter::Cmp(column, op, value)) @@ -1445,7 +1435,7 @@ impl<'a> Filter<'a> { fn contains<'s>( table: dsl::Table<'s>, - attr: &String, + attr: &str, op: ContainsOp, value: &'s Value, ) -> Result, StoreError> { @@ -1522,7 +1512,7 @@ impl<'a> Filter<'a> { } NotIn(attr, values) => { let column = table.column_for_field(attr.as_str())?; - let values = QueryValue::many(values, &column.column_type())?; + let values = QueryValue::many(values, column.column_type())?; Ok(F::NotIn(column, values)) } Contains(attr, value) => contains(table, attr, K::Like, value), @@ -1755,7 +1745,7 @@ impl<'a> Filter<'a> { if have_non_nulls { if column.use_prefix_comparison() - && PrefixType::new(&column).is_ok() + && PrefixType::new(column).is_ok() && values.iter().all(|v| match &v.value { SqlValue::Text(s) => s.len() < STRING_PREFIX_SIZE, SqlValue::String(s) => s.len() < STRING_PREFIX_SIZE, @@ -1770,7 +1760,7 @@ impl<'a> Filter<'a> { // query optimizer // See PrefixComparison for a more detailed discussion of what // is happening here - PrefixType::new(&column)?.push_column_prefix(&column, &mut out.reborrow())?; + PrefixType::new(column)?.push_column_prefix(column, &mut out.reborrow())?; } else { column.walk_ast(out.reborrow())?; } @@ -2232,7 +2222,7 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" e\n where "); // This clause with an empty array would filter out everything - if self.excluded_keys.len() > 0 { + if !self.excluded_keys.is_empty() { out.push_identifier(&self.table.primary_key().name)?; // For truly gigantic `excluded_keys` lists, this will be slow, and // we should rewrite this query to use a CTE or a temp table to hold @@ -2316,7 +2306,7 @@ impl<'a> InsertRow<'a> { }) .collect::>()?; if let ColumnType::TSVector(config) = &column.column_type { - InsertValue::Fulltext(fulltext_field_values, &config) + 
InsertValue::Fulltext(fulltext_field_values, config) } else { return Err(StoreError::FulltextColumnMissingConfig); } @@ -2508,7 +2498,7 @@ impl<'a> ConflictingEntitiesQuery<'a> { .iter() .map(|entity| layout.table_for_entity(entity).map(|table| table.as_ref())) .collect::, _>>()?; - let ids = IdList::try_from_iter_ref(group.ids().map(|id| IdRef::from(id)))?; + let ids = IdList::try_from_iter_ref(group.ids().map(IdRef::from))?; Ok(ConflictingEntitiesQuery { tables, ids }) } } @@ -3017,11 +3007,11 @@ impl<'a> FilterWindow<'a> { out.push_sql("select '"); out.push_sql(self.table.meta.object.as_str()); out.push_sql("' as entity, c.id, c.vid, p.id::text as "); - out.push_sql(&*PARENT_ID); + out.push_sql(PARENT_ID); limit .sort_key .select(&mut out, SelectStatementLevel::InnerStatement)?; - self.children(true, &limit, &mut out) + self.children(true, limit, &mut out) } /// Collect all the parent id's from all windows @@ -3587,7 +3577,7 @@ impl<'a> SortKey<'a> { direction: SortDirection, ) -> Result>, QueryExecutionError> { assert!(entity_types.len() < 255); - return entity_types + entity_types .iter() .enumerate() .map(|(i, entity_type)| { @@ -3645,7 +3635,7 @@ impl<'a> SortKey<'a> { }) } }) - .collect::>, QueryExecutionError>>(); + .collect::>, QueryExecutionError>>() } fn with_child_interface_key<'a>( @@ -3964,7 +3954,7 @@ impl<'a> SortKey<'a> { ) -> QueryResult<()> { fn order_by_parent_id(out: &mut AstPass) { out.push_sql("order by "); - out.push_sql(&*PARENT_ID); + out.push_sql(PARENT_ID); out.push_sql(", "); } @@ -4065,11 +4055,8 @@ impl<'a> SortKey<'a> { )); } - match sort_by.column_type() { - ColumnType::TSVector(_) => { - return Err(internal_error!("TSVector is not supported")); - } - _ => {} + if let ColumnType::TSVector(_) = sort_by.column_type() { + return Err(internal_error!("TSVector is not supported")); } } @@ -4176,8 +4163,8 @@ impl<'a> SortKey<'a> { Ok(()) } - match self { - SortKey::ChildKey(nested) => match nested { + if let SortKey::ChildKey(nested) = self { + match nested { ChildKey::Single(child) => { add( &child.child_from, @@ -4218,8 +4205,7 @@ impl<'a> SortKey<'a> { out, )?; } - }, - _ => {} + } } Ok(()) } @@ -4389,7 +4375,7 @@ impl<'a> FilterQuery<'a> { out.push_sql(" from (select "); write_column_names(&window.column_names, window.table, Some("c."), &mut out)?; out.push_sql(", p.id::text as "); - out.push_sql(&*PARENT_ID); + out.push_sql(PARENT_ID); window.children(false, &self.limit, &mut out)?; out.push_sql(") c"); out.push_sql("\n "); @@ -4663,7 +4649,7 @@ impl<'a> QueryFragment for ClampRangeQuery<'a> { self.br_column.clamp(&mut out)?; out.push_sql("\n where "); - id_is_in(&self.entity_ids, &mut out)?; + id_is_in(self.entity_ids, &mut out)?; out.push_sql(" and ("); self.br_column.latest(&mut out); out.push_sql(")"); @@ -5097,8 +5083,8 @@ fn jsonb_build_object( /// Helper function to iterate over the merged fields of BASE_SQL_COLUMNS and the provided attribute /// names, yielding valid SQL names for the given table. 
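Note on the relational_queries.rs hunks above: several single-arm `match` statements with a catch-all `_ => {}` or `_ => false` arm are rewritten as `if let` or `matches!` (the `TSVector` check and the `SqlValue::Null` test, for instance), which is what clippy's `single_match` and `match_like_matches_macro` lints suggest. A standalone sketch with invented types:

    enum Shape {
        Circle(f64),
        Square(f64),
    }

    fn describe(shape: &Shape) -> &'static str {
        // A single interesting arm plus `_ => {}` reads better as `if let`.
        if let Shape::Circle(radius) = shape {
            if *radius <= 0.0 {
                return "degenerate circle";
            }
        }
        // A pure yes/no check collapses to `matches!`.
        if matches!(shape, Shape::Square(_)) {
            "square"
        } else {
            "circle"
        }
    }

    fn main() {
        println!("{}", describe(&Shape::Circle(1.0)));
        println!("{}", describe(&Shape::Square(2.0)));
    }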
-fn iter_column_names<'a, 'b>( - attribute_names: &'a BTreeSet, +fn iter_column_names<'b>( + attribute_names: &BTreeSet, table: dsl::Table<'b>, include_block_range_column: bool, ) -> impl Iterator { diff --git a/store/postgres/src/sql/mod.rs b/store/postgres/src/sql/mod.rs index 55917f854c4..f08f89ae711 100644 --- a/store/postgres/src/sql/mod.rs +++ b/store/postgres/src/sql/mod.rs @@ -22,7 +22,7 @@ mod test { let namespace = Namespace::new("sgd0815".to_string()).unwrap(); let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string())); let catalog = Catalog::for_tests(site.clone(), BTreeSet::new()).unwrap(); - let layout = Layout::new(site, &schema, catalog).unwrap(); - layout + + Layout::new(site, &schema, catalog).unwrap() } } diff --git a/store/postgres/src/sql/parser.rs b/store/postgres/src/sql/parser.rs index 9f1b1483741..e263a54deb2 100644 --- a/store/postgres/src/sql/parser.rs +++ b/store/postgres/src/sql/parser.rs @@ -24,7 +24,7 @@ impl Parser { validator.validate_statements(&mut statements)?; let statement = statements - .get(0) + .first() .ok_or_else(|| anyhow!("No SQL statements found"))?; Ok(statement.to_string()) diff --git a/store/postgres/src/sql/validation.rs b/store/postgres/src/sql/validation.rs index ac8421f9648..73720b771b0 100644 --- a/store/postgres/src/sql/validation.rs +++ b/store/postgres/src/sql/validation.rs @@ -38,7 +38,7 @@ pub enum Error { #[error("Qualified table names are not supported: {0}")] NoQualifiedTables(String), #[error("Internal error: {0}")] - InternalError(String), + Internal(String), } /// A wrapper around table names that correctly handles quoted vs unquoted @@ -108,7 +108,7 @@ impl CteStack { fn add_ctes(&mut self, ctes: &[Cte]) -> ControlFlow { let Some(entry) = self.stack.last_mut() else { - return ControlFlow::Break(Error::InternalError("CTE stack is empty".into())); + return ControlFlow::Break(Error::Internal("CTE stack is empty".into())); }; for cte in ctes { entry.insert(TableName::from(&cte.alias.name)); @@ -266,7 +266,7 @@ impl VisitorMut for Validator<'_> { ) -> ControlFlow { /// Check whether `args` is a single string argument and return that /// string - fn extract_string_arg(args: &Vec) -> Option { + fn extract_string_arg(args: &[FunctionArg]) -> Option { if args.len() != 1 { return None; } @@ -305,7 +305,7 @@ impl VisitorMut for Validator<'_> { } (Some(_), Some(_)) => { // Table exists but has args, must be a function - return self.validate_function_name(&name); + return self.validate_function_name(name); } (None, Some(args)) => { // Table does not exist but has args, is either an @@ -314,7 +314,7 @@ impl VisitorMut for Validator<'_> { if !self.layout.has_aggregation(table_name.as_str()) { // Not an aggregation, must be a function - return self.validate_function_name(&name); + return self.validate_function_name(name); } let TableFunctionArgs { args, settings } = args; @@ -339,7 +339,7 @@ impl VisitorMut for Validator<'_> { let Some(table) = self.layout.aggregation_table(table_name.as_str(), intv) else { - return self.validate_function_name(&name); + return self.validate_function_name(name); }; table } diff --git a/store/postgres/src/store_events.rs b/store/postgres/src/store_events.rs index 5c7b8dfd845..6189120f602 100644 --- a/store/postgres/src/store_events.rs +++ b/store/postgres/src/store_events.rs @@ -193,7 +193,7 @@ impl SubscriptionManager { let stale_ids = subscriptions .iter_mut() .filter_map(|(id, sender)| match sender.is_closed() { - true => Some(id.clone()), + true => Some(*id), false => None, }) 
.collect::>(); diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 7b6d37026de..8f2faa9ecda 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -365,7 +365,7 @@ impl SubgraphStore { // if it doesn't exist, we need to copy the graft base to the new deployment let graft_base_layout = if !exists { let graft_base = match deployment.graft_base.as_ref() { - Some(base) => Some(self.layout(&base).await?), + Some(base) => Some(self.layout(base).await?), None => None, }; @@ -1016,7 +1016,7 @@ impl Inner { .ok_or_else(|| StoreError::UnknownShard(shard.to_string()))?; infos.extend(store.deployment_statuses(&sites).await?); } - let nodes = self.mirror.fill_assignments(&mut infos).await?; + let nodes = self.mirror.fill_assignments(&infos).await?; for info in infos.iter_mut() { info.node = nodes.get(&info.id).map(|(node, _)| node.clone()); info.paused = nodes.get(&info.id).map(|(_, paused)| *paused); @@ -1030,7 +1030,9 @@ impl Inner { let id = DeploymentHash::new(deployment_id.clone()) .map_err(|id| internal_error!("illegal deployment id {}", id))?; let (store, site) = self.store(&id).await?; - let statuses = store.deployment_statuses(&[site.clone()]).await?; + let statuses = store + .deployment_statuses(std::slice::from_ref(&site)) + .await?; let status = statuses .first() .ok_or_else(|| StoreError::DeploymentNotFound(deployment_id.clone()))?; @@ -1643,10 +1645,7 @@ impl SubgraphStoreTrait for SubgraphStore { ) -> Result, StoreError> { let deployment = deployment.to_string(); let mut pconn = self.primary_conn().await?; - pconn - .get_subgraph_features(deployment) - .await - .map_err(|e| e.into()) + pconn.get_subgraph_features(deployment).await } async fn entity_changes_in_block( diff --git a/store/postgres/src/vid_batcher.rs b/store/postgres/src/vid_batcher.rs index 0dea582bbac..8cb0496bd86 100644 --- a/store/postgres/src/vid_batcher.rs +++ b/store/postgres/src/vid_batcher.rs @@ -239,7 +239,7 @@ impl VidBatcher { pub(crate) fn set_batch_size(&mut self, size: usize) { self.batch_size.size = size as i64; self.end = match &self.ogive { - Some(ogive) => ogive.next_point(self.start, size as usize).unwrap(), + Some(ogive) => ogive.next_point(self.start, size).unwrap(), None => self.start + size as i64, }; } @@ -374,7 +374,6 @@ mod tests { (_, None) => { if start > end { // Expected, the batcher is exhausted - return; } else { panic!("step didn't return start and end") } @@ -436,20 +435,23 @@ mod tests { // The schedule of how we move through the bounds above in batches, // with varying timings for each batch - batcher.run(040, 075, 10, S010).await; - batcher.run(076, 145, 20, S010).await; - batcher.run(146, 240, 40, S200).await; - batcher.run(241, 270, 20, S200).await; - batcher.run(271, 281, 10, S200).await; - batcher.run(282, 287, 05, S050).await; - batcher.run(288, 298, 10, S050).await; - batcher.run(299, 309, 20, S050).await; - batcher.run(310, 325, 40, S100).await; - batcher.run(326, 336, 40, S100).await; - batcher.run(337, 347, 40, S100).await; - batcher.run(348, 357, 40, S100).await; - batcher.run(358, 359, 40, S010).await; - assert!(batcher.finished()); + #[allow(clippy::zero_prefixed_literal)] + { + batcher.run(040, 075, 10, S010).await; + batcher.run(076, 145, 20, S010).await; + batcher.run(146, 240, 40, S200).await; + batcher.run(241, 270, 20, S200).await; + batcher.run(271, 281, 10, S200).await; + batcher.run(282, 287, 05, S050).await; + batcher.run(288, 298, 10, S050).await; + batcher.run(299, 309, 20, 
S050).await; + batcher.run(310, 325, 40, S100).await; + batcher.run(326, 336, 40, S100).await; + batcher.run(337, 347, 40, S100).await; + batcher.run(348, 357, 40, S100).await; + batcher.run(358, 359, 40, S010).await; + assert!(batcher.finished()); + } batcher.at(360, 359, 80); batcher.step(360, 359, S010).await; diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 39ef086e61d..ff5ffb2d45b 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -102,7 +102,7 @@ impl LastRollup { (true, Some(_)) => { let block_time = store.block_time(site).await?; block_time - .map(|b| LastRollup::Some(b)) + .map(LastRollup::Some) .unwrap_or(LastRollup::Unknown) } }; @@ -126,7 +126,7 @@ impl LastRollupTracker { block, ) .await - .map(|kind| Mutex::new(kind))?; + .map(Mutex::new)?; Ok(Self(rollup)) } @@ -209,10 +209,7 @@ impl SyncStore { } async fn block_cursor(&self) -> Result { - self.writable - .block_cursor(self.site.cheap_clone()) - .await - .map(FirehoseCursor::from) + self.writable.block_cursor(self.site.cheap_clone()).await } async fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { @@ -653,7 +650,7 @@ impl BlockTracker { /// a batch can still be appended to enum QueuedBatch { /// An open batch that can still be appended to - Open(Batch), + Open(Box), /// A closed batch that can no longer be modified Closed(Arc), /// Temporary placeholder during state transitions. Must never be @@ -691,7 +688,7 @@ impl QueuedBatch { fn close(&mut self) -> Arc { let old = std::mem::replace(self, QueuedBatch::Invalid); *self = match old { - QueuedBatch::Open(batch) => QueuedBatch::Closed(Arc::new(batch)), + QueuedBatch::Open(batch) => QueuedBatch::Closed(Arc::new(*batch)), closed @ QueuedBatch::Closed(_) => closed, QueuedBatch::Invalid => unreachable!("close is never called on a QueuedBatch::Invalid"), }; @@ -710,6 +707,7 @@ impl QueuedBatch { /// The `processed` flag is set to true as soon as the background writer is /// working on that request. 
Once it has been set, no changes can be made to /// the request +#[allow(clippy::large_enum_variant)] enum Request { Write { queued: Instant, @@ -770,7 +768,7 @@ impl Request { queued: Instant::now(), store, stopwatch, - batch: RwLock::new(QueuedBatch::Open(batch)), + batch: RwLock::new(QueuedBatch::Open(Box::new(batch))), processed: AtomicBool::new(false), } } @@ -1187,7 +1185,7 @@ impl Queue { // are not 'full' at the head of the // queue, something that start_writer // has to take into account - return Ok(Some(batch)); + Ok(Some(batch)) } Err(RwLockError::Poisoned(e)) => { panic!("rwlock on batch was poisoned {:?}", e); @@ -1364,9 +1362,7 @@ impl Queue { // already existing entries in map as that would make us // produce stale values for (k, v) in effective_ops(batch, derived_query, at) { - if !map.contains_key(&k) { - map.insert(k, v); - } + map.entry(k).or_insert(v); } map }, @@ -1920,7 +1916,7 @@ impl WritableStoreTrait for WritableStore { store .writable(logger, self.store.site.id.into(), manifest_idx_and_name) .await - .map(|store| Some(store)) + .map(Some) } else { Ok(None) } diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index c16db6c5d11..2f0d24a9489 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -22,3 +22,6 @@ tokio = { workspace = true } [dev-dependencies] hex = "0.4.3" pretty_assertions = "1.4.1" + +[lints] +workspace = true diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index f085e2dbd9d..76ae6d52937 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -105,13 +105,13 @@ impl FakeBlock { pub fn as_ethereum_block(&self) -> EthereumBlock { let parent_hash = H256::from_str(self.parent_hash.as_str()).expect("invalid parent hash"); - let mut block = LightEthereumBlock::default(); - block.number = Some(self.number.into()); - block.parent_hash = parent_hash; - block.hash = Some(H256(self.block_hash().as_slice().try_into().unwrap())); - if let Some(ts) = self.timestamp { - block.timestamp = ts; - } + let block = LightEthereumBlock { + number: Some(self.number.into()), + parent_hash, + hash: Some(H256(self.block_hash().as_slice().try_into().unwrap())), + timestamp: self.timestamp.unwrap_or_default(), + ..Default::default() + }; EthereumBlock { block: Arc::new(block), @@ -120,19 +120,21 @@ impl FakeBlock { } pub fn as_firehose_block(&self) -> Block { - let mut block = Block::default(); - block.hash = self.hash.clone().into_bytes(); - block.number = self.number as u64; - - let mut header = BlockHeader::default(); - header.parent_hash = self.parent_hash.clone().into_bytes(); - header.timestamp = self.timestamp.map(|ts| Timestamp { - seconds: i64::from_str_radix(&ts.to_string(), 10).unwrap(), - nanos: 0, - }); - block.header = Some(header); - - block + let header = BlockHeader { + parent_hash: self.parent_hash.clone().into_bytes(), + timestamp: self.timestamp.map(|ts| Timestamp { + seconds: ts.to_string().parse().unwrap(), + nanos: 0, + }), + ..Default::default() + }; + + Block { + hash: self.hash.clone().into_bytes(), + number: self.number as u64, + header: Some(header), + ..Default::default() + } } } diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index a671e770a6f..af973c32993 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -123,6 +123,7 @@ where } /// Run a test with a connection into the primary database, not a full store +#[allow(clippy::await_holding_lock)] pub async fn 
run_test_with_conn(test: F) where F: AsyncFnOnce(&mut AsyncPgConnection), @@ -568,8 +569,8 @@ async fn execute_subgraph_query_internal( QueryExecutionOptions { resolver, deadline, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, trace, }, ) @@ -607,8 +608,8 @@ pub fn all_shards() -> Vec { fn build_store() -> (Arc, ConnectionPool, Config, Arc) { let mut opt = Opt::default(); - let url = std::env::var_os("THEGRAPH_STORE_POSTGRES_DIESEL_URL").filter(|s| s.len() > 0); - let file = std::env::var_os("GRAPH_NODE_TEST_CONFIG").filter(|s| s.len() > 0); + let url = std::env::var_os("THEGRAPH_STORE_POSTGRES_DIESEL_URL").filter(|s| !s.is_empty()); + let file = std::env::var_os("GRAPH_NODE_TEST_CONFIG").filter(|s| !s.is_empty()); if let Some(file) = file { let file = file.into_string().unwrap(); opt.config = Some(file); diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index ff6eb06302c..e4dee44c9ab 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -101,7 +101,7 @@ impl LinkResolver for TextResolver { self.texts .get(&link.link) .ok_or(anyhow!("No text for {}", &link.link)) - .map(Clone::clone) + .cloned() } async fn get_block( @@ -880,8 +880,8 @@ specVersion: 0.0.8 .filter_map(|ds| ds.as_onchain().cloned()) .collect::>(); - let data_source = onchain_data_sources.get(0).unwrap(); - let validation_errors = data_source.validate(&LATEST_VERSION); + let data_source = onchain_data_sources.first().unwrap(); + let validation_errors = data_source.validate(LATEST_VERSION); let filter = data_source.mapping.block_handlers[0].filter.clone(); assert_eq!(0, validation_errors.len()); @@ -976,7 +976,7 @@ specVersion: 0.0.8 .filter_map(|ds| ds.as_onchain().cloned()) .collect::>(); - let data_source = onchain_data_sources.get(0).unwrap(); + let data_source = onchain_data_sources.first().unwrap(); let validation_errors = data_source.validate(LATEST_VERSION); let filters = data_source .mapping @@ -1041,7 +1041,7 @@ specVersion: 0.0.8 .filter_map(|ds| ds.as_onchain().cloned()) .collect::>(); - let data_source = onchain_data_sources.get(0).unwrap(); + let data_source = onchain_data_sources.first().unwrap(); let validation_errors = data_source.validate(LATEST_VERSION); let filters = data_source .mapping @@ -1103,12 +1103,12 @@ specVersion: 0.0.2 .filter_map(|ds| ds.as_onchain().cloned()) .collect::>(); - let data_source = onchain_data_sources.get(0).unwrap(); + let data_source = onchain_data_sources.first().unwrap(); let filter = data_source.mapping.block_handlers[0].filter.clone(); let required_capabilities = NodeCapabilities::from_data_sources(&onchain_data_sources); assert_eq!(BlockHandlerFilter::Call, filter.unwrap()); - assert_eq!(true, required_capabilities.traces); + assert!(required_capabilities.traces); assert_eq!("Qmmanifest", manifest.id.as_str()); } @@ -1151,12 +1151,12 @@ specVersion: 0.0.8 .filter_map(|ds| ds.as_onchain().cloned()) .collect::>(); - let data_source = onchain_data_sources.get(0).unwrap(); + let data_source = onchain_data_sources.first().unwrap(); let filter = data_source.mapping.block_handlers[0].filter.clone(); let required_capabilities = NodeCapabilities::from_data_sources(&onchain_data_sources); assert_eq!(BlockHandlerFilter::Once, filter.unwrap()); - assert_eq!(false, required_capabilities.traces); + assert!(!required_capabilities.traces); assert_eq!("Qmmanifest", manifest.id.as_str()); } @@ -1200,7 +1200,7 
@@ specVersion: 0.0.2 let required_capabilities = NodeCapabilities::from_data_sources(&onchain_data_sources); assert_eq!("Qmmanifest", manifest.id.as_str()); - assert_eq!(true, required_capabilities.traces); + assert!(required_capabilities.traces); } #[test] @@ -1238,6 +1238,23 @@ graft: }) } +async fn has_feature_validation_error( + unvalidated: UnvalidatedSubgraphManifest, + store: Arc, +) -> bool { + unvalidated + .validate(store, true) + .await + .expect_err("Validation must fail") + .into_iter() + .any(|e| { + matches!( + e, + SubgraphManifestValidationError::FeatureValidationError(_) + ) + }) +} + #[test] fn declared_grafting_feature_causes_no_feature_validation_errors() { const YAML: &str = " @@ -1255,18 +1272,8 @@ graft: test_store::run_test_sequentially(|store| async move { let store = store.subgraph_store(); let unvalidated = resolve_unvalidated(YAML).await; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::Grafting)) }) @@ -1286,18 +1293,8 @@ schema: test_store::run_test_sequentially(|store| async move { let store = store.subgraph_store(); let unvalidated = resolve_unvalidated(YAML).await; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::NonFatalErrors)) @@ -1340,18 +1337,8 @@ schema: .expect("Parsing simple manifest works") }; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::FullTextSearch)) @@ -1549,18 +1536,8 @@ dataSources: .expect("Parsing simple manifest works") }; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); }); } @@ -1578,18 +1555,8 @@ schema: test_store::run_test_sequentially(|store| async move { let store = store.subgraph_store(); let unvalidated = resolve_unvalidated(YAML).await; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = 
has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::NonFatalErrors)) diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index be27d111fa8..0923b038254 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -335,7 +335,7 @@ async fn check_vid_sequence() { for n in 0..10 { let id = (10 - n).to_string(); - let name = format!("Mogwai"); + let name = "Mogwai".to_string(); let mogwai_key = make_band_key(id.as_str()); let mogwai_data = entity! { SCHEMA => id: id, name: name }; cache diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs index 6607a04be05..6a978bccfc5 100644 --- a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -46,7 +46,7 @@ impl Resolver for MockResolver { async fn resolve_object( &self, - __: Option, + _: Option, _field: &a::Field, _field_definition: &s::Field, _object_type: ObjectOrInterface<'_>, @@ -122,8 +122,8 @@ async fn introspection_query(schema: Arc, query: &str) -> QueryResult let options = QueryExecutionOptions { resolver: MockResolver, deadline: None, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, trace: false, }; @@ -173,7 +173,7 @@ fn compare(a: &r::Value, b: &r::Value, path: &mut Vec) -> Option<(r::Val path.push(la.len().to_string()); return different(&r::Value::Null, &lb[la.len()]); } - return None; + None } _ => different(a, b), }, @@ -207,7 +207,7 @@ fn compare(a: &r::Value, b: &r::Value, path: &mut Vec) -> Option<(r::Val } } } - return None; + None } _ => different(a, b), }, diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index e33c62fe7de..f206fe2644f 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -420,7 +420,7 @@ async fn insert_test_entities( ) -> Vec { entities .into_iter() - .map(|(typename, entities)| { + .flat_map(|(typename, entities)| { let entity_type = schema.entity_type(typename).unwrap(); entities.into_iter().map(move |mut data| { data.set_vid_if_empty(); @@ -430,7 +430,6 @@ async fn insert_test_entities( } }) }) - .flatten() .collect() } @@ -470,8 +469,8 @@ async fn insert_test_entities( ( "Musician", vec![ - entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"], favoriteCount: 10, birthDate: timestamp.clone(), vid: 0i64 }, - entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100, birthDate: timestamp.clone(), vid: 1i64 }, + entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"], favoriteCount: 10, birthDate: timestamp, vid: 0i64 }, + entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100, birthDate: timestamp, vid: 1i64 }, ], ), ("Publisher", vec![entity! { is => id: pub1, vid: 0i64 }]), @@ -580,9 +579,9 @@ async fn insert_test_entities( let entities1 = vec![( "Musician", vec![ - entity! { is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"], favoriteCount: 5, birthDate: timestamp.clone(), vid: 2i64 }, - entity! { is => id: "m4", name: "Valerie", bands: Vec::::new(), favoriteCount: 20, birthDate: timestamp.clone(), vid: 3i64 }, - entity! 
{ is => id: "m5", name: "Paul", mainBand: "b2", bands: vec!["b2"], favoriteCount: 2 , birthDate: timestamp.clone(), vid: 4i64 }, + entity! { is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"], favoriteCount: 5, birthDate: timestamp, vid: 2i64 }, + entity! { is => id: "m4", name: "Valerie", bands: Vec::::new(), favoriteCount: 20, birthDate: timestamp, vid: 3i64 }, + entity! { is => id: "m5", name: "Paul", mainBand: "b2", bands: vec!["b2"], favoriteCount: 2 , birthDate: timestamp, vid: 4i64 }, ], )]; let entities1 = insert_ops(&manifest.schema, entities1); @@ -2121,7 +2120,7 @@ fn ignores_invalid_field_arguments() { }, // With validations Err(e) => { - match e.get(0).unwrap() { + match e.first().unwrap() { QueryError::ExecutionError(QueryExecutionError::ValidationError( _pos, message, @@ -2156,7 +2155,7 @@ fn leaf_selection_mismatch() { } // With validations Err(e) => { - match e.get(0).unwrap() { + match e.first().unwrap() { QueryError::ExecutionError(QueryExecutionError::ValidationError( _pos, message, @@ -2192,7 +2191,7 @@ fn leaf_selection_mismatch() { } // With validations Err(e) => { - match e.get(0).unwrap() { + match e.first().unwrap() { QueryError::ExecutionError(QueryExecutionError::ValidationError( _pos, message, @@ -2231,7 +2230,7 @@ fn missing_variable() { assert_eq!(exp, *data); } // With GraphQL validations active, this query fails - Err(e) => match e.get(0).unwrap() { + Err(e) => match e.first().unwrap() { QueryError::ExecutionError(QueryExecutionError::ValidationError(_pos, message)) => { assert_eq!(message, "Variable \"$first\" is not defined."); } @@ -2262,7 +2261,7 @@ fn missing_variable() { assert_eq!(exp, *data); } // With GraphQL validations active, this query fails - Err(e) => match e.get(0).unwrap() { + Err(e) => match e.first().unwrap() { QueryError::ExecutionError(QueryExecutionError::ValidationError(_pos, message)) => { assert_eq!(message, "Variable \"$where\" is not defined."); } diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 6527f12756b..dc7c759ef64 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -80,7 +80,7 @@ lazy_static! { static ref TEST_SUBGRAPH_SCHEMA: InputSchema = InputSchema::parse_latest(USER_GQL, TEST_SUBGRAPH_ID.clone()) .expect("Failed to parse user schema"); - static ref BLOCKS: Vec = vec![ + static ref BLOCKS: Vec = [ "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f", "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13", "b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1", diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 483be514504..dcd4e770014 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -182,7 +182,7 @@ lazy_static! { InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()) .expect("failed to parse schema"); static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap(); - static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); + static ref LARGE_INT: BigInt = BigInt::from(i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); static ref BYTES_VALUE: H256 = H256::from(hex!( @@ -200,8 +200,8 @@ lazy_static! { entity! 
{ THINGS_SCHEMA => id: "one", bool: true, - int: std::i32::MAX, - int8: std::i64::MAX, + int: i32::MAX, + int8: i64::MAX, timestamp: Value::Timestamp(Timestamp::from_microseconds_since_epoch(1710837304040956).expect("failed to create timestamp")), bigDecimal: decimal.clone(), bigDecimalArray: vec![decimal.clone(), (decimal + 1.into())], @@ -269,7 +269,7 @@ async fn insert_entity_at( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys ); - let group = row_group_insert(&entity_type, block, entities_with_keys_owned.clone()); + let group = row_group_insert(entity_type, block, entities_with_keys_owned.clone()); layout .insert(&LOGGER, conn, &group, &MOCK_STOPWATCH) .await @@ -312,7 +312,7 @@ async fn update_entity_at( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys ); - let group = row_group_update(&entity_type, block, entities_with_keys_owned.clone()); + let group = row_group_update(entity_type, block, entities_with_keys_owned.clone()); let updated = layout .update(conn, &group, &MOCK_STOPWATCH) .await @@ -394,7 +394,7 @@ async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) { conn, layout, "1", - &*USER_TYPE, + &USER_TYPE, "Johnton", "tonofjohn@email.com", 67_i32, @@ -411,7 +411,7 @@ async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) { conn, layout, "2", - &*USER_TYPE, + &USER_TYPE, "Cindini", "dinici@email.com", 43_i32, @@ -428,7 +428,7 @@ async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) { conn, layout, "3", - &*USER_TYPE, + &USER_TYPE, "Shaqueeena", "teeko@email.com", 28_i32, @@ -493,8 +493,8 @@ async fn insert_pet( } async fn insert_pets(conn: &mut AsyncPgConnection, layout: &Layout) { - insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0, 0).await; - insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0, 1).await; + insert_pet(conn, layout, &DOG_TYPE, "pluto", "Pluto", 0, 0).await; + insert_pet(conn, layout, &CAT_TYPE, "garfield", "Garfield", 0, 1).await; } async fn create_schema(conn: &mut AsyncPgConnection) -> Layout { @@ -571,7 +571,7 @@ where #[graph::test] async fn find() { run_test(async |conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; + insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; // Happy path: find existing entity let entity = layout @@ -605,7 +605,7 @@ async fn insert_null_fulltext_fields() { insert_entity( conn, layout, - &*NULLABLE_STRINGS_TYPE, + &NULLABLE_STRINGS_TYPE, vec![EMPTY_NULLABLESTRINGS_ENTITY.clone()], ) .await; @@ -628,7 +628,7 @@ async fn insert_null_fulltext_fields() { #[graph::test] async fn update() { run_test(async |conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; + insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); @@ -673,7 +673,7 @@ async fn update_many() { insert_entity( conn, layout, - &*SCALAR_TYPE, + &SCALAR_TYPE, vec![one.clone(), two.clone(), three.clone()], ) .await; @@ -762,7 +762,7 @@ async fn update_many() { #[graph::test] async fn serialize_bigdecimal() { run_test(async |conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; + insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); @@ -857,11 +857,11 @@ async fn count_scalar_entities(conn: &mut AsyncPgConnection, layout: &Layout) -> 
#[graph::test] async fn delete() { run_test(async |conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; + insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); two.set("vid", 1i64).unwrap(); - insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]).await; + insert_entity(conn, layout, &SCALAR_TYPE, vec![two]).await; // Delete where nothing is getting deleted let key = SCALAR_TYPE.parse_key("no such entity").unwrap(); @@ -902,7 +902,7 @@ async fn insert_many_and_delete_many() { let mut three = SCALAR_ENTITY.clone(); three.set("id", "three").unwrap(); three.set("vid", 2i64).unwrap(); - insert_entity(conn, layout, &*SCALAR_TYPE, vec![one, two, three]).await; + insert_entity(conn, layout, &SCALAR_TYPE, vec![one, two, three]).await; // confidence test: there should be 3 scalar entities in store right now assert_eq!(3, count_scalar_entities(conn, layout).await); @@ -912,7 +912,7 @@ async fn insert_many_and_delete_many() { .into_iter() .map(|key| SCALAR_TYPE.parse_key(key).unwrap()) .collect(); - let group = row_group_delete(&*SCALAR_TYPE, 1, entity_keys); + let group = row_group_delete(&SCALAR_TYPE, 1, entity_keys); let num_removed = layout .delete(conn, &group, &MOCK_STOPWATCH) .await @@ -945,7 +945,7 @@ async fn layout_cache() { .await .expect("we can get the layout"); let table = layout.table(&table_name).unwrap(); - assert_eq!(false, table.is_account_like); + assert!(!table.is_account_like); set_account_like(conn, site.as_ref(), &table_name, true) .await @@ -958,7 +958,7 @@ async fn layout_cache() { .await .expect("we can get the layout"); let table = layout.table(&table_name).unwrap(); - assert_eq!(true, table.is_account_like); + assert!(table.is_account_like); // Set it back to false set_account_like(conn, site.as_ref(), &table_name, false) @@ -971,7 +971,7 @@ async fn layout_cache() { .await .expect("we can get the layout"); let table = layout.table(&table_name).unwrap(); - assert_eq!(false, table.is_account_like); + assert!(!table.is_account_like); }) .await; } @@ -1030,12 +1030,12 @@ async fn conflicting_entity() { assert_eq!(None, conflict); } - run_test(async |mut conn, layout| { + run_test(async |conn, layout| { let id = Value::String("fred".to_string()); - check(&mut conn, layout, id, "Cat", "Dog", "Ferret", 0).await; + check(conn, layout, id, "Cat", "Dog", "Ferret", 0).await; let id = Value::Bytes(scalar::Bytes::from_str("0xf1ed").unwrap()); - check(&mut conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1).await; + check(conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1).await; }) .await } @@ -1052,9 +1052,9 @@ async fn revert_block() { vid: block as i64, }; if block == 0 { - insert_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block).await; + insert_entity_at(conn, layout, &CAT_TYPE, vec![fred], block).await; } else { - update_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block).await; + update_entity_at(conn, layout, &CAT_TYPE, vec![fred], block).await; } }; @@ -1092,7 +1092,7 @@ async fn revert_block() { order: block, vid: (block + 10) as i64 }; - insert_entity_at(conn, layout, &*MINK_TYPE, vec![marty], block).await; + insert_entity_at(conn, layout, &MINK_TYPE, vec![marty], block).await; } }; @@ -1164,7 +1164,7 @@ impl<'a> QueryChecker<'a> { conn, layout, "1", - &*USER_TYPE, + &USER_TYPE, "Jono", "achangedemail@email.com", 67_i32, @@ -1221,7 +1221,7 @@ fn query(entity_types: &[&EntityType]) -> EntityQuery { BLOCK_NUMBER_MAX, 
EntityCollection::All( entity_types - .into_iter() + .iter() .map(|entity_type| ((*entity_type).clone(), AttributeNames::All)) .collect(), ), @@ -1229,7 +1229,7 @@ fn query(entity_types: &[&EntityType]) -> EntityQuery { } fn user_query() -> EntityQuery { - query(&vec![&*USER_TYPE]) + query(&[&*USER_TYPE]) } trait EasyOrder { @@ -1259,8 +1259,8 @@ impl EasyOrder for EntityQuery { expected = "layout.query failed to execute query: FulltextQueryInvalidSyntax(\"syntax error in tsquery: \\\"Jono 'a\\\"\")" )] async fn check_fulltext_search_syntax_error() { - run_test(async |mut conn, layout| { - QueryChecker::new(&mut conn, layout) + run_test(async |conn, layout| { + QueryChecker::new(conn, layout) .await .check( vec!["1"], @@ -1276,14 +1276,14 @@ async fn check_fulltext_search_syntax_error() { #[graph::test] async fn check_block_finds() { - run_test(async |mut conn, layout| { - let checker = QueryChecker::new(&mut conn, layout).await; + run_test(async |conn, layout| { + let checker = QueryChecker::new(conn, layout).await; update_user_entity( checker.conn, layout, "1", - &*USER_TYPE, + &USER_TYPE, "Johnton", "tonofjohn@email.com", 67_i32, @@ -1322,10 +1322,10 @@ async fn check_block_finds() { #[graph::test] async fn check_find() { - run_test(async |mut conn, layout| { + run_test(async |conn, layout| { // find with interfaces let types = vec![&*CAT_TYPE, &*DOG_TYPE]; - let checker = QueryChecker::new(&mut conn, layout) + let checker = QueryChecker::new(conn, layout) .await .check(vec!["garfield", "pluto"], query(&types)) .await @@ -1920,10 +1920,10 @@ struct FilterChecker<'a> { impl<'a> FilterChecker<'a> { async fn new(conn: &'a mut AsyncPgConnection, layout: &'a Layout) -> Self { let (a1, a2, a2b, a3) = ferrets(); - insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0, 0).await; - insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0, 1).await; - insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0, 2).await; - insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0, 3).await; + insert_pet(conn, layout, &FERRET_TYPE, "a1", &a1, 0, 0).await; + insert_pet(conn, layout, &FERRET_TYPE, "a2", &a2, 0, 1).await; + insert_pet(conn, layout, &FERRET_TYPE, "a2b", &a2b, 0, 2).await; + insert_pet(conn, layout, &FERRET_TYPE, "a3", &a3, 0, 3).await; Self { conn, layout } } @@ -1936,7 +1936,7 @@ impl<'a> FilterChecker<'a> { let expected_entity_ids: Vec = expected_entity_ids.into_iter().map(str::to_owned).collect(); - let query = query(&vec![&*FERRET_TYPE]).filter(filter).asc("id"); + let query = query(&[&*FERRET_TYPE]).filter(filter).asc("id"); let entities = self .layout @@ -2105,7 +2105,7 @@ async fn check_filters() { update_entity_at( checker.conn, layout, - &*FERRET_TYPE, + &FERRET_TYPE, vec![entity! { layout.input_schema => id: "a1", name: "Test", diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index c42bdc2eef4..7eab03c5df5 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -43,7 +43,7 @@ lazy_static! 
{ static ref THINGS_SCHEMA: InputSchema = InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()) .expect("Failed to parse THINGS_GQL"); - static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); + static ref LARGE_INT: BigInt = BigInt::from(i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); static ref BYTES_VALUE: H256 = H256::from(hex!( @@ -270,7 +270,7 @@ async fn bad_id() { #[graph::test] async fn find() { - run_test(async |mut conn, layout| { + run_test(async |conn, layout| { async fn find_entity( conn: &mut AsyncPgConnection, layout: &Layout, @@ -280,12 +280,12 @@ async fn find() { layout .find(conn, &key, BLOCK_NUMBER_MAX) .await - .expect(&format!("Failed to read Thing[{}]", id)) + .unwrap_or_else(|_| panic!("Failed to read Thing[{}]", id)) } const ID: &str = "deadbeef"; const NAME: &str = "Beef"; - insert_thing(&mut conn, layout, ID, NAME, 0).await; + insert_thing(conn, layout, ID, NAME, 0).await; // Happy path: find existing entity let entity = find_entity(conn, layout, ID).await.unwrap(); @@ -301,13 +301,13 @@ async fn find() { #[graph::test] async fn find_many() { - run_test(async |mut conn, layout| { + run_test(async |conn, layout| { const ID: &str = "0xdeadbeef"; const NAME: &str = "Beef"; const ID2: &str = "0xdeadbeef02"; const NAME2: &str = "Moo"; - insert_thing(&mut conn, layout, ID, NAME, 0).await; - insert_thing(&mut conn, layout, ID2, NAME2, 1).await; + insert_thing(conn, layout, ID, NAME, 0).await; + insert_thing(conn, layout, ID2, NAME2, 1).await; let mut id_map = BTreeMap::default(); let ids = IdList::try_from_iter( @@ -335,8 +335,8 @@ async fn find_many() { #[graph::test] async fn update() { - run_test(async |mut conn, layout| { - insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()).await; + run_test(async |conn, layout| { + insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()).await; // Update the entity let mut entity = BEEF_ENTITY.clone(); @@ -366,14 +366,14 @@ async fn update() { #[graph::test] async fn delete() { - run_test(async |mut conn, layout| { + run_test(async |conn, layout| { const TWO_ID: &str = "deadbeef02"; - insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()).await; + insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()).await; let mut two = BEEF_ENTITY.clone(); two.set("id", TWO_ID).unwrap(); two.set("vid", 1i64).unwrap(); - insert_entity(&mut conn, layout, "Thing", two).await; + insert_entity(conn, layout, "Thing", two).await; // Delete where nothing is getting deleted let key = THING_TYPE.parse_key("ffff").unwrap(); @@ -381,7 +381,7 @@ async fn delete() { let mut entity_keys = vec![key.clone()]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); let count = layout - .delete(&mut conn, &group, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .await .expect("Failed to delete"); assert_eq!(0, count); @@ -393,7 +393,7 @@ async fn delete() { .expect("Failed to update entity types"); let group = row_group_delete(&entity_type, 1, entity_keys); let count = layout - .delete(&mut conn, &group, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .await .expect("Failed to delete"); assert_eq!(1, count); @@ -483,21 +483,21 @@ async fn query() { .collect::>() } - run_test(async |mut conn, layout| { + run_test(async |conn, layout| { // This test exercises the different types of queries we generate; // the type of query is based on knowledge of what the test data // looks like, not on just an 
inference from the GraphQL model. // Especially the multiplicity for type A and B queries is determined // by knowing whether there are one or many entities per parent // in the test data - make_thing_tree(&mut conn, layout).await; + make_thing_tree(conn, layout).await; // See https://graphprotocol.github.io/rfcs/engineering-plans/0001-graphql-query-prefetching.html#handling-parentchild-relationships // for a discussion of the various types of relationships and queries // EntityCollection::All let coll = EntityCollection::All(vec![(THING_TYPE.clone(), AttributeNames::All)]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![CHILD1, CHILD2, ROOT, GRANDCHILD1, GRANDCHILD2], things); // EntityCollection::Window, type A, many @@ -511,7 +511,7 @@ async fn query() { ), column_names: AttributeNames::All, }]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![ROOT], things); // EntityCollection::Window, type A, single @@ -527,7 +527,7 @@ async fn query() { ), column_names: AttributeNames::All, }]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![CHILD1, CHILD2], things); // EntityCollection::Window, type B, many @@ -541,7 +541,7 @@ async fn query() { ), column_names: AttributeNames::All, }]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![CHILD1, CHILD2], things); // EntityCollection::Window, type B, single @@ -555,7 +555,7 @@ async fn query() { ), column_names: AttributeNames::All, }]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![GRANDCHILD1, GRANDCHILD2], things); // EntityCollection::Window, type C @@ -570,7 +570,7 @@ async fn query() { ), column_names: AttributeNames::All, }]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![CHILD1, CHILD2], things); // EntityCollection::Window, type D @@ -585,7 +585,7 @@ async fn query() { ), column_names: AttributeNames::All, }]); - let things = fetch(&mut conn, layout, coll).await; + let things = fetch(conn, layout, coll).await; assert_eq!(vec![ROOT, ROOT], things); }) .await; diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 60fb746fbe8..0059032e3ba 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -187,7 +187,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator let test_entity_1 = create_test_entity( "1", - &*USER_TYPE, + &USER_TYPE, "Johnton", "tonofjohn@email.com", 67_i32, @@ -207,7 +207,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator let test_entity_2 = create_test_entity( "2", - &*USER_TYPE, + &USER_TYPE, "Cindini", "dinici@email.com", 43_i32, @@ -218,7 +218,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator ); let test_entity_3_1 = create_test_entity( "3", - &*USER_TYPE, + &USER_TYPE, "Shaqueeena", "queensha@email.com", 28_i32, @@ -238,7 +238,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator let test_entity_3_2 = create_test_entity( "3", - &*USER_TYPE, + &USER_TYPE, "Shaqueeena", "teeko@email.com", 28_i32, @@ -389,7 +389,7 @@ fn insert_entity() { let entity_key = USER_TYPE.parse_key("7").unwrap(); let test_entity = create_test_entity( "7", - &*USER_TYPE, + &USER_TYPE, "Wanjon", 
"wanawana@email.com", 76_i32, @@ -424,7 +424,7 @@ fn update_existing() { let op = create_test_entity( "1", - &*USER_TYPE, + &USER_TYPE, "Wanjon", "wanawana@email.com", 76_i32, @@ -1318,7 +1318,7 @@ fn handle_large_string_with_index() { writable .transact_block_operations( TEST_BLOCK_3_PTR.clone(), - BlockTime::for_test(&*TEST_BLOCK_3_PTR), + BlockTime::for_test(&TEST_BLOCK_3_PTR), FirehoseCursor::None, vec![ make_insert_op(ONE, &long_text, &schema, block, 11), @@ -1426,7 +1426,7 @@ fn handle_large_bytea_with_index() { writable .transact_block_operations( TEST_BLOCK_3_PTR.clone(), - BlockTime::for_test(&*TEST_BLOCK_3_PTR), + BlockTime::for_test(&TEST_BLOCK_3_PTR), FirehoseCursor::None, vec![ make_insert_op(ONE, &long_bytea, &schema, block, 10), @@ -1613,11 +1613,11 @@ fn window() { } fn make_user(id: &str, color: &str, age: i32, vid: i64) -> EntityOperation { - make_color_and_age(&*USER_TYPE, id, color, age, vid) + make_color_and_age(&USER_TYPE, id, color, age, vid) } fn make_person(id: &str, color: &str, age: i32, vid: i64) -> EntityOperation { - make_color_and_age(&*PERSON_TYPE, id, color, age, vid) + make_color_and_age(&PERSON_TYPE, id, color, age, vid) } let ops = vec![ @@ -1873,7 +1873,7 @@ fn parse_null_timestamp() { .expect("block_number to return correct number and timestamp") .unwrap(); assert_eq!(number, 3); - assert_eq!(true, timestamp.is_none()); + assert!(timestamp.is_none()); }) } #[test] @@ -1887,7 +1887,7 @@ fn reorg_tracking() { ) { let test_entity_1 = create_test_entity( "1", - &*USER_TYPE, + &USER_TYPE, "Johnton", "tonofjohn@email.com", age, diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index 5cd31c93e44..23b60ecc52c 100644 --- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -523,8 +523,8 @@ fn version_info() { let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.synced); - assert_eq!(false, vi.failed); + assert!(!vi.synced); + assert!(!vi.failed); assert_eq!( Some("manifest for versionInfoSubgraph"), vi.description.as_deref() @@ -580,9 +580,9 @@ fn subgraph_features() { assert_eq!(handler_kinds.len(), 2); assert!(handler_kinds.contains(&"mock_handler_1".to_string())); assert!(handler_kinds.contains(&"mock_handler_2".to_string())); - assert_eq!(has_declared_calls, true); - assert_eq!(has_bytes_as_ids, true); - assert_eq!(has_aggregations, true); + assert!(has_declared_calls); + assert!(has_bytes_as_ids); + assert!(has_aggregations); assert_eq!( immutable_entities, vec!["User2".to_string(), "Data".to_string()] @@ -805,7 +805,7 @@ fn fail_unfail_deterministic_error() { assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await)); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); // Process the second block. 
@@ -823,7 +823,7 @@ fn fail_unfail_deterministic_error() { assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await)); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let error = SubgraphError { @@ -848,7 +848,7 @@ fn fail_unfail_deterministic_error() { assert!(state.has_deterministic_errors(&latest_block(&store, deployment.id).await)); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); // Unfail the subgraph. @@ -863,7 +863,7 @@ fn fail_unfail_deterministic_error() { assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await)); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); test_store::remove_subgraphs().await; @@ -902,7 +902,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); // Process the second block. @@ -919,7 +919,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let writable = store @@ -939,7 +939,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let error = SubgraphError { @@ -957,7 +957,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(count().await, 1); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); // Running unfail_deterministic_error against a NON-deterministic error will do nothing. 
@@ -972,7 +972,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(count().await, 1); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let error = SubgraphError { @@ -999,7 +999,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(count().await, 2); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); test_store::remove_subgraphs().await; @@ -1038,7 +1038,7 @@ fn fail_unfail_non_deterministic_error() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); let error = SubgraphError { @@ -1062,7 +1062,7 @@ fn fail_unfail_non_deterministic_error() { assert_eq!(count().await, 1); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); // Process the second block. @@ -1079,7 +1079,7 @@ fn fail_unfail_non_deterministic_error() { assert_eq!(count().await, 1); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); // Unfail the subgraph and delete the fatal error. @@ -1093,7 +1093,7 @@ fn fail_unfail_non_deterministic_error() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); test_store::remove_subgraphs().await; @@ -1132,7 +1132,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); // Process the second block. @@ -1149,7 +1149,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let writable = store @@ -1169,7 +1169,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(count().await, 0); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(false, vi.failed); + assert!(!vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let error = SubgraphError { @@ -1187,7 +1187,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(count().await, 1); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); // Running unfail_non_deterministic_error will be NOOP, the error is deterministic. 
@@ -1201,7 +1201,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(count().await, 1); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); let error = SubgraphError { @@ -1226,7 +1226,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(count().await, 2); let vi = get_version_info(&store, NAME).await; assert_eq!(NAME, vi.deployment_id.as_str()); - assert_eq!(true, vi.failed); + assert!(vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); test_store::remove_subgraphs().await; diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index da828e8784f..93fd28c6d8f 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -370,7 +370,7 @@ fn restart() { #[test] fn read_range_test() { run_test(|store, writable, sourceable, deployment| async move { - let result_entities = vec![ + let result_entities = [ r#"(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }])"#, r#"(2, [EntitySourceOperation { entity_op: Modify, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(4), id: String("2"), vid: Int8(2) }, vid: 2 }])"#, r#"(3, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(6), id: String("3"), vid: Int8(3) }, vid: 3 }])"#, @@ -442,10 +442,8 @@ fn read_immutable_only_range_test() { #[test] fn read_range_pool_created_test() { run_test(|store, writable, sourceable, deployment| async move { - let result_entities = vec![ - format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }}, vid: 1 }}])"), - format!("(2, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: 
Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }}, vid: 2 }}])"), - ]; + let result_entities = ["(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity { blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }, vid: 1 }])", + "(2, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity { blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }, vid: 2 }])"]; // Rest of the test remains the same let subgraph_store = store.subgraph_store(); @@ -505,7 +503,7 @@ fn read_range_pool_created_test() { assert_eq!(e.len(), 2); for en in &e { let index = *en.0 - 1; - let a = result_entities[index as usize].clone(); + let a = result_entities[index as usize]; assert_eq!(a, format!("{:?}", en)); } diff --git a/substreams/substreams-trigger-filter/src/lib.rs b/substreams/substreams-trigger-filter/src/lib.rs index 01109234fdd..7e2ce7e84b1 100755 --- a/substreams/substreams-trigger-filter/src/lib.rs +++ b/substreams/substreams-trigger-filter/src/lib.rs @@ -45,7 +45,7 @@ fn near_filter(params: String, blk: Block) -> Result TryFrom<&'a str> for NearFilter<'a> { partial_accounts.extend(lines.take(partials_len).map(|line| { let mut parts = line.split(","); let start = match parts.next() { - Some(x) if x.is_empty() => None, + Some("") => None, x => x, }; let end = match parts.next() { - Some(x) if x.is_empty() => None, + Some("") => None, x => x, }; (start, end) diff --git a/tests/Cargo.toml b/tests/Cargo.toml index e61cab6b660..0c8033c7d8c 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -30,3 +30,6 @@ tokio-util.workspace = true [dev-dependencies] anyhow = "1.0.100" tokio-stream = "0.1" + +[lints] +workspace = true diff --git a/tests/runner-tests/data-source-revert/grafted.yaml b/tests/runner-tests/data-source-revert/grafted.yaml index 9992583098b..16dca790850 100644 --- a/tests/runner-tests/data-source-revert/grafted.yaml +++ b/tests/runner-tests/data-source-revert/grafted.yaml @@ -6,7 +6,7 @@ schema: graft: # This can be overwritten by `updateAndDeploy.js`. # Please commit this file when this happens. 
-  base: QmcAL39QSKZvRssr2ToCJrav7XK9ggajxvBR7M1NNUCqdh
+  base: QmRfaNu6ymQVUiJVTayqWuT8ftLtvn7iE8kQSSAtTPA2J2
   block: 3
 dataSources:
   - kind: ethereum/contract
diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs
index ddf950bd273..96473c6ad8c 100644
--- a/tests/src/fixture/ethereum.rs
+++ b/tests/src/fixture/ethereum.rs
@@ -170,9 +170,9 @@ pub fn push_test_subgraph_trigger(
     source_idx: u32,
 ) {
     let entity = EntitySourceOperation {
-        entity: entity,
-        entity_type: entity_type,
-        entity_op: entity_op,
+        entity,
+        entity_type,
+        entity_op,
         vid,
     };
 
diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs
index a965bab3c23..31ba1680c1b 100644
--- a/tests/src/fixture/mod.rs
+++ b/tests/src/fixture/mod.rs
@@ -407,6 +407,7 @@ fn test_logger(test_name: &str) -> Logger {
     graph::log::logger(true).new(o!("test" => test_name.to_string()))
 }
 
+#[allow(clippy::await_holding_lock)]
 pub async fn stores(test_name: &str, store_config_path: &str) -> Stores {
     let _mutex_guard = STORE_MUTEX.lock().unwrap();
 
diff --git a/tests/src/subgraph.rs b/tests/src/subgraph.rs
index dfac2020efe..e1057fccdcb 100644
--- a/tests/src/subgraph.rs
+++ b/tests/src/subgraph.rs
@@ -124,7 +124,7 @@ impl Subgraph {
     pub async fn wait_ready(name: &str) -> anyhow::Result {
         let start = Instant::now();
         while start.elapsed() <= CONFIG.timeout {
-            if let Some(subgraph) = Self::status(&name).await? {
+            if let Some(subgraph) = Self::status(name).await? {
                 if subgraph.synced || !subgraph.healthy {
                     return Ok(subgraph);
                 }
@@ -199,7 +199,7 @@ impl Subgraph {
         }
         let data = resp["data"].as_object().unwrap();
         let values = keys
-            .into_iter()
+            .iter()
             .map(|key| data[*key].as_array().unwrap().clone())
             .collect::>();
 
diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs
index db459972bc3..322eb643533 100644
--- a/tests/tests/integration_tests.rs
+++ b/tests/tests/integration_tests.rs
@@ -52,10 +52,7 @@ pub struct TestResult {
 
 impl TestResult {
     pub fn success(&self) -> bool {
-        match self.status {
-            TestStatus::Ok => true,
-            _ => false,
-        }
+        matches!(self.status, TestStatus::Ok)
     }
 
     fn print_subgraph(&self) {
@@ -101,7 +98,7 @@ pub enum SourceSubgraph {
 }
 
 impl SourceSubgraph {
-    pub fn from_str(s: &str) -> Self {
+    fn new(s: &str) -> Self {
         if let Some((alias, subgraph)) = s.split_once(':') {
             Self::WithAlias((alias.to_string(), subgraph.to_string()))
         } else {
@@ -147,7 +144,7 @@ impl TestCase {
         T: Future> + Send + 'static,
     {
         let mut test_case = Self::new(name, test);
-        test_case.source_subgraph = Some(vec![SourceSubgraph::from_str(base_subgraph)]);
+        test_case.source_subgraph = Some(vec![SourceSubgraph::new(base_subgraph)]);
         test_case
     }
 
@@ -163,7 +160,7 @@ impl TestCase {
         test_case.source_subgraph = Some(
             source_subgraphs
                 .into_iter()
-                .map(SourceSubgraph::from_str)
+                .map(SourceSubgraph::new)
                 .collect(),
         );
         test_case
@@ -175,7 +172,7 @@
         contracts: &[Contract],
     ) -> Result {
         status!(&self.name, "Deploying subgraph");
-        let subgraph_name = match Subgraph::deploy(&subgraph_name, contracts).await {
+        let subgraph_name = match Subgraph::deploy(subgraph_name, contracts).await {
            Ok(name) => name,
            Err(e) => {
                error!(&self.name, "Deploy failed");
@@ -1119,7 +1116,7 @@ async fn test_declared_calls_basic(ctx: TestContext) -> anyhow::Result<()> {
     assert!(subgraph.healthy);
 
     // Query the results
-    const QUERY: &'static str = "{
+    const QUERY: &str = "{
         transferCalls(first: 1, orderBy: blockNumber) {
             id
             from
@@ -1200,7 +1197,7 @@ async fn test_declared_calls_struct_fields(ctx: TestContext) -> anyhow::Result<()> {
     sleep(Duration::from_secs(2)).await;
 
     // Query the results
-    const QUERY: &'static str = "{
+    const QUERY: &str = "{
         assetTransferCalls(first: 1, orderBy: blockNumber) {
             id
             assetAddr
diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs
index 6e3b7d80b0c..473ac8860f9 100644
--- a/tests/tests/runner_tests.rs
+++ b/tests/tests/runner_tests.rs
@@ -38,14 +38,14 @@ fn assert_eq_ignore_backtrace(err: &SubgraphError, expected: &SubgraphError) {
             || err.handler != expected.handler
             || err.deterministic != expected.deterministic
         {
-            false;
-        }
-
-        // Ignore any WASM backtrace in the error message
-        let split_err: Vec<&str> = err.message.split("\\twasm backtrace:").collect();
-        let split_expected: Vec<&str> = expected.message.split("\\twasm backtrace:").collect();
+            false
+        } else {
+            // Ignore any WASM backtrace in the error message
+            let split_err: Vec<&str> = err.message.split("\\twasm backtrace:").collect();
+            let split_expected: Vec<&str> = expected.message.split("\\twasm backtrace:").collect();
 
-        split_err.get(0) == split_expected.get(0)
+            split_err.first() == split_expected.first()
+        }
     };
 
     if !equal {
@@ -234,7 +234,7 @@ async fn api_version_0_0_7() {
     ctx.start_and_sync_to(stop_block).await;
 
     let query_res = ctx
-        .query(&format!(r#"{{ testResults{{ id, message }} }}"#,))
+        .query(r#"{ testResults{ id, message } }"#)
         .await
         .unwrap();
 
@@ -318,9 +318,7 @@ async fn derived_loaders() {
 
     // Where the test cases are documented in the code.
     let query_res = ctx
-        .query(&format!(
-            r#"{{ testResult(id:"1_0", block: {{ number: 1 }} ){{ id barDerived{{id value value2}} bBarDerived{{id value value2}} }} }}"#,
-        ))
+        .query(r#"{ testResult(id:"1_0", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#)
         .await
         .unwrap();
 
@@ -368,9 +366,7 @@ async fn derived_loaders() {
     );
 
     let query_res = ctx
-        .query(&format!(
-            r#"{{ testResult(id:"1_1", block: {{ number: 1 }} ){{ id barDerived{{id value value2}} bBarDerived{{id value value2}} }} }}"#,
-        ))
+        .query(r#"{ testResult(id:"1_1", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#)
        .await
        .unwrap();
 
@@ -408,9 +404,7 @@ async fn derived_loaders() {
     );
 
     let query_res = ctx.query(
-        &format!(
-            r#"{{ testResult(id:"2_0" ){{ id barDerived{{id value value2}} bBarDerived{{id value value2}} }} }}"#
-        )
+        r#"{ testResult(id:"2_0" ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#
     )
     .await
     .unwrap();
@@ -494,9 +488,9 @@ async fn end_block() -> anyhow::Result<()> {
             .collect::>();
 
         if should_contain_addr {
-            assert!(addresses.contains(&addr));
+            assert!(addresses.contains(addr));
         } else {
-            assert!(!addresses.contains(&addr));
+            assert!(!addresses.contains(addr));
         };
     }
 
@@ -1208,14 +1202,14 @@ async fn arweave_file_data_sources() {
     assert_eq!(datasources.len(), 1);
     let ds = datasources.first().unwrap();
     assert_ne!(ds.causality_region, CausalityRegion::ONCHAIN);
-    assert_eq!(ds.done_at.is_some(), true);
+    assert!(ds.done_at.is_some());
     assert_eq!(
         ds.param.as_ref().unwrap(),
         &Bytes::from(Word::from(id).as_bytes())
     );
 
     let content_bytes = ctx.arweave_resolver.get(&Word::from(id)).await.unwrap();
-    let content = String::from_utf8(content_bytes.into()).unwrap();
+    let content = String::from_utf8(content_bytes).unwrap();
     let query_res = ctx
         .query(&format!(r#"{{ file(id: "{id}") {{ id, content }} }}"#,))
         .await