From 2e73d0a1ab4cdb3fbfdace9068a167c0b569ea65 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:02:45 -0800 Subject: [PATCH 01/92] all: Run 'cargo clippy --fix' This applies all clippy lints that are marked as MachineApplicable, i.e., safe to apply without changing code semantics --- core/graphman/src/commands/deployment/info.rs | 4 +- .../src/commands/deployment/reassign.rs | 8 +- core/src/subgraph/context/instance/mod.rs | 2 +- core/src/subgraph/error.rs | 16 ++-- core/src/subgraph/inputs.rs | 4 +- core/src/subgraph/instance_manager.rs | 8 +- core/src/subgraph/registrar.rs | 4 +- core/src/subgraph/runner.rs | 29 +++--- gnd/src/main.rs | 2 +- graph/derive/src/lib.rs | 6 +- graph/src/blockchain/block_stream.rs | 4 +- graph/src/blockchain/firehose_block_stream.rs | 12 +-- graph/src/blockchain/mock.rs | 4 +- graph/src/blockchain/mod.rs | 7 +- graph/src/blockchain/types.rs | 2 +- graph/src/components/link_resolver/file.rs | 18 ++-- graph/src/components/link_resolver/ipfs.rs | 2 +- graph/src/components/metrics/block_state.rs | 8 +- graph/src/components/metrics/registry.rs | 2 +- .../chain_identifier_validator.rs | 4 +- .../network_provider/provider_manager.rs | 2 +- graph/src/components/store/entity_cache.rs | 8 +- graph/src/components/store/err.rs | 8 +- graph/src/components/store/mod.rs | 4 +- graph/src/components/store/write.rs | 22 ++--- graph/src/components/subgraph/host.rs | 4 +- graph/src/data/graphql/ext.rs | 2 +- graph/src/data/graphql/load_manager.rs | 6 +- graph/src/data/graphql/object_or_interface.rs | 2 +- graph/src/data/query/cache_status.rs | 9 +- graph/src/data/query/trace.rs | 9 +- graph/src/data/store/id.rs | 4 +- graph/src/data/store/mod.rs | 8 +- graph/src/data/store/scalar/bigint.rs | 2 +- graph/src/data/store/scalar/bytes.rs | 2 +- graph/src/data/store/scalar/timestamp.rs | 2 +- graph/src/data/subgraph/mod.rs | 38 ++++---- graph/src/data/value.rs | 4 +- graph/src/data_source/common.rs | 14 +-- graph/src/data_source/mod.rs | 2 +- graph/src/data_source/offchain.rs | 12 +-- graph/src/data_source/subgraph.rs | 8 +- graph/src/endpoint.rs | 6 +- graph/src/env/mappings.rs | 2 +- graph/src/env/store.rs | 2 +- graph/src/firehose/endpoints.rs | 14 ++- graph/src/ipfs/cache.rs | 4 +- graph/src/ipfs/mod.rs | 2 +- graph/src/ipfs/test_utils.rs | 2 +- graph/src/runtime/asc_ptr.rs | 4 +- graph/src/schema/api.rs | 4 +- graph/src/schema/ast.rs | 8 +- graph/src/schema/input/mod.rs | 42 ++++---- graph/src/schema/input/sqlexpr.rs | 2 +- graph/src/util/intern.rs | 20 ++-- graph/src/util/lfu_cache.rs | 2 +- graph/src/util/ogive.rs | 2 +- graphql/src/execution/ast.rs | 6 +- graphql/src/execution/execution.rs | 6 +- graphql/src/query/ext.rs | 9 +- graphql/src/runner.rs | 5 +- graphql/src/store/query.rs | 2 +- graphql/src/store/resolver.rs | 10 +- graphql/src/values/coercion.rs | 2 +- runtime/wasm/src/asc_abi/class.rs | 36 ++----- runtime/wasm/src/asc_abi/v0_0_4.rs | 6 +- runtime/wasm/src/asc_abi/v0_0_5.rs | 4 +- runtime/wasm/src/host.rs | 2 +- runtime/wasm/src/host_exports.rs | 28 +++--- runtime/wasm/src/mapping.rs | 2 +- runtime/wasm/src/module/context.rs | 6 +- runtime/wasm/src/module/instance.rs | 10 +- runtime/wasm/src/module/mod.rs | 2 +- runtime/wasm/src/to_from/mod.rs | 2 +- .../resolvers/deployment_mutation/reassign.rs | 2 +- server/http/src/service.rs | 6 +- server/index-node/src/resolver.rs | 4 +- store/postgres/src/block_store.rs | 34 +++---- store/postgres/src/catalog.rs | 2 +- store/postgres/src/chain_head_listener.rs | 3 +- store/postgres/src/chain_store.rs | 
17 ++-- store/postgres/src/copy.rs | 5 +- store/postgres/src/deployment.rs | 6 +- store/postgres/src/deployment_store.rs | 20 ++-- store/postgres/src/detail.rs | 4 +- store/postgres/src/dynds/shared.rs | 3 +- store/postgres/src/fork.rs | 2 +- store/postgres/src/notification_listener.rs | 4 +- store/postgres/src/pool/coordinator.rs | 6 +- store/postgres/src/pool/foreign_server.rs | 6 +- store/postgres/src/pool/manager.rs | 4 +- store/postgres/src/pool/mod.rs | 48 +++++----- store/postgres/src/primary.rs | 8 +- store/postgres/src/relational.rs | 19 ++-- store/postgres/src/relational/ddl.rs | 9 +- store/postgres/src/relational/dsl.rs | 8 +- store/postgres/src/relational/index.rs | 22 ++--- store/postgres/src/relational/prune.rs | 4 +- store/postgres/src/relational/rollup.rs | 4 +- store/postgres/src/relational/value.rs | 34 +++---- store/postgres/src/relational_queries.rs | 95 ++++++++----------- store/postgres/src/sql/parser.rs | 2 +- store/postgres/src/sql/validation.rs | 6 +- store/postgres/src/store_events.rs | 2 +- store/postgres/src/subgraph_store.rs | 9 +- store/postgres/src/vid_batcher.rs | 2 +- store/postgres/src/writable.rs | 17 ++-- store/test-store/src/store.rs | 4 +- tests/src/fixture/ethereum.rs | 6 +- tests/src/subgraph.rs | 4 +- 110 files changed, 456 insertions(+), 547 deletions(-) diff --git a/core/graphman/src/commands/deployment/info.rs b/core/graphman/src/commands/deployment/info.rs index 7cf0e87c758..55e00e917ca 100644 --- a/core/graphman/src/commands/deployment/info.rs +++ b/core/graphman/src/commands/deployment/info.rs @@ -33,7 +33,7 @@ pub async fn load_deployments( ) -> Result, GraphmanError> { let mut primary_conn = primary_pool.get().await?; - crate::deployment::load_deployments(&mut primary_conn, &deployment, &version).await + crate::deployment::load_deployments(&mut primary_conn, deployment, version).await } pub async fn load_deployment_statuses( @@ -56,7 +56,7 @@ pub async fn load_deployment_statuses( let chain = status .chains - .get(0) + .first() .ok_or_else(|| { GraphmanError::Store(anyhow!( "deployment status has no chains on deployment '{id}'" diff --git a/core/graphman/src/commands/deployment/reassign.rs b/core/graphman/src/commands/deployment/reassign.rs index 5d8e282f306..b1ead37cd12 100644 --- a/core/graphman/src/commands/deployment/reassign.rs +++ b/core/graphman/src/commands/deployment/reassign.rs @@ -100,17 +100,17 @@ pub async fn reassign_deployment( let mut catalog_conn = catalog::Connection::new(primary_conn); let changes: Vec = match &curr_node { Some(curr) => { - if &curr == &node { + if curr == node { vec![] } else { catalog_conn - .reassign_subgraph(&deployment.site, &node) + .reassign_subgraph(&deployment.site, node) .await .map_err(GraphmanError::from)? } } None => catalog_conn - .assign_subgraph(&deployment.site, &node) + .assign_subgraph(&deployment.site, node) .await .map_err(GraphmanError::from)?, }; @@ -129,7 +129,7 @@ pub async fn reassign_deployment( let mirror = catalog::Mirror::primary_only(primary_pool); let count = mirror - .assignments(&node) + .assignments(node) .await .map_err(GraphmanError::from)? 
.len(); diff --git a/core/src/subgraph/context/instance/mod.rs b/core/src/subgraph/context/instance/mod.rs index 4ad491e786c..e9c311420e6 100644 --- a/core/src/subgraph/context/instance/mod.rs +++ b/core/src/subgraph/context/instance/mod.rs @@ -241,7 +241,7 @@ where .matches_by_address(trigger.address_match()), TriggerData::Offchain(trigger) => self .offchain_hosts - .matches_by_address(trigger.source.address().as_ref().map(|a| a.as_slice())), + .matches_by_address(trigger.source.address().as_deref()), TriggerData::Subgraph(trigger) => self .subgraph_hosts .matches_by_address(Some(trigger.source.to_bytes().as_slice())), diff --git a/core/src/subgraph/error.rs b/core/src/subgraph/error.rs index c50712c08db..502a28dbc66 100644 --- a/core/src/subgraph/error.rs +++ b/core/src/subgraph/error.rs @@ -50,7 +50,7 @@ impl ProcessingError { /// call the method `detail` to avoid ambiguity with anyhow's `context` /// method pub trait DetailHelper { - fn detail(self: Self, ctx: &str) -> Result; + fn detail(self, ctx: &str) -> Result; } impl DetailHelper for Result { @@ -61,12 +61,12 @@ impl DetailHelper for Result { /// Implement this for errors that are always non-deterministic. pub(crate) trait NonDeterministicErrorHelper { - fn non_deterministic(self: Self) -> Result; + fn non_deterministic(self) -> Result; } impl NonDeterministicErrorHelper for Result { fn non_deterministic(self) -> Result { - self.map_err(|e| ProcessingError::Unknown(e)) + self.map_err(ProcessingError::Unknown) } } @@ -79,7 +79,7 @@ impl NonDeterministicErrorHelper for Result { /// Implement this for errors where it depends on the details whether they /// are deterministic or not. pub(crate) trait ClassifyErrorHelper { - fn classify(self: Self) -> Result; + fn classify(self) -> Result; } impl ClassifyErrorHelper for Result { @@ -88,12 +88,10 @@ impl ClassifyErrorHelper for Result { if ENV_VARS.mappings.store_errors_are_nondeterministic { // Old behavior, just in case the new behavior causes issues ProcessingError::Unknown(Error::from(e)) + } else if e.is_deterministic() { + ProcessingError::Deterministic(Box::new(e)) } else { - if e.is_deterministic() { - ProcessingError::Deterministic(Box::new(e)) - } else { - ProcessingError::Unknown(Error::from(e)) - } + ProcessingError::Unknown(Error::from(e)) } }) } diff --git a/core/src/subgraph/inputs.rs b/core/src/subgraph/inputs.rs index 91bbdd131f4..88e89de4ff4 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -61,8 +61,8 @@ impl IndexingInputs { start_blocks: start_blocks.clone(), end_blocks: end_blocks.clone(), source_subgraph_stores: source_subgraph_stores.clone(), - stop_block: stop_block.clone(), - max_end_block: max_end_block.clone(), + stop_block: *stop_block, + max_end_block: *max_end_block, store, debug_fork: debug_fork.clone(), triggers_adapter: triggers_adapter.clone(), diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 7c8c0799660..5d0c89ae171 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -215,7 +215,7 @@ impl SubgraphInstanceManager { .await? 
.ok_or_else(|| anyhow!("no active deployment for hash {}", hash))?; - let sourceable_store = subgraph_store.clone().sourceable(loc.id.clone()).await?; + let sourceable_store = subgraph_store.clone().sourceable(loc.id).await?; sourceable_stores.push(sourceable_store); } @@ -390,11 +390,7 @@ impl SubgraphInstanceManager { let end_blocks: BTreeSet = manifest .data_sources .iter() - .filter_map(|d| { - d.as_onchain() - .map(|d: &C::DataSource| d.end_block()) - .flatten() - }) + .filter_map(|d| d.as_onchain().and_then(|d: &C::DataSource| d.end_block())) .collect(); // We can set `max_end_block` to the maximum of `end_blocks` and stop the subgraph diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index f34df50ad88..a60f2b01b03 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -150,7 +150,7 @@ where let logger = self.logger.new(o!("subgraph_id" => deployment.hash.to_string(), "node_id" => self.node_id.to_string())); if let Some((assigned, is_paused)) = assigned { - if &assigned == &self.node_id { + if assigned == self.node_id { if is_paused { // Subgraph is paused, so we don't start it debug!(logger, "Deployment assignee is this node"; "assigned_to" => assigned, "paused" => is_paused, "action" => "ignore"); @@ -413,7 +413,7 @@ async fn resolve_start_block( 0 => Ok(None), min_start_block => Retry::spawn(retry_strategy(Some(2), RETRY_DEFAULT_LIMIT), move || { chain - .block_pointer_from_number(&logger, min_start_block - 1) + .block_pointer_from_number(logger, min_start_block - 1) .inspect_err(move |e| warn!(&logger, "Failed to get block number: {}", e)) }) .await diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 7e3ad0fd353..81db925a092 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -144,7 +144,7 @@ where // Filter out data sources that have reached their end block let end_block_filter = |ds: &&C::DataSource| match current_ptr.as_ref() { // We filter out datasources for which the current block is at or past their end block. - Some(block) => ds.end_block().map_or(true, |end| block.number < end), + Some(block) => ds.end_block().is_none_or(|end| block.number < end), // If there is no current block, we keep all datasources. None => true, }; @@ -308,11 +308,10 @@ where // This will require some code refactor in how the BlockStream is created let block_start = Instant::now(); - let action = self.handle_stream_event(event).await.map(|res| { + let action = self.handle_stream_event(event).await.inspect(|res| { self.metrics .subgraph .observe_block_processed(block_start.elapsed(), res.block_finished()); - res })?; self.update_deployment_synced_metric(); @@ -513,7 +512,7 @@ where .observe(elapsed); block_state_metrics - .flush_metrics_to_store(&logger, block_ptr, self.inputs.deployment.id) + .flush_metrics_to_store(logger, block_ptr, self.inputs.deployment.id) .non_deterministic()?; if has_errors { @@ -537,7 +536,7 @@ where // Use `Canceled` to avoiding setting the subgraph health to failed, an error was // just transacted so it will be already be set to unhealthy. 
- Err(ProcessingError::Canceled.into()) + Err(ProcessingError::Canceled) } else { Ok(()) } @@ -561,8 +560,8 @@ where self.ctx .decoder .match_and_decode_many( - &logger, - &block, + logger, + block, triggers, hosts_filter, &self.metrics.subgraph, @@ -996,11 +995,11 @@ where } } - return Ok(action); + Ok(action) } Err(ProcessingError::Canceled) => { debug!(self.logger, "Subgraph block stream shut down cleanly"); - return Ok(Action::Stop); + Ok(Action::Stop) } // Handle unexpected stream errors by marking the subgraph as failed. @@ -1037,7 +1036,7 @@ where .await .context("Failed to set subgraph status to `failed`")?; - return Err(err); + Err(err) } false => { // Shouldn't fail subgraph if it's already failed for non-deterministic @@ -1072,7 +1071,7 @@ where self.state.should_try_unfail_non_deterministic = true; // And restart the subgraph. - return Ok(Action::Restart); + Ok(Action::Restart) } } } @@ -1114,12 +1113,12 @@ where // it so that we are up to date when checking if synced. let cached_head_ptr = self.state.cached_head_ptr.cheap_clone(); if cached_head_ptr.is_none() - || close_to_chain_head(&block_ptr, &cached_head_ptr, CAUGHT_UP_DISTANCE) + || close_to_chain_head(block_ptr, &cached_head_ptr, CAUGHT_UP_DISTANCE) { self.state.cached_head_ptr = self.inputs.chain.chain_head_ptr().await?; } let is_caught_up = - close_to_chain_head(&block_ptr, &self.state.cached_head_ptr, CAUGHT_UP_DISTANCE); + close_to_chain_head(block_ptr, &self.state.cached_head_ptr, CAUGHT_UP_DISTANCE); if is_caught_up { // Stop recording time-to-sync metrics. self.metrics.stream.stopwatch.disable(); @@ -1242,7 +1241,7 @@ where // This propagates any deterministic error as a non-deterministic one. Which might make // sense considering offchain data sources are non-deterministic. if let Some(err) = block_state.deterministic_errors.into_iter().next() { - return Err(anyhow!("{}", err.to_string())); + return Err(anyhow!("{}", err)); } mods.extend( @@ -1469,7 +1468,7 @@ async fn update_proof_of_indexing( (digest_name, Value::from(digest)), ]; if entity_cache.schema.has_aggregations() { - let block_time = Value::Int8(block_time.as_secs_since_epoch() as i64); + let block_time = Value::Int8(block_time.as_secs_since_epoch()); data.push((entity_cache.schema.poi_block_time(), block_time)); } let poi = entity_cache.make_entity(data)?; diff --git a/gnd/src/main.rs b/gnd/src/main.rs index 3e5936824cd..fc79c707310 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -166,7 +166,7 @@ async fn run_graph_node( let (prometheus_registry, metrics_registry) = launcher::setup_metrics(logger); - let ipfs_client = graph::ipfs::new_ipfs_client(&opt.ipfs, &metrics_registry, &logger) + let ipfs_client = graph::ipfs::new_ipfs_client(&opt.ipfs, &metrics_registry, logger) .await .unwrap_or_else(|err| panic!("Failed to create IPFS client: {err:#}")); diff --git a/graph/derive/src/lib.rs b/graph/derive/src/lib.rs index ce13fc9faf9..be7e5364032 100644 --- a/graph/derive/src/lib.rs +++ b/graph/derive/src/lib.rs @@ -33,7 +33,7 @@ fn impl_cheap_clone(input: TokenStream2) -> TokenStream2 { fn cheap_clone_body(data: Data) -> TokenStream2 { match data { Data::Struct(st) => match &st.fields { - Fields::Unit => return quote! { Self }, + Fields::Unit => quote! 
{ Self }, Fields::Unnamed(fields) => { let mut field_clones = Vec::new(); for (num, _) in fields.unnamed.iter().enumerate() { @@ -105,7 +105,7 @@ fn impl_cheap_clone(input: TokenStream2) -> TokenStream2 { let input = match syn::parse2::(input) { Ok(input) => input, Err(e) => { - return e.to_compile_error().into(); + return e.to_compile_error(); } }; let DeriveInput { @@ -275,7 +275,7 @@ pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { if input.sig.asyncness.is_none() { let msg = "the `async` keyword is missing from the function declaration"; - return syn::Error::new_spanned(&input.sig.fn_token, msg) + return syn::Error::new_spanned(input.sig.fn_token, msg) .to_compile_error() .into(); } diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 7d87bba9398..7de4222efe2 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -565,8 +565,8 @@ impl TriggersAdapterWrapper { let ptrs = futures03::future::try_join_all( self.source_subgraph_stores - .iter() - .map(|(_, store)| store.block_ptr()), + .values() + .map(|store| store.block_ptr()), ) .await?; diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 4ec6e17c83f..04558aab619 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -64,10 +64,10 @@ impl FirehoseBlockStreamMetrics { fn observe_successful_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "true"]) + .with_label_values(&[self.deployment.as_str(), provider, "true"]) .inc(); self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp @@ -76,10 +76,10 @@ impl FirehoseBlockStreamMetrics { fn observe_failed_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "false"]) + .with_label_values(&[self.deployment.as_str(), provider, "false"]) .inc(); self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp @@ -88,10 +88,10 @@ impl FirehoseBlockStreamMetrics { fn observe_response(&self, kind: &str, time: &mut Instant, provider: &str) { self.time_between_responses - .with_label_values(&[self.deployment.as_str(), &provider]) + .with_label_values(&[self.deployment.as_str(), provider]) .observe(time.elapsed().as_secs_f64()); self.responses - .with_label_values(&[self.deployment.as_str(), &provider, kind]) + .with_label_values(&[self.deployment.as_str(), provider, kind]) .inc(); // Reset last response timestamp diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index 577b5fbc816..a4a411d8e5c 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -203,8 +203,8 @@ impl UnresolvedDataSource for MockUnresolvedDataSource { #[derive(Debug, Clone)] pub struct MockDataSourceTemplate; -impl Into for MockDataSourceTemplate { - fn into(self) -> DataSourceTemplateInfo { +impl From for DataSourceTemplateInfo { + fn from(_val: MockDataSourceTemplate) -> Self { todo!() } } diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index f8358a209c1..d71f54f7779 100644 --- 
a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -358,8 +358,7 @@ pub trait DataSource: 'static + Sized + Send + Sync + Clone { fn validate(&self, spec_version: &semver::Version) -> Vec; fn has_expired(&self, block: BlockNumber) -> bool { - self.end_block() - .map_or(false, |end_block| block > end_block) + self.end_block().is_some_and(|end_block| block > end_block) } fn has_declared_calls(&self) -> bool { @@ -602,11 +601,11 @@ impl BlockchainKind { // // Split by `/` to, for example, read 'ethereum' in 'ethereum/contracts'. manifest - .get(&Value::String("dataSources".to_owned())) + .get(Value::String("dataSources".to_owned())) .and_then(|ds| ds.as_sequence()) .and_then(|ds| ds.first()) .and_then(|ds| ds.as_mapping()) - .and_then(|ds| ds.get(&Value::String("kind".to_owned()))) + .and_then(|ds| ds.get(Value::String("kind".to_owned()))) .and_then(|kind| kind.as_str()) .and_then(|kind| kind.split('/').next()) .context("invalid manifest") diff --git a/graph/src/blockchain/types.rs b/graph/src/blockchain/types.rs index c64da4f4f7a..34b44f2723e 100644 --- a/graph/src/blockchain/types.rs +++ b/graph/src/blockchain/types.rs @@ -642,7 +642,7 @@ impl ToSql for BlockTime { impl FromSql for BlockTime { fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result { - >::from_sql(bytes).map(|ts| Self(ts)) + >::from_sql(bytes).map(Self) } } diff --git a/graph/src/components/link_resolver/file.rs b/graph/src/components/link_resolver/file.rs index 14b7438642e..2f1da0c7a3f 100644 --- a/graph/src/components/link_resolver/file.rs +++ b/graph/src/components/link_resolver/file.rs @@ -33,7 +33,7 @@ impl FileLinkResolver { /// All paths are treated as absolute paths. pub fn new(base_dir: Option, aliases: HashMap) -> Self { Self { - base_dir: base_dir, + base_dir, timeout: Duration::from_secs(30), aliases, } @@ -81,20 +81,20 @@ impl FileLinkResolver { aliased.clone() } else { match &resolver.base_dir { - Some(dir) => dir.join(&manifest_path_str), + Some(dir) => dir.join(manifest_path_str), None => PathBuf::from(manifest_path_str), } }; let canonical_manifest_path = manifest_path .canonicalize() - .map_err(|e| Error::from(anyhow!("Failed to canonicalize manifest path: {}", e)))?; + .map_err(|e| anyhow!("Failed to canonicalize manifest path: {}", e))?; // The manifest path is the path of the subgraph manifest file in the build directory // We use the parent directory as the base directory for the new resolver let base_dir = canonical_manifest_path .parent() - .ok_or_else(|| Error::from(anyhow!("Manifest path has no parent directory")))? + .ok_or_else(|| anyhow!("Manifest path has no parent directory"))? 
.to_path_buf(); resolver.base_dir = Some(base_dir); @@ -125,7 +125,7 @@ impl LinkResolverTrait for FileLinkResolver { async fn cat(&self, ctx: &LinkResolverContext, link: &Link) -> Result, Error> { let link = remove_prefix(&link.link); - let path = self.resolve_path(&link); + let path = self.resolve_path(link); slog::debug!(ctx.logger, "File resolver: reading file"; "path" => path.to_string_lossy().to_string()); @@ -136,7 +136,7 @@ impl LinkResolverTrait for FileLinkResolver { slog::error!(ctx.logger, "Failed to read file"; "path" => path.to_string_lossy().to_string(), "error" => e.to_string()); - Err(anyhow!("Failed to read file {}: {}", path.display(), e).into()) + Err(anyhow!("Failed to read file {}: {}", path.display(), e)) } } } @@ -146,7 +146,7 @@ impl LinkResolverTrait for FileLinkResolver { } async fn get_block(&self, _ctx: &LinkResolverContext, _link: &Link) -> Result, Error> { - Err(anyhow!("get_block is not implemented for FileLinkResolver").into()) + Err(anyhow!("get_block is not implemented for FileLinkResolver")) } async fn json_stream( @@ -154,7 +154,9 @@ impl LinkResolverTrait for FileLinkResolver { _ctx: &LinkResolverContext, _link: &Link, ) -> Result { - Err(anyhow!("json_stream is not implemented for FileLinkResolver").into()) + Err(anyhow!( + "json_stream is not implemented for FileLinkResolver" + )) } } diff --git a/graph/src/components/link_resolver/ipfs.rs b/graph/src/components/link_resolver/ipfs.rs index 59a9f8027d7..37e22c12994 100644 --- a/graph/src/components/link_resolver/ipfs.rs +++ b/graph/src/components/link_resolver/ipfs.rs @@ -224,7 +224,7 @@ impl LinkResolver for IpfsResolver { // run through the loop. match try_ready!(stream.poll().map_err(|e| anyhow::anyhow!("{}", e))) { Some(b) => buf.extend_from_slice(&b), - None if !buf.is_empty() => buf.extend_from_slice(&[b'\n']), + None if !buf.is_empty() => buf.extend_from_slice(b"\n"), None => return Ok(Async::Ready(None)), } } diff --git a/graph/src/components/metrics/block_state.rs b/graph/src/components/metrics/block_state.rs index 87984d46647..2a6ffb3fc64 100644 --- a/graph/src/components/metrics/block_state.rs +++ b/graph/src/components/metrics/block_state.rs @@ -37,6 +37,12 @@ impl From<&str> for CounterKey { } } +impl Default for BlockStateMetrics { + fn default() -> Self { + Self::new() + } +} + impl BlockStateMetrics { pub fn new() -> Self { BlockStateMetrics { @@ -101,7 +107,7 @@ impl BlockStateMetrics { let data_bytes = data.into_bytes(); let bucket = - Url::parse(&bucket).map_err(|e| anyhow!("Failed to parse bucket url: {}", e))?; + Url::parse(bucket).map_err(|e| anyhow!("Failed to parse bucket url: {}", e))?; let store = GoogleCloudStorageBuilder::from_env() .with_url(bucket) .build()?; diff --git a/graph/src/components/metrics/registry.rs b/graph/src/components/metrics/registry.rs index b41f27bc785..cb210040952 100644 --- a/graph/src/components/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -395,7 +395,7 @@ impl MetricsRegistry { variable_labels: &[&str], ) -> Result, PrometheusError> { let opts = Opts::new(name, help); - let counters = Box::new(IntCounterVec::new(opts, &variable_labels)?); + let counters = Box::new(IntCounterVec::new(opts, variable_labels)?); self.register(name, counters.clone()); Ok(counters) } diff --git a/graph/src/components/network_provider/chain_identifier_validator.rs b/graph/src/components/network_provider/chain_identifier_validator.rs index d64eb0a401d..c275e4e31bc 100644 --- a/graph/src/components/network_provider/chain_identifier_validator.rs +++ 
b/graph/src/components/network_provider/chain_identifier_validator.rs @@ -78,7 +78,7 @@ impl ChainIdentifierValidator for ChainIdentifierStore { .store .chain_identifier(chain_name) .await - .map_err(|err| ChainIdentifierValidationError::Store(err))?; + .map_err(ChainIdentifierValidationError::Store)?; if store_identifier.is_default() { return Err(ChainIdentifierValidationError::IdentifierNotSet( @@ -120,6 +120,6 @@ impl ChainIdentifierValidator for ChainIdentifierStore { self.store .set_chain_identifier(chain_name, chain_identifier) .await - .map_err(|err| ChainIdentifierValidationError::Store(err)) + .map_err(ChainIdentifierValidationError::Store) } } diff --git a/graph/src/components/network_provider/provider_manager.rs b/graph/src/components/network_provider/provider_manager.rs index 510b053b6ff..beac2cb762c 100644 --- a/graph/src/components/network_provider/provider_manager.rs +++ b/graph/src/components/network_provider/provider_manager.rs @@ -126,7 +126,7 @@ impl ProviderManager { }; let mut validations: Vec = Vec::new(); - let adapters = Self::adapters_by_chain_names(adapters, &mut validations, &enabled_checks); + let adapters = Self::adapters_by_chain_names(adapters, &mut validations, enabled_checks); let inner = Inner { logger, diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 11748415444..4993339831a 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -220,13 +220,13 @@ impl EntityCache { if let Some(op) = self.updates.get(key).cloned() { entity = op - .apply_to(&mut entity) + .apply_to(&entity) .map_err(|e| key.unknown_attribute(e))? .map(Arc::new); } if let Some(op) = self.handler_updates.get(key).cloned() { entity = op - .apply_to(&mut entity) + .apply_to(&entity) .map_err(|e| key.unknown_attribute(e))? 
.map(Arc::new); } @@ -241,7 +241,7 @@ impl EntityCache { let query = DerivedEntityQuery { entity_type, - entity_field: field.name.clone().into(), + entity_field: field.name.clone(), value: eref.entity_id.clone(), causality_region: eref.causality_region, }; @@ -250,7 +250,7 @@ impl EntityCache { for (key, entity) in entity_map.iter() { // Only insert to the cache if it's not already there - if !self.current.contains_key(&key) { + if !self.current.contains_key(key) { self.current .insert(key.clone(), Some(Arc::new(entity.clone()))); } diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index d59a835d57b..cbf500884df 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -108,7 +108,7 @@ impl Clone for StoreError { } Self::InvalidIdentifier(arg0) => Self::InvalidIdentifier(arg0.clone()), Self::DuplicateBlockProcessing(arg0, arg1) => { - Self::DuplicateBlockProcessing(arg0.clone(), arg1.clone()) + Self::DuplicateBlockProcessing(arg0.clone(), *arg1) } Self::InternalError(arg0) => Self::InternalError(arg0.clone()), Self::DeploymentNotFound(arg0) => Self::DeploymentNotFound(arg0.clone()), @@ -121,14 +121,14 @@ impl Clone for StoreError { Self::Poisoned => Self::Poisoned, Self::WriterPanic(arg0) => Self::Unknown(anyhow!("writer panic: {}", arg0)), Self::UnsupportedDeploymentSchemaVersion(arg0) => { - Self::UnsupportedDeploymentSchemaVersion(arg0.clone()) + Self::UnsupportedDeploymentSchemaVersion(*arg0) } Self::PruneFailure(arg0) => Self::PruneFailure(arg0.clone()), Self::UnsupportedFilter(arg0, arg1) => { Self::UnsupportedFilter(arg0.clone(), arg1.clone()) } Self::WriteFailure(arg0, arg1, arg2, arg3) => { - Self::WriteFailure(arg0.clone(), arg1.clone(), arg2.clone(), arg3.clone()) + Self::WriteFailure(arg0.clone(), *arg1, arg2.clone(), arg3.clone()) } Self::StatementTimeout => Self::StatementTimeout, Self::ConstraintViolation(arg0) => Self::ConstraintViolation(arg0.clone()), @@ -244,7 +244,7 @@ impl From for StoreError { impl From for StoreError { fn from(e: std::fmt::Error) -> Self { - StoreError::Unknown(anyhow!("{}", e.to_string())) + StoreError::Unknown(anyhow!("{}", e)) } } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 818718a5f74..27a6b26d7e7 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1017,12 +1017,12 @@ impl PruneRequest { ) -> Result { let rebuild_threshold = ENV_VARS.store.rebuild_threshold; let delete_threshold = ENV_VARS.store.delete_threshold; - if rebuild_threshold < 0.0 || rebuild_threshold > 1.0 { + if !(0.0..=1.0).contains(&rebuild_threshold) { return Err(internal_error!( "the copy threshold must be between 0 and 1 but is {rebuild_threshold}" )); } - if delete_threshold < 0.0 || delete_threshold > 1.0 { + if !(0.0..=1.0).contains(&delete_threshold) { return Err(internal_error!( "the delete threshold must be between 0 and 1 but is {delete_threshold}" )); diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 1c464be487e..22e9fab729f 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -106,7 +106,7 @@ impl<'a> TryFrom<&'a EntityModification> for EntityWrite<'a> { end, } => Ok(EntityWrite { id: &key.entity_id, - entity: &data, + entity: data, causality_region: key.causality_region, block: *block, end: *end, @@ -223,13 +223,11 @@ impl EntityModification { block, end, }), - Remove { key, .. 
} => { - return Err(internal_error!( - "a remove for {}[{}] can not be converted into an insert", - entity_type, - key.entity_id - )) - } + Remove { key, .. } => Err(internal_error!( + "a remove for {}[{}] can not be converted into an insert", + entity_type, + key.entity_id + )), } } @@ -839,8 +837,7 @@ impl Batch { .entries .iter() .filter(move |(ptr, _)| ptr.number <= at) - .map(|(_, ds)| ds) - .flatten() + .flat_map(|(_, ds)| ds) .filter(|ds| { !self .offchain_to_remove @@ -850,7 +847,7 @@ impl Batch { }) } - pub fn groups<'a>(&'a self) -> impl Iterator { + pub fn groups(&self) -> impl Iterator { self.mods.groups.iter() } @@ -937,7 +934,6 @@ impl<'a> WriteChunk<'a> { /// Return a vector of `WriteChunk`s each containing a single write pub fn as_single_writes(&self) -> Vec { (0..self.len()) - .into_iter() .map(|position| WriteChunk { group: self.group, chunk_size: 1, @@ -981,7 +977,7 @@ impl<'a> Iterator for WriteChunkIter<'a> { return insert; } } - return None; + None } } diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index 0cba98912c7..40bdc10f8eb 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -31,9 +31,9 @@ impl From for MappingError { impl From for MappingError { fn from(value: HostExportError) -> MappingError { match value { - HostExportError::PossibleReorg(e) => MappingError::PossibleReorg(e.into()), + HostExportError::PossibleReorg(e) => MappingError::PossibleReorg(e), HostExportError::Deterministic(e) | HostExportError::Unknown(e) => { - MappingError::Unknown(e.into()) + MappingError::Unknown(e) } } } diff --git a/graph/src/data/graphql/ext.rs b/graph/src/data/graphql/ext.rs index 271ace79237..55e592b8ce6 100644 --- a/graph/src/data/graphql/ext.rs +++ b/graph/src/data/graphql/ext.rs @@ -391,7 +391,7 @@ impl FieldExt for Field { } fn argument(&self, name: &str) -> Option<&s::InputValue> { - self.arguments.iter().find(|iv| &iv.name == name) + self.arguments.iter().find(|iv| iv.name == name) } } diff --git a/graph/src/data/graphql/load_manager.rs b/graph/src/data/graphql/load_manager.rs index 12fa565d321..32a02e9f59c 100644 --- a/graph/src/data/graphql/load_manager.rs +++ b/graph/src/data/graphql/load_manager.rs @@ -310,9 +310,9 @@ impl LoadManager { .map(GenericCounter::inc); if !ENV_VARS.load_management_is_disabled() { let qref = QueryRef::new(deployment, shape_hash); - self.effort - .get(shard) - .map(|effort| effort.add(shard, qref, duration, &self.effort_gauge)); + if let Some(effort) = self.effort.get(shard) { + effort.add(shard, qref, duration, &self.effort_gauge) + } } } diff --git a/graph/src/data/graphql/object_or_interface.rs b/graph/src/data/graphql/object_or_interface.rs index 625965f2ba1..b86764bbdc5 100644 --- a/graph/src/data/graphql/object_or_interface.rs +++ b/graph/src/data/graphql/object_or_interface.rs @@ -100,7 +100,7 @@ impl<'a> ObjectOrInterface<'a> { } pub fn field(&self, name: &str) -> Option<&s::Field> { - self.fields().iter().find(|field| &field.name == name) + self.fields().iter().find(|field| field.name == name) } pub fn object_types(self, schema: &'a Schema) -> Option> { diff --git a/graph/src/data/query/cache_status.rs b/graph/src/data/query/cache_status.rs index b5ff2db3ae1..0a713e81a46 100644 --- a/graph/src/data/query/cache_status.rs +++ b/graph/src/data/query/cache_status.rs @@ -6,7 +6,7 @@ use serde::Serialize; use crate::derive::CacheWeight; /// Used for checking if a response hit the cache. 
-#[derive(Copy, Clone, CacheWeight, Debug, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, CacheWeight, Debug, PartialEq, Eq, Hash, Default)] pub enum CacheStatus { /// Hit is a hit in the generational cache. Hit, @@ -18,15 +18,10 @@ pub enum CacheStatus { Insert, /// A miss is none of the above. + #[default] Miss, } -impl Default for CacheStatus { - fn default() -> Self { - CacheStatus::Miss - } -} - impl fmt::Display for CacheStatus { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.as_str()) diff --git a/graph/src/data/query/trace.rs b/graph/src/data/query/trace.rs index 256c9cdeaf6..c6682274100 100644 --- a/graph/src/data/query/trace.rs +++ b/graph/src/data/query/trace.rs @@ -35,8 +35,9 @@ impl HttpTrace { } } -#[derive(Debug, CacheWeight)] +#[derive(Debug, CacheWeight, Default)] pub enum Trace { + #[default] None, Root { query: Arc, @@ -77,12 +78,6 @@ pub enum Trace { }, } -impl Default for Trace { - fn default() -> Self { - Self::None - } -} - impl Trace { pub fn root( query: &Arc, diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs index 9726141e2d6..1d5a274a522 100644 --- a/graph/src/data/store/id.rs +++ b/graph/src/data/store/id.rs @@ -94,7 +94,7 @@ impl<'a> TryFrom<&s::ObjectType> for IdType { fn try_from(obj_type: &s::ObjectType) -> Result { let base_type = obj_type - .field(&*ID) + .field(&ID) .ok_or_else(|| anyhow!("Type {} does not have an `id` field", obj_type.name))? .field_type .get_base_type(); @@ -484,7 +484,7 @@ impl IdList { } pub fn first(&self) -> Option> { - if self.len() > 0 { + if !self.is_empty() { Some(self.index(0)) } else { None diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index d56ae785cf3..9ac537c5716 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -358,7 +358,7 @@ impl Value { })?), BIG_DECIMAL_SCALAR => Value::BigDecimal(scalar::BigDecimal::from_str(s)?), INT8_SCALAR => Value::Int8(s.parse::().map_err(|_| { - QueryExecutionError::ValueParseError("Int8".to_string(), format!("{}", s)) + QueryExecutionError::ValueParseError("Int8".to_string(), s.to_string()) })?), TIMESTAMP_SCALAR => { Value::Timestamp(scalar::Timestamp::parse_timestamp(s).map_err(|_| { @@ -644,7 +644,7 @@ impl From for Value { impl From for Value { fn from(value: i64) -> Value { - Value::Int8(value.into()) + Value::Int8(value) } } @@ -960,7 +960,7 @@ impl Entity { // assigning a scalar to a list will be caught below if let Value::List(elts) = value { for (index, elt) in elts.iter().enumerate() { - if !elt.is_assignable(&scalar_type, false) { + if !elt.is_assignable(scalar_type, false) { return Err( EntityValidationError::MismatchedElementTypeInList { entity: key.entity_type.to_string(), @@ -976,7 +976,7 @@ impl Entity { } } } - if !value.is_assignable(&scalar_type, field.field_type.is_list()) { + if !value.is_assignable(scalar_type, field.field_type.is_list()) { return Err(EntityValidationError::InvalidFieldType { entity: key.entity_type.to_string(), entity_id: key.entity_id.to_string(), diff --git a/graph/src/data/store/scalar/bigint.rs b/graph/src/data/store/scalar/bigint.rs index 195d26a5cb5..696a1fd49ec 100644 --- a/graph/src/data/store/scalar/bigint.rs +++ b/graph/src/data/store/scalar/bigint.rs @@ -73,7 +73,7 @@ mod big_int { } pub fn bits(&self) -> usize { - self.0.bits() as usize + self.0.bits() } pub(in super::super) fn inner(self) -> num_bigint::BigInt { diff --git a/graph/src/data/store/scalar/bytes.rs b/graph/src/data/store/scalar/bytes.rs index 585b548f931..8c5f4f1fe08 100644 --- 
a/graph/src/data/store/scalar/bytes.rs +++ b/graph/src/data/store/scalar/bytes.rs @@ -52,7 +52,7 @@ impl FromStr for Bytes { } } -impl<'a> From<&'a [u8]> for Bytes { +impl From<&[u8]> for Bytes { fn from(array: &[u8]) -> Self { Bytes(array.into()) } diff --git a/graph/src/data/store/scalar/timestamp.rs b/graph/src/data/store/scalar/timestamp.rs index 58b2ef10cb8..1d9026d5a72 100644 --- a/graph/src/data/store/scalar/timestamp.rs +++ b/graph/src/data/store/scalar/timestamp.rs @@ -58,7 +58,7 @@ impl Timestamp { } pub fn since_epoch(secs: i64, nanos: u32) -> Option { - DateTime::from_timestamp(secs, nanos).map(|dt| Timestamp(dt)) + DateTime::from_timestamp(secs, nanos).map(Timestamp) } pub fn as_secs_since_epoch(&self) -> i64 { diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 43b2b8e89dc..2aede7a76ba 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -380,6 +380,12 @@ pub enum SubgraphManifestResolveError { #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct DataSourceContext(HashMap); +impl Default for DataSourceContext { + fn default() -> Self { + Self::new() + } +} + impl DataSourceContext { pub fn new() -> Self { Self(HashMap::new()) @@ -739,20 +745,19 @@ impl UnvalidatedSubgraphManifest { let mut errors = Vec::new(); // Check spec version support for subgraph datasources - if *spec_version < SPEC_VERSION_1_3_0 { - if data_sources + if *spec_version < SPEC_VERSION_1_3_0 + && data_sources .iter() .any(|ds| matches!(ds, DataSource::Subgraph(_))) - { - errors.push(SubgraphManifestValidationError::DataSourceValidation( - "subgraph".to_string(), - anyhow!( - "Subgraph datasources are not supported prior to spec version {}", - SPEC_VERSION_1_3_0 - ), - )); - return errors; - } + { + errors.push(SubgraphManifestValidationError::DataSourceValidation( + "subgraph".to_string(), + anyhow!( + "Subgraph datasources are not supported prior to spec version {}", + SPEC_VERSION_1_3_0 + ), + )); + return errors; } let subgraph_ds_count = data_sources @@ -1012,15 +1017,12 @@ impl SubgraphManifest { .map(|s| s.to_string()) .collect_vec(); - let api_version = unified_api_version - .map(|v| v.version().map(|v| v.to_string())) - .flatten(); + let api_version = unified_api_version.and_then(|v| v.version().map(|v| v.to_string())); let handler_kinds = self .data_sources .iter() - .map(|ds| ds.handler_kinds()) - .flatten() + .flat_map(|ds| ds.handler_kinds()) .collect::>(); let features: Vec = self @@ -1354,7 +1356,7 @@ impl DeploymentState { /// `block` pub fn has_deterministic_errors(&self, block: &BlockPtr) -> bool { self.first_error_block - .map_or(false, |first_error_block| first_error_block <= block.number) + .is_some_and(|first_error_block| first_error_block <= block.number) } } diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index af2629a1f18..107689d3d75 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -409,7 +409,7 @@ impl std::fmt::Display for Value { write!(f, "}}") } Value::Timestamp(ref ts) => { - write!(f, "\"{}\"", ts.as_microseconds_since_epoch().to_string()) + write!(f, "\"{}\"", ts.as_microseconds_since_epoch()) } } } @@ -433,7 +433,7 @@ impl Serialize for Value { seq.end() } Value::Timestamp(ts) => { - serializer.serialize_str(&ts.as_microseconds_since_epoch().to_string().as_str()) + serializer.serialize_str(ts.as_microseconds_since_epoch().to_string().as_str()) } Value::Null => serializer.serialize_none(), Value::String(s) => serializer.serialize_str(s), diff --git 
a/graph/src/data_source/common.rs b/graph/src/data_source/common.rs index f89739c6ab7..da3196c1251 100644 --- a/graph/src/data_source/common.rs +++ b/graph/src/data_source/common.rs @@ -420,7 +420,7 @@ impl CallDecl { EthereumArg::Param(name) => { let value = params .iter() - .find(|param| ¶m.name == name.as_str()) + .find(|param| param.name == name.as_str()) .ok_or_else(|| { anyhow!( "In declarative call '{}': unknown param {}", @@ -441,7 +441,7 @@ impl CallDecl { EthereumArg::StructField(param_name, field_accesses) => { let param = params .iter() - .find(|param| ¶m.name == param_name.as_str()) + .find(|param| param.name == param_name.as_str()) .ok_or_else(|| { anyhow!( "In declarative call '{}': unknown param {}", @@ -486,7 +486,7 @@ impl CallDecl { EthereumArg::Param(name) => { let value = params .iter() - .find(|param| ¶m.name == name.as_str()) + .find(|param| param.name == name.as_str()) .ok_or_else(|| anyhow!("In declarative call '{}': unknown param {}", self.label, name))? .value .clone(); @@ -495,7 +495,7 @@ impl CallDecl { EthereumArg::StructField(param_name, field_accesses) => { let param = params .iter() - .find(|param| ¶m.name == param_name.as_str()) + .find(|param| param.name == param_name.as_str()) .ok_or_else(|| anyhow!("In declarative call '{}': unknown param {}", self.label, param_name))?; Self::extract_nested_struct_field( @@ -578,7 +578,7 @@ impl CallDecl { self.expr .args .iter() - .zip(param_types.into_iter()) + .zip(param_types) .map(|(arg, expected_type)| { self.process_entity_handler_arg(arg, &expected_type, entity) }) @@ -1163,7 +1163,7 @@ impl CallArg { .into_iter() .map(|part| part.parse::()) .collect::, _>>() - .with_context(|| format!("Failed to parse numeric field indices"))? + .with_context(|| "Failed to parse numeric field indices".to_string())? 
}; Ok(CallArg::Ethereum(EthereumArg::StructField( Word::from(param), @@ -1235,7 +1235,7 @@ impl DeclaredCall { .context(format!( "Failed to parse arguments for call to function \"{}\" of contract \"{}\"", decl.expr.func.as_str(), - decl.expr.abi.to_string() + decl.expr.abi ))?, )) }) diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index d33c6e41560..08130ad000e 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -457,7 +457,7 @@ impl DataSourceTemplate { pub fn name(&self) -> &str { match self { - Self::Onchain(ds) => &ds.name(), + Self::Onchain(ds) => ds.name(), Self::Offchain(ds) => &ds.name, Self::Subgraph(ds) => &ds.name, } diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index 70459a86692..282f8aeff23 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -336,9 +336,9 @@ impl Source { } } -impl Into for Source { - fn into(self) -> Bytes { - match self { +impl From for Bytes { + fn from(val: Source) -> Self { + match val { Source::Ipfs(ref path) => Bytes::from(path.to_string().as_bytes().to_vec()), Source::Arweave(ref base64) => Bytes::from(base64.as_bytes()), } @@ -435,15 +435,15 @@ pub struct DataSourceTemplate { pub mapping: Mapping, } -impl Into for DataSourceTemplate { - fn into(self) -> DataSourceTemplateInfo { +impl From for DataSourceTemplateInfo { + fn from(val: DataSourceTemplate) -> Self { let DataSourceTemplate { kind, network: _, name, manifest_idx, mapping, - } = self; + } = val; DataSourceTemplateInfo { api_version: mapping.api_version.clone(), diff --git a/graph/src/data_source/subgraph.rs b/graph/src/data_source/subgraph.rs index c9f01cf4890..b2a1d34f4e6 100644 --- a/graph/src/data_source/subgraph.rs +++ b/graph/src/data_source/subgraph.rs @@ -250,7 +250,7 @@ impl UnresolvedDataSource { source_manifest: &SubgraphManifest, ) -> Result<(), Error> { for entity in mapping_entities { - let type_kind = source_manifest.schema.kind_of_declared_type(&entity); + let type_kind = source_manifest.schema.kind_of_declared_type(entity); match type_kind { Some(TypeKind::Interface) => { @@ -584,15 +584,15 @@ pub struct DataSourceTemplate { pub mapping: Mapping, } -impl Into for DataSourceTemplate { - fn into(self) -> DataSourceTemplateInfo { +impl From for DataSourceTemplateInfo { + fn from(val: DataSourceTemplate) -> Self { let DataSourceTemplate { kind, network: _, name, manifest_idx, mapping, - } = self; + } = val; DataSourceTemplateInfo { api_version: mapping.api_version.clone(), diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index cc6c3da99c7..1b563fb52f0 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -34,9 +34,9 @@ pub enum ConnectionType { Rpc, } -impl Into<&str> for &ConnectionType { - fn into(self) -> &'static str { - match self { +impl From<&ConnectionType> for &str { + fn from(val: &ConnectionType) -> Self { + match val { ConnectionType::Firehose => "firehose", ConnectionType::Rpc => "rpc", } diff --git a/graph/src/env/mappings.rs b/graph/src/env/mappings.rs index 27bc5720e9b..66a01d6cb6f 100644 --- a/graph/src/env/mappings.rs +++ b/graph/src/env/mappings.rs @@ -116,7 +116,7 @@ impl TryFrom for EnvVarsMapping { max_ipfs_file_bytes: x.max_ipfs_file_bytes.0, ipfs_request_limit: x.ipfs_request_limit, ipfs_max_attempts: x.ipfs_max_attempts, - ipfs_cache_location: ipfs_cache_location, + ipfs_cache_location, allow_non_deterministic_ipfs: x.allow_non_deterministic_ipfs.0, disable_declared_calls: x.disable_declared_calls.0, 
store_errors_are_nondeterministic: x.store_errors_are_nondeterministic.0, diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 6d51b04ba34..efec3d693ae 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -336,7 +336,7 @@ impl FromStr for ZeroToOneF64 { fn from_str(s: &str) -> Result { let f = s.parse::()?; - if f < 0.0 || f > 1.0 { + if !(0.0..=1.0).contains(&f) { bail!("invalid value: {s} must be between 0 and 1"); } else { Ok(ZeroToOneF64(f)) diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 76fefd61797..beb682fc527 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -91,6 +91,12 @@ impl NetworkDetails for Arc { } } +impl Default for ConnectionHeaders { + fn default() -> Self { + Self::new() + } +} + impl ConnectionHeaders { pub fn new() -> Self { Self(HashMap::new()) @@ -269,7 +275,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - provider: self.provider.clone().into(), + provider: self.provider.clone(), req_type: "unknown".into(), conn_type: ConnectionType::Firehose, }, @@ -360,7 +366,7 @@ impl FirehoseEndpoint { Ok(v) => Ok(M::decode( v.get_ref().block.as_ref().unwrap().value.as_ref(), )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + Err(e) => Err(anyhow::format_err!("firehose error {}", e)), } } @@ -395,7 +401,7 @@ impl FirehoseEndpoint { Ok(v) => Ok(M::decode( v.get_ref().block.as_ref().unwrap().value.as_ref(), )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + Err(e) => Err(anyhow::format_err!("firehose error {}", e)), } } @@ -457,7 +463,7 @@ impl FirehoseEndpoint { Ok(v) => Ok(M::decode( v.get_ref().block.as_ref().unwrap().value.as_ref(), )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + Err(e) => Err(anyhow::format_err!("firehose error {}", e)), } } diff --git a/graph/src/ipfs/cache.rs b/graph/src/ipfs/cache.rs index e0e256a7c22..2e2c3253f55 100644 --- a/graph/src/ipfs/cache.rs +++ b/graph/src/ipfs/cache.rs @@ -174,9 +174,7 @@ impl Cache { async fn insert(&self, logger: &Logger, path: ContentPath, data: Bytes) { match self { - Cache::Memory { max_entry_size, .. } if data.len() > *max_entry_size => { - return; - } + Cache::Memory { max_entry_size, .. } if data.len() > *max_entry_size => {} Cache::Memory { cache, .. 
} => { let mut cache = cache.lock().unwrap(); diff --git a/graph/src/ipfs/mod.rs b/graph/src/ipfs/mod.rs index 403cbf614cd..d8231d2074c 100644 --- a/graph/src/ipfs/mod.rs +++ b/graph/src/ipfs/mod.rs @@ -75,7 +75,7 @@ where input: "".to_owned(), source: anyhow!("at least one server address is required"), }), - 1 => Ok(clients.pop().unwrap().into()), + 1 => Ok(clients.pop().unwrap()), n => { info!(logger, "Creating a pool of {} IPFS clients", n); diff --git a/graph/src/ipfs/test_utils.rs b/graph/src/ipfs/test_utils.rs index decd9724a78..c2ccbbf0650 100644 --- a/graph/src/ipfs/test_utils.rs +++ b/graph/src/ipfs/test_utils.rs @@ -18,7 +18,7 @@ impl From> for IpfsAddFile { fn from(content: Vec) -> Self { Self { path: Default::default(), - content: content.into(), + content: content, } } } diff --git a/graph/src/runtime/asc_ptr.rs b/graph/src/runtime/asc_ptr.rs index 7a51805269e..f3c783ac4cd 100644 --- a/graph/src/runtime/asc_ptr.rs +++ b/graph/src/runtime/asc_ptr.rs @@ -69,7 +69,7 @@ impl AscPtr { let using_buffer = |buffer: &mut [MaybeUninit]| { let buffer = heap.read(self.0, buffer, gas)?; - C::from_asc_bytes(buffer, &heap.api_version()) + C::from_asc_bytes(buffer, heap.api_version()) }; let len = len as usize; @@ -103,7 +103,7 @@ impl AscPtr { let aligned_len = padding_to_16(bytes.len()); // Since AssemblyScript keeps all allocated objects with a 16 byte alignment, // we need to do the same when we allocate ourselves. - bytes.extend(std::iter::repeat(0).take(aligned_len)); + bytes.extend(std::iter::repeat_n(0, aligned_len)); let header = Self::generate_header( heap, diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 7fe29806a3f..157cec0ac90 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -188,7 +188,7 @@ impl ApiSchema { match t { s::Type::NamedType(name) => { let named_type = self.get_named_type(name); - named_type.map_or(false, |type_def| match type_def { + named_type.is_some_and(|type_def| match type_def { s::TypeDefinition::Scalar(_) | s::TypeDefinition::Enum(_) | s::TypeDefinition::InputObject(_) => true, @@ -916,7 +916,7 @@ fn field_scalar_filter_input_values( set: FilterOpsSet<'_>, ) -> Vec { field_filter_ops(set) - .into_iter() + .iter() .map(|filter_type| { let field_type = s::Type::NamedType(set.type_name().to_string()); let value_type = match *filter_type { diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 841f7568ad7..76cdae11e18 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -74,12 +74,12 @@ pub fn parse_field_as_filter(key: &str) -> (String, FilterOp) { _ => ("", FilterOp::Equal), }; - return match op { + match op { FilterOp::And => (key.to_owned(), op), FilterOp::Or => (key.to_owned(), op), // Strip the operator suffix to get the attribute. _ => (key.trim_end_matches(suffix).to_owned(), op), - }; + } } /// An `ObjectType` with `Hash` and `Eq` derived from the name. 
@@ -317,7 +317,7 @@ pub fn is_input_type(schema: &s::Document, t: &s::Type) -> bool { match t { s::Type::NamedType(name) => { let named_type = schema.get_named_type(name); - named_type.map_or(false, |type_def| match type_def { + named_type.is_some_and(|type_def| match type_def { s::TypeDefinition::Scalar(_) | s::TypeDefinition::Enum(_) | s::TypeDefinition::InputObject(_) => true, @@ -333,7 +333,7 @@ pub fn is_entity_type(schema: &s::Document, t: &s::Type) -> bool { match t { s::Type::NamedType(name) => schema .get_named_type(name) - .map_or(false, is_entity_type_definition), + .is_some_and(is_entity_type_definition), s::Type::ListType(inner_type) => is_entity_type(schema, inner_type), s::Type::NonNullType(inner_type) => is_entity_type(schema, inner_type), } diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index ac7a4284175..51678a3f9db 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -201,7 +201,7 @@ impl TypeInfo { } fn for_aggregation(schema: &Schema, pool: &AtomPool, agg_type: &s::ObjectType) -> Self { - let agg_type = Aggregation::new(&schema, &pool, agg_type); + let agg_type = Aggregation::new(schema, pool, agg_type); TypeInfo::Aggregation(agg_type) } @@ -230,7 +230,7 @@ impl Field { field_type: &s::Type, derived_from: Option, ) -> Self { - let value_type = Self::scalar_value_type(&schema, field_type); + let value_type = Self::scalar_value_type(schema, field_type); Self { name: Word::from(name), field_type: field_type.clone(), @@ -245,7 +245,7 @@ impl Field { s::Type::NamedType(name) => name.parse::().unwrap_or_else(|_| { match schema.document.get_named_type(name) { Some(t::Object(obj_type)) => { - let id = obj_type.field(&*ID).expect("all object types have an id"); + let id = obj_type.field(&ID).expect("all object types have an id"); Self::scalar_value_type(schema, &id.field_type) } Some(t::Interface(intf)) => { @@ -265,7 +265,7 @@ impl Field { ValueType::String } Some(obj_type) => { - let id = obj_type.field(&*ID).expect("all object types have an id"); + let id = obj_type.field(&ID).expect("all object types have an id"); Self::scalar_value_type(schema, &id.field_type) } } @@ -290,7 +290,7 @@ impl Field { let derived_from = self.derived_from.as_ref()?; let name = schema .pool() - .lookup(&self.field_type.get_base_type()) + .lookup(self.field_type.get_base_type()) .unwrap(); schema.field(name, derived_from) } @@ -309,11 +309,9 @@ pub enum ObjectOrInterface<'a> { impl<'a> CheapClone for ObjectOrInterface<'a> { fn cheap_clone(&self) -> Self { match self { - ObjectOrInterface::Object(schema, object) => { - ObjectOrInterface::Object(*schema, *object) - } + ObjectOrInterface::Object(schema, object) => ObjectOrInterface::Object(schema, object), ObjectOrInterface::Interface(schema, interface) => { - ObjectOrInterface::Interface(*schema, *interface) + ObjectOrInterface::Interface(schema, interface) } } } @@ -357,7 +355,7 @@ impl<'a> ObjectOrInterface<'a> { let object_type = match self { ObjectOrInterface::Object(_, object_type) => Some(*object_type), ObjectOrInterface::Interface(schema, interface) => { - schema.implementers(&interface).next() + schema.implementers(interface).next() } }; object_type.and_then(|object_type| object_type.field(name)) @@ -431,7 +429,7 @@ impl ObjectType { .fields .iter() .map(|field| { - let derived_from = field.derived_from().map(|name| Word::from(name)); + let derived_from = field.derived_from().map(Word::from); Field::new(schema, &field.name, &field.field_type, derived_from) }) .collect(); @@ -535,7 
+533,7 @@ impl InterfaceType { // since the API schema does not contain certain filters for // derived fields on interfaces that it would for // non-derived fields - let derived_from = field.derived_from().map(|name| Word::from(name)); + let derived_from = field.derived_from().map(Word::from); Field::new(schema, &field.name, &field.field_type, derived_from) }) .collect(); @@ -932,7 +930,7 @@ impl Aggregation { pub fn dimensions(&self) -> impl Iterator { self.fields .iter() - .filter(|field| &field.name != &*ID && field.name != kw::TIMESTAMP) + .filter(|field| field.name != *ID && field.name != kw::TIMESTAMP) } fn object_type(&self, interval: AggregationInterval) -> Option<&ObjectType> { @@ -969,7 +967,7 @@ impl InputSchema { .iter() .enumerate() .filter_map(|(idx, ti)| ti.aggregation().map(|agg_type| (idx, agg_type))) - .map(|(aggregation, agg_type)| { + .flat_map(|(aggregation, agg_type)| { agg_type .intervals .iter() @@ -980,7 +978,6 @@ impl InputSchema { agg_type, }) }) - .flatten() .collect(); mappings.sort(); mappings.into_boxed_slice() @@ -1611,7 +1608,7 @@ impl InputSchema { fn atom_pool(document: &s::Document) -> AtomPool { let mut pool = AtomPool::new(); - pool.intern(&*ID); + pool.intern(&ID); // Name and attributes of PoI entity type pool.intern(POI_OBJECT); pool.intern(POI_DIGEST); @@ -1676,7 +1673,7 @@ fn atom_pool(document: &s::Document) -> AtomPool { } for object_type in document.get_object_type_definitions() { - for defn in InputSchema::fulltext_definitions(&document, &object_type.name).unwrap() { + for defn in InputSchema::fulltext_definitions(document, &object_type.name).unwrap() { pool.intern(defn.name.as_str()); } } @@ -1785,8 +1782,7 @@ mod validations { if subgraph_schema_type .directives .iter() - .filter(|directive| !directive.name.eq("fulltext")) - .next() + .find(|directive| !directive.name.eq("fulltext")) .is_some() { Some(SchemaValidationError::InvalidSchemaTypeDirectives) @@ -2002,7 +1998,7 @@ mod validations { /// type `Int8` and that the `id` field has type `Int8` fn validate_entity_directives(&self) -> Vec { fn id_type_is_int8(object_type: &s::ObjectType) -> Option { - let field = match object_type.field(&*ID) { + let field = match object_type.field(&ID) { Some(field) => field, None => { return Some(Err::IdFieldMissing(object_type.name.to_owned())); @@ -2066,7 +2062,7 @@ mod validations { self.entity_types .iter() .fold(vec![], |mut errors, object_type| { - match object_type.field(&*ID) { + match object_type.field(&ID) { None => errors.push(SchemaValidationError::IdFieldMissing( object_type.name.clone(), )), @@ -2281,7 +2277,7 @@ mod validations { // when we query, and just assume that that's ok. 
let target_field_type = target_field.field_type.get_base_type(); if target_field_type != object_type.name - && &target_field.name != ID.as_str() + && target_field.name != ID.as_str() && !interface_types .iter() .any(|iface| target_field_type.eq(iface.as_str())) @@ -2320,7 +2316,7 @@ mod validations { let id_types: HashSet<&str> = HashSet::from_iter( obj_types .iter() - .filter_map(|obj_type| obj_type.field(&*ID)) + .filter_map(|obj_type| obj_type.field(&ID)) .map(|f| f.field_type.get_base_type()) .map(|name| if name == "ID" { "String" } else { name }), ); diff --git a/graph/src/schema/input/sqlexpr.rs b/graph/src/schema/input/sqlexpr.rs index bc5705810bb..f2736574bc3 100644 --- a/graph/src/schema/input/sqlexpr.rs +++ b/graph/src/schema/input/sqlexpr.rs @@ -360,7 +360,7 @@ struct Validator { errors: Vec, } -const FN_WHITELIST: [&'static str; 14] = [ +const FN_WHITELIST: [&str; 14] = [ // Clearly deterministic functions from // https://www.postgresql.org/docs/current/functions-math.html, Table // 9.5. We could also add trig functions (Table 9.7 and 9.8), but under diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 62ff3b4618f..1c12f57dc61 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -103,7 +103,7 @@ impl AtomPool { /// pool or any of its ancestors. pub fn get(&self, atom: Atom) -> Option<&str> { if atom.0 < self.base_sym { - self.base.as_ref().map(|base| base.get(atom)).flatten() + self.base.as_ref().and_then(|base| base.get(atom)) } else { self.atoms .get((atom.0 - self.base_sym) as usize) @@ -271,7 +271,7 @@ impl Object { pub(crate) fn contains_key(&self, key: &str) -> bool { self.entries .iter() - .any(|entry| self.pool.get(entry.key).map_or(false, |k| key == k)) + .any(|entry| self.pool.get(entry.key) == Some(key)) } pub fn merge(&mut self, other: Object) { @@ -337,12 +337,12 @@ impl Object { if self.same_pool(other) { self.entries .iter() - .filter(|e| e.key != TOMBSTONE_KEY && ignore.map_or(true, |ig| e.key != ig)) - .all(|Entry { key, value }| other.get_by_atom(key).map_or(false, |o| o == value)) + .filter(|e| e.key != TOMBSTONE_KEY && (ignore != Some(e.key))) + .all(|Entry { key, value }| other.get_by_atom(key) == Some(value)) } else { self.iter() .filter(|(key, _)| *key != ignore_key) - .all(|(key, value)| other.get(key).map_or(false, |o| o == value)) + .all(|(key, value)| other.get(key) == Some(value)) } } } @@ -385,7 +385,7 @@ impl<'a, V> Iterator for ObjectIter<'a, V> { type Item = (&'a str, &'a V); fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if entry.key != TOMBSTONE_KEY { // unwrap: we only add entries that are backed by the pool let key = self.pool.get(entry.key).unwrap(); @@ -424,7 +424,7 @@ impl Iterator for ObjectOwningIter { type Item = (Word, V); fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if entry.key != TOMBSTONE_KEY { // unwrap: we only add entries that are backed by the pool let key = self.pool.get(entry.key).unwrap(); @@ -451,7 +451,7 @@ impl<'a, V> Iterator for AtomIter<'a, V> { type Item = Atom; fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if entry.key != TOMBSTONE_KEY { return Some(entry.key); } @@ -498,10 +498,10 @@ impl PartialEq for Object { self.entries .iter() .filter(|e| e.key != TOMBSTONE_KEY) - .all(|Entry { key, value }| other.get_by_atom(key).map_or(false, |o| o == value)) + .all(|Entry { key, value }| 
other.get_by_atom(key) == Some(value)) } else { self.iter() - .all(|(key, value)| other.get(key).map_or(false, |o| o == value)) + .all(|(key, value)| other.get(key) == Some(value)) } } } diff --git a/graph/src/util/lfu_cache.rs b/graph/src/util/lfu_cache.rs index 06ec6a475db..a169deb1780 100644 --- a/graph/src/util/lfu_cache.rs +++ b/graph/src/util/lfu_cache.rs @@ -179,7 +179,7 @@ impl }) } - pub fn iter<'a>(&'a self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.queue .iter() .map(|entry| (&entry.0.key, &entry.0.value)) diff --git a/graph/src/util/ogive.rs b/graph/src/util/ogive.rs index 29938b03b17..f8e98e8291d 100644 --- a/graph/src/util/ogive.rs +++ b/graph/src/util/ogive.rs @@ -155,7 +155,7 @@ impl Ogive { // rewritten to be more friendly to lossy calculations with f64 let offset = (value as f64).rem_euclid(self.bin_size) * (b - a) as f64; let x = a + (offset / self.bin_size) as i64; - Ok(x as i64) + Ok(x) } fn check_in_range(&self, point: i64) -> Result<(), StoreError> { diff --git a/graphql/src/execution/ast.rs b/graphql/src/execution/ast.rs index 0f20845e5d5..65bdb6298d1 100644 --- a/graphql/src/execution/ast.rs +++ b/graphql/src/execution/ast.rs @@ -315,7 +315,7 @@ impl Field { // can find the field type entity_type .field(&field.name) - .map_or(false, |field| !field.is_derived()) + .is_some_and(|field| !field.is_derived()) }) .filter_map(|field| { if field.name.starts_with("__") { @@ -351,13 +351,13 @@ impl Field { .map(|value| match value { r::Value::Enum(interval) => interval.parse::().map_err(|_| { QueryExecutionError::InvalidArgumentError( - self.position.clone(), + self.position, kw::INTERVAL.to_string(), q::Value::from(value.clone()), ) }), _ => Err(QueryExecutionError::InvalidArgumentError( - self.position.clone(), + self.position, kw::INTERVAL.to_string(), q::Value::from(value.clone()), )), diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 8173f00f2bf..0f1b473c903 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -309,7 +309,7 @@ pub(crate) async fn execute_root_selection_set_uncached( let ictx = ctx.as_introspection_context(); values.append( - execute_selection_set_to_map(&ictx, &intro_set, &*INTROSPECTION_QUERY_TYPE, None) + execute_selection_set_to_map(&ictx, &intro_set, &INTROSPECTION_QUERY_TYPE, None) .await?, ); } @@ -898,8 +898,8 @@ async fn complete_value( } /// Resolves an abstract type (interface, union) into an object type based on the given value. -fn resolve_abstract_type<'a>( - ctx: &'a ExecutionContext, +fn resolve_abstract_type( + ctx: &ExecutionContext, abstract_type: &s::TypeDefinition, object_value: &r::Value, ) -> Result> { diff --git a/graphql/src/query/ext.rs b/graphql/src/query/ext.rs index 44d7eb5306a..4abca7a4b6c 100644 --- a/graphql/src/query/ext.rs +++ b/graphql/src/query/ext.rs @@ -54,22 +54,17 @@ impl ValueExt for q::Value { } } -#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, Default)] pub enum BlockConstraint { Hash(BlockHash), Number(BlockNumber), /// Execute the query on the latest block only if the the subgraph has progressed to or past the /// given block number. 
Min(BlockNumber), + #[default] Latest, } -impl Default for BlockConstraint { - fn default() -> Self { - BlockConstraint::Latest - } -} - impl BlockConstraint { /// Return the `Some(hash)` if this constraint constrains by hash, /// otherwise return `None` diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 6723c2802ae..293dcaa111b 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -288,10 +288,7 @@ where .to_result()?; let query_start = Instant::now(); - let result = store - .execute_sql(&req.query) - .await - .map_err(|e| QueryExecutionError::from(e)); + let result = store.execute_sql(&req.query).await; self.load_manager.record_work( store.shard(), diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 451c4d19422..42a60575984 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -241,7 +241,7 @@ fn build_filter_from_object<'a>( schema: &InputSchema, ) -> Result, QueryExecutionError> { // Check if we have both column filters and 'or' operator at the same level - if let Some(_) = object.get("or") { + if object.get("or").is_some() { let column_filters: Vec = object .iter() .filter_map(|(key, _)| { diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index 500964ea7a2..426e921f2c6 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -164,8 +164,7 @@ impl StoreResolver { let Some(block) = field .selection_set .fields() - .map(|(_, iter)| iter) - .flatten() + .flat_map(|(_, iter)| iter) .find(|f| f.name == BLOCK) else { return false; @@ -173,8 +172,7 @@ impl StoreResolver { block .selection_set .fields() - .map(|(_, iter)| iter) - .flatten() + .flat_map(|(_, iter)| iter) .any(|f| f.name == TIMESTAMP || f.name == PARENT_HASH) } @@ -248,7 +246,7 @@ impl StoreResolver { "__typename".into(), r::Value::String(META_FIELD_TYPE.to_string()), ); - return Ok(r::Value::object(map)); + Ok(r::Value::object(map)) } } @@ -299,7 +297,7 @@ impl Resolver for StoreResolver { fn child_id(child: &r::Value) -> String { match child { r::Value::Object(child) => child - .get(&*ID) + .get(&ID) .map(|id| id.to_string()) .unwrap_or("(no id)".to_string()), _ => "(no child object)".to_string(), diff --git a/graphql/src/values/coercion.rs b/graphql/src/values/coercion.rs index b0365e7f335..ad4b0f1dab3 100644 --- a/graphql/src/values/coercion.rs +++ b/graphql/src/values/coercion.rs @@ -88,7 +88,7 @@ fn coerce_to_definition<'a>( let def = t .fields .iter() - .find(|f| f.name == &*name) + .find(|f| f.name == *name) .ok_or_else(|| object_for_error.clone())?; coerced_object.insert( name.clone(), diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index 4fe5b3192cd..6db9bcc0df6 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -528,8 +528,9 @@ impl AscIndexId for AscEnum { pub type AscEnumArray = AscPtr>>>; #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub enum EthereumValueKind { + #[default] Address, FixedBytes, Bytes, @@ -559,22 +560,17 @@ impl EthereumValueKind { } } -impl Default for EthereumValueKind { - fn default() -> Self { - EthereumValueKind::Address - } -} - impl AscValue for EthereumValueKind {} #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub enum StoreValueKind { String, Int, BigDecimal, Bool, Array, + #[default] Null, Bytes, BigInt, @@ -601,12 +597,6 @@ impl StoreValueKind { } } -impl Default for StoreValueKind { - fn default() -> Self { - 
StoreValueKind::Null - } -} - impl AscValue for StoreValueKind {} /// Big ints are represented using signed number representation. Note: This differs @@ -670,8 +660,9 @@ pub type AscEntity = AscTypedMap>; pub(crate) type AscJson = AscTypedMap>; #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub enum JsonValueKind { + #[default] Null, Bool, Number, @@ -680,12 +671,6 @@ pub enum JsonValueKind { Object, } -impl Default for JsonValueKind { - fn default() -> Self { - JsonValueKind::Null - } -} - impl AscValue for JsonValueKind {} impl JsonValueKind { @@ -780,8 +765,9 @@ impl AscIndexId for AscWrapped>> { } #[repr(u32)] -#[derive(AscType, Clone, Copy)] +#[derive(AscType, Clone, Copy, Default)] pub enum YamlValueKind { + #[default] Null, Bool, Number, @@ -791,12 +777,6 @@ pub enum YamlValueKind { Tagged, } -impl Default for YamlValueKind { - fn default() -> Self { - YamlValueKind::Null - } -} - impl AscValue for YamlValueKind {} impl YamlValueKind { diff --git a/runtime/wasm/src/asc_abi/v0_0_4.rs b/runtime/wasm/src/asc_abi/v0_0_4.rs index c4098ac0889..3d5ced0a25f 100644 --- a/runtime/wasm/src/asc_abi/v0_0_4.rs +++ b/runtime/wasm/src/asc_abi/v0_0_4.rs @@ -62,7 +62,7 @@ impl ArrayBuffer { self.content[byte_offset..] .chunks(size_of::()) .take(length) - .map(|asc_obj| T::from_asc_bytes(asc_obj, &api_version)) + .map(|asc_obj| T::from_asc_bytes(asc_obj, api_version)) .collect() // TODO: This code is preferred as it validates the length of the array. @@ -96,7 +96,7 @@ impl AscType for ArrayBuffer { let total_size = self.byte_length as usize + header_size; let total_capacity = total_size.next_power_of_two(); let extra_capacity = total_capacity - total_size; - asc_layout.extend(std::iter::repeat(0).take(extra_capacity)); + asc_layout.extend(std::iter::repeat_n(0, extra_capacity)); assert_eq!(asc_layout.len(), total_capacity); Ok(asc_layout) @@ -249,7 +249,7 @@ impl AscType for AscString { } // Prevents panic when accessing offset + 1 in the loop - if asc_obj.len() % 2 != 0 { + if !asc_obj.len().is_multiple_of(2) { return Err(DeterministicHostError::from(anyhow::anyhow!( "Invalid string length" ))); diff --git a/runtime/wasm/src/asc_abi/v0_0_5.rs b/runtime/wasm/src/asc_abi/v0_0_5.rs index 906f6ff1cf6..3497a88b173 100644 --- a/runtime/wasm/src/asc_abi/v0_0_5.rs +++ b/runtime/wasm/src/asc_abi/v0_0_5.rs @@ -75,7 +75,7 @@ impl AscType for ArrayBuffer { let total_size = self.byte_length as usize + HEADER_SIZE; let total_capacity = total_size.next_power_of_two(); let extra_capacity = total_capacity - total_size; - asc_layout.extend(std::iter::repeat(0).take(extra_capacity)); + asc_layout.extend(std::iter::repeat_n(0, extra_capacity)); Ok(asc_layout) } @@ -205,7 +205,7 @@ impl AscType for AscString { let total_size = (self.byte_length as usize * 2) + header_size; let total_capacity = total_size.next_power_of_two(); let extra_capacity = total_capacity - total_size; - content.extend(std::iter::repeat(0).take(extra_capacity)); + content.extend(std::iter::repeat_n(0, extra_capacity)); Ok(content) } diff --git a/runtime/wasm/src/host.rs b/runtime/wasm/src/host.rs index c64b55e35b1..77b03594a5e 100644 --- a/runtime/wasm/src/host.rs +++ b/runtime/wasm/src/host.rs @@ -190,7 +190,7 @@ where proof_of_indexing, host_fns: self.host_fns.cheap_clone(), debug_fork: debug_fork.cheap_clone(), - mapping_logger: Logger::new(&logger, o!("component" => "UserMapping")), + mapping_logger: Logger::new(logger, o!("component" => "UserMapping")), instrument, }, trigger, diff --git 
a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 43e235c6299..18ea839a771 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -175,7 +175,7 @@ impl HostExports { !state .entity_cache .schema - .has_field_with_name(entity_type, &field_name) + .has_field_with_name(entity_type, field_name) }); if has_invalid_fields { @@ -185,7 +185,7 @@ impl HostExports { if !state .entity_cache .schema - .has_field_with_name(entity_type, &field_name) + .has_field_with_name(entity_type, field_name) { Some(field_name.clone()) } else { @@ -326,7 +326,7 @@ impl HostExports { let poi_section = stopwatch.start_section("host_export_store_set__proof_of_indexing"); proof_of_indexing.write_event( &ProofOfIndexingEvent::SetEntity { - entity_type: &key.entity_type.typename(), + entity_type: key.entity_type.typename(), id: &key.entity_id.to_string(), data: &entity, }, @@ -412,7 +412,7 @@ impl HostExports { )?; if let Some(ref entity) = result { - state.metrics.track_entity_read(&entity_type, &entity) + state.metrics.track_entity_read(&entity_type, entity) } Ok(result) @@ -585,7 +585,7 @@ impl HostExports { } Ok(v) }; - result.map_err(move |e: Error| anyhow::anyhow!("{}: {}", errmsg, e.to_string())) + result.map_err(move |e: Error| anyhow::anyhow!("{}: {}", errmsg, e)) } /// Expects a decimal string. @@ -1164,9 +1164,10 @@ impl HostExports { )?; if bytes.len() > MAX_JSON_SIZE { - return Err(DeterministicHostError::Other( - anyhow!("JSON size exceeds max size of {}", MAX_JSON_SIZE).into(), - )); + return Err(DeterministicHostError::Other(anyhow!( + "JSON size exceeds max size of {}", + MAX_JSON_SIZE + ))); } serde_json::from_slice(bytes.as_slice()) @@ -1264,13 +1265,10 @@ impl HostExports { )?; if bytes.len() > YAML_MAX_SIZE_BYTES { - return Err(DeterministicHostError::Other( - anyhow!( - "YAML size exceeds max size of {} bytes", - YAML_MAX_SIZE_BYTES - ) - .into(), - )); + return Err(DeterministicHostError::Other(anyhow!( + "YAML size exceeds max size of {} bytes", + YAML_MAX_SIZE_BYTES + ))); } serde_yaml::from_slice(bytes) diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index 75c7cd64042..2be6d502390 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -320,7 +320,7 @@ impl ValidModule { let mut epoch_counter_abort_handle = None; if let Some(timeout) = timeout { - let timeout = timeout.clone(); + let timeout = timeout; let engine = engine.clone(); // The epoch counter task will perpetually increment the epoch every `timeout` seconds. diff --git a/runtime/wasm/src/module/context.rs b/runtime/wasm/src/module/context.rs index 9ecb04782ef..490a2414c6b 100644 --- a/runtime/wasm/src/module/context.rs +++ b/runtime/wasm/src/module/context.rs @@ -531,7 +531,7 @@ impl WasmInstanceContext<'_> { let ipfs_res = host_exports.ipfs_cat(&logger, link).await; let logger = self.as_ref().ctx.logger.cheap_clone(); match ipfs_res { - Ok(bytes) => asc_new(self, &*bytes, gas).await.map_err(Into::into), + Ok(bytes) => asc_new(self, &*bytes, gas).await, // Return null in case of error. Err(e) => { @@ -568,7 +568,7 @@ impl WasmInstanceContext<'_> { .ipfs_get_block(&self.as_ref().ctx.logger, link) .await; match ipfs_res { - Ok(bytes) => asc_new(self, &*bytes, gas).await.map_err(Into::into), + Ok(bytes) => asc_new(self, &*bytes, gas).await, // Return null in case of error. 
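The enum changes in query/ext.rs and asc_abi/class.rs above all come from clippy's `derivable_impls` lint: a hand-written `impl Default` that only returns a fixed variant is replaced by `#[derive(Default)]` with a `#[default]` attribute on that variant (stable since Rust 1.62). A sketch on a toy enum:

    // `Mode` is a hypothetical enum, standing in for StoreValueKind etc.
    #[derive(Debug, PartialEq, Default)]
    #[allow(dead_code)] // the toy example never constructs `Fast`
    enum Mode {
        Fast,
        #[default]
        Safe,
    }

    fn main() {
        // The derive expands to `fn default() -> Self { Mode::Safe }`.
        assert_eq!(Mode::default(), Mode::Safe);
    }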
Err(e) => { @@ -1118,7 +1118,7 @@ impl WasmInstanceContext<'_> { // map `None` to `null`, and `Some(s)` to a runtime string match name { - Some(name) => asc_new(self, &*name, gas).await.map_err(Into::into), + Some(name) => asc_new(self, &*name, gas).await, None => Ok(AscPtr::null()), } } diff --git a/runtime/wasm/src/module/instance.rs b/runtime/wasm/src/module/instance.rs index 0b6617bcb24..efe7549b64c 100644 --- a/runtime/wasm/src/module/instance.rs +++ b/runtime/wasm/src/module/instance.rs @@ -174,13 +174,13 @@ impl WasmInstance { .await { Ok(()) => { - assert!(self.instance_ctx().as_ref().possible_reorg == false); - assert!(self.instance_ctx().as_ref().deterministic_host_trap == false); + assert!(!self.instance_ctx().as_ref().possible_reorg); + assert!(!self.instance_ctx().as_ref().deterministic_host_trap); None } Err(trap) if self.instance_ctx().as_ref().possible_reorg => { self.instance_ctx().as_mut().ctx.state.exit_handler(); - return Err(MappingError::PossibleReorg(trap.into())); + return Err(MappingError::PossibleReorg(trap)); } // Treat timeouts anywhere in the error chain as a special case to have a better error @@ -192,7 +192,7 @@ impl WasmInstance { .any(|e| e.downcast_ref::() == Some(&Trap::Interrupt)) => { self.instance_ctx().as_mut().ctx.state.exit_handler(); - return Err(MappingError::Unknown(Error::from(trap).context(format!( + return Err(MappingError::Unknown(trap.context(format!( "Handler '{}' hit the timeout of '{}' seconds", handler, self.instance_ctx().as_ref().valid_module.timeout.unwrap().as_secs() @@ -612,7 +612,7 @@ impl WasmInstance { // we cannot execute anything that requires access to the heap before it's created. if let Some(start_func) = valid_module.start_function.as_ref() { instance - .get_func(store.as_context_mut(), &start_func) + .get_func(store.as_context_mut(), start_func) .context(format!("`{start_func}` function not found"))? .typed::<(), ()>(store.as_context_mut())? 
.call_async(store.as_context_mut(), ()) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 86bf4055e5a..b79e066dfb9 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -246,7 +246,7 @@ impl AscHeapCtx { fn host_export_error_from_trap(trap: Error, context: String) -> HostExportError { let trap_is_deterministic = is_trap_deterministic(&trap); - let e = Error::from(trap).context(context); + let e = trap.context(context); match trap_is_deterministic { true => HostExportError::Deterministic(e), false => HostExportError::Unknown(e), diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index 4edb688caf8..55f1f8b2316 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -223,6 +223,6 @@ where depth: usize, ) -> Result { let entries: Vec<(T, U)> = asc_get(heap, asc_map.entries, gas, depth)?; - Ok(HashMap::from_iter(entries.into_iter())) + Ok(HashMap::from_iter(entries)) } } diff --git a/server/graphman/src/resolvers/deployment_mutation/reassign.rs b/server/graphman/src/resolvers/deployment_mutation/reassign.rs index 8a1d3459479..00d32bc0a7b 100644 --- a/server/graphman/src/resolvers/deployment_mutation/reassign.rs +++ b/server/graphman/src/resolvers/deployment_mutation/reassign.rs @@ -20,7 +20,7 @@ pub async fn run( ctx.primary_pool.clone(), ctx.notification_sender.clone(), &deployment, - &node, + node, curr_node, ) .await?; diff --git a/server/http/src/service.rs b/server/http/src/service.rs index 875a7d6b0eb..c90a1d50eb5 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -164,7 +164,7 @@ where .get("X-GraphTraceQuery") .map(|v| { v.to_str() - .map(|s| s == &ENV_VARS.graphql.query_trace_token) + .map(|s| s == ENV_VARS.graphql.query_trace_token) .unwrap_or(false) }) .unwrap_or(false) @@ -342,7 +342,7 @@ where segments .iter() .filter(|&&segment| !segment.is_empty()) - .map(|&segment| segment) + .copied() .collect::>() .join("/") } @@ -355,7 +355,7 @@ where .find(|(key, _)| key == "query") .map(|(_, value)| value.into_owned()) }) - .unwrap_or_else(|| String::new()) + .unwrap_or_else(String::new) .trim() .to_lowercase() .starts_with("mutation"); diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 5714a128c77..43ab376dea7 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -147,7 +147,7 @@ where .collect(), _ => unreachable!(), }) - .unwrap_or_else(Vec::new); + .unwrap_or_default(); let infos = self .store @@ -738,7 +738,7 @@ fn entity_changes_to_graphql(entity_changes: Vec) -> r::Value { r::Value::object( e.sorted() .into_iter() - .map(|(name, value)| (name.into(), value.into())) + .map(|(name, value)| (name, value.into())) .collect(), ) }) diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index c07e9bdc732..10a7e03e16f 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -303,19 +303,16 @@ impl BlockStore { // For each configured chain, add a chain store for (chain_name, shard) in chains { - match existing_chains + if let Some(chain) = existing_chains .iter() .find(|chain| chain.name == chain_name) { - Some(chain) => { - let status = if chain_ingestible(&block_store.logger, chain, &shard) { - ChainStatus::Ingestible - } else { - ChainStatus::ReadOnly - }; - block_store.add_chain_store(chain, status, false).await?; - } - None => {} + let status = if chain_ingestible(&block_store.logger, chain, &shard) { + 
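The block_store.rs hunk being rewritten here is clippy's `single_match`: a `match` on an `Option` whose `None` arm is empty becomes an `if let`. A sketch of the same shape, with a made-up chain list:

    fn main() {
        let existing = vec!["mainnet", "sepolia"];

        // Before: match existing.iter().find(..) { Some(chain) => { .. }, None => {} }
        // After: the empty None arm disappears entirely.
        if let Some(chain) = existing.iter().find(|c| **c == "mainnet") {
            println!("found chain {chain}");
        }
    }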
ChainStatus::Ingestible + } else { + ChainStatus::ReadOnly + }; + block_store.add_chain_store(chain, status, false).await?; }; } @@ -544,11 +541,11 @@ impl BlockStore { eth_rpc_only_nets: Vec, ) -> Result<(), StoreError> { for store in self.stores() { - if !eth_rpc_only_nets.contains(&&store.chain) { + if !eth_rpc_only_nets.contains(&store.chain) { continue; }; - if let Some(head_block) = store.remove_cursor(&&store.chain).await? { + if let Some(head_block) = store.remove_cursor(&store.chain).await? { let lower_bound = head_block.saturating_sub(ENV_VARS.reorg_threshold() * 2); info!(&self.logger, "Removed cursor for non-firehose chain, now cleaning shallow blocks"; "network" => &store.chain, "lower_bound" => lower_bound); store.cleanup_shallow_blocks(lower_bound).await?; @@ -601,11 +598,8 @@ impl BlockStore { network: &str, ident: ChainIdentifier, ) -> anyhow::Result> { - match self.store(network).await { - Some(chain_store) => { - return Ok(chain_store); - } - None => {} + if let Some(chain_store) = self.store(network).await { + return Ok(chain_store); } let mut conn = self.mirror.primary().get().await?; @@ -620,7 +614,7 @@ impl BlockStore { } }) .ok_or_else(|| anyhow!("unable to find shard for network {}", network))?; - let chain = primary::add_chain(&mut conn, &network, &shard, ident).await?; + let chain = primary::add_chain(&mut conn, network, shard, ident).await?; self.add_chain_store(&chain, ChainStatus::Ingestible, true) .await .map_err(anyhow::Error::from) @@ -643,7 +637,7 @@ impl ChainIdStore for BlockStore { chain_name: &ChainName, ) -> Result { let chain_store = self - .chain_store(&chain_name) + .chain_store(chain_name) .await .ok_or_else(|| anyhow!("unable to get store for chain '{chain_name}'"))?; @@ -659,7 +653,7 @@ impl ChainIdStore for BlockStore { // Update the block shard first since that contains a copy from the primary let chain_store = self - .chain_store(&chain_name) + .chain_store(chain_name) .await .ok_or_else(|| anyhow!("unable to get store for chain '{chain_name}'"))?; diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index 0f7dc065733..455fb6fe29b 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -825,7 +825,7 @@ pub async fn create_cross_shard_view( ) -> Result { let mut query = String::new(); write!(query, "create view \"{}\".\"{}\" as ", dst_nsp, table_name)?; - for (idx, (name, nsp)) in shard_nsps.into_iter().enumerate() { + for (idx, (name, nsp)) in shard_nsps.iter().enumerate() { if idx > 0 { write!(query, " union all ")?; } diff --git a/store/postgres/src/chain_head_listener.rs b/store/postgres/src/chain_head_listener.rs index b9faa164f0b..035e10f377a 100644 --- a/store/postgres/src/chain_head_listener.rs +++ b/store/postgres/src/chain_head_listener.rs @@ -168,8 +168,7 @@ impl ChainHeadUpdateListener { if let Some(watcher) = watchers .try_read() .as_ref() - .map(|w| w.get(&update.network_name)) - .flatten() + .and_then(|w| w.get(&update.network_name)) { watcher.send(); } diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 8ea6ebb8067..44b739abefa 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -731,7 +731,6 @@ mod data { .into_iter() .map(|h| h.parse()) .collect::, _>>() - .map_err(Error::from) } Storage::Private(Schema { blocks, .. 
}) => Ok(blocks .table() @@ -835,7 +834,7 @@ mod data { Ok(Some(( number, crate::chain_store::try_parse_timestamp(ts)?, - parent_hash.map(|h| BlockHash::from(h)), + parent_hash.map(BlockHash::from), ))) } } @@ -2520,7 +2519,7 @@ impl ChainStoreTrait for ChainStore { match res { Ok(blocks) => { - for (_, blocks_for_num) in &blocks { + for blocks_for_num in blocks.values() { if blocks.len() == 1 { self.recent_blocks_cache .insert_block(blocks_for_num[0].clone()); @@ -2543,9 +2542,7 @@ impl ChainStoreTrait for ChainStore { let mut result = cached_map; for (num, blocks) in stored { - if !result.contains_key(&num) { - result.insert(num, blocks); - } + result.entry(num).or_insert(blocks); } result @@ -2579,7 +2576,7 @@ impl ChainStoreTrait for ChainStore { let stored = if cached.len() < hashes.len() { let hashes = hashes .iter() - .filter(|hash| cached.iter().find(|(ptr, _)| &ptr.hash == *hash).is_none()) + .filter(|hash| !cached.iter().any(|(ptr, _)| &ptr.hash == *hash)) .cloned() .collect::>(); // We key this off the entire list of hashes, which means @@ -3128,7 +3125,7 @@ impl EthereumCallCache for ChainStore { } let ids: Vec<_> = reqs - .into_iter() + .iter() .map(|req| contract_call_id(req, &block)) .collect(); let id_refs: Vec<_> = ids.iter().map(|id| id.as_slice()).collect(); @@ -3158,9 +3155,9 @@ impl EthereumCallCache for ChainStore { resps.push(resp); } let calls = reqs - .into_iter() + .iter() .enumerate() - .filter(|(idx, _)| !found.contains(&idx)) + .filter(|(idx, _)| !found.contains(idx)) .map(|(_, call)| call.cheap_clone()) .collect(); Ok((resps, calls)) diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index 80830b3e61b..5b052b98e5f 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -31,7 +31,6 @@ use diesel_async::{ AsyncConnection, }; use diesel_async::{RunQueryDsl, SimpleAsyncConnection}; -use tokio; use graph::{ futures03::{ @@ -214,7 +213,7 @@ impl CopyState { let mut unfinished = Vec::new(); for dst_table in dst.tables.values() { - if let Some(src_table) = src.table_for_entity(&dst_table.object).ok() { + if let Ok(src_table) = src.table_for_entity(&dst_table.object) { unfinished.push( TableState::init( conn, @@ -386,7 +385,7 @@ impl TableState { e ) }) - .map(|table| table.clone()) + .cloned() } let mut states = Vec::new(); diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 07ed4d13b6f..017e74591c8 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -481,7 +481,7 @@ pub async fn transact_block( // Performance note: This costs us an extra DB query on every update. We used to put this in the // `where` clause of the `update` statement, but that caused Postgres to use bitmap scans instead // of a simple primary key lookup. So a separate query it is. - let block_ptr = block_ptr(conn, &site).await?; + let block_ptr = block_ptr(conn, site).await?; if let Some(block_ptr_from) = block_ptr { if block_ptr_from.number >= ptr.number { return Err(StoreError::DuplicateBlockProcessing( @@ -561,7 +561,7 @@ pub async fn forward_block_ptr( // No matching rows were found. This is an error. By the filter conditions, this can only be // due to a missing deployment (which `block_ptr` catches) or duplicate block processing. - 0 => match block_ptr(conn, &site).await? { + 0 => match block_ptr(conn, site).await? 
{ Some(block_ptr_from) if block_ptr_from.number >= ptr.number => Err( StoreError::DuplicateBlockProcessing(site.deployment.clone(), ptr.number), ), @@ -1113,7 +1113,7 @@ pub(crate) async fn revert_subgraph_errors( // The result will be the same at `reverted_block` or `reverted_block - 1` since the errors at // `reverted_block` were just deleted, but semantically we care about `reverted_block - 1` which // is the block being reverted to. - check_health(&logger, conn, id, reverted_block - 1).await?; + check_health(logger, conn, id, reverted_block - 1).await?; // If the deployment is failed in both `failed` and `status` columns, // update both values respectively to `false` and `healthy`. Basically diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index f8abf4d93e4..61e59e2f7ec 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -727,7 +727,6 @@ impl DeploymentStore { sql_query(drop_index_sql).execute(&mut conn).await?; Err(StoreError::Canceled) } - .map_err(Into::into) } /// Returns a list of all existing indexes for the specified Entity table. @@ -745,8 +744,7 @@ impl DeploymentStore { let table_name = &table.name; let indexes = catalog::indexes_for_table(&mut conn, schema_name.as_str(), table_name.as_str()) - .await - .map_err(StoreError::from)?; + .await?; Ok(indexes.into_iter().map(CreateIndex::parse).collect()) } @@ -858,13 +856,13 @@ impl DeploymentStore { ) -> Result, StoreError> { async fn do_prune( store: Arc, - mut conn: &mut AsyncPgConnection, + conn: &mut AsyncPgConnection, site: Arc, req: PruneRequest, mut reporter: Box, ) -> Result, StoreError> { - let layout = store.layout(&mut conn, site.clone()).await?; - let state = deployment::state(&mut conn, &site).await?; + let layout = store.layout(conn, site.clone()).await?; + let state = deployment::state(conn, &site).await?; if state.latest_block.number <= req.history_blocks { // We haven't accumulated enough history yet, nothing to prune @@ -884,7 +882,7 @@ impl DeploymentStore { .await?; layout - .prune(&store.logger, reporter.as_mut(), &mut conn, &req) + .prune(&store.logger, reporter.as_mut(), conn, &req) .await?; Ok(reporter) } @@ -1293,7 +1291,7 @@ impl DeploymentStore { .await } - if !prune_in_progress(&self, &site)? { + if !prune_in_progress(self, &site)? { let req = PruneRequest::new( &site.as_ref().into(), history_blocks, @@ -1303,7 +1301,7 @@ impl DeploymentStore { )?; let deployment_id = site.id; - let logger = Logger::new(&logger, o!("component" => "Prune")); + let logger = Logger::new(logger, o!("component" => "Prune")); let handle = graph::spawn(run(logger, self.clone(), site, req)); self.prune_handles .lock() @@ -1733,7 +1731,7 @@ impl DeploymentStore { // We reset the firehose cursor. That way, on resume, Firehose will start from // the block_ptr instead (with sanity checks to ensure it's resuming at the // correct block). - let _ = self.revert_block_operations(site.clone(), parent_ptr.clone(), &FirehoseCursor::None).await?; + self.revert_block_operations(site.clone(), parent_ptr.clone(), &FirehoseCursor::None).await?; // Unfail the deployment. 
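A few hunks up, the chain_store.rs change from `contains_key` plus `insert` to `result.entry(num).or_insert(blocks)` is clippy's `map_entry` lint: the entry API performs one hash lookup where the original did two. A sketch with hypothetical block data:

    use std::collections::HashMap;

    fn main() {
        let mut result: HashMap<i32, Vec<&str>> = HashMap::new();
        result.insert(1, vec!["0xabc"]);

        // Before: if !result.contains_key(&2) { result.insert(2, blocks); }
        // After: one lookup, and existing entries are left untouched.
        let blocks = vec!["0xdef"];
        result.entry(2).or_insert(blocks);
        assert_eq!(result[&2], ["0xdef"]);
    }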
deployment::update_deployment_status(conn, deployment_id, prev_health, None,None).await?; @@ -1946,7 +1944,7 @@ pub fn generate_index_creation_sql( after: Option, ) -> Result<(String, String), StoreError> { let schema_name = layout.site.namespace.clone(); - let table = resolve_table_name(&layout, &entity_name)?; + let table = resolve_table_name(&layout, entity_name)?; let (column_names, index_exprs) = resolve_column_names_and_index_exprs(table, &field_names)?; let column_names_sep_by_underscores = column_names.join("_"); diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index 7dcac2f3bd8..7f2f9994848 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -133,7 +133,7 @@ impl From<(Deployment, Head)> for DeploymentDetail { synced_at, synced_at_block_number, block_hash: block_hash.clone(), - block_number: block_number.clone(), + block_number, entity_count: entity_count as usize, } } @@ -578,7 +578,7 @@ impl StoredDeploymentEntity { &detail.subgraph, "start_block", manifest.start_block_hash.clone(), - manifest.start_block_number.map(|n| n.into()), + manifest.start_block_number.map(|n| n), )? .map(|block| block.to_ptr()); diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index 8835f449c35..3562e4584f2 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -117,7 +117,7 @@ pub(super) async fn insert( let dds: Vec<_> = data_sources .entries .iter() - .map(|(block_ptr, dds)| { + .flat_map(|(block_ptr, dds)| { dds.iter().map(|ds| { let StoredDynamicDataSource { manifest_idx: _, @@ -160,7 +160,6 @@ pub(super) async fn insert( )) }) }) - .flatten() .collect::>()?; insert_into(decds::table) diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 6c9c340342c..c8602e0a519 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -209,7 +209,7 @@ query Query ($id: String) {{ e )) })?; - map.insert(Word::from(f.name.clone()), value); + map.insert(f.name.clone(), value); } map }; diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index 4f3864fc4f8..01bc588a72e 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -368,10 +368,10 @@ impl JsonNotification { ) })?; - if payload_rows.is_empty() || payload_rows.get(0).is_none() { + if payload_rows.is_empty() || payload_rows.is_empty() { return Err(anyhow!("No payload found for notification {}", payload_id))?; } - let payload: String = payload_rows.get(0).unwrap().get(0); + let payload: String = payload_rows.first().unwrap().get(0); Ok(JsonNotification { payload: serde_json::from_str(&payload)?, diff --git a/store/postgres/src/pool/coordinator.rs b/store/postgres/src/pool/coordinator.rs index c16ba4d4b7a..fb0b05a1ac0 100644 --- a/store/postgres/src/pool/coordinator.rs +++ b/store/postgres/src/pool/coordinator.rs @@ -96,7 +96,7 @@ impl PoolCoordinator { if count.had_migrations() { let server = self.server(&pool.shard)?; for pool in self.pools() { - let remap_res = pool.remap(&server).await; + let remap_res = pool.remap(server).await; if let Err(e) = remap_res { error!(pool.logger, "Failed to map imports from {}", server.shard; "error" => e.to_string()); return Err(e); @@ -130,7 +130,7 @@ impl PoolCoordinator { fn primary(&self) -> Result, StoreError> { let map = self.pools.lock().unwrap(); - let pool_state = map.get(&*&PRIMARY_SHARD).ok_or_else(|| { + let pool_state = 
map.get(&PRIMARY_SHARD).ok_or_else(|| { internal_error!("internal error: primary shard not found in pool coordinator") })?; @@ -297,7 +297,7 @@ impl PoolCoordinator { let migrated = migrate(&states, self.servers.as_ref()).await?; - let propagated = propagate(&self, migrated).await?; + let propagated = propagate(self, migrated).await?; primary.create_cross_shard_views(&self.servers).await?; diff --git a/store/postgres/src/pool/foreign_server.rs b/store/postgres/src/pool/foreign_server.rs index 9f9f9f60791..78cd83116db 100644 --- a/store/postgres/src/pool/foreign_server.rs +++ b/store/postgres/src/pool/foreign_server.rs @@ -49,7 +49,7 @@ impl ForeignServer { if shard == current { "subgraphs".to_string() } else { - Self::metadata_schema(&shard) + Self::metadata_schema(shard) } } @@ -67,7 +67,7 @@ impl ForeignServer { ), }; - let host = match config.get_hosts().get(0) { + let host = match config.get_hosts().first() { Some(Host::Tcp(host)) => host.to_string(), _ => bail!("can not find host name in `{}`", SafeDisplay(postgres_url)), }; @@ -226,7 +226,7 @@ impl ForeignServer { existing != needed } - if &self.shard == &*PRIMARY_SHARD { + if self.shard == *PRIMARY_SHARD { let existing = catalog::foreign_tables(conn, PRIMARY_PUBLIC).await?; let needed = PRIMARY_TABLES .into_iter() diff --git a/store/postgres/src/pool/manager.rs b/store/postgres/src/pool/manager.rs index fdca61d2ca6..4677ea6276b 100644 --- a/store/postgres/src/pool/manager.rs +++ b/store/postgres/src/pool/manager.rs @@ -274,7 +274,9 @@ pub(crate) fn spawn_connection_reaper( if last_used.elapsed() > CHECK_INTERVAL { // Reset wait time if there was no activity recently so that // we don't report stale wait times - wait_gauge.as_ref().map(|wait_gauge| wait_gauge.set(0.0)); + if let Some(wait_gauge) = wait_gauge.as_ref() { + wait_gauge.set(0.0) + } } tokio::time::sleep(CHECK_INTERVAL).await; } diff --git a/store/postgres/src/pool/mod.rs b/store/postgres/src/pool/mod.rs index cd44b32463e..20d332616a2 100644 --- a/store/postgres/src/pool/mod.rs +++ b/store/postgres/src/pool/mod.rs @@ -67,14 +67,14 @@ impl DerefMut for PermittedConnection { /// The namespace under which the `PRIMARY_TABLES` are mapped into each /// shard -pub(crate) const PRIMARY_PUBLIC: &'static str = "primary_public"; +pub(crate) const PRIMARY_PUBLIC: &str = "primary_public"; /// Tables that we map from the primary into `primary_public` in each shard const PRIMARY_TABLES: [&str; 3] = ["deployment_schemas", "chains", "active_copies"]; /// The namespace under which we create views in the primary that union all /// the `SHARDED_TABLES` -pub(crate) const CROSS_SHARD_NSP: &'static str = "sharded"; +pub(crate) const CROSS_SHARD_NSP: &str = "sharded"; /// Tables that we map from each shard into each other shard into the /// `shard__subgraphs` namespace @@ -206,7 +206,7 @@ impl PoolState { // we didn't have an error, it means the database is not available if self.needs_setup() { error!(self.logger, "Database is not available, setup did not work"); - return Err(StoreError::DatabaseUnavailable); + Err(StoreError::DatabaseUnavailable) } else { Ok(pool) } @@ -596,17 +596,17 @@ impl PoolInner { match res { Ok(conn) => { self.state_tracker.mark_available(); - return Ok(conn); + Ok(conn) } Err(PoolError::Closed) | Err(PoolError::Backend(_)) => { self.state_tracker.mark_unavailable(Duration::from_nanos(0)); - return Err(StoreError::DatabaseUnavailable); + Err(StoreError::DatabaseUnavailable) } Err(PoolError::Timeout(_)) => { if !self.state_tracker.timeout_is_ignored() { 
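The pool/mod.rs and foreign_server.rs hunks in this region apply two further lints: `needless_return` (match arms become tail expressions) and `op_ref` (`&a == &b` becomes `a == b`, since taking fresh references on both sides of `==` adds nothing). A sketch of the comparison cleanup, on placeholder shard names:

    fn main() {
        let shard = String::from("primary");
        let primary = String::from("primary");

        // Before: if &shard == &primary { .. }
        // After: PartialEq on the values directly; no semantic change.
        if shard == primary {
            println!("running against the primary shard");
        }
    }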
self.state_tracker.mark_unavailable(elapsed); } - return Err(StoreError::StatementTimeout); + Err(StoreError::StatementTimeout) } Err(PoolError::NoRuntimeSpecified) | Err(PoolError::PostCreateHook(_)) => { let e = res.err().unwrap(); @@ -650,7 +650,7 @@ impl PoolInner { { let pool = self.fdw_pool(logger)?; loop { - match self.get_from_pool(&pool, None, Duration::ZERO).await { + match self.get_from_pool(pool, None, Duration::ZERO).await { Ok(conn) => return Ok(conn), Err(e) => { if timeout() { @@ -701,20 +701,18 @@ impl PoolInner { async fn locale_check(&self, logger: &Logger) -> Result<(), StoreError> { let mut conn = self.get().await?; - Ok( - if let Err(msg) = catalog::Locale::load(&mut conn).await?.suitable() { - if &self.shard == &*PRIMARY_SHARD && primary::is_empty(&mut conn).await? { - const MSG: &str = - "Database does not use C locale. \ - Please check the graph-node documentation for how to set up the database locale"; - - crit!(logger, "{}: {}", MSG, msg); - panic!("{}: {}", MSG, msg); - } else { - warn!(logger, "{}.\nPlease check the graph-node documentation for how to set up the database locale", msg); - } - }, - ) + let _: () = if let Err(msg) = catalog::Locale::load(&mut conn).await?.suitable() { + if self.shard == *PRIMARY_SHARD && primary::is_empty(&mut conn).await? { + const MSG: &str = "Database does not use C locale. \ + Please check the graph-node documentation for how to set up the database locale"; + + crit!(logger, "{}: {}", MSG, msg); + panic!("{}: {}", MSG, msg); + } else { + warn!(logger, "{}.\nPlease check the graph-node documentation for how to set up the database locale", msg); + } + }; + Ok(()) } pub(crate) async fn query_permit(&self) -> OwnedSemaphorePermit { @@ -834,7 +832,7 @@ impl PoolInner { servers: &'a [ForeignServer], ) -> Vec<(&'a str, String)> { servers - .into_iter() + .iter() .map(|server| { let nsp = if &server.shard == current { local_nsp.to_string() @@ -900,13 +898,13 @@ impl PoolInner { /// need to remap anything that we are importing via fdw to make sure we /// are using this updated schema pub async fn remap(&self, server: &ForeignServer) -> Result<(), StoreError> { - if &server.shard == &*PRIMARY_SHARD { + if server.shard == *PRIMARY_SHARD { info!(&self.logger, "Mapping primary"); let mut conn = self.get().await?; conn.transaction(|conn| ForeignServer::map_primary(conn, &self.shard).scope_boxed()) .await?; } - if &server.shard != &self.shard { + if server.shard != self.shard { info!( &self.logger, "Mapping metadata from {}", @@ -920,7 +918,7 @@ impl PoolInner { } pub async fn needs_remap(&self, server: &ForeignServer) -> Result { - if &server.shard == &self.shard { + if server.shard == self.shard { return Ok(false); } diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 5a1840b39e3..f9d8ee232f2 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -1265,7 +1265,7 @@ impl Connection { features, data_source_kinds: data_sources, handler_kinds: handlers, - network: network, + network, has_declared_calls, has_bytes_as_ids, has_aggregations, @@ -1808,7 +1808,7 @@ impl Connection { &detail.subgraph, "latest_ethereum_block", detail.block_hash.clone(), - detail.block_number.clone(), + detail.block_number, )? 
.map(|b| b.to_ptr()) .map(|ptr| (Some(Vec::from(ptr.hash_slice())), Some(ptr.number))) @@ -1822,7 +1822,7 @@ impl Connection { u::latest_ethereum_block_number.eq(latest_number), u::failed.eq(detail.failed), u::synced_at.eq(detail.synced_at), - u::synced_at_block_number.eq(detail.synced_at_block_number.clone()), + u::synced_at_block_number.eq(detail.synced_at_block_number), )) .execute(&mut self.conn) .await?; @@ -2201,7 +2201,7 @@ impl Mirror { } pub async fn active_assignments(&self, node: &NodeId) -> Result, StoreError> { - self.read_async(|conn| queries::active_assignments(conn, &node).scope_boxed()) + self.read_async(|conn| queries::active_assignments(conn, node).scope_boxed()) .await } diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index b2dbbcc8a26..5cc4be3cc5d 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -27,7 +27,6 @@ use diesel::sql_types::Text; use diesel::{debug_query, sql_query, OptionalExtension, QueryDsl, QueryResult}; use diesel_async::scoped_futures::ScopedFutureExt; use diesel_async::{AsyncConnection, RunQueryDsl, SimpleAsyncConnection}; -use tokio; use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation}; use graph::blockchain::BlockTime; @@ -206,7 +205,7 @@ impl PartialEq for SqlName { impl FromSql for SqlName { fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result { - >::from_sql(bytes).map(|s| SqlName::verbatim(s)) + >::from_sql(bytes).map(SqlName::verbatim) } } @@ -282,7 +281,7 @@ impl Layout { if catalog.use_poi { tables.push(Self::make_poi_table( - &schema, + schema, &catalog, has_ts_tables, tables.len(), @@ -296,7 +295,7 @@ impl Layout { tables }); - let rollups = Self::rollups(&tables, &schema)?; + let rollups = Self::rollups(&tables, schema)?; Ok(Layout { site, @@ -652,7 +651,7 @@ impl Layout { } // sort the elements in each blocks bucket by vid - for (_, vec) in &mut entities { + for vec in entities.values_mut() { vec.sort_by(|a, b| a.vid.cmp(&b.vid)); } @@ -1025,7 +1024,7 @@ impl Layout { // FIXME: we clone all the ids here let chunk = IdList::try_from_iter( group.entity_type.id_type()?, - chunk.into_iter().map(|id| (*id).to_owned()), + chunk.iter().map(|id| (*id).to_owned()), )?; count += ClampRangeQuery::new(table, &chunk, block)? .execute(conn) @@ -1045,7 +1044,7 @@ impl Layout { pub async fn truncate_tables(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> { for table in self.tables.values() { - sql_query(&format!("TRUNCATE TABLE {}", table.qualified_name)) + sql_query(format!("TRUNCATE TABLE {}", table.qualified_name)) .execute(conn) .await?; } @@ -1302,7 +1301,7 @@ impl Layout { break; } Some(bucket) => { - rollup.insert(conn, &bucket, *block).await?; + rollup.insert(conn, bucket, *block).await?; } } } @@ -1391,7 +1390,7 @@ impl ColumnType { if let Some(id_type) = schema .entity_type(name) .ok() - .and_then(|entity_type| Some(entity_type.id_type())) + .map(|entity_type| entity_type.id_type()) .transpose()? 
{ return Ok(id_type.into()); @@ -1488,7 +1487,7 @@ impl Column { let sql_name = SqlName::from(&*field.name); - let is_reference = schema.is_reference(&field.field_type.get_base_type()); + let is_reference = schema.is_reference(field.field_type.get_base_type()); let column_type = if sql_name.as_str() == PRIMARY_KEY_COLUMN { IdType::try_from(&field.field_type)?.into() diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index a3c4ed6885e..e7160edbe22 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -408,14 +408,7 @@ impl Table { if index_def.is_some() && ENV_VARS.postpone_attribute_index_creation { let arr = index_def .unwrap() - .indexes_for_table( - &self.nsp, - &self.name.to_string(), - &self, - false, - false, - false, - ) + .indexes_for_table(&self.nsp, &self.name.to_string(), self, false, false, false) .map_err(|_| fmt::Error)?; for (_, sql) in arr { writeln!(out, "{};", sql).expect("properly formated index statements") diff --git a/store/postgres/src/relational/dsl.rs b/store/postgres/src/relational/dsl.rs index 13cab9dd9d0..8620fe7c1fa 100644 --- a/store/postgres/src/relational/dsl.rs +++ b/store/postgres/src/relational/dsl.rs @@ -86,7 +86,7 @@ pub struct ChildAliasStr { impl ChildAliasStr { fn new(idx: u8) -> Self { - let c = 'i' as u8; + let c = b'i'; let alias = if idx == 0 { [c, 0, 0, 0] } else if idx < 10 { @@ -187,9 +187,9 @@ impl<'a> Table<'a> { self.meta .columns .iter() - .chain(META_COLS.into_iter()) + .chain(*META_COLS) .find(|c| &c.name == name) - .map(|c| Column::new(self.clone(), c)) + .map(|c| Column::new(*self, c)) } pub fn name(&self) -> &str { @@ -266,7 +266,7 @@ impl<'a> Table<'a> { .collect(); names.sort(); for name in names { - let column = self.meta.column_for_field(&name)?; + let column = self.meta.column_for_field(name)?; cols.push(column); } } diff --git a/store/postgres/src/relational/index.rs b/store/postgres/src/relational/index.rs index c72d832ba7a..6181d1cd1a2 100644 --- a/store/postgres/src/relational/index.rs +++ b/store/postgres/src/relational/index.rs @@ -607,10 +607,8 @@ impl CreateIndex { match self { CreateIndex::Unknown { .. } => (), CreateIndex::Parsed { columns, .. } => { - if columns.len() == 1 { - if columns[0].is_id() { - return true; - } + if columns.len() == 1 && columns[0].is_id() { + return true; } } } @@ -648,7 +646,7 @@ impl CreateIndex { } } - pub fn fields_exist_in_dest<'a>(&self, dest_table: &'a Table) -> bool { + pub fn fields_exist_in_dest(&self, dest_table: &Table) -> bool { fn column_exists<'a>(it: &mut impl Iterator, column_name: &str) -> bool { it.any(|c| *c == *column_name) } @@ -774,7 +772,7 @@ impl IndexList { }; let schema_name = site.namespace.clone(); let layout = store.layout(conn, site).await?; - for (_, table) in &layout.tables { + for table in layout.tables.values() { let indexes = load_indexes_from_table(conn, table, schema_name.as_str()).await?; list.indexes.insert(table.name.to_string(), indexes); } @@ -855,17 +853,13 @@ impl IndexList { .get_results::(conn) .await? 
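The relational.rs hunks just above show two closure cleanups: `redundant_closure` (`map(|s| SqlName::verbatim(s))` becomes `map(SqlName::verbatim)`) and `bind_instead_of_map` (`and_then(|x| Some(f(x)))` becomes `map(|x| f(x))`). A minimal sketch of both, on throwaway values:

    fn main() {
        // redundant_closure: the closure only forwards to the function.
        let names = ["token", "pair"];
        let owned: Vec<String> = names.into_iter().map(String::from).collect();
        assert_eq!(owned.len(), 2);

        // bind_instead_of_map: and_then(|x| Some(..)) never produces None itself.
        let n = Some(2);
        assert_eq!(n.map(|x| x * 10), Some(20));
    }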
.into_iter() - .map(|ii| ii.into()) .collect::>(); assert!(ii_vec.len() <= 1); - if ii_vec.len() == 0 || !ii_vec[0].isvalid { + if ii_vec.is_empty() || !ii_vec[0].isvalid { // if a bad index exist lets first drop it - if ii_vec.len() > 0 { - let drop_query = sql_query(format!( - "DROP INDEX {}.{};", - namespace.to_string(), - index_name - )); + if !ii_vec.is_empty() { + let drop_query = + sql_query(format!("DROP INDEX {}.{};", namespace, index_name)); drop_query.execute(conn).await?; } sql_query(create_query).execute(conn).await?; diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 7b9bc0b8e41..3d31e3df68a 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -468,9 +468,9 @@ impl Layout { PruningStrategy::Delete => { // Delete all entity versions whose range was closed // before `req.earliest_block` - let range = VidRange::for_prune(conn, &table, 0, req.earliest_block).await?; + let range = VidRange::for_prune(conn, table, 0, req.earliest_block).await?; let mut batcher = - VidBatcher::load(conn, &self.site.namespace, &table, range).await?; + VidBatcher::load(conn, &self.site.namespace, table, range).await?; tracker.start_delete(conn, table, range, &batcher).await?; while !batcher.finished() { diff --git a/store/postgres/src/relational/rollup.rs b/store/postgres/src/relational/rollup.rs index 82f59e59dbf..c2929f6ca05 100644 --- a/store/postgres/src/relational/rollup.rs +++ b/store/postgres/src/relational/rollup.rs @@ -385,7 +385,7 @@ impl<'a> RollupSql<'a> { .aggregates .iter() .flat_map(|agg| &agg.src_columns) - .map(|col| *col) + .copied() .filter(|&col| col != "id" && col != "timestamp") .collect(); agg_srcs.sort(); @@ -403,7 +403,7 @@ impl<'a> RollupSql<'a> { " order by {src_table}.timestamp) data group by timestamp", src_table = self.src_table )?; - Ok(write_dims(self.dimensions, w)?) 
+ write_dims(self.dimensions, w) } fn select(&self, w: &mut dyn fmt::Write) -> fmt::Result { diff --git a/store/postgres/src/relational/value.rs b/store/postgres/src/relational/value.rs index fadcfdcfbca..a59dcfe511c 100644 --- a/store/postgres/src/relational/value.rs +++ b/store/postgres/src/relational/value.rs @@ -50,22 +50,22 @@ pub enum OidValue { impl FromSql for OidValue { fn from_sql(value: diesel::pg::PgValue) -> diesel::deserialize::Result { - const VARCHAR_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1043) }; - const VARCHAR_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1015) }; - const TEXT_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(25) }; - const TEXT_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1009) }; - const BYTEA_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(17) }; - const BYTEA_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1001) }; - const BOOL_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(16) }; - const BOOL_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1000) }; - const INTEGER_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(23) }; - const INTEGER_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1007) }; - const INT8_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(20) }; - const INT8_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1016) }; - const NUMERIC_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1700) }; - const NUMERIC_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1231) }; - const TIMESTAMPTZ_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1184) }; - const TIMESTAMPTZ_ARY_OID: NonZeroU32 = unsafe { NonZeroU32::new_unchecked(1185) }; + const VARCHAR_OID: NonZeroU32 = NonZeroU32::new(1043).unwrap(); + const VARCHAR_ARY_OID: NonZeroU32 = NonZeroU32::new(1015).unwrap(); + const TEXT_OID: NonZeroU32 = NonZeroU32::new(25).unwrap(); + const TEXT_ARY_OID: NonZeroU32 = NonZeroU32::new(1009).unwrap(); + const BYTEA_OID: NonZeroU32 = NonZeroU32::new(17).unwrap(); + const BYTEA_ARY_OID: NonZeroU32 = NonZeroU32::new(1001).unwrap(); + const BOOL_OID: NonZeroU32 = NonZeroU32::new(16).unwrap(); + const BOOL_ARY_OID: NonZeroU32 = NonZeroU32::new(1000).unwrap(); + const INTEGER_OID: NonZeroU32 = NonZeroU32::new(23).unwrap(); + const INTEGER_ARY_OID: NonZeroU32 = NonZeroU32::new(1007).unwrap(); + const INT8_OID: NonZeroU32 = NonZeroU32::new(20).unwrap(); + const INT8_ARY_OID: NonZeroU32 = NonZeroU32::new(1016).unwrap(); + const NUMERIC_OID: NonZeroU32 = NonZeroU32::new(1700).unwrap(); + const NUMERIC_ARY_OID: NonZeroU32 = NonZeroU32::new(1231).unwrap(); + const TIMESTAMPTZ_OID: NonZeroU32 = NonZeroU32::new(1184).unwrap(); + const TIMESTAMPTZ_ARY_OID: NonZeroU32 = NonZeroU32::new(1185).unwrap(); match value.get_oid() { VARCHAR_OID | TEXT_OID => { @@ -230,7 +230,7 @@ impl FromOidRow for Entity { .filter(|(value, _)| !matches!(value, OidValue::Null)) .map(|(value, column)| { graph::prelude::Value::from_oid_value(value, &column.column_type) - .map(|value| (Word::from(column.field.clone()), value)) + .map(|value| (column.field.clone(), value)) }); schema.try_make_entity(x).map_err(StoreError::from) } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index eb77976924b..015a41ee88d 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -335,7 +335,7 @@ impl FromColumnValue for r::Value { fn from_timestamp(i: &str) -> Result { scalar::Timestamp::from_rfc3339(i) - .map(|v| 
r::Value::Timestamp(v)) + .map(r::Value::Timestamp) .map_err(|e| { StoreError::Unknown(anyhow!("failed to convert {} to Timestamp: {}", i, e)) }) @@ -645,7 +645,7 @@ impl<'a> SqlValue<'a> { BigDecimal(d) => { S::Numeric(d.to_string()) } - Timestamp(ts) => S::Timestamp(ts.clone()), + Timestamp(ts) => S::Timestamp(*ts), Bool(b) => S::Bool(*b), List(values) => { match column_type { @@ -685,7 +685,7 @@ impl std::fmt::Display for SqlValue<'_> { S::Int(i) => write!(f, "{}", i), S::Int8(i) => write!(f, "{}", i), S::Numeric(s) => write!(f, "{}", s), - S::Timestamp(ts) => write!(f, "{}", ts.as_microseconds_since_epoch().to_string()), + S::Timestamp(ts) => write!(f, "{}", ts.as_microseconds_since_epoch()), S::Numerics(values) => write!(f, "{:?}", values), S::Bool(b) => write!(f, "{}", b), S::List(values) => write!(f, "{:?}", values), @@ -891,12 +891,10 @@ impl Comparison { | Comparison::Greater, Value::Bool(_) | Value::List(_) | Value::Null, ) - | (Comparison::Match, _) => { - return Err(StoreError::UnsupportedFilter( - self.to_string(), - value.to_string(), - )); - } + | (Comparison::Match, _) => Err(StoreError::UnsupportedFilter( + self.to_string(), + value.to_string(), + )), } } } @@ -1278,19 +1276,17 @@ impl<'a> QueryFragment for QueryChild<'a> { out.push_sql(" = "); child_column.walk_ast(out.reborrow())?; } + } else if parent_column.is_list() { + // Type C: i.id = any(c.child_ids) + child_column.walk_ast(out.reborrow())?; + out.push_sql(" = any("); + parent_column.walk_ast(out.reborrow())?; + out.push_sql(")"); } else { - if parent_column.is_list() { - // Type C: i.id = any(c.child_ids) - child_column.walk_ast(out.reborrow())?; - out.push_sql(" = any("); - parent_column.walk_ast(out.reborrow())?; - out.push_sql(")"); - } else { - // Type D: i.id = c.child_id - child_column.walk_ast(out.reborrow())?; - out.push_sql(" = "); - parent_column.walk_ast(out.reborrow())?; - } + // Type D: i.id = c.child_id + child_column.walk_ast(out.reborrow())?; + out.push_sql(" = "); + parent_column.walk_ast(out.reborrow())?; } out.push_sql(" and "); @@ -1414,12 +1410,10 @@ impl<'a> Filter<'a> { | Value::Int(_) | Value::Int8(_) | Value::List(_) - | Value::Null => { - return Err(StoreError::UnsupportedFilter( - op.to_owned(), - value.to_string(), - )); - } + | Value::Null => Err(StoreError::UnsupportedFilter( + op.to_owned(), + value.to_string(), + )), } } @@ -1435,8 +1429,7 @@ impl<'a> Filter<'a> { if column.use_prefix_comparison() && !value.is_null() { let column_type = column.column_type(); - PrefixComparison::new(op, column, column_type, value) - .map(|pc| Filter::PrefixCmp(pc)) + PrefixComparison::new(op, column, column_type, value).map(Filter::PrefixCmp) } else { let value = QueryValue::new(value, column.column_type())?; Ok(Filter::Cmp(column, op, value)) @@ -1522,7 +1515,7 @@ impl<'a> Filter<'a> { } NotIn(attr, values) => { let column = table.column_for_field(attr.as_str())?; - let values = QueryValue::many(values, &column.column_type())?; + let values = QueryValue::many(values, column.column_type())?; Ok(F::NotIn(column, values)) } Contains(attr, value) => contains(table, attr, K::Like, value), @@ -1755,7 +1748,7 @@ impl<'a> Filter<'a> { if have_non_nulls { if column.use_prefix_comparison() - && PrefixType::new(&column).is_ok() + && PrefixType::new(column).is_ok() && values.iter().all(|v| match &v.value { SqlValue::Text(s) => s.len() < STRING_PREFIX_SIZE, SqlValue::String(s) => s.len() < STRING_PREFIX_SIZE, @@ -1770,7 +1763,7 @@ impl<'a> Filter<'a> { // query optimizer // See PrefixComparison for a more 
detailed discussion of what // is happening here - PrefixType::new(&column)?.push_column_prefix(&column, &mut out.reborrow())?; + PrefixType::new(column)?.push_column_prefix(column, &mut out.reborrow())?; } else { column.walk_ast(out.reborrow())?; } @@ -2232,7 +2225,7 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" e\n where "); // This clause with an empty array would filter out everything - if self.excluded_keys.len() > 0 { + if !self.excluded_keys.is_empty() { out.push_identifier(&self.table.primary_key().name)?; // For truly gigantic `excluded_keys` lists, this will be slow, and // we should rewrite this query to use a CTE or a temp table to hold @@ -2316,7 +2309,7 @@ impl<'a> InsertRow<'a> { }) .collect::>()?; if let ColumnType::TSVector(config) = &column.column_type { - InsertValue::Fulltext(fulltext_field_values, &config) + InsertValue::Fulltext(fulltext_field_values, config) } else { return Err(StoreError::FulltextColumnMissingConfig); } @@ -2508,7 +2501,7 @@ impl<'a> ConflictingEntitiesQuery<'a> { .iter() .map(|entity| layout.table_for_entity(entity).map(|table| table.as_ref())) .collect::, _>>()?; - let ids = IdList::try_from_iter_ref(group.ids().map(|id| IdRef::from(id)))?; + let ids = IdList::try_from_iter_ref(group.ids().map(IdRef::from))?; Ok(ConflictingEntitiesQuery { tables, ids }) } } @@ -3017,11 +3010,11 @@ impl<'a> FilterWindow<'a> { out.push_sql("select '"); out.push_sql(self.table.meta.object.as_str()); out.push_sql("' as entity, c.id, c.vid, p.id::text as "); - out.push_sql(&*PARENT_ID); + out.push_sql(PARENT_ID); limit .sort_key .select(&mut out, SelectStatementLevel::InnerStatement)?; - self.children(true, &limit, &mut out) + self.children(true, limit, &mut out) } /// Collect all the parent id's from all windows @@ -3587,7 +3580,7 @@ impl<'a> SortKey<'a> { direction: SortDirection, ) -> Result>, QueryExecutionError> { assert!(entity_types.len() < 255); - return entity_types + entity_types .iter() .enumerate() .map(|(i, entity_type)| { @@ -3645,7 +3638,7 @@ impl<'a> SortKey<'a> { }) } }) - .collect::>, QueryExecutionError>>(); + .collect::>, QueryExecutionError>>() } fn with_child_interface_key<'a>( @@ -3964,7 +3957,7 @@ impl<'a> SortKey<'a> { ) -> QueryResult<()> { fn order_by_parent_id(out: &mut AstPass) { out.push_sql("order by "); - out.push_sql(&*PARENT_ID); + out.push_sql(PARENT_ID); out.push_sql(", "); } @@ -4065,11 +4058,8 @@ impl<'a> SortKey<'a> { )); } - match sort_by.column_type() { - ColumnType::TSVector(_) => { - return Err(internal_error!("TSVector is not supported")); - } - _ => {} + if let ColumnType::TSVector(_) = sort_by.column_type() { + return Err(internal_error!("TSVector is not supported")); } } @@ -4176,8 +4166,8 @@ impl<'a> SortKey<'a> { Ok(()) } - match self { - SortKey::ChildKey(nested) => match nested { + if let SortKey::ChildKey(nested) = self { + match nested { ChildKey::Single(child) => { add( &child.child_from, @@ -4218,8 +4208,7 @@ impl<'a> SortKey<'a> { out, )?; } - }, - _ => {} + } } Ok(()) } @@ -4389,7 +4378,7 @@ impl<'a> FilterQuery<'a> { out.push_sql(" from (select "); write_column_names(&window.column_names, window.table, Some("c."), &mut out)?; out.push_sql(", p.id::text as "); - out.push_sql(&*PARENT_ID); + out.push_sql(PARENT_ID); window.children(false, &self.limit, &mut out)?; out.push_sql(") c"); out.push_sql("\n "); @@ -4663,7 +4652,7 @@ impl<'a> QueryFragment for ClampRangeQuery<'a> { self.br_column.clamp(&mut out)?; out.push_sql("\n where "); - 
id_is_in(&self.entity_ids, &mut out)?; + id_is_in(self.entity_ids, &mut out)?; out.push_sql(" and ("); self.br_column.latest(&mut out); out.push_sql(")"); @@ -5097,8 +5086,8 @@ fn jsonb_build_object( /// Helper function to iterate over the merged fields of BASE_SQL_COLUMNS and the provided attribute /// names, yielding valid SQL names for the given table. -fn iter_column_names<'a, 'b>( - attribute_names: &'a BTreeSet, +fn iter_column_names<'b>( + attribute_names: &BTreeSet, table: dsl::Table<'b>, include_block_range_column: bool, ) -> impl Iterator { diff --git a/store/postgres/src/sql/parser.rs b/store/postgres/src/sql/parser.rs index 9f1b1483741..e263a54deb2 100644 --- a/store/postgres/src/sql/parser.rs +++ b/store/postgres/src/sql/parser.rs @@ -24,7 +24,7 @@ impl Parser { validator.validate_statements(&mut statements)?; let statement = statements - .get(0) + .first() .ok_or_else(|| anyhow!("No SQL statements found"))?; Ok(statement.to_string()) diff --git a/store/postgres/src/sql/validation.rs b/store/postgres/src/sql/validation.rs index ac8421f9648..0bbca47853c 100644 --- a/store/postgres/src/sql/validation.rs +++ b/store/postgres/src/sql/validation.rs @@ -305,7 +305,7 @@ impl VisitorMut for Validator<'_> { } (Some(_), Some(_)) => { // Table exists but has args, must be a function - return self.validate_function_name(&name); + return self.validate_function_name(name); } (None, Some(args)) => { // Table does not exist but has args, is either an @@ -314,7 +314,7 @@ impl VisitorMut for Validator<'_> { if !self.layout.has_aggregation(table_name.as_str()) { // Not an aggregation, must be a function - return self.validate_function_name(&name); + return self.validate_function_name(name); } let TableFunctionArgs { args, settings } = args; @@ -339,7 +339,7 @@ impl VisitorMut for Validator<'_> { let Some(table) = self.layout.aggregation_table(table_name.as_str(), intv) else { - return self.validate_function_name(&name); + return self.validate_function_name(name); }; table } diff --git a/store/postgres/src/store_events.rs b/store/postgres/src/store_events.rs index 5c7b8dfd845..6189120f602 100644 --- a/store/postgres/src/store_events.rs +++ b/store/postgres/src/store_events.rs @@ -193,7 +193,7 @@ impl SubscriptionManager { let stale_ids = subscriptions .iter_mut() .filter_map(|(id, sender)| match sender.is_closed() { - true => Some(id.clone()), + true => Some(*id), false => None, }) .collect::>(); diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 7b6d37026de..3fb2bda9ecd 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -365,7 +365,7 @@ impl SubgraphStore { // if it doesn't exist, we need to copy the graft base to the new deployment let graft_base_layout = if !exists { let graft_base = match deployment.graft_base.as_ref() { - Some(base) => Some(self.layout(&base).await?), + Some(base) => Some(self.layout(base).await?), None => None, }; @@ -1016,7 +1016,7 @@ impl Inner { .ok_or_else(|| StoreError::UnknownShard(shard.to_string()))?; infos.extend(store.deployment_statuses(&sites).await?); } - let nodes = self.mirror.fill_assignments(&mut infos).await?; + let nodes = self.mirror.fill_assignments(&infos).await?; for info in infos.iter_mut() { info.node = nodes.get(&info.id).map(|(node, _)| node.clone()); info.paused = nodes.get(&info.id).map(|(_, paused)| *paused); @@ -1643,10 +1643,7 @@ impl SubgraphStoreTrait for SubgraphStore { ) -> Result, StoreError> { let deployment = deployment.to_string(); let mut pconn = 
self.primary_conn().await?; - pconn - .get_subgraph_features(deployment) - .await - .map_err(|e| e.into()) + pconn.get_subgraph_features(deployment).await } async fn entity_changes_in_block( diff --git a/store/postgres/src/vid_batcher.rs b/store/postgres/src/vid_batcher.rs index 0dea582bbac..6c32d8a2cca 100644 --- a/store/postgres/src/vid_batcher.rs +++ b/store/postgres/src/vid_batcher.rs @@ -239,7 +239,7 @@ impl VidBatcher { pub(crate) fn set_batch_size(&mut self, size: usize) { self.batch_size.size = size as i64; self.end = match &self.ogive { - Some(ogive) => ogive.next_point(self.start, size as usize).unwrap(), + Some(ogive) => ogive.next_point(self.start, size).unwrap(), None => self.start + size as i64, }; } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 39ef086e61d..dd8593f1371 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -102,7 +102,7 @@ impl LastRollup { (true, Some(_)) => { let block_time = store.block_time(site).await?; block_time - .map(|b| LastRollup::Some(b)) + .map(LastRollup::Some) .unwrap_or(LastRollup::Unknown) } }; @@ -126,7 +126,7 @@ impl LastRollupTracker { block, ) .await - .map(|kind| Mutex::new(kind))?; + .map(Mutex::new)?; Ok(Self(rollup)) } @@ -209,10 +209,7 @@ impl SyncStore { } async fn block_cursor(&self) -> Result { - self.writable - .block_cursor(self.site.cheap_clone()) - .await - .map(FirehoseCursor::from) + self.writable.block_cursor(self.site.cheap_clone()).await } async fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { @@ -1187,7 +1184,7 @@ impl Queue { // are not 'full' at the head of the // queue, something that start_writer // has to take into account - return Ok(Some(batch)); + Ok(Some(batch)) } Err(RwLockError::Poisoned(e)) => { panic!("rwlock on batch was poisoned {:?}", e); @@ -1364,9 +1361,7 @@ impl Queue { // already existing entries in map as that would make us // produce stale values for (k, v) in effective_ops(batch, derived_query, at) { - if !map.contains_key(&k) { - map.insert(k, v); - } + map.entry(k).or_insert(v); } map }, @@ -1920,7 +1915,7 @@ impl WritableStoreTrait for WritableStore { store .writable(logger, self.store.site.id.into(), manifest_idx_and_name) .await - .map(|store| Some(store)) + .map(Some) } else { Ok(None) } diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 5719caaa9ec..0744f864746 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -608,8 +608,8 @@ pub fn all_shards() -> Vec { fn build_store() -> (Arc, ConnectionPool, Config, Arc) { let mut opt = Opt::default(); - let url = std::env::var_os("THEGRAPH_STORE_POSTGRES_DIESEL_URL").filter(|s| s.len() > 0); - let file = std::env::var_os("GRAPH_NODE_TEST_CONFIG").filter(|s| s.len() > 0); + let url = std::env::var_os("THEGRAPH_STORE_POSTGRES_DIESEL_URL").filter(|s| !s.is_empty()); + let file = std::env::var_os("GRAPH_NODE_TEST_CONFIG").filter(|s| !s.is_empty()); if let Some(file) = file { let file = file.into_string().unwrap(); opt.config = Some(file); diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs index ddf950bd273..96473c6ad8c 100644 --- a/tests/src/fixture/ethereum.rs +++ b/tests/src/fixture/ethereum.rs @@ -170,9 +170,9 @@ pub fn push_test_subgraph_trigger( source_idx: u32, ) { let entity = EntitySourceOperation { - entity: entity, - entity_type: entity_type, - entity_op: entity_op, + entity, + entity_type, + entity_op, vid, }; diff --git a/tests/src/subgraph.rs 
b/tests/src/subgraph.rs index dfac2020efe..e1057fccdcb 100644 --- a/tests/src/subgraph.rs +++ b/tests/src/subgraph.rs @@ -124,7 +124,7 @@ impl Subgraph { pub async fn wait_ready(name: &str) -> anyhow::Result<Self> { let start = Instant::now(); while start.elapsed() <= CONFIG.timeout { - if let Some(subgraph) = Self::status(&name).await? { + if let Some(subgraph) = Self::status(name).await? { if subgraph.synced || !subgraph.healthy { return Ok(subgraph); } @@ -199,7 +199,7 @@ impl Subgraph { } let data = resp["data"].as_object().unwrap(); let values = keys - .into_iter() + .iter() .map(|key| data[*key].as_array().unwrap().clone()) .collect::<Vec<_>>(); From 816cc91a9b8ba9b72def26fb70ab94831fc1a4fc Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:22:03 -0800 Subject: [PATCH 02/92] all: Fix warnings from clippy::from_str_radix_10 --- store/test-store/src/block_store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index f085e2dbd9d..b38d5aae03d 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -127,7 +127,7 @@ impl FakeBlock { let mut header = BlockHeader::default(); header.parent_hash = self.parent_hash.clone().into_bytes(); header.timestamp = self.timestamp.map(|ts| Timestamp { - seconds: i64::from_str_radix(&ts.to_string(), 10).unwrap(), + seconds: ts.to_string().parse().unwrap(), nanos: 0, }); block.header = Some(header); From c48cd35f2c1e511161932cd9bab0de986e41c607 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:26:06 -0800 Subject: [PATCH 03/92] all: Fix warnings from clippy::bind_instead_of_map --- node/src/helpers.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/node/src/helpers.rs b/node/src/helpers.rs index fd59d6d8d15..7b2e81335f8 100644 --- a/node/src/helpers.rs +++ b/node/src/helpers.rs @@ -51,9 +51,8 @@ async fn deploy_subgraph( true, ) .await - .and_then(|locator| { + .inspect(|locator| { info!(logger, "Subgraph deployed"; "name" => name.to_string(), "id" => subgraph_id.to_string(), "locator" => locator.to_string()); - Ok(locator) }) } From bf7c33e76a0108c222f77680201d100c2d6fcce2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:29:01 -0800 Subject: [PATCH 04/92] all: Fix warnings from clippy::box_collection --- chain/ethereum/src/polling_block_stream.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/chain/ethereum/src/polling_block_stream.rs b/chain/ethereum/src/polling_block_stream.rs index b10f1976d6c..242cdf3e0f9 100644 --- a/chain/ethereum/src/polling_block_stream.rs +++ b/chain/ethereum/src/polling_block_stream.rs @@ -39,7 +39,7 @@ enum BlockStreamState { /// store up to date with the chain store. /// /// Valid next states: BeginReconciliation - YieldingBlocks(Box<VecDeque<BlockWithTriggers<C>>>), + YieldingBlocks(VecDeque<BlockWithTriggers<C>>), /// The BlockStream experienced an error and is pausing before attempting to produce /// blocks again.
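For context on clippy::box_collection: a VecDeque already keeps its ring buffer on the heap, so wrapping it in a Box only adds a second allocation and an extra pointer indirection. A minimal self-contained sketch of the pattern (the element type here is a stand-in, not code from this repository):

    use std::collections::VecDeque;

    fn main() {
        // Flagged by clippy::box_collection: boxing the collection buys nothing.
        let boxed: Box<VecDeque<u32>> = Box::new(VecDeque::from([1, 2, 3]));
        // Preferred: the plain collection already owns its buffer on the heap.
        let plain: VecDeque<u32> = VecDeque::from([1, 2, 3]);
        assert_eq!(boxed.len(), plain.len());
    }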
@@ -526,8 +526,7 @@ impl Stream for PollingBlockStream { } // Switch to yielding state until next_blocks is depleted - self.state = - BlockStreamState::YieldingBlocks(Box::new(next_blocks)); + self.state = BlockStreamState::YieldingBlocks(next_blocks); // Yield the first block in next_blocks continue; From e51a5dc6245a3570eed0161b0978111c4d56502c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:33:37 -0800 Subject: [PATCH 05/92] all: Fix warnings from clippy::cloned_ref_to_slice_refs --- store/postgres/src/subgraph_store.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 3fb2bda9ecd..8f2faa9ecda 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1030,7 +1030,9 @@ impl Inner { let id = DeploymentHash::new(deployment_id.clone()) .map_err(|id| internal_error!("illegal deployment id {}", id))?; let (store, site) = self.store(&id).await?; - let statuses = store.deployment_statuses(&[site.clone()]).await?; + let statuses = store + .deployment_statuses(std::slice::from_ref(&site)) + .await?; let status = statuses .first() .ok_or_else(|| StoreError::DeploymentNotFound(deployment_id.clone()))?; From 613fbd21900dcda1fdd8548752df3ab26fc54f5a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:41:06 -0800 Subject: [PATCH 06/92] all: Fix warnings from clippy::collapsible_match --- node/src/manager/commands/deployment/info.rs | 37 +++++++++----------- store/postgres/src/chain_store.rs | 31 +++++++--------- 2 files changed, 29 insertions(+), 39 deletions(-) diff --git a/node/src/manager/commands/deployment/info.rs b/node/src/manager/commands/deployment/info.rs index 4d121d3692a..7153dd81995 100644 --- a/node/src/manager/commands/deployment/info.rs +++ b/node/src/manager/commands/deployment/info.rs @@ -148,26 +148,23 @@ fn render( } table.push_row(["Node ID", &optional(deployment.node_id.as_ref())]); table.push_row(["Active", &deployment.is_active.to_string()]); - if let Some((_, status)) = deployments.get(0) { - if let Some(status) = status { - table.push_row(["Paused", &optional(status.is_paused)]); - table.push_row(["Synced", &status.is_synced.to_string()]); - table.push_row(["Health", status.health.as_str()]); - - let earliest = status.earliest_block_number; - let latest = status.latest_block.as_ref().map(|x| x.number); - let chain_head = status.chain_head_block.as_ref().map(|x| x.number); - let behind = match (latest, chain_head) { - (Some(latest), Some(chain_head)) => Some(chain_head - latest), - _ => None, - }; - - table.push_row(["Earliest Block", &earliest.to_string()]); - table.push_row(["Latest Block", &number(latest)]); - table.push_row(["Chain Head Block", &number(chain_head)]); - if let Some(behind) = behind { - table.push_row([" Blocks behind", &behind.to_string()]); - } + if let Some((_, Some(status))) = deployments.get(0) { + table.push_row(["Paused", &optional(status.is_paused)]); + table.push_row(["Synced", &status.is_synced.to_string()]); + table.push_row(["Health", status.health.as_str()]); + let earliest = status.earliest_block_number; + let latest = status.latest_block.as_ref().map(|x| x.number); + let chain_head = status.chain_head_block.as_ref().map(|x| x.number); + let behind = match (latest, chain_head) { + (Some(latest), Some(chain_head)) => Some(chain_head - latest), + _ => None, + }; + + table.push_row(["Earliest Block", &earliest.to_string()]); + table.push_row(["Latest Block", 
&number(latest)]); + table.push_row(["Chain Head Block", &number(chain_head)]); + if let Some(behind) = behind { + table.push_row([" Blocks behind", &behind.to_string()]); } } } diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 44b739abefa..f13c6b8cf6d 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -500,29 +500,22 @@ mod data { chain: &str, ) -> Result, StoreError> { use diesel::dsl::not; - use public::ethereum_networks::dsl::*; + use public::ethereum_networks as n; - match update( - ethereum_networks - .filter(name.eq(chain)) - .filter(not(head_block_cursor.is_null())), + let head_block_number = update( + n::table + .filter(n::name.eq(chain)) + .filter(not(n::head_block_cursor.is_null())), ) - .set(head_block_cursor.eq(None as Option)) - .returning(head_block_number) + .set(n::head_block_cursor.eq(None as Option)) + .returning(n::head_block_number) .get_result::>(conn) .await - .optional() - { - Ok(res) => match res { - Some(opt_num) => match opt_num { - Some(num) => Ok(Some(num as i32)), - None => Ok(None), - }, - None => Ok(None), - }, - Err(e) => Err(e), - } - .map_err(Into::into) + .optional()? + .flatten() + .map(|num| num as i32); + + Ok(head_block_number) } /// Insert a block. If the table already contains a block with the From 5f5daa17c920fc3998a4ccba7eb52d2629c2b7bf Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:48:56 -0800 Subject: [PATCH 07/92] all: Fix warnings from clippy::derivable_impls --- chain/near/src/runtime/generated.rs | 36 +++++++---------------------- node/src/config.rs | 18 ++++----------- node/src/manager/display.rs | 10 +------- 3 files changed, 13 insertions(+), 51 deletions(-) diff --git a/chain/near/src/runtime/generated.rs b/chain/near/src/runtime/generated.rs index 153eb8b5ab5..d8fe2937f43 100644 --- a/chain/near/src/runtime/generated.rs +++ b/chain/near/src/runtime/generated.rs @@ -227,20 +227,15 @@ impl AscIndexId for AscSignature { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscAccessKeyPermissionKind { + #[default] FunctionCall, FullAccess, } impl AscValue for AscAccessKeyPermissionKind {} -impl Default for AscAccessKeyPermissionKind { - fn default() -> Self { - Self::FunctionCall - } -} - #[repr(C)] #[derive(AscType)] pub(crate) struct AscFunctionCallPermission { @@ -293,8 +288,9 @@ impl AscIndexId for AscDataReceiver { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscActionKind { + #[default] CreateAccount, DeployContract, FunctionCall, @@ -307,12 +303,6 @@ pub(crate) enum AscActionKind { impl AscValue for AscActionKind {} -impl Default for AscActionKind { - fn default() -> Self { - Self::CreateAccount - } -} - #[repr(C)] #[derive(AscType)] pub(crate) struct AscCreateAccountAction {} @@ -424,20 +414,15 @@ impl AscIndexId for AscActionReceipt { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscSuccessStatusKind { + #[default] Value, ReceiptId, } impl AscValue for AscSuccessStatusKind {} -impl Default for AscSuccessStatusKind { - fn default() -> Self { - Self::Value - } -} - pub struct AscSuccessStatusEnum(pub(crate) AscEnum); impl AscType for AscSuccessStatusEnum { @@ -458,20 +443,15 @@ impl AscIndexId for AscSuccessStatusEnum { } #[repr(u32)] -#[derive(AscType, Copy, Clone)] +#[derive(AscType, Copy, Clone, Default)] pub(crate) enum AscDirection { + #[default] Left, 
Right, } impl AscValue for AscDirection {} -impl Default for AscDirection { - fn default() -> Self { - Self::Left - } -} - #[repr(C)] #[derive(AscType)] pub(crate) struct AscMerklePathItem { diff --git a/node/src/config.rs b/node/src/config.rs index 0d781375c43..d4d075393d2 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -322,20 +322,15 @@ impl Shard { } } -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, Default)] #[serde(untagged)] pub enum PoolSize { + #[default] None, Fixed(u32), Rule(Vec), } -impl Default for PoolSize { - fn default() -> Self { - Self::None - } -} - impl PoolSize { fn five() -> Self { Self::Fixed(5) @@ -924,9 +919,10 @@ enum ProviderField { Headers, } -#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq)] +#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Default)] pub enum Transport { #[serde(rename = "rpc")] + #[default] Rpc, #[serde(rename = "ws")] Ws, @@ -934,12 +930,6 @@ pub enum Transport { Ipc, } -impl Default for Transport { - fn default() -> Self { - Self::Rpc - } -} - impl std::fmt::Display for Transport { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use Transport::*; diff --git a/node/src/manager/display.rs b/node/src/manager/display.rs index 7d27b8269cb..289497fbe2e 100644 --- a/node/src/manager/display.rs +++ b/node/src/manager/display.rs @@ -58,6 +58,7 @@ impl List { /// A more general list of columns than `List`. In practical terms, this is /// a very simple table with two columns, where both columns are /// left-aligned +#[derive(Default)] pub struct Columns { widths: Vec, rows: Vec, @@ -84,15 +85,6 @@ impl Columns { } } -impl Default for Columns { - fn default() -> Self { - Self { - widths: Vec::new(), - rows: Vec::new(), - } - } -} - pub enum Row { Cells(Vec), Separator, From 7680b60d6d5c36bbe04869193805a23002a9a11b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 11:53:03 -0800 Subject: [PATCH 08/92] all: Fix warnings from clippy::doc_lazy_continuation --- graph/src/data/subgraph/mod.rs | 2 +- graph/src/runtime/asc_ptr.rs | 3 +++ graph/src/schema/input/mod.rs | 6 +++--- node/src/chain.rs | 2 +- store/postgres/src/block_range.rs | 1 + 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 2aede7a76ba..3e6f8f35ffb 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -414,7 +414,7 @@ pub struct Link { /// Custom deserializer for Link /// This handles both formats: /// 1. Simple string: "schema.graphql" or "subgraph.yaml" which is used in [`FileLinkResolver`] -/// FileLinkResolver is used in local development environments +/// FileLinkResolver is used in local development environments /// 2. IPLD format: { "/": "Qm..." } which is used in [`IpfsLinkResolver`] impl<'de> de::Deserialize<'de> for Link { fn deserialize(deserializer: D) -> Result diff --git a/graph/src/runtime/asc_ptr.rs b/graph/src/runtime/asc_ptr.rs index f3c783ac4cd..b37cf117a80 100644 --- a/graph/src/runtime/asc_ptr.rs +++ b/graph/src/runtime/asc_ptr.rs @@ -140,6 +140,7 @@ impl AscPtr { /// - gc_info2: usize -> second GC info (we don't free memory so it's irrelevant) /// - rt_id: u32 -> identifier for the class being allocated /// - rt_size: u32 -> content size + /// /// Only used for version >= 0.0.5. async fn generate_header( heap: &mut H, @@ -169,12 +170,14 @@ impl AscPtr { } /// Helper to read the length from the header. 
+ /// /// An AssemblyScript header has 20 bytes, and it's right before the content, and composed by: /// - mm_info: usize /// - gc_info: usize /// - gc_info2: usize /// - rt_id: u32 /// - rt_size: u32 + /// /// This function returns the `rt_size`. /// Only used for version >= 0.0.5. pub fn read_len( diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index 51678a3f9db..e458dced990 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -2426,10 +2426,10 @@ mod validations { /// * `source` is an existing timeseries type /// * all non-aggregate fields are also fields on the `source` - /// type and have the same type + /// type and have the same type /// * `arg` for each `@aggregate` is a numeric type in the - /// timeseries, coercible to the type of the field (e.g. `Int -> - /// BigDecimal`, but not `BigInt -> Int8`) + /// timeseries, coercible to the type of the field (e.g. `Int + /// -> BigDecimal`, but not `BigInt -> Int8`) fn aggregate_directive( schema: &Schema, agg_type: &s::ObjectType, diff --git a/node/src/chain.rs b/node/src/chain.rs index 9df653bc9d2..994bc5a9f1d 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -273,7 +273,7 @@ pub async fn create_ethereum_networks_for_chain( /// Deep integration chains (explicitly defined on the graph-node like Ethereum, Near, etc): /// - These can have adapter of any type. Adapters of firehose and rpc types are used by the Chain implementation, aka deep integration /// - The substreams adapters will trigger the creation of a Substreams chain, the priority for the block ingestor setup depends on the chain, if enabled at all. -/// Substreams Chain(chains the graph-node knows nothing about and are only accessible through substreams): +/// Substreams Chain(chains the graph-node knows nothing about and are only accessible through substreams): /// - This chain type is more generic and can only have adapters of substreams type. /// - Substreams chain are created as a "secondary" chain for deep integrations but in that case the block ingestor should be run by the main/deep integration chain. /// - These chains will use SubstreamsBlockIngestor by default. diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index d6044c644ad..51dbc4d1b9a 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -31,6 +31,7 @@ pub(crate) const BLOCK_RANGE_CURRENT: &str = "block_range @> 2147483647"; /// - any CRUD operation modifies such an entity in place /// - queries by a block number consider such an entity as present for /// any block number +/// /// We therefore mark such entities with a block range `[-1,\infinity)`; we /// use `-1` as the lower bound to make it easier to identify such entities /// for troubleshooting/debugging From 6bda834cc4d70875817a88f604226639696e0acd Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 12:08:44 -0800 Subject: [PATCH 09/92] all: Fix warnings from clippy::doc_overindented_list_items --- store/postgres/src/advisory_lock.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/store/postgres/src/advisory_lock.rs b/store/postgres/src/advisory_lock.rs index e012f08e82a..6b0fc671ffa 100644 --- a/store/postgres/src/advisory_lock.rs +++ b/store/postgres/src/advisory_lock.rs @@ -9,10 +9,8 @@ //! * 1: to synchronize on migratons //! //! We use the following 2x 32-bit locks -//! * 1, n: to lock copying of the deployment with id n in the destination -//! shard -//! 
* 2, n: to lock the deployment with id n to make sure only one write -//! happens to it +//! * 1, n: to lock copying of the deployment with id n in the destination shard +//! * 2, n: to lock the deployment with id n to make sure only one write happens to it use diesel::sql_query; use diesel::sql_types::Bool; From 6ee0ea869fbe38293af9d6c5f448353427377362 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 12:21:14 -0800 Subject: [PATCH 10/92] all: Fix warnings from clippy::empty_line_after_doc_comments --- graph/src/util/cache_weight.rs | 3 +-- runtime/wasm/src/asc_abi/class.rs | 1 - runtime/wasm/src/asc_abi/v0_0_4.rs | 2 +- runtime/wasm/src/asc_abi/v0_0_5.rs | 2 +- runtime/wasm/src/lib.rs | 2 +- 5 files changed, 4 insertions(+), 6 deletions(-) diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index 3c1bf1bec10..077db9a51ce 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -265,13 +265,12 @@ fn derive_cache_weight() { /// number of entries divided by `NODE_FILL`, and the number of /// interior nodes can be determined by dividing the number of nodes /// at the child level by `NODE_FILL` - +/// /// The other difficulty is that the structs with which `BTreeMap` /// represents internal and leaf nodes are not public, so we can't /// get their size with `std::mem::size_of`; instead, we base our /// estimates of their size on the current `std` code, assuming that /// these structs will not change - pub mod btree { use std::collections::BTreeMap; use std::mem; diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index 6db9bcc0df6..e33e788bdd9 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -20,7 +20,6 @@ use semver::Version; ///! Rust types that have with a direct correspondence to an Asc class, ///! with their `AscType` implementations. - /// Wrapper of ArrayBuffer for multiple AssemblyScript versions. /// It just delegates its method calls to the correct mappings apiVersion. pub enum ArrayBuffer { diff --git a/runtime/wasm/src/asc_abi/v0_0_4.rs b/runtime/wasm/src/asc_abi/v0_0_4.rs index 3d5ced0a25f..3a4b85b8030 100644 --- a/runtime/wasm/src/asc_abi/v0_0_4.rs +++ b/runtime/wasm/src/asc_abi/v0_0_4.rs @@ -12,7 +12,7 @@ use graph_runtime_derive::AscType; use crate::asc_abi::class; /// Module related to AssemblyScript version v0.6. - +/// /// Asc std ArrayBuffer: "a generic, fixed-length raw binary data buffer". /// See https://github.com/AssemblyScript/assemblyscript/wiki/Memory-Layout-&-Management/86447e88be5aa8ec633eaf5fe364651136d136ab#arrays pub struct ArrayBuffer { diff --git a/runtime/wasm/src/asc_abi/v0_0_5.rs b/runtime/wasm/src/asc_abi/v0_0_5.rs index 3497a88b173..8f42a158d84 100644 --- a/runtime/wasm/src/asc_abi/v0_0_5.rs +++ b/runtime/wasm/src/asc_abi/v0_0_5.rs @@ -16,7 +16,7 @@ use crate::asc_abi::class; /// All `to_asc_bytes`/`from_asc_bytes` only consider the #data/content/payload /// not the #header, that's handled on `AscPtr`. /// Header in question: https://www.assemblyscript.org/memory.html#common-header-layout - +/// /// Similar as JS ArrayBuffer, "a generic, fixed-length raw binary data buffer". 
/// See https://www.assemblyscript.org/memory.html#arraybuffer-layout pub struct ArrayBuffer { diff --git a/runtime/wasm/src/lib.rs b/runtime/wasm/src/lib.rs index a9b28f872f1..7c543a4c128 100644 --- a/runtime/wasm/src/lib.rs +++ b/runtime/wasm/src/lib.rs @@ -4,7 +4,7 @@ mod host; pub mod to_from; /// Public interface of the crate, receives triggers to be processed. - +/// /// Pre-processes modules and manages their threads. Serves as an interface from `host` to `module`. pub mod mapping; From 8a0ccfb03ac24dba15057a36f2068e9118c3e5a7 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 12:22:53 -0800 Subject: [PATCH 11/92] all: Fix warnings from clippy::enum_variant_names --- store/postgres/src/sql/validation.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/store/postgres/src/sql/validation.rs b/store/postgres/src/sql/validation.rs index 0bbca47853c..a147a6fb2ad 100644 --- a/store/postgres/src/sql/validation.rs +++ b/store/postgres/src/sql/validation.rs @@ -38,7 +38,7 @@ pub enum Error { #[error("Qualified table names are not supported: {0}")] NoQualifiedTables(String), #[error("Internal error: {0}")] - InternalError(String), + Internal(String), } /// A wrapper around table names that correctly handles quoted vs unquoted @@ -108,7 +108,7 @@ impl CteStack { fn add_ctes(&mut self, ctes: &[Cte]) -> ControlFlow { let Some(entry) = self.stack.last_mut() else { - return ControlFlow::Break(Error::InternalError("CTE stack is empty".into())); + return ControlFlow::Break(Error::Internal("CTE stack is empty".into())); }; for cte in ctes { entry.insert(TableName::from(&cte.alias.name)); From e622c3a6e6d6264bda78cb27e42575cc9f1a6289 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 12:28:22 -0800 Subject: [PATCH 12/92] all: Fix warnings from clippy::explicit_counter_loop --- gnd/src/watcher.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/gnd/src/watcher.rs b/gnd/src/watcher.rs index 3171f240128..bc13b96a263 100644 --- a/gnd/src/watcher.rs +++ b/gnd/src/watcher.rs @@ -294,8 +294,7 @@ pub async fn deploy_all_subgraphs( sender: &Sender<(DeploymentHash, SubgraphName)>, ) -> Result<()> { info!(logger, "File change detected, redeploying all subgraphs"); - let mut count = 0; - for manifest_path in manifests_paths { + for (count, manifest_path) in manifests_paths.iter().enumerate() { let alias_name = source_subgraph_aliases .iter() .find(|(_, path)| path == &manifest_path) @@ -312,7 +311,6 @@ pub async fn deploy_all_subgraphs( .map_err(|_| anyhow!("Failed to create subgraph name"))?, )) .await; - count += 1; } Ok(()) } From bc06863b6c594992ae5d1243f7917048b464f7f2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 12:44:24 -0800 Subject: [PATCH 13/92] all: Fix warnings from clippy::explicit_write --- node/src/manager/commands/listen.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/src/manager/commands/listen.rs b/node/src/manager/commands/listen.rs index d53dfaae455..f24b0da8002 100644 --- a/node/src/manager/commands/listen.rs +++ b/node/src/manager/commands/listen.rs @@ -16,7 +16,7 @@ async fn listen(mgr: Arc) -> Result<(), Error> { .for_each(move |event| { serde_json::to_writer_pretty(std::io::stdout(), &event) .expect("event can be serialized to JSON"); - writeln!(std::io::stdout()).unwrap(); + println!(""); std::io::stdout().flush().unwrap(); future::ready(()) }) From 67ec3de1f5611ec6fa84354133d63d17db653a30 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 
12:45:59 -0800 Subject: [PATCH 14/92] all: Fix warnings from clippy::extra_unused_lifetimes --- graph/src/amp/sql/query_builder/block_range_query.rs | 2 +- graph/src/data/store/id.rs | 2 +- graphql/src/store/query.rs | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/graph/src/amp/sql/query_builder/block_range_query.rs b/graph/src/amp/sql/query_builder/block_range_query.rs index 6a89eb02ce7..dde44d803ad 100644 --- a/graph/src/amp/sql/query_builder/block_range_query.rs +++ b/graph/src/amp/sql/query_builder/block_range_query.rs @@ -17,7 +17,7 @@ use super::{extract_tables, parse_query, TableReference}; /// All the table references in the original SQL query are replaced with the created CTE names. /// /// The output is ordered by block numbers. -pub(super) fn new_block_range_query<'a>( +pub(super) fn new_block_range_query( query: &ast::Query, block_number_column: &str, block_range: &RangeInclusive, diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs index 1d5a274a522..6ce909e81bd 100644 --- a/graph/src/data/store/id.rs +++ b/graph/src/data/store/id.rs @@ -89,7 +89,7 @@ impl IdType { } } -impl<'a> TryFrom<&s::ObjectType> for IdType { +impl TryFrom<&s::ObjectType> for IdType { type Error = Error; fn try_from(obj_type: &s::ObjectType) -> Result { diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 42a60575984..7cf3370597d 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -219,7 +219,7 @@ fn build_list_filter_from_value( } /// build a filter which has list of nested filters -fn build_list_filter_from_object<'a>( +fn build_list_filter_from_object( entity: &ObjectOrInterface, object: &Object, schema: &InputSchema, @@ -235,7 +235,7 @@ fn build_list_filter_from_object<'a>( } /// Parses a GraphQL input object into an EntityFilter, if present. 
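For context on clippy::extra_unused_lifetimes: the lint fires when a declared lifetime parameter never constrains any argument or return type, so it can simply be deleted, as in the signatures below. A minimal sketch with hypothetical functions (not from this repository):

    // Before: `'a` is declared but nothing in the signature uses it.
    fn label<'a>(count: usize) -> String {
        format!("{count} items")
    }

    // After: the unused lifetime parameter is dropped; behavior is unchanged.
    fn label_fixed(count: usize) -> String {
        format!("{count} items")
    }

    fn main() {
        assert_eq!(label(2), label_fixed(2));
    }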
-fn build_filter_from_object<'a>( +fn build_filter_from_object( entity: &ObjectOrInterface, object: &Object, schema: &InputSchema, From c167ceea79ab6ed46c95b4bca53f58ca38b87ec8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 12:59:00 -0800 Subject: [PATCH 15/92] all: Fix warnings from clippy::field_reassign_with_default --- node/src/bin/manager.rs | 11 ++++---- store/test-store/src/block_store.rs | 42 +++++++++++++++-------------- 2 files changed, 28 insertions(+), 25 deletions(-) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index f8c28079f13..9bcc016d74e 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -860,11 +860,12 @@ pub enum CheckBlockMethod { impl From for config::Opt { fn from(opt: Opt) -> Self { - let mut config_opt = config::Opt::default(); - config_opt.config = Some(opt.config); - config_opt.store_connection_pool_size = 5; - config_opt.node_id = opt.node_id; - config_opt + config::Opt { + config: Some(opt.config), + store_connection_pool_size: 5, + node_id: opt.node_id, + ..Default::default() + } } } diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index b38d5aae03d..1d8559a9d2c 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -105,13 +105,13 @@ impl FakeBlock { pub fn as_ethereum_block(&self) -> EthereumBlock { let parent_hash = H256::from_str(self.parent_hash.as_str()).expect("invalid parent hash"); - let mut block = LightEthereumBlock::default(); - block.number = Some(self.number.into()); - block.parent_hash = parent_hash; - block.hash = Some(H256(self.block_hash().as_slice().try_into().unwrap())); - if let Some(ts) = self.timestamp { - block.timestamp = ts; - } + let block = LightEthereumBlock { + number: Some(self.number.into()), + parent_hash, + hash: Some(H256(self.block_hash().as_slice().try_into().unwrap())), + timestamp: self.timestamp.unwrap_or(U256::default()), + ..Default::default() + }; EthereumBlock { block: Arc::new(block), @@ -120,19 +120,21 @@ impl FakeBlock { } pub fn as_firehose_block(&self) -> Block { - let mut block = Block::default(); - block.hash = self.hash.clone().into_bytes(); - block.number = self.number as u64; - - let mut header = BlockHeader::default(); - header.parent_hash = self.parent_hash.clone().into_bytes(); - header.timestamp = self.timestamp.map(|ts| Timestamp { - seconds: ts.to_string().parse().unwrap(), - nanos: 0, - }); - block.header = Some(header); - - block + let header = BlockHeader { + parent_hash: self.parent_hash.clone().into_bytes(), + timestamp: self.timestamp.map(|ts| Timestamp { + seconds: ts.to_string().parse().unwrap(), + nanos: 0, + }), + ..Default::default() + }; + + Block { + hash: self.hash.clone().into_bytes(), + number: self.number as u64, + header: Some(header), + ..Default::default() + } } } From 2eba5fbc276eccd1748aa43e160a6b6d880b028c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:00:33 -0800 Subject: [PATCH 16/92] all: Fix warnings from clippy::from_over_into --- chain/ethereum/src/adapter.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index efadb95c089..7a04fa5cf18 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -614,16 +614,16 @@ pub struct EthereumCallFilter { pub wildcard_signatures: HashSet, } -impl Into> for EthereumCallFilter { - fn into(self) -> Vec { - if self.is_empty() { +impl From for Vec { + fn from(val: 
EthereumCallFilter) -> Self { + if val.is_empty() { return Vec::new(); } let EthereumCallFilter { contract_addresses_function_signatures, wildcard_signatures, - } = self; + } = val; let mut filters: Vec = contract_addresses_function_signatures .into_iter() @@ -810,9 +810,9 @@ pub struct EthereumBlockFilter { pub trigger_every_block: bool, } -impl Into> for EthereumBlockFilter { - fn into(self) -> Vec { - self.contract_addresses +impl From for Vec { + fn from(val: EthereumBlockFilter) -> Self { + val.contract_addresses .into_iter() .map(|(_, addr)| addr) .sorted() From bb2d187ea5ad29a5387932184b333959cef914a3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:03:38 -0800 Subject: [PATCH 17/92] all: Run 'cargo clippy --fix' (again) --- .../amp_subgraph/runner/data_processing.rs | 2 +- core/src/amp_subgraph/runner/data_stream.rs | 2 +- core/src/amp_subgraph/runner/latest_blocks.rs | 11 +++---- core/src/amp_subgraph/runner/reorg_handler.rs | 2 +- core/src/subgraph_provider.rs | 3 +- graph/src/amp/codec/array_decoder.rs | 2 +- graph/src/amp/codec/mapping_decoder.rs | 2 +- graph/src/amp/codec/utils.rs | 2 +- graph/src/amp/manifest/data_source/raw.rs | 4 +-- graph/src/amp/schema/generator/entity.rs | 4 +-- .../query_builder/event_signature_resolver.rs | 10 +++--- .../record_batch/aggregator.rs | 6 ++-- graph/src/data/subgraph/mod.rs | 17 ++++------ graph/src/data_source/mod.rs | 4 +-- graph/src/env/amp.rs | 4 +-- graph/src/ipfs/test_utils.rs | 2 +- graph/src/schema/input/mod.rs | 3 +- node/src/chain.rs | 8 ++--- node/src/config.rs | 4 +-- node/src/launcher.rs | 32 +++++++++---------- node/src/manager/color.rs | 10 ++++-- node/src/manager/commands/config.rs | 2 +- node/src/manager/commands/copy.rs | 2 +- node/src/manager/commands/deployment/info.rs | 2 +- node/src/manager/commands/listen.rs | 2 +- node/src/manager/commands/prune.rs | 6 ++-- node/src/manager/commands/rewind.rs | 4 +-- node/src/manager/display.rs | 6 +--- node/src/manager/fmt.rs | 4 +-- node/src/network_setup.rs | 12 +++---- server/http/src/service.rs | 2 +- store/test-store/src/block_store.rs | 2 +- 32 files changed, 83 insertions(+), 95 deletions(-) diff --git a/core/src/amp_subgraph/runner/data_processing.rs b/core/src/amp_subgraph/runner/data_processing.rs index 83b113922e1..8c403de2b7f 100644 --- a/core/src/amp_subgraph/runner/data_processing.rs +++ b/core/src/amp_subgraph/runner/data_processing.rs @@ -258,7 +258,7 @@ fn decode_block_timestamp(record_batches: &[StreamRecordBatch]) -> Result { return decoder .decode(0) - .map_err(|e| Error::Deterministic(e))? + .map_err(Error::Deterministic)? 
.ok_or_else(|| Error::Deterministic(anyhow!("block timestamp is empty"))); } Err(e) => { diff --git a/core/src/amp_subgraph/runner/data_stream.rs b/core/src/amp_subgraph/runner/data_stream.rs index 7f3636a5af9..ad6d6d471f8 100644 --- a/core/src/amp_subgraph/runner/data_stream.rs +++ b/core/src/amp_subgraph/runner/data_stream.rs @@ -43,7 +43,7 @@ where ); loop { - let next_block_ranges = next_block_ranges(&cx, latest_queried_block, latest_block); + let next_block_ranges = next_block_ranges(cx, latest_queried_block, latest_block); if next_block_ranges.is_empty() { if data_streams.is_empty() { diff --git a/core/src/amp_subgraph/runner/latest_blocks.rs b/core/src/amp_subgraph/runner/latest_blocks.rs index 559aef963cd..cb62f2e3a42 100644 --- a/core/src/amp_subgraph/runner/latest_blocks.rs +++ b/core/src/amp_subgraph/runner/latest_blocks.rs @@ -31,7 +31,7 @@ impl LatestBlocks { .data_sources .iter() .enumerate() - .map(|(i, data_source)| { + .flat_map(|(i, data_source)| { data_source .source .tables @@ -39,10 +39,9 @@ impl LatestBlocks { .enumerate() .map(move |(j, table)| ((i, j), &data_source.source.dataset, table)) }) - .flatten() .unique_by(|(_, dataset, table)| (dataset.to_string(), table.to_string())) .map(|(table_ptr, dataset, table)| { - latest_block(&cx, dataset, table) + latest_block(cx, dataset, table) .map_ok(move |latest_block| (table_ptr, latest_block)) .map_err(move |e| { e.context(format!( @@ -100,7 +99,7 @@ impl LatestBlocks { let dataset = &source.dataset; let table = &source.tables[j]; - latest_block_changed(&cx, dataset, table, latest_block).map_err(move |e| { + latest_block_changed(cx, dataset, table, latest_block).map_err(move |e| { e.context(format!( "failed to check if the latest block changed in '{dataset}.{table}'" )) @@ -136,9 +135,9 @@ where let record_batch = read_once(stream).await?; let latest_block = block_number_decoder(&record_batch, 0) - .map_err(|e| Error::Deterministic(e))? + .map_err(Error::Deterministic)? .decode(0) - .map_err(|e| Error::Deterministic(e))? + .map_err(Error::Deterministic)? 
.ok_or_else(|| Error::NonDeterministic(anyhow!("table is empty")))?; Ok(latest_block) diff --git a/core/src/amp_subgraph/runner/reorg_handler.rs b/core/src/amp_subgraph/runner/reorg_handler.rs index 911c4ebf818..03130b19625 100644 --- a/core/src/amp_subgraph/runner/reorg_handler.rs +++ b/core/src/amp_subgraph/runner/reorg_handler.rs @@ -103,7 +103,7 @@ where let table = &data_source.source.tables[*j]; detect_reorg( - &cx, + cx, network, dataset, table, diff --git a/core/src/subgraph_provider.rs b/core/src/subgraph_provider.rs index cbfb60a5e11..2bd3d36f0a9 100644 --- a/core/src/subgraph_provider.rs +++ b/core/src/subgraph_provider.rs @@ -287,8 +287,7 @@ impl SubgraphProcessingKind { .filter_map(Value::as_mapping) .filter_map(|map| map.get("kind")) .filter_map(Value::as_str) - .filter(|kind| *kind == amp::manifest::DataSource::KIND) - .next() + .find(|kind| *kind == amp::manifest::DataSource::KIND) }) .is_some(); diff --git a/graph/src/amp/codec/array_decoder.rs b/graph/src/amp/codec/array_decoder.rs index e74a777cb12..f7a480f38d0 100644 --- a/graph/src/amp/codec/array_decoder.rs +++ b/graph/src/amp/codec/array_decoder.rs @@ -470,7 +470,7 @@ impl Decoder>> for ArrayDecoder<'_, TimestampNanosecondArra } } -fn downcast_ref<'a, T>(array: &'a dyn Array) -> Result<&'a T> +fn downcast_ref(array: &dyn Array) -> Result<&T> where T: Array + 'static, { diff --git a/graph/src/amp/codec/mapping_decoder.rs b/graph/src/amp/codec/mapping_decoder.rs index b0c85e9d2e6..19a81cb6dcb 100644 --- a/graph/src/amp/codec/mapping_decoder.rs +++ b/graph/src/amp/codec/mapping_decoder.rs @@ -27,6 +27,6 @@ where fn decode(&self, row_index: usize) -> Result { let value = self.decoder.decode(row_index)?; - Ok((&self.mapping)(value)) + Ok((self.mapping)(value)) } } diff --git a/graph/src/amp/codec/utils.rs b/graph/src/amp/codec/utils.rs index 4f6ba4ff0b1..b8504d84bca 100644 --- a/graph/src/amp/codec/utils.rs +++ b/graph/src/amp/codec/utils.rs @@ -92,7 +92,7 @@ where } } - return None; + None } pub fn column_decoder<'a, T: 'static, U>( diff --git a/graph/src/amp/manifest/data_source/raw.rs b/graph/src/amp/manifest/data_source/raw.rs index 15e2ceb9237..3369eebc54d 100644 --- a/graph/src/amp/manifest/data_source/raw.rs +++ b/graph/src/amp/manifest/data_source/raw.rs @@ -382,7 +382,7 @@ impl RawAbi { let file_bytes = link_resolver .cat( - &LinkResolverContext::new(&DeploymentHash::default(), &logger), + &LinkResolverContext::new(&DeploymentHash::default(), logger), &(file.into()), ) .await @@ -559,7 +559,7 @@ impl RawTable { let record_batch = RecordBatch::new_empty(schema.into()); let (block_number_column, _) = - auto_block_number_decoder(&record_batch).map_err(|e| Error::InvalidQuery(e))?; + auto_block_number_decoder(&record_batch).map_err(Error::InvalidQuery)?; let need_block_hash_column = auto_block_hash_decoder(&record_batch).is_err(); let need_block_timestamp_column = input_schema diff --git a/graph/src/amp/schema/generator/entity.rs b/graph/src/amp/schema/generator/entity.rs index 7e3fa5b8f6c..88745bce51b 100644 --- a/graph/src/amp/schema/generator/entity.rs +++ b/graph/src/amp/schema/generator/entity.rs @@ -45,9 +45,9 @@ impl SchemaEntity { impl fmt::Display for SchemaEntity { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write! {f, "type {} @entity(immutable: true)", self.name.to_pascal_case()}?; - write! {f, " {{\n"}?; + writeln! {f, " {{"}?; for field in &self.fields { - write! {f, "\t{field}\n"}?; + writeln! {f, "\t{field}"}?; } write! 
{f, "}}"} } diff --git a/graph/src/amp/sql/query_builder/event_signature_resolver.rs b/graph/src/amp/sql/query_builder/event_signature_resolver.rs index 89ab8a31a51..ac37068ea3d 100644 --- a/graph/src/amp/sql/query_builder/event_signature_resolver.rs +++ b/graph/src/amp/sql/query_builder/event_signature_resolver.rs @@ -56,7 +56,7 @@ fn visit_expr(expr: &mut ast::Expr, abis: &[(&str, &JsonAbi)]) -> Result<()> { Ok(()) } -fn get_args<'a>(function: &'a ast::Function) -> Option<(&'a str, &'a str)> { +fn get_args(function: &ast::Function) -> Option<(&str, &str)> { let ast::FunctionArguments::List(args) = &function.args else { return None; }; @@ -71,7 +71,7 @@ fn get_args<'a>(function: &'a ast::Function) -> Option<(&'a str, &'a str)> { } } -fn get_arg<'a>(arg: &'a ast::FunctionArg) -> Option<&'a str> { +fn get_arg(arg: &ast::FunctionArg) -> Option<&str> { let ast::FunctionArg::Unnamed(ast::FunctionArgExpr::Expr(expr)) = arg else { return None; }; @@ -92,10 +92,8 @@ fn get_event<'a>( ) -> Option<&'a alloy::json_abi::Event> { abis.iter() .filter(|(name, _)| *name == contract_name) - .map(|(_, contract)| contract.event(event_name)) - .flatten() - .map(|events| events.first()) - .flatten() + .filter_map(|(_, contract)| contract.event(event_name)) + .filter_map(|events| events.first()) .next() } diff --git a/graph/src/amp/stream_aggregator/record_batch/aggregator.rs b/graph/src/amp/stream_aggregator/record_batch/aggregator.rs index f513a2752ed..f2c9cff13e0 100644 --- a/graph/src/amp/stream_aggregator/record_batch/aggregator.rs +++ b/graph/src/amp/stream_aggregator/record_batch/aggregator.rs @@ -107,7 +107,7 @@ impl Aggregator { return iter.next(); } - iter.skip(1).next() + iter.nth(1) } /// Returns `true` if this aggregator contains completed groups. @@ -220,8 +220,8 @@ impl Aggregator { if block_number == max_block_number && block_hash != max_block_hash { bail!( "received block hash '0x{}' after '0x{}' for block number {block_number}", - hex::encode(&block_hash), - hex::encode(&max_block_hash) + hex::encode(block_hash), + hex::encode(max_block_hash) ); } diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 3e6f8f35ffb..75e083e89d3 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -1185,16 +1185,13 @@ impl UnresolvedSubgraphManifest { let schema = match schema { Some(schema) => schema, None if amp_data_sources.len() == data_sources.len() => { - let table_schemas = amp_data_sources - .iter() - .map(|data_source| { - data_source - .transformer - .tables - .iter() - .map(|table| (table.name.clone(), table.schema.clone())) - }) - .flatten(); + let table_schemas = amp_data_sources.iter().flat_map(|data_source| { + data_source + .transformer + .tables + .iter() + .map(|table| (table.name.clone(), table.schema.clone())) + }); amp::schema::generate_subgraph_schema(&id, table_schemas)? 
} diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 08130ad000e..cd34ca62857 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -708,7 +708,7 @@ impl<'de, C: Blockchain> Deserialize<'de> for UnresolvedDataSource { amp::manifest::data_source::RawDataSource::deserialize(map.into_deserializer()) .map(UnresolvedDataSource::Amp) .map_err(serde::de::Error::custom) - } else if (&C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { + } else if (C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { C::UnresolvedDataSource::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map(UnresolvedDataSource::Onchain) @@ -742,7 +742,7 @@ impl<'de, C: Blockchain> Deserialize<'de> for UnresolvedDataSourceTemplate { subgraph::UnresolvedDataSourceTemplate::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map(UnresolvedDataSourceTemplate::Subgraph) - } else if (&C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { + } else if (C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { C::UnresolvedDataSourceTemplate::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map(UnresolvedDataSourceTemplate::Onchain) diff --git a/graph/src/env/amp.rs b/graph/src/env/amp.rs index ef4fff7c1dc..a6a02b194c3 100644 --- a/graph/src/env/amp.rs +++ b/graph/src/env/amp.rs @@ -50,11 +50,11 @@ impl AmpEnv { .unwrap_or(Self::DEFAULT_MAX_BUFFER_SIZE), max_block_range: raw_env .amp_max_block_range - .and_then(|mut value| { + .map(|mut value| { if value == 0 { value = usize::MAX; } - Some(value) + value }) .unwrap_or(Self::DEFAULT_MAX_BLOCK_RANGE), query_retry_min_delay: raw_env diff --git a/graph/src/ipfs/test_utils.rs b/graph/src/ipfs/test_utils.rs index c2ccbbf0650..405d46539ea 100644 --- a/graph/src/ipfs/test_utils.rs +++ b/graph/src/ipfs/test_utils.rs @@ -18,7 +18,7 @@ impl From> for IpfsAddFile { fn from(content: Vec) -> Self { Self { path: Default::default(), - content: content, + content, } } } diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index e458dced990..104ffd2534b 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -1782,8 +1782,7 @@ mod validations { if subgraph_schema_type .directives .iter() - .find(|directive| !directive.name.eq("fulltext")) - .is_some() + .any(|directive| !directive.name.eq("fulltext")) { Some(SchemaValidationError::InvalidSchemaTypeDirectives) } else { diff --git a/node/src/chain.rs b/node/src/chain.rs index 994bc5a9f1d..06e8bfa155b 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -108,7 +108,7 @@ pub fn create_firehose_networks( let parsed_networks = networks_by_kind .entry((chain.protocol, name.clone())) - .or_insert_with(Vec::new); + .or_default(); // Create n FirehoseEndpoints where n is the size of the pool. If a // subgraph limit is defined for this endpoint then each endpoint @@ -140,7 +140,7 @@ pub fn create_firehose_networks( AdapterConfiguration::Firehose(FirehoseAdapterConfig { chain_id, kind, - adapters: endpoints.into(), + adapters: endpoints, }) }) .collect() @@ -172,7 +172,7 @@ pub async fn create_ethereum_networks( ) }); - Ok(try_join_all(eth_networks_futures).await?) + try_join_all(eth_networks_futures).await } /// Parses a single Ethereum connection string and returns its network name and `EthereumAdapter`. 
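The hunk above removes a needless `Ok(expr?)` round trip: the `?` unwraps a `Result` that is immediately rewrapped, so when the error type already matches, the inner expression can be returned directly. A minimal sketch (a stand-in function, not from this repository):

    fn parse_port(input: &str) -> Result<u16, std::num::ParseIntError> {
        // Instead of `Ok(input.trim().parse()?)`, return the Result as-is.
        input.trim().parse()
    }

    fn main() {
        assert_eq!(parse_port(" 8080 "), Ok(8080));
    }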
@@ -311,7 +311,7 @@ pub async fn networks_as_chains( None => { let ident = match timeout( config.genesis_validation_timeout, - networks.chain_identifier(&logger, chain_id), + networks.chain_identifier(logger, chain_id), ) .await { diff --git a/node/src/config.rs b/node/src/config.rs index d4d075393d2..2443f8ce253 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -306,7 +306,7 @@ impl Shard { let mut url = Url::parse(shellexpand::env(&self.connection)?.as_ref())?; // Put the PGAPPNAME into the URL since tokio-postgres ignores this // environment variable - if let Some(app_name) = std::env::var("PGAPPNAME").ok() { + if let Ok(app_name) = std::env::var("PGAPPNAME") { let query = match url.query() { Some(query) => { format!("{query}&application_name={app_name}") @@ -502,7 +502,7 @@ impl ChainSection { })?; let (features, url_str) = rest.split_at(colon); - let (url, features) = if vec!["http", "https", "ws", "wss"].contains(&features) { + let (url, features) = if ["http", "https", "ws", "wss"].contains(&features) { (rest, DEFAULT_PROVIDER_FEATURES.to_vec()) } else { (&url_str[1..], features.split(',').collect()) diff --git a/node/src/launcher.rs b/node/src/launcher.rs index 944b80d4530..9c0bef19e44 100644 --- a/node/src/launcher.rs +++ b/node/src/launcher.rs @@ -119,7 +119,7 @@ async fn build_blockchain_map( let network_adapters = Networks::from_config( logger.cheap_clone(), - &config, + config, metrics_registry.cheap_clone(), endpoint_metrics, &provider_checks, @@ -129,10 +129,10 @@ async fn build_blockchain_map( let blockchain_map = network_adapters .blockchain_map( - &env_vars, - &logger, + env_vars, + logger, block_store, - &logger_factory, + logger_factory, metrics_registry.cheap_clone(), chain_head_update_listener, ) @@ -184,7 +184,7 @@ async fn spawn_block_ingestor( metrics_registry: &Arc, ) { let logger = logger.clone(); - let ingestors = Networks::block_ingestors(&logger, &blockchain_map) + let ingestors = Networks::block_ingestors(&logger, blockchain_map) .await .expect("unable to start block ingestors"); @@ -295,7 +295,7 @@ where if let Some(amp_client) = amp_client.cheap_clone() { let amp_instance_manager = graph_core::amp_subgraph::Manager::new( - &logger_factory, + logger_factory, metrics_registry.cheap_clone(), env_vars.cheap_clone(), &cancel_token, @@ -311,7 +311,7 @@ where } let subgraph_instance_manager = graph_core::subgraph::SubgraphInstanceManager::new( - &logger_factory, + logger_factory, env_vars.cheap_clone(), network_store.subgraph_store(), blockchain_map.cheap_clone(), @@ -330,7 +330,7 @@ where ); let subgraph_provider = graph_core::subgraph_provider::SubgraphProvider::new( - &logger_factory, + logger_factory, sg_count.cheap_clone(), network_store.subgraph_store(), link_resolver.cheap_clone(), @@ -342,8 +342,9 @@ where let version_switching_mode = ENV_VARS.subgraph_version_switching_mode; // Create named subgraph provider for resolving subgraph name->ID mappings - let subgraph_registrar = Arc::new(graph_core::subgraph::SubgraphRegistrar::new( - &logger_factory, + + Arc::new(graph_core::subgraph::SubgraphRegistrar::new( + logger_factory, link_resolver, Arc::new(subgraph_provider), network_store.subgraph_store(), @@ -353,9 +354,7 @@ where node_id.clone(), version_switching_mode, Arc::new(subgraph_settings), - )); - - subgraph_registrar + )) } fn build_graphql_server( @@ -368,20 +367,19 @@ fn build_graphql_server( ) -> GraphQLQueryServer> { let shards: Vec<_> = config.stores.keys().cloned().collect(); let load_manager = Arc::new(LoadManager::new( - &logger, + 
logger, shards, expensive_queries, metrics_registry.clone(), )); let graphql_runner = Arc::new(GraphQlRunner::new( - &logger, + logger, network_store.clone(), load_manager, metrics_registry, )); - let graphql_server = GraphQLQueryServer::new(&logger_factory, graphql_runner.clone()); - graphql_server + GraphQLQueryServer::new(logger_factory, graphql_runner.clone()) } /// Runs the Graph Node by initializing all components and starting all required services diff --git a/node/src/manager/color.rs b/node/src/manager/color.rs index cf10d2e22d4..5c89789f90c 100644 --- a/node/src/manager/color.rs +++ b/node/src/manager/color.rs @@ -15,6 +15,12 @@ pub struct Terminal { spec: ColorSpec, } +impl Default for Terminal { + fn default() -> Self { + Self::new() + } +} + impl Terminal { pub fn set_color_preference(pref: &str) { let choice = match pref { @@ -78,10 +84,10 @@ impl Terminal { F: FnOnce(&mut Self) -> io::Result, { self.spec.set_fg(Some(color)); - self.out.set_color(&self.spec).map_err(io::Error::from)?; + self.out.set_color(&self.spec)?; let res = f(self); self.spec = ColorSpec::new(); - self.out.set_color(&self.spec).map_err(io::Error::from)?; + self.out.set_color(&self.spec)?; res } } diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 8b6d36e9afa..14b70d8d614 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -139,7 +139,7 @@ pub async fn provider( let metrics = Arc::new(EndpointMetrics::mock()); let caps = caps_from_features(features)?; - let networks = Networks::from_config(logger, &config, registry, metrics, &[]).await?; + let networks = Networks::from_config(logger, config, registry, metrics, &[]).await?; let network: ChainName = network.into(); let adapters = networks.ethereum_rpcs(network.clone()); diff --git a/node/src/manager/commands/copy.rs b/node/src/manager/commands/copy.rs index c3fa4cca993..a1919e79ca9 100644 --- a/node/src/manager/commands/copy.rs +++ b/node/src/manager/commands/copy.rs @@ -151,7 +151,7 @@ async fn create_inner( let node = NodeId::new(node.clone()).map_err(|()| anyhow!("invalid node id `{}`", node))?; let dst = subgraph_store - .copy_deployment(&src, shard, node, base_ptr, on_sync) + .copy_deployment(src, shard, node, base_ptr, on_sync) .await?; println!("created deployment {} as copy of {}", dst, src); diff --git a/node/src/manager/commands/deployment/info.rs b/node/src/manager/commands/deployment/info.rs index 7153dd81995..08e8f1c517d 100644 --- a/node/src/manager/commands/deployment/info.rs +++ b/node/src/manager/commands/deployment/info.rs @@ -148,7 +148,7 @@ fn render( } table.push_row(["Node ID", &optional(deployment.node_id.as_ref())]); table.push_row(["Active", &deployment.is_active.to_string()]); - if let Some((_, Some(status))) = deployments.get(0) { + if let Some((_, Some(status))) = deployments.first() { table.push_row(["Paused", &optional(status.is_paused)]); table.push_row(["Synced", &status.is_synced.to_string()]); table.push_row(["Health", status.health.as_str()]); diff --git a/node/src/manager/commands/listen.rs b/node/src/manager/commands/listen.rs index f24b0da8002..0f61f8a5b2e 100644 --- a/node/src/manager/commands/listen.rs +++ b/node/src/manager/commands/listen.rs @@ -16,7 +16,7 @@ async fn listen(mgr: Arc) -> Result<(), Error> { .for_each(move |event| { serde_json::to_writer_pretty(std::io::stdout(), &event) .expect("event can be serialized to JSON"); - println!(""); + println!(); std::io::stdout().flush().unwrap(); future::ready(()) }) diff --git 
a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 415eccbf984..5078ab37d02 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -86,7 +86,7 @@ impl PruneReporter for Progress { fn start_analyze(&mut self) { if !self.initial_analyze { - println!(""); + println!(); } print!("Analyze tables"); self.analyze_start = Instant::now(); @@ -105,7 +105,7 @@ impl PruneReporter for Progress { let stats: Vec<_> = stats .iter() .filter(|stat| self.initial_analyze || analyzed.contains(&stat.tablename.as_str())) - .map(|stats| stats.clone()) + .cloned() .collect(); println!( "\rAnalyzed {} tables in {}s{: ^30}", @@ -424,7 +424,7 @@ pub async fn status( let table_name = fmt::abbreviate(&table_name, 30); let rows = rows.map_or_null(|rows| rows.to_string()); let batch_size = batch_size.map_or_null(|b| b.to_string()); - let duration = started_at.map_or_null(|s| fmt::duration(&s, &finished_at)); + let duration = started_at.map_or_null(|s| fmt::duration(s, &finished_at)); let phase = phase.as_str(); writeln!(term, "{table_name:<30} | {:<15} {complete:>6} | {rows:>8} | {batch_size:>11} | {duration:>8}", diff --git a/node/src/manager/commands/rewind.rs b/node/src/manager/commands/rewind.rs index b407089d32a..5c2eb00e5b7 100644 --- a/node/src/manager/commands/rewind.rs +++ b/node/src/manager/commands/rewind.rs @@ -147,7 +147,7 @@ pub async fn run( println!("Pausing deployments"); for (_, locator) in &locators { - pause_or_resume(primary.clone(), &sender, &locator, true).await?; + pause_or_resume(primary.clone(), sender, locator, true).await?; } // There's no good way to tell that a subgraph has in fact stopped @@ -193,7 +193,7 @@ pub async fn run( println!("Resuming deployments"); for (_, locator) in &locators { - pause_or_resume(primary.clone(), &sender, locator, false).await?; + pause_or_resume(primary.clone(), sender, locator, false).await?; } Ok(()) } diff --git a/node/src/manager/display.rs b/node/src/manager/display.rs index 289497fbe2e..cda62f6ea74 100644 --- a/node/src/manager/display.rs +++ b/node/src/manager/display.rs @@ -114,11 +114,7 @@ impl Row { } Row::Separator => { let total_width = widths.iter().sum::(); - let extra_width = if total_width >= LINE_WIDTH { - 0 - } else { - LINE_WIDTH - total_width - }; + let extra_width = LINE_WIDTH.saturating_sub(total_width); for (idx, width) in widths.iter().enumerate() { if idx > 0 { write!(out, "-+-")?; diff --git a/node/src/manager/fmt.rs b/node/src/manager/fmt.rs index 6aaa12192a7..45807fecbf7 100644 --- a/node/src/manager/fmt.rs +++ b/node/src/manager/fmt.rs @@ -24,9 +24,7 @@ impl MapOrNull for Option { where F: FnOnce(&T) -> String, { - self.as_ref() - .map(|value| f(value)) - .unwrap_or_else(|| NULL.to_string()) + self.as_ref().map(f).unwrap_or_else(|| NULL.to_string()) } } diff --git a/node/src/network_setup.rs b/node/src/network_setup.rs index c35b5fe4831..8de5532e8ff 100644 --- a/node/src/network_setup.rs +++ b/node/src/network_setup.rs @@ -103,12 +103,12 @@ impl Networks { adapters: vec![], rpc_provider_manager: ProviderManager::new( Logger::root(Discard, o!()), - vec![].into_iter(), + vec![], ProviderCheckStrategy::MarkAsValid, ), firehose_provider_manager: ProviderManager::new( Logger::root(Discard, o!()), - vec![].into_iter(), + vec![], ProviderCheckStrategy::MarkAsValid, ), } @@ -171,14 +171,14 @@ impl Networks { let eth = create_ethereum_networks( logger.cheap_clone(), registry, - &config, + config, endpoint_metrics.cheap_clone(), chain_filter, ) .await?; let firehose = 
create_firehose_networks( logger.cheap_clone(), - &config, + config, endpoint_metrics.cheap_clone(), chain_filter, ); @@ -270,9 +270,7 @@ impl Networks { ), firehose_provider_manager: ProviderManager::new( logger.clone(), - firehose_adapters - .into_iter() - .map(|(chain_id, endpoints)| (chain_id, endpoints)), + firehose_adapters, ProviderCheckStrategy::RequireAll(provider_checks), ), }; diff --git a/server/http/src/service.rs b/server/http/src/service.rs index c90a1d50eb5..df933b45d86 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -355,7 +355,7 @@ where .find(|(key, _)| key == "query") .map(|(_, value)| value.into_owned()) }) - .unwrap_or_else(String::new) + .unwrap_or_default() .trim() .to_lowercase() .starts_with("mutation"); diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index 1d8559a9d2c..76ae6d52937 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -109,7 +109,7 @@ impl FakeBlock { number: Some(self.number.into()), parent_hash, hash: Some(H256(self.block_hash().as_slice().try_into().unwrap())), - timestamp: self.timestamp.unwrap_or(U256::default()), + timestamp: self.timestamp.unwrap_or_default(), ..Default::default() }; From f0bccfeaa5b882ce202980bb3ba36b25ebe7855d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:07:16 -0800 Subject: [PATCH 18/92] all: Fix warnings from clippy::get_first --- chain/ethereum/src/codec.rs | 2 +- chain/ethereum/src/data_source.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/chain/ethereum/src/codec.rs b/chain/ethereum/src/codec.rs index 114982607ec..737859df2a5 100644 --- a/chain/ethereum/src/codec.rs +++ b/chain/ethereum/src/codec.rs @@ -533,7 +533,7 @@ mod test { fn get_to_address(trace: &TransactionTrace) -> Result, Error> { // Try to detect contract creation transactions, which have no 'to' address let is_contract_creation = trace.to.len() == 0 - || trace.calls.get(0).map_or(false, |call| { + || trace.calls.first().map_or(false, |call| { CallType::try_from(call.call_type) .map_or(false, |call_type| call_type == CallType::Create) }); diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index e314b5a158f..c139337ebb0 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -97,7 +97,7 @@ impl blockchain::DataSource for DataSource { // Obtain the address from the parameters let string = params - .get(0) + .first() .with_context(|| { format!( "Failed to create data source from template `{}`: address parameter is missing", @@ -1569,7 +1569,7 @@ impl MappingEventHandler { }) }; - if let Some(topic0) = log.topics.get(0) { + if let Some(topic0) = log.topics.first() { return self.topic0() == *topic0 && matches_topic(1, &self.topic1) && matches_topic(2, &self.topic2) From c891d0de43153de82ebc7d187fba7b6ee598f873 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:12:11 -0800 Subject: [PATCH 19/92] all: Fix warnings from clippy::iter_kv_map --- chain/ethereum/src/adapter.rs | 6 ++---- chain/ethereum/src/ethereum_adapter.rs | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 7a04fa5cf18..924c928bdd7 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -536,10 +536,8 @@ impl EthereumLogFilter { // Handle events with topic filters. 
filters.extend( self.events_with_topic_filters - .into_iter() - .map(|(event_with_topics, _)| { - EthGetLogsFilter::from_event_with_topics(event_with_topics) - }), + .into_keys() + .map(EthGetLogsFilter::from_event_with_topics), ); // The current algorithm is to repeatedly find the maximum cardinality vertex and turn all diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index c4dd377fa58..9977ef19ae9 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -1034,7 +1034,7 @@ impl EthereumAdapter { let blocks_matching_polling_filter = self.load_ptrs_for_blocks( logger.clone(), - matching_blocks.iter().map(|(k, _)| *k).collect_vec(), + matching_blocks.keys().cloned().collect_vec(), ); let block_futures = blocks_matching_polling_filter.map(move |ptrs| { From 3c27f9d33ed8c9badeec5b1d2842f393d83f8e1d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:14:06 -0800 Subject: [PATCH 20/92] all: Fix warnings from clippy::iter_nth --- chain/ethereum/src/ethereum_adapter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 9977ef19ae9..7ad693a1d80 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -1092,7 +1092,7 @@ impl EthereumAdapter { // all the traces for the block, we need to ensure that the // block hash for the traces is equal to the desired block hash. // Assume all traces are for the same block. - if traces.iter().nth(0).unwrap().block_hash != block_hash { + if traces.first().unwrap().block_hash != block_hash { return Err(anyhow!( "Trace stream returned traces for an unexpected block: \ number = `{}`, hash = `{}`", From d624bba54ada2956e2387e2d72a9766e53262b18 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:20:54 -0800 Subject: [PATCH 21/92] all: Fix warnings from clippy::large_enum_variant --- store/postgres/src/chain_store.rs | 1 + store/postgres/src/copy.rs | 4 ++-- store/postgres/src/writable.rs | 7 ++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index f13c6b8cf6d..765aa9e05e4 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -324,6 +324,7 @@ mod data { #[derive(Clone, Debug, AsExpression, FromSqlRow)] #[diesel(sql_type = Text)] + #[allow(clippy::large_enum_variant)] /// Storage for a chain. The underlying namespace (database schema) is either /// `public` or of the form `chain[0-9]+`. 
pub enum Storage { diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index 5b052b98e5f..3754ed49d1a 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -669,7 +669,7 @@ impl CopyProgress { } enum WorkerResult { - Ok(CopyTableWorker), + Ok(Box), Err(StoreError), Wake, } @@ -677,7 +677,7 @@ enum WorkerResult { impl From> for WorkerResult { fn from(result: Result) -> Self { match result { - Ok(worker) => WorkerResult::Ok(worker), + Ok(worker) => WorkerResult::Ok(Box::new(worker)), Err(e) => WorkerResult::Err(e), } } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index dd8593f1371..ff5ffb2d45b 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -650,7 +650,7 @@ impl BlockTracker { /// a batch can still be appended to enum QueuedBatch { /// An open batch that can still be appended to - Open(Batch), + Open(Box), /// A closed batch that can no longer be modified Closed(Arc), /// Temporary placeholder during state transitions. Must never be @@ -688,7 +688,7 @@ impl QueuedBatch { fn close(&mut self) -> Arc { let old = std::mem::replace(self, QueuedBatch::Invalid); *self = match old { - QueuedBatch::Open(batch) => QueuedBatch::Closed(Arc::new(batch)), + QueuedBatch::Open(batch) => QueuedBatch::Closed(Arc::new(*batch)), closed @ QueuedBatch::Closed(_) => closed, QueuedBatch::Invalid => unreachable!("close is never called on a QueuedBatch::Invalid"), }; @@ -707,6 +707,7 @@ impl QueuedBatch { /// The `processed` flag is set to true as soon as the background writer is /// working on that request. Once it has been set, no changes can be made to /// the request +#[allow(clippy::large_enum_variant)] enum Request { Write { queued: Instant, @@ -767,7 +768,7 @@ impl Request { queued: Instant::now(), store, stopwatch, - batch: RwLock::new(QueuedBatch::Open(batch)), + batch: RwLock::new(QueuedBatch::Open(Box::new(batch))), processed: AtomicBool::new(false), } } From 66be8e1c7ae5667023fb0713582ce373bbef0574 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:27:40 -0800 Subject: [PATCH 22/92] all: Fix warnings from clippy::legacy_numeric_constants --- graph/src/components/store/mod.rs | 2 +- graph/src/data/value.rs | 2 +- graph/src/util/futures.rs | 4 ++-- graph/src/util/lfu_cache.rs | 2 +- graphql/src/execution/execution.rs | 4 ++-- graphql/src/execution/query.rs | 4 +--- graphql/src/values/coercion.rs | 2 +- runtime/wasm/src/asc_abi/v0_0_4.rs | 4 ++-- runtime/wasm/src/asc_abi/v0_0_5.rs | 4 ++-- server/index-node/src/service.rs | 4 ++-- store/postgres/src/dynds/private.rs | 2 +- store/postgres/src/notification_listener.rs | 3 +-- store/test-store/src/store.rs | 4 ++-- 13 files changed, 19 insertions(+), 22 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 27a6b26d7e7..ef92cfdc284 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -427,7 +427,7 @@ impl EntityCollection { /// be enough for everybody pub type BlockNumber = i32; -pub const BLOCK_NUMBER_MAX: BlockNumber = std::i32::MAX; +pub const BLOCK_NUMBER_MAX: BlockNumber = i32::MAX; /// A query for entities in a store. 
/// diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index 107689d3d75..7abf8fec47e 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -350,7 +350,7 @@ impl Value { ("BigDecimal", Value::Int(i)) => Ok(Value::String(i.to_string())), ("BigDecimal", Value::String(s)) => Ok(Value::String(s)), ("Int", Value::Int(num)) => { - if i32::min_value() as i64 <= num && num <= i32::max_value() as i64 { + if i32::MIN as i64 <= num && num <= i32::MAX as i64 { Ok(Value::Int(num)) } else { Err(Value::Int(num)) diff --git a/graph/src/util/futures.rs b/graph/src/util/futures.rs index b4da90c8a1c..7dff592b342 100644 --- a/graph/src/util/futures.rs +++ b/graph/src/util/futures.rs @@ -114,8 +114,8 @@ where /// Never log failed attempts. /// May still log at `trace` logging level. pub fn no_logging(mut self) -> Self { - self.log_after = u64::max_value(); - self.warn_after = u64::max_value(); + self.log_after = u64::MAX; + self.warn_after = u64::MAX; self } diff --git a/graph/src/util/lfu_cache.rs b/graph/src/util/lfu_cache.rs index a169deb1780..12712350a01 100644 --- a/graph/src/util/lfu_cache.rs +++ b/graph/src/util/lfu_cache.rs @@ -194,7 +194,7 @@ impl // the absolute minimum and popping. let key_entry = CacheEntry::cache_key(key.clone()); self.queue - .change_priority(&key_entry, (true, Reverse(u64::min_value()))) + .change_priority(&key_entry, (true, Reverse(u64::MIN))) .and_then(|_| { self.queue.pop().map(|(e, _)| { assert_eq!(e.key, key_entry.key); diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 0f1b473c903..5010ba36a1f 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -258,8 +258,8 @@ where resolver: introspection_resolver, query: self.query.cheap_clone(), deadline: self.deadline, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, // `cache_status` is a dead value for the introspection context. 
cache_status: AtomicCell::new(CacheStatus::Miss), diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index e8593f27fba..e04d5f3f3e7 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -602,9 +602,7 @@ impl<'s> RawQuery<'s> { Ok(complexity) => Ok(complexity), Err(ComplexityError::Invalid) => Ok(0), Err(ComplexityError::TooDeep) => Err(QueryExecutionError::TooDeep(max_depth)), - Err(ComplexityError::Overflow) => { - Err(QueryExecutionError::TooComplex(u64::max_value(), 0)) - } + Err(ComplexityError::Overflow) => Err(QueryExecutionError::TooComplex(u64::MAX, 0)), Err(ComplexityError::CyclicalFragment(name)) => { Err(QueryExecutionError::CyclicalFragment(name)) } diff --git a/graphql/src/values/coercion.rs b/graphql/src/values/coercion.rs index ad4b0f1dab3..9d85059b33c 100644 --- a/graphql/src/values/coercion.rs +++ b/graphql/src/values/coercion.rs @@ -37,7 +37,7 @@ impl MaybeCoercible for q::Value { ("BigDecimal", q::Value::String(s)) => Ok(r::Value::String(s)), ("Int", q::Value::Int(num)) => { let n = num.as_i64().ok_or_else(|| q::Value::Int(num.clone()))?; - if i32::min_value() as i64 <= n && n <= i32::max_value() as i64 { + if i32::MIN as i64 <= n && n <= i32::MAX as i64 { Ok(r::Value::Int((n as i32).into())) } else { Err(q::Value::Int(num)) diff --git a/runtime/wasm/src/asc_abi/v0_0_4.rs b/runtime/wasm/src/asc_abi/v0_0_4.rs index 3a4b85b8030..3eb796434e6 100644 --- a/runtime/wasm/src/asc_abi/v0_0_4.rs +++ b/runtime/wasm/src/asc_abi/v0_0_4.rs @@ -35,7 +35,7 @@ impl ArrayBuffer { content.extend(&asc_bytes); } - if content.len() > u32::max_value() as usize { + if content.len() > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow::anyhow!( "slice cannot fit in WASM memory" ))); @@ -194,7 +194,7 @@ pub struct AscString { impl AscString { pub fn new(content: &[u16]) -> Result { - if size_of_val(content) > u32::max_value() as usize { + if size_of_val(content) > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow!( "string cannot fit in WASM memory" ))); diff --git a/runtime/wasm/src/asc_abi/v0_0_5.rs b/runtime/wasm/src/asc_abi/v0_0_5.rs index 8f42a158d84..3c769845b2b 100644 --- a/runtime/wasm/src/asc_abi/v0_0_5.rs +++ b/runtime/wasm/src/asc_abi/v0_0_5.rs @@ -34,7 +34,7 @@ impl ArrayBuffer { content.extend(&asc_bytes); } - if content.len() > u32::max_value() as usize { + if content.len() > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow::anyhow!( "slice cannot fit in WASM memory" ))); @@ -176,7 +176,7 @@ pub struct AscString { impl AscString { pub fn new(content: &[u16]) -> Result { - if size_of_val(content) > u32::max_value() as usize { + if size_of_val(content) > u32::MAX as usize { return Err(DeterministicHostError::from(anyhow!( "string cannot fit in WASM memory" ))); diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index 22f29f37731..98a919b3aec 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -150,8 +150,8 @@ where let options = QueryExecutionOptions { resolver, deadline: None, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, trace: false, }; let (result, _) = execute_query(query_clone.cheap_clone(), None, None, options).await; diff --git a/store/postgres/src/dynds/private.rs b/store/postgres/src/dynds/private.rs index 874db77e788..793a3ffe9fc 100644 --- a/store/postgres/src/dynds/private.rs +++ b/store/postgres/src/dynds/private.rs @@ -1,4 +1,4 @@ -use 
std::{collections::HashMap, i32, ops::Bound}; +use std::{collections::HashMap, ops::Bound}; use diesel::{ pg::{sql_types, Pg}, diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index 01bc588a72e..7f0e5eb51ba 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -347,8 +347,7 @@ impl JsonNotification { anyhow!("Invalid notification ID, not compatible with i64: {}", n) })?; - if payload_id < (i32::min_value() as i64) || payload_id > (i32::max_value() as i64) - { + if payload_id < (i32::MIN as i64) || payload_id > (i32::MAX as i64) { Err(anyhow!( "Invalid notification ID, value exceeds i32: {}", payload_id diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 0744f864746..af973c32993 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -569,8 +569,8 @@ async fn execute_subgraph_query_internal( QueryExecutionOptions { resolver, deadline, - max_first: std::u32::MAX, - max_skip: std::u32::MAX, + max_first: u32::MAX, + max_skip: u32::MAX, trace, }, ) From c63f90c71389e96aa5c48881e2159f5d96594216 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:30:23 -0800 Subject: [PATCH 23/92] all: Fix warnings from clippy::len_without_is_empty --- graph/src/firehose/endpoints.rs | 16 ++++++++++------ graph/src/util/intern.rs | 4 ++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index beb682fc527..f0c2e376e6a 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -91,12 +91,12 @@ impl NetworkDetails for Arc { } } -impl Default for ConnectionHeaders { - fn default() -> Self { - Self::new() - } -} - +impl Default for ConnectionHeaders { + fn default() -> Self { + Self::new() + } +} + impl ConnectionHeaders { pub fn new() -> Self { Self(HashMap::new()) @@ -687,6 +687,10 @@ impl FirehoseEndpoints { self.1.len(&self.0) } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + /// This function will attempt to grab an endpoint based on the Lowest error count // with high capacity available. If an adapter cannot be found `endpoint` will // return an error. diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 1c12f57dc61..9e57056be75 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -211,6 +211,10 @@ impl Object { .count() } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + /// Find the value for `key` in the object. Return `None` if the key is /// not present. 
pub fn get(&self, key: &str) -> Option<&V> { From 97c58cc9eb358e51d646e28d7156db92dfe5f641 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:33:00 -0800 Subject: [PATCH 24/92] all: Fix warnings from clippy::len_zero --- chain/ethereum/src/codec.rs | 2 +- node/src/chain.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/ethereum/src/codec.rs b/chain/ethereum/src/codec.rs index 737859df2a5..7ceaa727eaa 100644 --- a/chain/ethereum/src/codec.rs +++ b/chain/ethereum/src/codec.rs @@ -532,7 +532,7 @@ mod test { fn get_to_address(trace: &TransactionTrace) -> Result, Error> { // Try to detect contract creation transactions, which have no 'to' address - let is_contract_creation = trace.to.len() == 0 + let is_contract_creation = trace.to.is_empty() || trace.calls.first().map_or(false, |call| { CallType::try_from(call.call_type) .map_or(false, |call_type| call_type == CallType::Create) diff --git a/node/src/chain.rs b/node/src/chain.rs index 06e8bfa155b..b1f2b0709cb 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -340,7 +340,7 @@ pub async fn networks_as_chains( let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); let eth_adapters = networks.ethereum_rpcs(chain_id.clone()); - let cc = if firehose_endpoints.len() > 0 { + let cc = if !firehose_endpoints.is_empty() { ChainClient::::new_firehose(firehose_endpoints) } else { ChainClient::::new_rpc(eth_adapters.clone()) From 8d063590ba95795c49c443fca679c7401967275b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:34:21 -0800 Subject: [PATCH 25/92] all: Fix warnings from clippy::let_and_return --- chain/ethereum/src/data_source.rs | 3 +-- store/postgres/src/chain_store.rs | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index c139337ebb0..bb9141cc193 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -513,8 +513,7 @@ impl DataSource { .find(move |handler| match handler.filter { Some(BlockHandlerFilter::Polling { every }) => { let start_block = self.start_block; - let should_trigger = (block - start_block) % every.get() as i32 == 0; - should_trigger + (block - start_block) % every.get() as i32 == 0 } None => true, _ => false, diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 765aa9e05e4..0d5e7a182db 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -2589,7 +2589,7 @@ impl ChainStoreTrait for ChainStore { }) .await; - let stored = match res { + match res { Ok(blocks) => { for block in &blocks { self.recent_blocks_cache.insert_block(block.clone()); @@ -2599,8 +2599,7 @@ impl ChainStoreTrait for ChainStore { Err(e) => { return Err(e.into()); } - }; - stored + } } else { Vec::new() }; From fe1b86bfdbd866ef7afa98656e60b215f0b30d0e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:36:15 -0800 Subject: [PATCH 26/92] all: Fix warnings from clippy::manual_clamp --- graph/src/data/graphql/load_manager.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/graph/src/data/graphql/load_manager.rs b/graph/src/data/graphql/load_manager.rs index 32a02e9f59c..b8bdb4a63d0 100644 --- a/graph/src/data/graphql/load_manager.rs +++ b/graph/src/data/graphql/load_manager.rs @@ -438,8 +438,7 @@ impl LoadManager { // Kill random queries in case we have no queries, or not enough queries // that cause at least 20% of the effort let kill_rate 
= self.update_kill_rate(shard, kill_rate, last_update, overloaded, wait_ms); - let decline = - rng().random_bool((kill_rate * query_effort / total_effort).min(1.0).max(0.0)); + let decline = rng().random_bool((kill_rate * query_effort / total_effort).clamp(0.0, 1.0)); if decline { if ENV_VARS.load_simulate { debug!(self.logger, "Declining query"; From 998d88b53f104226fb2f1e01659961a41e9c0d62 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 14:25:43 -0800 Subject: [PATCH 27/92] all: Fix warnings from clippy::manual_inspect --- chain/ethereum/src/adapter.rs | 13 ++++++------- chain/ethereum/src/ethereum_adapter.rs | 6 ++---- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 924c928bdd7..ed799fe2778 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -769,14 +769,13 @@ impl FromIterator<(BlockNumber, Address, FunctionSelector)> for EthereumCallFilt .for_each(|(start_block, address, function_signature)| { lookup .entry(address) + .and_modify(|set| { + if set.0 > start_block { + set.0 = start_block + } + set.1.insert(function_signature); + }) .or_insert((start_block, HashSet::default())); - lookup.get_mut(&address).map(|set| { - if set.0 > start_block { - set.0 = start_block - } - set.1.insert(function_signature); - set - }); }); EthereumCallFilter { contract_addresses_function_signatures: lookup, diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 7ad693a1d80..501c055b296 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -169,7 +169,7 @@ impl EthereumAdapter { .trace() .filter(trace_filter) .await - .map(move |traces| { + .inspect(|traces| { if !traces.is_empty() { if to == from { debug!( @@ -188,7 +188,6 @@ impl EthereumAdapter { ); } } - traces }) .map_err(Error::from); @@ -1217,9 +1216,8 @@ impl EthereumAdapterTrait for EthereumAdapter { ENV_VARS.genesis_block_number.into(), ))) .await - .map_err(|e| { + .inspect_err(|_| { metrics.set_status(ProviderStatus::GenesisFail, &provider); - e })? .and_then(|gen_block| gen_block.hash.map(BlockHash::from)) .ok_or_else(|| anyhow!("Ethereum node could not find genesis block")) From 546d95faf11691b21ceb939601fb89c2c495ef28 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 14:30:42 -0800 Subject: [PATCH 28/92] all: Fix warnings from clippy::manual_non_exhaustive --- graph/src/components/store/mod.rs | 4 +--- graph/src/data/query/query.rs | 3 +-- graph/src/data_source/common.rs | 14 ++++---------- graph/src/schema/entity_key.rs | 4 +--- 4 files changed, 7 insertions(+), 18 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index ef92cfdc284..77675967c25 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -434,6 +434,7 @@ pub const BLOCK_NUMBER_MAX: BlockNumber = i32::MAX; /// Details of how query generation for `EntityQuery` works can be found /// at https://github.com/graphprotocol/rfcs/blob/master/engineering-plans/0001-graphql-query-prefetching.md #[derive(Clone, Debug)] +#[non_exhaustive] pub struct EntityQuery { /// ID of the subgraph. 
pub subgraph_id: DeploymentHash, @@ -464,8 +465,6 @@ pub struct EntityQuery { pub query_id: Option, pub trace: bool, - - _force_use_of_new: (), } impl EntityQuery { @@ -484,7 +483,6 @@ impl EntityQuery { logger: None, query_id: None, trace: false, - _force_use_of_new: (), } } diff --git a/graph/src/data/query/query.rs b/graph/src/data/query/query.rs index 5bb64a8a134..0520be3be78 100644 --- a/graph/src/data/query/query.rs +++ b/graph/src/data/query/query.rs @@ -127,6 +127,7 @@ impl QueryTarget { /// A GraphQL query as submitted by a client, either directly or through a subscription. #[derive(Clone, Debug)] +#[non_exhaustive] pub struct Query { pub document: q::Document, pub variables: Option, @@ -134,7 +135,6 @@ pub struct Query { pub query_text: Arc, pub variables_text: Arc, pub trace: bool, - _force_use_of_new: (), } impl Query { @@ -162,7 +162,6 @@ impl Query { query_text: Arc::new(query_text), variables_text: Arc::new(variables_text), trace, - _force_use_of_new: (), } } } diff --git a/graph/src/data_source/common.rs b/graph/src/data_source/common.rs index da3196c1251..511d18f3de7 100644 --- a/graph/src/data_source/common.rs +++ b/graph/src/data_source/common.rs @@ -387,20 +387,20 @@ impl UnresolvedMappingABI { /// `event.params.`. Each entry under `calls` gets turned into a /// `CallDcl` #[derive(Clone, CheapClone, Debug, Default, Hash, Eq, PartialEq)] +#[non_exhaustive] pub struct CallDecls { pub decls: Arc>, - readonly: (), } /// A single call declaration, like `myCall1: /// Contract[address].function(arg1, arg2, ...)` #[derive(Clone, Debug, Hash, Eq, PartialEq)] +#[non_exhaustive] pub struct CallDecl { /// A user-defined label pub label: String, /// The call expression pub expr: CallExpr, - readonly: (), } impl CallDecl { @@ -763,9 +763,9 @@ impl CallDecl { /// Unresolved representation of declared calls stored as raw strings /// Used during initial manifest parsing before ABI context is available #[derive(Clone, CheapClone, Debug, Default, Eq, PartialEq)] +#[non_exhaustive] pub struct UnresolvedCallDecls { pub raw_decls: Arc>, - readonly: (), } impl UnresolvedCallDecls { @@ -784,7 +784,6 @@ impl UnresolvedCallDecls { .map(|expr| CallDecl { label: label.clone(), expr, - readonly: (), }) .with_context(|| format!("Error in declared call '{}':", label)) }) @@ -792,7 +791,6 @@ impl UnresolvedCallDecls { Ok(CallDecls { decls: Arc::new(decls?), - readonly: (), }) } @@ -811,18 +809,17 @@ impl<'de> de::Deserialize<'de> for UnresolvedCallDecls { de::Deserialize::deserialize(deserializer)?; Ok(UnresolvedCallDecls { raw_decls: Arc::new(raw_decls), - readonly: (), }) } } #[derive(Clone, Debug, Hash, Eq, PartialEq)] +#[non_exhaustive] pub struct CallExpr { pub abi: Word, pub address: CallArg, pub func: Word, pub args: Vec, - readonly: (), } impl CallExpr { @@ -962,7 +959,6 @@ impl CallExpr { address, func: Word::from(func), args, - readonly: (), }; expr.validate_args().with_context(|| { @@ -1655,7 +1651,6 @@ mod tests { let call_decl = CallDecl { label: "myTokenCall".to_string(), expr: parser.ok("ERC20[event.params.asset.1].name()"), - readonly: (), }; // Test scenario 1: Unknown parameter @@ -1719,7 +1714,6 @@ mod tests { let call_decl_with_args = CallDecl { label: "transferCall".to_string(), expr, - readonly: (), }; // Create a structure where base has only 2 fields instead of 3 diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index d560351f71e..520d3d6320a 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -10,6 +10,7 @@ use 
crate::util::intern; /// Key by which an individual entity in the store can be accessed. Stores /// only the entity type and id. The deployment must be known from context. #[derive(Clone, CacheWeight, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[non_exhaustive] pub struct EntityKey { /// Name of the entity type. pub entity_type: EntityType, @@ -23,8 +24,6 @@ pub struct EntityKey { /// doing the lookup. So if the entity exists but was created on a different causality region, /// the lookup will return empty. pub causality_region: CausalityRegion, - - _force_use_of_new: (), } impl EntityKey { @@ -43,7 +42,6 @@ impl EntityKey { entity_type, entity_id, causality_region, - _force_use_of_new: (), } } From 8b2154df27a85de4409f486d2200627087a1fb12 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 14:34:16 -0800 Subject: [PATCH 29/92] all: Fix warnings from clippy::manual_strip --- graph/src/components/link_resolver/file.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/graph/src/components/link_resolver/file.rs b/graph/src/components/link_resolver/file.rs index 2f1da0c7a3f..37224c55495 100644 --- a/graph/src/components/link_resolver/file.rs +++ b/graph/src/components/link_resolver/file.rs @@ -104,11 +104,7 @@ impl FileLinkResolver { pub fn remove_prefix(link: &str) -> &str { const IPFS: &str = "/ipfs/"; - if link.starts_with(IPFS) { - &link[IPFS.len()..] - } else { - link - } + link.strip_prefix(IPFS).unwrap_or(link) } #[async_trait] From b86bcb498a4c3f5cd0db1336b80da9e2f0be720a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 13:03:38 -0800 Subject: [PATCH 30/92] all: Run 'cargo clippy --fix' (again) --- chain/ethereum/src/adapter.rs | 21 +++++++++------- chain/ethereum/src/buffered_call_cache.rs | 2 +- chain/ethereum/src/chain.rs | 2 +- chain/ethereum/src/codec.rs | 8 +++---- chain/ethereum/src/data_source.rs | 24 +++++++++---------- chain/ethereum/src/ethereum_adapter.rs | 18 +++++--------- chain/ethereum/src/ingestor.rs | 8 +++---- chain/ethereum/src/network.rs | 2 +- chain/ethereum/src/polling_block_stream.rs | 1 - chain/ethereum/src/runtime/abi.rs | 6 ++--- chain/ethereum/src/runtime/runtime_adapter.rs | 2 +- chain/ethereum/src/trigger.rs | 2 +- 12 files changed, 45 insertions(+), 51 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index ed799fe2778..dfcbcc02faa 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -80,15 +80,18 @@ impl EventSignatureWithTopics { address_matches && self.signature == sig - && self.topic1.as_ref().map_or(true, |t1| { - topics.get(1).map_or(false, |topic| t1.contains(topic)) - }) - && self.topic2.as_ref().map_or(true, |t2| { - topics.get(2).map_or(false, |topic| t2.contains(topic)) - }) - && self.topic3.as_ref().map_or(true, |t3| { - topics.get(3).map_or(false, |topic| t3.contains(topic)) - }) + && self + .topic1 + .as_ref() + .is_none_or(|t1| topics.get(1).is_some_and(|topic| t1.contains(topic))) + && self + .topic2 + .as_ref() + .is_none_or(|t2| topics.get(2).is_some_and(|topic| t2.contains(topic))) + && self + .topic3 + .as_ref() + .is_none_or(|t3| topics.get(3).is_some_and(|topic| t3.contains(topic))) } } diff --git a/chain/ethereum/src/buffered_call_cache.rs b/chain/ethereum/src/buffered_call_cache.rs index c6e0040b570..2ec0bfa40e7 100644 --- a/chain/ethereum/src/buffered_call_cache.rs +++ b/chain/ethereum/src/buffered_call_cache.rs @@ -61,7 +61,7 @@ impl EthereumCallCache for BufferedCallCache { return Ok(Some(value)); } - let 
result = self.call_cache.get_call(&call, block).await?; + let result = self.call_cache.get_call(call, block).await?; let mut buffer = self.buffer.lock().unwrap(); if let Some(call::Response { diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 4acfc4aab8b..fcdfc6696fb 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -746,7 +746,7 @@ async fn fetch_unique_blocks_from_cache( // Load blocks from the cache let blocks_map = chain_store .cheap_clone() - .block_ptrs_by_numbers(block_numbers.iter().map(|&b| b.into()).collect::>()) + .block_ptrs_by_numbers(block_numbers.iter().map(|&b| b).collect::>()) .await .map_err(|e| { error!(logger, "Error accessing block cache {}", e); diff --git a/chain/ethereum/src/codec.rs b/chain/ethereum/src/codec.rs index 7ceaa727eaa..cca09ea10cb 100644 --- a/chain/ethereum/src/codec.rs +++ b/chain/ethereum/src/codec.rs @@ -533,10 +533,10 @@ mod test { fn get_to_address(trace: &TransactionTrace) -> Result, Error> { // Try to detect contract creation transactions, which have no 'to' address let is_contract_creation = trace.to.is_empty() - || trace.calls.first().map_or(false, |call| { - CallType::try_from(call.call_type) - .map_or(false, |call_type| call_type == CallType::Create) - }); + || trace + .calls + .first() + .is_some_and(|call| CallType::try_from(call.call_type) == Ok(CallType::Create)); if is_contract_creation { Ok(None) diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index bb9141cc193..b4af15d76cd 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -467,7 +467,7 @@ impl DataSource { self.mapping .event_handlers .iter() - .filter(|handler| handler.matches(&log)) + .filter(|handler| handler.matches(log)) .cloned() .collect::>() } @@ -650,7 +650,7 @@ impl DataSource { // `address,uint256,bool) arguments.push(')'); // `operation(address,uint256,bool)` - let actual_signature = vec![function.name.clone(), arguments].join("("); + let actual_signature = [function.name.clone(), arguments].join("("); target_signature == actual_signature }) } @@ -1384,11 +1384,9 @@ impl UnresolvedMapping { // resolve each abi abis.into_iter() .map(|unresolved_abi| async { - Result::<_, Error>::Ok( - unresolved_abi - .resolve(deployment_hash, resolver, logger) - .await?, - ) + unresolved_abi + .resolve(deployment_hash, resolver, logger) + .await }) .collect::>() .try_collect::>(), @@ -1415,7 +1413,7 @@ impl UnresolvedMapping { ) })?; - unresolved_handler.resolve(abi_json, &spec_version) + unresolved_handler.resolve(abi_json, spec_version) }) .collect::, anyhow::Error>>()?; @@ -1561,10 +1559,10 @@ impl MappingEventHandler { pub fn matches(&self, log: &Log) -> bool { let matches_topic = |index: usize, topic_opt: &Option>| -> bool { - topic_opt.as_ref().map_or(true, |topic_vec| { + topic_opt.as_ref().is_none_or(|topic_vec| { log.topics .get(index) - .map_or(false, |log_topic| topic_vec.contains(log_topic)) + .is_some_and(|log_topic| topic_vec.contains(log_topic)) }) }; @@ -1580,9 +1578,9 @@ impl MappingEventHandler { } pub fn has_additional_topics(&self) -> bool { - self.topic1.as_ref().map_or(false, |v| !v.is_empty()) - || self.topic2.as_ref().map_or(false, |v| !v.is_empty()) - || self.topic3.as_ref().map_or(false, |v| !v.is_empty()) + self.topic1.as_ref().is_some_and(|v| !v.is_empty()) + || self.topic2.as_ref().is_some_and(|v| !v.is_empty()) + || self.topic3.as_ref().is_some_and(|v| !v.is_empty()) } } diff --git a/chain/ethereum/src/ethereum_adapter.rs 
b/chain/ethereum/src/ethereum_adapter.rs index 501c055b296..b1087b8d848 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -498,17 +498,14 @@ impl EthereumAdapter { block_ptr: BlockPtr, ) -> Result { let web3 = self.web3.clone(); - let logger = Logger::new(&logger, o!("provider" => self.provider.clone())); + let logger = Logger::new(logger, o!("provider" => self.provider.clone())); let block_id = self.block_ptr_to_id(&block_ptr); let retry_log_message = format!("eth_getCode RPC call for block {}", block_ptr); retry(retry_log_message, &logger) .redact_log_urls(true) - .when(|result| match result { - Ok(_) => false, - Err(_) => true, - }) + .when(|result| result.is_err()) .limit(ENV_VARS.request_retries) .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) .run(move || { @@ -533,17 +530,14 @@ impl EthereumAdapter { block_ptr: BlockPtr, ) -> Result { let web3 = self.web3.clone(); - let logger = Logger::new(&logger, o!("provider" => self.provider.clone())); + let logger = Logger::new(logger, o!("provider" => self.provider.clone())); let block_id = self.block_ptr_to_id(&block_ptr); let retry_log_message = format!("eth_getBalance RPC call for block {}", block_ptr); retry(retry_log_message, &logger) .redact_log_urls(true) - .when(|result| match result { - Ok(_) => false, - Err(_) => true, - }) + .when(|result| result.is_err()) .limit(ENV_VARS.request_retries) .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) .run(move || { @@ -589,7 +583,7 @@ impl EthereumAdapter { async move { let req = CallRequest { to: Some(call_data.address), - gas: gas.map(|val| web3::types::U256::from(val)), + gas: gas.map(web3::types::U256::from), data: Some(Bytes::from(call_data.encoded_call.to_vec())), from: None, gas_price: None, @@ -735,7 +729,7 @@ impl EthereumAdapter { .await?; if let Err(e) = cache .set_call( - &logger, + logger, req.cheap_clone(), call.block_ptr.cheap_clone(), result.clone(), diff --git a/chain/ethereum/src/ingestor.rs b/chain/ethereum/src/ingestor.rs index 47cae0b93c5..ed16710ac4a 100644 --- a/chain/ethereum/src/ingestor.rs +++ b/chain/ethereum/src/ingestor.rs @@ -140,7 +140,7 @@ impl PollingBlockIngestor { // ingest_blocks will return a (potentially incomplete) list of blocks that are // missing. let mut missing_block_hash = self - .ingest_block(&logger, ð_adapter, &latest_block.hash) + .ingest_block(logger, ð_adapter, &latest_block.hash) .await?; // Repeatedly fetch missing parent blocks, and ingest them. @@ -162,7 +162,7 @@ impl PollingBlockIngestor { // iteration will have at most block number N-1. // - Therefore, the loop will iterate at most ancestor_count times. while let Some(hash) = missing_block_hash { - missing_block_hash = self.ingest_block(&logger, ð_adapter, &hash).await?; + missing_block_hash = self.ingest_block(logger, ð_adapter, &hash).await?; } Ok(()) } @@ -181,7 +181,7 @@ impl PollingBlockIngestor { .block_by_hash(logger, block_hash) .await? 
.ok_or(IngestorError::BlockUnavailable(block_hash))?; - let ethereum_block = eth_adapter.load_full_block(&logger, block).await?; + let ethereum_block = eth_adapter.load_full_block(logger, block).await?; // We need something that implements `Block` to store the block; the // store does not care whether the block is final or not @@ -212,7 +212,7 @@ impl PollingBlockIngestor { eth_adapter: &Arc, ) -> Result { eth_adapter - .latest_block_header(&logger) + .latest_block_header(logger) .await .map(|block| block.into()) } diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index ca45411cdc2..23835e8198e 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -197,7 +197,7 @@ impl EthereumNetworkAdapters { required_capabilities: &NodeCapabilities, retest_percent: f64, ) -> Result, Error> { - let retest_rng: f64 = (&mut rand::rng()).random(); + let retest_rng: f64 = rand::rng().random(); let cheapest = input.into_iter().choose_multiple(&mut rand::rng(), 3); let cheapest = cheapest.iter(); diff --git a/chain/ethereum/src/polling_block_stream.rs b/chain/ethereum/src/polling_block_stream.rs index 242cdf3e0f9..196c43a35a9 100644 --- a/chain/ethereum/src/polling_block_stream.rs +++ b/chain/ethereum/src/polling_block_stream.rs @@ -5,7 +5,6 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; -use tokio; use graph::blockchain::block_stream::{ BlockStream, BlockStreamError, BlockStreamEvent, BlockWithTriggers, ChainHeadUpdateStream, diff --git a/chain/ethereum/src/runtime/abi.rs b/chain/ethereum/src/runtime/abi.rs index a716c4ea3a8..7a772caccec 100644 --- a/chain/ethereum/src/runtime/abi.rs +++ b/chain/ethereum/src/runtime/abi.rs @@ -438,7 +438,7 @@ impl<'a> ToAscObj for EthereumBlockData<'a> { gas: &GasCounter, ) -> Result { let size = match self.size() { - Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(&size), gas).await?, + Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(size), gas).await?, None => AscPtr::null(), }; @@ -474,11 +474,11 @@ impl<'a> ToAscObj for EthereumBlockData<'a> { gas: &GasCounter, ) -> Result { let size = match self.size() { - Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(&size), gas).await?, + Some(size) => asc_new(heap, &BigInt::from_unsigned_u256(size), gas).await?, None => AscPtr::null(), }; let base_fee_per_block = match self.base_fee_per_gas() { - Some(base_fee) => asc_new(heap, &BigInt::from_unsigned_u256(&base_fee), gas).await?, + Some(base_fee) => asc_new(heap, &BigInt::from_unsigned_u256(base_fee), gas).await?, None => AscPtr::null(), }; diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index acbf41c62a3..2d06b28733e 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -315,7 +315,7 @@ async fn eth_call( fn result_as_string(result: &Result>, HostExportError>) -> String { match result { - Ok(Some(tokens)) => format!("({})", tokens_as_string(&tokens)), + Ok(Some(tokens)) => format!("({})", tokens_as_string(tokens)), Ok(None) => "none".to_string(), Err(_) => "error".to_string(), } diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index bbbaa69a8d2..c9225cc3ce9 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -359,7 +359,7 @@ impl EthereumTrigger { Some(address) } EthereumTrigger::Call(call) => Some(&call.to), - EthereumTrigger::Log(log_ref) => Some(&log_ref.address()), + 
EthereumTrigger::Log(log_ref) => Some(log_ref.address()), // Unfiltered block triggers match any data source address. EthereumTrigger::Block(_, EthereumBlockTriggerType::End) => None, EthereumTrigger::Block(_, EthereumBlockTriggerType::Start) => None, From c4374593c84a6f068f6c3fb629267e69ec94bd4e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 14:46:16 -0800 Subject: [PATCH 31/92] all: Fix warnings from clippy::map_clone --- chain/ethereum/src/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index fcdfc6696fb..3efbd0c8a2e 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -746,7 +746,7 @@ async fn fetch_unique_blocks_from_cache( // Load blocks from the cache let blocks_map = chain_store .cheap_clone() - .block_ptrs_by_numbers(block_numbers.iter().map(|&b| b).collect::>()) + .block_ptrs_by_numbers(block_numbers.iter().copied().collect::>()) .await .map_err(|e| { error!(logger, "Error accessing block cache {}", e); From e4df61e758338718415b97411e2a35cbbf685114 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 14:48:25 -0800 Subject: [PATCH 32/92] all: Fix warnings from clippy::map_entry --- store/postgres/src/relational.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 5cc4be3cc5d..3f93d1a7ab3 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -505,6 +505,7 @@ impl Layout { let key = entity_type.key_in(entity_data.id(), CausalityRegion::from_entity(&entity_data)); + #[allow(clippy::map_entry)] if entities.contains_key(&key) { return Err(internal_error!( "duplicate entity {}[{}] in result set, block = {}", From e52faa320a3487c331453f4a86a956c898cc7df0 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 14:49:48 -0800 Subject: [PATCH 33/92] all: Fix warnings from clippy::map_identity --- store/postgres/src/detail.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index 7f2f9994848..df00b46ceee 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -578,7 +578,7 @@ impl StoredDeploymentEntity { &detail.subgraph, "start_block", manifest.start_block_hash.clone(), - manifest.start_block_number.map(|n| n), + manifest.start_block_number, )? 
.map(|block| block.to_ptr()); From af77913b101885c869f33d6cf9c68f6b5f7d5065 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:26:15 -0800 Subject: [PATCH 34/92] all: Fix warnings from clippy::match_like_matches_macro --- chain/ethereum/src/adapter.rs | 5 ++--- graph/src/components/subgraph/instance.rs | 5 +---- graph/src/data/graphql/ext.rs | 5 +---- graph/src/schema/api.rs | 12 +++++++----- graph/src/schema/ast.rs | 22 +++++++++------------- graph/src/schema/input/mod.rs | 7 +------ store/postgres/src/relational.rs | 13 ++----------- store/postgres/src/relational_queries.rs | 5 +---- 8 files changed, 24 insertions(+), 50 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index dfcbcc02faa..7b225140f79 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -847,9 +847,8 @@ impl EthereumBlockFilter { .block_handlers .clone() .into_iter() - .any(|block_handler| match block_handler.filter { - Some(BlockHandlerFilter::Call) => true, - _ => false, + .any(|block_handler| { + matches!(block_handler.filter, Some(BlockHandlerFilter::Call)) }); let has_block_handler_without_filter = data_source diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index c6d3f0c7e85..6ee720a10c0 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -140,10 +140,7 @@ impl BlockState { assert!(!self.in_handler); self.created_data_sources .iter() - .any(|ds| match ds.template { - InstanceDSTemplate::Onchain(_) => true, - _ => false, - }) + .any(|ds| matches!(ds.template, InstanceDSTemplate::Onchain(_))) } pub fn drain_created_data_sources(&mut self) -> Vec { diff --git a/graph/src/data/graphql/ext.rs b/graph/src/data/graphql/ext.rs index 55e592b8ce6..8cdc312f72b 100644 --- a/graph/src/data/graphql/ext.rs +++ b/graph/src/data/graphql/ext.rs @@ -237,10 +237,7 @@ impl TypeExt for Type { // Returns true if the given type is a non-null type. fn is_non_null(&self) -> bool { - match self { - Type::NonNullType(_) => true, - _ => false, - } + matches!(self, Type::NonNullType(_)) } } diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 157cec0ac90..e6c218bbb48 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -188,11 +188,13 @@ impl ApiSchema { match t { s::Type::NamedType(name) => { let named_type = self.get_named_type(name); - named_type.is_some_and(|type_def| match type_def { - s::TypeDefinition::Scalar(_) - | s::TypeDefinition::Enum(_) - | s::TypeDefinition::InputObject(_) => true, - _ => false, + named_type.is_some_and(|type_def| { + matches!( + type_def, + s::TypeDefinition::Scalar(_) + | s::TypeDefinition::Enum(_) + | s::TypeDefinition::InputObject(_) + ) }) } s::Type::ListType(inner) => self.is_input_type(inner), diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 76cdae11e18..f80572b1d5b 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -303,10 +303,7 @@ pub fn get_object_type_directive( // Returns true if the given type is a non-null type. pub fn is_non_null_type(t: &s::Type) -> bool { - match t { - s::Type::NonNullType(_) => true, - _ => false, - } + matches!(t, s::Type::NonNullType(_)) } /// Returns true if the given type is an input type. 
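As an aside, a minimal sketch of the rewrite clippy::match_like_matches_macro applies in the hunks above and below: a `match` whose arms only produce `true` or `false` collapses into the `matches!` macro, with `|` patterns replacing the grouped arms. The enum and function are invented for illustration; this code is not part of the patch:

    enum TypeDef {
        Scalar,
        Enum,
        Object,
    }

    // Before: match def { TypeDef::Scalar | TypeDef::Enum => true, _ => false }
    // After: a single `matches!` invocation with the same patterns.
    fn is_simple(def: &TypeDef) -> bool {
        matches!(def, TypeDef::Scalar | TypeDef::Enum)
    }

    fn main() {
        assert!(is_simple(&TypeDef::Scalar));
        assert!(!is_simple(&TypeDef::Object));
    }
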
@@ -317,11 +314,13 @@ pub fn is_input_type(schema: &s::Document, t: &s::Type) -> bool { match t { s::Type::NamedType(name) => { let named_type = schema.get_named_type(name); - named_type.is_some_and(|type_def| match type_def { - s::TypeDefinition::Scalar(_) - | s::TypeDefinition::Enum(_) - | s::TypeDefinition::InputObject(_) => true, - _ => false, + named_type.is_some_and(|type_def| { + matches!( + type_def, + s::TypeDefinition::Scalar(_) + | s::TypeDefinition::Enum(_) + | s::TypeDefinition::InputObject(_) + ) }) } s::Type::ListType(inner) => is_input_type(schema, inner), @@ -359,10 +358,7 @@ pub fn is_entity_type_definition(type_def: &s::TypeDefinition) -> bool { pub fn is_list_or_non_null_list_field(field: &s::Field) -> bool { match &field.field_type { s::Type::ListType(_) => true, - s::Type::NonNullType(inner_type) => match inner_type.deref() { - s::Type::ListType(_) => true, - _ => false, - }, + s::Type::NonNullType(inner_type) => matches!(inner_type.deref(), s::Type::ListType(_)), _ => false, } } diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index 104ffd2534b..6fff8a071da 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -1405,12 +1405,7 @@ impl InputSchema { .filter(|directive| match directive.argument("include") { Some(Value::List(includes)) if !includes.is_empty() => { includes.iter().any(|include| match include { - Value::Object(include) => match include.get("entity") { - Some(Value::String(fulltext_entity)) if fulltext_entity == entity => { - true - } - _ => false, - }, + Value::Object(include) => matches!(include.get("entity"), Some(Value::String(fulltext_entity)) if fulltext_entity == entity), _ => false, }) } diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 3f93d1a7ab3..7e3a76d4e84 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -1566,13 +1566,7 @@ impl Column { } pub fn is_nullable(&self) -> bool { - fn is_nullable(field_type: &q::Type) -> bool { - match field_type { - q::Type::NonNullType(_) => false, - _ => true, - } - } - is_nullable(&self.field_type) + !matches!(&self.field_type, q::Type::NonNullType(_)) } pub fn is_list(&self) -> bool { @@ -1736,10 +1730,7 @@ impl Table { pub fn column(&self, name: &SqlName) -> Option<&Column> { self.columns .iter() - .filter(|column| match column.column_type { - ColumnType::TSVector(_) => false, - _ => true, - }) + .filter(|column| !matches!(column.column_type, ColumnType::TSVector(_))) .find(|column| &column.name == name) } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 015a41ee88d..7aec99faa9d 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -718,10 +718,7 @@ impl<'a> QueryValue<'a> { } fn is_null(&self) -> bool { - match &self.value { - SqlValue::Null => true, - _ => false, - } + matches!(&self.value, SqlValue::Null) } } From db82555baccc9d689551553cbcdb82f2eb68c2f5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:43:24 -0800 Subject: [PATCH 35/92] all: Fix warnings from clippy::module_inception --- graph/src/components/server/mod.rs | 1 + graph/src/data/query/mod.rs | 1 + graphql/src/execution/mod.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/graph/src/components/server/mod.rs b/graph/src/components/server/mod.rs index 89323b9c8b1..a2dbcadb65f 100644 --- a/graph/src/components/server/mod.rs +++ b/graph/src/components/server/mod.rs @@ -4,4 +4,5 @@ pub mod 
query; /// Component for the index node server. pub mod index_node; +#[allow(clippy::module_inception)] pub mod server; diff --git a/graph/src/data/query/mod.rs b/graph/src/data/query/mod.rs index 407c2218525..b278160a01d 100644 --- a/graph/src/data/query/mod.rs +++ b/graph/src/data/query/mod.rs @@ -1,5 +1,6 @@ mod cache_status; mod error; +#[allow(clippy::module_inception)] mod query; mod result; mod trace; diff --git a/graphql/src/execution/mod.rs b/graphql/src/execution/mod.rs index 8e409d66770..9cd5db531df 100644 --- a/graphql/src/execution/mod.rs +++ b/graphql/src/execution/mod.rs @@ -1,5 +1,6 @@ mod cache; /// Implementation of the GraphQL execution algorithm. +#[allow(clippy::module_inception)] mod execution; mod query; /// Common trait for field resolvers used in the execution. From fd304ca9c68e9c3282c26ee011fa933436dfcf5a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:47:58 -0800 Subject: [PATCH 36/92] all: Fix warnings from clippy::multiple_bound_locations --- graph/src/amp/codec/utils.rs | 4 ++-- graph/src/runtime/asc_heap.rs | 4 ++-- graph/src/util/timed_cache.rs | 12 ++++++------ runtime/wasm/src/module/instance.rs | 7 ++----- runtime/wasm/src/module/mod.rs | 2 +- 5 files changed, 13 insertions(+), 16 deletions(-) diff --git a/graph/src/amp/codec/utils.rs b/graph/src/amp/codec/utils.rs index b8504d84bca..9811f661f7b 100644 --- a/graph/src/amp/codec/utils.rs +++ b/graph/src/amp/codec/utils.rs @@ -95,13 +95,13 @@ where None } -pub fn column_decoder<'a, T: 'static, U>( +pub fn column_decoder<'a, T, U>( record_batch: &'a RecordBatch, column_index: usize, nullable: bool, ) -> Result> + 'a>> where - T: Array, + T: Array + 'static, ArrayDecoder<'a, T>: Decoder>, { if column_index >= record_batch.num_columns() { diff --git a/graph/src/runtime/asc_heap.rs b/graph/src/runtime/asc_heap.rs index 4f2f5c41a87..1301ea017f5 100644 --- a/graph/src/runtime/asc_heap.rs +++ b/graph/src/runtime/asc_heap.rs @@ -47,14 +47,14 @@ pub trait AscHeap: Send { /// /// This operation is expensive as it requires a call to `raw_new` for every /// nested object. -pub async fn asc_new( +pub async fn asc_new( heap: &mut H, rust_obj: &T, gas: &GasCounter, ) -> Result, HostExportError> where C: AscType + AscIndexId, - T: ToAscObj, + T: ToAscObj + ?Sized, { let obj = rust_obj.to_asc_obj(heap, gas).await?; AscPtr::alloc_obj(obj, heap, gas).await diff --git a/graph/src/util/timed_cache.rs b/graph/src/util/timed_cache.rs index 20ac7ba49fd..8f64c844630 100644 --- a/graph/src/util/timed_cache.rs +++ b/graph/src/util/timed_cache.rs @@ -36,18 +36,18 @@ impl TimedCache { /// return `None` otherwise. Note that expired entries stay in the cache /// as it is assumed that, after returning `None`, the caller will /// immediately overwrite that entry with a call to `set` - pub fn get(&self, key: &Q) -> Option> + pub fn get(&self, key: &Q) -> Option> where K: Borrow + Eq + Hash, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { self.get_at(key, Instant::now()) } - fn get_at(&self, key: &Q, now: Instant) -> Option> + fn get_at(&self, key: &Q, now: Instant) -> Option> where K: Borrow + Eq + Hash, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { match self.entries.read().unwrap().get(key) { Some(CacheEntry { value, expires }) if expires >= &now => Some(value.clone()), @@ -94,10 +94,10 @@ impl TimedCache { /// Remove an entry from the cache. 
If there was an entry for `key`, /// return the value associated with it and whether the entry is still /// live - pub fn remove(&self, key: &Q) -> Option<(Arc, bool)> + pub fn remove(&self, key: &Q) -> Option<(Arc, bool)> where K: Borrow + Eq + Hash, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { self.entries .write() diff --git a/runtime/wasm/src/module/instance.rs b/runtime/wasm/src/module/instance.rs index efe7549b64c..45a04e9afbc 100644 --- a/runtime/wasm/src/module/instance.rs +++ b/runtime/wasm/src/module/instance.rs @@ -59,13 +59,10 @@ mod impl_for_tests { asc_get(&ctx, asc_ptr, &self.gas) } - pub async fn asc_new( - &mut self, - rust_obj: &T, - ) -> Result, HostExportError> + pub async fn asc_new(&mut self, rust_obj: &T) -> Result, HostExportError> where P: AscType + AscIndexId, - T: ToAscObj
<P>, + T: ToAscObj<P>
+ ?Sized, { let mut ctx = WasmInstanceContext::new(&mut self.store); asc_new(&mut ctx, rust_obj, &self.gas).await diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index b79e066dfb9..02914abc519 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -35,7 +35,7 @@ mod instance; mod into_wasm_ret; // Convenience for a 'top-level' asc_get, with depth 0. -fn asc_get( +fn asc_get( heap: &H, ptr: AscPtr, gas: &GasCounter, From 8ad40f0a1559a31330b29ac38e222c5f10396b37 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:49:06 -0800 Subject: [PATCH 37/92] all: Fix warnings from clippy::needless_lifetimes --- runtime/wasm/src/host_exports.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 18ea839a771..bc2ba76572d 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -385,9 +385,9 @@ impl HostExports { Ok(()) } - pub(crate) async fn store_get<'a>( + pub(crate) async fn store_get( &self, - state: &'a mut BlockState, + state: &mut BlockState, entity_type: String, entity_id: String, gas: &GasCounter, From f0e18a30ee0b98e1583a4b8207c0caee877de3c3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:51:22 -0800 Subject: [PATCH 38/92] all: Fix warnings from clippy::needless_maybe_sized --- graph/src/cheap_clone.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/cheap_clone.rs b/graph/src/cheap_clone.rs index fc9c98ab7d1..adcb823c303 100644 --- a/graph/src/cheap_clone.rs +++ b/graph/src/cheap_clone.rs @@ -39,14 +39,14 @@ impl CheapClone for Arc { } } -impl CheapClone for Box { +impl CheapClone for Box { #[inline] fn cheap_clone(&self) -> Self { self.clone() } } -impl CheapClone for std::pin::Pin { +impl CheapClone for std::pin::Pin { #[inline] fn cheap_clone(&self) -> Self { self.clone() From 4d158709fdaa357c32b4eb644b97c01c788ed668 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:53:42 -0800 Subject: [PATCH 39/92] all: Fix warnings from clippy::new_without_default --- core/src/subgraph_provider.rs | 1 + graph/src/util/intern.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/subgraph_provider.rs b/core/src/subgraph_provider.rs index 2bd3d36f0a9..7e479dd9495 100644 --- a/core/src/subgraph_provider.rs +++ b/core/src/subgraph_provider.rs @@ -230,6 +230,7 @@ enum Error { /// /// Before starting a subgraph, its processing kind is determined from the subgraph manifest. /// Then, the appropriate instance manager is loaded from this mapping. +#[derive(Default)] pub struct SubgraphInstanceManagers( HashMap>, ); diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 9e57056be75..884e4cb7e3d 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -64,7 +64,7 @@ impl Error { } } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Default)] /// A pool of interned strings. Pools can be organized hierarchically with /// lookups in child pools also considering the parent pool. 
The chain of /// pools from a pool through all its ancestors act as one big pool to the From 3d81b245fc41d048d00ee8be84fe42d2b367049e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 15:56:16 -0800 Subject: [PATCH 40/92] all: Fix warnings from clippy::non_canonical_clone_impl --- graph/src/runtime/asc_ptr.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graph/src/runtime/asc_ptr.rs b/graph/src/runtime/asc_ptr.rs index b37cf117a80..3325d4b7f02 100644 --- a/graph/src/runtime/asc_ptr.rs +++ b/graph/src/runtime/asc_ptr.rs @@ -19,7 +19,7 @@ impl Copy for AscPtr {} impl Clone for AscPtr { fn clone(&self) -> Self { - AscPtr(self.0, PhantomData) + *self } } From 2380297b84ece1423758b31713120d8c85543280 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:05:05 -0800 Subject: [PATCH 41/92] all: Fix warnings from clippy::non_canonical_partial_ord_impl --- graph/src/schema/ast.rs | 2 +- graph/src/schema/entity_type.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index f80572b1d5b..2152ed25723 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -94,7 +94,7 @@ impl Ord for ObjectType { impl PartialOrd for ObjectType { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.0.name.cmp(&other.0.name)) + Some(self.cmp(other)) } } diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 098b48362b9..deb3cb3d8ef 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -190,7 +190,7 @@ impl Eq for EntityType {} impl PartialOrd for EntityType { fn partial_cmp(&self, other: &Self) -> Option { - self.as_str().partial_cmp(other.as_str()) + Some(self.cmp(other)) } } From 65617caed64952e67b648d7f6a553c4e573d4a10 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:07:30 -0800 Subject: [PATCH 42/92] all: Fix warnings from clippy::only_used_in_recursion --- graphql/src/execution/execution.rs | 13 ++----------- graphql/src/execution/query.rs | 18 +++++++----------- 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 5010ba36a1f..48477d3eb5f 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -629,7 +629,6 @@ async fn resolve_field_value( s::Type::ListType(inner_type) => { resolve_field_value_for_list_type( ctx, - object_type, field_value, field, field_definition, @@ -692,7 +691,6 @@ async fn resolve_field_value_for_named_type( #[async_recursion] async fn resolve_field_value_for_list_type( ctx: &ExecutionContext, - object_type: &s::ObjectType, field_value: Option, field: &a::Field, field_definition: &s::Field, @@ -700,15 +698,8 @@ async fn resolve_field_value_for_list_type( ) -> Result> { match inner_type { s::Type::NonNullType(inner_type) => { - resolve_field_value_for_list_type( - ctx, - object_type, - field_value, - field, - field_definition, - inner_type, - ) - .await + resolve_field_value_for_list_type(ctx, field_value, field, field_definition, inner_type) + .await } s::Type::NamedType(ref type_name) => { diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index e04d5f3f3e7..1eb377acafb 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -728,21 +728,17 @@ impl Transform { } /// Interpolate variable references in the arguments `args` - fn interpolate_arguments( - &self, - args: Vec<(String, 
q::Value)>, - pos: &q::Pos, - ) -> Vec<(String, r::Value)> { + fn interpolate_arguments(&self, args: Vec<(String, q::Value)>) -> Vec<(String, r::Value)> { args.into_iter() .map(|(name, val)| { - let val = self.interpolate_value(val, pos); + let val = self.interpolate_value(val); (name, val) }) .collect() } /// Turn `value` into an `r::Value` by resolving variable references - fn interpolate_value(&self, value: q::Value, pos: &q::Pos) -> r::Value { + fn interpolate_value(&self, value: q::Value) -> r::Value { match value { q::Value::Variable(var) => self.variable(&var), q::Value::Int(ref num) => { @@ -756,14 +752,14 @@ impl Transform { q::Value::List(vals) => { let vals = vals .into_iter() - .map(|val| self.interpolate_value(val, pos)) + .map(|val| self.interpolate_value(val)) .collect(); r::Value::List(vals) } q::Value::Object(map) => { let mut rmap = BTreeMap::new(); for (key, value) in map.into_iter() { - let value = self.interpolate_value(value, pos); + let value = self.interpolate_value(value); rmap.insert(key.into(), value); } r::Value::object(rmap) @@ -786,7 +782,7 @@ impl Transform { position, arguments, } = dir; - let arguments = self.interpolate_arguments(arguments, &position); + let arguments = self.interpolate_arguments(arguments); a::Directive { name, position, @@ -903,7 +899,7 @@ impl Transform { return Ok(None); } - let mut arguments = self.interpolate_arguments(arguments, &position); + let mut arguments = self.interpolate_arguments(arguments); self.coerce_argument_values(&mut arguments, parent_type, &name)?; let is_leaf_type = self.schema.document().is_leaf_type(&field_type.field_type); From 75cc00f4df850a183ab67096f8e9bb10f82ddd01 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:10:42 -0800 Subject: [PATCH 43/92] all: Fix warnings from clippy::ptr_arg --- chain/ethereum/src/adapter.rs | 4 ++-- gnd/src/watcher.rs | 4 ++-- node/src/config.rs | 2 +- node/src/manager/commands/rewind.rs | 2 +- store/postgres/src/block_store.rs | 4 ++-- store/postgres/src/deployment_store.rs | 2 +- store/postgres/src/relational.rs | 2 +- store/postgres/src/relational/index.rs | 7 ++----- store/postgres/src/relational_queries.rs | 14 +++++++------- store/postgres/src/sql/validation.rs | 2 +- 10 files changed, 20 insertions(+), 23 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 7b225140f79..cfc05feb6fd 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -71,7 +71,7 @@ impl EventSignatureWithTopics { /// If self.address is None, it's considered a wildcard match. /// Otherwise, it must match the provided address. /// It must also match the topics if they are Some - pub fn matches(&self, address: Option<&H160>, sig: H256, topics: &Vec) -> bool { + pub fn matches(&self, address: Option<&H160>, sig: H256, topics: &[H256]) -> bool { // If self.address is None, it's considered a wildcard match. Otherwise, it must match the provided address. let address_matches = match self.address { Some(ref self_addr) => address == Some(self_addr), @@ -393,7 +393,7 @@ impl EthereumLogFilter { &self, event_signature: &H256, contract_address: Option<&Address>, - topics: &Vec, + topics: &[H256], ) -> bool { // Check for wildcard events first. 
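// A minimal sketch of the clippy::ptr_arg pattern being fixed in this
// patch, with an invented function name rather than code from this repo:
// accepting `&[T]` instead of `&Vec<T>` lets callers pass vectors, arrays,
// and slices alike, and existing `&Vec<T>` call sites keep working because
// `&Vec<T>` coerces to `&[T]`.
fn count_even(values: &[i64]) -> usize {
    // e.g. count_even(&vec![1, 2, 3]) == 1
    values.iter().filter(|v| *v % 2 == 0).count()
}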
if self.wildcard_events.get(event_signature) == Some(&true) { diff --git a/gnd/src/watcher.rs b/gnd/src/watcher.rs index bc13b96a263..63e920ddbd8 100644 --- a/gnd/src/watcher.rs +++ b/gnd/src/watcher.rs @@ -231,7 +231,7 @@ async fn process_file_events( logger: &Logger, rx: mpsc::Receiver>, exclusion_set: &GlobSet, - manifests_paths: &Vec, + manifests_paths: &[PathBuf], source_subgraph_aliases: &HashMap, sender: Sender<(DeploymentHash, SubgraphName)>, ) -> Result<()> { @@ -289,7 +289,7 @@ fn is_relevant_event(event: &Event, watched_dirs: Vec, exclusion_set: & /// Redeploys all subgraphs in the order it appears in the manifests_paths pub async fn deploy_all_subgraphs( logger: &Logger, - manifests_paths: &Vec, + manifests_paths: &[PathBuf], source_subgraph_aliases: &HashMap, sender: &Sender<(DeploymentHash, SubgraphName)>, ) -> Result<()> { diff --git a/node/src/config.rs b/node/src/config.rs index 2443f8ce253..658bba79888 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -464,7 +464,7 @@ impl ChainSection { fn parse_networks( chains: &mut BTreeMap, transport: Transport, - args: &Vec, + args: &[String], ) -> Result<()> { for (nr, arg) in args.iter().enumerate() { if arg.starts_with("wss://") diff --git a/node/src/manager/commands/rewind.rs b/node/src/manager/commands/rewind.rs index 5c2eb00e5b7..0ca0c9d2bd2 100644 --- a/node/src/manager/commands/rewind.rs +++ b/node/src/manager/commands/rewind.rs @@ -16,7 +16,7 @@ use graph_store_postgres::{ConnectionPool, Store}; async fn block_ptr( store: BlockStore, locators: &HashSet<(String, DeploymentLocator)>, - searches: &Vec, + searches: &[DeploymentSearch], hash: &str, number: BlockNumber, force: bool, diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index 10a7e03e16f..674c274ac5c 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -342,7 +342,7 @@ impl BlockStore { pub async fn allocate_chain( conn: &mut AsyncPgConnection, - name: &String, + name: &str, shard: &Shard, ident: &ChainIdentifier, ) -> Result { @@ -366,7 +366,7 @@ impl BlockStore { let chain = Chain { id: next_val as i32, - name: name.clone(), + name: name.to_string(), shard: shard.clone(), net_version: ident.net_version.clone(), genesis_block: ident.genesis_block_hash.hash_hex(), diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 61e59e2f7ec..b8c34b64e81 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1081,7 +1081,7 @@ impl DeploymentStore { site: Arc, derived_query: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: &Vec, + excluded_keys: &[EntityKey], ) -> Result, StoreError> { let mut conn = self.pool.get_permitted().await?; let layout = self.layout(&mut conn, site).await?; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 7e3a76d4e84..404daa42b8d 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -664,7 +664,7 @@ impl Layout { conn: &mut AsyncPgConnection, derived_query: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: &Vec, + excluded_keys: &[EntityKey], ) -> Result, StoreError> { let table = self.table_for_entity(&derived_query.entity_type)?; let ids = excluded_keys.iter().map(|key| &key.entity_id).cloned(); diff --git a/store/postgres/src/relational/index.rs b/store/postgres/src/relational/index.rs index 6181d1cd1a2..3019be3ddf1 100644 --- a/store/postgres/src/relational/index.rs +++ 
b/store/postgres/src/relational/index.rs @@ -194,7 +194,7 @@ impl Expr { /// Here we check if all the columns expressions of the two indexes are "kind of same". /// We ignore the operator class of the expression by checking if the string of the /// original expression is a prexif of the string of the current one. - fn is_same_kind_columns(current: &Vec, orig: &Vec) -> bool { + fn is_same_kind_columns(current: &[Expr], orig: &[Expr]) -> bool { if orig.len() != current.len() { return false; } @@ -651,10 +651,7 @@ impl CreateIndex { it.any(|c| *c == *column_name) } - fn some_column_contained<'a>( - expr: &String, - it: &mut impl Iterator, - ) -> bool { + fn some_column_contained<'a>(expr: &str, it: &mut impl Iterator) -> bool { it.any(|c| expr.contains(c)) } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 7aec99faa9d..c40bf1fb744 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -710,7 +710,7 @@ impl<'a> QueryValue<'a> { Ok(Self { value, column_type }) } - fn many(values: &'a Vec, column_type: &'a ColumnType) -> QueryResult> { + fn many(values: &'a [Value], column_type: &'a ColumnType) -> QueryResult> { values .iter() .map(|value| Self::new(value, column_type)) @@ -814,7 +814,7 @@ impl<'a> QueryFragment for QueryValue<'a> { } fn process_vec_ast<'a, T: diesel::serialize::ToSql>( - values: &'a Vec, + values: &'a [T], out: &mut AstPass<'_, 'a, Pg>, sql_language: &str, ) -> Result<(), DieselError> { @@ -1004,7 +1004,7 @@ impl PrefixType { } } -fn is_large_string(s: &String) -> Result { +fn is_large_string(s: &str) -> Result { let len = if s.starts_with("0x") { (s.len() - 2) / 2 } else { @@ -1368,7 +1368,7 @@ impl<'a> Filter<'a> { ) -> Result { fn column_and_value<'v>( table: dsl::Table<'v>, - attr: &String, + attr: &str, value: &'v Value, ) -> Result<(dsl::Column<'v>, QueryValue<'v>), StoreError> { let column = table.column_for_field(attr)?; @@ -1379,7 +1379,7 @@ impl<'a> Filter<'a> { fn starts_or_ends_with<'s>( table: dsl::Table<'s>, - attr: &String, + attr: &str, value: &Value, op: &'static str, starts_with: bool, @@ -1416,7 +1416,7 @@ impl<'a> Filter<'a> { fn cmp<'s>( table: dsl::Table<'s>, - attr: &String, + attr: &str, op: Comparison, value: &'s Value, ) -> Result, StoreError> { @@ -1435,7 +1435,7 @@ impl<'a> Filter<'a> { fn contains<'s>( table: dsl::Table<'s>, - attr: &String, + attr: &str, op: ContainsOp, value: &'s Value, ) -> Result, StoreError> { diff --git a/store/postgres/src/sql/validation.rs b/store/postgres/src/sql/validation.rs index a147a6fb2ad..73720b771b0 100644 --- a/store/postgres/src/sql/validation.rs +++ b/store/postgres/src/sql/validation.rs @@ -266,7 +266,7 @@ impl VisitorMut for Validator<'_> { ) -> ControlFlow { /// Check whether `args` is a single string argument and return that /// string - fn extract_string_arg(args: &Vec) -> Option { + fn extract_string_arg(args: &[FunctionArg]) -> Option { if args.len() != 1 { return None; } From 7e8659b5475f7aac0310f4d2e0ca8220bf4880e2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:15:50 -0800 Subject: [PATCH 44/92] all: Fix warnings from clippy::question_mark --- store/postgres/src/copy.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index 3754ed49d1a..54c1a03a896 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -1060,9 +1060,7 @@ impl Connection { state: &mut CopyState, 
progress: &Arc, ) -> Option { - let Some(conn) = self.conn.take() else { - return None; - }; + let conn = self.conn.take()?; let Some(table) = state.unfinished.pop() else { self.conn = Some(conn); return None; @@ -1084,16 +1082,11 @@ impl Connection { ) -> Option { // It's important that we get the connection before the table since // we remove the table from the state and could drop it otherwise - let Some(conn) = self + let conn = self .pool .try_get_fdw(&self.logger, ENV_VARS.store.batch_worker_wait) - .await - else { - return None; - }; - let Some(table) = state.unfinished.pop() else { - return None; - }; + .await?; + let table = state.unfinished.pop()?; let conn = LockTrackingConnection::new(conn); let worker = CopyTableWorker::new(conn, table); From 371f8612c9193b2f27250b32d48d126ffe701ea5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:17:18 -0800 Subject: [PATCH 45/92] all: Fix warnings from clippy::redundant_allocation --- graph/src/schema/input/sqlexpr.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/graph/src/schema/input/sqlexpr.rs b/graph/src/schema/input/sqlexpr.rs index f2736574bc3..c1735ade461 100644 --- a/graph/src/schema/input/sqlexpr.rs +++ b/graph/src/schema/input/sqlexpr.rs @@ -46,7 +46,7 @@ pub trait ExprVisitor { } pub struct VisitExpr<'a> { - visitor: Box<&'a mut dyn ExprVisitor>, + visitor: &'a mut dyn ExprVisitor, } impl<'a> VisitExpr<'a> { @@ -78,9 +78,7 @@ impl<'a> VisitExpr<'a> { .tokenize_with_location() .unwrap(); parser = parser.with_tokens_with_locations(tokens); - let mut visit = VisitExpr { - visitor: Box::new(visitor), - }; + let mut visit = VisitExpr { visitor }; let mut expr = match parser.parse_expr() { Ok(expr) => expr, Err(e) => { From 6dc57c7cdb38894cb93ae5fe78362116b6c77df2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:20:28 -0800 Subject: [PATCH 46/92] all: Fix warnings from clippy::redundant_locals --- runtime/wasm/src/mapping.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index 2be6d502390..0a73832e9dd 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -320,7 +320,6 @@ impl ValidModule { let mut epoch_counter_abort_handle = None; if let Some(timeout) = timeout { - let timeout = timeout; let engine = engine.clone(); // The epoch counter task will perpetually increment the epoch every `timeout` seconds. 
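A short sketch of the pattern behind the clippy::redundant_locals fix above (the function and names are illustrative, not from this repo): rebinding a variable to the same name in the same scope is a no-op. The shadowing idiom only earns its keep when cloning a value for a `move` closure, which was not needed here since the value is moved in directly.

    use std::time::Duration;

    fn sleep_in_background(timeout: Duration) -> std::thread::JoinHandle<()> {
        // No `let timeout = timeout;` needed: `timeout` is moved (and,
        // being `Copy`, copied) into the closure as-is.
        std::thread::spawn(move || {
            std::thread::sleep(timeout);
        })
    }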
From ccdb2da96ef1481187f174031bb42a11b74084e9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:22:35 -0800 Subject: [PATCH 47/92] all: Fix warnings from clippy::redundant_pattern_matching --- graph/src/schema/input/mod.rs | 2 +- graph/src/util/bounded_queue.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index 6fff8a071da..af5c13b4c95 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -2681,7 +2681,7 @@ mod validations { return; } for interval in intervals { - if let Err(_) = interval.parse::() { + if interval.parse::().is_err() { errors.push(Err::AggregationInvalidInterval( agg_type.name.to_owned(), interval.to_owned(), diff --git a/graph/src/util/bounded_queue.rs b/graph/src/util/bounded_queue.rs index f618c7eca7d..6129817eabe 100644 --- a/graph/src/util/bounded_queue.rs +++ b/graph/src/util/bounded_queue.rs @@ -169,6 +169,6 @@ impl BoundedQueue { /// Clear the queue by popping entries until there are none left pub fn clear(&self) { - while let Some(_) = self.try_pop() {} + while self.try_pop().is_some() {} } } From 58d86006148dffab2eef14b214fa13a9dc0ada9a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:52:33 -0800 Subject: [PATCH 48/92] all: Fix warnings from clippy::result_large_err --- graph/src/data/store/mod.rs | 171 +++++++++++++++++++++++++++--------- 1 file changed, 130 insertions(+), 41 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 9ac537c5716..47b46f195f3 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -3,8 +3,7 @@ use crate::{ prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, schema::{input::VID_FIELD, EntityKey}, - util::intern::{self, AtomPool}, - util::intern::{Error as InternError, NullValue, Object}, + util::intern::{self, AtomPool, Error as InternError, NullValue, Object}, }; use anyhow::{anyhow, Error}; use itertools::Itertools; @@ -712,7 +711,117 @@ pub trait TryIntoEntityIterator: IntoIterator impl>> TryIntoEntityIterator for T {} #[derive(Debug, Error, PartialEq, Eq, Clone)] -pub enum EntityValidationError { +pub struct EntityValidationError(Box); + +impl fmt::Display for EntityValidationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl EntityValidationError { + pub fn unknown_entity_type(key: &EntityKey) -> Self { + let entity = key.entity_type.to_string(); + let id = key.entity_id.to_string(); + + EntityValidationError(Box::new(EntityValidationErrorInner::UnknownEntityType { + entity, + id, + })) + } + + pub fn mismatched_element_type_in_list( + key: &EntityKey, + field: &crate::schema::Field, + value: &Value, + elt: &Value, + index: usize, + ) -> Self { + let entity = key.entity_type.to_string(); + let entity_id = key.entity_id.to_string(); + let expected_type = field.field_type.to_string(); + let field = field.name.to_string(); + let value = value.to_string(); + let actual_type = elt.type_name(); + EntityValidationError(Box::new( + EntityValidationErrorInner::MismatchedElementTypeInList { + entity, + entity_id, + field, + expected_type, + value, + actual_type, + index, + }, + )) + } + + pub fn invalid_field_type( + key: &EntityKey, + field: &crate::schema::Field, + value: &Value, + ) -> Self { + let entity = key.entity_type.to_string(); + let entity_id = key.entity_id.to_string(); + let expected_type = 
field.field_type.to_string(); + let field = field.name.to_string(); + let actual_type = value.type_name(); + let value = value.to_string(); + + EntityValidationError(Box::new(EntityValidationErrorInner::InvalidFieldType { + entity, + entity_id, + value, + field, + expected_type, + actual_type, + })) + } + + fn missing_value_for_non_nullable_field(key: &EntityKey, field: &crate::schema::Field) -> Self { + let entity = key.entity_type.to_string(); + let entity_id = key.entity_id.to_string(); + let field = field.name.to_string(); + EntityValidationError(Box::new( + EntityValidationErrorInner::MissingValueForNonNullableField { + entity, + entity_id, + field, + }, + )) + } + + fn cannot_set_derived_field(key: &EntityKey, field: &crate::schema::Field) -> Self { + EntityValidationError(Box::new( + EntityValidationErrorInner::CannotSetDerivedField { + entity: key.entity_type.to_string(), + entity_id: key.entity_id.to_string(), + field: field.name.to_string(), + }, + )) + } + + fn unknown_key(not_interned: String) -> Self { + EntityValidationError(Box::new(EntityValidationErrorInner::UnknownKey( + not_interned, + ))) + } + + fn missing_id_attribute(entity: String) -> EntityValidationError { + EntityValidationError(Box::new(EntityValidationErrorInner::MissingIDAttribute { + entity, + })) + } + + fn unsupported_type_for_id_attribute() -> EntityValidationError { + EntityValidationError(Box::new( + EntityValidationErrorInner::UnsupportedTypeForIDAttribute, + )) + } +} + +#[derive(Debug, Error, PartialEq, Eq, Clone)] +pub enum EntityValidationErrorInner { #[error("Entity {entity}[{id}]: unknown entity type `{entity}`")] UnknownEntityType { entity: String, id: String }, @@ -801,7 +910,7 @@ impl Entity { let mut obj = Object::new(pool); for (key, value) in iter { obj.insert(key, value) - .map_err(|e| EntityValidationError::UnknownKey(e.not_interned()))?; + .map_err(|e| EntityValidationError::unknown_key(e.not_interned()))?; } let entity = Entity(obj); entity.check_id()?; @@ -858,11 +967,12 @@ impl Entity { fn check_id(&self) -> Result<(), EntityValidationError> { match self.get("id") { - None => Err(EntityValidationError::MissingIDAttribute { - entity: format!("{:?}", self.0), - }), + None => Err(EntityValidationError::missing_id_attribute(format!( + "{:?}", + self.0 + ))), Some(Value::String(_)) | Some(Value::Bytes(_)) | Some(Value::Int8(_)) => Ok(()), - _ => Err(EntityValidationError::UnsupportedTypeForIDAttribute), + _ => Err(EntityValidationError::unsupported_type_for_id_attribute()), } } @@ -943,12 +1053,10 @@ impl Entity { return Ok(()); } - let object_type = key.entity_type.object_type().map_err(|_| { - EntityValidationError::UnknownEntityType { - entity: key.entity_type.to_string(), - id: key.entity_id.to_string(), - } - })?; + let object_type = key + .entity_type + .object_type() + .map_err(|_| EntityValidationError::unknown_entity_type(key))?; for field in object_type.fields.iter() { match (self.get(&field.name), field.is_derived()) { @@ -962,46 +1070,27 @@ impl Entity { for (index, elt) in elts.iter().enumerate() { if !elt.is_assignable(scalar_type, false) { return Err( - EntityValidationError::MismatchedElementTypeInList { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - field: field.name.to_string(), - expected_type: field.field_type.to_string(), - value: value.to_string(), - actual_type: elt.type_name().to_string(), - index, - }, + EntityValidationError::mismatched_element_type_in_list( + key, field, value, elt, index, + ), ); } } } } if 
!value.is_assignable(scalar_type, field.field_type.is_list()) { - return Err(EntityValidationError::InvalidFieldType { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - value: value.to_string(), - field: field.name.to_string(), - expected_type: field.field_type.to_string(), - actual_type: value.type_name().to_string(), - }); + return Err(EntityValidationError::invalid_field_type(key, field, value)); } } (None, false) => { if field.field_type.is_non_null() { - return Err(EntityValidationError::MissingValueForNonNullableField { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - field: field.name.to_string(), - }); + return Err(EntityValidationError::missing_value_for_non_nullable_field( + key, field, + )); } } (Some(_), true) => { - return Err(EntityValidationError::CannotSetDerivedField { - entity: key.entity_type.to_string(), - entity_id: key.entity_id.to_string(), - field: field.name.to_string(), - }); + return Err(EntityValidationError::cannot_set_derived_field(key, field)); } (None, true) => { // derived fields should not be set From a7ac724e638a19f490b335dbb26f4847a62da5d9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 16:59:45 -0800 Subject: [PATCH 49/92] all: Change the error for NodeId::new This partially addresses clippy::result_unit_err --- graph/src/data/store/mod.rs | 10 ++++++---- node/src/bin/manager.rs | 4 ++-- node/src/config.rs | 11 +++++------ node/src/manager/commands/assign.rs | 2 +- node/src/manager/commands/config.rs | 2 +- node/src/manager/commands/copy.rs | 2 +- server/graphman/src/resolvers/deployment_mutation.rs | 2 +- store/postgres/src/primary.rs | 6 +++--- 8 files changed, 20 insertions(+), 19 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 47b46f195f3..724401eaba0 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -37,12 +37,14 @@ pub mod sql; pub struct NodeId(String); impl NodeId { - pub fn new(s: impl Into) -> Result { + /// Create a new NodeId. The name `s` must be between 1 and 63 + /// characters long. 
If it is not, `Err(s)` is returned + pub fn new(s: impl Into) -> Result { let s = s.into(); // Enforce minimum and maximum length limit if s.len() > 63 || s.is_empty() { - return Err(()); + return Err(s); } Ok(NodeId(s)) @@ -76,8 +78,8 @@ impl<'de> de::Deserialize<'de> for NodeId { D: de::Deserializer<'de>, { let s: String = de::Deserialize::deserialize(deserializer)?; - NodeId::new(s.clone()) - .map_err(|()| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid node ID")) + NodeId::new(s) + .map_err(|s| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid node ID")) } } diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 9bcc016d74e..792df8853c9 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -1121,8 +1121,8 @@ async fn main() -> anyhow::Result<()> { } let node = match NodeId::new(&opt.node_id) { - Err(()) => { - eprintln!("invalid node id: {}", opt.node_id); + Err(node_id) => { + eprintln!("invalid node id: {}", node_id); std::process::exit(1); } Ok(node) => node, diff --git a/node/src/config.rs b/node/src/config.rs index 658bba79888..613f4826e21 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -181,7 +181,7 @@ impl Config { pub fn from_str(config: &str, node: &str) -> Result { let mut config: Config = toml::from_str(config)?; - config.node = NodeId::new(node).map_err(|()| anyhow!("invalid node id {}", node))?; + config.node = NodeId::new(node).map_err(|node| anyhow!("invalid node id {}", node))?; config.validate()?; Ok(config) } @@ -191,7 +191,7 @@ impl Config { let mut stores = BTreeMap::new(); let chains = ChainSection::from_opt(opt)?; let node = NodeId::new(opt.node_id.to_string()) - .map_err(|()| anyhow!("invalid node id {}", opt.node_id))?; + .map_err(|node| anyhow!("invalid node id {}", node))?; stores.insert(PRIMARY_SHARD.to_string(), Shard::from_opt(true, opt)?); Ok(Config { node, @@ -426,7 +426,7 @@ pub struct ChainSection { impl ChainSection { fn validate(&mut self) -> Result<()> { NodeId::new(&self.ingestor) - .map_err(|()| anyhow!("invalid node id for ingestor {}", &self.ingestor))?; + .map_err(|node| anyhow!("invalid node id for ingestor {}", node))?; for (_, chain) in self.chains.iter_mut() { chain.validate()? 
} @@ -993,8 +993,7 @@ impl DeploymentPlacer for Deployment { .indexers .iter() .map(|idx| { - NodeId::new(idx.clone()) - .map_err(|()| format!("{} is not a valid node name", idx)) + NodeId::new(idx).map_err(|idx| format!("{} is not a valid node name", idx)) }) .collect::, _>>()?; Some((shards, indexers)) @@ -1041,7 +1040,7 @@ impl Rule { return Err(anyhow!("useless rule without indexers")); } for indexer in &self.indexers { - NodeId::new(indexer).map_err(|()| anyhow!("invalid node id {}", &indexer))?; + NodeId::new(indexer).map_err(|indexer| anyhow!("invalid node id {}", indexer))?; } self.shard_names().map_err(Error::from)?; Ok(()) diff --git a/node/src/manager/commands/assign.rs b/node/src/manager/commands/assign.rs index 971d8a4687f..c5f451281be 100644 --- a/node/src/manager/commands/assign.rs +++ b/node/src/manager/commands/assign.rs @@ -35,7 +35,7 @@ pub async fn reassign( search: &DeploymentSearch, node: String, ) -> Result<(), Error> { - let node = NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; + let node = NodeId::new(node).map_err(|node| anyhow!("illegal node id `{}`", node))?; let locator = search.locate_unique(&primary).await?; let pconn = primary.get_permitted().await?; diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 14b70d8d614..b5f047812bd 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -72,7 +72,7 @@ pub fn pools(config: &Config, nodes: Vec, shard: bool) -> Result<(), Err .into_iter() .map(|name| { NodeId::new(name.replace('-', "_")) - .map_err(|()| anyhow!("illegal node name `{}`", name)) + .map_err(|name| anyhow!("illegal node name `{}`", name)) }) .collect::>()?; // node -> shard_name -> size diff --git a/node/src/manager/commands/copy.rs b/node/src/manager/commands/copy.rs index a1919e79ca9..cafe82dacbb 100644 --- a/node/src/manager/commands/copy.rs +++ b/node/src/manager/commands/copy.rs @@ -148,7 +148,7 @@ async fn create_inner( ) } let shard = Shard::new(shard)?; - let node = NodeId::new(node.clone()).map_err(|()| anyhow!("invalid node id `{}`", node))?; + let node = NodeId::new(node).map_err(|node| anyhow!("invalid node id `{}`", node))?; let dst = subgraph_store .copy_deployment(src, shard, node, base_ptr, on_sync) diff --git a/server/graphman/src/resolvers/deployment_mutation.rs b/server/graphman/src/resolvers/deployment_mutation.rs index aa716c286d0..c4a9b483fa9 100644 --- a/server/graphman/src/resolvers/deployment_mutation.rs +++ b/server/graphman/src/resolvers/deployment_mutation.rs @@ -118,7 +118,7 @@ impl DeploymentMutation { ) -> Result { let ctx = GraphmanContext::new(ctx)?; let deployment = deployment.try_into()?; - let node = NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; + let node = NodeId::new(node).map_err(|node| anyhow!("illegal node id `{}`", node))?; let reassign_result = reassign::run(&ctx, &deployment, &node).await?; match reassign_result { ReassignResult::CompletedWithWarnings(warnings) => Ok( diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index f9d8ee232f2..d7f506ff024 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -681,7 +681,7 @@ mod queries { .await .optional()? .map(|node| { - NodeId::new(&node).map_err(|()| { + NodeId::new(node).map_err(|node| { internal_error!( "invalid node id `{}` in assignment for `{}`", node, @@ -707,7 +707,7 @@ mod queries { .await .optional()? 
.map(|(node, ts)| { - let node_id = NodeId::new(&node).map_err(|()| { + let node_id = NodeId::new(node).map_err(|node| { internal_error!( "invalid node id `{}` in assignment for `{}`", node, @@ -1634,7 +1634,7 @@ impl Connection { .map(|(node, count)| (node.as_str(), *count)) .chain(missing) .min_by_key(|(_, count)| *count) - .map(|(node, _)| NodeId::new(node).map_err(|()| node)) + .map(|(node, _)| NodeId::new(node)) .transpose() // This can't really happen since we filtered by valid NodeId's .map_err(|node| { From 54aea682437d3250a38ffdd5a658d3594bdc08c0 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:03:44 -0800 Subject: [PATCH 50/92] all: Change the error for SubgraphName::new This partially addresses clippy::result_unit_err --- graph/src/data/subgraph/mod.rs | 21 ++++++++++++++------- node/src/manager/commands/config.rs | 2 +- node/src/manager/commands/create.rs | 2 +- node/src/manager/commands/query.rs | 2 +- node/src/manager/commands/remove.rs | 2 +- server/http/src/service.rs | 5 ++--- 6 files changed, 20 insertions(+), 14 deletions(-) diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 75e083e89d3..ed0acc894a2 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -192,7 +192,14 @@ impl TryFromValue for DeploymentHash { pub struct SubgraphName(String); impl SubgraphName { - pub fn new(s: impl Into) -> Result { + /// Construct a new `SubgraphName`, validating the name according to the rules: + /// - Length between 1 and 255 characters + /// - Contains only alphanumeric characters, dashes (`-`), underscores (`_`), and slashes (`/`) + /// - Each part (separated by `/`) must be non-empty, start and end with an alphanumeric character, + /// contain at least one alphabetic character, and not be equal to "graphql" + /// + /// If the name is invalid, return s (as a `String`) as the error + pub fn new(s: impl Into) -> Result { let s = s.into(); // Note: these validation rules must be kept consistent with the validation rules @@ -200,7 +207,7 @@ impl SubgraphName { // Enforce length limits if s.is_empty() || s.len() > 255 { - return Err(()); + return Err(s); } // Check that the name contains only allowed characters. @@ -208,19 +215,19 @@ impl SubgraphName { .chars() .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '/') { - return Err(()); + return Err(s); } // Parse into components and validate each for part in s.split('/') { // Each part must be non-empty if part.is_empty() { - return Err(()); + return Err(s); } // To keep URLs unambiguous, reserve the token "graphql" if part == "graphql" { - return Err(()); + return Err(s); } // Part should not start or end with a special character. 
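// (Sketch of the error-shape change made in this patch and the previous
// one; `validate` is an invented stand-in, not one of the real
// constructors.) Returning the rejected input as the `Err` value instead
// of `Err(())` lets callers report the bad name without cloning it first:
fn validate(s: String) -> Result<String, String> {
    if s.is_empty() || s.len() > 255 {
        return Err(s); // hand the invalid input back to the caller
    }
    Ok(s)
}
// A caller can then write, e.g.:
//     validate(name).map_err(|name| anyhow!("illegal subgraph name `{name}`"))
// instead of capturing the name up front just to produce the message.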
@@ -230,7 +237,7 @@ impl SubgraphName { || !last_char.is_ascii_alphanumeric() || !part.chars().any(|c| c.is_ascii_alphabetic()) { - return Err(()); + return Err(s); } } @@ -270,7 +277,7 @@ impl<'de> de::Deserialize<'de> for SubgraphName { { let s: String = de::Deserialize::deserialize(deserializer)?; SubgraphName::new(s.clone()) - .map_err(|()| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid subgraph name")) + .map_err(|s| de::Error::invalid_value(de::Unexpected::Str(&s), &"valid subgraph name")) } } diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index b5f047812bd..08633ae586d 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -157,7 +157,7 @@ pub async fn provider( } pub fn setting(name: &str) -> Result<(), Error> { - let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + let name = SubgraphName::new(name).map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; let env_vars = EnvVars::from_env().unwrap(); if let Some(path) = &env_vars.subgraph_settings { let settings = Settings::from_file(path) diff --git a/node/src/manager/commands/create.rs b/node/src/manager/commands/create.rs index cfaa62aa958..db75e28d5c1 100644 --- a/node/src/manager/commands/create.rs +++ b/node/src/manager/commands/create.rs @@ -5,7 +5,7 @@ use graph_store_postgres::SubgraphStore; pub async fn run(store: Arc, name: String) -> Result<(), Error> { let name = SubgraphName::new(name.clone()) - .map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + .map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; println!("creating subgraph {}", name); store.create_subgraph(name).await?; diff --git a/node/src/manager/commands/query.rs b/node/src/manager/commands/query.rs index 6339b7bf9cc..04400bfb923 100644 --- a/node/src/manager/commands/query.rs +++ b/node/src/manager/commands/query.rs @@ -30,7 +30,7 @@ pub async fn run( QueryTarget::Deployment(id, Default::default()) } else { let name = SubgraphName::new(target.clone()) - .map_err(|()| anyhow!("illegal subgraph name `{}`", target))?; + .map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; QueryTarget::Name(name, Default::default()) }; diff --git a/node/src/manager/commands/remove.rs b/node/src/manager/commands/remove.rs index 3d03bdf6148..bcf9417569a 100644 --- a/node/src/manager/commands/remove.rs +++ b/node/src/manager/commands/remove.rs @@ -4,7 +4,7 @@ use graph::prelude::{anyhow, Error, SubgraphName, SubgraphStore as _}; use graph_store_postgres::SubgraphStore; pub async fn run(store: Arc, name: &str) -> Result<(), Error> { - let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + let name = SubgraphName::new(name).map_err(|name| anyhow!("illegal subgraph name `{name}`"))?; println!("Removing subgraph {}", name); store.remove_subgraph(name).await?; diff --git a/server/http/src/service.rs b/server/http/src/service.rs index df933b45d86..06950c9ac5f 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -130,9 +130,8 @@ where request: Request, ) -> ServerResult { let version = self.resolve_api_version(&request)?; - let subgraph_name = SubgraphName::new(subgraph_name.as_str()).map_err(|()| { - ServerError::ClientError(format!("Invalid subgraph name {:?}", subgraph_name)) - })?; + let subgraph_name = SubgraphName::new(subgraph_name.as_str()) + .map_err(|name| ServerError::ClientError(format!("Invalid subgraph name `{name}`")))?; 
self.handle_graphql_query(QueryTarget::Name(subgraph_name, version), request) .await From 2a1199bbb6cd2b3e7bfde981de5b02772f66707b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:13:16 -0800 Subject: [PATCH 51/92] all: Fix warnings from clippy::result_unit_err --- graph/src/schema/input/sqlexpr.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/graph/src/schema/input/sqlexpr.rs b/graph/src/schema/input/sqlexpr.rs index c1735ade461..24d373a930a 100644 --- a/graph/src/schema/input/sqlexpr.rs +++ b/graph/src/schema/input/sqlexpr.rs @@ -31,12 +31,17 @@ pub(crate) fn parse( /// `store/postgres/src/relational/rollup.rs`. Note that the visitor can /// mutate both itself (e.g., to store errors) and the expression it is /// visiting. +/// +/// The error type is `()`, as the visitor is expected to record any errors +/// internally pub trait ExprVisitor { /// Visit an identifier (column name). Must return `Err` if the /// identifier is not allowed + #[allow(clippy::result_unit_err)] fn visit_ident(&mut self, ident: &mut p::Ident) -> Result<(), ()>; /// Visit a function name. Must return `Err` if the function is not /// allowed + #[allow(clippy::result_unit_err)] fn visit_func_name(&mut self, func: &mut p::ObjectNamePart) -> Result<(), ()>; /// Called when we encounter a construct that is not supported like a /// subquery @@ -69,6 +74,7 @@ impl<'a> VisitExpr<'a> { /// return `Err(())`. The visitor will know the details of the error /// since this can only happen if `visit_ident` or `visit_func_name` /// returned an error, or `parse_error` or `not_supported` was called. + #[allow(clippy::result_unit_err)] pub fn visit(sql: &str, visitor: &'a mut dyn ExprVisitor) -> Result { let dialect = PostgreSqlDialect {}; From 7b88ca4d8e2b2623e0c1dad17d6818fa51ef2a1d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 11:41:37 -0800 Subject: [PATCH 52/92] all: Fix warnings from clippy::search_is_some --- graph/src/schema/input/mod.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index af5c13b4c95..7314dcc7ab1 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -2395,13 +2395,9 @@ mod validations { )), Err(_) => { if is_first_last - && schema - .entity_types - .iter() - .find(|entity_type| { - entity_type.name.eq(field.field_type.get_base_type()) - }) - .is_some() + && schema.entity_types.iter().any(|entity_type| { + entity_type.name.eq(field.field_type.get_base_type()) + }) { return Ok(()); } From 46fc658c657a58225ecbdb89c81435ee857ef767 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:16:25 -0800 Subject: [PATCH 53/92] all: Fix warnings from clippy::should_implement_trait --- graph/src/components/subgraph/settings.rs | 2 +- store/postgres/src/relational/prune.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/components/subgraph/settings.rs b/graph/src/components/subgraph/settings.rs index a7512614583..e30261f7592 100644 --- a/graph/src/components/subgraph/settings.rs +++ b/graph/src/components/subgraph/settings.rs @@ -45,7 +45,7 @@ impl Settings { Self::from_str(&read_to_string(path)?) 
} - pub fn from_str(toml: &str) -> Result { + fn from_str(toml: &str) -> Result { toml::from_str::(toml).map_err(anyhow::Error::from) } diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 3d31e3df68a..4154eb5110a 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -665,7 +665,7 @@ mod status { } impl Phase { - pub fn from_str(phase: &str) -> Self { + fn from_str(phase: &str) -> Self { use Phase::*; match phase { "queued" => Queued, From cab443b13971eb2992638347449eca7247f13f23 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:17:45 -0800 Subject: [PATCH 54/92] all: Fix warnings from clippy::single_match --- chain/ethereum/src/ingestor.rs | 8 ++------ node/src/config.rs | 5 ++--- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/chain/ethereum/src/ingestor.rs b/chain/ethereum/src/ingestor.rs index ed16710ac4a..e7821248d90 100644 --- a/chain/ethereum/src/ingestor.rs +++ b/chain/ethereum/src/ingestor.rs @@ -252,12 +252,8 @@ impl BlockIngestor for PollingBlockIngestor { .logger .new(o!("provider" => eth_adapter.provider().to_string())); - match self.do_poll(&logger, eth_adapter).await { - // Some polls will fail due to transient issues - Err(err) => { - error!(logger, "Trying again after block polling failed: {}", err); - } - Ok(()) => (), + if let Err(err) = self.do_poll(&logger, eth_adapter).await { + error!(logger, "Trying again after block polling failed: {}", err); } if ENV_VARS.cleanup_blocks { diff --git a/node/src/config.rs b/node/src/config.rs index 613f4826e21..c655e270edc 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -872,9 +872,8 @@ impl<'de> Deserialize<'de> for Provider { return Err(serde::de::Error::custom("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified")); } - match v { - ProviderDetails::Firehose(ref mut firehose) => firehose.rules = nodes, - _ => {} + if let ProviderDetails::Firehose(ref mut firehose) = v { + firehose.rules = nodes } v From a509d66b746f48a5bdadfc859b68404e3b8dec89 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:20:50 -0800 Subject: [PATCH 55/92] all: Fix warnings from clippy::suspicious_doc_comments --- graph/src/ext/mod.rs | 2 +- runtime/wasm/src/asc_abi/class.rs | 4 ++-- runtime/wasm/src/to_from/mod.rs | 4 ++-- store/postgres/src/block_range.rs | 2 +- store/postgres/src/relational_queries.rs | 14 +++++++------- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/graph/src/ext/mod.rs b/graph/src/ext/mod.rs index 4e9773f7bd3..375e8f62340 100644 --- a/graph/src/ext/mod.rs +++ b/graph/src/ext/mod.rs @@ -1,2 +1,2 @@ -///! Extension traits for external types. +//! Extension traits for external types. pub mod futures; diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index e33e788bdd9..0fac865ab0e 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -1,3 +1,5 @@ +//! Rust types that have with a direct correspondence to an Asc class, +//! with their `AscType` implementations. use async_trait::async_trait; use ethabi; @@ -18,8 +20,6 @@ use graph_runtime_derive::AscType; use crate::asc_abi::{v0_0_4, v0_0_5}; use semver::Version; -///! Rust types that have with a direct correspondence to an Asc class, -///! with their `AscType` implementations. /// Wrapper of ArrayBuffer for multiple AssemblyScript versions. 
/// It just delegates its method calls to the correct mappings apiVersion. pub enum ArrayBuffer { diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index 55f1f8b2316..d3dc07c9afe 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -1,3 +1,5 @@ +//! Implementations of `ToAscObj` and `FromAscObj` for Rust types. +//! Standard Rust types go in `mod.rs` and external types in `external.rs`. use anyhow::anyhow; use async_trait::async_trait; use std::collections::HashMap; @@ -14,8 +16,6 @@ use graph::{ use crate::asc_abi::class::*; -///! Implementations of `ToAscObj` and `FromAscObj` for Rust types. -///! Standard Rust types go in `mod.rs` and external types in `external.rs`. mod external; #[async_trait] diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index 51dbc4d1b9a..4383ab80d7e 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -1,8 +1,8 @@ +//! Utilities to deal with block numbers and block ranges use derive_more::Constructor; use diesel::pg::Pg; use diesel::query_builder::{AstPass, QueryFragment}; use diesel::result::QueryResult; -///! Utilities to deal with block numbers and block ranges use diesel::serialize::{Output, ToSql}; use diesel::sql_types::{Integer, Range}; use graph::env::ENV_VARS; diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index c40bf1fb744..ef066b208c8 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1,10 +1,10 @@ -///! This module contains the gory details of using Diesel to query -///! a database schema that is not known at compile time. The code in this -///! module is mostly concerned with constructing SQL queries and some -///! helpers for serializing and deserializing entities. -///! -///! Code in this module works very hard to minimize the number of allocations -///! that it performs +//! This module contains the gory details of using Diesel to query +//! a database schema that is not known at compile time. The code in this +//! module is mostly concerned with constructing SQL queries and some +//! helpers for serializing and deserializing entities. +//! +//! Code in this module works very hard to minimize the number of allocations +//! 
that it performs use diesel::pg::Pg; use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId}; use diesel::query_dsl::RunQueryDsl; From 67f8cf48a576dfd9c0171876aa94b072d6623df3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:23:21 -0800 Subject: [PATCH 56/92] all: Fix warnings from clippy::suspicious_map --- chain/ethereum/src/data_source.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index b4af15d76cd..5be627baf25 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -1011,7 +1011,10 @@ impl DecoderHook { // We don't have time measurements for each call (though that would be nice) // Use the average time of all calls that we want to observe as the time for // each call - let to_observe = results.iter().map(|(_, source)| source.observe()).count() as f64; + let to_observe = results + .iter() + .filter(|(_, source)| source.observe()) + .count() as f64; let elapsed = start.elapsed().as_secs_f64() / to_observe; results From 2b9d0f8cebf23fb65b05207b6845c4dabb6678d9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:25:05 -0800 Subject: [PATCH 57/92] all: Fix warnings from clippy::suspicious_open_options --- graph/src/components/metrics/stopwatch.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/graph/src/components/metrics/stopwatch.rs b/graph/src/components/metrics/stopwatch.rs index a9236c5d10a..f9eb5ff78dc 100644 --- a/graph/src/components/metrics/stopwatch.rs +++ b/graph/src/components/metrics/stopwatch.rs @@ -229,6 +229,7 @@ impl StopwatchInner { .write(true) .append(false) .create(true) + .truncate(true) .open(section_map) .expect("can open file"); serde_json::to_writer(&file, &entries).expect("can write json"); From eda24bf8573524c1be70f6c8aaa2406d03f35c50 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:35:02 -0800 Subject: [PATCH 58/92] all: Admit defeat for clippy::too_many_arguments We have way too many functions that take too many arguments to make it worth marking them individually. Addressing this would be quite an undertaking, so we will live with it until some brave soul shows up and fixes it. --- Cargo.toml | 3 +++ chain/common/Cargo.toml | 3 +++ chain/ethereum/Cargo.toml | 3 +++ core/Cargo.toml | 3 +++ graph/Cargo.toml | 3 +++ graphql/Cargo.toml | 3 +++ node/Cargo.toml | 3 +++ runtime/wasm/Cargo.toml | 3 +++ store/postgres/Cargo.toml | 3 +++ 9 files changed, 27 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 9097b9b399d..441f5d26d15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,6 +115,9 @@ parking_lot = "0.12.4" sqlparser-latest = { version = "0.57.0", package = "sqlparser", features = ["visitor"] } tokio-util = "0.7.15" +[workspace.lints.clippy] +too_many_arguments = "allow" + # Incremental compilation on Rust 1.58 causes an ICE on build. As soon as graph node builds again, these can be removed. 
[profile.test] incremental = false diff --git a/chain/common/Cargo.toml b/chain/common/Cargo.toml index eef11ed85a3..1f945ea984e 100644 --- a/chain/common/Cargo.toml +++ b/chain/common/Cargo.toml @@ -10,3 +10,6 @@ protobuf = "3.0.2" protobuf-parse = "3.7.2" anyhow = "1" heck = "0.5" + +[lints] +workspace = true diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index f3780ddd224..17eafbeaaa0 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -29,3 +29,6 @@ base64 = "0" [build-dependencies] tonic-build = { workspace = true } + +[lints] +workspace = true diff --git a/core/Cargo.toml b/core/Cargo.toml index 5d946ed1e6e..a4b38367213 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -34,3 +34,6 @@ tokio-util.workspace = true [dev-dependencies] tower-test = { git = "https://github.com/tower-rs/tower.git" } wiremock = "0.6.5" + +[lints] +workspace = true diff --git a/graph/Cargo.toml b/graph/Cargo.toml index e8149e51086..bf1d04d15e0 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -111,3 +111,6 @@ wiremock = "0.6.5" [build-dependencies] tonic-build = { workspace = true } + +[lints] +workspace = true diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index d9cb14684f6..898bfc15f9b 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -14,3 +14,6 @@ stable-hash_legacy = { git = "https://github.com/graphprotocol/stable-hash", bra parking_lot = "0.12" anyhow = "1.0" async-recursion = "1.1.1" + +[lints] +workspace = true diff --git a/node/Cargo.toml b/node/Cargo.toml index b60128772af..30d5788d8a1 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -43,3 +43,6 @@ json-structural-diff = { version = "0.2", features = ["colorize"] } # Dependencies related to Amp subgraphs tokio-util.workspace = true + +[lints] +workspace = true diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index e2260a7bb59..c934cc943be 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -21,3 +21,6 @@ wasm-instrument = { version = "0.2.0", features = ["std", "sign_ext"] } parity-wasm = { version = "0.45", features = ["std", "sign_ext"] } serde_yaml = { workspace = true } + +[lints] +workspace = true diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 2e577af94c3..a6b98d71cdc 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -42,3 +42,6 @@ serde_yaml.workspace = true [dev-dependencies] clap.workspace = true graphql-parser = "0.4.1" + +[lints] +workspace = true From 25b4b00333eb4203e988e0d28100545037574884 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:40:02 -0800 Subject: [PATCH 59/92] all: Fix warnings from clippy::to_string_trait_impl --- graph/src/data_source/offchain.rs | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index 282f8aeff23..35e62a3cdcc 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -63,21 +63,17 @@ impl OffchainDataSourceKind { } } -impl ToString for OffchainDataSourceKind { - fn to_string(&self) -> String { +impl fmt::Display for OffchainDataSourceKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // This is less performant than hardcoding the values but makes it more difficult // to be used incorrectly, since this map is quite small it should be fine. 
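On the to_string_trait_impl lint fixed here: `ToString` is blanket-implemented for every `Display` type, so clippy flags direct `impl ToString` blocks; implementing `Display` instead, as this patch does, still provides `to_string()` and additionally works with `format!` and friends. A minimal sketch with a hypothetical enum, not the one from this codebase:

    use std::fmt;

    enum Kind {
        Ipfs,
        Arweave,
    }

    impl fmt::Display for Kind {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            let label = match self {
                Kind::Ipfs => "ipfs",
                Kind::Arweave => "arweave",
            };
            write!(f, "{}", label)
        }
    }

    fn main() {
        // `to_string` comes via the blanket `impl<T: fmt::Display> ToString for T`.
        assert_eq!(Kind::Ipfs.to_string(), "ipfs");
    }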
- OFFCHAIN_KINDS + let label = OFFCHAIN_KINDS .iter() - .find_map(|(str, kind)| { - if kind.eq(self) { - Some(str.to_string()) - } else { - None - } - }) + .find_map(|(str, kind)| if kind.eq(self) { Some(*str) } else { None }) // the kind is validated based on OFFCHAIN_KINDS so it's guaranteed to exist - .unwrap() + .unwrap_or(""); + + write!(f, "{}", label) } } From 383c9da21cdd458897a77a94e7028daa4123ff79 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:49:24 -0800 Subject: [PATCH 60/92] all: Admit defeat for clippy::type_complexity We have some gnarly types in a few places, but addressing that properly, e.g., through helper structs will take a bit of work --- Cargo.toml | 1 + store/test-store/Cargo.toml | 3 +++ tests/Cargo.toml | 3 +++ 3 files changed, 7 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 441f5d26d15..91b4df27cec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,6 +117,7 @@ tokio-util = "0.7.15" [workspace.lints.clippy] too_many_arguments = "allow" +type_complexity = "allow" # Incremental compilation on Rust 1.58 causes an ICE on build. As soon as graph node builds again, these can be removed. [profile.test] diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index c16db6c5d11..2f0d24a9489 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -22,3 +22,6 @@ tokio = { workspace = true } [dev-dependencies] hex = "0.4.3" pretty_assertions = "1.4.1" + +[lints] +workspace = true diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 737125a5533..0ce0757b049 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -29,3 +29,6 @@ tokio-util.workspace = true [dev-dependencies] anyhow = "1.0.100" tokio-stream = "0.1" + +[lints] +workspace = true From b7d517e75590c5c87637bfe258b49cf26abd6f85 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:51:52 -0800 Subject: [PATCH 61/92] all: Fix warnings from clippy::unnecessary_lazy_evaluations --- store/postgres/src/relational/index.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/store/postgres/src/relational/index.rs b/store/postgres/src/relational/index.rs index 3019be3ddf1..1465b52838a 100644 --- a/store/postgres/src/relational/index.rs +++ b/store/postgres/src/relational/index.rs @@ -49,7 +49,7 @@ impl Display for Method { impl Method { fn parse(method: String) -> Self { - method.parse().unwrap_or_else(|()| Method::Unknown(method)) + method.parse().unwrap_or(Method::Unknown(method)) } } From 274046a3d4ca88e11aa453fca39a8161d695de76 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 8 Jan 2026 17:54:27 -0800 Subject: [PATCH 62/92] all: Fix warnings from clippy::unnecessary_to_owned --- graph/src/components/link_resolver/file.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/graph/src/components/link_resolver/file.rs b/graph/src/components/link_resolver/file.rs index 37224c55495..593dccc641b 100644 --- a/graph/src/components/link_resolver/file.rs +++ b/graph/src/components/link_resolver/file.rs @@ -76,8 +76,7 @@ impl FileLinkResolver { // Create a path to the manifest based on the current resolver's // base directory or default to using the deployment string as path // If the deployment string is an alias, use the aliased path - let manifest_path = if let Some(aliased) = self.aliases.get(&manifest_path_str.to_string()) - { + let manifest_path = if let Some(aliased) = self.aliases.get(manifest_path_str) { aliased.clone() } else { match &resolver.base_dir { From 
127d8de813f086e58cb9190cb1b1161527091145 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 10:36:21 -0800 Subject: [PATCH 63/92] all: Suppress warnings from clippy::unnecessary_unwrap --- chain/ethereum/src/polling_block_stream.rs | 1 + graph/src/blockchain/firehose_block_stream.rs | 4 +++- store/postgres/src/relational/ddl.rs | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/ethereum/src/polling_block_stream.rs b/chain/ethereum/src/polling_block_stream.rs index 196c43a35a9..705fdaaaf7f 100644 --- a/chain/ethereum/src/polling_block_stream.rs +++ b/chain/ethereum/src/polling_block_stream.rs @@ -406,6 +406,7 @@ impl PollingBlockStreamContext { // block number, and checking to see if the block we found matches the // subgraph_ptr. + #[allow(clippy::unnecessary_unwrap)] let subgraph_ptr = subgraph_ptr.expect("subgraph block pointer should not be `None` here"); diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 04558aab619..10a4450c7d9 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -366,10 +366,12 @@ async fn process_firehose_response>( let previous_block_ptr = block.parent_ptr(); if previous_block_ptr.is_some() && previous_block_ptr.as_ref() != subgraph_current_block { + #[allow(clippy::unnecessary_unwrap)] + let firehose_start_block = previous_block_ptr.unwrap(); warn!(&logger, "Firehose selected first streamed block's parent should match subgraph start block, reverting to last know final chain segment"; "subgraph_current_block" => &subgraph_current_block.unwrap(), - "firehose_start_block" => &previous_block_ptr.unwrap(), + "firehose_start_block" => &firehose_start_block, ); let mut revert_to = mapper diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index e7160edbe22..cdb162978b6 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -406,6 +406,7 @@ impl Table { self.create_table(out)?; self.create_time_travel_indexes(catalog, out)?; if index_def.is_some() && ENV_VARS.postpone_attribute_index_creation { + #[allow(clippy::unnecessary_unwrap)] let arr = index_def .unwrap() .indexes_for_table(&self.nsp, &self.name.to_string(), self, false, false, false) From 206e1875b73a33f86c317c0219b22831a065bf73 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 10:38:11 -0800 Subject: [PATCH 64/92] all: Fix warnings from clippy::useless_conversion --- server/index-node/src/resolver.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 43ab376dea7..2a479193bc2 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -1,5 +1,4 @@ use std::collections::BTreeMap; -use std::convert::TryInto; use async_trait::async_trait; use graph::data::query::Trace; @@ -360,9 +359,7 @@ where let block_number: i32 = field .get_required::("blockNumber") - .expect("Valid blockNumber required") - .try_into() - .unwrap(); + .expect("Valid blockNumber required"); let block_hash = field .get_required::("blockHash") From 818d6d386c07c7be0134859f742c870b7e029937 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 10:46:18 -0800 Subject: [PATCH 65/92] all: Fix warnings from clippy::wrong_self_convention --- graph/src/components/store/write.rs | 4 ++-- store/postgres/src/deployment.rs | 4 ++-- store/postgres/src/detail.rs | 8 
++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 22e9fab729f..60091e035d7 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -207,7 +207,7 @@ impl EntityModification { } /// Turn an `Overwrite` into an `Insert`, return an error if this is a `Remove` - fn as_insert(self, entity_type: &EntityType) -> Result { + fn into_insert(self, entity_type: &EntityType) -> Result { use EntityModification::*; match self { @@ -510,7 +510,7 @@ impl RowGroup { Overwrite { block, .. }, ) => { prev_row.clamp(*block)?; - let row = row.as_insert(&self.entity_type)?; + let row = row.into_insert(&self.entity_type)?; self.push_row(row); } (Insert { end: None, .. } | Overwrite { end: None, .. }, Remove { block, .. }) => { diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 017e74591c8..239ccdf61b3 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -110,7 +110,7 @@ impl OnSync { } } - pub fn to_str(&self) -> &str { + pub fn to_str(&self) -> &'static str { match self { OnSync::None => "none", OnSync::Activate => "activate", @@ -118,7 +118,7 @@ impl OnSync { } } - fn to_sql(&self) -> Option<&str> { + fn to_sql(self) -> Option<&'static str> { match self { OnSync::None => None, OnSync::Activate | OnSync::Replace => Some(self.to_str()), diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index df00b46ceee..74a6d546a4a 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -546,7 +546,7 @@ struct StoredSubgraphManifest { } impl StoredSubgraphManifest { - fn as_manifest(self, schema: &InputSchema) -> SubgraphManifestEntity { + fn into_manifest_entity(self, schema: &InputSchema) -> SubgraphManifestEntity { let e: Vec<_> = self .entities_with_causality_region .into_iter() @@ -568,7 +568,7 @@ impl StoredSubgraphManifest { struct StoredDeploymentEntity(crate::detail::DeploymentDetail, StoredSubgraphManifest); impl StoredDeploymentEntity { - fn as_subgraph_deployment( + fn into_subgraph_deployment_entity( self, schema: &InputSchema, ) -> Result { @@ -611,7 +611,7 @@ impl StoredDeploymentEntity { .map_err(|b| internal_error!("invalid debug fork `{}`", b))?; Ok(SubgraphDeploymentEntity { - manifest: manifest.as_manifest(schema), + manifest: manifest.into_manifest_entity(schema), failed: detail.failed, health: detail.health.into(), synced_at: detail.synced_at, @@ -653,7 +653,7 @@ pub async fn deployment_entity( .await .map(DeploymentDetail::from)?; - StoredDeploymentEntity(detail, manifest).as_subgraph_deployment(schema) + StoredDeploymentEntity(detail, manifest).into_subgraph_deployment_entity(schema) } #[derive(Queryable, Identifiable, Insertable)] From 862abdb3b204a073d18d11f72c60e3ed5b04b698 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 10:58:06 -0800 Subject: [PATCH 66/92] justfile: Make 'just lint' run clippy with all default lints turned on Note that that also means that we run 'cargo clippy' in CI because the CI workflow runs 'just lint' --- justfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/justfile b/justfile index 32ae928faa3..128be063fb3 100644 --- a/justfile +++ b/justfile @@ -7,8 +7,8 @@ format *EXTRA_FLAGS: cargo fmt --all {{EXTRA_FLAGS}} # Run Clippy linting (cargo clippy) -lint: - cargo clippy --no-deps -- --allow warnings +lint *EXTRA_FLAGS: + cargo clippy {{EXTRA_FLAGS}} # Check Rust code (cargo check) 
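The renames in the wrong_self_convention patch follow the standard Rust naming rules that clippy enforces: `as_*` is a cheap borrow taking `&self`, `to_*` builds a new value, and `into_*` consumes `self`. The old `as_insert` and `as_manifest` took `self` by value, hence the new `into_*` names. A minimal sketch on a hypothetical type:

    struct Wrapper(String);

    impl Wrapper {
        // `as_*`: cheap view that borrows.
        fn as_str(&self) -> &str {
            &self.0
        }

        // `to_*`: returns a newly built value; `self` stays usable.
        fn to_upper(&self) -> String {
            self.0.to_uppercase()
        }

        // `into_*`: consumes `self` and hands over ownership.
        fn into_inner(self) -> String {
            self.0
        }
    }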
check *EXTRA_FLAGS: From c55aeef84d83287731ffd66829bf3251d2192a4a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 11:46:57 -0800 Subject: [PATCH 67/92] all: Run 'cargo clippy --fix --all-targets' --- chain/ethereum/src/adapter.rs | 61 ++++------- chain/ethereum/src/codec.rs | 30 ++--- chain/ethereum/src/network.rs | 103 +++++++++--------- chain/ethereum/src/tests.rs | 4 +- chain/near/src/adapter.rs | 5 +- chain/near/src/chain.rs | 12 +- graph/examples/append_row.rs | 2 +- graph/examples/validate.rs | 4 +- graph/src/amp/codec/test_fixtures.rs | 4 +- graph/src/blockchain/firehose_block_stream.rs | 36 +++--- graph/src/components/store/write.rs | 4 +- graph/src/components/subgraph/settings.rs | 12 +- graph/src/data/store/mod.rs | 2 +- graph/src/data/store/scalar/bigdecimal.rs | 6 +- graph/src/firehose/endpoints.rs | 12 +- graph/src/ipfs/gateway_client.rs | 2 +- graph/src/ipfs/rpc_client.rs | 2 +- graph/src/schema/api.rs | 4 +- graph/src/schema/input/mod.rs | 5 +- graph/src/util/ogive.rs | 16 +-- graphql/src/store/query.rs | 8 +- node/src/config.rs | 25 ++--- runtime/test/src/common.rs | 2 +- runtime/test/src/test.rs | 32 +++--- runtime/test/src/test/abi.rs | 12 +- runtime/test/src/test_padding.rs | 4 +- server/graphman/tests/deployment_mutation.rs | 2 +- server/http/src/request.rs | 3 +- server/http/src/service.rs | 2 +- server/index-node/src/service.rs | 3 +- store/postgres/src/relational/ddl_tests.rs | 4 +- store/postgres/src/sql/mod.rs | 4 +- store/postgres/src/vid_batcher.rs | 1 - .../tests/chain/ethereum/manifest.rs | 21 ++-- store/test-store/tests/graph/entity_cache.rs | 2 +- .../test-store/tests/graphql/introspection.rs | 4 +- store/test-store/tests/graphql/query.rs | 23 ++-- store/test-store/tests/postgres/graft.rs | 6 +- store/test-store/tests/postgres/relational.rs | 74 ++++++------- .../tests/postgres/relational_bytes.rs | 34 +++--- store/test-store/tests/postgres/store.rs | 24 ++-- store/test-store/tests/postgres/subgraph.rs | 50 ++++----- store/test-store/tests/postgres/writable.rs | 12 +- tests/tests/integration_tests.rs | 6 +- tests/tests/runner_tests.rs | 24 ++-- 45 files changed, 321 insertions(+), 387 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index cfc05feb6fd..50b0d743fd9 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -1410,7 +1410,7 @@ mod tests { filter.event_signatures.sort(); } assert_eq!(expected_log_filters, actual_log_filters); - assert_eq!(true, actual_send_all_block_headers); + assert!(actual_send_all_block_headers); } #[test] @@ -1477,7 +1477,7 @@ mod tests { } assert_eq!(expected_log_filters, actual_log_filters); - assert_eq!(true, actual_send_all_block_headers); + assert!(actual_send_all_block_headers); } #[test] @@ -1504,76 +1504,63 @@ mod tests { wildcard_signatures: HashSet::from_iter(vec![[11u8; 4]]), }; - assert_eq!( - false, - filter.matches(&call(address(2), vec![])), + assert!( + !filter.matches(&call(address(2), vec![])), "call with empty bytes are always ignore, whatever the condition" ); - assert_eq!( - false, - filter.matches(&call(address(4), vec![1; 36])), + assert!( + !filter.matches(&call(address(4), vec![1; 36])), "call with incorrect address should be ignored" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(1), vec![1; 36])), "call with correct address & signature should match" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(1), vec![1; 32])), "call with correct address & signature, but with incorrect input 
size should match" ); - assert_eq!( - false, - filter.matches(&call(address(1), vec![4u8; 36])), + assert!( + !filter.matches(&call(address(1), vec![4u8; 36])), "call with correct address but incorrect signature for a specific contract filter (i.e. matches some signatures) should be ignored" ); - assert_eq!( - false, - filter.matches(&call(address(0), vec![11u8; 36])), + assert!( + !filter.matches(&call(address(0), vec![11u8; 36])), "this signature should not match filter1, this avoid false passes if someone changes the code" ); - assert_eq!( - false, - filter2.matches(&call(address(1), vec![10u8; 36])), + assert!( + !filter2.matches(&call(address(1), vec![10u8; 36])), "this signature should not match filter2 because the address is not the expected one" ); - assert_eq!( - true, + assert!( filter2.matches(&call(address(0), vec![10u8; 36])), "this signature should match filter2 on the non wildcard clause" ); - assert_eq!( - true, + assert!( filter2.matches(&call(address(0), vec![11u8; 36])), "this signature should match filter2 on the wildcard clause" ); // extend filter1 and test the filter 2 stuff again filter.extend(filter2); - assert_eq!( - true, + assert!( filter.matches(&call(address(0), vec![11u8; 36])), "this signature should not match filter1, this avoid false passes if someone changes the code" ); - assert_eq!( - false, - filter.matches(&call(address(1), vec![10u8; 36])), + assert!( + !filter.matches(&call(address(1), vec![10u8; 36])), "this signature should not match filter2 because the address is not the expected one" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(0), vec![10u8; 36])), "this signature should match filter2 on the non wildcard clause" ); - assert_eq!( - true, + assert!( filter.matches(&call(address(0), vec![11u8; 36])), "this signature should match filter2 on the wildcard clause" ); @@ -1673,7 +1660,7 @@ mod tests { base.extend(extension); - assert_eq!(true, base.trigger_every_block); + assert!(base.trigger_every_block); } #[test] @@ -1693,7 +1680,7 @@ mod tests { base.extend(extension); - assert_eq!(true, base.trigger_every_block); + assert!(base.trigger_every_block); assert_eq!( HashSet::from_iter(vec![(10, address(2))]), base.contract_addresses, @@ -1717,7 +1704,7 @@ mod tests { base.extend(extension); - assert_eq!(true, base.trigger_every_block); + assert!(base.trigger_every_block); assert_eq!( HashSet::from_iter(vec![(10, address(2)), (10, address(1))]), base.contract_addresses, diff --git a/chain/ethereum/src/codec.rs b/chain/ethereum/src/codec.rs index cca09ea10cb..e16f207c550 100644 --- a/chain/ethereum/src/codec.rs +++ b/chain/ethereum/src/codec.rs @@ -499,6 +499,21 @@ impl BlockchainBlock for HeaderOnlyBlock { } } +fn get_to_address(trace: &TransactionTrace) -> Result, Error> { + // Try to detect contract creation transactions, which have no 'to' address + let is_contract_creation = trace.to.is_empty() + || trace + .calls + .first() + .is_some_and(|call| CallType::try_from(call.call_type) == Ok(CallType::Create)); + + if is_contract_creation { + Ok(None) + } else { + Ok(Some(trace.to.try_decode_proto("transaction to address")?)) + } +} + #[cfg(test)] mod test { use graph::{blockchain::Block as _, prelude::chrono::Utc}; @@ -529,18 +544,3 @@ mod test { ); } } - -fn get_to_address(trace: &TransactionTrace) -> Result, Error> { - // Try to detect contract creation transactions, which have no 'to' address - let is_contract_creation = trace.to.is_empty() - || trace - .calls - .first() - .is_some_and(|call| CallType::try_from(call.call_type) == 
Ok(CallType::Create)); - - if is_contract_creation { - Ok(None) - } else { - Ok(Some(trace.to.try_decode_proto("transaction to address")?)) - } -} diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 23835e8198e..8d53a820486 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -352,35 +352,35 @@ mod tests { }; // Test all real combinations of capability comparisons - assert_eq!(false, &full >= &archive); - assert_eq!(false, &full >= &traces); - assert_eq!(false, &full >= &archive_traces); - assert_eq!(true, &full >= &full); - assert_eq!(false, &full >= &full_traces); - - assert_eq!(true, &archive >= &archive); - assert_eq!(false, &archive >= &traces); - assert_eq!(false, &archive >= &archive_traces); - assert_eq!(true, &archive >= &full); - assert_eq!(false, &archive >= &full_traces); - - assert_eq!(false, &traces >= &archive); - assert_eq!(true, &traces >= &traces); - assert_eq!(false, &traces >= &archive_traces); - assert_eq!(true, &traces >= &full); - assert_eq!(true, &traces >= &full_traces); - - assert_eq!(true, &archive_traces >= &archive); - assert_eq!(true, &archive_traces >= &traces); - assert_eq!(true, &archive_traces >= &archive_traces); - assert_eq!(true, &archive_traces >= &full); - assert_eq!(true, &archive_traces >= &full_traces); - - assert_eq!(false, &full_traces >= &archive); - assert_eq!(true, &full_traces >= &traces); - assert_eq!(false, &full_traces >= &archive_traces); - assert_eq!(true, &full_traces >= &full); - assert_eq!(true, &full_traces >= &full_traces); + assert!(!(full >= archive)); + assert!(!(full >= traces)); + assert!(!(full >= archive_traces)); + assert!(full >= full); + assert!(!(full >= full_traces)); + + assert!(archive >= archive); + assert!(!(archive >= traces)); + assert!(!(archive >= archive_traces)); + assert!(archive >= full); + assert!(!(archive >= full_traces)); + + assert!(!(traces >= archive)); + assert!(traces >= traces); + assert!(!(traces >= archive_traces)); + assert!(traces >= full); + assert!(traces >= full_traces); + + assert!(archive_traces >= archive); + assert!(archive_traces >= traces); + assert!(archive_traces >= archive_traces); + assert!(archive_traces >= full); + assert!(archive_traces >= full_traces); + + assert!(!(full_traces >= archive)); + assert!(full_traces >= traces); + assert!(!(full_traces >= archive_traces)); + assert!(full_traces >= full); + assert!(full_traces >= full_traces); } #[graph::test] @@ -463,16 +463,15 @@ mod tests { }) .await .unwrap(); - assert_eq!(adapter.is_call_only(), false); + assert!(!adapter.is_call_only()); } // Check limits { let adapter = adapters.call_or_cheapest(None).unwrap(); assert!(adapter.is_call_only()); - assert_eq!( - adapters.call_or_cheapest(None).unwrap().is_call_only(), - false + assert!( + !adapters.call_or_cheapest(None).unwrap().is_call_only() ); } @@ -485,7 +484,7 @@ mod tests { traces: false, })) .unwrap(); - assert_eq!(adapter.is_call_only(), false); + assert!(!adapter.is_call_only()); } } @@ -553,11 +552,11 @@ mod tests { // verify that after all call_only were exhausted, we can still // get normal adapters - let keep: Vec> = vec![0; 10] + let keep: Vec> = [0; 10] .iter() .map(|_| adapters.call_or_cheapest(None).unwrap()) .collect(); - assert_eq!(keep.iter().any(|a| !a.is_call_only()), false); + assert!(!keep.iter().any(|a| !a.is_call_only())); } #[graph::test] @@ -621,9 +620,8 @@ mod tests { // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_call_adapter), 2); 
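Much of the mechanical churn in this `--all-targets` pass comes from clippy::bool_assert_comparison: asserting equality against a bool literal becomes a plain `assert!`. A small self-contained sketch of the rewrite, with illustrative values:

    #[cfg(test)]
    mod tests {
        #[test]
        fn bool_asserts() {
            let traces_supported = true;
            // Instead of `assert_eq!(traces_supported, true)`:
            assert!(traces_supported);
            // Instead of `assert_eq!(2 > 3, false)`:
            assert!(!(2 > 3));
        }
    }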
assert_eq!(Arc::strong_count(ð_adapter), 2); - assert_eq!( - adapters.call_or_cheapest(None).unwrap().is_call_only(), - false + assert!( + !adapters.call_or_cheapest(None).unwrap().is_call_only() ); } @@ -667,9 +665,8 @@ mod tests { .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_adapter), 2); - assert_eq!( - adapters.call_or_cheapest(None).unwrap().is_call_only(), - false + assert!( + !adapters.call_or_cheapest(None).unwrap().is_call_only() ); } @@ -690,25 +687,23 @@ mod tests { let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let chain_id: Word = "chain_id".into(); - let adapters = vec![ - fake_adapter( + let adapters = [fake_adapter( &logger, - &unavailable_provider, + unavailable_provider, &provider_metrics, &metrics, false, ) .await, - fake_adapter(&logger, &error_provider, &provider_metrics, &metrics, false).await, + fake_adapter(&logger, error_provider, &provider_metrics, &metrics, false).await, fake_adapter( &logger, - &no_error_provider, + no_error_provider, &provider_metrics, &metrics, false, ) - .await, - ]; + .await]; // Set errors metrics.report_for_test(&ProviderName::from(error_provider), false); @@ -813,7 +808,7 @@ mod tests { archive: true, traces: false, }, - adapter: fake_adapter(&logger, &error_provider, &provider_metrics, &metrics, false) + adapter: fake_adapter(&logger, error_provider, &provider_metrics, &metrics, false) .await, limit: SubgraphLimit::Unlimited, }); @@ -827,7 +822,7 @@ mod tests { }, adapter: fake_adapter( &logger, - &no_error_provider, + no_error_provider, &provider_metrics, &metrics, false, @@ -891,7 +886,7 @@ mod tests { }, adapter: fake_adapter( &logger, - &no_error_provider, + no_error_provider, &provider_metrics, &metrics, false, @@ -903,7 +898,7 @@ mod tests { logger, vec![( chain_id.clone(), - no_available_adapter.iter().cloned().collect(), + no_available_adapter.to_vec(), )] .into_iter(), ProviderCheckStrategy::MarkAsValid, @@ -927,7 +922,7 @@ mod tests { call_only: bool, ) -> Arc { let transport = Transport::new_rpc( - Url::parse(&"http://127.0.0.1").unwrap(), + Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new(), endpoint_metrics.clone(), "", diff --git a/chain/ethereum/src/tests.rs b/chain/ethereum/src/tests.rs index 00873f8ea87..a925dc9d71e 100644 --- a/chain/ethereum/src/tests.rs +++ b/chain/ethereum/src/tests.rs @@ -109,7 +109,7 @@ fn test_trigger_ordering() { let expected = vec![log1, log2, call1, log3, call2, call4, call3, block2, block1] .into_iter() - .map(|t| Trigger::Chain(t)) + .map(Trigger::Chain) .collect::>(); assert_eq!(block_with_triggers.trigger_data, expected); @@ -207,7 +207,7 @@ fn test_trigger_dedup() { let expected = vec![log1, log2, call1, log3, call2, call3, block2, block1] .into_iter() - .map(|t| Trigger::Chain(t)) + .map(Trigger::Chain) .collect::>(); assert_eq!(block_with_triggers.trigger_data, expected); diff --git a/chain/near/src/adapter.rs b/chain/near/src/adapter.rs index b6e450f5741..8a9de408bad 100644 --- a/chain/near/src/adapter.rs +++ b/chain/near/src/adapter.rs @@ -339,7 +339,7 @@ mod test { let firehose_filter = decode_filter(filter); assert_eq!(firehose_filter.accounts, vec![String::from("acc1"),],); - let expected_pairs = vec![ + let expected_pairs = [ PrefixSuffixPair { prefix: "acc3".to_string(), suffix: "acc4".to_string(), @@ -356,8 +356,7 @@ mod test { let pairs = firehose_filter.prefix_and_suffix_pairs; assert_eq!(pairs.len(), 3); - assert_eq!( - true, + assert!( expected_pairs.iter().all(|x| pairs.contains(x)), 
"{:?}", pairs diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 6f25c64589a..d992c99bd6c 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -543,12 +543,9 @@ mod test { .collect(); assert_eq!(errs.len(), 2, "{:?}", ds); - let expected_errors = vec![ - "partial account prefixes can't have empty values".to_string(), - "partial account suffixes can't have empty values".to_string(), - ]; - assert_eq!( - true, + let expected_errors = ["partial account prefixes can't have empty values".to_string(), + "partial account suffixes can't have empty values".to_string()]; + assert!( expected_errors.iter().all(|err| errs.contains(err)), "{:?}", errs @@ -634,8 +631,7 @@ mod test { case.name, receipt.partial_accounts, ); - assert_eq!( - true, + assert!( case.expected .iter() .all(|x| receipt.partial_accounts.contains(x)), diff --git a/graph/examples/append_row.rs b/graph/examples/append_row.rs index 59f6fc3a5f2..f272c07cf82 100644 --- a/graph/examples/append_row.rs +++ b/graph/examples/append_row.rs @@ -83,7 +83,7 @@ pub fn main() -> anyhow::Result<()> { let id = &ids[pos]; let data = vec![ (Word::from("id"), Value::String(id.to_string())), - (Word::from("count"), Value::Int(block as i32)), + (Word::from("count"), Value::Int(block)), ]; let data = Arc::new(SCHEMA.make_entity(data).unwrap()); let md = if existing.contains(id) { diff --git a/graph/examples/validate.rs b/graph/examples/validate.rs index ed57feb1bec..a5a2159cff4 100644 --- a/graph/examples/validate.rs +++ b/graph/examples/validate.rs @@ -238,8 +238,8 @@ impl Sizer { .map_err(Into::into) })?; let (input_size, input_schema) = - self.size(|| InputSchema::parse_latest(raw, id.clone()).map_err(Into::into))?; - let (api_size, api) = self.size(|| input_schema.api_schema().map_err(Into::into))?; + self.size(|| InputSchema::parse_latest(raw, id.clone()))?; + let (api_size, api) = self.size(|| input_schema.api_schema())?; let api_text = api.document().to_string().len(); Ok(Sizes { gql: gql_size, diff --git a/graph/src/amp/codec/test_fixtures.rs b/graph/src/amp/codec/test_fixtures.rs index a55001439b2..43e53667243 100644 --- a/graph/src/amp/codec/test_fixtures.rs +++ b/graph/src/amp/codec/test_fixtures.rs @@ -35,9 +35,7 @@ pub static RECORD_BATCH: LazyLock = LazyLock::new(|| { let columns = record_batches .into_iter() - .map(|record_batch| record_batch.columns()) - .flatten() - .map(|column| column.clone()) + .flat_map(|record_batch| record_batch.columns()).cloned() .collect::>(); RecordBatch::try_new(Schema::try_merge(schemas).unwrap().into(), columns).unwrap() diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 10a4450c7d9..23fb9bd5264 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -465,48 +465,40 @@ mod tests { // Nothing - assert_eq!( - must_check_subgraph_continuity(&logger, &no_current_block, &no_cursor, 10), - false, + assert!( + !must_check_subgraph_continuity(&logger, &no_current_block, &no_cursor, 10), ); // No cursor, subgraph current block ptr <, ==, > than manifest start block num - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(9), &no_cursor, 10), - false, + assert!( + !must_check_subgraph_continuity(&logger, &some_current_block(9), &no_cursor, 10), ); - assert_eq!( + assert!( must_check_subgraph_continuity(&logger, &some_current_block(10), &no_cursor, 10), - true, ); - assert_eq!( + assert!( must_check_subgraph_continuity(&logger, 
&some_current_block(11), &no_cursor, 10), - true, ); // Some cursor, subgraph current block ptr <, ==, > than manifest start block num - assert_eq!( - must_check_subgraph_continuity(&logger, &no_current_block, &some_cursor, 10), - false, + assert!( + !must_check_subgraph_continuity(&logger, &no_current_block, &some_cursor, 10), ); - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(9), &some_cursor, 10), - false, + assert!( + !must_check_subgraph_continuity(&logger, &some_current_block(9), &some_cursor, 10), ); - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(10), &some_cursor, 10), - false, + assert!( + !must_check_subgraph_continuity(&logger, &some_current_block(10), &some_cursor, 10), ); - assert_eq!( - must_check_subgraph_continuity(&logger, &some_current_block(11), &some_cursor, 10), - false, + assert!( + !must_check_subgraph_continuity(&logger, &some_current_block(11), &some_cursor, 10), ); } } diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 60091e035d7..fb123b9a012 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -1042,7 +1042,7 @@ mod test { }) .collect::>(); let exp = Vec::from_iter( - exp.into_iter() + exp.iter() .map(|(block, values)| (*block, Vec::from_iter(values.iter().map(as_id)))), ); assert_eq!(exp, act); @@ -1154,7 +1154,7 @@ mod test { impl PartialEq<&[Mod]> for Group { fn eq(&self, mods: &&[Mod]) -> bool { - let mods: Vec<_> = mods.iter().map(|m| EntityModification::from(m)).collect(); + let mods: Vec<_> = mods.iter().map(EntityModification::from).collect(); self.group.rows == mods } } diff --git a/graph/src/components/subgraph/settings.rs b/graph/src/components/subgraph/settings.rs index e30261f7592..f1ad9459b37 100644 --- a/graph/src/components/subgraph/settings.rs +++ b/graph/src/components/subgraph/settings.rs @@ -77,18 +77,12 @@ mod test { let section = Settings::from_str(content).unwrap(); assert_eq!(section.settings.len(), 3); - let rule1 = match §ion.settings[0].pred { - Predicate::Name(name) => name, - }; + let Predicate::Name(rule1) = §ion.settings[0].pred; assert_eq!(rule1.as_str(), ".*"); - let rule2 = match §ion.settings[1].pred { - Predicate::Name(name) => name, - }; + let Predicate::Name(rule2) = §ion.settings[1].pred; assert_eq!(rule2.as_str(), "xxxxx"); - let rule1 = match §ion.settings[2].pred { - Predicate::Name(name) => name, - }; + let Predicate::Name(rule1) = §ion.settings[2].pred; assert_eq!(rule1.as_str(), ".*!$"); } } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 724401eaba0..f2013208578 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1349,7 +1349,7 @@ fn entity_hidden_vid() { // get returns nothing... 
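The settings.rs rewrite above is clippy's infallible_destructuring_match: a `match` whose single arm can never fail collapses into a `let` destructuring. A sketch with a hypothetical single-variant enum mirroring that code:

    enum Predicate {
        Name(String),
    }

    fn name_of(pred: &Predicate) -> &str {
        // With only one variant the pattern is irrefutable, so a plain
        // `let` binding replaces the `match`.
        let Predicate::Name(name) = pred;
        name
    }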
assert_eq!(entity.get(VID_FIELD), None); - assert_eq!(entity.contains_key(VID_FIELD), false); + assert!(!entity.contains_key(VID_FIELD)); // ...while vid is present assert_eq!(entity.vid(), 3i64); diff --git a/graph/src/data/store/scalar/bigdecimal.rs b/graph/src/data/store/scalar/bigdecimal.rs index 65738563a67..cc1ea2a59e9 100644 --- a/graph/src/data/store/scalar/bigdecimal.rs +++ b/graph/src/data/store/scalar/bigdecimal.rs @@ -631,8 +631,7 @@ mod test { #[test] fn big_decimal_stable() { - let cases = vec![ - ( + let cases = [( "28b09c9c3f3e2fe037631b7fbccdf65c37594073016d8bf4bb0708b3fda8066a", "0.1", ), @@ -651,8 +650,7 @@ mod test { ( "6b06b34cc714810072988dc46c493c66a6b6c2c2dd0030271aa3adf3b3f21c20", "98765587998098786876.0", - ), - ]; + )]; for (hash, s) in cases.iter() { let dec = BigDecimal::from_str(s).unwrap(); assert_eq!(*hash, hex::encode(crypto_stable_hash(dec))); diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index f0c2e376e6a..6a723605a67 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -972,14 +972,12 @@ mod test { #[test] fn available_capacity_ordering() { - assert_eq!( - AvailableCapacity::Unavailable < AvailableCapacity::Low, - true + assert!( + AvailableCapacity::Unavailable < AvailableCapacity::Low ); - assert_eq!( - AvailableCapacity::Unavailable < AvailableCapacity::High, - true + assert!( + AvailableCapacity::Unavailable < AvailableCapacity::High ); - assert_eq!(AvailableCapacity::Low < AvailableCapacity::High, true); + assert!(AvailableCapacity::Low < AvailableCapacity::High); } } diff --git a/graph/src/ipfs/gateway_client.rs b/graph/src/ipfs/gateway_client.rs index 862a46656af..0a7d3ac34ad 100644 --- a/graph/src/ipfs/gateway_client.rs +++ b/graph/src/ipfs/gateway_client.rs @@ -323,7 +323,7 @@ mod tests { ) .await; - assert!(matches!(result, Err(_))); + assert!(result.is_err()); } #[crate::test] diff --git a/graph/src/ipfs/rpc_client.rs b/graph/src/ipfs/rpc_client.rs index e5efcc122d0..92e9e787ec8 100644 --- a/graph/src/ipfs/rpc_client.rs +++ b/graph/src/ipfs/rpc_client.rs @@ -283,7 +283,7 @@ mod tests { ) .await; - assert!(matches!(result, Err(_))); + assert!(result.is_err()); } #[crate::test] diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index e6c218bbb48..40744765d9a 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -1319,7 +1319,7 @@ mod tests { TypeDefinition::Object(t) => ast::get_field(t, name), _ => None, } - .expect(&format!("Schema should contain a field named `{}`", name)) + .unwrap_or_else(|| panic!("Schema should contain a field named `{}`", name)) } #[test] @@ -2291,7 +2291,7 @@ type Gravatar @entity { TypeDefinition::Object(t) => ast::get_field(t, name), _ => None, } - .expect(&format!("Schema should contain a field named `{}`", name)) + .unwrap_or_else(|| panic!("Schema should contain a field named `{}`", name)) } const SCHEMA: &str = r#" diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index 7314dcc7ab1..bd6aa2d3017 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -2793,9 +2793,9 @@ mod validations { BaseSchema::parse(&schema, DeploymentHash::new("dummy").unwrap()).unwrap(); let res = validate(&schema); if ok { - assert!(matches!(res, Ok(_))); + assert!(res.is_ok()); } else { - assert!(matches!(res, Err(_))); + assert!(res.is_err()); assert!(matches!( res.unwrap_err()[0], SchemaValidationError::InterfaceImplementorsMixId(_, _) @@ -3094,7 +3094,6 @@ type Gravatar @entity { let files = { 
let mut files = std::fs::read_dir(dir) .unwrap() - .into_iter() .filter_map(|entry| entry.ok()) .map(|entry| entry.path()) .filter(|path| path.extension() == Some(OsString::from("graphql").as_os_str())) diff --git a/graph/src/util/ogive.rs b/graph/src/util/ogive.rs index f8e98e8291d..a90d414b038 100644 --- a/graph/src/util/ogive.rs +++ b/graph/src/util/ogive.rs @@ -192,17 +192,17 @@ mod tests { } // Check that the ogive is correct - assert_eq!(ogive.bin_size, 700 as f64 / 5 as f64); + assert_eq!(ogive.bin_size, 700_f64 / 5_f64); assert_eq!(ogive.range, 10..=60); // Test value method - for point in vec![20, 30, 45, 50, 60] { + for point in [20, 30, 45, 50, 60] { assert_eq!(ogive.value(point).unwrap(), f(point), "value for {}", point); } // Test next_point method - for step in vec![50, 140, 200] { - for value in vec![10, 20, 30, 35, 45, 50, 60] { + for step in [50, 140, 200] { + for value in [10, 20, 30, 35, 45, 50, 60] { assert_eq!( ogive.next_point(value, step).unwrap(), g(f(value) + step as i64).min(60), @@ -240,17 +240,17 @@ mod tests { } // Check that the ogive is correct - assert_eq!(ogive.bin_size, 700 as f64 / 1 as f64); + assert_eq!(ogive.bin_size, 700_f64 / 1_f64); assert_eq!(ogive.range, 10..=20); // Test value method - for point in vec![10, 15, 20] { + for point in [10, 15, 20] { assert_eq!(ogive.value(point).unwrap(), f(point), "value for {}", point); } // Test next_point method - for step in vec![50, 140, 200] { - for value in vec![10, 15, 20] { + for step in [50, 140, 200] { + for value in [10, 15, 20] { assert_eq!( ogive.next_point(value, step).unwrap(), g(f(value) + step as i64).min(20), diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 7cf3370597d..720f2844932 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -740,7 +740,7 @@ mod tests { field, std::u32::MAX, std::u32::MAX, - &*&INPUT_SCHEMA, + &INPUT_SCHEMA, ) .unwrap() } @@ -1044,7 +1044,7 @@ mod tests { &query_field, std::u32::MAX, std::u32::MAX, - &*INPUT_SCHEMA, + &INPUT_SCHEMA, ); assert!(result.is_err()); @@ -1102,7 +1102,7 @@ mod tests { &query_field, std::u32::MAX, std::u32::MAX, - &*INPUT_SCHEMA, + &INPUT_SCHEMA, ); assert!(result.is_err()); @@ -1196,7 +1196,7 @@ mod tests { &query_field, std::u32::MAX, std::u32::MAX, - &*INPUT_SCHEMA, + &INPUT_SCHEMA, ); assert!(result.is_err()); diff --git a/node/src/config.rs b/node/src/config.rs index c655e270edc..7da9bfd545a 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -1327,9 +1327,9 @@ mod tests { "#, ); - assert_eq!(true, actual.is_err()); + assert!(actual.is_err()); let err_str = actual.unwrap_err().to_string(); - assert_eq!(err_str.contains("missing field `url`"), true, "{}", err_str); + assert!(err_str.contains("missing field `url`"), "{}", err_str); } #[test] @@ -1342,11 +1342,10 @@ mod tests { "#, ); - assert_eq!(true, actual.is_err()); + assert!(actual.is_err()); let err_str = actual.unwrap_err().to_string(); - assert_eq!( + assert!( err_str.contains("missing field `features`"), - true, "{}", err_str ); @@ -1418,9 +1417,9 @@ mod tests { "#, ); - assert_eq!(true, actual.is_err()); + assert!(actual.is_err()); let err_str = actual.unwrap_err().to_string(); - assert_eq!(err_str.contains("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified"),true, "{}", err_str); + assert!(err_str.contains("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified"), "{}", err_str); } #[test] @@ -1536,11 +1535,10 @@ 
mod tests { details = { type = "firehose", url = "http://localhost:9000", features = ["bananas"]} "#, ).unwrap().validate(); - assert_eq!(true, actual.is_err(), "{:?}", actual); + assert!(actual.is_err(), "{:?}", actual); if let Err(error) = actual { - assert_eq!( - true, + assert!( error .to_string() .starts_with("supported firehose endpoint filters are:") @@ -1643,10 +1641,9 @@ mod tests { .unwrap(); let err = actual.validate(); - assert_eq!(true, err.is_err()); + assert!(err.is_err()); let err = err.unwrap_err(); - assert_eq!( - true, + assert!( err.to_string().contains("unique"), "result: {:?}", err @@ -1675,7 +1672,7 @@ mod tests { .unwrap(); let result = actual.validate(); - assert_eq!(true, result.is_ok(), "error: {:?}", result.unwrap_err()); + assert!(result.is_ok(), "error: {:?}", result.unwrap_err()); } #[test] diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index b0ec8018db2..cb5493fbe01 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -30,7 +30,7 @@ fn mock_host_exports( store: Arc, api_version: Version, ) -> HostExports { - let templates = vec![data_source::DataSourceTemplate::Onchain::( + let templates = [data_source::DataSourceTemplate::Onchain::( DataSourceTemplate { kind: String::from("ethereum/contract"), name: String::from("example template"), diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 44ca7f53f02..ceb9d21b9d2 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -190,7 +190,7 @@ impl WasmInstanceExt for WasmInstance { async fn invoke_export0_void(&mut self, f: &str) -> Result<(), Error> { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); func.call_async(&mut self.store.as_context_mut(), ()).await @@ -199,7 +199,7 @@ impl WasmInstanceExt for WasmInstance { async fn invoke_export0(&mut self, f: &str) -> AscPtr { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr: u32 = func @@ -212,7 +212,7 @@ impl WasmInstanceExt for WasmInstance { async fn takes_ptr_returns_ptr(&mut self, f: &str, arg: AscPtr) -> AscPtr { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr: u32 = func @@ -229,7 +229,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr = self.asc_new(arg).await.unwrap(); @@ -268,7 +268,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let arg0 = self.asc_new(arg0).await.unwrap(); @@ -297,7 +297,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(f) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let arg0 = self.asc_new(arg0).await.unwrap(); @@ -312,7 +312,7 @@ impl WasmInstanceExt for WasmInstance { async fn invoke_export0_val(&mut self, func: &str) -> V { let func = self .get_func(func) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); func.call_async(&mut self.store.as_context_mut(), ()) @@ -327,7 +327,7 @@ impl WasmInstanceExt for WasmInstance { { let func = self .get_func(func) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr = self.asc_new(v).await.unwrap(); @@ -339,7 +339,7 @@ impl 
WasmInstanceExt for WasmInstance { async fn takes_val_returns_ptr(&mut self, fn_name: &str, val: impl SyncWasmTy) -> AscPtr
{ let func = self .get_func(fn_name) - .typed(&self.store.as_context()) + .typed(self.store.as_context()) .unwrap() .clone(); let ptr: u32 = func @@ -848,7 +848,7 @@ async fn test_abort(api_version: Version, error_msg: &str) { .await; let res: Result<(), _> = instance .get_func("abort") - .typed(&instance.store.as_context()) + .typed(instance.store.as_context()) .unwrap() .call_async(&mut instance.store.as_context_mut(), ()) .await; @@ -1123,21 +1123,19 @@ fn test_detect_contract_calls(api_version: Version) { &wasm_file_path("abi_store_value.wasm", api_version.clone()), api_version.clone(), ); - assert_eq!( - data_source_without_calls + assert!( + !data_source_without_calls .mapping .requires_archive() - .unwrap(), - false + .unwrap() ); let data_source_with_calls = mock_data_source( &wasm_file_path("contract_calls.wasm", api_version.clone()), api_version, ); - assert_eq!( - data_source_with_calls.mapping.requires_archive().unwrap(), - true + assert!( + data_source_with_calls.mapping.requires_archive().unwrap() ); } diff --git a/runtime/test/src/test/abi.rs b/runtime/test/src/test/abi.rs index 886626a2871..304ccc90e96 100644 --- a/runtime/test/src/test/abi.rs +++ b/runtime/test/src/test/abi.rs @@ -20,7 +20,7 @@ async fn test_unbounded_loop(api_version: Version) { .0; let res: Result<(), _> = instance .get_func("loop") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .call_async(&mut instance.store.as_context_mut(), ()) .await; @@ -54,7 +54,7 @@ async fn test_unbounded_recursion(api_version: Version) { .await; let res: Result<(), _> = instance .get_func("rabbit_hole") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .call_async(&mut instance.store.as_context_mut(), ()) .await; @@ -251,7 +251,7 @@ async fn test_abi_ethabi_token_identity(api_version: Version) { let token_bool_ptr = instance.asc_new(&token_bool).await.unwrap(); let func = instance .get_func("token_to_bool") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); let boolean: i32 = func @@ -324,7 +324,7 @@ async fn test_abi_store_value(api_version: Version) { // Value::Null let func = instance .get_func("value_null") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); let ptr: u32 = func @@ -381,7 +381,7 @@ async fn test_abi_store_value(api_version: Version) { // Value::List let func = instance .get_func("array_from_values") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); @@ -578,7 +578,7 @@ async fn test_invalid_discriminant(api_version: Version) { let func = instance .get_func("invalid_discriminant") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); let ptr: u32 = func diff --git a/runtime/test/src/test_padding.rs b/runtime/test/src/test_padding.rs index ef750674178..93e1a642724 100644 --- a/runtime/test/src/test_padding.rs +++ b/runtime/test/src/test_padding.rs @@ -185,7 +185,7 @@ async fn manual_padding_should_fail(api_version: semver::Version) { let func = instance .get_func("test_padding_manual") - .typed(&mut instance.store.as_context_mut()) + .typed(instance.store.as_context_mut()) .unwrap() .clone(); @@ -220,7 +220,7 @@ async fn manual_padding_manualy_fixed_ok(api_version: semver::Version) { let func = instance .get_func("test_padding_manual") - .typed(&mut instance.store.as_context_mut()) + 
.typed(instance.store.as_context_mut()) .unwrap() .clone(); diff --git a/server/graphman/tests/deployment_mutation.rs b/server/graphman/tests/deployment_mutation.rs index b9da4672c90..fd2020ee740 100644 --- a/server/graphman/tests/deployment_mutation.rs +++ b/server/graphman/tests/deployment_mutation.rs @@ -464,7 +464,7 @@ fn graphql_can_unassign_deployments() { let is_node_null = subgraph_node_id["data"]["deployment"]["info"][0]["nodeId"].is_null(); assert_eq!(unassign_req, expected_resp); - assert_eq!(is_node_null, true); + assert!(is_node_null); }); } diff --git a/server/http/src/request.rs b/server/http/src/request.rs index c13d46af440..dcb837a70d0 100644 --- a/server/http/src/request.rs +++ b/server/http/src/request.rs @@ -164,8 +164,7 @@ mod tests { )), ), (String::from("int"), r::Value::Int(5)), - ] - .into_iter(), + ], )); assert_eq!(query.document, expected_query); diff --git a/server/http/src/service.rs b/server/http/src/service.rs index 06950c9ac5f..6eee41ce59a 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -524,7 +524,7 @@ mod tests { .body(Full::from("{}")) .unwrap(); - let response = service.call(request.into()).await; + let response = service.call(request).await; let content_type_header = response.status(); assert_eq!(content_type_header, StatusCode::OK); diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index 98a919b3aec..451a70f7b04 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -442,8 +442,7 @@ mod tests { )), ), (String::from("int"), r::Value::Int(5)), - ] - .into_iter(), + ], )); assert_eq!(query.document, expected_query); diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index 6a9a2fdfaee..e7d30caeca4 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -418,7 +418,7 @@ fn postponed_indexes_with_block_column() { .indexes_for_table( &dst_nsp, &table.name.to_string(), - &table, + table, true, false, false, @@ -432,7 +432,7 @@ fn postponed_indexes_with_block_column() { .indexes_for_table( &dst_nsp, &table.name.to_string(), - &table, + table, false, false, false, diff --git a/store/postgres/src/sql/mod.rs b/store/postgres/src/sql/mod.rs index 55917f854c4..0beb6cf894e 100644 --- a/store/postgres/src/sql/mod.rs +++ b/store/postgres/src/sql/mod.rs @@ -22,7 +22,7 @@ mod test { let namespace = Namespace::new("sgd0815".to_string()).unwrap(); let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string())); let catalog = Catalog::for_tests(site.clone(), BTreeSet::new()).unwrap(); - let layout = Layout::new(site, &schema, catalog).unwrap(); - layout + + Layout::new(site, &schema, catalog).unwrap() } } diff --git a/store/postgres/src/vid_batcher.rs b/store/postgres/src/vid_batcher.rs index 6c32d8a2cca..7fdb3597aeb 100644 --- a/store/postgres/src/vid_batcher.rs +++ b/store/postgres/src/vid_batcher.rs @@ -374,7 +374,6 @@ mod tests { (_, None) => { if start > end { // Expected, the batcher is exhausted - return; } else { panic!("step didn't return start and end") } diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index ff6eb06302c..a16e88ebfaa 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -100,8 +100,7 @@ impl LinkResolver for TextResolver { async fn cat(&self, _ctx: &LinkResolverContext, link: &Link) -> Result, anyhow::Error> { 
         self.texts
             .get(&link.link)
-            .ok_or(anyhow!("No text for {}", &link.link))
-            .map(Clone::clone)
+            .ok_or(anyhow!("No text for {}", &link.link)).cloned()
     }

     async fn get_block(
@@ -880,8 +879,8 @@ specVersion: 0.0.8
         .filter_map(|ds| ds.as_onchain().cloned())
         .collect::<Vec<_>>();

-    let data_source = onchain_data_sources.get(0).unwrap();
-    let validation_errors = data_source.validate(&LATEST_VERSION);
+    let data_source = onchain_data_sources.first().unwrap();
+    let validation_errors = data_source.validate(LATEST_VERSION);
     let filter = data_source.mapping.block_handlers[0].filter.clone();

     assert_eq!(0, validation_errors.len());
@@ -976,7 +975,7 @@ specVersion: 0.0.8
         .filter_map(|ds| ds.as_onchain().cloned())
         .collect::<Vec<_>>();

-    let data_source = onchain_data_sources.get(0).unwrap();
+    let data_source = onchain_data_sources.first().unwrap();
     let validation_errors = data_source.validate(LATEST_VERSION);
     let filters = data_source
         .mapping
@@ -1041,7 +1040,7 @@ specVersion: 0.0.8
         .filter_map(|ds| ds.as_onchain().cloned())
         .collect::<Vec<_>>();

-    let data_source = onchain_data_sources.get(0).unwrap();
+    let data_source = onchain_data_sources.first().unwrap();
     let validation_errors = data_source.validate(LATEST_VERSION);
     let filters = data_source
         .mapping
@@ -1103,12 +1102,12 @@ specVersion: 0.0.2
         .filter_map(|ds| ds.as_onchain().cloned())
         .collect::<Vec<_>>();

-    let data_source = onchain_data_sources.get(0).unwrap();
+    let data_source = onchain_data_sources.first().unwrap();
     let filter = data_source.mapping.block_handlers[0].filter.clone();
     let required_capabilities = NodeCapabilities::from_data_sources(&onchain_data_sources);

     assert_eq!(BlockHandlerFilter::Call, filter.unwrap());
-    assert_eq!(true, required_capabilities.traces);
+    assert!(required_capabilities.traces);
     assert_eq!("Qmmanifest", manifest.id.as_str());
 }

@@ -1151,12 +1150,12 @@ specVersion: 0.0.8
         .filter_map(|ds| ds.as_onchain().cloned())
         .collect::<Vec<_>>();

-    let data_source = onchain_data_sources.get(0).unwrap();
+    let data_source = onchain_data_sources.first().unwrap();
     let filter = data_source.mapping.block_handlers[0].filter.clone();
     let required_capabilities = NodeCapabilities::from_data_sources(&onchain_data_sources);

     assert_eq!(BlockHandlerFilter::Once, filter.unwrap());
-    assert_eq!(false, required_capabilities.traces);
+    assert!(!required_capabilities.traces);
     assert_eq!("Qmmanifest", manifest.id.as_str());
 }

@@ -1200,7 +1199,7 @@ specVersion: 0.0.2
     let required_capabilities = NodeCapabilities::from_data_sources(&onchain_data_sources);

     assert_eq!("Qmmanifest", manifest.id.as_str());
-    assert_eq!(true, required_capabilities.traces);
+    assert!(required_capabilities.traces);
 }

 #[test]
diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs
index be27d111fa8..0923b038254 100644
--- a/store/test-store/tests/graph/entity_cache.rs
+++ b/store/test-store/tests/graph/entity_cache.rs
@@ -335,7 +335,7 @@ async fn check_vid_sequence() {
     for n in 0..10 {
         let id = (10 - n).to_string();
-        let name = format!("Mogwai");
+        let name = "Mogwai".to_string();
         let mogwai_key = make_band_key(id.as_str());
         let mogwai_data = entity! { SCHEMA => id: id, name: name };
         cache
diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs
index 6607a04be05..4e8108044c8 100644
--- a/store/test-store/tests/graphql/introspection.rs
+++ b/store/test-store/tests/graphql/introspection.rs
@@ -173,7 +173,7 @@ fn compare(a: &r::Value, b: &r::Value, path: &mut Vec<String>) -> Option<(r::Val
                     path.push(la.len().to_string());
                     return different(&r::Value::Null, &lb[la.len()]);
                 }
-                return None;
+                None
             }
             _ => different(a, b),
         },
@@ -207,7 +207,7 @@ fn compare(a: &r::Value, b: &r::Value, path: &mut Vec<String>) -> Option<(r::Val
                     }
                 }
             }
-            return None;
+            None
         }
         _ => different(a, b),
     },
diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs
index e33c62fe7de..f206fe2644f 100644
--- a/store/test-store/tests/graphql/query.rs
+++ b/store/test-store/tests/graphql/query.rs
@@ -420,7 +420,7 @@ async fn insert_test_entities(
 ) -> Vec {
     entities
         .into_iter()
-        .map(|(typename, entities)| {
+        .flat_map(|(typename, entities)| {
             let entity_type = schema.entity_type(typename).unwrap();
             entities.into_iter().map(move |mut data| {
                 data.set_vid_if_empty();
@@ -430,7 +430,6 @@ async fn insert_test_entities(
                 }
             })
         })
-        .flatten()
         .collect()
 }

@@ -470,8 +469,8 @@ async fn insert_test_entities(
         (
             "Musician",
             vec![
-                entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"], favoriteCount: 10, birthDate: timestamp.clone(), vid: 0i64 },
-                entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100, birthDate: timestamp.clone(), vid: 1i64 },
+                entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"], favoriteCount: 10, birthDate: timestamp, vid: 0i64 },
+                entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100, birthDate: timestamp, vid: 1i64 },
             ],
         ),
         ("Publisher", vec![entity! { is => id: pub1, vid: 0i64 }]),
@@ -580,9 +579,9 @@ async fn insert_test_entities(
     let entities1 = vec![(
         "Musician",
         vec![
-            entity! { is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"], favoriteCount: 5, birthDate: timestamp.clone(), vid: 2i64 },
-            entity! { is => id: "m4", name: "Valerie", bands: Vec::<String>::new(), favoriteCount: 20, birthDate: timestamp.clone(), vid: 3i64 },
-            entity! { is => id: "m5", name: "Paul", mainBand: "b2", bands: vec!["b2"], favoriteCount: 2 , birthDate: timestamp.clone(), vid: 4i64 },
+            entity! { is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"], favoriteCount: 5, birthDate: timestamp, vid: 2i64 },
+            entity! { is => id: "m4", name: "Valerie", bands: Vec::<String>::new(), favoriteCount: 20, birthDate: timestamp, vid: 3i64 },
+            entity! { is => id: "m5", name: "Paul", mainBand: "b2", bands: vec!["b2"], favoriteCount: 2 , birthDate: timestamp, vid: 4i64 },
         ],
     )];
     let entities1 = insert_ops(&manifest.schema, entities1);
@@ -2121,7 +2120,7 @@ fn ignores_invalid_field_arguments() {
         },
         // With validations
         Err(e) => {
-            match e.get(0).unwrap() {
+            match e.first().unwrap() {
                 QueryError::ExecutionError(QueryExecutionError::ValidationError(
                     _pos,
                     message,
@@ -2156,7 +2155,7 @@ fn leaf_selection_mismatch() {
         }
         // With validations
         Err(e) => {
-            match e.get(0).unwrap() {
+            match e.first().unwrap() {
                 QueryError::ExecutionError(QueryExecutionError::ValidationError(
                     _pos,
                     message,
@@ -2192,7 +2191,7 @@ fn leaf_selection_mismatch() {
         }
         // With validations
         Err(e) => {
-            match e.get(0).unwrap() {
+            match e.first().unwrap() {
                 QueryError::ExecutionError(QueryExecutionError::ValidationError(
                     _pos,
                     message,
@@ -2231,7 +2230,7 @@ fn missing_variable() {
             assert_eq!(exp, *data);
         }
         // With GraphQL validations active, this query fails
-        Err(e) => match e.get(0).unwrap() {
+        Err(e) => match e.first().unwrap() {
             QueryError::ExecutionError(QueryExecutionError::ValidationError(_pos, message)) => {
                 assert_eq!(message, "Variable \"$first\" is not defined.");
             }
@@ -2262,7 +2261,7 @@ fn missing_variable() {
             assert_eq!(exp, *data);
         }
         // With GraphQL validations active, this query fails
-        Err(e) => match e.get(0).unwrap() {
+        Err(e) => match e.first().unwrap() {
             QueryError::ExecutionError(QueryExecutionError::ValidationError(_pos, message)) => {
                 assert_eq!(message, "Variable \"$where\" is not defined.");
             }
diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs
index 6527f12756b..8edeac388fd 100644
--- a/store/test-store/tests/postgres/graft.rs
+++ b/store/test-store/tests/postgres/graft.rs
@@ -80,16 +80,14 @@ lazy_static! {
     static ref TEST_SUBGRAPH_SCHEMA: InputSchema =
         InputSchema::parse_latest(USER_GQL, TEST_SUBGRAPH_ID.clone())
             .expect("Failed to parse user schema");
-    static ref BLOCKS: Vec<BlockPtr> = vec![
-        "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f",
+    static ref BLOCKS: Vec<BlockPtr> = ["bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f",
         "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13",
         "b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1",
         "7347afe69254df06729e123610b00b8b11f15cfae3241f9366fb113aec07489c",
         "f8ccbd3877eb98c958614f395dd351211afb9abba187bfc1fb4ac414b099c4a6",
         "7b0ea919e258eb2b119eb32de56b85d12d50ac6a9f7c5909f843d6172c8ba196",
         "6b834521bb753c132fdcf0e1034803ed9068e324112f8750ba93580b393a986b",
-        "7cce080f5a49c2997a6cc65fc1cee9910fd8fc3721b7010c0b5d0873e2ac785e"
-    ]
+        "7cce080f5a49c2997a6cc65fc1cee9910fd8fc3721b7010c0b5d0873e2ac785e"]
     .iter()
     .enumerate()
     .map(|(idx, hash)| BlockPtr::try_from((*hash, idx as i64)).unwrap())
diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs
index 483be514504..e76b1c519d4 100644
--- a/store/test-store/tests/postgres/relational.rs
+++ b/store/test-store/tests/postgres/relational.rs
@@ -269,7 +269,7 @@ async fn insert_entity_at(
         "Failed to insert entities {}[{:?}]",
         entity_type, entities_with_keys
     );
-    let group = row_group_insert(&entity_type, block, entities_with_keys_owned.clone());
+    let group = row_group_insert(entity_type, block, entities_with_keys_owned.clone());
     layout
         .insert(&LOGGER, conn, &group, &MOCK_STOPWATCH)
         .await
@@ -312,7 +312,7 @@ async fn update_entity_at(
         "Failed to insert entities {}[{:?}]",
         entity_type, entities_with_keys
     );
-    let group = row_group_update(&entity_type, block, entities_with_keys_owned.clone());
+    let group = row_group_update(entity_type, block, entities_with_keys_owned.clone());
     let updated = layout
         .update(conn, &group, &MOCK_STOPWATCH)
         .await
@@ -394,7 +394,7 @@ async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) {
         conn,
         layout,
         "1",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Johnton",
         "tonofjohn@email.com",
         67_i32,
@@ -411,7 +411,7 @@ async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) {
         conn,
         layout,
         "2",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Cindini",
         "dinici@email.com",
         43_i32,
@@ -428,7 +428,7 @@ async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) {
         conn,
         layout,
         "3",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Shaqueeena",
         "teeko@email.com",
         28_i32,
@@ -493,8 +493,8 @@ async fn insert_pet(
 }

 async fn insert_pets(conn: &mut AsyncPgConnection, layout: &Layout) {
-    insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0, 0).await;
-    insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0, 1).await;
+    insert_pet(conn, layout, &DOG_TYPE, "pluto", "Pluto", 0, 0).await;
+    insert_pet(conn, layout, &CAT_TYPE, "garfield", "Garfield", 0, 1).await;
 }

 async fn create_schema(conn: &mut AsyncPgConnection) -> Layout {
@@ -571,7 +571,7 @@ where
 #[graph::test]
 async fn find() {
     run_test(async |conn, layout| {
-        insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;
+        insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;

         // Happy path: find existing entity
         let entity = layout
@@ -605,7 +605,7 @@ async fn insert_null_fulltext_fields() {
         insert_entity(
             conn,
             layout,
-            &*NULLABLE_STRINGS_TYPE,
+            &NULLABLE_STRINGS_TYPE,
             vec![EMPTY_NULLABLESTRINGS_ENTITY.clone()],
         )
         .await;
@@ -628,7 +628,7 @@ async fn insert_null_fulltext_fields() {
 #[graph::test]
 async fn update() {
     run_test(async |conn, layout| {
-        insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;
+        insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;

         // Update with overwrite
         let mut entity = SCALAR_ENTITY.clone();
@@ -673,7 +673,7 @@ async fn update_many() {
         insert_entity(
             conn,
             layout,
-            &*SCALAR_TYPE,
+            &SCALAR_TYPE,
             vec![one.clone(), two.clone(), three.clone()],
         )
         .await;
@@ -762,7 +762,7 @@ async fn update_many() {
 #[graph::test]
 async fn serialize_bigdecimal() {
     run_test(async |conn, layout| {
-        insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;
+        insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;

         // Update with overwrite
         let mut entity = SCALAR_ENTITY.clone();
@@ -857,11 +857,11 @@ async fn count_scalar_entities(conn: &mut AsyncPgConnection, layout: &Layout) ->
 #[graph::test]
 async fn delete() {
     run_test(async |conn, layout| {
-        insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;
+        insert_entity(conn, layout, &SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await;
         let mut two = SCALAR_ENTITY.clone();
         two.set("id", "two").unwrap();
         two.set("vid", 1i64).unwrap();
-        insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]).await;
+        insert_entity(conn, layout, &SCALAR_TYPE, vec![two]).await;

         // Delete where nothing is getting deleted
         let key = SCALAR_TYPE.parse_key("no such entity").unwrap();
@@ -902,7 +902,7 @@ async fn insert_many_and_delete_many() {
         let mut three = SCALAR_ENTITY.clone();
         three.set("id", "three").unwrap();
         three.set("vid", 2i64).unwrap();
-        insert_entity(conn, layout, &*SCALAR_TYPE, vec![one, two, three]).await;
+        insert_entity(conn, layout, &SCALAR_TYPE, vec![one, two, three]).await;

         // confidence test: there should be 3 scalar entities in store right now
         assert_eq!(3, count_scalar_entities(conn, layout).await);
@@ -912,7 +912,7 @@ async fn insert_many_and_delete_many() {
             .into_iter()
             .map(|key| SCALAR_TYPE.parse_key(key).unwrap())
             .collect();
-        let group = row_group_delete(&*SCALAR_TYPE, 1, entity_keys);
+        let group = row_group_delete(&SCALAR_TYPE, 1, entity_keys);
         let num_removed = layout
             .delete(conn, &group, &MOCK_STOPWATCH)
             .await
@@ -945,7 +945,7 @@ async fn layout_cache() {
             .await
             .expect("we can get the layout");
         let table = layout.table(&table_name).unwrap();
-        assert_eq!(false, table.is_account_like);
+        assert!(!table.is_account_like);

         set_account_like(conn, site.as_ref(), &table_name, true)
             .await
@@ -958,7 +958,7 @@ async fn layout_cache() {
             .await
             .expect("we can get the layout");
         let table = layout.table(&table_name).unwrap();
-        assert_eq!(true, table.is_account_like);
+        assert!(table.is_account_like);

         // Set it back to false
         set_account_like(conn, site.as_ref(), &table_name, false)
@@ -971,7 +971,7 @@ async fn layout_cache() {
             .await
             .expect("we can get the layout");
         let table = layout.table(&table_name).unwrap();
-        assert_eq!(false, table.is_account_like);
+        assert!(!table.is_account_like);
     })
     .await;
 }
@@ -1032,10 +1032,10 @@ async fn conflicting_entity() {
     run_test(async |mut conn, layout| {
         let id = Value::String("fred".to_string());
-        check(&mut conn, layout, id, "Cat", "Dog", "Ferret", 0).await;
+        check(conn, layout, id, "Cat", "Dog", "Ferret", 0).await;

         let id = Value::Bytes(scalar::Bytes::from_str("0xf1ed").unwrap());
-        check(&mut conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1).await;
+        check(conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1).await;
     })
     .await
 }
@@ -1052,9 +1052,9 @@ async fn revert_block() {
             vid: block as i64,
         };
         if block == 0 {
-            insert_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block).await;
+            insert_entity_at(conn, layout, &CAT_TYPE, vec![fred], block).await;
         } else {
-            update_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block).await;
+            update_entity_at(conn, layout, &CAT_TYPE, vec![fred], block).await;
         }
     };

@@ -1092,7 +1092,7 @@ async fn revert_block() {
             order: block,
             vid: (block + 10) as i64
         };
-        insert_entity_at(conn, layout, &*MINK_TYPE, vec![marty], block).await;
+        insert_entity_at(conn, layout, &MINK_TYPE, vec![marty], block).await;
     }
 };
@@ -1164,7 +1164,7 @@ impl<'a> QueryChecker<'a> {
             conn,
             layout,
             "1",
-            &*USER_TYPE,
+            &USER_TYPE,
             "Jono",
             "achangedemail@email.com",
             67_i32,
@@ -1221,7 +1221,7 @@ fn query(entity_types: &[&EntityType]) -> EntityQuery {
         BLOCK_NUMBER_MAX,
         EntityCollection::All(
             entity_types
-                .into_iter()
+                .iter()
                 .map(|entity_type| ((*entity_type).clone(), AttributeNames::All))
                 .collect(),
         ),
@@ -1229,7 +1229,7 @@ fn query(entity_types: &[&EntityType]) -> EntityQuery {
 }

 fn user_query() -> EntityQuery {
-    query(&vec![&*USER_TYPE])
+    query(&[&*USER_TYPE])
 }

 trait EasyOrder {
@@ -1260,7 +1260,7 @@ impl EasyOrder for EntityQuery {
 )]
 async fn check_fulltext_search_syntax_error() {
     run_test(async |mut conn, layout| {
-        QueryChecker::new(&mut conn, layout)
+        QueryChecker::new(conn, layout)
             .await
             .check(
                 vec!["1"],
@@ -1277,13 +1277,13 @@ async fn check_fulltext_search_syntax_error() {
 #[graph::test]
 async fn check_block_finds() {
     run_test(async |mut conn, layout| {
-        let checker = QueryChecker::new(&mut conn, layout).await;
+        let checker = QueryChecker::new(conn, layout).await;

         update_user_entity(
             checker.conn,
             layout,
             "1",
-            &*USER_TYPE,
+            &USER_TYPE,
             "Johnton",
             "tonofjohn@email.com",
             67_i32,
@@ -1325,7 +1325,7 @@ async fn check_find() {
     run_test(async |mut conn, layout| {
         // find with interfaces
         let types = vec![&*CAT_TYPE, &*DOG_TYPE];
-        let checker = QueryChecker::new(&mut conn, layout)
+        let checker = QueryChecker::new(conn, layout)
             .await
             .check(vec!["garfield", "pluto"], query(&types))
             .await
@@ -1920,10 +1920,10 @@ struct FilterChecker<'a> {
 impl<'a> FilterChecker<'a> {
     async fn new(conn: &'a mut AsyncPgConnection, layout: &'a Layout) -> Self {
         let (a1, a2, a2b, a3) = ferrets();
-        insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0, 0).await;
-        insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0, 1).await;
-        insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0, 2).await;
-        insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0, 3).await;
+        insert_pet(conn, layout, &FERRET_TYPE, "a1", &a1, 0, 0).await;
+        insert_pet(conn, layout, &FERRET_TYPE, "a2", &a2, 0, 1).await;
+        insert_pet(conn, layout, &FERRET_TYPE, "a2b", &a2b, 0, 2).await;
+        insert_pet(conn, layout, &FERRET_TYPE, "a3", &a3, 0, 3).await;

         Self { conn, layout }
     }
@@ -1936,7 +1936,7 @@ impl<'a> FilterChecker<'a> {
         let expected_entity_ids: Vec<String> =
             expected_entity_ids.into_iter().map(str::to_owned).collect();

-        let query = query(&vec![&*FERRET_TYPE]).filter(filter).asc("id");
+        let query = query(&[&*FERRET_TYPE]).filter(filter).asc("id");

         let entities = self
             .layout
@@ -2105,7 +2105,7 @@ async fn check_filters() {
     update_entity_at(
         checker.conn,
         layout,
-        &*FERRET_TYPE,
+        &FERRET_TYPE,
         vec![entity! { layout.input_schema =>
             id: "a1",
             name: "Test",
diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs
index c42bdc2eef4..8325ef33b20 100644
--- a/store/test-store/tests/postgres/relational_bytes.rs
+++ b/store/test-store/tests/postgres/relational_bytes.rs
@@ -280,12 +280,12 @@ async fn find() {
         layout
             .find(conn, &key, BLOCK_NUMBER_MAX)
             .await
-            .expect(&format!("Failed to read Thing[{}]", id))
+            .unwrap_or_else(|_| panic!("Failed to read Thing[{}]", id))
     }

     const ID: &str = "deadbeef";
     const NAME: &str = "Beef";
-    insert_thing(&mut conn, layout, ID, NAME, 0).await;
+    insert_thing(conn, layout, ID, NAME, 0).await;

     // Happy path: find existing entity
     let entity = find_entity(conn, layout, ID).await.unwrap();
@@ -306,8 +306,8 @@ async fn find_many() {
     const NAME: &str = "Beef";
     const ID2: &str = "0xdeadbeef02";
     const NAME2: &str = "Moo";
-    insert_thing(&mut conn, layout, ID, NAME, 0).await;
-    insert_thing(&mut conn, layout, ID2, NAME2, 1).await;
+    insert_thing(conn, layout, ID, NAME, 0).await;
+    insert_thing(conn, layout, ID2, NAME2, 1).await;

     let mut id_map = BTreeMap::default();
     let ids = IdList::try_from_iter(
@@ -336,7 +336,7 @@ async fn find_many() {
 #[graph::test]
 async fn update() {
     run_test(async |mut conn, layout| {
-        insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()).await;
+        insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()).await;

         // Update the entity
         let mut entity = BEEF_ENTITY.clone();
@@ -369,11 +369,11 @@ async fn delete() {
     run_test(async |mut conn, layout| {
         const TWO_ID: &str = "deadbeef02";

-        insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()).await;
+        insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()).await;
         let mut two = BEEF_ENTITY.clone();
         two.set("id", TWO_ID).unwrap();
         two.set("vid", 1i64).unwrap();
-        insert_entity(&mut conn, layout, "Thing", two).await;
+        insert_entity(conn, layout, "Thing", two).await;

         // Delete where nothing is getting deleted
         let key = THING_TYPE.parse_key("ffff").unwrap();
@@ -381,7 +381,7 @@ async fn delete() {
         let mut entity_keys = vec![key.clone()];
         let group = row_group_delete(&entity_type, 1, entity_keys.clone());
         let count = layout
-            .delete(&mut conn, &group, &MOCK_STOPWATCH)
+            .delete(conn, &group, &MOCK_STOPWATCH)
             .await
             .expect("Failed to delete");
         assert_eq!(0, count);
@@ -393,7 +393,7 @@ async fn delete() {
             .expect("Failed to update entity types");
         let group = row_group_delete(&entity_type, 1, entity_keys);
         let count = layout
-            .delete(&mut conn, &group, &MOCK_STOPWATCH)
+            .delete(conn, &group, &MOCK_STOPWATCH)
             .await
             .expect("Failed to delete");
         assert_eq!(1, count);
@@ -490,14 +490,14 @@ async fn query() {
         // Especially the multiplicity for type A and B queries is determined
         // by knowing whether there are one or many entities per parent
         // in the test data
-        make_thing_tree(&mut conn, layout).await;
+        make_thing_tree(conn, layout).await;

         // See https://graphprotocol.github.io/rfcs/engineering-plans/0001-graphql-query-prefetching.html#handling-parentchild-relationships
         // for a discussion of the various types of relationships and queries

         // EntityCollection::All
         let coll = EntityCollection::All(vec![(THING_TYPE.clone(), AttributeNames::All)]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2, ROOT, GRANDCHILD1, GRANDCHILD2], things);

         // EntityCollection::Window, type A, many
@@ -511,7 +511,7 @@ async fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![ROOT], things);

         // EntityCollection::Window, type A, single
@@ -527,7 +527,7 @@ async fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2], things);

         // EntityCollection::Window, type B, many
@@ -541,7 +541,7 @@ async fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2], things);

         // EntityCollection::Window, type B, single
@@ -555,7 +555,7 @@ async fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![GRANDCHILD1, GRANDCHILD2], things);

         // EntityCollection::Window, type C
@@ -570,7 +570,7 @@ async fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2], things);

         // EntityCollection::Window, type D
@@ -585,7 +585,7 @@ async fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll).await;
+        let things = fetch(conn, layout, coll).await;
         assert_eq!(vec![ROOT, ROOT], things);
     })
     .await;
diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs
index 60fb746fbe8..0059032e3ba 100644
--- a/store/test-store/tests/postgres/store.rs
+++ b/store/test-store/tests/postgres/store.rs
@@ -187,7 +187,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator

     let test_entity_1 = create_test_entity(
         "1",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Johnton",
         "tonofjohn@email.com",
         67_i32,
@@ -207,7 +207,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator

     let test_entity_2 = create_test_entity(
         "2",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Cindini",
         "dinici@email.com",
         43_i32,
@@ -218,7 +218,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator
     );
     let test_entity_3_1 = create_test_entity(
         "3",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Shaqueeena",
         "queensha@email.com",
         28_i32,
@@ -238,7 +238,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator

     let test_entity_3_2 = create_test_entity(
         "3",
-        &*USER_TYPE,
+        &USER_TYPE,
         "Shaqueeena",
         "teeko@email.com",
         28_i32,
@@ -389,7 +389,7 @@ fn insert_entity() {
         let entity_key = USER_TYPE.parse_key("7").unwrap();
         let test_entity = create_test_entity(
             "7",
-            &*USER_TYPE,
+            &USER_TYPE,
             "Wanjon",
             "wanawana@email.com",
             76_i32,
@@ -424,7 +424,7 @@ fn update_existing() {

         let op = create_test_entity(
             "1",
-            &*USER_TYPE,
+            &USER_TYPE,
             "Wanjon",
             "wanawana@email.com",
             76_i32,
@@ -1318,7 +1318,7 @@ fn handle_large_string_with_index() {
         writable
             .transact_block_operations(
                 TEST_BLOCK_3_PTR.clone(),
-                BlockTime::for_test(&*TEST_BLOCK_3_PTR),
+                BlockTime::for_test(&TEST_BLOCK_3_PTR),
                 FirehoseCursor::None,
                 vec![
                     make_insert_op(ONE, &long_text, &schema, block, 11),
@@ -1426,7 +1426,7 @@ fn handle_large_bytea_with_index() {
         writable
             .transact_block_operations(
                 TEST_BLOCK_3_PTR.clone(),
-                BlockTime::for_test(&*TEST_BLOCK_3_PTR),
+                BlockTime::for_test(&TEST_BLOCK_3_PTR),
                 FirehoseCursor::None,
                 vec![
                     make_insert_op(ONE, &long_bytea, &schema, block, 10),
@@ -1613,11 +1613,11 @@ fn window() {
     }

     fn make_user(id: &str, color: &str, age: i32, vid: i64) -> EntityOperation {
-        make_color_and_age(&*USER_TYPE, id, color, age, vid)
+        make_color_and_age(&USER_TYPE, id, color, age, vid)
     }

     fn make_person(id: &str, color: &str, age: i32, vid: i64) -> EntityOperation {
-        make_color_and_age(&*PERSON_TYPE, id, color, age, vid)
+        make_color_and_age(&PERSON_TYPE, id, color, age, vid)
     }

     let ops = vec![
@@ -1873,7 +1873,7 @@ fn parse_null_timestamp() {
             .expect("block_number to return correct number and timestamp")
             .unwrap();
         assert_eq!(number, 3);
-        assert_eq!(true, timestamp.is_none());
+        assert!(timestamp.is_none());
     })
 }
 #[test]
@@ -1887,7 +1887,7 @@ fn reorg_tracking() {
     ) {
         let test_entity_1 = create_test_entity(
             "1",
-            &*USER_TYPE,
+            &USER_TYPE,
             "Johnton",
             "tonofjohn@email.com",
             age,
diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs
index 5cd31c93e44..23b60ecc52c 100644
--- a/store/test-store/tests/postgres/subgraph.rs
+++ b/store/test-store/tests/postgres/subgraph.rs
@@ -523,8 +523,8 @@ fn version_info() {
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.synced);
-        assert_eq!(false, vi.failed);
+        assert!(!vi.synced);
+        assert!(!vi.failed);
         assert_eq!(
             Some("manifest for versionInfoSubgraph"),
             vi.description.as_deref()
@@ -580,9 +580,9 @@ fn subgraph_features() {
         assert_eq!(handler_kinds.len(), 2);
         assert!(handler_kinds.contains(&"mock_handler_1".to_string()));
         assert!(handler_kinds.contains(&"mock_handler_2".to_string()));
-        assert_eq!(has_declared_calls, true);
-        assert_eq!(has_bytes_as_ids, true);
-        assert_eq!(has_aggregations, true);
+        assert!(has_declared_calls);
+        assert!(has_bytes_as_ids);
+        assert!(has_aggregations);
         assert_eq!(
             immutable_entities,
             vec!["User2".to_string(), "Data".to_string()]
@@ -805,7 +805,7 @@ fn fail_unfail_deterministic_error() {
         assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);

         // Process the second block.
@@ -823,7 +823,7 @@ fn fail_unfail_deterministic_error() {
         assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let error = SubgraphError {
@@ -848,7 +848,7 @@ fn fail_unfail_deterministic_error() {
         assert!(state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         // Unfail the subgraph.
@@ -863,7 +863,7 @@ fn fail_unfail_deterministic_error() {
         assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);

         test_store::remove_subgraphs().await;
@@ -902,7 +902,7 @@ fn fail_unfail_deterministic_error_noop() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);

         // Process the second block.
@@ -919,7 +919,7 @@ fn fail_unfail_deterministic_error_noop() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let writable = store
@@ -939,7 +939,7 @@ fn fail_unfail_deterministic_error_noop() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let error = SubgraphError {
@@ -957,7 +957,7 @@ fn fail_unfail_deterministic_error_noop() {
         assert_eq!(count().await, 1);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         // Running unfail_deterministic_error against a NON-deterministic error will do nothing.
@@ -972,7 +972,7 @@ fn fail_unfail_deterministic_error_noop() {
         assert_eq!(count().await, 1);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let error = SubgraphError {
@@ -999,7 +999,7 @@ fn fail_unfail_deterministic_error_noop() {
         assert_eq!(count().await, 2);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         test_store::remove_subgraphs().await;
@@ -1038,7 +1038,7 @@ fn fail_unfail_non_deterministic_error() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);

         let error = SubgraphError {
@@ -1062,7 +1062,7 @@ fn fail_unfail_non_deterministic_error() {
         assert_eq!(count().await, 1);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);

         // Process the second block.
@@ -1079,7 +1079,7 @@ fn fail_unfail_non_deterministic_error() {
         assert_eq!(count().await, 1);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         // Unfail the subgraph and delete the fatal error.
@@ -1093,7 +1093,7 @@ fn fail_unfail_non_deterministic_error() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         test_store::remove_subgraphs().await;
@@ -1132,7 +1132,7 @@ fn fail_unfail_non_deterministic_error_noop() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);

         // Process the second block.
@@ -1149,7 +1149,7 @@ fn fail_unfail_non_deterministic_error_noop() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let writable = store
@@ -1169,7 +1169,7 @@ fn fail_unfail_non_deterministic_error_noop() {
         assert_eq!(count().await, 0);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(false, vi.failed);
+        assert!(!vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let error = SubgraphError {
@@ -1187,7 +1187,7 @@ fn fail_unfail_non_deterministic_error_noop() {
         assert_eq!(count().await, 1);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         // Running unfail_non_deterministic_error will be NOOP, the error is deterministic.
@@ -1201,7 +1201,7 @@ fn fail_unfail_non_deterministic_error_noop() {
         assert_eq!(count().await, 1);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         let error = SubgraphError {
@@ -1226,7 +1226,7 @@ fn fail_unfail_non_deterministic_error_noop() {
         assert_eq!(count().await, 2);
         let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
-        assert_eq!(true, vi.failed);
+        assert!(vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);

         test_store::remove_subgraphs().await;
diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs
index da828e8784f..0321b6d3579 100644
--- a/store/test-store/tests/postgres/writable.rs
+++ b/store/test-store/tests/postgres/writable.rs
@@ -370,15 +370,13 @@ fn restart() {
 #[test]
 fn read_range_test() {
     run_test(|store, writable, sourceable, deployment| async move {
-        let result_entities = vec![
-            r#"(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }])"#,
+        let result_entities = [r#"(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }])"#,
             r#"(2, [EntitySourceOperation { entity_op: Modify, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(4), id: String("2"), vid: Int8(2) }, vid: 2 }])"#,
             r#"(3, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(6), id: String("3"), vid: Int8(3) }, vid: 3 }])"#,
             r#"(4, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(8), id: String("4"), vid: Int8(4) }, vid: 4 }])"#,
             r#"(5, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(10), id: String("5"), vid: Int8(5) }, vid: 5 }])"#,
             r#"(6, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#,
-            r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#,
-        ];
+            r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#];

         let subgraph_store = store.subgraph_store();
         writable.deployment_synced(block_pointer(0)).await.unwrap();
@@ -442,10 +440,8 @@ fn read_immutable_only_range_test() {
 #[test]
 fn read_range_pool_created_test() {
     run_test(|store, writable, sourceable, deployment| async move {
-        let result_entities = vec![
-            format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }}, vid: 1 }}])"),
-            format!("(2, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }}, vid: 2 }}])"),
-        ];
+        let result_entities = [format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }}, vid: 1 }}])"),
+            format!("(2, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }}, vid: 2 }}])")];

         // Rest of the test remains the same
         let subgraph_store = store.subgraph_store();
diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs
index db459972bc3..56236183a12 100644
--- a/tests/tests/integration_tests.rs
+++ b/tests/tests/integration_tests.rs
@@ -175,7 +175,7 @@ impl TestCase {
         contracts: &[Contract],
     ) -> Result {
         status!(&self.name, "Deploying subgraph");
-        let subgraph_name = match Subgraph::deploy(&subgraph_name, contracts).await {
+        let subgraph_name = match Subgraph::deploy(subgraph_name, contracts).await {
             Ok(name) => name,
             Err(e) => {
                 error!(&self.name, "Deploy failed");
@@ -1119,7 +1119,7 @@ async fn test_declared_calls_basic(ctx: TestContext) -> anyhow::Result<()> {
     assert!(subgraph.healthy);

     // Query the results
-    const QUERY: &'static str = "{
+    const QUERY: &str = "{
         transferCalls(first: 1, orderBy: blockNumber) {
             id
             from
@@ -1200,7 +1200,7 @@ async fn test_declared_calls_struct_fields(ctx: TestContext) -> anyhow::Result<(
     sleep(Duration::from_secs(2)).await;

     // Query the results
-    const QUERY: &'static str = "{
+    const QUERY: &str = "{
         assetTransferCalls(first: 1, orderBy: blockNumber) {
             id
             assetAddr
diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs
index f35df89ce2e..a28a05434f3 100644
--- a/tests/tests/runner_tests.rs
+++ b/tests/tests/runner_tests.rs
@@ -44,7 +44,7 @@ fn assert_eq_ignore_backtrace(err: &SubgraphError, expected: &SubgraphError) {
         let split_err: Vec<&str> = err.message.split("\twasm backtrace:").collect();
         let split_expected: Vec<&str> = expected.message.split("\twasm backtrace:").collect();

-        split_err.get(0) == split_expected.get(0)
+        split_err.first() == split_expected.first()
     };

     if !equal {
@@ -233,7 +233,7 @@ async fn api_version_0_0_7() {
     ctx.start_and_sync_to(stop_block).await;

     let query_res = ctx
-        .query(&format!(r#"{{ testResults{{ id, message }} }}"#,))
+        .query(&r#"{ testResults{ id, message } }"#.to_string())
        .await
        .unwrap();

@@ -317,9 +317,7 @@ async fn derived_loaders() {
     // Where the test cases are documented in the code.
     let query_res = ctx
-        .query(&format!(
-            r#"{{ testResult(id:"1_0", block: {{ number: 1 }} ){{ id barDerived{{id value value2}} bBarDerived{{id value value2}} }} }}"#,
-        ))
+        .query(&r#"{ testResult(id:"1_0", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#.to_string())
         .await
         .unwrap();

@@ -367,9 +365,7 @@ async fn derived_loaders() {
     );

     let query_res = ctx
-        .query(&format!(
-            r#"{{ testResult(id:"1_1", block: {{ number: 1 }} ){{ id barDerived{{id value value2}} bBarDerived{{id value value2}} }} }}"#,
-        ))
+        .query(&r#"{ testResult(id:"1_1", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#.to_string())
         .await
         .unwrap();

@@ -407,9 +403,7 @@ async fn derived_loaders() {
     );

     let query_res = ctx.query(
-        &format!(
-            r#"{{ testResult(id:"2_0" ){{ id barDerived{{id value value2}} bBarDerived{{id value value2}} }} }}"#
-        )
+        &r#"{ testResult(id:"2_0" ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#.to_string()
     )
     .await
     .unwrap();
@@ -462,9 +456,9 @@ async fn end_block() -> anyhow::Result<()> {
             .collect::<Vec<_>>();

         if should_contain_addr {
-            assert!(addresses.contains(&addr));
+            assert!(addresses.contains(addr));
         } else {
-            assert!(!addresses.contains(&addr));
+            assert!(!addresses.contains(addr));
         };
     }

@@ -1176,14 +1170,14 @@ async fn arweave_file_data_sources() {
     assert_eq!(datasources.len(), 1);
     let ds = datasources.first().unwrap();
     assert_ne!(ds.causality_region, CausalityRegion::ONCHAIN);
-    assert_eq!(ds.done_at.is_some(), true);
+    assert!(ds.done_at.is_some());
     assert_eq!(
         ds.param.as_ref().unwrap(),
         &Bytes::from(Word::from(id).as_bytes())
    );

     let content_bytes = ctx.arweave_resolver.get(&Word::from(id)).await.unwrap();
-    let content = String::from_utf8(content_bytes.into()).unwrap();
+    let content = String::from_utf8(content_bytes).unwrap();
     let query_res = ctx
         .query(&format!(r#"{{ file(id: "{id}") {{ id, content }} }}"#,))
         .await

From 6c38d4374f740066b563fc43a94626563d1a7dc7 Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 11:57:10 -0800
Subject: [PATCH 68/92] test-store: Remove unneeded mutability
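
The `run_test` closures already receive a connection that callees can
borrow mutably, so declaring the argument `mut` adds nothing. A minimal
sketch of the pattern this commit cleans up (illustrative, not one
specific call site):

    // Before: `mut conn`, even though the closure never rebinds `conn`
    run_test(async |mut conn, layout| {
        insert_entity(&mut conn, layout, "Thing", entity.clone()).await;
    });

    // After: a plain binding; callees borrow it as needed
    run_test(async |conn, layout| {
        insert_entity(conn, layout, "Thing", entity.clone()).await;
    });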
---
 store/test-store/tests/postgres/relational.rs       |  8 ++++----
 store/test-store/tests/postgres/relational_bytes.rs | 10 +++++-----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs
index e76b1c519d4..697d2f2b57a 100644
--- a/store/test-store/tests/postgres/relational.rs
+++ b/store/test-store/tests/postgres/relational.rs
@@ -1030,7 +1030,7 @@ async fn conflicting_entity() {
         assert_eq!(None, conflict);
     }

-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         let id = Value::String("fred".to_string());

         check(conn, layout, id, "Cat", "Dog", "Ferret", 0).await;
@@ -1259,7 +1259,7 @@ impl EasyOrder for EntityQuery {
     expected = "layout.query failed to execute query: FulltextQueryInvalidSyntax(\"syntax error in tsquery: \\\"Jono 'a\\\"\")"
 )]
 async fn check_fulltext_search_syntax_error() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         QueryChecker::new(conn, layout)
             .await
             .check(
@@ -1276,7 +1276,7 @@ async fn check_fulltext_search_syntax_error() {

 #[graph::test]
 async fn check_block_finds() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         let checker = QueryChecker::new(conn, layout).await;

         update_user_entity(
@@ -1322,7 +1322,7 @@ async fn check_block_finds() {

 #[graph::test]
 async fn check_find() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         // find with interfaces
         let types = vec![&*CAT_TYPE, &*DOG_TYPE];
         let checker = QueryChecker::new(conn, layout)
diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs
index 8325ef33b20..b0330bfe23e 100644
--- a/store/test-store/tests/postgres/relational_bytes.rs
+++ b/store/test-store/tests/postgres/relational_bytes.rs
@@ -270,7 +270,7 @@ async fn bad_id() {

 #[graph::test]
 async fn find() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         async fn find_entity(
             conn: &mut AsyncPgConnection,
             layout: &Layout,
@@ -301,7 +301,7 @@ async fn find() {

 #[graph::test]
 async fn find_many() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         const ID: &str = "0xdeadbeef";
         const NAME: &str = "Beef";
         const ID2: &str = "0xdeadbeef02";
@@ -335,7 +335,7 @@ async fn find_many() {

 #[graph::test]
 async fn update() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()).await;

         // Update the entity
@@ -366,7 +366,7 @@ async fn update() {

 #[graph::test]
 async fn delete() {
-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         const TWO_ID: &str = "deadbeef02";

         insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()).await;
@@ -483,7 +483,7 @@ async fn query() {
         .collect::<Vec<_>>()
     }

-    run_test(async |mut conn, layout| {
+    run_test(async |conn, layout| {
         // This test exercises the different types of queries we generate;
         // the type of query is based on knowledge of what the test data
         // looks like, not on just an inference from the GraphQL model.

From 7d6f157757ae0bea84280085b79ae908b4dfbc54 Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 11:59:29 -0800
Subject: [PATCH 69/92] tests: Fix warnings from clippy::approx_constant
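
The lint flags float literals that look like mistyped standard
constants. A short sketch of the rule and the fix taken here, where the
test deliberately wants the imprecise literal:

    let pi = 3.14159; // warning: approximate value of `f64::consts::PI` found

    // Either use std::f64::consts::PI, or acknowledge that the literal
    // itself is the point of the test:
    #[allow(clippy::approx_constant)]
    let almost_pi = 3.14159_f64;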
String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(4), id: String("2"), vid: Int8(2) }, vid: 2 }])"#, r#"(3, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(6), id: String("3"), vid: Int8(3) }, vid: 3 }])"#, r#"(4, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(8), id: String("4"), vid: Int8(4) }, vid: 4 }])"#, r#"(5, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(10), id: String("5"), vid: Int8(5) }, vid: 5 }])"#, r#"(6, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#, - r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#, - ]; + r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#]; let subgraph_store = store.subgraph_store(); writable.deployment_synced(block_pointer(0)).await.unwrap(); @@ -442,10 +440,8 @@ fn read_immutable_only_range_test() { #[test] fn read_range_pool_created_test() { run_test(|store, writable, sourceable, deployment| async move { - let result_entities = vec![ - format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }}, vid: 1 }}])"), - format!("(2, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }}, vid: 2 }}])"), - ]; + let result_entities = [format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: 
---
 chain/ethereum/src/ethereum_adapter.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs
index b1087b8d848..b1ee277c0d3 100644
--- a/chain/ethereum/src/ethereum_adapter.rs
+++ b/chain/ethereum/src/ethereum_adapter.rs
@@ -2751,6 +2751,7 @@ mod tests {
         // transport.set_response(block_json);
         // transport.add_response(json_value);

+        #[allow(clippy::arc_with_non_send_sync)]
         let web3 = Arc::new(Web3::new(transport.clone()));
         let result = check_block_receipt_support(
             web3.clone(),

From 7f8e28dc1b406f1d3d02a4249cc378df743a3520 Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:03:49 -0800
Subject: [PATCH 71/92] tests: Fix warnings from clippy::assertions_on_constants
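
`assert!(false, ...)` always fails, so it is really an unconditional
panic in disguise; clippy asks for the direct form. Sketch of the
rewrite applied throughout:

    _ => assert!(false, "Unexpected field [{}]!", name), // before
    _ => panic!("Unexpected field [{}]!", name),         // after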
---
 chain/common/tests/test-acme.rs |  2 +-
 graph/src/util/jobs.rs          |  2 +-
 runtime/test/src/test.rs        | 18 +++++++-----------
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/chain/common/tests/test-acme.rs b/chain/common/tests/test-acme.rs
index 554e4ecbd5c..cd0b32b8582 100644
--- a/chain/common/tests/test-acme.rs
+++ b/chain/common/tests/test-acme.rs
@@ -63,7 +63,7 @@ fn required_ok() {
                 !f.required,
                 "Transaction.events field should NOT be required!"
             ),
-            _ => assert!(false, "Unexpected message field [{}]!", f.name),
+            _ => panic!("Unexpected message field [{}]!", f.name),
         };
     });
 }
diff --git a/graph/src/util/jobs.rs b/graph/src/util/jobs.rs
index 4abed5e2a56..438ff72004c 100644
--- a/graph/src/util/jobs.rs
+++ b/graph/src/util/jobs.rs
@@ -142,7 +142,7 @@ mod tests {
                 break;
             }
             if start.elapsed() > Duration::from_secs(2) {
-                assert!(false, "Counting to 10 took longer than 2 seconds");
+                panic!("Counting to 10 took longer than 2 seconds");
             }
         }

diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs
index ceb9d21b9d2..17c6196b8d5 100644
--- a/runtime/test/src/test.rs
+++ b/runtime/test/src/test.rs
@@ -1081,7 +1081,7 @@ async fn test_entity_store(api_version: Version) {
             assert_eq!(Some(&Value::from("steve")), data.get("id"));
             assert_eq!(Some(&Value::from("Steve-O")), data.get("name"));
         }
-        _ => assert!(false, "expected Overwrite modification"),
+        _ => panic!("expected Overwrite modification"),
     }

     // Load, set, save cycle for a new entity with fulltext API
@@ -1104,7 +1104,7 @@ async fn test_entity_store(api_version: Version) {
             assert_eq!(Some(&Value::from("herobrine")), data.get("id"));
             assert_eq!(Some(&Value::from("Brine-O")), data.get("name"));
         }
-        _ => assert!(false, "expected Insert modification"),
+        _ => panic!("expected Insert modification"),
     };
 }

@@ -1123,20 +1123,16 @@ fn test_detect_contract_calls(api_version: Version) {
         &wasm_file_path("abi_store_value.wasm", api_version.clone()),
         api_version.clone(),
     );
-    assert!(
-        !data_source_without_calls
-            .mapping
-            .requires_archive()
-            .unwrap()
-    );
+    assert!(!data_source_without_calls
+        .mapping
+        .requires_archive()
+        .unwrap());

     let data_source_with_calls = mock_data_source(
         &wasm_file_path("contract_calls.wasm", api_version.clone()),
         api_version,
     );
-    assert!(
-        data_source_with_calls.mapping.requires_archive().unwrap()
-    );
+    assert!(data_source_with_calls.mapping.requires_archive().unwrap());
 }

 #[graph::test]

From c416983f9c12e6f4a5d0d9a4cbd96b8fb626b6ac Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:17:26 -0800
Subject: [PATCH 72/92] tests: Fix warnings from clippy::cloned_ref_to_slice_refs
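
Building `&[x.clone()]` clones a value just to obtain a one-element
slice; `std::slice::from_ref` borrows instead. A sketch, using the
`EndpointMetrics::new` call fixed below:

    let metrics = EndpointMetrics::new(logger, &[host.clone()], registry); // clones the String
    let metrics = EndpointMetrics::new(logger, slice::from_ref(&host), registry); // borrows

In the provider manager tests the cloned slice is handed to
`ProviderCheckStrategy::RequireAll`, where the clone is deliberate, so
those sites keep it under `#[allow(clippy::cloned_ref_to_slice_refs)]`.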
---
 chain/ethereum/examples/firehose.rs                  |  3 ++-
 .../components/network_provider/provider_manager.rs  | 10 ++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/chain/ethereum/examples/firehose.rs b/chain/ethereum/examples/firehose.rs
index e5f85964fe1..45199a0ec89 100644
--- a/chain/ethereum/examples/firehose.rs
+++ b/chain/ethereum/examples/firehose.rs
@@ -9,6 +9,7 @@ use graph::{
 use graph_chain_ethereum::codec;
 use hex::ToHex;
 use prost::Message;
+use std::slice;
 use std::sync::Arc;
 use tonic::Streaming;

@@ -25,7 +26,7 @@ async fn main() -> Result<(), Error> {
     let host = "https://api.streamingfast.io:443".to_string();
     let metrics = Arc::new(EndpointMetrics::new(
         logger,
-        &[host.clone()],
+        slice::from_ref(&host),
         Arc::new(MetricsRegistry::mock()),
     ));

diff --git a/graph/src/components/network_provider/provider_manager.rs b/graph/src/components/network_provider/provider_manager.rs
index beac2cb762c..35652164b07 100644
--- a/graph/src/components/network_provider/provider_manager.rs
+++ b/graph/src/components/network_provider/provider_manager.rs
@@ -566,6 +566,7 @@ mod tests {
         let manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -583,6 +584,7 @@ mod tests {
         let manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -675,6 +677,7 @@ mod tests {
         let manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone(), adapter_2.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -736,6 +739,7 @@ mod tests {
         let mut manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -766,6 +770,7 @@ mod tests {
         let manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -793,6 +798,7 @@ mod tests {
         let manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -822,6 +828,7 @@ mod tests {
         let mut manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -850,6 +857,7 @@ mod tests {
         let mut manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -889,6 +897,7 @@ mod tests {
             chain_name(),
             vec![adapter_1.clone(), adapter_2.clone(), adapter_3.clone()],
         )],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

@@ -933,6 +942,7 @@ mod tests {
         let manager: ProviderManager> = ProviderManager::new(
             discard(),
             [(chain_name(), vec![adapter_1.clone()])],
+            #[allow(clippy::cloned_ref_to_slice_refs)]
             ProviderCheckStrategy::RequireAll(&[check_1.clone()]),
         );

From 5e8d4691eaf05556a80191a1b1c94b59e6d242c5 Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:18:44 -0800
Subject: [PATCH 73/92] tests: Fix warnings from clippy::empty_line_after_doc_comments
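
A blank line ends a `///` doc comment, so any doc lines after it
silently stop documenting the item. A sketch of the before/after shape
(hypothetical item name):

    /// Explains the size estimate.

    /// This paragraph is detached and triggers the lint.
    mod btree {}

    /// Explains the size estimate.
    ///
    /// An empty `///` line keeps it one doc block.
    mod btree {}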
---
 graph/examples/stress.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/graph/examples/stress.rs b/graph/examples/stress.rs
index 5534f2263b3..7a50df521d4 100644
--- a/graph/examples/stress.rs
+++ b/graph/examples/stress.rs
@@ -48,13 +48,12 @@ static mut PRINT_SAMPLES: bool = false;
 /// number of entries divided by `NODE_FILL`, and the number of
 /// interior nodes can be determined by dividing the number of nodes
 /// at the child level by `NODE_FILL`
-
+///
 /// The other difficulty is that the structs with which `BTreeMap`
 /// represents internal and leaf nodes are not public, so we can't
 /// get their size with `std::mem::size_of`; instead, we base our
 /// estimates of their size on the current `std` code, assuming that
 /// these structs will not change
-
 mod btree {
     use std::mem;
     use std::{mem::MaybeUninit, ptr::NonNull};

From b2ba9ce4e838c3080f5845dd0dfd5c25ad94574c Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:26:17 -0800
Subject: [PATCH 74/92] tests: Fix warnings from clippy::field_reassign_with_default
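
Creating a default value and then overwriting fields takes a `mut`
binding and several statements; struct-update syntax builds the same
value in one expression. Sketch of the rewrite, taken from the shape of
the changes below:

    // Before: reassign after `default()`
    let mut call = EthereumCall::default();
    call.transaction_index = 1;

    // After: one expression, no `mut`
    let call = EthereumCall {
        transaction_index: 1,
        ..Default::default()
    };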
---
 chain/ethereum/src/codec.rs | 21 ++++++----
 chain/ethereum/src/tests.rs | 83 ++++++++++++++++++++++---------------
 2 files changed, 63 insertions(+), 41 deletions(-)

diff --git a/chain/ethereum/src/codec.rs b/chain/ethereum/src/codec.rs
index e16f207c550..935b294599b 100644
--- a/chain/ethereum/src/codec.rs
+++ b/chain/ethereum/src/codec.rs
@@ -526,14 +526,19 @@ mod test {
     #[test]
     fn ensure_block_serialization() {
         let now = Utc::now().timestamp();
-        let mut block = Block::default();
-        let mut header = BlockHeader::default();
-        header.timestamp = Some(Timestamp {
-            seconds: now,
-            nanos: 0,
-        });
-
-        block.header = Some(header);
+
+        let header = BlockHeader {
+            timestamp: Some(Timestamp {
+                seconds: now,
+                nanos: 0,
+            }),
+            ..Default::default()
+        };
+
+        let block = Block {
+            header: Some(header.clone()),
+            ..Default::default()
+        };

         let str_block = block.data().unwrap().to_string();

diff --git a/chain/ethereum/src/tests.rs b/chain/ethereum/src/tests.rs
index a925dc9d71e..6ed05265cf3 100644
--- a/chain/ethereum/src/tests.rs
+++ b/chain/ethereum/src/tests.rs
@@ -26,24 +26,31 @@ fn test_trigger_ordering() {
         EthereumBlockTriggerType::WithCallTo(Address::random()),
     );

-    let mut call1 = EthereumCall::default();
-    call1.transaction_index = 1;
+    let call1 = EthereumCall {
+        transaction_index: 1,
+        ..Default::default()
+    };
     let call1 = EthereumTrigger::Call(Arc::new(call1));

-    let mut call2 = EthereumCall::default();
-    call2.transaction_index = 2;
-    call2.input = Bytes(vec![0]);
+    let call2 = EthereumCall {
+        transaction_index: 2,
+        input: Bytes(vec![0]),
+        ..Default::default()
+    };
     let call2 = EthereumTrigger::Call(Arc::new(call2));

-    let mut call3 = EthereumCall::default();
-    call3.transaction_index = 3;
+    let call3 = EthereumCall {
+        transaction_index: 3,
+        ..Default::default()
+    };
     let call3 = EthereumTrigger::Call(Arc::new(call3));

     // Call with the same tx index as call2
-    let mut call4 = EthereumCall::default();
-    call4.transaction_index = 2;
-    // different than call2 so they don't get mistaken as the same
-    call4.input = Bytes(vec![1]);
+    let call4 = EthereumCall {
+        transaction_index: 2,
+        // different than call2 so they don't get mistaken as the same
+        input: Bytes(vec![1]),
+        ..Default::default()
+    };
     let call4 = EthereumTrigger::Call(Arc::new(call4));

     fn create_log(tx_index: u64, log_index: u64) -> Arc {
@@ -92,13 +99,14 @@ fn test_trigger_ordering() {

     let logger = Logger::root(slog::Discard, o!());

-    let mut b: LightEthereumBlock = Default::default();
-
-    // This is necessary because inside of BlockWithTriggers::new
-    // there's a log for both fields. So just using Default above
-    // gives None on them.
-    b.number = Some(Default::default());
-    b.hash = Some(Default::default());
+    // The field initializers are necessary because inside of
+    // BlockWithTriggers::new there's a log for both fields. So just using
+    // Default above gives None on them.
+    let b: LightEthereumBlock = LightEthereumBlock {
+        number: Some(Default::default()),
+        hash: Some(Default::default()),
+        ..Default::default()
+    };

     // Test that `BlockWithTriggers` sorts the triggers.
     let block_with_triggers = BlockWithTriggers::::new(
@@ -130,21 +138,29 @@ fn test_trigger_dedup() {
     // duplicate block2
     let block3 = block2.clone();

-    let mut call1 = EthereumCall::default();
-    call1.transaction_index = 1;
+    let call1 = EthereumCall {
+        transaction_index: 1,
+        ..Default::default()
+    };
     let call1 = EthereumTrigger::Call(Arc::new(call1));

-    let mut call2 = EthereumCall::default();
-    call2.transaction_index = 2;
+    let call2 = EthereumCall {
+        transaction_index: 2,
+        ..Default::default()
+    };
     let call2 = EthereumTrigger::Call(Arc::new(call2));

-    let mut call3 = EthereumCall::default();
-    call3.transaction_index = 3;
+    let call3 = EthereumCall {
+        transaction_index: 3,
+        ..Default::default()
+    };
     let call3 = EthereumTrigger::Call(Arc::new(call3));

     // duplicate call2
-    let mut call4 = EthereumCall::default();
-    call4.transaction_index = 2;
+    let call4 = EthereumCall {
+        transaction_index: 2,
+        ..Default::default()
+    };
     let call4 = EthereumTrigger::Call(Arc::new(call4));

     fn create_log(tx_index: u64, log_index: u64) -> Arc {
@@ -190,13 +206,14 @@ fn test_trigger_dedup() {

     let logger = Logger::root(slog::Discard, o!());

-    let mut b: LightEthereumBlock = Default::default();
-
-    // This is necessary because inside of BlockWithTriggers::new
-    // there's a log for both fields. So just using Default above
-    // gives None on them.
-    b.number = Some(Default::default());
-    b.hash = Some(Default::default());
+    // The field initializers are necessary because inside of
+    // BlockWithTriggers::new there's a log for both fields. So just using
+    // Default above gives None on them.
+    let b: LightEthereumBlock = LightEthereumBlock {
+        number: Some(Default::default()),
+        hash: Some(Default::default()),
+        ..Default::default()
+    };

     // Test that `BlockWithTriggers` sorts the triggers.
     let block_with_triggers = BlockWithTriggers::::new(

From 2e6981beaf26a5e9679e694832aae25af6dd411d Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:33:50 -0800
Subject: [PATCH 75/92] tests: Fix warnings from clippy::just_underscores_and_digits
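
Bindings named only with underscores (or underscores and digits), like
`__`, look like real identifiers but behave like them too — unlike `_`,
they keep the value alive. A generic sketch of the lint (hypothetical
function):

    fn callback(__: u32, _field: &str) {} // `__` reads like a name
    fn callback(_: u32, _field: &str) {}  // a plain wildcard is clearer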
---
 store/test-store/tests/graphql/introspection.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs
index 4e8108044c8..dc6427b2038 100644
--- a/store/test-store/tests/graphql/introspection.rs
+++ b/store/test-store/tests/graphql/introspection.rs
@@ -46,7 +46,7 @@ impl Resolver for MockResolver {

     async fn resolve_object(
         &self,
-        __: Option,
+        _: Option,
         _field: &a::Field,
         _field_definition: &s::Field,
         _object_type: ObjectOrInterface<'_>,

From 06dfcb5a8f00819725e073f7a6f410e4b09449ee Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:36:25 -0800
Subject: [PATCH 76/92] tests: Fix warnings from clippy::legacy_numeric_constants
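
The module-level numeric constants and the `min_value()`/`max_value()`
methods are superseded by associated constants on the primitive types
(stable since Rust 1.43). Sketch:

    let a = std::u32::MAX;    // legacy module constant
    let b = i32::min_value(); // legacy method

    let a = u32::MAX;         // associated constants
    let b = i32::MIN;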
---
 graphql/src/store/query.rs                           | 16 ++++++++--------
 runtime/test/src/test/abi.rs                         |  4 ++--
 store/test-store/tests/graphql/introspection.rs      |  4 ++--
 store/test-store/tests/postgres/relational.rs        |  6 +++---
 .../tests/postgres/relational_bytes.rs               |  2 +-
 5 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs
index 720f2844932..ce43dee97a9 100644
--- a/graphql/src/store/query.rs
+++ b/graphql/src/store/query.rs
@@ -738,8 +738,8 @@ mod tests {
             &object,
             BLOCK_NUMBER_MAX,
             field,
-            std::u32::MAX,
-            std::u32::MAX,
+            u32::MAX,
+            u32::MAX,
             &INPUT_SCHEMA,
         )
         .unwrap()
@@ -1042,8 +1042,8 @@ mod tests {
             &object,
             BLOCK_NUMBER_MAX,
             &query_field,
-            std::u32::MAX,
-            std::u32::MAX,
+            u32::MAX,
+            u32::MAX,
             &INPUT_SCHEMA,
         );

@@ -1100,8 +1100,8 @@ mod tests {
             &object,
             BLOCK_NUMBER_MAX,
             &query_field,
-            std::u32::MAX,
-            std::u32::MAX,
+            u32::MAX,
+            u32::MAX,
             &INPUT_SCHEMA,
         );

@@ -1194,8 +1194,8 @@ mod tests {
             &object,
             BLOCK_NUMBER_MAX,
             &query_field,
-            std::u32::MAX,
-            std::u32::MAX,
+            u32::MAX,
+            u32::MAX,
             &INPUT_SCHEMA,
         );

diff --git a/runtime/test/src/test/abi.rs b/runtime/test/src/test/abi.rs
index 304ccc90e96..ba9048f6040 100644
--- a/runtime/test/src/test/abi.rs
+++ b/runtime/test/src/test/abi.rs
@@ -342,13 +342,13 @@ async fn test_abi_store_value(api_version: Version) {
     assert_eq!(new_value, Value::from(string));

     // Value::Int
-    let int = i32::min_value();
+    let int = i32::MIN;
     let new_value_ptr = instance.takes_val_returns_ptr("value_from_int", int).await;
     let new_value: Value = instance.asc_get(new_value_ptr).unwrap();
     assert_eq!(new_value, Value::Int(int));

     // Value::Int8
-    let int8 = i64::min_value();
+    let int8 = i64::MIN;
     let new_value_ptr = instance
         .takes_val_returns_ptr("value_from_int8", int8)
         .await;
diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs
index dc6427b2038..6a978bccfc5 100644
--- a/store/test-store/tests/graphql/introspection.rs
+++ b/store/test-store/tests/graphql/introspection.rs
@@ -122,8 +122,8 @@ async fn introspection_query(schema: Arc, query: &str) -> QueryResult
     let options = QueryExecutionOptions {
         resolver: MockResolver,
         deadline: None,
-        max_first: std::u32::MAX,
-        max_skip: std::u32::MAX,
+        max_first: u32::MAX,
+        max_skip: u32::MAX,
         trace: false,
     };

diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs
index 697d2f2b57a..dcd4e770014 100644
--- a/store/test-store/tests/postgres/relational.rs
+++ b/store/test-store/tests/postgres/relational.rs
@@ -182,7 +182,7 @@ lazy_static! {
         InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone())
             .expect("failed to parse schema");
     static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap();
-    static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap();
+    static ref LARGE_INT: BigInt = BigInt::from(i64::MAX).pow(17).unwrap();
     static ref LARGE_DECIMAL: BigDecimal =
         BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1);
     static ref BYTES_VALUE: H256 = H256::from(hex!(
@@ -200,8 +200,8 @@ lazy_static! {
         entity! { THINGS_SCHEMA =>
             id: "one",
             bool: true,
-            int: std::i32::MAX,
-            int8: std::i64::MAX,
+            int: i32::MAX,
+            int8: i64::MAX,
             timestamp: Value::Timestamp(Timestamp::from_microseconds_since_epoch(1710837304040956).expect("failed to create timestamp")),
             bigDecimal: decimal.clone(),
             bigDecimalArray: vec![decimal.clone(), (decimal + 1.into())],
diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs
index b0330bfe23e..7eab03c5df5 100644
--- a/store/test-store/tests/postgres/relational_bytes.rs
+++ b/store/test-store/tests/postgres/relational_bytes.rs
@@ -43,7 +43,7 @@ lazy_static! {
     static ref THINGS_SCHEMA: InputSchema =
         InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone())
             .expect("Failed to parse THINGS_GQL");
-    static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap();
+    static ref LARGE_INT: BigInt = BigInt::from(i64::MAX).pow(17).unwrap();
     static ref LARGE_DECIMAL: BigDecimal =
         BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1);
     static ref BYTES_VALUE: H256 = H256::from(hex!(

From 9016044255abd89055f61102f7de6333bd4970ba Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:40:14 -0800
Subject: [PATCH 77/92] tests: Fix warnings from clippy::match_like_matches_macro
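
A `match` whose arms only map patterns to `true`/`false` is what the
`matches!` macro expresses directly. Sketch of the rewrite, from the
`TestResult::success` change below:

    // Before
    match self.status {
        TestStatus::Ok => true,
        _ => false,
    }

    // After
    matches!(self.status, TestStatus::Ok)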
---
 graph/src/schema/api.rs          | 10 ++--------
 tests/tests/integration_tests.rs |  5 +----
 2 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs
index 40744765d9a..86b13a9f3f2 100644
--- a/graph/src/schema/api.rs
+++ b/graph/src/schema/api.rs
@@ -1778,10 +1778,7 @@ mod tests {
         let change_block_filter = user_filter_type
             .fields
             .iter()
-            .find(move |p| match p.name.as_str() {
-                "_change_block" => true,
-                _ => false,
-            })
+            .find(|p| p.name == "_change_block")
             .expect("_change_block field is missing in User_filter");

         match &change_block_filter.value_type {
@@ -1904,10 +1901,7 @@ mod tests {
         let change_block_filter = user_filter_type
             .fields
             .iter()
-            .find(move |p| match p.name.as_str() {
-                "_change_block" => true,
-                _ => false,
-            })
+            .find(|p| p.name == "_change_block")
             .expect("_change_block field is missing in User_filter");

         match &change_block_filter.value_type {
diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs
index 56236183a12..ff3886a7712 100644
--- a/tests/tests/integration_tests.rs
+++ b/tests/tests/integration_tests.rs
@@ -52,10 +52,7 @@ pub struct TestResult {

 impl TestResult {
     pub fn success(&self) -> bool {
-        match self.status {
-            TestStatus::Ok => true,
-            _ => false,
-        }
+        matches!(self.status, TestStatus::Ok)
     }

     fn print_subgraph(&self) {

From a60f6402cecbc3997db59fa6f5066d8ca392c44a Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 12:43:19 -0800
Subject: [PATCH 78/92] tests: Fix warnings from clippy::mut_mutex_lock
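
When code already has exclusive access to a `Mutex` (owned or `&mut`),
`Mutex::get_mut` reaches the data without taking the runtime lock.
Sketch of the rewrite (generic field name, the real ones are in the
diff below):

    // Exclusive access to the Mutex, so no locking is needed:
    assert!(calls.lock().unwrap().is_empty());    // before: runtime lock
    assert!(calls.get_mut().unwrap().is_empty()); // after: statically exclusive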
a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -182,7 +182,7 @@ lazy_static! { InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()) .expect("failed to parse schema"); static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap(); - static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); + static ref LARGE_INT: BigInt = BigInt::from(i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); static ref BYTES_VALUE: H256 = H256::from(hex!( @@ -200,8 +200,8 @@ lazy_static! { entity! { THINGS_SCHEMA => id: "one", bool: true, - int: std::i32::MAX, - int8: std::i64::MAX, + int: i32::MAX, + int8: i64::MAX, timestamp: Value::Timestamp(Timestamp::from_microseconds_since_epoch(1710837304040956).expect("failed to create timestamp")), bigDecimal: decimal.clone(), bigDecimalArray: vec![decimal.clone(), (decimal + 1.into())], diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index b0330bfe23e..7eab03c5df5 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -43,7 +43,7 @@ lazy_static! { static ref THINGS_SCHEMA: InputSchema = InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()) .expect("Failed to parse THINGS_GQL"); - static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); + static ref LARGE_INT: BigInt = BigInt::from(i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); static ref BYTES_VALUE: H256 = H256::from(hex!( From 9016044255abd89055f61102f7de6333bd4970ba Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 12:40:14 -0800 Subject: [PATCH 77/92] tests: Fix warnings from clippy::match_like_matches_macro --- graph/src/schema/api.rs | 10 ++-------- tests/tests/integration_tests.rs | 5 +---- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 40744765d9a..86b13a9f3f2 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -1778,10 +1778,7 @@ mod tests { let change_block_filter = user_filter_type .fields .iter() - .find(move |p| match p.name.as_str() { - "_change_block" => true, - _ => false, - }) + .find(|p| p.name == "_change_block") .expect("_change_block field is missing in User_filter"); match &change_block_filter.value_type { @@ -1904,10 +1901,7 @@ mod tests { let change_block_filter = user_filter_type .fields .iter() - .find(move |p| match p.name.as_str() { - "_change_block" => true, - _ => false, - }) + .find(|p| p.name == "_change_block") .expect("_change_block field is missing in User_filter"); match &change_block_filter.value_type { diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs index 56236183a12..ff3886a7712 100644 --- a/tests/tests/integration_tests.rs +++ b/tests/tests/integration_tests.rs @@ -52,10 +52,7 @@ pub struct TestResult { impl TestResult { pub fn success(&self) -> bool { - match self.status { - TestStatus::Ok => true, - _ => false, - } + matches!(self.status, TestStatus::Ok) } fn print_subgraph(&self) { From a60f6402cecbc3997db59fa6f5066d8ca392c44a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 12:43:19 -0800 Subject: [PATCH 78/92] tests: Fix warnings from clippy::mut_mutex_lock --- 
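The fix below relies on Mutex::get_mut: when the mutex is reachable through an exclusive (or owned) handle, no other thread can possibly hold the lock, so the data can be borrowed directly instead of going through lock(). A minimal sketch of the difference, independent of the test code in the diff:

use std::sync::Mutex;

fn main() {
    let mut calls: Mutex<Vec<&str>> = Mutex::new(Vec::new());

    // Shared access: lock() is required and may block.
    calls.lock().unwrap().push("validate_identifier");
    calls.lock().unwrap().clear();

    // Exclusive access: get_mut() hands out &mut Vec<&str> without
    // locking; the only failure mode is a poisoned mutex, as with lock().
    assert!(calls.get_mut().unwrap().is_empty());
}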
graph/src/components/network_provider/genesis_hash_check.rs | 6 +++--- graph/src/components/network_provider/provider_manager.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/graph/src/components/network_provider/genesis_hash_check.rs b/graph/src/components/network_provider/genesis_hash_check.rs index 26c8f91bab1..b92d4602315 100644 --- a/graph/src/components/network_provider/genesis_hash_check.rs +++ b/graph/src/components/network_provider/genesis_hash_check.rs @@ -185,8 +185,8 @@ mod tests { update_identifier_calls, } = self; - assert!(validate_identifier_calls.lock().unwrap().is_empty()); - assert!(update_identifier_calls.lock().unwrap().is_empty()); + assert!(validate_identifier_calls.get_mut().unwrap().is_empty()); + assert!(update_identifier_calls.get_mut().unwrap().is_empty()); } } @@ -226,7 +226,7 @@ mod tests { chain_identifier_calls, } = self; - assert!(chain_identifier_calls.lock().unwrap().is_empty()); + assert!(chain_identifier_calls.get_mut().unwrap().is_empty()); } } diff --git a/graph/src/components/network_provider/provider_manager.rs b/graph/src/components/network_provider/provider_manager.rs index 35652164b07..54454df40f6 100644 --- a/graph/src/components/network_provider/provider_manager.rs +++ b/graph/src/components/network_provider/provider_manager.rs @@ -428,7 +428,7 @@ mod tests { provider_name_calls, } = self; - assert!(provider_name_calls.lock().unwrap().is_empty()); + assert!(provider_name_calls.get_mut().unwrap().is_empty()); } } From e910eaef79e0a8209160182e261115d569c018b5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 12:46:00 -0800 Subject: [PATCH 79/92] tests: Fix warnings from clippy::neg_cmp_op_on_partial_ord --- chain/ethereum/src/network.rs | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 8d53a820486..536f7a8a54d 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -329,6 +329,7 @@ mod tests { use super::{EthereumNetworkAdapter, EthereumNetworkAdapters, NodeCapabilities}; #[test] + #[allow(clippy::neg_cmp_op_on_partial_ord)] fn ethereum_capabilities_comparison() { let archive = NodeCapabilities { archive: true, @@ -470,9 +471,7 @@ mod tests { { let adapter = adapters.call_or_cheapest(None).unwrap(); assert!(adapter.is_call_only()); - assert!( - !adapters.call_or_cheapest(None).unwrap().is_call_only() - ); + assert!(!adapters.call_or_cheapest(None).unwrap().is_call_only()); } // Check empty falls back to call only @@ -620,9 +619,7 @@ mod tests { // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_call_adapter), 2); assert_eq!(Arc::strong_count(ð_adapter), 2); - assert!( - !adapters.call_or_cheapest(None).unwrap().is_call_only() - ); + assert!(!adapters.call_or_cheapest(None).unwrap().is_call_only()); } #[graph::test] @@ -665,9 +662,7 @@ mod tests { .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_adapter), 2); - assert!( - !adapters.call_or_cheapest(None).unwrap().is_call_only() - ); + assert!(!adapters.call_or_cheapest(None).unwrap().is_call_only()); } #[graph::test] @@ -687,7 +682,8 @@ mod tests { let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let chain_id: Word = "chain_id".into(); - let adapters = [fake_adapter( + let adapters = [ + fake_adapter( &logger, unavailable_provider, &provider_metrics, @@ -703,7 +699,8 @@ mod tests { &metrics, false, ) - 
.await]; + .await, + ]; // Set errors metrics.report_for_test(&ProviderName::from(error_provider), false); @@ -896,11 +893,7 @@ mod tests { }); let manager = ProviderManager::new( logger, - vec![( - chain_id.clone(), - no_available_adapter.to_vec(), - )] - .into_iter(), + vec![(chain_id.clone(), no_available_adapter.to_vec())].into_iter(), ProviderCheckStrategy::MarkAsValid, ); From 2b7dbb3c670fc1ee8c79d06f228340cad0f3b427 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 12:50:08 -0800 Subject: [PATCH 80/92] tests: Fix warnings from clippy::never_loop --- graph/src/components/subgraph/proof_of_indexing/mod.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/graph/src/components/subgraph/proof_of_indexing/mod.rs b/graph/src/components/subgraph/proof_of_indexing/mod.rs index 718a3a5cecd..b3861c0cea6 100644 --- a/graph/src/components/subgraph/proof_of_indexing/mod.rs +++ b/graph/src/components/subgraph/proof_of_indexing/mod.rs @@ -148,11 +148,10 @@ mod tests { // Create a database which stores intermediate PoIs let mut db = HashMap::>::new(); - let mut block_count = 1; - for causality_region in case.data.causality_regions.values() { - block_count = causality_region.blocks.len(); - break; - } + let block_count = match case.data.causality_regions.values().next() { + Some(causality_region) => causality_region.blocks.len(), + None => 1, + }; for block_i in 0..block_count { let mut stream = ProofOfIndexing::new(block_i.try_into().unwrap(), version); From 83a3c78d3d1968a5346a0c14895014741b728f61 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 12:59:40 -0800 Subject: [PATCH 81/92] tests: Fix warnings from clippy::no_effect Actually a nice logic error --- tests/runner-tests/data-source-revert/grafted.yaml | 2 +- tests/tests/runner_tests.rs | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/runner-tests/data-source-revert/grafted.yaml b/tests/runner-tests/data-source-revert/grafted.yaml index 9992583098b..16dca790850 100644 --- a/tests/runner-tests/data-source-revert/grafted.yaml +++ b/tests/runner-tests/data-source-revert/grafted.yaml @@ -6,7 +6,7 @@ schema: graft: # This can be overwritten by `updateAndDeploy.js`. # Please commit this file when this happens. 
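The runner_tests.rs hunk below is the "nice logic error" mentioned in the commit message: a bare `false;` on its own line is an expression statement with no effect, so the early exit the author intended never happened and control always fell through to the backtrace comparison. A hypothetical helper with the same shape as the fixed code:

fn messages_equal_ignoring_backtrace(a: &str, b: &str) -> bool {
    if a.len() != b.len() {
        // Writing `false;` here would be a no-op statement, which is
        // exactly what clippy::no_effect flagged; an explicit `return`
        // (or an if/else expression, as in the real fix) is needed.
        return false;
    }
    // Compare only the part of the message before any backtrace marker.
    a.split("\\twasm backtrace:").next() == b.split("\\twasm backtrace:").next()
}

fn main() {
    assert!(!messages_equal_ignoring_backtrace("ab", "abc"));
    assert!(messages_equal_ignoring_backtrace(
        "x\\twasm backtrace: 1",
        "x\\twasm backtrace: 2"
    ));
}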
- base: QmcAL39QSKZvRssr2ToCJrav7XK9ggajxvBR7M1NNUCqdh + base: QmRfaNu6ymQVUiJVTayqWuT8ftLtvn7iE8kQSSAtTPA2J2 block: 3 dataSources: - kind: ethereum/contract diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index a28a05434f3..ce1f4e28400 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -37,14 +37,14 @@ fn assert_eq_ignore_backtrace(err: &SubgraphError, expected: &SubgraphError) { || err.handler != expected.handler || err.deterministic != expected.deterministic { - false; - } - - // Ignore any WASM backtrace in the error message - let split_err: Vec<&str> = err.message.split("\\twasm backtrace:").collect(); - let split_expected: Vec<&str> = expected.message.split("\\twasm backtrace:").collect(); + false + } else { + // Ignore any WASM backtrace in the error message + let split_err: Vec<&str> = err.message.split("\\twasm backtrace:").collect(); + let split_expected: Vec<&str> = expected.message.split("\\twasm backtrace:").collect(); - split_err.first() == split_expected.first() + split_err.first() == split_expected.first() + } }; if !equal { From 76f25e985c07f0758af9f78912b30da3c168aaf2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:06:16 -0800 Subject: [PATCH 82/92] tests: Fix warnings from clippy::ptr_arg --- chain/near/src/chain.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index d992c99bd6c..6dae3c3a1f0 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -543,8 +543,10 @@ mod test { .collect(); assert_eq!(errs.len(), 2, "{:?}", ds); - let expected_errors = ["partial account prefixes can't have empty values".to_string(), - "partial account suffixes can't have empty values".to_string()]; + let expected_errors = [ + "partial account prefixes can't have empty values".to_string(), + "partial account suffixes can't have empty values".to_string(), + ]; assert!( expected_errors.iter().all(|err| errs.contains(err)), "{:?}", @@ -882,7 +884,7 @@ mod test { .collect() } - fn new_success_block(height: u64, receiver_id: &String) -> codec::Block { + fn new_success_block(height: u64, receiver_id: &str) -> codec::Block { codec::Block { header: Some(BlockHeader { height, @@ -894,12 +896,12 @@ mod test { receipt: Some(crate::codec::Receipt { receipt: Some(receipt::Receipt::Action(ReceiptAction { output_data_receivers: vec![DataReceiver { - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }], ..Default::default() })), - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }), execution_outcome: Some(ExecutionOutcomeWithId { @@ -949,7 +951,7 @@ mod test { } } - fn new_receipt_with_outcome(receiver_id: &String, block: Arc) -> ReceiptWithOutcome { + fn new_receipt_with_outcome(receiver_id: &str, block: Arc) -> ReceiptWithOutcome { ReceiptWithOutcome { outcome: ExecutionOutcomeWithId { outcome: Some(ExecutionOutcome { @@ -964,12 +966,12 @@ mod test { receipt: codec::Receipt { receipt: Some(receipt::Receipt::Action(ReceiptAction { output_data_receivers: vec![DataReceiver { - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }], ..Default::default() })), - receiver_id: receiver_id.clone(), + receiver_id: receiver_id.to_string(), ..Default::default() }, block, From ecf6bd97fc957f1090df6d564305485ea04fcf81 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:15:52 -0800 Subject: 
[PATCH 83/92] tests: Fix warnings from clippy::search_is_some --- .../tests/chain/ethereum/manifest.rs | 90 ++++++------------- 1 file changed, 29 insertions(+), 61 deletions(-) diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index a16e88ebfaa..e4dee44c9ab 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -100,7 +100,8 @@ impl LinkResolver for TextResolver { async fn cat(&self, _ctx: &LinkResolverContext, link: &Link) -> Result, anyhow::Error> { self.texts .get(&link.link) - .ok_or(anyhow!("No text for {}", &link.link)).cloned() + .ok_or(anyhow!("No text for {}", &link.link)) + .cloned() } async fn get_block( @@ -1237,6 +1238,23 @@ graft: }) } +async fn has_feature_validation_error( + unvalidated: UnvalidatedSubgraphManifest, + store: Arc, +) -> bool { + unvalidated + .validate(store, true) + .await + .expect_err("Validation must fail") + .into_iter() + .any(|e| { + matches!( + e, + SubgraphManifestValidationError::FeatureValidationError(_) + ) + }) +} + #[test] fn declared_grafting_feature_causes_no_feature_validation_errors() { const YAML: &str = " @@ -1254,18 +1272,8 @@ graft: test_store::run_test_sequentially(|store| async move { let store = store.subgraph_store(); let unvalidated = resolve_unvalidated(YAML).await; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::Grafting)) }) @@ -1285,18 +1293,8 @@ schema: test_store::run_test_sequentially(|store| async move { let store = store.subgraph_store(); let unvalidated = resolve_unvalidated(YAML).await; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::NonFatalErrors)) @@ -1339,18 +1337,8 @@ schema: .expect("Parsing simple manifest works") }; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::FullTextSearch)) @@ -1548,18 +1536,8 @@ dataSources: .expect("Parsing simple manifest works") }; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + 
assert!(!has_error, "There must be no FeatureValidationError"); }); } @@ -1577,18 +1555,8 @@ schema: test_store::run_test_sequentially(|store| async move { let store = store.subgraph_store(); let unvalidated = resolve_unvalidated(YAML).await; - assert!(unvalidated - .validate(store.clone(), true) - .await - .expect_err("Validation must fail") - .into_iter() - .find(|e| { - matches!( - e, - SubgraphManifestValidationError::FeatureValidationError(_) - ) - }) - .is_none()); + let has_error = has_feature_validation_error(unvalidated, store).await; + assert!(!has_error, "There must be no FeatureValidationError"); let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_4).await; assert!(manifest.features.contains(&SubgraphFeature::NonFatalErrors)) From a482a33d9b4b3e192f53e9f42b3938f58421f00a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:18:15 -0800 Subject: [PATCH 84/92] tests: Fix warnings from clippy::should_implement_trait --- tests/tests/integration_tests.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs index ff3886a7712..322eb643533 100644 --- a/tests/tests/integration_tests.rs +++ b/tests/tests/integration_tests.rs @@ -98,7 +98,7 @@ pub enum SourceSubgraph { } impl SourceSubgraph { - pub fn from_str(s: &str) -> Self { + fn new(s: &str) -> Self { if let Some((alias, subgraph)) = s.split_once(':') { Self::WithAlias((alias.to_string(), subgraph.to_string())) } else { @@ -144,7 +144,7 @@ impl TestCase { T: Future> + Send + 'static, { let mut test_case = Self::new(name, test); - test_case.source_subgraph = Some(vec![SourceSubgraph::from_str(base_subgraph)]); + test_case.source_subgraph = Some(vec![SourceSubgraph::new(base_subgraph)]); test_case } @@ -160,7 +160,7 @@ impl TestCase { test_case.source_subgraph = Some( source_subgraphs .into_iter() - .map(SourceSubgraph::from_str) + .map(SourceSubgraph::new) .collect(), ); test_case From 9c00ae0401ef0c678878177410bfbfd14610b382 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:19:34 -0800 Subject: [PATCH 85/92] tests: Fix warnings from clippy::unnecessary_fallible_conversions --- chain/near/src/trigger.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index d604f97bc14..c929d7caa19 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -152,8 +152,6 @@ pub struct ReceiptWithOutcome { #[cfg(test)] mod tests { - use std::convert::TryFrom; - use super::*; use graph::{ @@ -407,8 +405,7 @@ mod tests { } fn big_int(input: u64) -> Option { - let value = - BigInt::try_from(input).unwrap_or_else(|_| panic!("Invalid BigInt value {}", input)); + let value = BigInt::from(input); let bytes = value.to_signed_bytes_le(); Some(codec::BigInt { bytes }) From f02381ce04f74565543ce3a504a408a2fc630537 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:22:21 -0800 Subject: [PATCH 86/92] tests: Fix warnings from clippy::unnecessary_to_owned --- tests/tests/runner_tests.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index ce1f4e28400..046a1476491 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -233,7 +233,7 @@ async fn api_version_0_0_7() { ctx.start_and_sync_to(stop_block).await; let query_res = ctx - .query(&r#"{ testResults{ id, message } }"#.to_string()) + .query(r#"{ testResults{ id, message } 
}"#) .await .unwrap(); @@ -317,7 +317,7 @@ async fn derived_loaders() { // Where the test cases are documented in the code. let query_res = ctx - .query(&r#"{ testResult(id:"1_0", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#.to_string()) + .query(r#"{ testResult(id:"1_0", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#) .await .unwrap(); @@ -365,7 +365,7 @@ async fn derived_loaders() { ); let query_res = ctx - .query(&r#"{ testResult(id:"1_1", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#.to_string()) + .query(r#"{ testResult(id:"1_1", block: { number: 1 } ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#) .await .unwrap(); @@ -403,7 +403,7 @@ async fn derived_loaders() { ); let query_res = ctx.query( - &r#"{ testResult(id:"2_0" ){ id barDerived{id value value2} bBarDerived{id value value2} } }"#.to_string() + r#"{ testResult(id:"2_0" ){ id barDerived{id value value2} bBarDerived{id value value2} } }"# ) .await .unwrap(); From fe94732cc7f6ef52f76df8d083ffa256126143d6 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:24:06 -0800 Subject: [PATCH 87/92] tests: Fix warnings from clippy::unusual_byte_groupings --- graph/src/data/store/id.rs | 2 +- runtime/test/src/test.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs index 6ce909e81bd..222a11fefdf 100644 --- a/graph/src/data/store/id.rs +++ b/graph/src/data/store/id.rs @@ -566,7 +566,7 @@ mod tests { assert_eq!(exp, id); let id = IdType::Int8.generate_id(3, 2).unwrap(); - let exp = Id::Int8(0x0000_0003__0000_0002); + let exp = Id::Int8(0x0000_0003_0000_0002); assert_eq!(exp, id); // Should be id + 1 diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 17c6196b8d5..6af2bf74cb2 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1641,8 +1641,8 @@ async fn generate_id() { "bin2", IdType::Bytes.parse("0x0000000c00000003".into()).unwrap(), ), - ("int1", Id::Int8(0x0000_000c__0000_0000)), - ("int2", Id::Int8(0x0000_000c__0000_0001)), + ("int1", Id::Int8(0x0000_000c_0000_0000)), + ("int2", Id::Int8(0x0000_000c_0000_0001)), ] .into_iter(), ); From 747a354dc3e042abdcb903a05d7cd5a79f09b902 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:43:59 -0800 Subject: [PATCH 88/92] tests: Fix warnings from clippy::useless_format --- store/test-store/tests/postgres/writable.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index 0321b6d3579..93fd28c6d8f 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -370,13 +370,15 @@ fn restart() { #[test] fn read_range_test() { run_test(|store, writable, sourceable, deployment| async move { - let result_entities = [r#"(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }])"#, + let result_entities = [ + r#"(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }, EntitySourceOperation { 
entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }])"#, r#"(2, [EntitySourceOperation { entity_op: Modify, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(4), id: String("2"), vid: Int8(2) }, vid: 2 }])"#, r#"(3, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(6), id: String("3"), vid: Int8(3) }, vid: 3 }])"#, r#"(4, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(8), id: String("4"), vid: Int8(4) }, vid: 4 }])"#, r#"(5, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(10), id: String("5"), vid: Int8(5) }, vid: 5 }])"#, r#"(6, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#, - r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#]; + r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#, + ]; let subgraph_store = store.subgraph_store(); writable.deployment_synced(block_pointer(0)).await.unwrap(); @@ -440,8 +442,8 @@ fn read_immutable_only_range_test() { #[test] fn read_range_pool_created_test() { run_test(|store, writable, sourceable, deployment| async move { - let result_entities = [format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }}, vid: 1 }}])"), - format!("(2, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: 
Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }}, vid: 2 }}])")]; + let result_entities = ["(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity { blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }, vid: 1 }])", + "(2, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity { blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }, vid: 2 }])"]; // Rest of the test remains the same let subgraph_store = store.subgraph_store(); @@ -501,7 +503,7 @@ fn read_range_pool_created_test() { assert_eq!(e.len(), 2); for en in &e { let index = *en.0 - 1; - let a = result_entities[index as usize].clone(); + let a = result_entities[index as usize]; assert_eq!(a, format!("{:?}", en)); } From 760dcedc6bc3176b6ef468bd9d4e344b85dad7fa Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:50:26 -0800 Subject: [PATCH 89/92] tests: Fix warnings from clippy::vec_init_then_push --- graph/src/data/store/mod.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index f2013208578..f113c5248c8 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -892,10 +892,9 @@ pub enum EntityValidationErrorInner { macro_rules! 
entity { ($schema:expr => $($name:ident: $value:expr,)*) => { { - let mut result = Vec::new(); - $( - result.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); - )* + let result = vec![$( + ($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value)), + )*]; $schema.make_entity(result).unwrap() } }; From 5a87f363432d727c24c68a7ffb53f545256c516d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:55:13 -0800 Subject: [PATCH 90/92] tests: Fix warnings from clippy::zero_prefixed_literal --- store/postgres/src/vid_batcher.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/store/postgres/src/vid_batcher.rs b/store/postgres/src/vid_batcher.rs index 7fdb3597aeb..8cb0496bd86 100644 --- a/store/postgres/src/vid_batcher.rs +++ b/store/postgres/src/vid_batcher.rs @@ -435,20 +435,23 @@ mod tests { // The schedule of how we move through the bounds above in batches, // with varying timings for each batch - batcher.run(040, 075, 10, S010).await; - batcher.run(076, 145, 20, S010).await; - batcher.run(146, 240, 40, S200).await; - batcher.run(241, 270, 20, S200).await; - batcher.run(271, 281, 10, S200).await; - batcher.run(282, 287, 05, S050).await; - batcher.run(288, 298, 10, S050).await; - batcher.run(299, 309, 20, S050).await; - batcher.run(310, 325, 40, S100).await; - batcher.run(326, 336, 40, S100).await; - batcher.run(337, 347, 40, S100).await; - batcher.run(348, 357, 40, S100).await; - batcher.run(358, 359, 40, S010).await; - assert!(batcher.finished()); + #[allow(clippy::zero_prefixed_literal)] + { + batcher.run(040, 075, 10, S010).await; + batcher.run(076, 145, 20, S010).await; + batcher.run(146, 240, 40, S200).await; + batcher.run(241, 270, 20, S200).await; + batcher.run(271, 281, 10, S200).await; + batcher.run(282, 287, 05, S050).await; + batcher.run(288, 298, 10, S050).await; + batcher.run(299, 309, 20, S050).await; + batcher.run(310, 325, 40, S100).await; + batcher.run(326, 336, 40, S100).await; + batcher.run(337, 347, 40, S100).await; + batcher.run(348, 357, 40, S100).await; + batcher.run(358, 359, 40, S010).await; + assert!(batcher.finished()); + } batcher.at(360, 359, 80); batcher.step(360, 359, S010).await; From d4745c396521175ae3da9c71fed77d6afe68ce8a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 9 Jan 2026 13:55:33 -0800 Subject: [PATCH 91/92] all: Run 'cargo fmt' --- graph/src/amp/codec/test_fixtures.rs | 3 +- graph/src/blockchain/firehose_block_stream.rs | 82 +- graph/src/data/store/scalar/bigdecimal.rs | 6 +- graph/src/firehose/endpoints.rs | 1962 ++++++++--------- node/src/config.rs | 20 +- server/http/src/request.rs | 22 +- server/index-node/src/service.rs | 22 +- store/postgres/src/relational/ddl_tests.rs | 9 +- store/postgres/src/sql/mod.rs | 2 +- store/test-store/tests/postgres/graft.rs | 6 +- 10 files changed, 1069 insertions(+), 1065 deletions(-) diff --git a/graph/src/amp/codec/test_fixtures.rs b/graph/src/amp/codec/test_fixtures.rs index 43e53667243..a8a6882ff88 100644 --- a/graph/src/amp/codec/test_fixtures.rs +++ b/graph/src/amp/codec/test_fixtures.rs @@ -35,7 +35,8 @@ pub static RECORD_BATCH: LazyLock = LazyLock::new(|| { let columns = record_batches .into_iter() - .flat_map(|record_batch| record_batch.columns()).cloned() + .flat_map(|record_batch| record_batch.columns()) + .cloned() .collect::>(); RecordBatch::try_new(Schema::try_merge(schemas).unwrap().into(), columns).unwrap() diff --git 
a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 23fb9bd5264..90e985ff44d 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -465,40 +465,64 @@ mod tests { // Nothing - assert!( - !must_check_subgraph_continuity(&logger, &no_current_block, &no_cursor, 10), - ); + assert!(!must_check_subgraph_continuity( + &logger, + &no_current_block, + &no_cursor, + 10 + ),); // No cursor, subgraph current block ptr <, ==, > than manifest start block num - assert!( - !must_check_subgraph_continuity(&logger, &some_current_block(9), &no_cursor, 10), - ); - - assert!( - must_check_subgraph_continuity(&logger, &some_current_block(10), &no_cursor, 10), - ); - - assert!( - must_check_subgraph_continuity(&logger, &some_current_block(11), &no_cursor, 10), - ); + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(9), + &no_cursor, + 10 + ),); + + assert!(must_check_subgraph_continuity( + &logger, + &some_current_block(10), + &no_cursor, + 10 + ),); + + assert!(must_check_subgraph_continuity( + &logger, + &some_current_block(11), + &no_cursor, + 10 + ),); // Some cursor, subgraph current block ptr <, ==, > than manifest start block num - assert!( - !must_check_subgraph_continuity(&logger, &no_current_block, &some_cursor, 10), - ); - - assert!( - !must_check_subgraph_continuity(&logger, &some_current_block(9), &some_cursor, 10), - ); - - assert!( - !must_check_subgraph_continuity(&logger, &some_current_block(10), &some_cursor, 10), - ); - - assert!( - !must_check_subgraph_continuity(&logger, &some_current_block(11), &some_cursor, 10), - ); + assert!(!must_check_subgraph_continuity( + &logger, + &no_current_block, + &some_cursor, + 10 + ),); + + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(9), + &some_cursor, + 10 + ),); + + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(10), + &some_cursor, + 10 + ),); + + assert!(!must_check_subgraph_continuity( + &logger, + &some_current_block(11), + &some_cursor, + 10 + ),); } } diff --git a/graph/src/data/store/scalar/bigdecimal.rs b/graph/src/data/store/scalar/bigdecimal.rs index cc1ea2a59e9..baba57a2d4e 100644 --- a/graph/src/data/store/scalar/bigdecimal.rs +++ b/graph/src/data/store/scalar/bigdecimal.rs @@ -631,7 +631,8 @@ mod test { #[test] fn big_decimal_stable() { - let cases = [( + let cases = [ + ( "28b09c9c3f3e2fe037631b7fbccdf65c37594073016d8bf4bb0708b3fda8066a", "0.1", ), @@ -650,7 +651,8 @@ mod test { ( "6b06b34cc714810072988dc46c493c66a6b6c2c2dd0030271aa3adf3b3f21c20", "98765587998098786876.0", - )]; + ), + ]; for (hash, s) in cases.iter() { let dec = BigDecimal::from_str(s).unwrap(); assert_eq!(*hash, hex::encode(crypto_stable_hash(dec))); diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 6a723605a67..1c723790bcd 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -1,983 +1,979 @@ -use crate::firehose::fetch_client::FetchClient; -use crate::firehose::interceptors::AuthInterceptor; -use crate::{ - blockchain::{ - block_stream::FirehoseCursor, Block as BlockchainBlock, BlockPtr, ChainIdentifier, - }, - cheap_clone::CheapClone, - components::store::BlockNumber, - endpoint::{ConnectionType, EndpointMetrics, RequestLabels}, - env::ENV_VARS, - firehose::decode_firehose_block, - prelude::{anyhow, debug, DeploymentHash}, -}; -use anyhow::Context; -use async_trait::async_trait; -use futures03::{StreamExt, 
TryStreamExt}; -use http::uri::{Scheme, Uri}; -use itertools::Itertools; -use slog::{error, info, trace, Logger}; -use std::{collections::HashMap, fmt::Display, ops::ControlFlow, sync::Arc, time::Duration}; -use tokio::sync::OnceCell; -use tonic::codegen::InterceptedService; -use tonic::{ - codegen::CompressionEncoding, - metadata::{Ascii, MetadataKey, MetadataValue}, - transport::{Channel, ClientTlsConfig}, - Request, -}; - -use super::{codec as firehose, interceptors::MetricsInterceptor, stream_client::StreamClient}; -use crate::components::network_provider::ChainName; -use crate::components::network_provider::NetworkDetails; -use crate::components::network_provider::ProviderCheckStrategy; -use crate::components::network_provider::ProviderManager; -use crate::components::network_provider::ProviderName; -use crate::prelude::retry; - -/// This is constant because we found this magic number of connections after -/// which the grpc connections start to hang. -/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879 -pub const SUBGRAPHS_PER_CONN: usize = 100; - -const LOW_VALUE_THRESHOLD: usize = 10; -const LOW_VALUE_USED_PERCENTAGE: usize = 50; -const HIGH_VALUE_USED_PERCENTAGE: usize = 80; - -#[derive(Debug)] -pub struct FirehoseEndpoint { - pub provider: ProviderName, - pub auth: AuthInterceptor, - pub filters_enabled: bool, - pub compression_enabled: bool, - pub subgraph_limit: SubgraphLimit, - endpoint_metrics: Arc, - channel: Channel, - - /// The endpoint info is not intended to change very often, as it only contains the - /// endpoint's metadata, so caching it avoids sending unnecessary network requests. - info_response: OnceCell, -} - -#[derive(Debug)] -pub struct ConnectionHeaders(HashMap, MetadataValue>); - -#[async_trait] -impl NetworkDetails for Arc { - fn provider_name(&self) -> ProviderName { - self.provider.clone() - } - - async fn chain_identifier(&self) -> anyhow::Result { - let genesis_block_ptr = self.clone().info().await?.genesis_block_ptr()?; - - Ok(ChainIdentifier { - net_version: "0".to_string(), - genesis_block_hash: genesis_block_ptr.hash, - }) - } - - async fn provides_extended_blocks(&self) -> anyhow::Result { - let info = self.clone().info().await?; - let pred = if info.chain_name.contains("arbitrum-one") - || info.chain_name.contains("optimism-mainnet") - { - |x: &String| x.starts_with("extended") || x == "hybrid" - } else { - |x: &String| x == "extended" - }; - - Ok(info.block_features.iter().any(pred)) - } -} - -impl Default for ConnectionHeaders { - fn default() -> Self { - Self::new() - } -} - -impl ConnectionHeaders { - pub fn new() -> Self { - Self(HashMap::new()) - } - pub fn with_deployment(mut self, deployment: DeploymentHash) -> Self { - if let Ok(deployment) = deployment.parse() { - self.0 - .insert("x-deployment-id".parse().unwrap(), deployment); - } - self - } - pub fn add_to_request(&self, request: T) -> Request { - let mut request = Request::new(request); - self.0.iter().for_each(|(k, v)| { - request.metadata_mut().insert(k, v.clone()); - }); - request - } -} - -#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] -pub enum AvailableCapacity { - Unavailable, - Low, - High, -} - -// TODO: Find a new home for this type. 
-#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] -pub enum SubgraphLimit { - Disabled, - Limit(usize), - Unlimited, -} - -impl SubgraphLimit { - pub fn get_capacity(&self, current: usize) -> AvailableCapacity { - match self { - // Limit(0) should probably be Disabled but just in case - SubgraphLimit::Disabled | SubgraphLimit::Limit(0) => AvailableCapacity::Unavailable, - SubgraphLimit::Limit(total) => { - let total = *total; - if current >= total { - return AvailableCapacity::Unavailable; - } - - let used_percent = current * 100 / total; - - // If total is low it can vary very quickly so we can consider 50% as the low threshold - // to make selection more reliable - let threshold_percent = if total <= LOW_VALUE_THRESHOLD { - LOW_VALUE_USED_PERCENTAGE - } else { - HIGH_VALUE_USED_PERCENTAGE - }; - - if used_percent < threshold_percent { - return AvailableCapacity::High; - } - - AvailableCapacity::Low - } - _ => AvailableCapacity::High, - } - } - - pub fn has_capacity(&self, current: usize) -> bool { - match self { - SubgraphLimit::Unlimited => true, - SubgraphLimit::Limit(limit) => limit > ¤t, - SubgraphLimit::Disabled => false, - } - } -} - -impl Display for FirehoseEndpoint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(self.provider.as_str(), f) - } -} - -impl FirehoseEndpoint { - pub fn new>( - provider: S, - url: S, - token: Option, - key: Option, - filters_enabled: bool, - compression_enabled: bool, - subgraph_limit: SubgraphLimit, - endpoint_metrics: Arc, - ) -> Self { - let uri = url - .as_ref() - .parse::() - .expect("the url should have been validated by now, so it is a valid Uri"); - - let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() { - "http" => Channel::builder(uri), - "https" => { - let mut tls = ClientTlsConfig::new(); - tls = tls.with_native_roots(); - - Channel::builder(uri) - .tls_config(tls) - .expect("TLS config on this host is invalid") - } - _ => panic!("invalid uri scheme for firehose endpoint"), - }; - - // These tokens come from the config so they have to be ascii. - let token: Option> = token - .map_or(Ok(None), |token| { - let bearer_token = format!("bearer {}", token); - bearer_token.parse::>().map(Some) - }) - .expect("Firehose token is invalid"); - - let key: Option> = key - .map_or(Ok(None), |key| { - key.parse::>().map(Some) - }) - .expect("Firehose key is invalid"); - - // Note on the connection window size: We run multiple block streams on a same connection, - // and a problematic subgraph with a stalled block stream might consume the entire window - // capacity for its http2 stream and never release it. If there are enough stalled block - // streams to consume all the capacity on the http2 connection, then _all_ subgraphs using - // this same http2 connection will stall. At a default stream window size of 2^16, setting - // the connection window size to the maximum of 2^31 allows for 2^15 streams without any - // contention, which is effectively unlimited for normal graph node operation. - // - // Note: Do not set `http2_keep_alive_interval` or `http2_adaptive_window`, as these will - // send ping frames, and many cloud load balancers will drop connections that frequently - // send pings. - let endpoint = endpoint_builder - .initial_connection_window_size(Some((1 << 31) - 1)) - .connect_timeout(Duration::from_secs(10)) - .tcp_keepalive(Some(Duration::from_secs(15))) - // Timeout on each request, so the timeout to estabilish each 'Blocks' stream. 
- .timeout(Duration::from_secs(120)); - - let subgraph_limit = match subgraph_limit { - // See the comment on the constant - SubgraphLimit::Unlimited => SubgraphLimit::Limit(SUBGRAPHS_PER_CONN), - // This is checked when parsing from config but doesn't hurt to be defensive. - SubgraphLimit::Limit(limit) => SubgraphLimit::Limit(limit.min(SUBGRAPHS_PER_CONN)), - l => l, - }; - - FirehoseEndpoint { - provider: provider.as_ref().into(), - channel: endpoint.connect_lazy(), - auth: AuthInterceptor { token, key }, - filters_enabled, - compression_enabled, - subgraph_limit, - endpoint_metrics, - info_response: OnceCell::new(), - } - } - - pub fn current_error_count(&self) -> u64 { - self.endpoint_metrics.get_count(&self.provider) - } - - // we need to -1 because there will always be a reference - // inside FirehoseEndpoints that is not used (is always cloned). - pub fn get_capacity(self: &Arc) -> AvailableCapacity { - self.subgraph_limit - .get_capacity(Arc::strong_count(self).saturating_sub(1)) - } - - fn metrics_interceptor(&self) -> MetricsInterceptor { - MetricsInterceptor { - metrics: self.endpoint_metrics.cheap_clone(), - service: self.channel.cheap_clone(), - labels: RequestLabels { - provider: self.provider.clone(), - req_type: "unknown".into(), - conn_type: ConnectionType::Firehose, - }, - } - } - - fn max_message_size(&self) -> usize { - 1024 * 1024 * ENV_VARS.firehose_grpc_max_decode_size_mb - } - - fn new_fetch_client( - &self, - ) -> FetchClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = FetchClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - fn new_stream_client( - &self, - ) -> StreamClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = StreamClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - fn new_firehose_info_client(&self) -> crate::firehose::endpoint_info::Client { - let metrics = self.metrics_interceptor(); - let auth = self.auth.clone(); - - let mut client = crate::firehose::endpoint_info::Client::new(metrics, auth); - - if self.compression_enabled { - client = client.with_compression(); - } - - client = client.with_max_message_size(self.max_message_size()); - client - } - - pub async fn get_block( - &self, - cursor: FirehoseCursor, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for cursor {}", cursor; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some(firehose::single_block_request::Reference::Cursor( - firehose::single_block_request::Cursor { - cursor: cursor.to_string(), - }, - )), - }; - - let mut client = self.new_fetch_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async 
fn get_block_by_ptr( - &self, - ptr: &BlockPtr, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for ptr {}", ptr; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some( - firehose::single_block_request::Reference::BlockHashAndNumber( - firehose::single_block_request::BlockHashAndNumber { - hash: ptr.hash.to_string(), - num: ptr.number as u64, - }, - ), - ), - }; - - let mut client = self.new_fetch_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async fn get_block_by_ptr_with_retry( - self: Arc, - ptr: &BlockPtr, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - let retry_log_message = format!("get_block_by_ptr for block {}", ptr); - let endpoint = self.cheap_clone(); - let logger = logger.cheap_clone(); - let ptr_for_retry = ptr.clone(); - - retry(retry_log_message, &logger) - .limit(ENV_VARS.firehose_block_fetch_retry_limit) - .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) - .run(move || { - let endpoint = endpoint.cheap_clone(); - let logger = logger.cheap_clone(); - let ptr = ptr_for_retry.clone(); - async move { - endpoint - .get_block_by_ptr::(&ptr, &logger) - .await - .context(format!( - "Failed to fetch block by ptr {} from firehose", - ptr - )) - } - }) - .await - .map_err(move |e| { - anyhow::anyhow!("Failed to fetch block by ptr {} from firehose: {}", ptr, e) - }) - } - - async fn get_block_by_number(&self, number: u64, logger: &Logger) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - trace!( - logger, - "Connecting to firehose to retrieve block for number {}", number; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some(firehose::single_block_request::Reference::BlockNumber( - firehose::single_block_request::BlockNumber { num: number }, - )), - }; - - let mut client = self.new_fetch_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async fn get_block_by_number_with_retry( - self: Arc, - number: u64, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - let retry_log_message = format!("get_block_by_number for block {}", number); - let endpoint = self.cheap_clone(); - let logger = logger.cheap_clone(); - - retry(retry_log_message, &logger) - .limit(ENV_VARS.firehose_block_fetch_retry_limit) - .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) - .run(move || { - let endpoint = endpoint.cheap_clone(); - let logger = logger.cheap_clone(); - async move { - endpoint - .get_block_by_number::(number, &logger) - .await - .context(format!( - "Failed to fetch block by number {} from firehose", - number - )) - } - }) - .await - .map_err(|e| { - anyhow::anyhow!( - "Failed to fetch block by number {} from firehose: {}", - number, - e - ) - }) - } - - pub async fn load_blocks_by_numbers( - self: Arc, - numbers: Vec, - logger: &Logger, - ) -> Result, anyhow::Error> - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - let logger = 
logger.clone(); - let logger_for_error = logger.clone(); - - let blocks_stream = futures03::stream::iter(numbers) - .map(move |number| { - let e = self.cheap_clone(); - let l = logger.clone(); - async move { e.get_block_by_number_with_retry::(number, &l).await } - }) - .buffered(ENV_VARS.firehose_block_batch_size); - - let blocks = blocks_stream.try_collect::>().await.map_err(|e| { - error!( - logger_for_error, - "Failed to load blocks from firehose: {}", e; - ); - anyhow::format_err!("failed to load blocks from firehose: {}", e) - })?; - - Ok(blocks) - } - - pub async fn genesis_block_ptr(&self, logger: &Logger) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - info!(logger, "Requesting genesis block from firehose"; - "provider" => self.provider.as_str()); - - // We use 0 here to mean the genesis block of the chain. Firehose - // when seeing start block number 0 will always return the genesis - // block of the chain, even if the chain's start block number is - // not starting at block #0. - self.block_ptr_for_number::(logger, 0).await - } - - pub async fn block_ptr_for_number( - &self, - logger: &Logger, - number: BlockNumber, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for number {}", number; - "provider" => self.provider.as_str(), - ); - - let mut client = self.new_stream_client(); - - // The trick is the following. - // - // Firehose `start_block_num` and `stop_block_num` are both inclusive, so we specify - // the block we are looking for in both. - // - // Now, the remaining question is how the block from the canonical chain is picked. We - // leverage the fact that Firehose will always send the block in the longuest chain as the - // last message of this request. - // - // That way, we either get the final block if the block is now in a final segment of the - // chain (or probabilisticly if not finality concept exists for the chain). Or we get the - // block that is in the longuest chain according to Firehose. - let response_stream = client - .blocks(firehose::Request { - start_block_num: number as i64, - stop_block_num: number as u64, - final_blocks_only: false, - ..Default::default() - }) - .await?; - - let mut block_stream = response_stream.into_inner(); - - debug!(logger, "Retrieving block(s) from firehose"; - "provider" => self.provider.as_str()); - - let mut latest_received_block: Option = None; - while let Some(message) = block_stream.next().await { - match message { - Ok(v) => { - let block = decode_firehose_block::(&v)?.ptr(); - - match latest_received_block { - None => { - latest_received_block = Some(block); - } - Some(ref actual_ptr) => { - // We want to receive all events related to a specific block number, - // however, in some circumstances, it seems Firehose would not stop sending - // blocks (`start_block_num: 0 and stop_block_num: 0` on NEAR seems to trigger - // this). - // - // To prevent looping infinitely, we stop as soon as a new received block's - // number is higher than the latest received block's number, in which case it - // means it's an event for a block we are not interested in. 
- if block.number > actual_ptr.number { - break; - } - - latest_received_block = Some(block); - } - } - } - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - }; - } - - match latest_received_block { - Some(block_ptr) => Ok(block_ptr), - None => Err(anyhow::format_err!( - "Firehose should have returned at least one block for request" - )), - } - } - - pub async fn stream_blocks( - self: Arc, - request: firehose::Request, - headers: &ConnectionHeaders, - ) -> Result, anyhow::Error> { - let mut client = self.new_stream_client(); - let request = headers.add_to_request(request); - let response_stream = client.blocks(request).await?; - let block_stream = response_stream.into_inner(); - - Ok(block_stream) - } - - pub async fn info( - self: Arc, - ) -> Result { - let endpoint = self.cheap_clone(); - - self.info_response - .get_or_try_init(move || async move { - let mut client = endpoint.new_firehose_info_client(); - - client.info().await - }) - .await - .map(ToOwned::to_owned) - } -} - -#[derive(Debug)] -pub struct FirehoseEndpoints(ChainName, ProviderManager>); - -impl FirehoseEndpoints { - pub fn for_testing(adapters: Vec>) -> Self { - let chain_name: ChainName = "testing".into(); - - Self( - chain_name.clone(), - ProviderManager::new( - crate::log::discard(), - [(chain_name, adapters)], - ProviderCheckStrategy::MarkAsValid, - ), - ) - } - - pub fn new( - chain_name: ChainName, - provider_manager: ProviderManager>, - ) -> Self { - Self(chain_name, provider_manager) - } - - pub fn len(&self) -> usize { - self.1.len(&self.0) - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// This function will attempt to grab an endpoint based on the Lowest error count - // with high capacity available. If an adapter cannot be found `endpoint` will - // return an error. - pub async fn endpoint(&self) -> anyhow::Result> { - let endpoint = self - .1 - .providers(&self.0) - .await? - .sorted_by_key(|x| x.current_error_count()) - .try_fold(None, |acc, adapter| { - match adapter.get_capacity() { - AvailableCapacity::Unavailable => ControlFlow::Continue(acc), - AvailableCapacity::Low => match acc { - Some(_) => ControlFlow::Continue(acc), - None => ControlFlow::Continue(Some(adapter)), - }, - // This means that if all adapters with low/no errors are low capacity - // we will retry the high capacity that has errors, at this point - // any other available with no errors are almost at their limit. 
- AvailableCapacity::High => ControlFlow::Break(Some(adapter)), - } - }); - - match endpoint { - ControlFlow::Continue(adapter) - | ControlFlow::Break(adapter) => - adapter.cloned().ok_or(anyhow!("unable to get a connection, increase the firehose conn_pool_size or limit for the node")) - } - } -} - -#[cfg(test)] -mod test { - use std::{mem, sync::Arc}; - - use slog::{o, Discard, Logger}; - - use super::*; - use crate::components::metrics::MetricsRegistry; - use crate::endpoint::EndpointMetrics; - use crate::firehose::SubgraphLimit; - - #[crate::test] - async fn firehose_endpoint_errors() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - Arc::new(EndpointMetrics::mock()), - ))]; - - let endpoints = FirehoseEndpoints::for_testing(endpoint); - - let mut keep = vec![]; - for _i in 0..SUBGRAPHS_PER_CONN { - keep.push(endpoints.endpoint().await.unwrap()); - } - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - mem::drop(keep); - endpoints.endpoint().await.unwrap(); - - let endpoints = FirehoseEndpoints::for_testing(vec![]); - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("unable to get a connection")); - } - - #[crate::test] - async fn firehose_endpoint_with_limit() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Limit(2), - Arc::new(EndpointMetrics::mock()), - ))]; - - let endpoints = FirehoseEndpoints::for_testing(endpoint); - - let mut keep = vec![]; - for _ in 0..2 { - keep.push(endpoints.endpoint().await.unwrap()); - } - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - mem::drop(keep); - endpoints.endpoint().await.unwrap(); - } - - #[crate::test] - async fn firehose_endpoint_no_traffic() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Disabled, - Arc::new(EndpointMetrics::mock()), - ))]; - - let endpoints = FirehoseEndpoints::for_testing(endpoint); - - let err = endpoints.endpoint().await.unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - } - - #[crate::test] - async fn firehose_endpoint_selection() { - let logger = Logger::root(Discard, o!()); - let endpoint_metrics = Arc::new(EndpointMetrics::new( - logger, - &["high_error", "low availability", "high availability"], - Arc::new(MetricsRegistry::mock()), - )); - - let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( - "high_error".to_string(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - )); - let high_error_adapter2 = Arc::new(FirehoseEndpoint::new( - "high_error".to_string(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - )); - let low_availability = Arc::new(FirehoseEndpoint::new( - "low availability".to_string(), - "http://127.0.0.2".to_string(), - None, - None, - false, - false, - SubgraphLimit::Limit(2), - endpoint_metrics.clone(), - )); - let high_availability = Arc::new(FirehoseEndpoint::new( - "high availability".to_string(), - "http://127.0.0.3".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - 
-        ));
-
-        endpoint_metrics.report_for_test(&high_error_adapter1.provider, false);
-
-        let endpoints = FirehoseEndpoints::for_testing(vec![
-            high_error_adapter1.clone(),
-            high_error_adapter2.clone(),
-            low_availability.clone(),
-            high_availability.clone(),
-        ]);
-
-        let res = endpoints.endpoint().await.unwrap();
-        assert_eq!(res.provider, high_availability.provider);
-        mem::drop(endpoints);
-
-        // Removing high availability without errors should fall back to low availability
-        let endpoints = FirehoseEndpoints::for_testing(
-            vec![
-                high_error_adapter1.clone(),
-                high_error_adapter2,
-                low_availability.clone(),
-                high_availability.clone(),
-            ]
-            .into_iter()
-            .filter(|a| a.provider_name() != high_availability.provider)
-            .collect(),
-        );
-
-        // Ensure we're in a low capacity situation
-        assert_eq!(low_availability.get_capacity(), AvailableCapacity::Low);
-
-        // When the only high-capacity adapter has errors, we keep trying it
-        // because the others will be low or unavailable
-        let res = endpoints.endpoint().await.unwrap();
-        // This will match both high error adapters
-        assert_eq!(res.provider, high_error_adapter1.provider);
-    }
-
-    #[test]
-    fn subgraph_limit_calculates_availability() {
-        #[derive(Debug)]
-        struct Case {
-            limit: SubgraphLimit,
-            current: usize,
-            capacity: AvailableCapacity,
-        }
-
-        let cases = vec![
-            Case {
-                limit: SubgraphLimit::Disabled,
-                current: 20,
-                capacity: AvailableCapacity::Unavailable,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(0),
-                current: 20,
-                capacity: AvailableCapacity::Unavailable,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(0),
-                current: 0,
-                capacity: AvailableCapacity::Unavailable,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(100),
-                current: 80,
-                capacity: AvailableCapacity::Low,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(2),
-                current: 1,
-                capacity: AvailableCapacity::Low,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(100),
-                current: 19,
-                capacity: AvailableCapacity::High,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(100),
-                current: 100,
-                capacity: AvailableCapacity::Unavailable,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(100),
-                current: 99,
-                capacity: AvailableCapacity::Low,
-            },
-            Case {
-                limit: SubgraphLimit::Limit(100),
-                current: 101,
-                capacity: AvailableCapacity::Unavailable,
-            },
-            Case {
-                limit: SubgraphLimit::Unlimited,
-                current: 1000,
-                capacity: AvailableCapacity::High,
-            },
-            Case {
-                limit: SubgraphLimit::Unlimited,
-                current: 0,
-                capacity: AvailableCapacity::High,
-            },
-        ];
-
-        for c in cases {
-            let res = c.limit.get_capacity(c.current);
-            assert_eq!(res, c.capacity, "{:#?}", c);
-        }
-    }
-
-    #[test]
-    fn available_capacity_ordering() {
-        assert!(
-            AvailableCapacity::Unavailable < AvailableCapacity::Low
-        );
-        assert!(
-            AvailableCapacity::Unavailable < AvailableCapacity::High
-        );
-        assert!(AvailableCapacity::Low < AvailableCapacity::High);
-    }
-}
+use crate::firehose::fetch_client::FetchClient;
+use crate::firehose::interceptors::AuthInterceptor;
+use crate::{
+    blockchain::{
+        block_stream::FirehoseCursor, Block as BlockchainBlock, BlockPtr, ChainIdentifier,
+    },
+    cheap_clone::CheapClone,
+    components::store::BlockNumber,
+    endpoint::{ConnectionType, EndpointMetrics, RequestLabels},
+    env::ENV_VARS,
+    firehose::decode_firehose_block,
+    prelude::{anyhow, debug, DeploymentHash},
+};
+use anyhow::Context;
+use async_trait::async_trait;
+use futures03::{StreamExt, TryStreamExt};
+use http::uri::{Scheme, Uri};
+use itertools::Itertools;
+use slog::{error, info, trace, Logger};
+use std::{collections::HashMap, fmt::Display, ops::ControlFlow, sync::Arc, time::Duration};
+use tokio::sync::OnceCell;
+use tonic::codegen::InterceptedService;
+use tonic::{
+    codegen::CompressionEncoding,
+    metadata::{Ascii, MetadataKey, MetadataValue},
+    transport::{Channel, ClientTlsConfig},
+    Request,
+};
+
+use super::{codec as firehose, interceptors::MetricsInterceptor, stream_client::StreamClient};
+use crate::components::network_provider::ChainName;
+use crate::components::network_provider::NetworkDetails;
+use crate::components::network_provider::ProviderCheckStrategy;
+use crate::components::network_provider::ProviderManager;
+use crate::components::network_provider::ProviderName;
+use crate::prelude::retry;
+
+/// This is constant because we found this magic number of connections after
+/// which the grpc connections start to hang.
+/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879
+pub const SUBGRAPHS_PER_CONN: usize = 100;
+
+const LOW_VALUE_THRESHOLD: usize = 10;
+const LOW_VALUE_USED_PERCENTAGE: usize = 50;
+const HIGH_VALUE_USED_PERCENTAGE: usize = 80;
+
+#[derive(Debug)]
+pub struct FirehoseEndpoint {
+    pub provider: ProviderName,
+    pub auth: AuthInterceptor,
+    pub filters_enabled: bool,
+    pub compression_enabled: bool,
+    pub subgraph_limit: SubgraphLimit,
+    endpoint_metrics: Arc<EndpointMetrics>,
+    channel: Channel,
+
+    /// The endpoint info is not intended to change very often, as it only contains the
+    /// endpoint's metadata, so caching it avoids sending unnecessary network requests.
+    info_response: OnceCell<firehose::InfoResponse>,
+}
+
+#[derive(Debug)]
+pub struct ConnectionHeaders(HashMap<MetadataKey<Ascii>, MetadataValue<Ascii>>);
+
+#[async_trait]
+impl NetworkDetails for Arc<FirehoseEndpoint> {
+    fn provider_name(&self) -> ProviderName {
+        self.provider.clone()
+    }
+
+    async fn chain_identifier(&self) -> anyhow::Result<ChainIdentifier> {
+        let genesis_block_ptr = self.clone().info().await?.genesis_block_ptr()?;
+
+        Ok(ChainIdentifier {
+            net_version: "0".to_string(),
+            genesis_block_hash: genesis_block_ptr.hash,
+        })
+    }
+
+    async fn provides_extended_blocks(&self) -> anyhow::Result<bool> {
+        let info = self.clone().info().await?;
+        let pred = if info.chain_name.contains("arbitrum-one")
+            || info.chain_name.contains("optimism-mainnet")
+        {
+            |x: &String| x.starts_with("extended") || x == "hybrid"
+        } else {
+            |x: &String| x == "extended"
+        };
+
+        Ok(info.block_features.iter().any(pred))
+    }
+}
+
+impl Default for ConnectionHeaders {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl ConnectionHeaders {
+    pub fn new() -> Self {
+        Self(HashMap::new())
+    }
+    pub fn with_deployment(mut self, deployment: DeploymentHash) -> Self {
+        if let Ok(deployment) = deployment.parse() {
+            self.0
+                .insert("x-deployment-id".parse().unwrap(), deployment);
+        }
+        self
+    }
+    pub fn add_to_request<T>(&self, request: T) -> Request<T> {
+        let mut request = Request::new(request);
+        self.0.iter().for_each(|(k, v)| {
+            request.metadata_mut().insert(k, v.clone());
+        });
+        request
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)]
+pub enum AvailableCapacity {
+    Unavailable,
+    Low,
+    High,
+}
+
+// TODO: Find a new home for this type.
+#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)]
+pub enum SubgraphLimit {
+    Disabled,
+    Limit(usize),
+    Unlimited,
+}
+
+impl SubgraphLimit {
+    pub fn get_capacity(&self, current: usize) -> AvailableCapacity {
+        match self {
+            // Limit(0) should probably be Disabled but just in case
+            SubgraphLimit::Disabled | SubgraphLimit::Limit(0) => AvailableCapacity::Unavailable,
+            SubgraphLimit::Limit(total) => {
+                let total = *total;
+                if current >= total {
+                    return AvailableCapacity::Unavailable;
+                }
+
+                let used_percent = current * 100 / total;
+
+                // If total is low it can vary very quickly, so we use 50% as the low threshold
+                // to make selection more reliable
+                let threshold_percent = if total <= LOW_VALUE_THRESHOLD {
+                    LOW_VALUE_USED_PERCENTAGE
+                } else {
+                    HIGH_VALUE_USED_PERCENTAGE
+                };
+
+                if used_percent < threshold_percent {
+                    return AvailableCapacity::High;
+                }
+
+                AvailableCapacity::Low
+            }
+            _ => AvailableCapacity::High,
+        }
+    }
+
+    pub fn has_capacity(&self, current: usize) -> bool {
+        match self {
+            SubgraphLimit::Unlimited => true,
+            SubgraphLimit::Limit(limit) => limit > &current,
+            SubgraphLimit::Disabled => false,
+        }
+    }
+}
+
+impl Display for FirehoseEndpoint {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(self.provider.as_str(), f)
+    }
+}
+
+impl FirehoseEndpoint {
+    pub fn new<S: AsRef<str>>(
+        provider: S,
+        url: S,
+        token: Option<String>,
+        key: Option<String>,
+        filters_enabled: bool,
+        compression_enabled: bool,
+        subgraph_limit: SubgraphLimit,
+        endpoint_metrics: Arc<EndpointMetrics>,
+    ) -> Self {
+        let uri = url
+            .as_ref()
+            .parse::<Uri>()
+            .expect("the url should have been validated by now, so it is a valid Uri");
+
+        let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() {
+            "http" => Channel::builder(uri),
+            "https" => {
+                let mut tls = ClientTlsConfig::new();
+                tls = tls.with_native_roots();
+
+                Channel::builder(uri)
+                    .tls_config(tls)
+                    .expect("TLS config on this host is invalid")
+            }
+            _ => panic!("invalid uri scheme for firehose endpoint"),
+        };
+
+        // These tokens come from the config so they have to be ASCII.
+        let token: Option<MetadataValue<Ascii>> = token
+            .map_or(Ok(None), |token| {
+                let bearer_token = format!("bearer {}", token);
+                bearer_token.parse::<MetadataValue<Ascii>>().map(Some)
+            })
+            .expect("Firehose token is invalid");
+
+        let key: Option<MetadataValue<Ascii>> = key
+            .map_or(Ok(None), |key| {
+                key.parse::<MetadataValue<Ascii>>().map(Some)
+            })
+            .expect("Firehose key is invalid");
+
+        // Note on the connection window size: We run multiple block streams on the same connection,
+        // and a problematic subgraph with a stalled block stream might consume the entire window
+        // capacity for its http2 stream and never release it. If there are enough stalled block
+        // streams to consume all the capacity on the http2 connection, then _all_ subgraphs using
+        // this same http2 connection will stall. At a default stream window size of 2^16, setting
+        // the connection window size to the maximum of 2^31 allows for 2^15 streams without any
+        // contention, which is effectively unlimited for normal graph node operation.
+        //
+        // Note: Do not set `http2_keep_alive_interval` or `http2_adaptive_window`, as these will
+        // send ping frames, and many cloud load balancers will drop connections that frequently
+        // send pings.
+        let endpoint = endpoint_builder
+            .initial_connection_window_size(Some((1 << 31) - 1))
+            .connect_timeout(Duration::from_secs(10))
+            .tcp_keepalive(Some(Duration::from_secs(15)))
+            // Timeout on each request, i.e. the timeout to establish each 'Blocks' stream.
+            .timeout(Duration::from_secs(120));
+
+        let subgraph_limit = match subgraph_limit {
+            // See the comment on the constant
+            SubgraphLimit::Unlimited => SubgraphLimit::Limit(SUBGRAPHS_PER_CONN),
+            // This is checked when parsing from config but doesn't hurt to be defensive.
+            SubgraphLimit::Limit(limit) => SubgraphLimit::Limit(limit.min(SUBGRAPHS_PER_CONN)),
+            l => l,
+        };
+
+        FirehoseEndpoint {
+            provider: provider.as_ref().into(),
+            channel: endpoint.connect_lazy(),
+            auth: AuthInterceptor { token, key },
+            filters_enabled,
+            compression_enabled,
+            subgraph_limit,
+            endpoint_metrics,
+            info_response: OnceCell::new(),
+        }
+    }
+
+    pub fn current_error_count(&self) -> u64 {
+        self.endpoint_metrics.get_count(&self.provider)
+    }
+
+    // We subtract 1 because there will always be one reference inside
+    // FirehoseEndpoints that is never used (it is only ever cloned).
+    pub fn get_capacity(self: &Arc<Self>) -> AvailableCapacity {
+        self.subgraph_limit
+            .get_capacity(Arc::strong_count(self).saturating_sub(1))
+    }
+
+    fn metrics_interceptor(&self) -> MetricsInterceptor<Channel> {
+        MetricsInterceptor {
+            metrics: self.endpoint_metrics.cheap_clone(),
+            service: self.channel.cheap_clone(),
+            labels: RequestLabels {
+                provider: self.provider.clone(),
+                req_type: "unknown".into(),
+                conn_type: ConnectionType::Firehose,
+            },
+        }
+    }
+
+    fn max_message_size(&self) -> usize {
+        1024 * 1024 * ENV_VARS.firehose_grpc_max_decode_size_mb
+    }
+
+    fn new_fetch_client(
+        &self,
+    ) -> FetchClient<
+        InterceptedService<MetricsInterceptor<Channel>, impl tonic::service::Interceptor>,
+    > {
+        let metrics = self.metrics_interceptor();
+
+        let mut client = FetchClient::with_interceptor(metrics, self.auth.clone())
+            .accept_compressed(CompressionEncoding::Gzip);
+
+        if self.compression_enabled {
+            client = client.send_compressed(CompressionEncoding::Gzip);
+        }
+
+        client = client.max_decoding_message_size(self.max_message_size());
+
+        client
+    }
+
+    fn new_stream_client(
+        &self,
+    ) -> StreamClient<
+        InterceptedService<MetricsInterceptor<Channel>, impl tonic::service::Interceptor>,
+    > {
+        let metrics = self.metrics_interceptor();
+
+        let mut client = StreamClient::with_interceptor(metrics, self.auth.clone())
+            .accept_compressed(CompressionEncoding::Gzip);
+
+        if self.compression_enabled {
+            client = client.send_compressed(CompressionEncoding::Gzip);
+        }
+
+        client = client.max_decoding_message_size(self.max_message_size());
+
+        client
+    }
+
+    fn new_firehose_info_client(&self) -> crate::firehose::endpoint_info::Client {
+        let metrics = self.metrics_interceptor();
+        let auth = self.auth.clone();
+
+        let mut client = crate::firehose::endpoint_info::Client::new(metrics, auth);
+
+        if self.compression_enabled {
+            client = client.with_compression();
+        }
+
+        client = client.with_max_message_size(self.max_message_size());
+        client
+    }
+
+    pub async fn get_block<M>(
+        &self,
+        cursor: FirehoseCursor,
+        logger: &Logger,
+    ) -> Result<M, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        debug!(
+            logger,
+            "Connecting to firehose to retrieve block for cursor {}", cursor;
+            "provider" => self.provider.as_str(),
+        );
+
+        let req = firehose::SingleBlockRequest {
+            transforms: [].to_vec(),
+            reference: Some(firehose::single_block_request::Reference::Cursor(
+                firehose::single_block_request::Cursor {
+                    cursor: cursor.to_string(),
+                },
+            )),
+        };
+
+        let mut client = self.new_fetch_client();
+        match client.block(req).await {
+            Ok(v) => Ok(M::decode(
+                v.get_ref().block.as_ref().unwrap().value.as_ref(),
+            )?),
+            Err(e) => Err(anyhow::format_err!("firehose error {}", e)),
+        }
+    }
+
+    pub async fn get_block_by_ptr<M>(
+        &self,
+        ptr: &BlockPtr,
+        logger: &Logger,
+    ) -> Result<M, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        debug!(
+            logger,
+            "Connecting to firehose to retrieve block for ptr {}", ptr;
+            "provider" => self.provider.as_str(),
+        );
+
+        let req = firehose::SingleBlockRequest {
+            transforms: [].to_vec(),
+            reference: Some(
+                firehose::single_block_request::Reference::BlockHashAndNumber(
+                    firehose::single_block_request::BlockHashAndNumber {
+                        hash: ptr.hash.to_string(),
+                        num: ptr.number as u64,
+                    },
+                ),
+            ),
+        };
+
+        let mut client = self.new_fetch_client();
+        match client.block(req).await {
+            Ok(v) => Ok(M::decode(
+                v.get_ref().block.as_ref().unwrap().value.as_ref(),
+            )?),
+            Err(e) => Err(anyhow::format_err!("firehose error {}", e)),
+        }
+    }
+
+    pub async fn get_block_by_ptr_with_retry<M>(
+        self: Arc<Self>,
+        ptr: &BlockPtr,
+        logger: &Logger,
+    ) -> Result<M, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        let retry_log_message = format!("get_block_by_ptr for block {}", ptr);
+        let endpoint = self.cheap_clone();
+        let logger = logger.cheap_clone();
+        let ptr_for_retry = ptr.clone();
+
+        retry(retry_log_message, &logger)
+            .limit(ENV_VARS.firehose_block_fetch_retry_limit)
+            .timeout_secs(ENV_VARS.firehose_block_fetch_timeout)
+            .run(move || {
+                let endpoint = endpoint.cheap_clone();
+                let logger = logger.cheap_clone();
+                let ptr = ptr_for_retry.clone();
+                async move {
+                    endpoint
+                        .get_block_by_ptr::<M>(&ptr, &logger)
+                        .await
+                        .context(format!(
+                            "Failed to fetch block by ptr {} from firehose",
+                            ptr
+                        ))
+                }
+            })
+            .await
+            .map_err(move |e| {
+                anyhow::anyhow!("Failed to fetch block by ptr {} from firehose: {}", ptr, e)
+            })
+    }
+
+    async fn get_block_by_number<M>(&self, number: u64, logger: &Logger) -> Result<M, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        trace!(
+            logger,
+            "Connecting to firehose to retrieve block for number {}", number;
+            "provider" => self.provider.as_str(),
+        );
+
+        let req = firehose::SingleBlockRequest {
+            transforms: [].to_vec(),
+            reference: Some(firehose::single_block_request::Reference::BlockNumber(
+                firehose::single_block_request::BlockNumber { num: number },
+            )),
+        };
+
+        let mut client = self.new_fetch_client();
+        match client.block(req).await {
+            Ok(v) => Ok(M::decode(
+                v.get_ref().block.as_ref().unwrap().value.as_ref(),
+            )?),
+            Err(e) => Err(anyhow::format_err!("firehose error {}", e)),
+        }
+    }
+
+    pub async fn get_block_by_number_with_retry<M>(
+        self: Arc<Self>,
+        number: u64,
+        logger: &Logger,
+    ) -> Result<M, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        let retry_log_message = format!("get_block_by_number for block {}", number);
+        let endpoint = self.cheap_clone();
+        let logger = logger.cheap_clone();
+
+        retry(retry_log_message, &logger)
+            .limit(ENV_VARS.firehose_block_fetch_retry_limit)
+            .timeout_secs(ENV_VARS.firehose_block_fetch_timeout)
+            .run(move || {
+                let endpoint = endpoint.cheap_clone();
+                let logger = logger.cheap_clone();
+                async move {
+                    endpoint
+                        .get_block_by_number::<M>(number, &logger)
+                        .await
+                        .context(format!(
+                            "Failed to fetch block by number {} from firehose",
+                            number
+                        ))
+                }
+            })
+            .await
+            .map_err(|e| {
+                anyhow::anyhow!(
+                    "Failed to fetch block by number {} from firehose: {}",
+                    number,
+                    e
+                )
+            })
+    }
+
+    pub async fn load_blocks_by_numbers<M>(
+        self: Arc<Self>,
+        numbers: Vec<u64>,
+        logger: &Logger,
+    ) -> Result<Vec<M>, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        let logger = logger.clone();
+        let logger_for_error = logger.clone();
+
+        let blocks_stream = futures03::stream::iter(numbers)
+            .map(move |number| {
+                let e = self.cheap_clone();
+                let l = logger.clone();
+                async move { e.get_block_by_number_with_retry::<M>(number, &l).await }
+            })
+            .buffered(ENV_VARS.firehose_block_batch_size);
+
+        let blocks = blocks_stream.try_collect::<Vec<_>>().await.map_err(|e| {
+            error!(
+                logger_for_error,
+                "Failed to load blocks from firehose: {}", e;
+            );
+            anyhow::format_err!("failed to load blocks from firehose: {}", e)
+        })?;
+
+        Ok(blocks)
+    }
+
+    pub async fn genesis_block_ptr<M>(&self, logger: &Logger) -> Result<BlockPtr, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        info!(logger, "Requesting genesis block from firehose";
+            "provider" => self.provider.as_str());
+
+        // We use 0 here to mean the genesis block of the chain. Firehose
+        // when seeing start block number 0 will always return the genesis
+        // block of the chain, even if the chain's start block number is
+        // not starting at block #0.
+        self.block_ptr_for_number::<M>(logger, 0).await
+    }
+
+    pub async fn block_ptr_for_number<M>(
+        &self,
+        logger: &Logger,
+        number: BlockNumber,
+    ) -> Result<BlockPtr, anyhow::Error>
+    where
+        M: prost::Message + BlockchainBlock + Default + 'static,
+    {
+        debug!(
+            logger,
+            "Connecting to firehose to retrieve block for number {}", number;
+            "provider" => self.provider.as_str(),
+        );
+
+        let mut client = self.new_stream_client();
+
+        // The trick is the following.
+        //
+        // Firehose `start_block_num` and `stop_block_num` are both inclusive, so we specify
+        // the block we are looking for in both.
+        //
+        // Now, the remaining question is how the block from the canonical chain is picked. We
+        // leverage the fact that Firehose will always send the block in the longest chain as the
+        // last message of this request.
+        //
+        // That way, we either get the final block if the block is now in a final segment of the
+        // chain (or probabilistically, if no finality concept exists for the chain), or we get
+        // the block that is in the longest chain according to Firehose.
+        let response_stream = client
+            .blocks(firehose::Request {
+                start_block_num: number as i64,
+                stop_block_num: number as u64,
+                final_blocks_only: false,
+                ..Default::default()
+            })
+            .await?;
+
+        let mut block_stream = response_stream.into_inner();
+
+        debug!(logger, "Retrieving block(s) from firehose";
+            "provider" => self.provider.as_str());
+
+        let mut latest_received_block: Option<BlockPtr> = None;
+        while let Some(message) = block_stream.next().await {
+            match message {
+                Ok(v) => {
+                    let block = decode_firehose_block::<M>(&v)?.ptr();
+
+                    match latest_received_block {
+                        None => {
+                            latest_received_block = Some(block);
+                        }
+                        Some(ref actual_ptr) => {
+                            // We want to receive all events related to a specific block number,
+                            // however, in some circumstances, it seems Firehose would not stop
+                            // sending blocks (`start_block_num: 0 and stop_block_num: 0` on NEAR
+                            // seems to trigger this).
+                            //
+                            // To prevent looping infinitely, we stop as soon as a new received
+                            // block's number is higher than the latest received block's number,
+                            // in which case it means it's an event for a block we are not
+                            // interested in.
+                            if block.number > actual_ptr.number {
+                                break;
+                            }
+
+                            latest_received_block = Some(block);
+                        }
+                    }
+                }
+                Err(e) => return Err(anyhow::format_err!("firehose error {}", e)),
+            };
+        }
+
+        match latest_received_block {
+            Some(block_ptr) => Ok(block_ptr),
+            None => Err(anyhow::format_err!(
+                "Firehose should have returned at least one block for request"
+            )),
+        }
+    }
+
+    pub async fn stream_blocks(
+        self: Arc<Self>,
+        request: firehose::Request,
+        headers: &ConnectionHeaders,
+    ) -> Result<tonic::Streaming<firehose::Response>, anyhow::Error> {
+        let mut client = self.new_stream_client();
+        let request = headers.add_to_request(request);
+        let response_stream = client.blocks(request).await?;
+        let block_stream = response_stream.into_inner();
+
+        Ok(block_stream)
+    }
+
+    pub async fn info(
+        self: Arc<Self>,
+    ) -> Result<firehose::InfoResponse, anyhow::Error> {
+        let endpoint = self.cheap_clone();
+
+        self.info_response
+            .get_or_try_init(move || async move {
+                let mut client = endpoint.new_firehose_info_client();
+
+                client.info().await
+            })
+            .await
+            .map(ToOwned::to_owned)
+    }
+}
+
+#[derive(Debug)]
+pub struct FirehoseEndpoints(ChainName, ProviderManager<Arc<FirehoseEndpoint>>);
+
+impl FirehoseEndpoints {
+    pub fn for_testing(adapters: Vec<Arc<FirehoseEndpoint>>) -> Self {
+        let chain_name: ChainName = "testing".into();
+
+        Self(
+            chain_name.clone(),
+            ProviderManager::new(
+                crate::log::discard(),
+                [(chain_name, adapters)],
+                ProviderCheckStrategy::MarkAsValid,
+            ),
+        )
+    }
+
+    pub fn new(
+        chain_name: ChainName,
+        provider_manager: ProviderManager<Arc<FirehoseEndpoint>>,
+    ) -> Self {
+        Self(chain_name, provider_manager)
+    }
+
+    pub fn len(&self) -> usize {
+        self.1.len(&self.0)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Attempts to grab the endpoint with the lowest error count that still
+    /// has high capacity available. If no adapter can be found, `endpoint`
+    /// returns an error.
+    pub async fn endpoint(&self) -> anyhow::Result<Arc<FirehoseEndpoint>> {
+        let endpoint = self
+            .1
+            .providers(&self.0)
+            .await?
+            .sorted_by_key(|x| x.current_error_count())
+            .try_fold(None, |acc, adapter| {
+                match adapter.get_capacity() {
+                    AvailableCapacity::Unavailable => ControlFlow::Continue(acc),
+                    AvailableCapacity::Low => match acc {
+                        Some(_) => ControlFlow::Continue(acc),
+                        None => ControlFlow::Continue(Some(adapter)),
+                    },
+                    // This means that if all adapters with low/no errors are at low
+                    // capacity, we will retry the high-capacity one that has errors;
+                    // at this point any other available adapter with no errors is
+                    // almost at its limit.
+                    AvailableCapacity::High => ControlFlow::Break(Some(adapter)),
+                }
+            });
+
+        match endpoint {
+            ControlFlow::Continue(adapter)
+            | ControlFlow::Break(adapter) =>
+                adapter.cloned().ok_or(anyhow!("unable to get a connection, increase the firehose conn_pool_size or limit for the node"))
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::{mem, sync::Arc};
+
+    use slog::{o, Discard, Logger};
+
+    use super::*;
+    use crate::components::metrics::MetricsRegistry;
+    use crate::endpoint::EndpointMetrics;
+    use crate::firehose::SubgraphLimit;
+
+    #[crate::test]
+    async fn firehose_endpoint_errors() {
+        let endpoint = vec![Arc::new(FirehoseEndpoint::new(
+            String::new(),
+            "http://127.0.0.1".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Unlimited,
+            Arc::new(EndpointMetrics::mock()),
+        ))];
+
+        let endpoints = FirehoseEndpoints::for_testing(endpoint);
+
+        let mut keep = vec![];
+        for _i in 0..SUBGRAPHS_PER_CONN {
+            keep.push(endpoints.endpoint().await.unwrap());
+        }
+
+        let err = endpoints.endpoint().await.unwrap_err();
+        assert!(err.to_string().contains("conn_pool_size"));
+
+        mem::drop(keep);
+        endpoints.endpoint().await.unwrap();
+
+        let endpoints = FirehoseEndpoints::for_testing(vec![]);
+
+        let err = endpoints.endpoint().await.unwrap_err();
+        assert!(err.to_string().contains("unable to get a connection"));
+    }
+
+    #[crate::test]
+    async fn firehose_endpoint_with_limit() {
+        let endpoint = vec![Arc::new(FirehoseEndpoint::new(
+            String::new(),
+            "http://127.0.0.1".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Limit(2),
+            Arc::new(EndpointMetrics::mock()),
+        ))];
+
+        let endpoints = FirehoseEndpoints::for_testing(endpoint);
+
+        let mut keep = vec![];
+        for _ in 0..2 {
+            keep.push(endpoints.endpoint().await.unwrap());
+        }
+
+        let err = endpoints.endpoint().await.unwrap_err();
+        assert!(err.to_string().contains("conn_pool_size"));
+
+        mem::drop(keep);
+        endpoints.endpoint().await.unwrap();
+    }
+
+    #[crate::test]
+    async fn firehose_endpoint_no_traffic() {
+        let endpoint = vec![Arc::new(FirehoseEndpoint::new(
+            String::new(),
+            "http://127.0.0.1".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Disabled,
+            Arc::new(EndpointMetrics::mock()),
+        ))];
+
+        let endpoints = FirehoseEndpoints::for_testing(endpoint);
+
+        let err = endpoints.endpoint().await.unwrap_err();
+        assert!(err.to_string().contains("conn_pool_size"));
+    }
+
+    #[crate::test]
+    async fn firehose_endpoint_selection() {
+        let logger = Logger::root(Discard, o!());
+        let endpoint_metrics = Arc::new(EndpointMetrics::new(
+            logger,
+            &["high_error", "low availability", "high availability"],
+            Arc::new(MetricsRegistry::mock()),
+        ));
+
+        let high_error_adapter1 = Arc::new(FirehoseEndpoint::new(
+            "high_error".to_string(),
+            "http://127.0.0.1".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Unlimited,
+            endpoint_metrics.clone(),
+        ));
+        let high_error_adapter2 = Arc::new(FirehoseEndpoint::new(
+            "high_error".to_string(),
+            "http://127.0.0.1".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Unlimited,
+            endpoint_metrics.clone(),
+        ));
+        let low_availability = Arc::new(FirehoseEndpoint::new(
+            "low availability".to_string(),
+            "http://127.0.0.2".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Limit(2),
+            endpoint_metrics.clone(),
+        ));
+        let high_availability = Arc::new(FirehoseEndpoint::new(
+            "high availability".to_string(),
+            "http://127.0.0.3".to_string(),
+            None,
+            None,
+            false,
+            false,
+            SubgraphLimit::Unlimited,
+            endpoint_metrics.clone(),
+        ));
+
+        endpoint_metrics.report_for_test(&high_error_adapter1.provider, false);
+
+        let endpoints = FirehoseEndpoints::for_testing(vec![
+            high_error_adapter1.clone(),
+            high_error_adapter2.clone(),
+            low_availability.clone(),
+            high_availability.clone(),
+        ]);
+
+        let res = endpoints.endpoint().await.unwrap();
+        assert_eq!(res.provider, high_availability.provider);
+        mem::drop(endpoints);
+
+        // Removing high availability without errors should fall back to low availability
+        let endpoints = FirehoseEndpoints::for_testing(
+            vec![
+                high_error_adapter1.clone(),
+                high_error_adapter2,
+                low_availability.clone(),
+                high_availability.clone(),
+            ]
+            .into_iter()
+            .filter(|a| a.provider_name() != high_availability.provider)
+            .collect(),
+        );
+
+        // Ensure we're in a low capacity situation
+        assert_eq!(low_availability.get_capacity(), AvailableCapacity::Low);
+
+        // When the only high-capacity adapter has errors, we keep trying it
+        // because the others will be low or unavailable
+        let res = endpoints.endpoint().await.unwrap();
+        // This will match both high error adapters
+        assert_eq!(res.provider, high_error_adapter1.provider);
+    }
+
+    #[test]
+    fn subgraph_limit_calculates_availability() {
+        #[derive(Debug)]
+        struct Case {
+            limit: SubgraphLimit,
+            current: usize,
+            capacity: AvailableCapacity,
+        }
+
+        let cases = vec![
+            Case {
+                limit: SubgraphLimit::Disabled,
+                current: 20,
+                capacity: AvailableCapacity::Unavailable,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(0),
+                current: 20,
+                capacity: AvailableCapacity::Unavailable,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(0),
+                current: 0,
+                capacity: AvailableCapacity::Unavailable,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(100),
+                current: 80,
+                capacity: AvailableCapacity::Low,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(2),
+                current: 1,
+                capacity: AvailableCapacity::Low,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(100),
+                current: 19,
+                capacity: AvailableCapacity::High,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(100),
+                current: 100,
+                capacity: AvailableCapacity::Unavailable,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(100),
+                current: 99,
+                capacity: AvailableCapacity::Low,
+            },
+            Case {
+                limit: SubgraphLimit::Limit(100),
+                current: 101,
+                capacity: AvailableCapacity::Unavailable,
+            },
+            Case {
+                limit: SubgraphLimit::Unlimited,
+                current: 1000,
+                capacity: AvailableCapacity::High,
+            },
+            Case {
+                limit: SubgraphLimit::Unlimited,
+                current: 0,
+                capacity: AvailableCapacity::High,
+            },
+        ];
+
+        for c in cases {
+            let res = c.limit.get_capacity(c.current);
+            assert_eq!(res, c.capacity, "{:#?}", c);
+        }
+    }
+
+    #[test]
+    fn available_capacity_ordering() {
+        assert!(AvailableCapacity::Unavailable < AvailableCapacity::Low);
+        assert!(AvailableCapacity::Unavailable < AvailableCapacity::High);
+        assert!(AvailableCapacity::Low < AvailableCapacity::High);
+    }
+}
diff --git a/node/src/config.rs b/node/src/config.rs
index 7da9bfd545a..b118f34da57 100644
--- a/node/src/config.rs
+++ b/node/src/config.rs
@@ -1344,11 +1344,7 @@ mod tests {
         assert!(actual.is_err());
 
         let err_str = actual.unwrap_err().to_string();
-        assert!(
-            err_str.contains("missing field `features`"),
-            "{}",
-            err_str
-        );
+        assert!(err_str.contains("missing field `features`"), "{}", err_str);
     }
 
     #[test]
@@ -1538,11 +1534,9 @@ mod tests {
         assert!(actual.is_err(), "{:?}", actual);
 
         if let Err(error) = actual {
-            assert!(
-                error
-                    .to_string()
-                    .starts_with("supported firehose endpoint filters are:")
-            )
+            assert!(error
+                .to_string()
+                .starts_with("supported firehose endpoint filters are:"))
         }
     }
 
@@ -1643,11 +1637,7 @@ mod tests {
         let err = actual.validate();
         assert!(err.is_err());
         let err = err.unwrap_err();
-        assert!(
-            err.to_string().contains("unique"),
-            "result: {:?}",
-            err
-        );
+        assert!(err.to_string().contains("unique"), "result: {:?}", err);
     }
 
     #[test]
diff --git a/server/http/src/request.rs b/server/http/src/request.rs
index dcb837a70d0..4b7fdacdf59 100644
--- a/server/http/src/request.rs
+++ b/server/http/src/request.rs
@@ -154,18 +154,16 @@ mod tests {
         let query = request.expect("Should accept valid queries");
 
         let expected_query = q::parse_query("{ user { name } }").unwrap().into_static();
-        let expected_variables = QueryVariables::new(HashMap::from_iter(
-            vec![
-                (String::from("string"), r::Value::String(String::from("s"))),
-                (
-                    String::from("map"),
-                    r::Value::Object(Object::from_iter(
-                        vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(),
-                    )),
-                ),
-                (String::from("int"), r::Value::Int(5)),
-            ],
-        ));
+        let expected_variables = QueryVariables::new(HashMap::from_iter(vec![
+            (String::from("string"), r::Value::String(String::from("s"))),
+            (
+                String::from("map"),
+                r::Value::Object(Object::from_iter(
+                    vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(),
+                )),
+            ),
+            (String::from("int"), r::Value::Int(5)),
+        ]));
 
         assert_eq!(query.document, expected_query);
         assert_eq!(query.variables, Some(expected_variables));
diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs
index 451a70f7b04..09ddfd29038 100644
--- a/server/index-node/src/service.rs
+++ b/server/index-node/src/service.rs
@@ -432,18 +432,16 @@ mod tests {
         let query = request.expect("Should accept valid queries");
 
         let expected_query = q::parse_query("{ user { name } }").unwrap().into_static();
-        let expected_variables = QueryVariables::new(HashMap::from_iter(
-            vec![
-                (String::from("string"), r::Value::String(String::from("s"))),
-                (
-                    String::from("map"),
-                    r::Value::Object(Object::from_iter(
-                        vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(),
-                    )),
-                ),
-                (String::from("int"), r::Value::Int(5)),
-            ],
-        ));
+        let expected_variables = QueryVariables::new(HashMap::from_iter(vec![
+            (String::from("string"), r::Value::String(String::from("s"))),
+            (
+                String::from("map"),
+                r::Value::Object(Object::from_iter(
+                    vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(),
+                )),
+            ),
+            (String::from("int"), r::Value::Int(5)),
+        ]));
 
         assert_eq!(query.document, expected_query);
         assert_eq!(query.variables, Some(expected_variables));
diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs
index e7d30caeca4..901b4daa1e5 100644
--- a/store/postgres/src/relational/ddl_tests.rs
+++ b/store/postgres/src/relational/ddl_tests.rs
@@ -415,14 +415,7 @@ fn postponed_indexes_with_block_column() {
     let dst_nsp = Namespace::new("sgd2".to_string()).unwrap();
 
     let arr = index_list()
-        .indexes_for_table(
-            &dst_nsp,
-            &table.name.to_string(),
-            table,
-            true,
-            false,
-            false,
-        )
+        .indexes_for_table(&dst_nsp, &table.name.to_string(), table, true, false, false)
         .unwrap();
     assert_eq!(1, arr.len());
     assert!(!arr[0].1.contains(BLOCK_IDX));
diff --git a/store/postgres/src/sql/mod.rs b/store/postgres/src/sql/mod.rs
index 0beb6cf894e..f08f89ae711 100644
--- a/store/postgres/src/sql/mod.rs
+++ b/store/postgres/src/sql/mod.rs
@@ -22,7 +22,7 @@ mod test {
         let namespace = Namespace::new("sgd0815".to_string()).unwrap();
         let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string()));
         let catalog = Catalog::for_tests(site.clone(), BTreeSet::new()).unwrap();
- 
+
         Layout::new(site, &schema, catalog).unwrap()
     }
 }
diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs
index 8edeac388fd..dc7c759ef64 100644
--- a/store/test-store/tests/postgres/graft.rs
+++ b/store/test-store/tests/postgres/graft.rs
@@ -80,14 +80,16 @@ lazy_static! {
     static ref TEST_SUBGRAPH_SCHEMA: InputSchema =
         InputSchema::parse_latest(USER_GQL, TEST_SUBGRAPH_ID.clone())
             .expect("Failed to parse user schema");
-    static ref BLOCKS: Vec<BlockPtr> = ["bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f",
+    static ref BLOCKS: Vec<BlockPtr> = [
+        "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f",
         "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13",
         "b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1",
         "7347afe69254df06729e123610b00b8b11f15cfae3241f9366fb113aec07489c",
         "f8ccbd3877eb98c958614f395dd351211afb9abba187bfc1fb4ac414b099c4a6",
         "7b0ea919e258eb2b119eb32de56b85d12d50ac6a9f7c5909f843d6172c8ba196",
         "6b834521bb753c132fdcf0e1034803ed9068e324112f8750ba93580b393a986b",
-        "7cce080f5a49c2997a6cc65fc1cee9910fd8fc3721b7010c0b5d0873e2ac785e"]
+        "7cce080f5a49c2997a6cc65fc1cee9910fd8fc3721b7010c0b5d0873e2ac785e"
+    ]
     .iter()
     .enumerate()
     .map(|(idx, hash)| BlockPtr::try_from((*hash, idx as i64)).unwrap())

From 7b02b3237b40b5488e3a4a79062e90b44ca2494c Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Fri, 9 Jan 2026 14:00:59 -0800
Subject: [PATCH 92/92] justfile: Make 'just lint' run clippy against all
 targets

Note that this also means that CI runs 'cargo clippy', because the CI
workflow runs 'just lint'
---
 justfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/justfile b/justfile
index 128be063fb3..de74a789498 100644
--- a/justfile
+++ b/justfile
@@ -8,7 +8,7 @@ format *EXTRA_FLAGS:
 
 # Run Clippy linting (cargo clippy)
 lint *EXTRA_FLAGS:
-    cargo clippy {{EXTRA_FLAGS}}
+    cargo clippy --all-targets {{EXTRA_FLAGS}}
 
 # Check Rust code (cargo check)
 check *EXTRA_FLAGS:
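For reviewers skimming the endpoints.rs diff above, the subtle part is the
capacity selection in `SubgraphLimit::get_capacity`: limits of 10 or less flip
from High to Low already at 50% utilization instead of 80%. Below is a minimal,
self-contained sketch of that threshold logic; the names `Capacity` and
`capacity_for` are illustrative only and are not part of the patch, though the
constants mirror LOW_VALUE_THRESHOLD and the two percentage thresholds.

    // Standalone sketch of the 50%/80% threshold logic; not graph-node code.
    #[derive(Debug, PartialEq)]
    enum Capacity {
        Unavailable,
        Low,
        High,
    }

    fn capacity_for(limit: usize, current: usize) -> Capacity {
        // A zero limit or a full limit means no capacity at all.
        if limit == 0 || current >= limit {
            return Capacity::Unavailable;
        }
        let used_percent = current * 100 / limit;
        // Small limits fluctuate quickly, so they are flagged as Low already at 50%.
        let threshold = if limit <= 10 { 50 } else { 80 };
        if used_percent < threshold {
            Capacity::High
        } else {
            Capacity::Low
        }
    }

    fn main() {
        // These mirror cases from the patch's subgraph_limit_calculates_availability test.
        assert_eq!(capacity_for(100, 19), Capacity::High);
        assert_eq!(capacity_for(100, 80), Capacity::Low);
        assert_eq!(capacity_for(100, 100), Capacity::Unavailable);
        assert_eq!(capacity_for(2, 1), Capacity::Low); // small limit: 50% used
    }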