From 8fd4dc9375f3473accf50f1b80d24320336b5533 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 15:23:19 +0200 Subject: [PATCH 01/12] chore: First round of pedantic clippy lints --- crates/stackable-operator/CHANGELOG.md | 4 + .../stackable-operator/src/builder/event.rs | 5 +- crates/stackable-operator/src/builder/meta.rs | 8 +- crates/stackable-operator/src/builder/pdb.rs | 6 +- .../src/builder/pod/container.rs | 10 +-- .../src/builder/pod/probe.rs | 10 ++- .../src/builder/pod/resources.rs | 35 ++++---- .../src/builder/pod/volume.rs | 38 ++++----- crates/stackable-operator/src/cli/mod.rs | 2 +- .../src/cli/product_config.rs | 6 +- crates/stackable-operator/src/client.rs | 15 ++-- .../src/cluster_resources.rs | 10 +-- .../src/commons/affinity.rs | 10 +-- .../stackable-operator/src/commons/cache.rs | 2 +- .../src/commons/networking.rs | 2 +- crates/stackable-operator/src/commons/opa.rs | 29 +++---- .../src/commons/product_image_selection.rs | 78 +++++++++--------- .../src/commons/resources.rs | 64 +++++++-------- .../src/commons/tls_verification.rs | 3 +- crates/stackable-operator/src/config/merge.rs | 4 +- crates/stackable-operator/src/cpu.rs | 9 ++- .../crd/authentication/ldap/v1alpha1_impl.rs | 4 +- .../crd/authentication/oidc/v1alpha1_impl.rs | 4 +- .../src/crd/git_sync/v1alpha1_impl.rs | 24 +++--- .../src/crd/git_sync/v1alpha2_impl.rs | 28 ++++--- .../crd/listener/listeners/v1alpha1_impl.rs | 1 + crates/stackable-operator/src/crd/mod.rs | 2 +- .../src/crd/s3/connection/mod.rs | 23 +++--- .../src/crd/s3/connection/v1alpha1_impl.rs | 2 +- crates/stackable-operator/src/eos/mod.rs | 16 ++-- crates/stackable-operator/src/iter.rs | 6 +- crates/stackable-operator/src/kvp/key.rs | 2 +- .../stackable-operator/src/kvp/label/mod.rs | 31 ++++--- .../src/kvp/label/selector.rs | 12 +-- crates/stackable-operator/src/kvp/mod.rs | 8 +- crates/stackable-operator/src/lib.rs | 15 ++++ .../src/logging/controller.rs | 18 ++--- .../src/logging/k8s_events.rs | 2 
+- crates/stackable-operator/src/memory.rs | 49 ++++++------ .../src/product_config_utils.rs | 80 ++++++++++--------- .../src/product_logging/framework.rs | 19 +++-- .../src/product_logging/spec.rs | 19 ++--- .../src/status/condition/daemonset.rs | 2 +- .../src/status/condition/deployment.rs | 2 +- .../src/status/condition/mod.rs | 11 +-- .../src/status/condition/operations.rs | 2 +- .../src/status/condition/statefulset.rs | 2 +- .../stackable-operator/src/status/rollout.rs | 4 +- crates/stackable-operator/src/utils/crds.rs | 8 +- .../stackable-operator/src/utils/kubelet.rs | 2 +- .../stackable-operator/src/utils/logging.rs | 4 +- crates/stackable-operator/src/utils/url.rs | 2 +- crates/stackable-operator/src/validation.rs | 2 +- 53 files changed, 382 insertions(+), 374 deletions(-) diff --git a/crates/stackable-operator/CHANGELOG.md b/crates/stackable-operator/CHANGELOG.md index e7fa5cc1f..0ff145a27 100644 --- a/crates/stackable-operator/CHANGELOG.md +++ b/crates/stackable-operator/CHANGELOG.md @@ -10,6 +10,10 @@ All notable changes to this project will be documented in this file. - Add support for specifying a `clientAuthenticationMethod` for OIDC ([#1178]). This was originally done in [#1158] and had been reverted in [#1170]. +### Changed + +- BREAKING: `OpaConfig::full_document_url` now takes `&OpaApiVersion` instead of `OpaApiVersion` ([#XXXX]). 
+ ### Removed - BREAKING: Remove unused `add_prefix`, `try_add_prefix`, `set_name`, and `try_set_name` associated diff --git a/crates/stackable-operator/src/builder/event.rs b/crates/stackable-operator/src/builder/event.rs index f3d07d3c2..162cc7641 100644 --- a/crates/stackable-operator/src/builder/event.rs +++ b/crates/stackable-operator/src/builder/event.rs @@ -116,10 +116,7 @@ impl EventBuilder { reporting_instance: self.reporting_instance.clone(), series: None, source, - type_: self - .event_type - .as_ref() - .map(|event_type| event_type.to_string()), + type_: self.event_type.as_ref().map(ToString::to_string), } } } diff --git a/crates/stackable-operator/src/builder/meta.rs b/crates/stackable-operator/src/builder/meta.rs index df48fa815..7f5acdce2 100644 --- a/crates/stackable-operator/src/builder/meta.rs +++ b/crates/stackable-operator/src/builder/meta.rs @@ -152,7 +152,7 @@ impl ObjectMetaBuilder { /// for more flexibility if needed. pub fn with_recommended_labels( &mut self, - object_labels: ObjectLabels, + object_labels: &ObjectLabels, ) -> Result<&mut Self> { let recommended_labels = Labels::recommended(object_labels).context(RecommendedLabelsSnafu)?; @@ -206,8 +206,8 @@ impl ObjectMetaBuilder { .ownerreference .as_ref() .map(|ownerreference| vec![ownerreference.clone()]), - labels: self.labels.clone().map(|l| l.into()), - annotations: self.annotations.clone().map(|a| a.into()), + labels: self.labels.clone().map(Into::into), + annotations: self.annotations.clone().map(Into::into), finalizers: self.finalizers.clone(), ..ObjectMeta::default() } @@ -350,7 +350,7 @@ mod tests { .namespace("bar") .ownerreference_from_resource(&pod, Some(true), Some(false)) .unwrap() - .with_recommended_labels(ObjectLabels { + .with_recommended_labels(&ObjectLabels { owner: &pod, app_name: "test_app", app_version: "1.0", diff --git a/crates/stackable-operator/src/builder/pdb.rs b/crates/stackable-operator/src/builder/pdb.rs index e262863d3..02be4f323 100644 --- 
a/crates/stackable-operator/src/builder/pdb.rs +++ b/crates/stackable-operator/src/builder/pdb.rs @@ -190,7 +190,7 @@ impl PodDisruptionBudgetBuilder &mut Self { @@ -118,7 +118,7 @@ impl ContainerBuilder { pub fn add_env_var_from_field_path( &mut self, name: impl Into, - field_path: FieldPathEnvVar, + field_path: &FieldPathEnvVar, ) -> &mut Self { self.add_env_var_from_source( name, @@ -587,12 +587,12 @@ mod tests { assert_eq!( source.to_string(), "input is 64 bytes long but must be no more than 63" - ) + ); } // One characters shorter name is valid let max_len_container_name: String = long_container_name.chars().skip(1).collect(); assert_eq!(max_len_container_name.len(), 63); - assert!(ContainerBuilder::new(&max_len_container_name).is_ok()) + assert!(ContainerBuilder::new(&max_len_container_name).is_ok()); } #[test] @@ -644,7 +644,7 @@ mod tests { .resources(resources.clone()) .build(); - assert_eq!(container.resources, Some(resources)) + assert_eq!(container.resources, Some(resources)); } /// Panics if given container builder constructor result is not [Err] with error message diff --git a/crates/stackable-operator/src/builder/pod/probe.rs b/crates/stackable-operator/src/builder/pod/probe.rs index 2915bf1eb..bf87f54da 100644 --- a/crates/stackable-operator/src/builder/pod/probe.rs +++ b/crates/stackable-operator/src/builder/pod/probe.rs @@ -194,7 +194,10 @@ impl ProbeBuilder { // SAFETY: Period is checked above to be non-zero let success_threshold = success_threshold_duration.div_duration_f32(*self.period); - Ok(self.with_success_threshold(success_threshold.ceil() as i32)) + // Note: We calculate an f32, which can overflow an i32, so we clamp it to bo in range + #[expect(clippy::cast_possible_truncation)] + #[expect(clippy::cast_precision_loss)] + Ok(self.with_success_threshold(success_threshold.ceil().clamp(0.0, i32::MAX as f32) as i32)) } /// After a probe fails `failureThreshold` times in a row, Kubernetes considers that the @@ -257,7 +260,10 @@ impl 
ProbeBuilder { // SAFETY: Period is checked above to be non-zero let failure_threshold = failure_threshold_duration.div_duration_f32(*self.period); - Ok(self.with_failure_threshold(failure_threshold.ceil() as i32)) + // Note: We calculate an f32, which can overflow an i32, so we clamp it to bo in range + #[expect(clippy::cast_possible_truncation)] + #[expect(clippy::cast_precision_loss)] + Ok(self.with_failure_threshold(failure_threshold.ceil().clamp(0.0, i32::MAX as f32) as i32)) } /// Build the [`Probe`] using the specified contents. diff --git a/crates/stackable-operator/src/builder/pod/resources.rs b/crates/stackable-operator/src/builder/pod/resources.rs index 040231457..64967e0f6 100644 --- a/crates/stackable-operator/src/builder/pod/resources.rs +++ b/crates/stackable-operator/src/builder/pod/resources.rs @@ -98,11 +98,11 @@ impl ResourceRequirementsBuilder { } } -impl ResourceRequirementsBuilder { +impl ResourceRequirementsBuilder { pub fn with_memory_request( self, request: impl Into, - ) -> ResourceRequirementsBuilder { + ) -> ResourceRequirementsBuilder { let Self { cpu_request, cpu_limit, @@ -124,7 +124,7 @@ impl ResourceRequirementsBuilder { self, request: impl Into, factor: f32, - ) -> memory::Result> { + ) -> memory::Result> { let request = MemoryQuantity::from_str(&request.into())?; let limit = request * factor; @@ -168,7 +168,7 @@ impl ResourceRequirementsBuilder { } } -impl ResourceRequirementsBuilder { +impl ResourceRequirementsBuilder { pub fn with_resource( mut self, rr_type: ResourceRequirementsType, @@ -185,21 +185,18 @@ impl ResourceRequirementsBuilder { let resource = resource.to_string(); - match self.other.get_mut(&resource) { - Some(types) => { - if types.contains_key(&rr_type) { - warn!( - "resource {} for '{}' already set, not overwriting", - rr_type, resource - ); - } - - types.insert(rr_type, Quantity(quantity.into())); - } - None => { - let types = BTreeMap::from([(rr_type, Quantity(quantity.into()))]); - self.other.insert(resource, 
types); + if let Some(types) = self.other.get_mut(&resource) { + if types.contains_key(&rr_type) { + warn!( + "resource {} for '{}' already set, not overwriting", + rr_type, resource + ); } + + types.insert(rr_type, Quantity(quantity.into())); + } else { + let types = BTreeMap::from([(rr_type, Quantity(quantity.into()))]); + self.other.insert(resource, types); } self @@ -286,6 +283,6 @@ mod tests { .with_resource(ResourceRequirementsType::Requests, "nvidia.com/gpu", "1") .build(); - assert_eq!(rr, resources) + assert_eq!(rr, resources); } } diff --git a/crates/stackable-operator/src/builder/pod/volume.rs b/crates/stackable-operator/src/builder/pod/volume.rs index 9b7a1bd98..23bfcdfe3 100644 --- a/crates/stackable-operator/src/builder/pod/volume.rs +++ b/crates/stackable-operator/src/builder/pod/volume.rs @@ -84,7 +84,7 @@ impl VolumeBuilder { quantity: Option, ) -> &mut Self { self.volume_source = VolumeSource::EmptyDir(EmptyDirVolumeSource { - medium: medium.map(|m| m.into()), + medium: medium.map(Into::into), size_limit: quantity, }); self @@ -102,7 +102,7 @@ impl VolumeBuilder { ) -> &mut Self { self.volume_source = VolumeSource::HostPath(HostPathVolumeSource { path: path.into(), - type_: type_.map(|t| t.into()), + type_: type_.map(Into::into), }); self } @@ -362,12 +362,12 @@ impl SecretOperatorVolumeSourceBuilder { if let Some(password) = &self.tls_pkcs12_password { // The `tls_pkcs12_password` is only used for PKCS12 stores. 
- if Some(SecretFormat::TlsPkcs12) != self.format { - warn!(format.actual = ?self.format, format.expected = ?Some(SecretFormat::TlsPkcs12), "A TLS PKCS12 password was set but ignored because another format was requested") - } else { + if Some(SecretFormat::TlsPkcs12) == self.format { annotations.insert( Annotation::tls_pkcs12_password(password).context(ParseAnnotationSnafu)?, ); + } else { + warn!(format.actual = ?self.format, format.expected = ?Some(SecretFormat::TlsPkcs12), "A TLS PKCS12 password was set but ignored because another format was requested"); } } @@ -500,18 +500,6 @@ impl ListenerOperatorVolumeSourceBuilder { } } - fn build_spec(&self) -> PersistentVolumeClaimSpec { - PersistentVolumeClaimSpec { - storage_class_name: Some("listeners.stackable.tech".to_string()), - resources: Some(VolumeResourceRequirements { - requests: Some([("storage".to_string(), Quantity("1".to_string()))].into()), - ..Default::default() - }), - access_modes: Some(vec!["ReadWriteMany".to_string()]), - ..PersistentVolumeClaimSpec::default() - } - } - #[deprecated(note = "renamed to `build_ephemeral`", since = "0.61.1")] pub fn build(&self) -> Result { self.build_ephemeral() @@ -534,7 +522,7 @@ impl ListenerOperatorVolumeSourceBuilder { .with_labels(self.labels.clone()) .build(), ), - spec: self.build_spec(), + spec: Self::spec(), }), }) } @@ -555,10 +543,22 @@ impl ListenerOperatorVolumeSourceBuilder { .with_annotation(listener_reference_annotation) .with_labels(self.labels.clone()) .build(), - spec: Some(self.build_spec()), + spec: Some(Self::spec()), ..Default::default() }) } + + fn spec() -> PersistentVolumeClaimSpec { + PersistentVolumeClaimSpec { + storage_class_name: Some("listeners.stackable.tech".to_string()), + resources: Some(VolumeResourceRequirements { + requests: Some([("storage".to_string(), Quantity("1".to_string()))].into()), + ..Default::default() + }), + access_modes: Some(vec!["ReadWriteMany".to_string()]), + ..PersistentVolumeClaimSpec::default() + } + } } 
#[cfg(test)] diff --git a/crates/stackable-operator/src/cli/mod.rs b/crates/stackable-operator/src/cli/mod.rs index 0224604fc..710850820 100644 --- a/crates/stackable-operator/src/cli/mod.rs +++ b/crates/stackable-operator/src/cli/mod.rs @@ -126,6 +126,6 @@ mod tests { RunArguments::command() .print_long_help() .expect("help message should be printed to stdout"); - RunArguments::command().debug_assert() + RunArguments::command().debug_assert(); } } diff --git a/crates/stackable-operator/src/cli/product_config.rs b/crates/stackable-operator/src/cli/product_config.rs index 361c087d5..38094a46f 100644 --- a/crates/stackable-operator/src/cli/product_config.rs +++ b/crates/stackable-operator/src/cli/product_config.rs @@ -59,7 +59,7 @@ impl ProductConfigPath { let search_paths = if let Some(path) = user_provided_path { vec![path] } else { - default_paths.iter().map(|path| path.as_ref()).collect() + default_paths.iter().map(AsRef::as_ref).collect() }; for path in &search_paths { if path.exists() { @@ -148,7 +148,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic = "RequiredFileMissing { search_path: [\"user_provided_path_properties.yaml\"] }"] fn resolve_path_user_path_not_existing() { ProductConfigPath::resolve_path(Some(USER_PROVIDED_PATH.as_ref()), &[DEPLOY_FILE_PATH]) .unwrap(); @@ -165,7 +165,7 @@ mod tests { PathBuf::from(DEPLOY_FILE_PATH), PathBuf::from(DEFAULT_FILE_PATH) ] - ) + ); } else { panic!("must return RequiredFileMissing when file was not found") } diff --git a/crates/stackable-operator/src/client.rs b/crates/stackable-operator/src/client.rs index 3c27863ba..d5e3273b1 100644 --- a/crates/stackable-operator/src/client.rs +++ b/crates/stackable-operator/src/client.rs @@ -95,6 +95,7 @@ pub enum Error { /// It wraps an underlying [kube::client::Client] and provides some common functionality. 
#[derive(Clone)] pub struct Client { + #[expect(clippy::struct_field_names)] client: KubeClient, patch_params: PatchParams, post_params: PostParams, @@ -707,7 +708,7 @@ mod tests { use crate::utils::cluster_info::KubernetesClusterInfoOptions; - async fn test_cluster_info_opts() -> KubernetesClusterInfoOptions { + fn test_cluster_info_opts() -> KubernetesClusterInfoOptions { KubernetesClusterInfoOptions { // We have to hard-code a made-up cluster domain, // since kubernetes_node_name (probably) won't be a valid Node that we can query. @@ -724,7 +725,7 @@ mod tests { #[tokio::test] #[ignore = "Tests depending on Kubernetes are not ran by default"] async fn k8s_test_wait_created() { - let client = super::initialize_operator(None, &test_cluster_info_opts().await) + let client = super::initialize_operator(None, &test_cluster_info_opts()) .await .expect("KUBECONFIG variable must be configured."); @@ -785,10 +786,10 @@ mod tests { Event::Delete(_) => { panic!("Not expected the test_wait_created busybox pod to be deleted"); } - Event::Init | Event::InitDone => continue, + Event::Init | Event::InitDone => {} }, - Err(_) => { - panic!("Error while waiting for readiness."); + Err(err) => { + panic!("Error while waiting for readiness: {err}"); } } } @@ -802,7 +803,7 @@ mod tests { #[tokio::test] #[ignore = "Tests depending on Kubernetes are not ran by default"] async fn k8s_test_wait_created_timeout() { - let client = super::initialize_operator(None, &test_cluster_info_opts().await) + let client = super::initialize_operator(None, &test_cluster_info_opts()) .await .expect("KUBECONFIG variable must be configured."); @@ -822,7 +823,7 @@ mod tests { #[tokio::test] #[ignore = "Tests depending on Kubernetes are not ran by default"] async fn k8s_test_list_with_label_selector() { - let client = super::initialize_operator(None, &test_cluster_info_opts().await) + let client = super::initialize_operator(None, &test_cluster_info_opts()) .await .expect("KUBECONFIG variable must be 
configured."); diff --git a/crates/stackable-operator/src/cluster_resources.rs b/crates/stackable-operator/src/cluster_resources.rs index 6bb0474c0..345de5269 100644 --- a/crates/stackable-operator/src/cluster_resources.rs +++ b/crates/stackable-operator/src/cluster_resources.rs @@ -509,7 +509,7 @@ impl<'a> ClusterResources<'a> { operator_name: operator_name.into(), controller_name: controller_name.into(), manager: format_full_controller_name(operator_name, controller_name), - resource_ids: Default::default(), + resource_ids: HashSet::default(), apply_strategy, object_overrides, }) @@ -739,7 +739,7 @@ impl<'a> ClusterResources<'a> { T::plural(&()), ClusterResources::print_resources(&orphaned_resources), ); - for resource in orphaned_resources.iter() { + for resource in &orphaned_resources { client .delete(resource) .await @@ -798,17 +798,17 @@ impl<'a> ClusterResources<'a> { LabelSelectorRequirement { key: K8S_APP_INSTANCE_KEY.into(), operator: "In".into(), - values: Some(vec![self.app_instance.to_owned()]), + values: Some(vec![self.app_instance.clone()]), }, LabelSelectorRequirement { key: K8S_APP_NAME_KEY.into(), operator: "In".into(), - values: Some(vec![self.app_name.to_owned()]), + values: Some(vec![self.app_name.clone()]), }, LabelSelectorRequirement { key: K8S_APP_MANAGED_BY_KEY.into(), operator: "In".into(), - values: Some(vec![self.manager.to_owned()]), + values: Some(vec![self.manager.clone()]), }, ]), ..Default::default() diff --git a/crates/stackable-operator/src/commons/affinity.rs b/crates/stackable-operator/src/commons/affinity.rs index 5c9a708f2..9dbc47daa 100644 --- a/crates/stackable-operator/src/commons/affinity.rs +++ b/crates/stackable-operator/src/commons/affinity.rs @@ -152,7 +152,7 @@ mod tests { node_selector: None, }; - let role_input = r#" + let role_input = r" podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: @@ -173,7 +173,7 @@ mod tests { - antarctica-west1 nodeSelector: disktype: ssd - "#; + "; let mut 
role_affinity: StackableAffinityFragment = serde_yaml::from_str(role_input).expect("illegal test input"); @@ -195,7 +195,7 @@ mod tests { }]), match_labels: None, }), - topology_key: "".to_string(), + topology_key: String::new(), ..Default::default() } ]) @@ -265,7 +265,7 @@ mod tests { // The following anti-affinity tells k8s it *must* spread the brokers across multiple zones // It will overwrite the default anti-affinity - let role_input = r#" + let role_input = r" podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: @@ -274,7 +274,7 @@ mod tests { app.kubernetes.io/instance: simple-kafka app.kubernetes.io/component: broker topologyKey: topology.kubernetes.io/zone - "#; + "; let mut role_affinity: StackableAffinityFragment = serde_yaml::from_str(role_input).expect("illegal test input"); diff --git a/crates/stackable-operator/src/commons/cache.rs b/crates/stackable-operator/src/commons/cache.rs index 8ae440e89..24ba198dc 100644 --- a/crates/stackable-operator/src/commons/cache.rs +++ b/crates/stackable-operator/src/commons/cache.rs @@ -82,7 +82,7 @@ mod tests { #[test] fn test_defaults() { - let my_cache: MyCache = Default::default(); + let my_cache: MyCache = TtlCache::default(); assert_eq!(my_cache.entry_time_to_live, Duration::from_secs(30)); assert_eq!(my_cache.max_entries, 10_000); } diff --git a/crates/stackable-operator/src/commons/networking.rs b/crates/stackable-operator/src/commons/networking.rs index 952170839..7bbf911a1 100644 --- a/crates/stackable-operator/src/commons/networking.rs +++ b/crates/stackable-operator/src/commons/networking.rs @@ -97,7 +97,7 @@ impl FromStr for HostName { if let Ok(domain_name) = value.parse() { return Ok(HostName::DomainName(domain_name)); - }; + } InvalidHostnameSnafu { hostname: value.to_owned(), diff --git a/crates/stackable-operator/src/commons/opa.rs b/crates/stackable-operator/src/commons/opa.rs index d41d47079..c8c4caed4 100644 --- a/crates/stackable-operator/src/commons/opa.rs +++ 
b/crates/stackable-operator/src/commons/opa.rs @@ -44,11 +44,11 @@ //! let opa_config: &OpaConfig = cluster.spec.opa.as_ref().unwrap(); //! //! assert_eq!( -//! opa_config.document_url(&cluster, Some("allow"), OpaApiVersion::V1), +//! opa_config.document_url(&cluster, Some("allow"), &OpaApiVersion::V1), //! "v1/data/test/allow".to_string() //! ); //! assert_eq!( -//! opa_config.full_document_url(&cluster, "http://localhost:8081", None, OpaApiVersion::V1), +//! opa_config.full_document_url(&cluster, "http://localhost:8081", None, &OpaApiVersion::V1), //! "http://localhost:8081/v1/data/test".to_string() //! ); //! ``` @@ -141,7 +141,7 @@ impl OpaConfig { &self, resource: &T, rule: Option<&str>, - api_version: OpaApiVersion, + api_version: &OpaApiVersion, ) -> String where T: ResourceExt, @@ -182,21 +182,19 @@ impl OpaConfig { resource: &T, opa_base_url: &str, rule: Option<&str>, - api_version: OpaApiVersion, + api_version: &OpaApiVersion, ) -> String where T: ResourceExt, { if opa_base_url.ends_with('/') { format!( - "{}{}", - opa_base_url, + "{opa_base_url}{}", self.document_url(resource, rule, api_version) ) } else { format!( - "{}/{}", - opa_base_url, + "{opa_base_url}/{}", self.document_url(resource, rule, api_version) ) } @@ -226,7 +224,7 @@ impl OpaConfig { client: &Client, resource: &T, rule: Option<&str>, - api_version: OpaApiVersion, + api_version: &OpaApiVersion, ) -> Result where T: Resource, @@ -286,7 +284,7 @@ mod tests { const OPA_BASE_URL_WITH_SLASH: &str = "http://opa:8081/"; const OPA_BASE_URL_WITHOUT_SLASH: &str = "http://opa:8081"; - const V1: OpaApiVersion = OpaApiVersion::V1; + const V1: &OpaApiVersion = &OpaApiVersion::V1; #[test] fn document_url_with_package_name() { @@ -371,7 +369,7 @@ mod tests { fn build_opa_config(package: Option<&str>) -> OpaConfig { OpaConfig { config_map_name: "opa".to_string(), - package: package.map(|p| p.to_string()), + package: package.map(ToString::to_string), } } @@ -403,11 +401,8 @@ mod tests { package: 
Some("///kafka.authz".to_owned()), }; - let document_url = opa_config.document_url( - &k8s_openapi::api::core::v1::Pod::default(), - None, - OpaApiVersion::V1, - ); - assert_eq!(document_url, "v1/data/kafka/authz") + let document_url = + opa_config.document_url(&k8s_openapi::api::core::v1::Pod::default(), None, V1); + assert_eq!(document_url, "v1/data/kafka/authz"); } } diff --git a/crates/stackable-operator/src/commons/product_image_selection.rs b/crates/stackable-operator/src/commons/product_image_selection.rs index a776aadb1..2b3b05e78 100644 --- a/crates/stackable-operator/src/commons/product_image_selection.rs +++ b/crates/stackable-operator/src/commons/product_image_selection.rs @@ -124,7 +124,7 @@ impl ProductImage { let image = ImageRef::parse(&image_selection.custom); let image_tag_or_hash = image.tag.or(image.hash).unwrap_or("latest".to_string()); - let app_version = format!("{}-{}", product_version, image_tag_or_hash); + let app_version = format!("{product_version}-{image_tag_or_hash}"); let app_version_label_value = Self::prepare_app_version_label_value(&app_version)?; Ok(ResolvedProductImage { @@ -180,8 +180,8 @@ impl ProductImage { ProductImageSelection::Custom(ProductImageCustom { product_version: pv, .. - }) => pv, - ProductImageSelection::StackableVersion(ProductImageStackableVersion { + }) + | ProductImageSelection::StackableVersion(ProductImageStackableVersion { product_version: pv, .. }) => pv, @@ -194,12 +194,12 @@ impl ProductImage { formatted_app_version.truncate(LABEL_VALUE_MAX_LEN); // The hash has the format `sha256:85fa483aa99b9997ce476b86893ad5ed81fb7fd2db602977eb` // As the colon (`:`) is not a valid label value character, we replace it with a valid "-" character. 
- let formatted_app_version = formatted_app_version.replace(":", "-"); + let formatted_app_version = formatted_app_version.replace(':', "-"); formatted_app_version .parse() .with_context(|_| ParseAppVersionLabelSnafu { - app_version: formatted_app_version.to_string(), + app_version: formatted_app_version, }) } } @@ -214,9 +214,9 @@ mod tests { #[case::stackable_version_without_stackable_version_stable_version( "superset", "23.7.42", - r#" + r" productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "oci.stackable.tech/sdp/superset:1.4.1-stackable23.7.42".to_string(), app_version_label_value: "1.4.1-stackable23.7.42".parse().expect("static app version label is always valid"), @@ -228,9 +228,9 @@ mod tests { #[case::stackable_version_without_stackable_version_nightly( "superset", "0.0.0-dev", - r#" + r" productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "oci.stackable.tech/sdp/superset:1.4.1-stackable0.0.0-dev".to_string(), app_version_label_value: "1.4.1-stackable0.0.0-dev".parse().expect("static app version label is always valid"), @@ -242,9 +242,9 @@ mod tests { #[case::stackable_version_without_stackable_version_pr_version( "superset", "0.0.0-pr123", - r#" + r" productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "oci.stackable.tech/sdp/superset:1.4.1-stackable0.0.0-dev".to_string(), app_version_label_value: "1.4.1-stackable0.0.0-dev".parse().expect("static app version label is always valid"), @@ -256,10 +256,10 @@ mod tests { #[case::stackable_version_without_repo( "superset", "23.7.42", - r#" + r" productVersion: 1.4.1 stackableVersion: 2.1.0 - "#, + ", ResolvedProductImage { image: "oci.stackable.tech/sdp/superset:1.4.1-stackable2.1.0".to_string(), app_version_label_value: "1.4.1-stackable2.1.0".parse().expect("static app version label is always valid"), @@ -271,11 +271,11 @@ mod tests { #[case::stackable_version_with_repo( "trino", "23.7.42", - r#" + r" productVersion: 1.4.1 stackableVersion: 2.1.0 repo: my.corp/myteam/stackable - 
"#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/trino:1.4.1-stackable2.1.0".to_string(), app_version_label_value: "1.4.1-stackable2.1.0".parse().expect("static app version label is always valid"), @@ -287,10 +287,10 @@ mod tests { #[case::custom_without_tag( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/superset".to_string(), app_version_label_value: "1.4.1-latest".parse().expect("static app version label is always valid"), @@ -302,10 +302,10 @@ mod tests { #[case::custom_with_tag( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -317,10 +317,10 @@ mod tests { #[case::custom_with_colon_in_repo_and_without_tag( "superset", "23.7.42", - r#" + r" custom: 127.0.0.1:8080/myteam/stackable/superset productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "127.0.0.1:8080/myteam/stackable/superset".to_string(), app_version_label_value: "1.4.1-latest".parse().expect("static app version label is always valid"), @@ -332,10 +332,10 @@ mod tests { #[case::custom_with_colon_in_repo_and_with_tag( "superset", "23.7.42", - r#" + r" custom: 127.0.0.1:8080/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "127.0.0.1:8080/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -347,10 +347,10 @@ mod tests { #[case::custom_with_hash_in_repo_and_without_tag( "superset", "23.7.42", - r#" + r" custom: 
oci.stackable.tech/sdp/superset@sha256:85fa483aa99b9997ce476b86893ad5ed81fb7fd2db602977eb8c42f76efc1098 productVersion: 1.4.1 - "#, + ", ResolvedProductImage { image: "oci.stackable.tech/sdp/superset@sha256:85fa483aa99b9997ce476b86893ad5ed81fb7fd2db602977eb8c42f76efc1098".to_string(), app_version_label_value: "1.4.1-sha256-85fa483aa99b9997ce476b86893ad5ed81fb7fd2db602977eb".parse().expect("static app version label is always valid"), @@ -362,11 +362,11 @@ mod tests { #[case::custom_takes_precedence( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 stackableVersion: not-used - "#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -378,11 +378,11 @@ mod tests { #[case::pull_policy_if_not_present( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 pullPolicy: IfNotPresent - "#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -394,11 +394,11 @@ mod tests { #[case::pull_policy_always( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 pullPolicy: Always - "#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -410,11 +410,11 @@ mod tests { #[case::pull_policy_never( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 pullPolicy: Never - "#, + ", ResolvedProductImage { image: 
"my.corp/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -426,14 +426,14 @@ mod tests { #[case::pull_secrets( "superset", "23.7.42", - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest productVersion: 1.4.1 pullPolicy: Always pullSecrets: - name: myPullSecrets1 - name: myPullSecrets2 - "#, + ", ResolvedProductImage { image: "my.corp/myteam/stackable/superset:latest-and-greatest".to_string(), app_version_label_value: "1.4.1-latest-and-greatest".parse().expect("static app version label is always valid"), @@ -458,15 +458,15 @@ mod tests { #[rstest] #[case::custom( - r#" + r" custom: my.corp/myteam/stackable/superset:latest-and-greatest - "#, + ", "data did not match any variant of untagged enum ProductImageSelection at line 2 column 9" )] #[case::stackable_version( - r#" + r" stackableVersion: 2.1.0 - "#, + ", "data did not match any variant of untagged enum ProductImageSelection at line 2 column 9" )] #[case::empty( diff --git a/crates/stackable-operator/src/commons/resources.rs b/crates/stackable-operator/src/commons/resources.rs index 1eac198a9..e9d82967e 100644 --- a/crates/stackable-operator/src/commons/resources.rs +++ b/crates/stackable-operator/src/commons/resources.rs @@ -557,9 +557,9 @@ mod tests { #[case::no_access( "test", None, - r#" - capacity: 10Gi"#, - r#" + r" + capacity: 10Gi", + r" apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -567,14 +567,14 @@ mod tests { spec: resources: requests: - storage: 10Gi"# + storage: 10Gi" )] #[case::access_readmany( "test2", Some(vec!["ReadWriteMany"]), - r#" - capacity: 100Gi"#, - r#" + r" + capacity: 100Gi", + r" apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -584,14 +584,14 @@ mod tests { - ReadWriteMany resources: requests: - storage: 100Gi"# + storage: 100Gi" )] #[case::multiple_accessmodes( "testtest", Some(vec!["ReadWriteMany", "ReadOnlyMany"]), - 
r#" - capacity: 200Gi"#, - r#" + r" + capacity: 200Gi", + r" apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -602,15 +602,15 @@ mod tests { - ReadOnlyMany resources: requests: - storage: 200Gi"# + storage: 200Gi" )] #[case::storage_class( "test", None, - r#" + r" capacity: 10Gi - storageClass: CustomClass"#, - r#" + storageClass: CustomClass", + r" apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -619,18 +619,18 @@ mod tests { storageClassName: CustomClass resources: requests: - storage: 10Gi"# + storage: 10Gi" )] #[case::selector( "test", None, - r#" + r" capacity: 10Gi storageClass: CustomClass selectors: matchLabels: - nodeType: directstorage"#, - r#" + nodeType: directstorage", + r" apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -642,7 +642,7 @@ mod tests { storage: 10Gi selector: matchLabels: - nodeType: directstorage"# + nodeType: directstorage" )] fn build_pvc( #[case] name: String, @@ -665,40 +665,40 @@ mod tests { #[rstest] #[case::only_memlimits( - r#" + r" memory: - limit: 1Gi"#, - r#" + limit: 1Gi", + r" limits: memory: 1Gi requests: - memory: 1Gi"# + memory: 1Gi" )] #[case::only_cpulimits( - r#" + r" cpu: min: 1000 - max: 2000"#, - r#" + max: 2000", + r" limits: cpu: 2000 requests: - cpu: 1000"# + cpu: 1000" )] #[case::mem_and_cpu_limits( - r#" + r" cpu: min: 1000 max: 2000 memory: - limit: 20Gi"#, - r#" + limit: 20Gi", + r" limits: memory: 20Gi cpu: 2000 requests: memory: 20Gi - cpu: 1000"# + cpu: 1000" )] fn into_resourcelimits(#[case] input: String, #[case] expected: String) { let input_resources_fragment: ResourcesFragment = diff --git a/crates/stackable-operator/src/commons/tls_verification.rs b/crates/stackable-operator/src/commons/tls_verification.rs index 1e399b0cf..36c96bbde 100644 --- a/crates/stackable-operator/src/commons/tls_verification.rs +++ b/crates/stackable-operator/src/commons/tls_verification.rs @@ -94,8 +94,7 @@ impl TlsClientDetails { pub fn uses_tls_verification(&self) -> bool { self.tls .as_ref() - 
.map(|tls| tls.verification != TlsVerification::None {}) - .unwrap_or_default() + .is_some_and(|tls| tls.verification != TlsVerification::None {}) } /// Returns the path of the ca.crt that should be used to verify the LDAP server certificate diff --git a/crates/stackable-operator/src/config/merge.rs b/crates/stackable-operator/src/config/merge.rs index 865cec30d..9173f00b8 100644 --- a/crates/stackable-operator/src/config/merge.rs +++ b/crates/stackable-operator/src/config/merge.rs @@ -58,7 +58,7 @@ pub trait Merge { impl Merge for Box { fn merge(&mut self, defaults: &Self) { - T::merge(self, defaults) + T::merge(self, defaults); } } impl Merge for BTreeMap { @@ -166,7 +166,7 @@ mod tests { struct Accumulator(u8); impl Merge for Accumulator { fn merge(&mut self, defaults: &Self) { - self.0 += defaults.0 + self.0 += defaults.0; } } diff --git a/crates/stackable-operator/src/cpu.rs b/crates/stackable-operator/src/cpu.rs index 5af279daa..2dded0336 100644 --- a/crates/stackable-operator/src/cpu.rs +++ b/crates/stackable-operator/src/cpu.rs @@ -93,9 +93,10 @@ impl<'de> Deserialize<'de> for CpuQuantity { impl Display for CpuQuantity { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.millis < 1000 { - true => write!(f, "{}m", self.millis), - false => write!(f, "{}", self.as_cpu_count()), + if self.millis < 1000 { + write!(f, "{}m", self.millis) + } else { + write!(f, "{}", self.as_cpu_count()) } } } @@ -267,7 +268,7 @@ mod tests { #[case(CpuQuantity::from_millis(2000), "2")] #[case(CpuQuantity::from_millis(1000), "1")] fn to_string(#[case] cpu: CpuQuantity, #[case] expected: &str) { - assert_eq!(cpu.to_string(), expected) + assert_eq!(cpu.to_string(), expected); } #[rstest] diff --git a/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs index 674ddbd91..47e9db6aa 100644 --- a/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs 
+++ b/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs @@ -201,7 +201,7 @@ mod tests { #[test] fn full() { - let input = r#" + let input = r" hostname: my.ldap.server port: 42 searchBase: ou=users,dc=example,dc=org @@ -212,7 +212,7 @@ mod tests { server: caCert: secretClass: ldap-ca-cert - "#; + "; let ldap: AuthenticationProvider = yaml_from_str_singleton_map(input).unwrap(); assert_eq!(ldap.port(), 42); diff --git a/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs index 4b5c182eb..728de99be 100644 --- a/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs @@ -62,7 +62,7 @@ impl AuthenticationProvider { .context(ParseOidcEndpointUrlSnafu)?; if self.tls.uses_tls() { - url.set_scheme("https").map_err(|_| { + url.set_scheme("https").map_err(|()| { SetOidcEndpointSchemeSnafu { scheme: "https".to_string(), endpoint: url.clone(), @@ -137,7 +137,7 @@ impl AuthenticationProvider { let secret_name_hash = hasher.finish(); // Prefix with zeros to have consistent length. Max length is 16 characters, which is caused by [`u64::MAX`]. 
- let secret_name_hash = format!("{:016x}", secret_name_hash).to_uppercase(); + let secret_name_hash = format!("{secret_name_hash:016x}").to_uppercase(); let env_var_prefix = format!("OIDC_{secret_name_hash}"); ( diff --git a/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs index 4654c1718..b3ee37668 100644 --- a/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, path::PathBuf}; +use std::{collections::BTreeMap, fmt::Write as _, path::PathBuf}; use k8s_openapi::api::core::v1::{ Container, EmptyDirVolumeSource, EnvVar, EnvVarSource, SecretKeySelector, Volume, VolumeMount, @@ -220,7 +220,7 @@ impl GitSyncResources { ) -> String { let internal_args = BTreeMap::from([ ("--repo".to_string(), git_sync.repo.as_str().to_owned()), - ("--ref".to_string(), git_sync.branch.to_owned()), + ("--ref".to_string(), git_sync.branch.clone()), ("--depth".to_string(), git_sync.depth.to_string()), ( "--period".to_string(), @@ -302,7 +302,7 @@ impl GitSyncResources { log_config, )); shell_script.push('\n'); - }; + } let git_sync_command = format!("/stackable/git-sync {args_string}"); @@ -310,12 +310,14 @@ impl GitSyncResources { shell_script.push_str(&git_sync_command); } else { // Run the git-sync command in the background - shell_script.push_str(&format!( + write!( + shell_script, "{COMMON_BASH_TRAP_FUNCTIONS} prepare_signal_handlers {git_sync_command} & wait_for_termination $!" 
- )) + ) + .expect("We can always write to a String"); } shell_script @@ -670,7 +672,7 @@ volumeMounts: assert_eq!(3, git_sync_resources.git_sync_init_containers.len()); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-0-init && exec > >(tee /stackable/log/git-sync-0-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-0-init/container.stderr.log >&2) /stackable/git-sync --depth=1 --git-config='safe.directory:/tmp/git' --link=current --one-time=true --period=20s --ref=main --repo=https://github.com/stackabletech/repo1 --root=/tmp/git @@ -702,12 +704,12 @@ volumeMounts: name: log-volume - mountPath: /mnt/extra-volume name: extra-volume -"#, +", serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.first()).unwrap() ); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-1-init && exec > >(tee /stackable/log/git-sync-1-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-1-init/container.stderr.log >&2) /stackable/git-sync --depth=3 --git-config='safe.directory:/tmp/git,http.sslCAInfo:/tmp/ca-cert/ca.crt' --link=current --one-time=true --period=60s --ref=trunk --repo=https://github.com/stackabletech/repo2 --rev=HEAD --root=/tmp/git @@ -744,12 +746,12 @@ volumeMounts: name: log-volume - mountPath: /mnt/extra-volume name: extra-volume -"#, +", serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.get(1)).unwrap() ); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-2-init && exec > >(tee /stackable/log/git-sync-2-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-2-init/container.stderr.log >&2) /stackable/git-sync --depth=1 --git-config='safe.directory:/tmp/git,key:value,safe.directory:/safe-dir' --link=current --one-time=true --period=20s --ref=feat/git-sync --repo=https://github.com/stackabletech/repo3 --root=/tmp/git @@ -781,7 +783,7 @@ volumeMounts: name: log-volume - mountPath: /mnt/extra-volume name: extra-volume -"#, +", 
serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.get(2)).unwrap() ); diff --git a/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs b/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs index fd3d3131b..af840d5c8 100644 --- a/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs +++ b/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, path::PathBuf}; +use std::{collections::BTreeMap, fmt::Write as _, path::PathBuf}; use k8s_openapi::api::core::v1::{ Container, EmptyDirVolumeSource, EnvVar, EnvVarSource, SecretKeySelector, Volume, VolumeMount, @@ -264,7 +264,7 @@ impl GitSyncResources { ) -> String { let internal_args = BTreeMap::from([ ("--repo".to_string(), git_sync.repo.as_str().to_owned()), - ("--ref".to_string(), git_sync.branch.to_owned()), + ("--ref".to_string(), git_sync.branch.clone()), ("--depth".to_string(), git_sync.depth.to_string()), ( "--period".to_string(), @@ -346,7 +346,7 @@ impl GitSyncResources { log_config, )); shell_script.push('\n'); - }; + } let git_sync_command = format!("/stackable/git-sync {args_string}"); @@ -354,12 +354,14 @@ impl GitSyncResources { shell_script.push_str(&git_sync_command); } else { // Run the git-sync command in the background - shell_script.push_str(&format!( + write!( + shell_script, "{COMMON_BASH_TRAP_FUNCTIONS} prepare_signal_handlers {git_sync_command} & wait_for_termination $!" 
- )) + ) + .expect("We can always write to a String"); } shell_script @@ -715,7 +717,7 @@ volumeMounts: assert_eq!(3, git_sync_resources.git_sync_init_containers.len()); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-0-init && exec > >(tee /stackable/log/git-sync-0-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-0-init/container.stderr.log >&2) /stackable/git-sync --depth=1 --git-config='safe.directory:/tmp/git' --link=current --one-time=true --period=20s --ref=main --repo=https://github.com/stackabletech/repo1 --root=/tmp/git @@ -747,12 +749,12 @@ volumeMounts: name: log-volume - mountPath: /mnt/extra-volume name: extra-volume -"#, +", serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.first()).unwrap() ); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-1-init && exec > >(tee /stackable/log/git-sync-1-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-1-init/container.stderr.log >&2) /stackable/git-sync --depth=3 --git-config='safe.directory:/tmp/git,http.sslCAInfo:/tmp/ca-cert/ca.crt' --link=current --one-time=true --period=60s --ref=trunk --repo=https://github.com/stackabletech/repo2 --rev=HEAD --root=/tmp/git @@ -789,12 +791,12 @@ volumeMounts: name: log-volume - mountPath: /mnt/extra-volume name: extra-volume -"#, +", serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.get(1)).unwrap() ); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-2-init && exec > >(tee /stackable/log/git-sync-2-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-2-init/container.stderr.log >&2) /stackable/git-sync --depth=1 --git-config='safe.directory:/tmp/git,key:value,safe.directory:/safe-dir' --link=current --one-time=true --period=20s --ref=feat/git-sync --repo=https://github.com/stackabletech/repo3 --root=/tmp/git @@ -826,7 +828,7 @@ volumeMounts: name: log-volume - mountPath: /mnt/extra-volume name: extra-volume -"#, +", 
serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.get(2)).unwrap() ); @@ -1029,7 +1031,7 @@ volumeMounts: assert_eq!(1, git_sync_resources.git_sync_init_containers.len()); assert_eq!( - r#"args: + r"args: - |- mkdir --parents /stackable/log/git-sync-0-init && exec > >(tee /stackable/log/git-sync-0-init/container.stdout.log) 2> >(tee /stackable/log/git-sync-0-init/container.stderr.log >&2) /stackable/git-sync --depth=3 --git-config='safe.directory:/tmp/git,http.sslCAInfo:/tmp/ca-cert/ca.crt' --link=current --one-time=true --period=60s --ref=trunk --repo=ssh://git@github.com/stackabletech/repo.git --rev=HEAD --root=/tmp/git @@ -1065,7 +1067,7 @@ volumeMounts: name: extra-volume - mountPath: /stackable/gitssh-0 name: ssh-keys-info-0 -"#, +", serde_yaml::to_string(&git_sync_resources.git_sync_init_containers.first()).unwrap() ); diff --git a/crates/stackable-operator/src/crd/listener/listeners/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/listener/listeners/v1alpha1_impl.rs index f397d0581..756403292 100644 --- a/crates/stackable-operator/src/crd/listener/listeners/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/listener/listeners/v1alpha1_impl.rs @@ -5,6 +5,7 @@ use crate::crd::listener::listeners::v1alpha1::{ }; impl ListenerSpec { + #[expect(clippy::unnecessary_wraps)] pub(super) const fn default_publish_not_ready_addresses() -> Option { Some(true) } diff --git a/crates/stackable-operator/src/crd/mod.rs b/crates/stackable-operator/src/crd/mod.rs index 3beb69aa8..4e2815af5 100644 --- a/crates/stackable-operator/src/crd/mod.rs +++ b/crates/stackable-operator/src/crd/mod.rs @@ -32,7 +32,7 @@ impl ClusterRef { pub fn to_named(name: &str, namespace: Option<&str>) -> Self { Self { name: Some(name.into()), - namespace: namespace.map(|ns| ns.into()), + namespace: namespace.map(Into::into), _kind: PhantomData, } } diff --git a/crates/stackable-operator/src/crd/s3/connection/mod.rs b/crates/stackable-operator/src/crd/s3/connection/mod.rs index 
b41c280d8..f05f1dd05 100644 --- a/crates/stackable-operator/src/crd/s3/connection/mod.rs +++ b/crates/stackable-operator/src/crd/s3/connection/mod.rs @@ -110,9 +110,14 @@ mod tests { use url::Url; use super::*; - use crate::commons::{ - secret_class::SecretClassVolume, - tls_verification::{CaCert, Tls, TlsClientDetails, TlsServerVerification, TlsVerification}, + use crate::{ + commons::{ + secret_class::SecretClassVolume, + tls_verification::{ + CaCert, Tls, TlsClientDetails, TlsServerVerification, TlsVerification, + }, + }, + crd::s3::v1alpha1::{Region, S3AccessStyle}, }; // We can't test the correct resolve, as we can't mock the k8s API. @@ -121,10 +126,10 @@ mod tests { let s3 = ResolvedConnection { host: "minio".parse().unwrap(), port: None, - access_style: Default::default(), + access_style: S3AccessStyle::default(), credentials: None, tls: TlsClientDetails { tls: None }, - region: Default::default(), + region: Region::default(), }; let (volumes, mounts) = s3.volumes_and_mounts().unwrap(); @@ -138,7 +143,7 @@ mod tests { let s3 = ResolvedConnection { host: "s3-eu-central-2.ionoscloud.com".parse().unwrap(), port: None, - access_style: Default::default(), + access_style: S3AccessStyle::default(), credentials: Some(SecretClassVolume { secret_class: "ionos-s3-credentials".to_string(), scope: None, @@ -150,7 +155,7 @@ mod tests { }), }), }, - region: Default::default(), + region: Region::default(), }; let (mut volumes, mut mounts) = s3.volumes_and_mounts().unwrap(); @@ -196,14 +201,14 @@ mod tests { let s3 = ResolvedConnection { host: "minio".parse().unwrap(), port: Some(1234), - access_style: Default::default(), + access_style: S3AccessStyle::default(), credentials: None, tls: TlsClientDetails { tls: Some(Tls { verification: TlsVerification::None {}, }), }, - region: Default::default(), + region: Region::default(), }; let (volumes, mounts) = s3.volumes_and_mounts().unwrap(); diff --git a/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs 
b/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs index 0c6c1efd7..cd926165b 100644 --- a/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs @@ -61,7 +61,7 @@ impl ConnectionSpec { let mut url = Url::parse(&endpoint).context(ParseS3EndpointSnafu { endpoint })?; if self.tls.uses_tls() { - url.set_scheme("https").map_err(|_| { + url.set_scheme("https").map_err(|()| { SetS3EndpointSchemeSnafu { scheme: "https".to_string(), endpoint: url.clone(), diff --git a/crates/stackable-operator/src/eos/mod.rs b/crates/stackable-operator/src/eos/mod.rs index 2dc5d94e1..26a2b5e90 100644 --- a/crates/stackable-operator/src/eos/mod.rs +++ b/crates/stackable-operator/src/eos/mod.rs @@ -81,7 +81,7 @@ impl EndOfSupportChecker { /// - The `options` allow customizing the checker. It is recommended to use values provided by /// CLI args, see [`EndOfSupportOptions`], [`MaintenanceOptions`](crate::cli::MaintenanceOptions), /// and [`RunArguments`](crate::cli::RunArguments). - pub fn new(built_time: &str, options: EndOfSupportOptions) -> Result { + pub fn new(built_time: &str, options: &EndOfSupportOptions) -> Result { let EndOfSupportOptions { interval, support_duration, @@ -98,13 +98,13 @@ impl EndOfSupportChecker { }; // Add the support duration to the built date. This marks the end-of-support date. - let eos_datetime = &built_datetime + *support_duration; + let eos_datetime = &built_datetime + **support_duration; Ok(Self { built_datetime, eos_datetime, - interval, - disabled, + interval: *interval, + disabled: *disabled, }) } @@ -133,7 +133,7 @@ impl EndOfSupportChecker { // interval. 
biased; - _ = &mut shutdown_signal => { + () = &mut shutdown_signal => { tracing::trace!("received shutdown signal"); break; } @@ -155,7 +155,7 @@ impl EndOfSupportChecker { continue; } - self.emit_warning(now); + self.emit_warning(&now); } } } @@ -163,10 +163,10 @@ impl EndOfSupportChecker { /// Emits the end-of-support warning. #[instrument(level = Level::DEBUG, skip(self))] - fn emit_warning(&self, now: Zoned) { + fn emit_warning(&self, now: &Zoned) { let built_datetime = jiff::fmt::rfc2822::to_string(&self.built_datetime) .expect("The build datetime can always be serialized using rfc2822::to_string"); - let build_age = Duration::try_from(&now - &self.built_datetime) + let build_age = Duration::try_from(now - &self.built_datetime) .expect("time delta of now and built datetime must not be less than 0") .to_string(); diff --git a/crates/stackable-operator/src/iter.rs b/crates/stackable-operator/src/iter.rs index e6911e081..d03f433e1 100644 --- a/crates/stackable-operator/src/iter.rs +++ b/crates/stackable-operator/src/iter.rs @@ -136,8 +136,6 @@ mod tests { #[test] fn try_from_iter_success() { - let iter = [1, 2, 3, 4]; - #[derive(Debug, PartialEq)] struct Sum(usize); @@ -150,13 +148,12 @@ mod tests { } } + let iter = [1, 2, 3, 4]; assert_eq!(Sum(10), Sum::try_from_iter(iter).unwrap()); } #[test] fn try_from_iter_error() { - let iter = ["1", "2", "3", "-4"]; - #[derive(Debug, PartialEq)] struct Sum(usize); @@ -177,6 +174,7 @@ mod tests { } } + let iter = ["1", "2", "3", "-4"]; assert!(Sum::try_from_iter(iter).is_err()); } } diff --git a/crates/stackable-operator/src/kvp/key.rs b/crates/stackable-operator/src/kvp/key.rs index ed280bea9..b07f9c1f0 100644 --- a/crates/stackable-operator/src/kvp/key.rs +++ b/crates/stackable-operator/src/kvp/key.rs @@ -417,7 +417,7 @@ mod test { .prefix() .is_some_and(|prefix| *prefix == "app.kubernetes.io"); - assert_eq!(is_valid, expected) + assert_eq!(is_valid, expected); } #[rstest] diff --git 
a/crates/stackable-operator/src/kvp/label/mod.rs b/crates/stackable-operator/src/kvp/label/mod.rs index edae2fdbe..85dcdbf16 100644 --- a/crates/stackable-operator/src/kvp/label/mod.rs +++ b/crates/stackable-operator/src/kvp/label/mod.rs @@ -65,21 +65,18 @@ where fn add_label(&mut self, label: Label) -> &mut Self { let meta = self.meta_mut(); - match &mut meta.labels { - Some(labels) => { - // TODO (@Techassi): Add an API to consume key and value - let KeyValuePair { key, value } = label.into_inner(); - labels.insert(key.to_string(), value.to_string()); - } - None => { - let mut labels = BTreeMap::new(); - - // TODO (@Techassi): Add an API to consume key and value - let KeyValuePair { key, value } = label.into_inner(); - labels.insert(key.to_string(), value.to_string()); - - meta.labels = Some(labels); - } + if let Some(labels) = &mut meta.labels { + // TODO (@Techassi): Add an API to consume key and value + let KeyValuePair { key, value } = label.into_inner(); + labels.insert(key.to_string(), value.to_string()); + } else { + let mut labels = BTreeMap::new(); + + // TODO (@Techassi): Add an API to consume key and value + let KeyValuePair { key, value } = label.into_inner(); + labels.insert(key.to_string(), value.to_string()); + + meta.labels = Some(labels); } self @@ -90,7 +87,7 @@ where match &mut meta.labels { Some(existing_labels) => { - existing_labels.extend::>(labels.into()) + existing_labels.extend::>(labels.into()); } None => meta.labels = Some(labels.into()), } @@ -397,7 +394,7 @@ impl Labels { /// This function returns a result, because the parameter `object_labels` /// can contain invalid data or can exceed the maximum allowed number of /// characters. 
- pub fn recommended(object_labels: ObjectLabels) -> Result + pub fn recommended(object_labels: &ObjectLabels) -> Result where R: Resource, { diff --git a/crates/stackable-operator/src/kvp/label/selector.rs b/crates/stackable-operator/src/kvp/label/selector.rs index 9f9c3f806..e7f4fe754 100644 --- a/crates/stackable-operator/src/kvp/label/selector.rs +++ b/crates/stackable-operator/src/kvp/label/selector.rs @@ -63,7 +63,7 @@ impl LabelSelectorExt for LabelSelector { .iter() .map(|requirement| match requirement.operator.as_str() { // In and NotIn can be handled the same, they both map to a simple "key OPERATOR (values)" string - operator @ "In" | operator @ "NotIn" => match &requirement.values { + operator @ ("In" | "NotIn") => match &requirement.values { Some(values) if !values.is_empty() => Ok(format!( "{} {} ({})", requirement.key, @@ -81,7 +81,7 @@ impl LabelSelectorExt for LabelSelector { operator: operator.to_owned(), }) } - _ => Ok(requirement.key.to_string()), + _ => Ok(requirement.key.clone()), }, // "DoesNotExist" is similar to "Exists" but it is preceded by an exclamation mark operator @ "DoesNotExist" => match &requirement.values { @@ -103,7 +103,7 @@ impl LabelSelectorExt for LabelSelector { if let Some(expressions) = expressions.transpose()? 
{ query_string.push_str(&expressions.join(",")); - }; + } Ok(query_string) } @@ -174,7 +174,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic = "LabelSelectorBinaryOperatorWithoutValues { operator: \"In\" }"] fn invalid_label_in_selector() { let match_expressions = vec![LabelSelectorRequirement { key: "foo".to_string(), @@ -191,7 +191,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic = "LabelSelectorInvalidOperator { operator: \"IllegalOperator\" }"] fn invalid_operator_in_selector() { let match_expressions = vec![LabelSelectorRequirement { key: "foo".to_string(), @@ -208,7 +208,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic = "LabelSelectorUnaryOperatorWithValues { operator: \"Exists\" }"] fn invalid_exists_operator_in_selector() { let match_expressions = vec![LabelSelectorRequirement { key: "foo".to_string(), diff --git a/crates/stackable-operator/src/kvp/mod.rs b/crates/stackable-operator/src/kvp/mod.rs index a0f211997..7e945d996 100644 --- a/crates/stackable-operator/src/kvp/mod.rs +++ b/crates/stackable-operator/src/kvp/mod.rs @@ -464,7 +464,7 @@ mod test { let labels = Labels::common("test", "test-01").unwrap(); assert!(labels.contains(("app.kubernetes.io/name", "test"))); - assert!(labels.contains_key("app.kubernetes.io/instance")) + assert!(labels.contains_key("app.kubernetes.io/instance")); } #[test] @@ -482,14 +482,14 @@ mod test { fn key_error() { let err = Label::try_from(("stäckable.tech/vendor", "Stackable")).unwrap_err(); let report = Report::from_error(err); - println!("{report}") + println!("{report}"); } #[test] fn value_error() { let err = Label::try_from(("stackable.tech/vendor", "Stäckable")).unwrap_err(); let report = Report::from_error(err); - println!("{report}") + println!("{report}"); } #[test] @@ -502,6 +502,6 @@ mod test { BTreeMap::from( Labels::try_from_iter([("a", "a"), ("b", "b"), ("c", "c"), ("d", "d")]).unwrap() ) - ) + ); } } diff --git a/crates/stackable-operator/src/lib.rs 
b/crates/stackable-operator/src/lib.rs index 46616252c..70c13f242 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -1,3 +1,18 @@ +#![deny(clippy::pedantic)] +#![expect(clippy::doc_markdown)] +#![expect(clippy::missing_errors_doc)] +#![expect(clippy::must_use_candidate)] +#![expect(clippy::return_self_not_must_use)] +#![expect(clippy::too_many_lines)] +#![expect(clippy::implicit_hasher)] +#![expect(clippy::doc_link_with_quotes)] +#![expect(clippy::missing_panics_doc)] +#![expect(clippy::explicit_deref_methods)] +#![expect(clippy::cast_possible_truncation)] +#![expect(clippy::float_cmp)] +#![expect(clippy::cast_sign_loss)] +#![expect(clippy::cast_precision_loss)] + //! ## Crate Features //! //! - `default` enables a default set of features which most operators need. diff --git a/crates/stackable-operator/src/logging/controller.rs b/crates/stackable-operator/src/logging/controller.rs index a41b73879..c043c9c52 100644 --- a/crates/stackable-operator/src/logging/controller.rs +++ b/crates/stackable-operator/src/logging/controller.rs @@ -62,23 +62,19 @@ pub async fn report_controller_reconciled( ); } Err(controller_error) => { - match controller_error { - // Errors raised from queued stuff we will mark as _warning_. - // We can't easily discriminate any further. - controller::Error::QueueError(queue_error) => tracing::warn!( + if let controller::Error::QueueError(queue_error) = controller_error { + tracing::warn!( controller.name = controller_name, error = queue_error as &dyn std::error::Error, "Queued reconcile resulted in an error" - ), - // Assume others are _error_ level. 
- // NOTE (@NickLarsenNZ): Keeping the same error message as before, - // but am not sure if it is correct - _ => tracing::error!( + ); + } else { + tracing::error!( controller.name = controller_name, error = controller_error as &dyn std::error::Error, "Failed to reconcile object" - ), - }; + ); + } publish_controller_error_as_k8s_event(recorder, controller_error).await; } diff --git a/crates/stackable-operator/src/logging/k8s_events.rs b/crates/stackable-operator/src/logging/k8s_events.rs index edb9d2ed9..c9e1682cf 100644 --- a/crates/stackable-operator/src/logging/k8s_events.rs +++ b/crates/stackable-operator/src/logging/k8s_events.rs @@ -27,7 +27,7 @@ fn error_to_event(err: &E) -> Event { reason: err.category().to_string(), note: Some(error), action: "Reconcile".to_string(), - secondary: err.secondary_object().map(|secondary| secondary.into()), + secondary: err.secondary_object().map(Into::into), } } diff --git a/crates/stackable-operator/src/memory.rs b/crates/stackable-operator/src/memory.rs index 26df0adcd..327b62296 100644 --- a/crates/stackable-operator/src/memory.rs +++ b/crates/stackable-operator/src/memory.rs @@ -71,7 +71,7 @@ impl BinaryMultiple { /// The exponential scale factor used when converting a `BinaryMultiple` /// to another one. - const fn exponential_scale_factor(&self) -> i32 { + const fn exponential_scale_factor(self) -> i32 { match self { BinaryMultiple::Kibi => 1, BinaryMultiple::Mebi => 2, @@ -138,9 +138,7 @@ impl Display for BinaryMultiple { pub fn to_java_heap(q: &Quantity, factor: f32) -> Result { let scaled = (q.0.parse::()? 
* factor).scale_for_java(); if scaled.value < 1.0 { - Err(Error::CannotConvertToJavaHeap { - value: q.0.to_owned(), - }) + Err(Error::CannotConvertToJavaHeap { value: q.0.clone() }) } else { Ok(format!( "-Xmx{:.0}{}", @@ -174,7 +172,7 @@ pub fn to_java_heap_value(q: &Quantity, factor: f32, target_unit: BinaryMultiple if scaled.value < 1.0 { Err(Error::CannotConvertToJavaHeapValue { - value: q.0.to_owned(), + value: q.0.clone(), target_unit: target_unit.to_string(), }) } else { @@ -206,19 +204,17 @@ impl MemoryQuantity { /// Scales down the unit to GB if it is TB or bigger. /// Leaves the quantity unchanged otherwise. - fn scale_to_at_most_gb(&self) -> Self { + fn scale_to_at_most_gb(self) -> Self { match self.unit { - BinaryMultiple::Kibi => *self, - BinaryMultiple::Mebi => *self, - BinaryMultiple::Gibi => *self, - BinaryMultiple::Tebi => self.scale_to(BinaryMultiple::Gibi), - BinaryMultiple::Pebi => self.scale_to(BinaryMultiple::Gibi), - BinaryMultiple::Exbi => self.scale_to(BinaryMultiple::Gibi), + BinaryMultiple::Kibi | BinaryMultiple::Mebi | BinaryMultiple::Gibi => self, + BinaryMultiple::Tebi | BinaryMultiple::Pebi | BinaryMultiple::Exbi => { + self.scale_to(BinaryMultiple::Gibi) + } } } /// Scale down the unit by one order of magnitude, i.e. GB to MB. - fn scale_down_unit(&self) -> Result { + fn scale_down_unit(self) -> Result { match self.unit { BinaryMultiple::Kibi => Err(Error::CannotScaleDownMemoryUnit), BinaryMultiple::Mebi => Ok(self.scale_to(BinaryMultiple::Kibi)), @@ -248,11 +244,11 @@ impl MemoryQuantity { /// If the MemoryQuantity value is smaller than 1 (starts with a zero), convert it to a smaller /// unit until the non fractional part of the value is not zero anymore. /// This can fail if the quantity is smaller than 1kB. - fn ensure_no_zero(&self) -> Result { + fn ensure_no_zero(self) -> Result { if self.value < 1. 
{ self.scale_down_unit()?.ensure_no_zero() } else { - Ok(*self) + Ok(self) } } @@ -260,7 +256,7 @@ impl MemoryQuantity { /// This is done by picking smaller units until the fractional part is smaller than the tolerated /// rounding loss, and then rounding down. /// This can fail if the tolerated rounding loss is less than 1kB. - fn ensure_integer(&self, tolerated_rounding_loss: MemoryQuantity) -> Result { + fn ensure_integer(self, tolerated_rounding_loss: MemoryQuantity) -> Result { let fraction_memory = MemoryQuantity { value: self.value.fract(), unit: self.unit, @@ -289,17 +285,18 @@ impl MemoryQuantity { /// Scales the unit to a value supported by Java and may even scale /// further down, in an attempt to avoid having zero sizes or losing too /// much precision. - fn scale_for_java(&self) -> Self { + fn scale_for_java(self) -> Self { + const EPS: f32 = 0.2; + let (norm_value, norm_unit) = match self.unit { - BinaryMultiple::Kibi => (self.value, self.unit), - BinaryMultiple::Mebi => (self.value, self.unit), - BinaryMultiple::Gibi => (self.value, self.unit), + BinaryMultiple::Kibi | BinaryMultiple::Mebi | BinaryMultiple::Gibi => { + (self.value, self.unit) + } BinaryMultiple::Tebi => (self.value * 1024.0, BinaryMultiple::Gibi), BinaryMultiple::Pebi => (self.value * 1024.0 * 1024.0, BinaryMultiple::Gibi), BinaryMultiple::Exbi => (self.value * 1024.0 * 1024.0 * 1024.0, BinaryMultiple::Gibi), }; - const EPS: f32 = 0.2; let (scaled_value, scaled_unit) = if norm_value < 1.0 || norm_value.fract() > EPS { match norm_unit { BinaryMultiple::Mebi => (norm_value * 1024.0, BinaryMultiple::Kibi), @@ -509,7 +506,7 @@ impl From for Quantity { impl From<&MemoryQuantity> for Quantity { fn from(quantity: &MemoryQuantity) -> Self { - Quantity(format!("{}", quantity)) + Quantity(format!("{quantity}")) } } @@ -522,7 +519,7 @@ mod tests { #[rstest] #[case("256Ki", MemoryQuantity { value: 256.0, unit: BinaryMultiple::Kibi })] - #[case("49041204Ki", MemoryQuantity { value: 49041204.0, 
unit: BinaryMultiple::Kibi })] + #[case("49041204Ki", MemoryQuantity { value: 49_041_204.0, unit: BinaryMultiple::Kibi })] #[case("8Mi", MemoryQuantity { value: 8.0, unit: BinaryMultiple::Mebi })] #[case("1.5Gi", MemoryQuantity { value: 1.5, unit: BinaryMultiple::Gibi })] #[case("0.8Ti", MemoryQuantity { value: 0.8, unit: BinaryMultiple::Tebi })] @@ -583,7 +580,7 @@ mod tests { #[case("2Mi", 0.8, BinaryMultiple::Kibi, 1638)] #[case("1.5Gi", 0.8, BinaryMultiple::Mebi, 1228)] #[case("2Gi", 0.8, BinaryMultiple::Mebi, 1638)] - #[case("2Ti", 0.8, BinaryMultiple::Mebi, 1677721)] + #[case("2Ti", 0.8, BinaryMultiple::Mebi, 1_677_721)] #[case("2Ti", 0.8, BinaryMultiple::Gibi, 1638)] #[case("2Ti", 1.0, BinaryMultiple::Gibi, 2048)] #[case("2048Ki", 1.0, BinaryMultiple::Mebi, 2)] @@ -664,7 +661,7 @@ mod tests { fn partial_ord(#[case] lhs: &str, #[case] rhs: &str, #[case] res: bool) { let lhs = MemoryQuantity::try_from(Quantity(lhs.to_owned())).unwrap(); let rhs = MemoryQuantity::try_from(Quantity(rhs.to_owned())).unwrap(); - assert_eq!(lhs > rhs, res) + assert_eq!(lhs > rhs, res); } #[rstest] @@ -675,7 +672,7 @@ mod tests { fn partial_eq(#[case] lhs: &str, #[case] rhs: &str, #[case] res: bool) { let lhs = MemoryQuantity::try_from(Quantity(lhs.to_owned())).unwrap(); let rhs = MemoryQuantity::try_from(Quantity(rhs.to_owned())).unwrap(); - assert_eq!(lhs == rhs, res) + assert_eq!(lhs == rhs, res); } #[rstest] diff --git a/crates/stackable-operator/src/product_config_utils.rs b/crates/stackable-operator/src/product_config_utils.rs index bcf75572e..fb8c5ef73 100644 --- a/crates/stackable-operator/src/product_config_utils.rs +++ b/crates/stackable-operator/src/product_config_utils.rs @@ -173,7 +173,7 @@ pub fn config_for_role_and_group<'a>( #[allow(clippy::type_complexity)] pub fn transform_all_roles_to_config( resource: &T::Configurable, - roles: HashMap< + roles: &HashMap< String, ( Vec, @@ -188,10 +188,10 @@ where { let mut result = HashMap::new(); - for (role_name, 
(property_name_kinds, role)) in &roles { + for (role_name, (property_name_kinds, role)) in roles { let role_properties = transform_role_to_config(resource, role_name, role, property_name_kinds)?; - result.insert(role_name.to_string(), role_properties); + result.insert(role_name.clone(), role_properties); } Ok(result) @@ -220,7 +220,7 @@ pub fn validate_all_roles_and_groups_config( let mut result = HashMap::new(); for (role, role_group) in role_config { - let role_entry = result.entry(role.to_string()).or_insert(HashMap::new()); + let role_entry = result.entry(role.clone()).or_insert(HashMap::new()); for (group, properties_by_kind) in role_group { role_entry.insert( @@ -305,7 +305,7 @@ fn process_validation_result( let mut properties = BTreeMap::new(); let mut collected_errors = Vec::new(); - for (key, result) in validation_result.iter() { + for (key, result) in validation_result { match result { PropertyValidationResult::Default(value) => { debug!( @@ -395,7 +395,7 @@ where // Properties from the role have the lowest priority, so they are computed first... let role_properties = parse_role_config(resource, role_name, &role.config, property_kinds)?; - let role_overrides = parse_role_overrides(&role.config, property_kinds)?; + let role_overrides = parse_role_overrides(&role.config, property_kinds); // for each role group ... for (role_group_name, role_group) in &role.role_groups { @@ -420,7 +420,7 @@ where } // ... compute the role group overrides and merge them into `role_group_properties_merged`. 
- let role_group_overrides = parse_role_overrides(&role_group.config, property_kinds)?; + let role_group_overrides = parse_role_overrides(&role_group.config, property_kinds); for (property_kind, property_overrides) in role_group_overrides { role_group_properties_merged .entry(property_kind) @@ -477,7 +477,7 @@ where fn parse_role_overrides( config: &CommonConfiguration, property_kinds: &[PropertyNameKind], -) -> Result>>> +) -> HashMap>> where T: Configuration, { @@ -485,7 +485,7 @@ where for property_kind in property_kinds { match property_kind { PropertyNameKind::File(file) => { - result.insert(property_kind.clone(), parse_file_overrides(config, file)?) + result.insert(property_kind.clone(), parse_file_overrides(config, file)) } PropertyNameKind::Env => result.insert( property_kind.clone(), @@ -508,13 +508,13 @@ where }; } - Ok(result) + result } fn parse_file_overrides( config: &CommonConfiguration, file: &str, -) -> Result>> +) -> BTreeMap> where T: Configuration, { @@ -527,7 +527,7 @@ where } } - Ok(final_overrides) + final_overrides } /// Extract the environment variables of a rolegroup config into a vector of EnvVars. @@ -670,13 +670,15 @@ pub fn insert_or_update_env_vars(env_vars: &[EnvVar], env_overrides: &[EnvVar]) let mut combined = BTreeMap::new(); for env_var in env_vars.iter().chain(env_overrides) { - combined.insert(env_var.name.to_owned(), env_var.to_owned()); + combined.insert(env_var.name.clone(), env_var.to_owned()); } combined.into_values().collect() } #[cfg(test)] +#[expect(clippy::unnecessary_wraps)] +#[expect(clippy::fn_params_excessive_bools)] mod tests { macro_rules! 
collection { // map-like @@ -735,7 +737,7 @@ mod tests { ) -> Result>> { let mut result = BTreeMap::new(); if let Some(env) = &self.env { - result.insert("env".to_string(), Some(env.to_string())); + result.insert("env".to_string(), Some(env.clone())); } Ok(result) } @@ -747,7 +749,7 @@ mod tests { ) -> Result>> { let mut result = BTreeMap::new(); if let Some(cli) = &self.cli { - result.insert("cli".to_string(), Some(cli.to_string())); + result.insert("cli".to_string(), Some(cli.clone())); } Ok(result) } @@ -760,7 +762,7 @@ mod tests { ) -> Result>> { let mut result = BTreeMap::new(); if let Some(conf) = &self.conf { - result.insert("file".to_string(), Some(conf.to_string())); + result.insert("file".to_string(), Some(conf.clone())); } Ok(result) } @@ -824,7 +826,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -841,7 +843,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -855,7 +857,7 @@ mod tests { None, None, ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -872,7 +874,7 @@ mod tests { None, None, ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -889,7 +891,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! 
{role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -906,7 +908,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: CommonConfiguration::default(), @@ -919,7 +921,7 @@ mod tests { None, None, ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -937,7 +939,7 @@ mod tests { None, None, ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: CommonConfiguration::default(), @@ -950,7 +952,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -967,7 +969,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -979,7 +981,7 @@ mod tests { }, (false, true, false, true) => Role { config: CommonConfiguration::default(), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -991,7 +993,7 @@ mod tests { }, (false, true, false, false) => Role { config: CommonConfiguration::default(), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! 
{role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -1008,7 +1010,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -1025,7 +1027,7 @@ mod tests { build_env_override(ROLE_ENV_OVERRIDE), build_cli_override(ROLE_CLI_OVERRIDE), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: CommonConfiguration::default(), @@ -1033,7 +1035,7 @@ mod tests { }, (false, false, false, true) => Role { config: CommonConfiguration::default(), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: build_common_config( @@ -1045,7 +1047,7 @@ mod tests { }, (false, false, false, false) => Role { config: CommonConfiguration::default(), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group => RoleGroup { replicas: Some(1), config: CommonConfiguration::default(), @@ -1338,7 +1340,7 @@ mod tests { Some(role_env_override), Some(role_cli_override), ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {"role_group".to_string() => RoleGroup { replicas: Some(1), config: build_common_config( @@ -1437,7 +1439,7 @@ mod tests { None, None, ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! {role_group_1.to_string() => RoleGroup { replicas: Some(1), config: build_common_config( @@ -1464,7 +1466,7 @@ mod tests { None, None, ), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! 
{role_group_1.to_string() => RoleGroup { replicas: Some(1), config: build_common_config( @@ -1504,7 +1506,7 @@ mod tests { } }}; - let all_config = transform_all_roles_to_config(&String::new(), roles).unwrap(); + let all_config = transform_all_roles_to_config(&String::new(), &roles).unwrap(); assert_eq!(all_config, expected); } @@ -1529,7 +1531,7 @@ mod tests { > = collection! { role_1.to_string() => (vec![PropertyNameKind::File(file_name.to_string()), PropertyNameKind::Env], Role { config: CommonConfiguration::default(), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! { role_group_1.to_string() => RoleGroup { replicas: Some(1), @@ -1544,7 +1546,7 @@ mod tests { ), role_2.to_string() => (vec![PropertyNameKind::File(file_name.to_string())], Role { config: CommonConfiguration::default(), - role_config: Default::default(), + role_config: TestRoleConfig::default(), role_groups: collection! { role_group_2.to_string() => RoleGroup { replicas: Some(1), @@ -1559,7 +1561,7 @@ mod tests { ), }; - let role_config = transform_all_roles_to_config(&String::new(), roles).unwrap(); + let role_config = transform_all_roles_to_config(&String::new(), &roles).unwrap(); let config = &format!( " diff --git a/crates/stackable-operator/src/product_logging/framework.rs b/crates/stackable-operator/src/product_logging/framework.rs index 69b2c69f4..35c7bab02 100644 --- a/crates/stackable-operator/src/product_logging/framework.rs +++ b/crates/stackable-operator/src/product_logging/framework.rs @@ -108,7 +108,7 @@ pub enum LoggingError { pub fn calculate_log_volume_size_limit(max_log_files_size: &[MemoryQuantity]) -> Quantity { let log_volume_size_limit = max_log_files_size .iter() - .cloned() + .copied() .sum::() .scale_to(BinaryMultiple::Mebi) // According to the reasons mentioned in the function documentation, the multiplier must be @@ -194,7 +194,7 @@ pub fn capture_shell_output( file_log_level <= LogLevel::INFO, ) { (true, true) => 
format!(" > >(tee {log_file_dir}/container.stdout.log)"), - (true, false) => "".into(), + (true, false) => String::new(), (false, true) => format!(" > {log_file_dir}/container.stdout.log"), (false, false) => " > /dev/null".into(), }; @@ -204,7 +204,7 @@ pub fn capture_shell_output( file_log_level <= LogLevel::ERROR, ) { (true, true) => format!(" 2> >(tee {log_file_dir}/container.stderr.log >&2)"), - (true, false) => "".into(), + (true, false) => String::new(), (false, true) => format!(" 2> {log_file_dir}/container.stderr.log"), (false, false) => " 2> /dev/null".into(), }; @@ -305,7 +305,7 @@ pub fn create_log4j_config( }); format!( - r#"log4j.rootLogger={root_log_level}, CONSOLE, FILE + r"log4j.rootLogger={root_log_level}, CONSOLE, FILE log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender log4j.appender.CONSOLE.Threshold={console_log_level} @@ -319,7 +319,7 @@ log4j.appender.FILE.MaxFileSize={max_log_file_size_in_mib}MB log4j.appender.FILE.MaxBackupIndex={number_of_archived_log_files} log4j.appender.FILE.layout=org.apache.log4j.xml.XMLLayout -{loggers}"#, +{loggers}", max_log_file_size_in_mib = cmp::max(1, max_size_in_mib / (1 + number_of_archived_log_files)), root_log_level = config.root_log_level().to_log4j_literal(), @@ -414,7 +414,7 @@ pub fn create_log4j2_config( .collect::>() .join(", "); let loggers = if logger_names.is_empty() { - "".to_string() + String::new() } else { format!("loggers = {logger_names}") }; @@ -431,7 +431,7 @@ pub fn create_log4j2_config( }); format!( - r#"appenders = FILE, CONSOLE + r"appenders = FILE, CONSOLE appender.CONSOLE.type = Console appender.CONSOLE.name = CONSOLE @@ -458,7 +458,7 @@ appender.FILE.filter.threshold.level = {file_log_level} rootLogger.level={root_log_level} rootLogger.appenderRefs = CONSOLE, FILE rootLogger.appenderRef.CONSOLE.ref = CONSOLE -rootLogger.appenderRef.FILE.ref = FILE"#, +rootLogger.appenderRef.FILE.ref = FILE", max_log_file_size_in_mib = cmp::max(1, max_size_in_mib / (1 + 
number_of_archived_log_files)), root_log_level = config.root_log_level().to_log4j2_literal(), @@ -708,8 +708,7 @@ where LogLevel::INFO => r#"!includes(["TRACE", "DEBUG"], .metadata.level)"#, LogLevel::WARN => r#"!includes(["TRACE", "DEBUG", "INFO"], .metadata.level)"#, LogLevel::ERROR => r#"!includes(["TRACE", "DEBUG", "INFO", "WARN"], .metadata.level)"#, - LogLevel::FATAL => "false", - LogLevel::NONE => "false", + LogLevel::FATAL | LogLevel::NONE => "false", }; format!( diff --git a/crates/stackable-operator/src/product_logging/spec.rs b/crates/stackable-operator/src/product_logging/spec.rs index 0f6bf3f8e..b013466e2 100644 --- a/crates/stackable-operator/src/product_logging/spec.rs +++ b/crates/stackable-operator/src/product_logging/spec.rs @@ -248,7 +248,7 @@ impl AutomaticContainerLogConfig { pub fn root_log_level(&self) -> LogLevel { self.loggers .get(Self::ROOT_LOGGER) - .map(|root| root.level.to_owned()) + .map(|root| root.level) .unwrap_or_default() } } @@ -338,8 +338,7 @@ impl LogLevel { LogLevel::DEBUG => "debug", LogLevel::INFO => "info", LogLevel::WARN => "warn", - LogLevel::ERROR => "error", - LogLevel::FATAL => "error", + LogLevel::ERROR | LogLevel::FATAL => "error", LogLevel::NONE => "off", } .into() @@ -352,8 +351,7 @@ impl LogLevel { LogLevel::DEBUG => "DEBUG", LogLevel::INFO => "INFO", LogLevel::WARN => "WARN", - LogLevel::ERROR => "ERROR", - LogLevel::FATAL => "ERROR", + LogLevel::ERROR | LogLevel::FATAL => "ERROR", LogLevel::NONE => "OFF", } .into() @@ -382,13 +380,9 @@ impl LogLevel { // based on https://www.openpolicyagent.org/docs/latest/cli/#options-10 opa has only log levels {debug,info,error} pub fn to_opa_literal(&self) -> String { match self { - LogLevel::TRACE => "debug", - LogLevel::DEBUG => "debug", + LogLevel::TRACE | LogLevel::DEBUG => "debug", LogLevel::INFO => "info", - LogLevel::WARN => "error", - LogLevel::ERROR => "error", - LogLevel::FATAL => "error", - LogLevel::NONE => "error", + LogLevel::WARN | LogLevel::ERROR | 
LogLevel::FATAL | LogLevel::NONE => "error", } .into() } @@ -396,8 +390,7 @@ impl LogLevel { /// Convert the log level to a Python expression pub fn to_python_expression(&self) -> String { match self { - LogLevel::TRACE => "logging.DEBUG", - LogLevel::DEBUG => "logging.DEBUG", + LogLevel::TRACE | LogLevel::DEBUG => "logging.DEBUG", LogLevel::INFO => "logging.INFO", LogLevel::WARN => "logging.WARNING", LogLevel::ERROR => "logging.ERROR", diff --git a/crates/stackable-operator/src/status/condition/daemonset.rs b/crates/stackable-operator/src/status/condition/daemonset.rs index c7e79bb9a..d34891a12 100644 --- a/crates/stackable-operator/src/status/condition/daemonset.rs +++ b/crates/stackable-operator/src/status/condition/daemonset.rs @@ -37,7 +37,7 @@ impl DaemonSetConditionBuilder { let current_status = Self::daemon_set_available(ds); if current_status != ClusterConditionStatus::True { - unavailable_resources.push(ds.name_any()) + unavailable_resources.push(ds.name_any()); } available = cmp::max(available, current_status); diff --git a/crates/stackable-operator/src/status/condition/deployment.rs b/crates/stackable-operator/src/status/condition/deployment.rs index 44b67de06..5d3f080e3 100644 --- a/crates/stackable-operator/src/status/condition/deployment.rs +++ b/crates/stackable-operator/src/status/condition/deployment.rs @@ -36,7 +36,7 @@ impl DeploymentConditionBuilder { let current_status = Self::deployment_available(deployment); if current_status != ClusterConditionStatus::True { - unavailable_resources.push(deployment.name_any()) + unavailable_resources.push(deployment.name_any()); } available = cmp::max(available, current_status); diff --git a/crates/stackable-operator/src/status/condition/mod.rs b/crates/stackable-operator/src/status/condition/mod.rs index c658d03fb..20f470e7e 100644 --- a/crates/stackable-operator/src/status/condition/mod.rs +++ b/crates/stackable-operator/src/status/condition/mod.rs @@ -181,7 +181,7 @@ impl ClusterCondition { /// combines 
it with the optional message to provide more context. pub fn display_long(&self) -> String { match &self.message { - Some(message) => format!("{}: {}", self, message), + Some(message) => format!("{self}: {message}"), None => self.to_string(), } } @@ -193,9 +193,10 @@ impl ClusterCondition { /// which contains the optional message to provide more context. Internally /// this method uses the `display_short` and `display_long` methods. pub fn display_short_or_long(&self) -> String { - match self.is_good() { - true => self.display_short(), - false => self.display_long(), + if self.is_good() { + self.display_short() + } else { + self.display_long() } } } @@ -328,7 +329,7 @@ impl ClusterConditionSet { _ => None, } { result.put(condition); - }; + } } result diff --git a/crates/stackable-operator/src/status/condition/operations.rs b/crates/stackable-operator/src/status/condition/operations.rs index 6f0d074f5..15b37f7ed 100644 --- a/crates/stackable-operator/src/status/condition/operations.rs +++ b/crates/stackable-operator/src/status/condition/operations.rs @@ -123,8 +123,8 @@ mod tests { #[case] expected_stopped_status: ClusterConditionStatus, ) { let cluster_operation = ClusterOperation { - reconciliation_paused, stopped, + reconciliation_paused, }; let op_condition_builder = ClusterOperationsConditionBuilder::new(&cluster_operation); diff --git a/crates/stackable-operator/src/status/condition/statefulset.rs b/crates/stackable-operator/src/status/condition/statefulset.rs index e69eb716c..e0eea3625 100644 --- a/crates/stackable-operator/src/status/condition/statefulset.rs +++ b/crates/stackable-operator/src/status/condition/statefulset.rs @@ -36,7 +36,7 @@ impl StatefulSetConditionBuilder { let current_status = Self::stateful_set_available(sts); if current_status != ClusterConditionStatus::True { - unavailable_resources.push(sts.name_any()) + unavailable_resources.push(sts.name_any()); } available = cmp::max(available, current_status); diff --git 
a/crates/stackable-operator/src/status/rollout.rs b/crates/stackable-operator/src/status/rollout.rs index b90d63353..e5c56e90f 100644 --- a/crates/stackable-operator/src/status/rollout.rs +++ b/crates/stackable-operator/src/status/rollout.rs @@ -5,6 +5,8 @@ use std::borrow::Cow; use k8s_openapi::api::apps::v1::StatefulSet; use snafu::Snafu; +use crate::status::rollout::outdated_statefulset::{HasOutdatedReplicasSnafu, NotYetObservedSnafu}; + /// The reason for why a [`StatefulSet`] is still rolling out. Returned by [`check_statefulset_rollout_complete`]. #[derive(Debug, Snafu)] #[snafu(module(outdated_statefulset))] @@ -35,8 +37,6 @@ pub enum StatefulSetRolloutInProgress { pub fn check_statefulset_rollout_complete( sts: &StatefulSet, ) -> Result<(), StatefulSetRolloutInProgress> { - use outdated_statefulset::*; - let status = sts.status.as_ref().map_or_else(Cow::default, Cow::Borrowed); let current_generation = sts.metadata.generation; diff --git a/crates/stackable-operator/src/utils/crds.rs b/crates/stackable-operator/src/utils/crds.rs index 4511b5109..fa0f8a30d 100644 --- a/crates/stackable-operator/src/utils/crds.rs +++ b/crates/stackable-operator/src/utils/crds.rs @@ -57,12 +57,12 @@ mod tests { #[test] fn valid_pod_override_with_labels() { - let input = r#" + let input = r" podOverrides: metadata: labels: my-custom-label: super-important-label - "#; + "; serde_yaml::from_str::(input).expect("Failed to parse valid podOverride"); } @@ -109,12 +109,12 @@ mod tests { #[test] fn invalid_pod_override_missing_container_name() { - let input = r#" + let input = r" podOverrides: spec: containers: - image: oci.stackable.tech/sdp/nifi:1.23.2-stackable23.11.0 - "#; + "; // FIXME: Ideally we would require the names of the containers to be set. We had users using podOverrides // without setting the name of the container and wondering why it didn't work. 
diff --git a/crates/stackable-operator/src/utils/kubelet.rs b/crates/stackable-operator/src/utils/kubelet.rs index 5579930c7..c09df5526 100644 --- a/crates/stackable-operator/src/utils/kubelet.rs +++ b/crates/stackable-operator/src/utils/kubelet.rs @@ -40,7 +40,7 @@ impl KubeletConfig { pub async fn fetch(client: &Client, node_name: &str) -> Result { let url_path = format!("/api/v1/nodes/{node_name}/proxy/configz"); let req = http::Request::get(url_path.clone()) - .body(Default::default()) + .body(Vec::new()) .context(BuildConfigzRequestSnafu { url_path })?; let resp = client diff --git a/crates/stackable-operator/src/utils/logging.rs b/crates/stackable-operator/src/utils/logging.rs index e58b63bab..7db400e72 100644 --- a/crates/stackable-operator/src/utils/logging.rs +++ b/crates/stackable-operator/src/utils/logging.rs @@ -36,11 +36,11 @@ pub fn print_startup_string( rustc_version: &str, ) { let git = match git_version { - None => "".to_string(), + None => String::new(), Some(git) => format!(" (Git information: {git})"), }; info!("Starting {pkg_description}"); info!( "This is version {pkg_version}{git}, built for {target} by {rustc_version} at {built_time}", - ) + ); } diff --git a/crates/stackable-operator/src/utils/url.rs b/crates/stackable-operator/src/utils/url.rs index e7e94e0fe..81ca9356e 100644 --- a/crates/stackable-operator/src/utils/url.rs +++ b/crates/stackable-operator/src/utils/url.rs @@ -31,7 +31,7 @@ impl UrlExt for url::Url { while let Some(input) = iter.next() { url = if !input.ends_with('/') && iter.peek().is_some() { - url.join(&format!("{}/", input))? + url.join(&format!("{input}/"))? } else { url.join(input)? 
}; diff --git a/crates/stackable-operator/src/validation.rs b/crates/stackable-operator/src/validation.rs index dbc5beb9e..ef2334bf8 100644 --- a/crates/stackable-operator/src/validation.rs +++ b/crates/stackable-operator/src/validation.rs @@ -190,7 +190,7 @@ fn validate_str_regex( fn validate_all(validations: impl IntoIterator>) -> Result { let errors = validations .into_iter() - .filter_map(|res| res.err()) + .filter_map(Result::err) .collect::>(); if errors.is_empty() { Ok(()) From 48ead1795f62ea1384e3e3252ab2689bba5a759e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 15:26:10 +0200 Subject: [PATCH 02/12] clippy::too_many_lines --- crates/stackable-operator/src/commons/affinity.rs | 1 + crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs | 1 + crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs | 3 +++ crates/stackable-operator/src/lib.rs | 1 - crates/stackable-operator/src/product_config_utils.rs | 2 ++ crates/stackable-operator/src/product_logging/spec.rs | 1 + 6 files changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/stackable-operator/src/commons/affinity.rs b/crates/stackable-operator/src/commons/affinity.rs index 9dbc47daa..903e7241f 100644 --- a/crates/stackable-operator/src/commons/affinity.rs +++ b/crates/stackable-operator/src/commons/affinity.rs @@ -139,6 +139,7 @@ mod tests { use crate::config::fragment; #[test] + #[expect(clippy::too_many_lines)] fn merge_new_attributes() { let default_affinity = StackableAffinityFragment { pod_affinity: None, diff --git a/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs index b3ee37668..fbefae71e 100644 --- a/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs @@ -388,6 +388,7 @@ mod tests { } #[test] + #[expect(clippy::too_many_lines)] fn test_multiple_git_syncs() { let git_sync_spec = r#" # GitSync with defaults diff 
--git a/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs b/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs index af840d5c8..45c2385b3 100644 --- a/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs +++ b/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs @@ -84,6 +84,7 @@ impl GitSyncResources { } /// Creates `GitSyncResources` from the given `GitSync` specifications. + #[expect(clippy::too_many_lines)] pub fn new( git_syncs: &[GitSync], resolved_product_image: &ResolvedProductImage, @@ -432,6 +433,7 @@ mod tests { } #[test] + #[expect(clippy::too_many_lines)] fn test_multiple_git_syncs() { let git_sync_spec = r#" # GitSync with defaults @@ -906,6 +908,7 @@ name: content-from-git-2 } #[test] + #[expect(clippy::too_many_lines)] fn test_git_sync_ssh() { let git_sync_spec = r#" # GitSync using SSH diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index 70c13f242..be2ca878a 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -3,7 +3,6 @@ #![expect(clippy::missing_errors_doc)] #![expect(clippy::must_use_candidate)] #![expect(clippy::return_self_not_must_use)] -#![expect(clippy::too_many_lines)] #![expect(clippy::implicit_hasher)] #![expect(clippy::doc_link_with_quotes)] #![expect(clippy::missing_panics_doc)] diff --git a/crates/stackable-operator/src/product_config_utils.rs b/crates/stackable-operator/src/product_config_utils.rs index fb8c5ef73..bebe5fd3f 100644 --- a/crates/stackable-operator/src/product_config_utils.rs +++ b/crates/stackable-operator/src/product_config_utils.rs @@ -809,6 +809,7 @@ mod tests { Some(collection! 
{property.to_string() => property.to_string()}) } + #[expect(clippy::too_many_lines)] fn build_role_and_group( role_config: bool, group_config: bool, @@ -1512,6 +1513,7 @@ mod tests { } #[test] + #[expect(clippy::too_many_lines)] fn test_validate_all_roles_and_groups_config() { let role_1 = "role_1"; let role_2 = "role_2"; diff --git a/crates/stackable-operator/src/product_logging/spec.rs b/crates/stackable-operator/src/product_logging/spec.rs index b013466e2..679d23c57 100644 --- a/crates/stackable-operator/src/product_logging/spec.rs +++ b/crates/stackable-operator/src/product_logging/spec.rs @@ -561,6 +561,7 @@ mod tests { } #[test] + #[expect(clippy::too_many_lines)] fn merge_automatic_container_log_config_fragment() { // no overriding log level + no default log level -> no log level assert_eq!( From 42cfdac6bb33684665c211295b54db2336ed5f56 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 15:29:33 +0200 Subject: [PATCH 03/12] clippy::implicit_hasher --- crates/stackable-operator/src/config/fragment.rs | 4 ++-- crates/stackable-operator/src/config/merge.rs | 4 ++-- crates/stackable-operator/src/lib.rs | 1 - .../stackable-operator/src/product_config_utils.rs | 13 +++++++++---- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/crates/stackable-operator/src/config/fragment.rs b/crates/stackable-operator/src/config/fragment.rs index 291e4d6f5..697cea13a 100644 --- a/crates/stackable-operator/src/config/fragment.rs +++ b/crates/stackable-operator/src/config/fragment.rs @@ -5,7 +5,7 @@ use std::{ collections::{BTreeMap, HashMap}, fmt::{Display, Write}, - hash::Hash, + hash::{BuildHasher, Hash}, }; use k8s_openapi::api::core::v1::PodTemplateSpec; @@ -134,7 +134,7 @@ impl FromFragment for T { fragment.ok_or_else(|| validator.error_required()) } } -impl FromFragment for HashMap +impl FromFragment for HashMap where K: Eq + Hash + Display, { diff --git a/crates/stackable-operator/src/config/merge.rs 
b/crates/stackable-operator/src/config/merge.rs index 9173f00b8..a68b9886a 100644 --- a/crates/stackable-operator/src/config/merge.rs +++ b/crates/stackable-operator/src/config/merge.rs @@ -2,7 +2,7 @@ use std::{ collections::{BTreeMap, HashMap, btree_map, hash_map}, - hash::Hash, + hash::{BuildHasher, Hash}, }; use k8s_openapi::{ @@ -75,7 +75,7 @@ impl Merge for BTreeMap { } } } -impl Merge for HashMap { +impl Merge for HashMap { fn merge(&mut self, defaults: &Self) { for (k, default_v) in defaults { match self.entry(k.clone()) { diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index be2ca878a..c98ce8b9d 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -3,7 +3,6 @@ #![expect(clippy::missing_errors_doc)] #![expect(clippy::must_use_candidate)] #![expect(clippy::return_self_not_must_use)] -#![expect(clippy::implicit_hasher)] #![expect(clippy::doc_link_with_quotes)] #![expect(clippy::missing_panics_doc)] #![expect(clippy::explicit_deref_methods)] diff --git a/crates/stackable-operator/src/product_config_utils.rs b/crates/stackable-operator/src/product_config_utils.rs index bebe5fd3f..ec2bdd7fd 100644 --- a/crates/stackable-operator/src/product_config_utils.rs +++ b/crates/stackable-operator/src/product_config_utils.rs @@ -1,4 +1,7 @@ -use std::collections::{BTreeMap, HashMap}; +use std::{ + collections::{BTreeMap, HashMap}, + hash::BuildHasher, +}; use k8s_openapi::api::core::v1::EnvVar; use product_config::{ProductConfigManager, PropertyValidationResult, types::PropertyNameKind}; @@ -171,7 +174,7 @@ pub fn config_for_role_and_group<'a>( /// - `roles`: A map keyed by role names. The value is a tuple of a vector of `PropertyNameKind` /// like (Cli, Env or Files) and [`crate::role_utils::Role`] with a boxed [`Configuration`]. 
#[allow(clippy::type_complexity)] -pub fn transform_all_roles_to_config( +pub fn transform_all_roles_to_config( resource: &T::Configurable, roles: &HashMap< String, @@ -179,12 +182,14 @@ pub fn transform_all_roles_to_config( Vec, Role, ), + S, >, ) -> Result where T: Configuration, U: Default + JsonSchema + Serialize, ProductSpecificCommonConfig: Default + JsonSchema + Serialize, + S: BuildHasher, { let mut result = HashMap::new(); @@ -570,8 +575,8 @@ where /// env_vars_from_rolegroup_config(&rolegroup_config) /// ); /// ``` -pub fn env_vars_from_rolegroup_config( - rolegroup_config: &HashMap>, +pub fn env_vars_from_rolegroup_config( + rolegroup_config: &HashMap, S>, ) -> Vec { env_vars_from( rolegroup_config From c0201c7332e6232354aa49e83fcd83fdf8c4a171 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 15:30:56 +0200 Subject: [PATCH 04/12] Move clippy::doc_link_with_quotes --- crates/stackable-operator/src/config/fragment.rs | 1 + crates/stackable-operator/src/lib.rs | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/stackable-operator/src/config/fragment.rs b/crates/stackable-operator/src/config/fragment.rs index 697cea13a..e6a29c0b0 100644 --- a/crates/stackable-operator/src/config/fragment.rs +++ b/crates/stackable-operator/src/config/fragment.rs @@ -1,3 +1,4 @@ +#![allow(clippy::doc_link_with_quotes)] //! Fragments are partially validated parts of a product configuration. For example, mandatory values may be missing. //! Fragments may be [`validate`]d and turned into their ["full"](`FromFragment`) type. //! 
diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index c98ce8b9d..2781ca73d 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -3,7 +3,6 @@ #![expect(clippy::missing_errors_doc)] #![expect(clippy::must_use_candidate)] #![expect(clippy::return_self_not_must_use)] -#![expect(clippy::doc_link_with_quotes)] #![expect(clippy::missing_panics_doc)] #![expect(clippy::explicit_deref_methods)] #![expect(clippy::cast_possible_truncation)] From 9d0a34845aa8eed5a660443165f2a2c2f8e6a39f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 15:41:22 +0200 Subject: [PATCH 05/12] clippy::use_self --- .../src/builder/configmap.rs | 4 +- .../stackable-operator/src/builder/event.rs | 6 +- crates/stackable-operator/src/builder/meta.rs | 8 +- crates/stackable-operator/src/builder/pdb.rs | 2 +- .../src/builder/pod/container.rs | 18 ++-- .../stackable-operator/src/builder/pod/mod.rs | 4 +- .../src/builder/pod/resources.rs | 2 +- .../src/builder/pod/security.rs | 8 +- .../src/builder/pod/volume.rs | 18 ++-- crates/stackable-operator/src/client.rs | 2 +- .../src/cluster_resources.rs | 20 ++-- .../src/commons/networking.rs | 16 +-- .../stackable-operator/src/config/fragment.rs | 6 +- crates/stackable-operator/src/cpu.rs | 24 ++--- .../crd/authentication/core/v1alpha1_impl.rs | 4 +- .../crd/authentication/ldap/v1alpha1_impl.rs | 2 +- .../src/crd/git_sync/v1alpha1_impl.rs | 8 +- .../src/crd/git_sync/v1alpha2_impl.rs | 8 +- .../src/crd/listener/core/v1alpha1_impl.rs | 6 +- crates/stackable-operator/src/iter.rs | 2 +- crates/stackable-operator/src/lib.rs | 4 + .../src/logging/k8s_events.rs | 2 +- crates/stackable-operator/src/memory.rs | 100 +++++++++--------- crates/stackable-operator/src/namespace.rs | 8 +- .../src/product_logging/spec.rs | 56 +++++----- .../src/status/condition/mod.rs | 10 +- 26 files changed, 176 insertions(+), 172 deletions(-) diff --git 
a/crates/stackable-operator/src/builder/configmap.rs b/crates/stackable-operator/src/builder/configmap.rs index 0dfe1b334..d23dfbb41 100644 --- a/crates/stackable-operator/src/builder/configmap.rs +++ b/crates/stackable-operator/src/builder/configmap.rs @@ -19,8 +19,8 @@ pub struct ConfigMapBuilder { } impl ConfigMapBuilder { - pub fn new() -> ConfigMapBuilder { - ConfigMapBuilder::default() + pub fn new() -> Self { + Self::default() } pub fn metadata_default(&mut self) -> &mut Self { diff --git a/crates/stackable-operator/src/builder/event.rs b/crates/stackable-operator/src/builder/event.rs index 162cc7641..9813040ed 100644 --- a/crates/stackable-operator/src/builder/event.rs +++ b/crates/stackable-operator/src/builder/event.rs @@ -34,7 +34,7 @@ impl EventBuilder { /// # Arguments /// /// - `resource` - The resource for which this event is created, will be used to create the `involvedObject` and `metadata.name` fields - pub fn new(resource: &T) -> EventBuilder + pub fn new(resource: &T) -> Self where T: Resource, { @@ -48,10 +48,10 @@ impl EventBuilder { uid: resource.meta().uid.clone(), }; - EventBuilder { + Self { name: resource.name_any(), involved_object, - ..EventBuilder::default() + ..Self::default() } } diff --git a/crates/stackable-operator/src/builder/meta.rs b/crates/stackable-operator/src/builder/meta.rs index 7f5acdce2..bb926250b 100644 --- a/crates/stackable-operator/src/builder/meta.rs +++ b/crates/stackable-operator/src/builder/meta.rs @@ -34,8 +34,8 @@ pub struct ObjectMetaBuilder { } impl ObjectMetaBuilder { - pub fn new() -> ObjectMetaBuilder { - ObjectMetaBuilder::default() + pub fn new() -> Self { + Self::default() } /// This sets the name and namespace from a given resource @@ -228,8 +228,8 @@ pub struct OwnerReferenceBuilder { } impl OwnerReferenceBuilder { - pub fn new() -> OwnerReferenceBuilder { - OwnerReferenceBuilder::default() + pub fn new() -> Self { + Self::default() } pub fn api_version(&mut self, api_version: impl Into) -> &mut Self 
{ diff --git a/crates/stackable-operator/src/builder/pdb.rs b/crates/stackable-operator/src/builder/pdb.rs index 02be4f323..9542fd979 100644 --- a/crates/stackable-operator/src/builder/pdb.rs +++ b/crates/stackable-operator/src/builder/pdb.rs @@ -59,7 +59,7 @@ pub enum PodDisruptionBudgetConstraint { impl PodDisruptionBudgetBuilder<(), (), ()> { pub fn new() -> Self { - PodDisruptionBudgetBuilder::default() + Self::default() } /// This method populates [`PodDisruptionBudget::metadata`] and diff --git a/crates/stackable-operator/src/builder/pod/container.rs b/crates/stackable-operator/src/builder/pod/container.rs index 55d030e79..c501bfa76 100644 --- a/crates/stackable-operator/src/builder/pod/container.rs +++ b/crates/stackable-operator/src/builder/pod/container.rs @@ -67,9 +67,9 @@ pub struct ContainerBuilder { impl ContainerBuilder { pub fn new(name: &str) -> Result { Self::validate_container_name(name)?; - Ok(ContainerBuilder { + Ok(Self { name: name.to_string(), - ..ContainerBuilder::default() + ..Self::default() }) } @@ -371,9 +371,9 @@ pub struct ContainerPortBuilder { impl ContainerPortBuilder { pub fn new(container_port: i32) -> Self { - ContainerPortBuilder { + Self { container_port, - ..ContainerPortBuilder::default() + ..Self::default() } } @@ -422,11 +422,11 @@ pub enum FieldPathEnvVar { impl fmt::Display for FieldPathEnvVar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - FieldPathEnvVar::Name => write!(f, "metadata.name"), - FieldPathEnvVar::Namespace => write!(f, "metadata.namespace"), - FieldPathEnvVar::UID => write!(f, "metadata.uid"), - FieldPathEnvVar::Labels(name) => write!(f, "metadata.labels['{name}']"), - FieldPathEnvVar::Annotations(name) => write!(f, "metadata.annotations['{name}']"), + Self::Name => write!(f, "metadata.name"), + Self::Namespace => write!(f, "metadata.namespace"), + Self::UID => write!(f, "metadata.uid"), + Self::Labels(name) => write!(f, "metadata.labels['{name}']"), + Self::Annotations(name) => 
write!(f, "metadata.annotations['{name}']"), } } } diff --git a/crates/stackable-operator/src/builder/pod/mod.rs b/crates/stackable-operator/src/builder/pod/mod.rs index 4ac14575c..8f12b9d00 100644 --- a/crates/stackable-operator/src/builder/pod/mod.rs +++ b/crates/stackable-operator/src/builder/pod/mod.rs @@ -88,8 +88,8 @@ pub struct PodBuilder { } impl PodBuilder { - pub fn new() -> PodBuilder { - PodBuilder::default() + pub fn new() -> Self { + Self::default() } pub fn service_account_name(&mut self, value: impl Into) -> &mut Self { diff --git a/crates/stackable-operator/src/builder/pod/resources.rs b/crates/stackable-operator/src/builder/pod/resources.rs index 64967e0f6..f63beb6c9 100644 --- a/crates/stackable-operator/src/builder/pod/resources.rs +++ b/crates/stackable-operator/src/builder/pod/resources.rs @@ -24,7 +24,7 @@ pub struct ResourceRequirementsBuilder { impl ResourceRequirementsBuilder<(), (), (), ()> { pub fn new() -> Self { - ResourceRequirementsBuilder::default() + Self::default() } } diff --git a/crates/stackable-operator/src/builder/pod/security.rs b/crates/stackable-operator/src/builder/pod/security.rs index 72426cf05..5cffa0f56 100644 --- a/crates/stackable-operator/src/builder/pod/security.rs +++ b/crates/stackable-operator/src/builder/pod/security.rs @@ -18,8 +18,8 @@ impl SecurityContextBuilder { } } - pub fn new() -> SecurityContextBuilder { - SecurityContextBuilder::default() + pub fn new() -> Self { + Self::default() } pub fn allow_privilege_escalation(&mut self, value: bool) -> &mut Self { @@ -150,8 +150,8 @@ pub struct PodSecurityContextBuilder { } impl PodSecurityContextBuilder { - pub fn new() -> PodSecurityContextBuilder { - PodSecurityContextBuilder::default() + pub fn new() -> Self { + Self::default() } pub fn build(&self) -> PodSecurityContext { diff --git a/crates/stackable-operator/src/builder/pod/volume.rs b/crates/stackable-operator/src/builder/pod/volume.rs index 23bfcdfe3..732d5b348 100644 --- 
a/crates/stackable-operator/src/builder/pod/volume.rs +++ b/crates/stackable-operator/src/builder/pod/volume.rs @@ -48,10 +48,10 @@ impl Default for VolumeSource { } impl VolumeBuilder { - pub fn new(name: impl Into) -> VolumeBuilder { - VolumeBuilder { + pub fn new(name: impl Into) -> Self { + Self { name: name.into(), - ..VolumeBuilder::default() + ..Self::default() } } @@ -222,11 +222,11 @@ pub struct VolumeMountBuilder { } impl VolumeMountBuilder { - pub fn new(name: impl Into, mount_path: impl Into) -> VolumeMountBuilder { - VolumeMountBuilder { + pub fn new(name: impl Into, mount_path: impl Into) -> Self { + Self { mount_path: mount_path.into(), name: name.into(), - ..VolumeMountBuilder::default() + ..Self::default() } } @@ -428,10 +428,10 @@ impl ListenerReference { /// Return the key and value for a Kubernetes object annotation fn to_annotation(&self) -> Result { match self { - ListenerReference::ListenerClass(class) => { + Self::ListenerClass(class) => { Annotation::try_from(("listeners.stackable.tech/listener-class", class.as_str())) } - ListenerReference::ListenerName(name) => { + Self::ListenerName(name) => { Annotation::try_from(("listeners.stackable.tech/listener-name", name.as_str())) } } @@ -493,7 +493,7 @@ impl ListenerOperatorVolumeSourceBuilder { pub fn new( listener_reference: &ListenerReference, labels: &Labels, - ) -> ListenerOperatorVolumeSourceBuilder { + ) -> Self { Self { listener_reference: listener_reference.to_owned(), labels: labels.to_owned(), diff --git a/crates/stackable-operator/src/client.rs b/crates/stackable-operator/src/client.rs index d5e3273b1..cbd36391a 100644 --- a/crates/stackable-operator/src/client.rs +++ b/crates/stackable-operator/src/client.rs @@ -113,7 +113,7 @@ impl Client { default_namespace: String, kubernetes_cluster_info: KubernetesClusterInfo, ) -> Self { - Client { + Self { client, post_params: PostParams { field_manager: field_manager.clone(), diff --git a/crates/stackable-operator/src/cluster_resources.rs 
b/crates/stackable-operator/src/cluster_resources.rs index 345de5269..606ce5baa 100644 --- a/crates/stackable-operator/src/cluster_resources.rs +++ b/crates/stackable-operator/src/cluster_resources.rs @@ -144,11 +144,11 @@ pub enum ClusterResourceApplyStrategy { impl From<&ClusterOperation> for ClusterResourceApplyStrategy { fn from(commons_spec: &ClusterOperation) -> Self { if commons_spec.reconciliation_paused { - ClusterResourceApplyStrategy::ReconciliationPaused + Self::ReconciliationPaused } else if commons_spec.stopped { - ClusterResourceApplyStrategy::ClusterStopped + Self::ClusterStopped } else { - ClusterResourceApplyStrategy::Default + Self::Default } } } @@ -205,10 +205,10 @@ impl ClusterResourceApplyStrategy { /// Indicates if orphaned resources should be deleted depending on the strategy. const fn delete_orphans(&self) -> bool { match self { - ClusterResourceApplyStrategy::NoApply - | ClusterResourceApplyStrategy::ReconciliationPaused => false, - ClusterResourceApplyStrategy::ClusterStopped - | ClusterResourceApplyStrategy::Default => true, + Self::NoApply + | Self::ReconciliationPaused => false, + Self::ClusterStopped + | Self::Default => true, } } } @@ -234,7 +234,7 @@ impl ClusterResource for Job { impl ClusterResource for StatefulSet { fn maybe_mutate(self, strategy: &ClusterResourceApplyStrategy) -> Self { match strategy { - ClusterResourceApplyStrategy::ClusterStopped => StatefulSet { + ClusterResourceApplyStrategy::ClusterStopped => Self { spec: Some(StatefulSetSpec { replicas: Some(0), ..self.spec.unwrap_or_default() @@ -257,7 +257,7 @@ impl ClusterResource for StatefulSet { impl ClusterResource for DaemonSet { fn maybe_mutate(self, strategy: &ClusterResourceApplyStrategy) -> Self { match strategy { - ClusterResourceApplyStrategy::ClusterStopped => DaemonSet { + ClusterResourceApplyStrategy::ClusterStopped => Self { spec: Some(DaemonSetSpec { template: PodTemplateSpec { spec: Some(PodSpec { @@ -299,7 +299,7 @@ impl ClusterResource for DaemonSet 
{ impl ClusterResource for Deployment { fn maybe_mutate(self, strategy: &ClusterResourceApplyStrategy) -> Self { match strategy { - ClusterResourceApplyStrategy::ClusterStopped => Deployment { + ClusterResourceApplyStrategy::ClusterStopped => Self { spec: Some(DeploymentSpec { replicas: Some(0), ..self.spec.unwrap_or_default() diff --git a/crates/stackable-operator/src/commons/networking.rs b/crates/stackable-operator/src/commons/networking.rs index 7bbf911a1..b8800fbc1 100644 --- a/crates/stackable-operator/src/commons/networking.rs +++ b/crates/stackable-operator/src/commons/networking.rs @@ -21,7 +21,7 @@ impl FromStr for DomainName { fn from_str(value: &str) -> Result { validation::is_domain(value)?; - Ok(DomainName(value.to_owned())) + Ok(Self(value.to_owned())) } } @@ -92,11 +92,11 @@ impl FromStr for HostName { fn from_str(value: &str) -> Result { if let Ok(ip) = value.parse::() { - return Ok(HostName::IpAddress(ip)); + return Ok(Self::IpAddress(ip)); } if let Ok(domain_name) = value.parse() { - return Ok(HostName::DomainName(domain_name)); + return Ok(Self::DomainName(domain_name)); } InvalidHostnameSnafu { @@ -123,8 +123,8 @@ impl From for String { impl Display for HostName { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - HostName::IpAddress(ip) => write!(f, "{ip}"), - HostName::DomainName(domain_name) => write!(f, "{domain_name}"), + Self::IpAddress(ip) => write!(f, "{ip}"), + Self::DomainName(domain_name) => write!(f, "{domain_name}"), } } } @@ -133,11 +133,11 @@ impl HostName { /// Formats the host in such a way that it can be used in URLs. 
pub fn as_url_host(&self) -> String { match self { - HostName::IpAddress(ip) => match ip { + Self::IpAddress(ip) => match ip { IpAddr::V4(ip) => ip.to_string(), IpAddr::V6(ip) => format!("[{ip}]"), }, - HostName::DomainName(domain_name) => domain_name.to_string(), + Self::DomainName(domain_name) => domain_name.to_string(), } } } @@ -155,7 +155,7 @@ impl TryFrom for KerberosRealmName { fn try_from(value: String) -> Result { validation::is_kerberos_realm_name(&value)?; - Ok(KerberosRealmName(value)) + Ok(Self(value)) } } diff --git a/crates/stackable-operator/src/config/fragment.rs b/crates/stackable-operator/src/config/fragment.rs index e6a29c0b0..a1a89f6be 100644 --- a/crates/stackable-operator/src/config/fragment.rs +++ b/crates/stackable-operator/src/config/fragment.rs @@ -24,7 +24,7 @@ use crate::role_utils::{Role, RoleGroup}; /// Constructed internally in [`validate`] pub struct Validator<'a> { ident: Option<&'a dyn Display>, - parent: Option<&'a Validator<'a>>, + parent: Option<&'a Self>, } impl Validator<'_> { @@ -193,8 +193,8 @@ impl FromFragment for Option { } } impl FromFragment for PodTemplateSpec { - type Fragment = PodTemplateSpec; - type RequiredFragment = PodTemplateSpec; + type Fragment = Self; + type RequiredFragment = Self; fn from_fragment( fragment: Self::Fragment, diff --git a/crates/stackable-operator/src/cpu.rs b/crates/stackable-operator/src/cpu.rs index 2dded0336..e4e5f62ee 100644 --- a/crates/stackable-operator/src/cpu.rs +++ b/crates/stackable-operator/src/cpu.rs @@ -150,7 +150,7 @@ impl From for Quantity { impl From<&CpuQuantity> for Quantity { fn from(quantity: &CpuQuantity) -> Self { - Quantity(format!("{}", quantity.as_cpu_count())) + Self(format!("{}", quantity.as_cpu_count())) } } @@ -170,22 +170,22 @@ impl TryFrom for CpuQuantity { } } -impl Add for CpuQuantity { - type Output = CpuQuantity; +impl Add for CpuQuantity { + type Output = Self; - fn add(self, rhs: CpuQuantity) -> Self::Output { - CpuQuantity::from_millis(self.millis + 
rhs.millis) + fn add(self, rhs: Self) -> Self::Output { + Self::from_millis(self.millis + rhs.millis) } } -impl AddAssign for CpuQuantity { - fn add_assign(&mut self, rhs: CpuQuantity) { +impl AddAssign for CpuQuantity { + fn add_assign(&mut self, rhs: Self) { self.millis += rhs.millis; } } impl Mul for CpuQuantity { - type Output = CpuQuantity; + type Output = Self; fn mul(self, rhs: usize) -> Self::Output { Self { @@ -201,7 +201,7 @@ impl MulAssign for CpuQuantity { } impl Mul for CpuQuantity { - type Output = CpuQuantity; + type Output = Self; fn mul(self, rhs: f32) -> Self::Output { Self { @@ -210,10 +210,10 @@ impl Mul for CpuQuantity { } } -impl Div for CpuQuantity { +impl Div for CpuQuantity { type Output = f32; - fn div(self, rhs: CpuQuantity) -> Self::Output { + fn div(self, rhs: Self) -> Self::Output { self.millis as f32 / rhs.millis as f32 } } @@ -226,7 +226,7 @@ impl MulAssign for CpuQuantity { impl Sum for CpuQuantity { fn sum>(iter: I) -> Self { - iter.fold(CpuQuantity { millis: 0 }, CpuQuantity::add) + iter.fold(Self { millis: 0 }, Self::add) } } diff --git a/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs index 13f1eb922..8a2d87229 100644 --- a/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs @@ -23,9 +23,9 @@ impl AuthenticationClass { pub async fn resolve( client: &Client, authentication_class_name: &str, - ) -> crate::client::Result { + ) -> crate::client::Result { client - .get::(authentication_class_name, &()) // AuthenticationClass has ClusterScope + .get::(authentication_class_name, &()) // AuthenticationClass has ClusterScope .await } } diff --git a/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs index 47e9db6aa..3d49c4949 100644 --- 
a/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs @@ -156,7 +156,7 @@ impl FieldNames { impl Default for FieldNames { fn default() -> Self { - FieldNames { + Self { uid: Self::default_uid(), group: Self::default_group(), given_name: Self::default_given_name(), diff --git a/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs index fbefae71e..a75a45293 100644 --- a/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/git_sync/v1alpha1_impl.rs @@ -84,18 +84,18 @@ impl GitSyncResources { extra_volume_mounts: &[VolumeMount], log_volume_name: &str, container_log_config: &ContainerLogConfig, - ) -> Result { - let mut resources = GitSyncResources::default(); + ) -> Result { + let mut resources = Self::default(); for (i, git_sync) in git_syncs.iter().enumerate() { let mut env_vars = vec![]; if let Some(git_credentials_secret) = &git_sync.credentials_secret { - env_vars.push(GitSyncResources::env_var_from_secret( + env_vars.push(Self::env_var_from_secret( "GITSYNC_USERNAME", git_credentials_secret, "user", )); - env_vars.push(GitSyncResources::env_var_from_secret( + env_vars.push(Self::env_var_from_secret( "GITSYNC_PASSWORD", git_credentials_secret, "password", diff --git a/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs b/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs index 45c2385b3..c46b9ad10 100644 --- a/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs +++ b/crates/stackable-operator/src/crd/git_sync/v1alpha2_impl.rs @@ -92,8 +92,8 @@ impl GitSyncResources { extra_volume_mounts: &[VolumeMount], log_volume_name: &str, container_log_config: &ContainerLogConfig, - ) -> Result { - let mut resources = GitSyncResources::default(); + ) -> Result { + let mut resources = Self::default(); for (i, git_sync) in 
git_syncs.iter().enumerate() { let mut env_vars = vec![]; @@ -101,12 +101,12 @@ impl GitSyncResources { if let Some(Credentials::BasicAuthSecretName(basic_auth_secret_name)) = &git_sync.credentials { - env_vars.push(GitSyncResources::env_var_from_secret( + env_vars.push(Self::env_var_from_secret( "GITSYNC_USERNAME", basic_auth_secret_name, "user", )); - env_vars.push(GitSyncResources::env_var_from_secret( + env_vars.push(Self::env_var_from_secret( "GITSYNC_PASSWORD", basic_auth_secret_name, "password", diff --git a/crates/stackable-operator/src/crd/listener/core/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/listener/core/v1alpha1_impl.rs index a6bd85198..e140f5b14 100644 --- a/crates/stackable-operator/src/crd/listener/core/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/listener/core/v1alpha1_impl.rs @@ -6,12 +6,12 @@ use crate::crd::listener::{ impl PreferredAddressType { pub fn resolve(self, listener_class: &ListenerClassSpec) -> AddressType { match self { - PreferredAddressType::HostnameConservative => match listener_class.service_type { + Self::HostnameConservative => match listener_class.service_type { ServiceType::NodePort => AddressType::Ip, _ => AddressType::Hostname, }, - PreferredAddressType::Hostname => AddressType::Hostname, - PreferredAddressType::Ip => AddressType::Ip, + Self::Hostname => AddressType::Hostname, + Self::Ip => AddressType::Ip, } } } diff --git a/crates/stackable-operator/src/iter.rs b/crates/stackable-operator/src/iter.rs index d03f433e1..22319d54f 100644 --- a/crates/stackable-operator/src/iter.rs +++ b/crates/stackable-operator/src/iter.rs @@ -144,7 +144,7 @@ mod tests { fn try_from_iter>(iter: I) -> Result { let sum = iter.into_iter().sum(); - Ok(Sum(sum)) + Ok(Self(sum)) } } diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index 2781ca73d..30bfe9fda 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -1,3 +1,4 @@ +// Pedantic lints 
#![deny(clippy::pedantic)] #![expect(clippy::doc_markdown)] #![expect(clippy::missing_errors_doc)] @@ -9,6 +10,9 @@ #![expect(clippy::float_cmp)] #![expect(clippy::cast_sign_loss)] #![expect(clippy::cast_precision_loss)] +// Nursery lints +// #![deny(clippy::nursery)] +#![deny(clippy::use_self)] //! ## Crate Features //! diff --git a/crates/stackable-operator/src/logging/k8s_events.rs b/crates/stackable-operator/src/logging/k8s_events.rs index c9e1682cf..e995c5ea4 100644 --- a/crates/stackable-operator/src/logging/k8s_events.rs +++ b/crates/stackable-operator/src/logging/k8s_events.rs @@ -171,7 +171,7 @@ mod tests { fn secondary_object(&self) -> Option> { match self { - ErrorFoo::Bar { + Self::Bar { source: ErrorBar::Baz { source: ErrorBaz::NoChocolate { descriptor }, diff --git a/crates/stackable-operator/src/memory.rs b/crates/stackable-operator/src/memory.rs index 327b62296..ab9e7913c 100644 --- a/crates/stackable-operator/src/memory.rs +++ b/crates/stackable-operator/src/memory.rs @@ -60,12 +60,12 @@ pub enum BinaryMultiple { impl BinaryMultiple { pub fn to_java_memory_unit(&self) -> String { match self { - BinaryMultiple::Kibi => "k".to_string(), - BinaryMultiple::Mebi => "m".to_string(), - BinaryMultiple::Gibi => "g".to_string(), - BinaryMultiple::Tebi => "t".to_string(), - BinaryMultiple::Pebi => "p".to_string(), - BinaryMultiple::Exbi => "e".to_string(), + Self::Kibi => "k".to_string(), + Self::Mebi => "m".to_string(), + Self::Gibi => "g".to_string(), + Self::Tebi => "t".to_string(), + Self::Pebi => "p".to_string(), + Self::Exbi => "e".to_string(), } } @@ -73,12 +73,12 @@ impl BinaryMultiple { /// to another one. 
const fn exponential_scale_factor(self) -> i32 { match self { - BinaryMultiple::Kibi => 1, - BinaryMultiple::Mebi => 2, - BinaryMultiple::Gibi => 3, - BinaryMultiple::Tebi => 4, - BinaryMultiple::Pebi => 5, - BinaryMultiple::Exbi => 6, + Self::Kibi => 1, + Self::Mebi => 2, + Self::Gibi => 3, + Self::Tebi => 4, + Self::Pebi => 5, + Self::Exbi => 6, } } @@ -90,14 +90,14 @@ impl BinaryMultiple { impl FromStr for BinaryMultiple { type Err = Error; - fn from_str(q: &str) -> Result { + fn from_str(q: &str) -> Result { match q { - "Ki" => Ok(BinaryMultiple::Kibi), - "Mi" => Ok(BinaryMultiple::Mebi), - "Gi" => Ok(BinaryMultiple::Gibi), - "Ti" => Ok(BinaryMultiple::Tebi), - "Pi" => Ok(BinaryMultiple::Pebi), - "Ei" => Ok(BinaryMultiple::Exbi), + "Ki" => Ok(Self::Kibi), + "Mi" => Ok(Self::Mebi), + "Gi" => Ok(Self::Gibi), + "Ti" => Ok(Self::Tebi), + "Pi" => Ok(Self::Pebi), + "Ei" => Ok(Self::Exbi), _ => Err(Error::InvalidQuantityUnit { value: q.to_string(), }), @@ -108,12 +108,12 @@ impl FromStr for BinaryMultiple { impl Display for BinaryMultiple { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let out = match self { - BinaryMultiple::Kibi => "Ki", - BinaryMultiple::Mebi => "Mi", - BinaryMultiple::Gibi => "Gi", - BinaryMultiple::Tebi => "Ti", - BinaryMultiple::Pebi => "Pi", - BinaryMultiple::Exbi => "Ei", + Self::Kibi => "Ki", + Self::Mebi => "Mi", + Self::Gibi => "Gi", + Self::Tebi => "Ti", + Self::Pebi => "Pi", + Self::Exbi => "Ei", }; out.fmt(f) @@ -256,8 +256,8 @@ impl MemoryQuantity { /// This is done by picking smaller units until the fractional part is smaller than the tolerated /// rounding loss, and then rounding down. /// This can fail if the tolerated rounding loss is less than 1kB. 
- fn ensure_integer(self, tolerated_rounding_loss: MemoryQuantity) -> Result { - let fraction_memory = MemoryQuantity { + fn ensure_integer(self, tolerated_rounding_loss: Self) -> Result { + let fraction_memory = Self { value: self.value.fract(), unit: self.unit, }; @@ -278,7 +278,7 @@ impl MemoryQuantity { let m = self .scale_to_at_most_gb() // Java Heap only supports specifying kb, mb or gb .ensure_no_zero()? // We don't want 0.9 or 0.2 - .ensure_integer(MemoryQuantity::from_mebi(20.))?; // Java only accepts integers not floats + .ensure_integer(Self::from_mebi(20.))?; // Java only accepts integers not floats Ok(format!("{}{}", m.value, m.unit.to_java_memory_unit())) } @@ -307,7 +307,7 @@ impl MemoryQuantity { (norm_value, norm_unit) }; - MemoryQuantity { + Self { value: scaled_value, unit: scaled_unit, } @@ -321,7 +321,7 @@ impl MemoryQuantity { let exponent_diff = from_exponent - to_exponent; - MemoryQuantity { + Self { value: self.value * 1024f32.powi(exponent_diff), unit: binary_multiple, } @@ -373,7 +373,7 @@ impl FromStr for MemoryQuantity { value: q.to_owned(), })?; let (value, unit) = q.split_at(start_of_unit); - Ok(MemoryQuantity { + Ok(Self { value: value.parse::().context(InvalidQuantitySnafu { value: q.to_owned(), })?, @@ -389,10 +389,10 @@ impl Display for MemoryQuantity { } impl Mul for MemoryQuantity { - type Output = MemoryQuantity; + type Output = Self; fn mul(self, factor: f32) -> Self { - MemoryQuantity { + Self { value: self.value * factor, unit: self.unit, } @@ -407,38 +407,38 @@ impl Div for MemoryQuantity { } } -impl Div for MemoryQuantity { +impl Div for MemoryQuantity { type Output = f32; - fn div(self, rhs: MemoryQuantity) -> Self::Output { + fn div(self, rhs: Self) -> Self::Output { let rhs = rhs.scale_to(self.unit); self.value / rhs.value } } -impl Sub for MemoryQuantity { - type Output = MemoryQuantity; +impl Sub for MemoryQuantity { + type Output = Self; - fn sub(self, rhs: MemoryQuantity) -> Self::Output { - MemoryQuantity { + fn 
sub(self, rhs: Self) -> Self::Output { + Self { value: self.value - rhs.scale_to(self.unit).value, unit: self.unit, } } } -impl SubAssign for MemoryQuantity { - fn sub_assign(&mut self, rhs: MemoryQuantity) { +impl SubAssign for MemoryQuantity { + fn sub_assign(&mut self, rhs: Self) { let rhs = rhs.scale_to(self.unit); self.value -= rhs.value; } } -impl Add for MemoryQuantity { - type Output = MemoryQuantity; +impl Add for MemoryQuantity { + type Output = Self; - fn add(self, rhs: MemoryQuantity) -> Self::Output { - MemoryQuantity { + fn add(self, rhs: Self) -> Self::Output { + Self { value: self.value + rhs.scale_to(self.unit).value, unit: self.unit, } @@ -448,17 +448,17 @@ impl Add for MemoryQuantity { impl Sum for MemoryQuantity { fn sum>(iter: I) -> Self { iter.fold( - MemoryQuantity { + Self { value: 0.0, unit: BinaryMultiple::Kibi, }, - MemoryQuantity::add, + Self::add, ) } } -impl AddAssign for MemoryQuantity { - fn add_assign(&mut self, rhs: MemoryQuantity) { +impl AddAssign for MemoryQuantity { + fn add_assign(&mut self, rhs: Self) { let rhs = rhs.scale_to(self.unit); self.value += rhs.value; } @@ -506,7 +506,7 @@ impl From for Quantity { impl From<&MemoryQuantity> for Quantity { fn from(quantity: &MemoryQuantity) -> Self { - Quantity(format!("{quantity}")) + Self(format!("{quantity}")) } } diff --git a/crates/stackable-operator/src/namespace.rs b/crates/stackable-operator/src/namespace.rs index ccca8dc13..ec5809d3a 100644 --- a/crates/stackable-operator/src/namespace.rs +++ b/crates/stackable-operator/src/namespace.rs @@ -13,9 +13,9 @@ pub enum WatchNamespace { impl From<&str> for WatchNamespace { fn from(s: &str) -> Self { if s.is_empty() { - WatchNamespace::All + Self::All } else { - WatchNamespace::One(s.to_string()) + Self::One(s.to_string()) } } } @@ -28,8 +28,8 @@ impl WatchNamespace { T: Resource, { match self { - WatchNamespace::All => client.get_all_api(), - WatchNamespace::One(namespace) => client.get_api::(namespace), + Self::All => 
client.get_all_api(), + Self::One(namespace) => client.get_api::(namespace), } } } diff --git a/crates/stackable-operator/src/product_logging/spec.rs b/crates/stackable-operator/src/product_logging/spec.rs index 679d23c57..db6622b61 100644 --- a/crates/stackable-operator/src/product_logging/spec.rs +++ b/crates/stackable-operator/src/product_logging/spec.rs @@ -334,12 +334,12 @@ impl LogLevel { /// Convert the log level to a string understood by Vector pub fn to_vector_literal(&self) -> String { match self { - LogLevel::TRACE => "trace", - LogLevel::DEBUG => "debug", - LogLevel::INFO => "info", - LogLevel::WARN => "warn", - LogLevel::ERROR | LogLevel::FATAL => "error", - LogLevel::NONE => "off", + Self::TRACE => "trace", + Self::DEBUG => "debug", + Self::INFO => "info", + Self::WARN => "warn", + Self::ERROR | Self::FATAL => "error", + Self::NONE => "off", } .into() } @@ -347,12 +347,12 @@ impl LogLevel { /// Convert the log level to a string understood by logback pub fn to_logback_literal(&self) -> String { match self { - LogLevel::TRACE => "TRACE", - LogLevel::DEBUG => "DEBUG", - LogLevel::INFO => "INFO", - LogLevel::WARN => "WARN", - LogLevel::ERROR | LogLevel::FATAL => "ERROR", - LogLevel::NONE => "OFF", + Self::TRACE => "TRACE", + Self::DEBUG => "DEBUG", + Self::INFO => "INFO", + Self::WARN => "WARN", + Self::ERROR | Self::FATAL => "ERROR", + Self::NONE => "OFF", } .into() } @@ -360,13 +360,13 @@ impl LogLevel { /// Convert the log level to a string understood by log4j pub fn to_log4j_literal(&self) -> String { match self { - LogLevel::TRACE => "TRACE", - LogLevel::DEBUG => "DEBUG", - LogLevel::INFO => "INFO", - LogLevel::WARN => "WARN", - LogLevel::ERROR => "ERROR", - LogLevel::FATAL => "FATAL", - LogLevel::NONE => "OFF", + Self::TRACE => "TRACE", + Self::DEBUG => "DEBUG", + Self::INFO => "INFO", + Self::WARN => "WARN", + Self::ERROR => "ERROR", + Self::FATAL => "FATAL", + Self::NONE => "OFF", } .into() } @@ -380,9 +380,9 @@ impl LogLevel { // based on 
https://www.openpolicyagent.org/docs/latest/cli/#options-10 opa has only log levels {debug,info,error} pub fn to_opa_literal(&self) -> String { match self { - LogLevel::TRACE | LogLevel::DEBUG => "debug", - LogLevel::INFO => "info", - LogLevel::WARN | LogLevel::ERROR | LogLevel::FATAL | LogLevel::NONE => "error", + Self::TRACE | Self::DEBUG => "debug", + Self::INFO => "info", + Self::WARN | Self::ERROR | Self::FATAL | Self::NONE => "error", } .into() } @@ -390,12 +390,12 @@ impl LogLevel { /// Convert the log level to a Python expression pub fn to_python_expression(&self) -> String { match self { - LogLevel::TRACE | LogLevel::DEBUG => "logging.DEBUG", - LogLevel::INFO => "logging.INFO", - LogLevel::WARN => "logging.WARNING", - LogLevel::ERROR => "logging.ERROR", - LogLevel::FATAL => "logging.CRITICAL", - LogLevel::NONE => "logging.CRITICAL + 1", + Self::TRACE | Self::DEBUG => "logging.DEBUG", + Self::INFO => "logging.INFO", + Self::WARN => "logging.WARNING", + Self::ERROR => "logging.ERROR", + Self::FATAL => "logging.CRITICAL", + Self::NONE => "logging.CRITICAL + 1", } .into() } diff --git a/crates/stackable-operator/src/status/condition/mod.rs b/crates/stackable-operator/src/status/condition/mod.rs index 20f470e7e..cc23fa008 100644 --- a/crates/stackable-operator/src/status/condition/mod.rs +++ b/crates/stackable-operator/src/status/condition/mod.rs @@ -275,7 +275,7 @@ pub struct ClusterConditionSet { impl ClusterConditionSet { pub fn new() -> Self { - ClusterConditionSet { + Self { // We use this as a quasi "Set" where each ClusterConditionType has its fixed position // This ensures ordering, and in contrast to e.g. a // BTreeMap, prevents shenanigans like adding a @@ -307,10 +307,10 @@ impl ClusterConditionSet { /// timestamps correctly. 
fn merge( self, - other: ClusterConditionSet, + other: Self, condition_combiner: fn(ClusterCondition, ClusterCondition) -> ClusterCondition, - ) -> ClusterConditionSet { - let mut result = ClusterConditionSet::new(); + ) -> Self { + let mut result = Self::new(); // Combine the two condition vectors of old and new `ClusterConditionSet`. for (old_condition, new_condition) in self @@ -400,7 +400,7 @@ impl From for Vec { impl From> for ClusterConditionSet { fn from(value: Vec) -> Self { - let mut result = ClusterConditionSet::new(); + let mut result = Self::new(); for c in value { result.put(c); } From 7b70c66c0c06bd3d107a894ee6dd160a8fa41401 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 15:51:00 +0200 Subject: [PATCH 06/12] clippy::or_fun_call --- crates/stackable-operator/src/builder/meta.rs | 10 +- .../src/builder/pod/container.rs | 6 +- .../src/builder/pod/security.rs | 234 ++++++++++-------- .../src/commons/product_image_selection.rs | 5 +- .../crd/authentication/ldap/v1alpha1_impl.rs | 2 +- .../crd/authentication/oidc/v1alpha1_impl.rs | 2 +- .../src/crd/s3/connection/v1alpha1_impl.rs | 2 +- crates/stackable-operator/src/lib.rs | 1 + .../src/product_config_utils.rs | 2 +- .../stackable-operator/src/utils/logging.rs | 5 +- 10 files changed, 154 insertions(+), 115 deletions(-) diff --git a/crates/stackable-operator/src/builder/meta.rs b/crates/stackable-operator/src/builder/meta.rs index bb926250b..7eddda036 100644 --- a/crates/stackable-operator/src/builder/meta.rs +++ b/crates/stackable-operator/src/builder/meta.rs @@ -106,7 +106,7 @@ impl ObjectMetaBuilder { /// It'll override an annotation with the same key. 
pub fn with_annotation(&mut self, annotation: Annotation) -> &mut Self { self.annotations - .get_or_insert(Annotations::new()) + .get_or_insert_with(Annotations::new) .insert(annotation); self } @@ -115,7 +115,7 @@ impl ObjectMetaBuilder { /// Any existing annotation with a key that is contained in `annotations` will be overwritten pub fn with_annotations(&mut self, annotations: Annotations) -> &mut Self { self.annotations - .get_or_insert(Annotations::new()) + .get_or_insert_with(Annotations::new) .extend(annotations); self } @@ -129,14 +129,14 @@ impl ObjectMetaBuilder { /// This adds a single label to the existing labels. /// It'll override a label with the same key. pub fn with_label(&mut self, label: Label) -> &mut Self { - self.labels.get_or_insert(Labels::new()).insert(label); + self.labels.get_or_insert_with(Labels::new).insert(label); self } /// This adds multiple labels to the existing labels. /// Any existing label with a key that is contained in `labels` will be overwritten pub fn with_labels(&mut self, labels: Labels) -> &mut Self { - self.labels.get_or_insert(Labels::new()).extend(labels); + self.labels.get_or_insert_with(Labels::new).extend(labels); self } @@ -158,7 +158,7 @@ impl ObjectMetaBuilder { Labels::recommended(object_labels).context(RecommendedLabelsSnafu)?; self.labels - .get_or_insert(Labels::new()) + .get_or_insert_with(Labels::new) .extend(recommended_labels); Ok(self) diff --git a/crates/stackable-operator/src/builder/pod/container.rs b/crates/stackable-operator/src/builder/pod/container.rs index c501bfa76..ce0738a63 100644 --- a/crates/stackable-operator/src/builder/pod/container.rs +++ b/crates/stackable-operator/src/builder/pod/container.rs @@ -305,13 +305,15 @@ impl ContainerBuilder { pub fn lifecycle_post_start(&mut self, post_start: LifecycleHandler) -> &mut Self { self.lifecycle - .get_or_insert(Lifecycle::default()) + .get_or_insert_with(Lifecycle::default) .post_start = Some(post_start); self } pub fn lifecycle_pre_stop(&mut 
self, pre_stop: LifecycleHandler) -> &mut Self { - self.lifecycle.get_or_insert(Lifecycle::default()).pre_stop = Some(pre_stop); + self.lifecycle + .get_or_insert_with(Lifecycle::default) + .pre_stop = Some(pre_stop); self } diff --git a/crates/stackable-operator/src/builder/pod/security.rs b/crates/stackable-operator/src/builder/pod/security.rs index 5cffa0f56..6a64ebb97 100644 --- a/crates/stackable-operator/src/builder/pod/security.rs +++ b/crates/stackable-operator/src/builder/pod/security.rs @@ -189,92 +189,116 @@ impl PodSecurityContextBuilder { } pub fn se_linux_level(&mut self, level: &str) -> &mut Self { - self.pod_security_context.se_linux_options = - Some(self.pod_security_context.se_linux_options.clone().map_or( - SELinuxOptions { - level: Some(level.to_string()), - ..SELinuxOptions::default() - }, - |o| SELinuxOptions { - level: Some(level.to_string()), - ..o - }, - )); + self.pod_security_context.se_linux_options = Some( + self.pod_security_context + .se_linux_options + .clone() + .map_or_else( + || SELinuxOptions { + level: Some(level.to_string()), + ..SELinuxOptions::default() + }, + |o| SELinuxOptions { + level: Some(level.to_string()), + ..o + }, + ), + ); self } pub fn se_linux_role(&mut self, role: &str) -> &mut Self { - self.pod_security_context.se_linux_options = - Some(self.pod_security_context.se_linux_options.clone().map_or( - SELinuxOptions { - role: Some(role.to_string()), - ..SELinuxOptions::default() - }, - |o| SELinuxOptions { - role: Some(role.to_string()), - ..o - }, - )); + self.pod_security_context.se_linux_options = Some( + self.pod_security_context + .se_linux_options + .clone() + .map_or_else( + || SELinuxOptions { + role: Some(role.to_string()), + ..SELinuxOptions::default() + }, + |o| SELinuxOptions { + role: Some(role.to_string()), + ..o + }, + ), + ); self } pub fn se_linux_type(&mut self, type_: &str) -> &mut Self { - self.pod_security_context.se_linux_options = - 
Some(self.pod_security_context.se_linux_options.clone().map_or( - SELinuxOptions { - type_: Some(type_.to_string()), - ..SELinuxOptions::default() - }, - |o| SELinuxOptions { - type_: Some(type_.to_string()), - ..o - }, - )); + self.pod_security_context.se_linux_options = Some( + self.pod_security_context + .se_linux_options + .clone() + .map_or_else( + || SELinuxOptions { + type_: Some(type_.to_string()), + ..SELinuxOptions::default() + }, + |o| SELinuxOptions { + type_: Some(type_.to_string()), + ..o + }, + ), + ); self } pub fn se_linux_user(&mut self, user: &str) -> &mut Self { - self.pod_security_context.se_linux_options = - Some(self.pod_security_context.se_linux_options.clone().map_or( - SELinuxOptions { - user: Some(user.to_string()), - ..SELinuxOptions::default() - }, - |o| SELinuxOptions { - user: Some(user.to_string()), - ..o - }, - )); + self.pod_security_context.se_linux_options = Some( + self.pod_security_context + .se_linux_options + .clone() + .map_or_else( + || SELinuxOptions { + user: Some(user.to_string()), + ..SELinuxOptions::default() + }, + |o| SELinuxOptions { + user: Some(user.to_string()), + ..o + }, + ), + ); self } pub fn seccomp_profile_localhost(&mut self, profile: &str) -> &mut Self { - self.pod_security_context.seccomp_profile = - Some(self.pod_security_context.seccomp_profile.clone().map_or( - SeccompProfile { - localhost_profile: Some(profile.to_string()), - ..SeccompProfile::default() - }, - |o| SeccompProfile { - localhost_profile: Some(profile.to_string()), - ..o - }, - )); + self.pod_security_context.seccomp_profile = Some( + self.pod_security_context + .seccomp_profile + .clone() + .map_or_else( + || SeccompProfile { + localhost_profile: Some(profile.to_string()), + ..SeccompProfile::default() + }, + |o| SeccompProfile { + localhost_profile: Some(profile.to_string()), + ..o + }, + ), + ); self } pub fn seccomp_profile_type(&mut self, type_: &str) -> &mut Self { - self.pod_security_context.seccomp_profile = - 
Some(self.pod_security_context.seccomp_profile.clone().map_or( - SeccompProfile { - type_: type_.to_string(), - ..SeccompProfile::default() - }, - |o| SeccompProfile { - type_: type_.to_string(), - ..o - }, - )); + self.pod_security_context.seccomp_profile = Some( + self.pod_security_context + .seccomp_profile + .clone() + .map_or_else( + || SeccompProfile { + type_: type_.to_string(), + ..SeccompProfile::default() + }, + |o| SeccompProfile { + type_: type_.to_string(), + ..o + }, + ), + ); self } @@ -292,47 +316,59 @@ impl PodSecurityContextBuilder { } pub fn win_credential_spec(&mut self, spec: &str) -> &mut Self { - self.pod_security_context.windows_options = - Some(self.pod_security_context.windows_options.clone().map_or( - WindowsSecurityContextOptions { - gmsa_credential_spec: Some(spec.to_string()), - ..WindowsSecurityContextOptions::default() - }, - |o| WindowsSecurityContextOptions { - gmsa_credential_spec: Some(spec.to_string()), - ..o - }, - )); + self.pod_security_context.windows_options = Some( + self.pod_security_context + .windows_options + .clone() + .map_or_else( + || WindowsSecurityContextOptions { + gmsa_credential_spec: Some(spec.to_string()), + ..WindowsSecurityContextOptions::default() + }, + |o| WindowsSecurityContextOptions { + gmsa_credential_spec: Some(spec.to_string()), + ..o + }, + ), + ); self } pub fn win_credential_spec_name(&mut self, name: &str) -> &mut Self { - self.pod_security_context.windows_options = - Some(self.pod_security_context.windows_options.clone().map_or( - WindowsSecurityContextOptions { - gmsa_credential_spec_name: Some(name.to_string()), - ..WindowsSecurityContextOptions::default() - }, - |o| WindowsSecurityContextOptions { - gmsa_credential_spec_name: Some(name.to_string()), - ..o - }, - )); + self.pod_security_context.windows_options = Some( + self.pod_security_context + .windows_options + .clone() + .map_or_else( + || WindowsSecurityContextOptions { + gmsa_credential_spec_name: Some(name.to_string()), + 
..WindowsSecurityContextOptions::default() + }, + |o| WindowsSecurityContextOptions { + gmsa_credential_spec_name: Some(name.to_string()), + ..o + }, + ), + ); self } pub fn win_run_as_user_name(&mut self, name: &str) -> &mut Self { - self.pod_security_context.windows_options = - Some(self.pod_security_context.windows_options.clone().map_or( - WindowsSecurityContextOptions { - run_as_user_name: Some(name.to_string()), - ..WindowsSecurityContextOptions::default() - }, - |o| WindowsSecurityContextOptions { - run_as_user_name: Some(name.to_string()), - ..o - }, - )); + self.pod_security_context.windows_options = Some( + self.pod_security_context + .windows_options + .clone() + .map_or_else( + || WindowsSecurityContextOptions { + run_as_user_name: Some(name.to_string()), + ..WindowsSecurityContextOptions::default() + }, + |o| WindowsSecurityContextOptions { + run_as_user_name: Some(name.to_string()), + ..o + }, + ), + ); self } } diff --git a/crates/stackable-operator/src/commons/product_image_selection.rs b/crates/stackable-operator/src/commons/product_image_selection.rs index 2b3b05e78..a11d823df 100644 --- a/crates/stackable-operator/src/commons/product_image_selection.rs +++ b/crates/stackable-operator/src/commons/product_image_selection.rs @@ -122,7 +122,10 @@ impl ProductImage { match &self.image_selection { ProductImageSelection::Custom(image_selection) => { let image = ImageRef::parse(&image_selection.custom); - let image_tag_or_hash = image.tag.or(image.hash).unwrap_or("latest".to_string()); + let image_tag_or_hash = image + .tag + .or(image.hash) + .unwrap_or_else(|| "latest".to_string()); let app_version = format!("{product_version}-{image_tag_or_hash}"); let app_version_label_value = Self::prepare_app_version_label_value(&app_version)?; diff --git a/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs index 3d49c4949..cfa266a56 100644 --- 
a/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/ldap/v1alpha1_impl.rs @@ -56,7 +56,7 @@ impl AuthenticationProvider { /// Returns the port to be used, which is either user configured or defaulted based upon TLS usage pub fn port(&self) -> u16 { self.port - .unwrap_or(if self.tls.uses_tls() { 636 } else { 389 }) + .unwrap_or_else(|| if self.tls.uses_tls() { 636 } else { 389 }) } /// This functions adds diff --git a/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs index 728de99be..5b40f190d 100644 --- a/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs @@ -108,7 +108,7 @@ impl AuthenticationProvider { /// Returns the port to be used, which is either user configured or defaulted based upon TLS usage pub fn port(&self) -> u16 { self.port - .unwrap_or(if self.tls.uses_tls() { 443 } else { 80 }) + .unwrap_or_else(|| if self.tls.uses_tls() { 443 } else { 80 }) } /// Returns the path of the files containing client id and secret in case they are given. 
diff --git a/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs index cd926165b..a473f7dbb 100644 --- a/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/s3/connection/v1alpha1_impl.rs @@ -76,7 +76,7 @@ impl ConnectionSpec { /// Returns the port to be used, which is either user configured or defaulted based upon TLS usage pub fn port(&self) -> u16 { self.port - .unwrap_or(if self.tls.uses_tls() { 443 } else { 80 }) + .unwrap_or_else(|| if self.tls.uses_tls() { 443 } else { 80 }) } /// This functions adds diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index 30bfe9fda..57100aba6 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -13,6 +13,7 @@ // Nursery lints // #![deny(clippy::nursery)] #![deny(clippy::use_self)] +#![deny(clippy::or_fun_call)] //! ## Crate Features //! 
diff --git a/crates/stackable-operator/src/product_config_utils.rs b/crates/stackable-operator/src/product_config_utils.rs index ec2bdd7fd..e957f7566 100644 --- a/crates/stackable-operator/src/product_config_utils.rs +++ b/crates/stackable-operator/src/product_config_utils.rs @@ -225,7 +225,7 @@ pub fn validate_all_roles_and_groups_config( let mut result = HashMap::new(); for (role, role_group) in role_config { - let role_entry = result.entry(role.clone()).or_insert(HashMap::new()); + let role_entry = result.entry(role.clone()).or_insert_with(HashMap::new); for (group, properties_by_kind) in role_group { role_entry.insert( diff --git a/crates/stackable-operator/src/utils/logging.rs b/crates/stackable-operator/src/utils/logging.rs index 7db400e72..7d05634ea 100644 --- a/crates/stackable-operator/src/utils/logging.rs +++ b/crates/stackable-operator/src/utils/logging.rs @@ -35,10 +35,7 @@ pub fn print_startup_string( built_time: &str, rustc_version: &str, ) { - let git = match git_version { - None => String::new(), - Some(git) => format!(" (Git information: {git})"), - }; + let git = git_version.map_or_else(String::new, |git| format!(" (Git information: {git})")); info!("Starting {pkg_description}"); info!( "This is version {pkg_version}{git}, built for {target} by {rustc_version} at {built_time}", From 2324fec3aeffb000b988b02252e296d89bfe4a0f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 31 Mar 2026 16:11:58 +0200 Subject: [PATCH 07/12] clippy::derive_partial_eq_without_eq --- crates/stackable-operator/src/builder/meta.rs | 2 +- crates/stackable-operator/src/builder/pdb.rs | 2 +- .../stackable-operator/src/builder/pod/mod.rs | 2 +- .../src/builder/pod/volume.rs | 13 ++++--------- .../src/cluster_resources.rs | 18 ++++++------------ .../src/commons/networking.rs | 2 +- crates/stackable-operator/src/commons/pdb.rs | 2 +- crates/stackable-operator/src/commons/rbac.rs | 2 +- .../src/commons/secret_class.rs | 2 +- crates/stackable-operator/src/cpu.rs | 4 
++-- .../crd/authentication/core/v1alpha1_impl.rs | 2 +- .../crd/authentication/oidc/v1alpha1_impl.rs | 2 +- crates/stackable-operator/src/kvp/key.rs | 6 +++--- .../src/kvp/label/selector.rs | 2 +- .../stackable-operator/src/kvp/label/value.rs | 2 +- crates/stackable-operator/src/kvp/mod.rs | 4 ++-- crates/stackable-operator/src/lib.rs | 7 ++++--- crates/stackable-operator/src/memory.rs | 2 +- .../src/product_logging/spec.rs | 18 ++++++++++++++++-- crates/stackable-operator/src/role_utils.rs | 10 +++++----- 20 files changed, 54 insertions(+), 50 deletions(-) diff --git a/crates/stackable-operator/src/builder/meta.rs b/crates/stackable-operator/src/builder/meta.rs index 7eddda036..a11fbffae 100644 --- a/crates/stackable-operator/src/builder/meta.rs +++ b/crates/stackable-operator/src/builder/meta.rs @@ -7,7 +7,7 @@ use crate::kvp::{Annotation, Annotations, Label, LabelError, Labels, ObjectLabel type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("failed to set recommended labels"))] RecommendedLabels { source: LabelError }, diff --git a/crates/stackable-operator/src/builder/pdb.rs b/crates/stackable-operator/src/builder/pdb.rs index 9542fd979..2d1af56f2 100644 --- a/crates/stackable-operator/src/builder/pdb.rs +++ b/crates/stackable-operator/src/builder/pdb.rs @@ -15,7 +15,7 @@ use crate::{ type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("failed to create role selector labels"))] RoleSelectorLabels { source: crate::kvp::LabelError }, diff --git a/crates/stackable-operator/src/builder/pod/mod.rs b/crates/stackable-operator/src/builder/pod/mod.rs index 8f12b9d00..edffdfeac 100644 --- a/crates/stackable-operator/src/builder/pod/mod.rs +++ b/crates/stackable-operator/src/builder/pod/mod.rs @@ -36,7 +36,7 @@ pub mod volume; type Result = std::result::Result; -#[derive(Debug, PartialEq, 
Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("termination grace period is too long (got {duration}, maximum allowed is {max})", max = Duration::from_secs(i64::MAX as u64)))] TerminationGracePeriodTooLong { diff --git a/crates/stackable-operator/src/builder/pod/volume.rs b/crates/stackable-operator/src/builder/pod/volume.rs index 732d5b348..b16400249 100644 --- a/crates/stackable-operator/src/builder/pod/volume.rs +++ b/crates/stackable-operator/src/builder/pod/volume.rs @@ -41,9 +41,7 @@ pub enum VolumeSource { impl Default for VolumeSource { fn default() -> Self { - Self::EmptyDir(EmptyDirVolumeSource { - ..EmptyDirVolumeSource::default() - }) + Self::EmptyDir(EmptyDirVolumeSource::default()) } } @@ -267,7 +265,7 @@ impl VolumeMountBuilder { } } -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum SecretOperatorVolumeSourceBuilderError { #[snafu(display("failed to parse secret operator volume annotation"))] ParseAnnotation { source: AnnotationError }, @@ -440,7 +438,7 @@ impl ListenerReference { // NOTE (Techassi): We might want to think about these names and how long they // are getting. 
-#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum ListenerOperatorVolumeSourceBuilderError { #[snafu(display("failed to convert listener reference into Kubernetes annotation"))] ListenerReferenceAnnotation { source: AnnotationError }, @@ -490,10 +488,7 @@ pub struct ListenerOperatorVolumeSourceBuilder { impl ListenerOperatorVolumeSourceBuilder { /// Create a builder for the given listener class or listener name - pub fn new( - listener_reference: &ListenerReference, - labels: &Labels, - ) -> Self { + pub fn new(listener_reference: &ListenerReference, labels: &Labels) -> Self { Self { listener_reference: listener_reference.to_owned(), labels: labels.to_owned(), diff --git a/crates/stackable-operator/src/cluster_resources.rs b/crates/stackable-operator/src/cluster_resources.rs index 606ce5baa..db4039ab8 100644 --- a/crates/stackable-operator/src/cluster_resources.rs +++ b/crates/stackable-operator/src/cluster_resources.rs @@ -205,10 +205,8 @@ impl ClusterResourceApplyStrategy { /// Indicates if orphaned resources should be deleted depending on the strategy. 
const fn delete_orphans(&self) -> bool { match self { - Self::NoApply - | Self::ReconciliationPaused => false, - Self::ClusterStopped - | Self::Default => true, + Self::NoApply | Self::ReconciliationPaused => false, + Self::ClusterStopped | Self::Default => true, } } } @@ -261,14 +259,10 @@ impl ClusterResource for DaemonSet { spec: Some(DaemonSetSpec { template: PodTemplateSpec { spec: Some(PodSpec { - node_selector: Some( - [( - "stackable.tech/do-not-schedule".to_string(), - "cluster-stopped".to_string(), - )] - .into_iter() - .collect::>(), - ), + node_selector: Some(BTreeMap::from([( + "stackable.tech/do-not-schedule".to_string(), + "cluster-stopped".to_string(), + )])), ..self .spec .clone() diff --git a/crates/stackable-operator/src/commons/networking.rs b/crates/stackable-operator/src/commons/networking.rs index b8800fbc1..25550bb8c 100644 --- a/crates/stackable-operator/src/commons/networking.rs +++ b/crates/stackable-operator/src/commons/networking.rs @@ -143,7 +143,7 @@ impl HostName { } /// A validated kerberos realm name type, for use in CRDs. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)] #[serde(try_from = "String", into = "String")] pub struct KerberosRealmName( // Note: Starting with schemars 1.0 and kube 2.0, this pattern is missing in the CRD diff --git a/crates/stackable-operator/src/commons/pdb.rs b/crates/stackable-operator/src/commons/pdb.rs index 80f2ae0be..80280b362 100644 --- a/crates/stackable-operator/src/commons/pdb.rs +++ b/crates/stackable-operator/src/commons/pdb.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; /// /// Learn more in the /// [allowed Pod disruptions documentation](DOCS_BASE_URL_PLACEHOLDER/concepts/operations/pod_disruptions). 
-#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq, Serialize)] #[serde(rename_all = "camelCase")] pub struct PdbConfig { /// Whether a PodDisruptionBudget should be written out for this role. diff --git a/crates/stackable-operator/src/commons/rbac.rs b/crates/stackable-operator/src/commons/rbac.rs index 7812d2d25..eeb89c778 100644 --- a/crates/stackable-operator/src/commons/rbac.rs +++ b/crates/stackable-operator/src/commons/rbac.rs @@ -12,7 +12,7 @@ use crate::{ type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("failed to set owner reference from resource for ServiceAccount {name:?}"))] ServiceAccountOwnerReferenceFromResource { diff --git a/crates/stackable-operator/src/commons/secret_class.rs b/crates/stackable-operator/src/commons/secret_class.rs index 2ed8ca7de..397015e27 100644 --- a/crates/stackable-operator/src/commons/secret_class.rs +++ b/crates/stackable-operator/src/commons/secret_class.rs @@ -7,7 +7,7 @@ use crate::builder::pod::volume::{ SecretOperatorVolumeSourceBuilder, SecretOperatorVolumeSourceBuilderError, VolumeBuilder, }; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum SecretClassVolumeError { #[snafu(display("failed to build secret operator volume"))] SecretOperatorVolume { diff --git a/crates/stackable-operator/src/cpu.rs b/crates/stackable-operator/src/cpu.rs index e4e5f62ee..5d6bfc508 100644 --- a/crates/stackable-operator/src/cpu.rs +++ b/crates/stackable-operator/src/cpu.rs @@ -11,7 +11,7 @@ use snafu::{ResultExt, Snafu}; pub type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display( "unsupported precision {value:?}. Kubernetes doesn't allow you to specify CPU resources with a precision finer than 1m. 
Because of this, it's useful to specify CPU units less than 1.0 or 1000m using the milliCPU form; for example, 5m rather than 0.005" @@ -37,7 +37,7 @@ pub enum Error { /// A CPU quantity cannot have a precision finer than 'm' (millis) in Kubernetes. /// So we use that as our internal representation (see: /// ``). -#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd)] pub struct CpuQuantity { millis: usize, } diff --git a/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs index 8a2d87229..fd0fbbd77 100644 --- a/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/core/v1alpha1_impl.rs @@ -11,7 +11,7 @@ use crate::{ type Result = std::result::Result; // NOTE (@Techassi): Where is the best place to put this? -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display( "authentication details for OIDC were not specified. 
The AuthenticationClass {auth_class_name:?} uses an OIDC provider, you need to specify OIDC authentication details (such as client credentials) as well" diff --git a/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs b/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs index 5b40f190d..c9d24bab4 100644 --- a/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs +++ b/crates/stackable-operator/src/crd/authentication/oidc/v1alpha1_impl.rs @@ -18,7 +18,7 @@ use crate::{ pub type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("failed to parse OIDC endpoint url"))] ParseOidcEndpointUrl { source: ParseError }, diff --git a/crates/stackable-operator/src/kvp/key.rs b/crates/stackable-operator/src/kvp/key.rs index b07f9c1f0..0c9b538dd 100644 --- a/crates/stackable-operator/src/kvp/key.rs +++ b/crates/stackable-operator/src/kvp/key.rs @@ -22,7 +22,7 @@ static KEY_NAME_REGEX: LazyLock = LazyLock::new(|| { /// This error will be returned if the input is empty, the parser encounters /// multiple prefixes or any deeper errors occur during key prefix and key name /// parsing. -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum KeyError { /// Indicates that the input is empty. The key must at least contain a name. /// The prefix is optional. @@ -169,7 +169,7 @@ impl Key { } /// The error type for key prefix parsing/validation operations. -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum KeyPrefixError { /// Indicates that the key prefix segment is empty, which is not permitted /// when the key indicates that a prefix is present (via a slash). This @@ -254,7 +254,7 @@ where } /// The error type for key name parsing/validation operations. 
-#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum KeyNameError { /// Indicates that the key name segment is empty. The key name is required /// and therefore cannot be empty. diff --git a/crates/stackable-operator/src/kvp/label/selector.rs b/crates/stackable-operator/src/kvp/label/selector.rs index e7f4fe754..2775db9fe 100644 --- a/crates/stackable-operator/src/kvp/label/selector.rs +++ b/crates/stackable-operator/src/kvp/label/selector.rs @@ -3,7 +3,7 @@ use snafu::Snafu; type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum SelectorError { #[snafu(display("label selector with binary operator {operator:?} must have values"))] LabelSelectorBinaryOperatorWithoutValues { operator: String }, diff --git a/crates/stackable-operator/src/kvp/label/value.rs b/crates/stackable-operator/src/kvp/label/value.rs index 510887e65..6b0faf1d5 100644 --- a/crates/stackable-operator/src/kvp/label/value.rs +++ b/crates/stackable-operator/src/kvp/label/value.rs @@ -15,7 +15,7 @@ static LABEL_VALUE_REGEX: LazyLock = LazyLock::new(|| { }); /// The error type for label value parse/validation operations. -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum LabelValueError { /// Indicates that the label value exceeds the maximum length of 63 ASCII /// characters. It additionally reports how many characters were diff --git a/crates/stackable-operator/src/kvp/mod.rs b/crates/stackable-operator/src/kvp/mod.rs index 7e945d996..d6733d802 100644 --- a/crates/stackable-operator/src/kvp/mod.rs +++ b/crates/stackable-operator/src/kvp/mod.rs @@ -23,7 +23,7 @@ pub use label::*; pub use value::*; /// The error type for key/value pair parsing/validating operations. 
-#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum KeyValuePairError where E: std::error::Error + 'static, @@ -150,7 +150,7 @@ where } } -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum KeyValuePairsError { #[snafu(display("key already exists"))] KeyAlreadyExists, diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index 57100aba6..742383f6e 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -1,4 +1,4 @@ -// Pedantic lints +// We enable all pedantic lints and explicitly disable a few #![deny(clippy::pedantic)] #![expect(clippy::doc_markdown)] #![expect(clippy::missing_errors_doc)] @@ -10,10 +10,11 @@ #![expect(clippy::float_cmp)] #![expect(clippy::cast_sign_loss)] #![expect(clippy::cast_precision_loss)] -// Nursery lints -// #![deny(clippy::nursery)] +// Additionally, we explicitly enable a few nursery lints #![deny(clippy::use_self)] #![deny(clippy::or_fun_call)] +#![deny(clippy::derive_partial_eq_without_eq)] +#![deny(clippy::unnecessary_struct_initialization)] //! ## Crate Features //! 
diff --git a/crates/stackable-operator/src/memory.rs b/crates/stackable-operator/src/memory.rs index ab9e7913c..49d198a9a 100644 --- a/crates/stackable-operator/src/memory.rs +++ b/crates/stackable-operator/src/memory.rs @@ -21,7 +21,7 @@ use snafu::{OptionExt, ResultExt, Snafu}; pub type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("cannot convert quantity {value:?} to Java heap"))] CannotConvertToJavaHeap { value: String }, diff --git a/crates/stackable-operator/src/product_logging/spec.rs b/crates/stackable-operator/src/product_logging/spec.rs index db6622b61..43afdeb36 100644 --- a/crates/stackable-operator/src/product_logging/spec.rs +++ b/crates/stackable-operator/src/product_logging/spec.rs @@ -102,6 +102,7 @@ where JsonSchema, Merge, PartialEq, + Eq, Serialize ), merge(path_overrides(merge = "crate::config::merge")), @@ -127,7 +128,7 @@ pub enum ContainerLogConfigChoice { Automatic(AutomaticContainerLogConfig), } -#[derive(Clone, Debug, Deserialize, JsonSchema, Merge, PartialEq, Serialize, Educe)] +#[derive(Clone, Debug, Deserialize, JsonSchema, Merge, PartialEq, Eq, Serialize, Educe)] #[educe(Default)] #[merge(path_overrides(merge = "crate::config::merge"))] #[serde(untagged)] @@ -171,6 +172,7 @@ impl FromFragment for ContainerLogConfigChoice { JsonSchema, Merge, PartialEq, + Eq, Serialize ), merge(path_overrides(merge = "crate::config::merge")), @@ -192,6 +194,7 @@ pub struct CustomContainerLogConfig { JsonSchema, Merge, PartialEq, + Eq, Serialize ), merge(path_overrides(merge = "crate::config::merge")), @@ -207,7 +210,16 @@ pub struct ConfigMapLogConfig { #[derive(Clone, Debug, Default, Eq, Fragment, JsonSchema, PartialEq)] #[fragment(path_overrides(fragment = "crate::config::fragment"))] #[fragment_attrs( - derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize), + derive( + Clone, + Debug, + Default, + Deserialize, + JsonSchema, + 
PartialEq, + Eq, + Serialize + ), serde(rename_all = "camelCase") )] pub struct AutomaticContainerLogConfig { @@ -264,6 +276,7 @@ impl AutomaticContainerLogConfig { Deserialize, JsonSchema, PartialEq, + Eq, Merge, Serialize ), @@ -288,6 +301,7 @@ pub struct LoggerConfig { JsonSchema, Merge, PartialEq, + Eq, Serialize ), merge(path_overrides(merge = "crate::config::merge")), diff --git a/crates/stackable-operator/src/role_utils.rs b/crates/stackable-operator/src/role_utils.rs index b82bd6623..2901bd54d 100644 --- a/crates/stackable-operator/src/role_utils.rs +++ b/crates/stackable-operator/src/role_utils.rs @@ -177,10 +177,10 @@ impl CommonConfiguration Date: Wed, 1 Apr 2026 09:33:52 +0200 Subject: [PATCH 08/12] Also run for all other crates --- Cargo.toml | 23 ++++++ crates/k8s-version/src/api_version/darling.rs | 6 +- crates/k8s-version/src/api_version/mod.rs | 7 +- crates/k8s-version/src/group.rs | 2 +- crates/k8s-version/src/level/darling.rs | 6 +- crates/k8s-version/src/level/mod.rs | 40 +++++------ crates/k8s-version/src/version/darling.rs | 6 +- crates/k8s-version/src/version/mod.rs | 8 +-- crates/stackable-certs/src/ca/mod.rs | 14 ++-- crates/stackable-certs/src/keys/ecdsa.rs | 2 +- crates/stackable-certs/src/keys/rsa.rs | 2 +- crates/stackable-certs/src/lib.rs | 12 ++-- .../stackable-operator-derive/src/fragment.rs | 4 +- crates/stackable-operator-derive/src/lib.rs | 4 +- crates/stackable-operator-derive/src/merge.rs | 4 +- crates/stackable-operator/src/helm/mod.rs | 2 +- crates/stackable-operator/src/lib.rs | 18 ----- crates/stackable-shared/src/crd.rs | 6 +- crates/stackable-shared/src/secret.rs | 6 +- crates/stackable-shared/src/time/duration.rs | 60 ++++++++-------- crates/stackable-shared/src/time/jiff_impl.rs | 2 +- crates/stackable-shared/src/time/time_impl.rs | 10 +-- crates/stackable-shared/src/yaml.rs | 8 +-- .../src/instrumentation/axum/injector.rs | 4 +- .../src/instrumentation/axum/mod.rs | 10 +-- crates/stackable-telemetry/src/tracing/mod.rs | 
72 +++++++++---------- .../src/tracing/settings/console_log.rs | 27 +++---- .../src/tracing/settings/file_log.rs | 10 +-- .../src/tracing/settings/mod.rs | 2 +- .../src/tracing/settings/otlp_log.rs | 19 ++--- .../src/tracing/settings/otlp_trace.rs | 19 ++--- .../src/attrs/item/field.rs | 2 +- .../src/attrs/item/mod.rs | 21 +++--- .../src/attrs/item/variant.rs | 8 +-- .../src/attrs/module.rs | 5 +- .../src/codegen/changes.rs | 2 +- .../src/codegen/container/mod.rs | 15 ++-- .../codegen/container/struct/conversion.rs | 15 ++-- .../src/codegen/container/struct/mod.rs | 25 +++---- .../src/codegen/item/field.rs | 26 ++++--- .../src/codegen/item/mod.rs | 14 ++-- .../src/codegen/item/variant.rs | 2 + .../src/codegen/module.rs | 4 +- .../src/test_utils.rs | 4 +- .../src/utils/doc_comments.rs | 2 +- crates/stackable-versioned/src/lib.rs | 11 ++- .../stackable-versioned/tests/conversions.rs | 4 +- crates/stackable-webhook/src/lib.rs | 2 +- .../src/tls/cert_resolver.rs | 2 +- crates/stackable-webhook/src/tls/mod.rs | 18 ++--- .../src/webhooks/conversion_webhook.rs | 4 +- crates/stackable-webhook/src/webhooks/mod.rs | 4 +- .../src/webhooks/mutating_webhook.rs | 1 + crates/xtask/src/crd/mod.rs | 2 +- 54 files changed, 300 insertions(+), 308 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 42a36c9d3..12c4345fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,6 +94,29 @@ unwrap_in_result = "deny" unwrap_used = "deny" panic = "deny" +# Enable all pedantic lints (with lower priority so individual lints can override) +pedantic = { level = "deny", priority = -1 } + +# Pedantic lints we don't enforce (yet) +doc_markdown = "allow" +missing_errors_doc = "allow" +must_use_candidate = "allow" +return_self_not_must_use = "allow" +missing_panics_doc = "allow" +explicit_deref_methods = "allow" +cast_possible_truncation = "allow" +float_cmp = "allow" +cast_sign_loss = "allow" +cast_precision_loss = "allow" +needless_continue = "allow" +unchecked_time_subtraction = "allow" + +# 
Additional nursery lints we enforce +use_self = "deny" +or_fun_call = "deny" +derive_partial_eq_without_eq = "deny" +unnecessary_struct_initialization = "deny" + # Use O3 in tests to improve the RSA key generation speed in the stackable-certs crate [profile.test.package] stackable-certs.opt-level = 3 diff --git a/crates/k8s-version/src/api_version/darling.rs b/crates/k8s-version/src/api_version/darling.rs index e81ad2e88..a3c1b4427 100644 --- a/crates/k8s-version/src/api_version/darling.rs +++ b/crates/k8s-version/src/api_version/darling.rs @@ -18,9 +18,9 @@ mod test { use super::*; use crate::{Level, Version}; - fn parse_meta(tokens: proc_macro2::TokenStream) -> ::std::result::Result { + fn parse_meta(tokens: &proc_macro2::TokenStream) -> syn::Meta { let attribute: syn::Attribute = syn::parse_quote!(#[#tokens]); - Ok(attribute.meta) + attribute.meta } #[rstest] @@ -28,7 +28,7 @@ mod test { #[case(quote!(ignore = "v1beta1"), ApiVersion { group: None, version: Version { major: 1, level: Some(Level::Beta(1)) } })] #[case(quote!(ignore = "v1"), ApiVersion { group: None, version: Version { major: 1, level: None } })] fn from_meta(#[case] input: proc_macro2::TokenStream, #[case] expected: ApiVersion) { - let meta = parse_meta(input).expect("valid attribute tokens"); + let meta = parse_meta(&input); let api_version = ApiVersion::from_meta(&meta).expect("version must parse from attribute"); assert_eq!(api_version, expected); } diff --git a/crates/k8s-version/src/api_version/mod.rs b/crates/k8s-version/src/api_version/mod.rs index bdd72193b..29c233305 100644 --- a/crates/k8s-version/src/api_version/mod.rs +++ b/crates/k8s-version/src/api_version/mod.rs @@ -12,7 +12,7 @@ mod darling; /// Error variants which can be encountered when creating a new [`ApiVersion`] /// from unparsed input. 
-#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum ParseApiVersionError { #[snafu(display("failed to parse version"))] ParseVersion { source: ParseVersionError }, @@ -87,10 +87,7 @@ impl ApiVersion { /// Try to create a new Kubernetes API version based on the unvalidated /// `group` string. pub fn try_new(group: Option<&str>, version: Version) -> Result { - let group = group - .map(|g| g.parse()) - .transpose() - .context(ParseGroupSnafu)?; + let group = group.map(str::parse).transpose().context(ParseGroupSnafu)?; Ok(Self { group, version }) } diff --git a/crates/k8s-version/src/group.rs b/crates/k8s-version/src/group.rs index cef7c5ca5..d4bf9f8ec 100644 --- a/crates/k8s-version/src/group.rs +++ b/crates/k8s-version/src/group.rs @@ -12,7 +12,7 @@ static API_GROUP_REGEX: LazyLock = LazyLock::new(|| { /// Error variants which can be encountered when creating a new [`Group`] from /// unparsed input. -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum ParseGroupError { #[snafu(display("group must not be empty"))] Empty, diff --git a/crates/k8s-version/src/level/darling.rs b/crates/k8s-version/src/level/darling.rs index 41aaad4f5..5e12d0d74 100644 --- a/crates/k8s-version/src/level/darling.rs +++ b/crates/k8s-version/src/level/darling.rs @@ -17,9 +17,9 @@ mod tests { use super::*; - fn parse_meta(tokens: proc_macro2::TokenStream) -> ::std::result::Result { + fn parse_meta(tokens: &proc_macro2::TokenStream) -> syn::Meta { let attribute: syn::Attribute = syn::parse_quote!(#[#tokens]); - Ok(attribute.meta) + attribute.meta } #[rstest] @@ -27,7 +27,7 @@ mod tests { #[case(quote!(ignore = "alpha1"), Level::Alpha(1))] #[case(quote!(ignore = "beta1"), Level::Beta(1))] fn from_meta(#[case] input: proc_macro2::TokenStream, #[case] expected: Level) { - let meta = parse_meta(input).expect("valid attribute tokens"); + let meta = parse_meta(&input); let version = Level::from_meta(&meta).expect("level must parse from 
attribute"); assert_eq!(version, expected); } diff --git a/crates/k8s-version/src/level/mod.rs b/crates/k8s-version/src/level/mod.rs index 43bbf3b5c..9f65d2d71 100644 --- a/crates/k8s-version/src/level/mod.rs +++ b/crates/k8s-version/src/level/mod.rs @@ -22,7 +22,7 @@ static LEVEL_REGEX: LazyLock = LazyLock::new(|| { /// Error variants which can be encountered when creating a new [`Level`] from /// unparsed input. -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum ParseLevelError { #[snafu(display("invalid level format, expected alpha|beta"))] InvalidFormat, @@ -87,13 +87,13 @@ impl PartialOrd for Level { impl Ord for Level { fn cmp(&self, other: &Self) -> Ordering { match self { - Level::Alpha(lhs) => match other { - Level::Alpha(rhs) => lhs.cmp(rhs), - Level::Beta(_) => Ordering::Less, + Self::Alpha(lhs) => match other { + Self::Alpha(rhs) => lhs.cmp(rhs), + Self::Beta(_) => Ordering::Less, }, - Level::Beta(lhs) => match other { - Level::Alpha(_) => Ordering::Greater, - Level::Beta(rhs) => lhs.cmp(rhs), + Self::Beta(lhs) => match other { + Self::Alpha(_) => Ordering::Greater, + Self::Beta(rhs) => lhs.cmp(rhs), }, } } @@ -103,12 +103,12 @@ impl Add for Level where T: Into, { - type Output = Level; + type Output = Self; fn add(self, rhs: T) -> Self::Output { match self { - Level::Alpha(lhs) => Level::Alpha(lhs + rhs.into()), - Level::Beta(lhs) => Level::Beta(lhs + rhs.into()), + Self::Alpha(lhs) => Self::Alpha(lhs + rhs.into()), + Self::Beta(lhs) => Self::Beta(lhs + rhs.into()), } } } @@ -119,8 +119,7 @@ where { fn add_assign(&mut self, rhs: T) { match self { - Level::Alpha(lhs) => *lhs + rhs.into(), - Level::Beta(lhs) => *lhs + rhs.into(), + Self::Alpha(lhs) | Self::Beta(lhs) => *lhs + rhs.into(), }; } } @@ -129,12 +128,12 @@ impl Sub for Level where T: Into, { - type Output = Level; + type Output = Self; fn sub(self, rhs: T) -> Self::Output { match self { - Level::Alpha(lhs) => Level::Alpha(lhs - rhs.into()), - Level::Beta(lhs) 
=> Level::Beta(lhs - rhs.into()), + Self::Alpha(lhs) => Self::Alpha(lhs - rhs.into()), + Self::Beta(lhs) => Self::Beta(lhs - rhs.into()), } } } @@ -145,8 +144,7 @@ where { fn sub_assign(&mut self, rhs: T) { match self { - Level::Alpha(lhs) => *lhs - rhs.into(), - Level::Beta(lhs) => *lhs - rhs.into(), + Self::Alpha(lhs) | Self::Beta(lhs) => *lhs - rhs.into(), }; } } @@ -154,8 +152,8 @@ where impl Display for Level { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Level::Alpha(alpha) => write!(f, "alpha{}", alpha), - Level::Beta(beta) => write!(f, "beta{}", beta), + Self::Alpha(alpha) => write!(f, "alpha{alpha}"), + Self::Beta(beta) => write!(f, "beta{beta}"), } } } @@ -181,11 +179,11 @@ mod test { #[apply(ord_cases)] fn ord(input: Level, other: Level, expected: Ordering) { - assert_eq!(input.cmp(&other), expected) + assert_eq!(input.cmp(&other), expected); } #[apply(ord_cases)] fn partial_ord(input: Level, other: Level, expected: Ordering) { - assert_eq!(input.partial_cmp(&other), Some(expected)) + assert_eq!(input.partial_cmp(&other), Some(expected)); } } diff --git a/crates/k8s-version/src/version/darling.rs b/crates/k8s-version/src/version/darling.rs index 3c2ae3216..0b69b03af 100644 --- a/crates/k8s-version/src/version/darling.rs +++ b/crates/k8s-version/src/version/darling.rs @@ -18,9 +18,9 @@ mod tests { use super::*; use crate::Level; - fn parse_meta(tokens: proc_macro2::TokenStream) -> ::std::result::Result { + fn parse_meta(tokens: &proc_macro2::TokenStream) -> syn::Meta { let attribute: syn::Attribute = syn::parse_quote!(#[#tokens]); - Ok(attribute.meta) + attribute.meta } #[cfg(feature = "darling")] @@ -30,7 +30,7 @@ mod tests { #[case(quote!(ignore = "v1beta1"), Version { major: 1, level: Some(Level::Beta(1)) })] #[case(quote!(ignore = "v1"), Version { major: 1, level: None })] fn from_meta(#[case] input: proc_macro2::TokenStream, #[case] expected: Version) { - let meta = parse_meta(input).expect("valid attribute 
tokens"); + let meta = parse_meta(&input); let version = Version::from_meta(&meta).expect("version must parse from attribute"); assert_eq!(version, expected); } diff --git a/crates/k8s-version/src/version/mod.rs b/crates/k8s-version/src/version/mod.rs index f41d6fc74..04a59e179 100644 --- a/crates/k8s-version/src/version/mod.rs +++ b/crates/k8s-version/src/version/mod.rs @@ -18,7 +18,7 @@ static VERSION_REGEX: LazyLock = LazyLock::new(|| { /// Error variants which can be encountered when creating a new [`Version`] from /// unparsed input. -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum ParseVersionError { #[snafu(display( "invalid version format. Input is empty, contains non-ASCII characters or contains more than 63 characters" @@ -153,16 +153,16 @@ mod test { #[case("", ParseVersionError::InvalidFormat)] fn invalid_version(#[case] input: &str, #[case] error: ParseVersionError) { let err = Version::from_str(input).expect_err("invalid Kubernetes version"); - assert_eq!(err, error) + assert_eq!(err, error); } #[apply(ord_cases)] fn ord(input: Version, other: Version, expected: Ordering) { - assert_eq!(input.cmp(&other), expected) + assert_eq!(input.cmp(&other), expected); } #[apply(ord_cases)] fn partial_ord(input: Version, other: Version, expected: Ordering) { - assert_eq!(input.partial_cmp(&other), Some(expected)) + assert_eq!(input.partial_cmp(&other), Some(expected)); } } diff --git a/crates/stackable-certs/src/ca/mod.rs b/crates/stackable-certs/src/ca/mod.rs index 12952f27a..97cda9a1c 100644 --- a/crates/stackable-certs/src/ca/mod.rs +++ b/crates/stackable-certs/src/ca/mod.rs @@ -415,7 +415,7 @@ where /// Kubernetes [`Secret`]. Common keys are `ca.crt` and `ca.key`. 
#[instrument(name = "create_certificate_authority_from_k8s_secret", skip(secret))] pub fn from_secret( - secret: Secret, + secret: &Secret, key_certificate: &str, key_private_key: &str, ) -> Result> { @@ -424,19 +424,19 @@ where } let data = secret.data.as_ref().with_context(|| NoSecretDataSnafu { - secret: ObjectRef::from_obj(&secret), + secret: ObjectRef::from_obj(secret), })?; debug!("retrieving certificate data from secret via key {key_certificate:?}"); let certificate_data = data.get(key_certificate) .with_context(|| NoCertificateDataSnafu { - secret: ObjectRef::from_obj(&secret), + secret: ObjectRef::from_obj(secret), })?; let certificate = x509_cert::Certificate::load_pem_chain(&certificate_data.0) .with_context(|_| ReadChainSnafu { - secret: ObjectRef::from_obj(&secret), + secret: ObjectRef::from_obj(secret), })? .remove(0); @@ -444,7 +444,7 @@ where let private_key_data = data.get(key_private_key) .with_context(|| NoPrivateKeyDataSnafu { - secret: ObjectRef::from_obj(&secret), + secret: ObjectRef::from_obj(secret), })?; let private_key_data = @@ -472,7 +472,7 @@ where key_private_key: &str, client: Client, ) -> Result> { - let secret_api = Api::namespaced(client, &secret_ref.namespace); + let secret_api: Api = Api::namespaced(client, &secret_ref.namespace); let secret = secret_api .get(&secret_ref.name) .await @@ -480,7 +480,7 @@ where secret_ref: secret_ref.to_owned(), })?; - Self::from_secret(secret, key_certificate, key_private_key) + Self::from_secret(&secret, key_certificate, key_private_key) } /// Returns the ca certificate. 
diff --git a/crates/stackable-certs/src/keys/ecdsa.rs b/crates/stackable-certs/src/keys/ecdsa.rs index 38de9d225..25be98cf5 100644 --- a/crates/stackable-certs/src/keys/ecdsa.rs +++ b/crates/stackable-certs/src/keys/ecdsa.rs @@ -10,7 +10,7 @@ use crate::keys::CertificateKeypair; pub type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(context(false))] SerializeKeyToPem { source: x509_cert::spki::Error }, diff --git a/crates/stackable-certs/src/keys/rsa.rs b/crates/stackable-certs/src/keys/rsa.rs index 2bcd018ee..c33e18126 100644 --- a/crates/stackable-certs/src/keys/rsa.rs +++ b/crates/stackable-certs/src/keys/rsa.rs @@ -17,7 +17,7 @@ const KEY_SIZE: usize = 512; pub type Result = std::result::Result; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] pub enum Error { #[snafu(display("failed to create RSA key"))] CreateKey { source: rsa::Error }, diff --git a/crates/stackable-certs/src/lib.rs b/crates/stackable-certs/src/lib.rs index 80ed898d2..df59652b7 100644 --- a/crates/stackable-certs/src/lib.rs +++ b/crates/stackable-certs/src/lib.rs @@ -78,10 +78,8 @@ where impl PartialEq for CertificatePairError { fn eq(&self, other: &Self) -> bool { match (self, other) { - (Self::WriteFile { source: lhs_source }, Self::WriteFile { source: rhs_source }) => { - lhs_source.kind() == rhs_source.kind() - } - (Self::ReadFile { source: lhs_source }, Self::ReadFile { source: rhs_source }) => { + (Self::WriteFile { source: lhs_source }, Self::WriteFile { source: rhs_source }) + | (Self::ReadFile { source: lhs_source }, Self::ReadFile { source: rhs_source }) => { lhs_source.kind() == rhs_source.kind() } (lhs, rhs) => lhs == rhs, @@ -169,7 +167,7 @@ pub enum PrivateKeyType { } /// Private and public key encoding, either DER or PEM. 
-#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum KeyEncoding { Pem, Der, @@ -178,8 +176,8 @@ pub enum KeyEncoding { impl std::fmt::Display for KeyEncoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - KeyEncoding::Pem => write!(f, "PEM"), - KeyEncoding::Der => write!(f, "DER"), + Self::Pem => write!(f, "PEM"), + Self::Der => write!(f, "DER"), } } } diff --git a/crates/stackable-operator-derive/src/fragment.rs b/crates/stackable-operator-derive/src/fragment.rs index 6b2a9fae7..772dba62a 100644 --- a/crates/stackable-operator-derive/src/fragment.rs +++ b/crates/stackable-operator-derive/src/fragment.rs @@ -105,7 +105,7 @@ struct FragmentField { attrs: Vec, } -pub fn derive(input: DeriveInput) -> TokenStream { +pub fn derive(input: &DeriveInput) -> TokenStream { let FragmentInput { ident, data, @@ -117,7 +117,7 @@ pub fn derive(input: DeriveInput) -> TokenStream { fragment: fragment_mod, result: result_mod, }, - } = match FragmentInput::from_derive_input(&input) { + } = match FragmentInput::from_derive_input(input) { Ok(input) => input, Err(err) => return err.write_errors(), }; diff --git a/crates/stackable-operator-derive/src/lib.rs b/crates/stackable-operator-derive/src/lib.rs index bf7dfe582..b91def54b 100644 --- a/crates/stackable-operator-derive/src/lib.rs +++ b/crates/stackable-operator-derive/src/lib.rs @@ -56,7 +56,7 @@ mod merge; /// or the `stackable_operator` crate is renamed. #[proc_macro_derive(Merge, attributes(merge))] pub fn derive_merge(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - merge::derive(parse_macro_input!(input)).into() + merge::derive(&parse_macro_input!(input)).into() } /// Creates a [fragment type](index.html) for the given type, and implements [`FromFragment`](trait.FromFragment.html). @@ -145,5 +145,5 @@ pub fn derive_merge(input: proc_macro::TokenStream) -> proc_macro::TokenStream { /// Enums are not currently supported. 
#[proc_macro_derive(Fragment, attributes(fragment, fragment_attrs))] pub fn derive_fragment(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - fragment::derive(parse_macro_input!(input)).into() + fragment::derive(&parse_macro_input!(input)).into() } diff --git a/crates/stackable-operator-derive/src/merge.rs b/crates/stackable-operator-derive/src/merge.rs index 0598b275f..dd98a2969 100644 --- a/crates/stackable-operator-derive/src/merge.rs +++ b/crates/stackable-operator-derive/src/merge.rs @@ -53,14 +53,14 @@ enum InputType { Enum, } -pub fn derive(input: DeriveInput) -> TokenStream { +pub fn derive(input: &DeriveInput) -> TokenStream { let MergeInput { ident, mut generics, data, path_overrides: PathOverrides { merge: merge_mod }, bound, - } = match MergeInput::from_derive_input(&input) { + } = match MergeInput::from_derive_input(input) { Ok(input) => input, Err(err) => return err.write_errors(), }; diff --git a/crates/stackable-operator/src/helm/mod.rs b/crates/stackable-operator/src/helm/mod.rs index 93582fd2a..6bf975cdf 100644 --- a/crates/stackable-operator/src/helm/mod.rs +++ b/crates/stackable-operator/src/helm/mod.rs @@ -101,7 +101,7 @@ mod tests { let expected = std::fs::read_to_string("fixtures/helm/output.yaml").unwrap(); let mut output = Vec::new(); - ser(&values, &mut output, SerializeOptions::default()).unwrap(); + ser(&values, &mut output, &SerializeOptions::default()).unwrap(); assert_eq!(std::str::from_utf8(&output).unwrap(), expected); } diff --git a/crates/stackable-operator/src/lib.rs b/crates/stackable-operator/src/lib.rs index 742383f6e..46616252c 100644 --- a/crates/stackable-operator/src/lib.rs +++ b/crates/stackable-operator/src/lib.rs @@ -1,21 +1,3 @@ -// We enable all pedantic lints and explicitly disable a few -#![deny(clippy::pedantic)] -#![expect(clippy::doc_markdown)] -#![expect(clippy::missing_errors_doc)] -#![expect(clippy::must_use_candidate)] -#![expect(clippy::return_self_not_must_use)] 
-#![expect(clippy::missing_panics_doc)] -#![expect(clippy::explicit_deref_methods)] -#![expect(clippy::cast_possible_truncation)] -#![expect(clippy::float_cmp)] -#![expect(clippy::cast_sign_loss)] -#![expect(clippy::cast_precision_loss)] -// Additionally, we explicitly enable a few nursery lints -#![deny(clippy::use_self)] -#![deny(clippy::or_fun_call)] -#![deny(clippy::derive_partial_eq_without_eq)] -#![deny(clippy::unnecessary_struct_initialization)] - //! ## Crate Features //! //! - `default` enables a default set of features which most operators need. diff --git a/crates/stackable-shared/src/crd.rs b/crates/stackable-shared/src/crd.rs index 4e40b38da..2b0126ad7 100644 --- a/crates/stackable-shared/src/crd.rs +++ b/crates/stackable-shared/src/crd.rs @@ -28,7 +28,7 @@ pub trait CustomResourceExt: kube::CustomResourceExt { /// leading dashes (`---`). fn write_yaml_schema>(path: P, operator_version: &str) -> Result<()> { Self::crd() - .write_yaml_schema(path, operator_version, SerializeOptions::default()) + .write_yaml_schema(path, operator_version, &SerializeOptions::default()) .context(WriteToFileSnafu) } @@ -41,14 +41,14 @@ pub trait CustomResourceExt: kube::CustomResourceExt { /// [stdout]: std::io::stdout fn print_yaml_schema(operator_version: &str) -> Result<()> { Self::crd() - .print_yaml_schema(operator_version, SerializeOptions::default()) + .print_yaml_schema(operator_version, &SerializeOptions::default()) .context(WriteToStdoutSnafu) } /// Generates the YAML schema of a `CustomResourceDefinition` and returns it as a [`String`]. 
fn yaml_schema(operator_version: &str) -> Result { Self::crd() - .generate_yaml_schema(operator_version, SerializeOptions::default()) + .generate_yaml_schema(operator_version, &SerializeOptions::default()) .context(GenerateSchemaSnafu) } } diff --git a/crates/stackable-shared/src/secret.rs b/crates/stackable-shared/src/secret.rs index 2330c940d..3406b1450 100644 --- a/crates/stackable-shared/src/secret.rs +++ b/crates/stackable-shared/src/secret.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; /// /// This struct is a redefinition of the one provided by k8s-openapi to make /// name and namespace mandatory. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct SecretReference { /// Namespace of the Secret being referred to. @@ -34,12 +34,12 @@ impl Display for SecretReference { impl From for ObjectRef { fn from(val: SecretReference) -> Self { - ObjectRef::::from(&val) + Self::from(&val) } } impl From<&SecretReference> for ObjectRef { fn from(val: &SecretReference) -> Self { - ObjectRef::::new(&val.name).within(&val.namespace) + Self::new(&val.name).within(&val.namespace) } } diff --git a/crates/stackable-shared/src/time/duration.rs b/crates/stackable-shared/src/time/duration.rs index 5233901fd..d0683c765 100644 --- a/crates/stackable-shared/src/time/duration.rs +++ b/crates/stackable-shared/src/time/duration.rs @@ -24,7 +24,7 @@ use schemars::JsonSchema; use snafu::{OptionExt, ResultExt, Snafu}; use strum::IntoEnumIterator; -#[derive(Debug, PartialEq, Snafu)] +#[derive(Debug, PartialEq, Eq, Snafu)] #[snafu(module)] pub enum DurationParseError { #[snafu(display("empty input"))] @@ -69,6 +69,7 @@ impl FromStr for Duration { type Err = DurationParseError; fn from_str(input: &str) -> Result { + #[allow(clippy::wildcard_imports)] use duration_parse_error::*; if input.is_empty() { return EmptyInputSnafu.fail(); @@ -98,9 +99,8 
@@ impl FromStr for Duration { let Some(unit) = take_group(char::is_alphabetic) else { if let Some(&(_, chr)) = chars.peek() { return UnexpectedCharacterSnafu { chr }.fail(); - } else { - return NoUnitSnafu { value }.fail(); } + return NoUnitSnafu { value }.fail(); }; let unit = unit.parse::().ok().context(ParseUnitSnafu { @@ -119,7 +119,7 @@ impl FromStr for Duration { .fail(); } Ordering::Equal => return DuplicateUnitSnafu { unit }.fail(), - _ => (), + Ordering::Greater => (), } } @@ -127,7 +127,7 @@ impl FromStr for Duration { // the appropriate number of milliseconds for this unit let fragment_value = value - .checked_mul(unit.millis() as u128) + .checked_mul(u128::from(unit.millis())) .context(OverflowSnafu { input: input.to_string(), value, @@ -186,11 +186,11 @@ impl Display for Duration { let mut millis = self.0.as_millis(); for unit in DurationUnit::iter() { - let whole = millis / unit.millis() as u128; - let rest = millis % unit.millis() as u128; + let whole = millis / u128::from(unit.millis()); + let rest = millis % u128::from(unit.millis()); if whole > 0 { - write!(f, "{}{}", whole, unit)?; + write!(f, "{whole}{unit}")?; } millis = rest; @@ -244,13 +244,13 @@ impl Add for std::time::Instant { impl AddAssign for Duration { fn add_assign(&mut self, rhs: Self) { - self.0 += rhs.0 + self.0 += rhs.0; } } impl AddAssign for std::time::Instant { fn add_assign(&mut self, rhs: Duration) { - *self += rhs.0 + *self += rhs.0; } } @@ -272,20 +272,20 @@ impl Sub for std::time::Instant { impl SubAssign for Duration { fn sub_assign(&mut self, rhs: Self) { - self.0 -= rhs.0 + self.0 -= rhs.0; } } impl SubAssign for std::time::Instant { fn sub_assign(&mut self, rhs: Duration) { - *self -= rhs.0 + *self -= rhs.0; } } impl Mul for Duration { type Output = Self; - fn mul(self, rhs: u32) -> Duration { + fn mul(self, rhs: u32) -> Self { Self(self.0 * rhs) } } @@ -301,7 +301,7 @@ impl Mul for u32 { impl Div for Duration { type Output = Self; - fn div(self, rhs: u32) -> Duration 
{ + fn div(self, rhs: u32) -> Self { Self(self.0 / rhs) } } @@ -410,15 +410,13 @@ pub enum DurationUnit { impl DurationUnit { /// Returns the number of whole milliseconds in each supported /// [`DurationUnit`]. - const fn millis(&self) -> u64 { - use DurationUnit::*; - + const fn millis(self) -> u64 { match self { - Days => 24 * Hours.millis(), - Hours => 60 * Minutes.millis(), - Minutes => 60 * Seconds.millis(), - Seconds => 1000, - Milliseconds => 1, + Self::Days => 24 * Self::Hours.millis(), + Self::Hours => 60 * Self::Minutes.millis(), + Self::Minutes => 60 * Self::Seconds.millis(), + Self::Seconds => 1000, + Self::Milliseconds => 1, } } } @@ -452,7 +450,7 @@ mod test { assert_eq!( Duration::from_days_unchecked(max_duration_days).as_millis(), - 18446744073657600000 // Precision lost due to ms -> day conversion + 18_446_744_073_657_600_000 // Precision lost due to ms -> day conversion ); let result = std::panic::catch_unwind(|| Duration::from_days_unchecked(max_duration_days + 1)); @@ -464,10 +462,10 @@ mod test { #[case("1m", 60)] #[case("1h", 3600)] #[case("70m", 4200)] - #[case("15d2m2s", 1296122)] - #[case("15d2m2s600ms", 1296122)] - #[case("15d2m2s1000ms", 1296123)] - #[case("213503982334d", 18446744073657600)] + #[case("15d2m2s", 1_296_122)] + #[case("15d2m2s600ms", 1_296_122)] + #[case("15d2m2s1000ms", 1_296_123)] + #[case("213503982334d", 18_446_744_073_657_600)] fn parse_as_secs(#[case] input: &str, #[case] output: u64) { let dur: Duration = input.parse().expect("valid duration input must parse"); assert_eq!(dur.as_secs(), output); @@ -479,10 +477,10 @@ mod test { #[case("1ä", DurationParseError::ParseUnitError { unit: "ä".into() })] #[case(" ", DurationParseError::UnexpectedCharacter { chr: ' ' })] #[case("", DurationParseError::EmptyInput)] - #[case("213503982335d", DurationParseError::Overflow { input: "213503982335d".to_string(), value: 213503982335_u128, unit: DurationUnit::Days })] + #[case("213503982335d", DurationParseError::Overflow { input: 
"213503982335d".to_string(), value: 213_503_982_335_u128, unit: DurationUnit::Days })] fn parse_invalid(#[case] input: &str, #[case] expected_err: DurationParseError) { let err = Duration::from_str(input).expect_err("invalid duration input must not parse"); - assert_eq!(err, expected_err) + assert_eq!(err, expected_err); } #[rstest] @@ -493,7 +491,7 @@ mod test { #[case] expected_err: DurationParseError, ) { let err = Duration::from_str(input).expect_err("invalid duration input must produce error"); - assert_eq!(err, expected_err) + assert_eq!(err, expected_err); } #[rstest] @@ -518,7 +516,7 @@ mod test { } let s: S = serde_yaml::from_str("dur: 15d2m2s").expect("valid duration must deserialize"); - assert_eq!(s.dur.as_secs(), 1296122); + assert_eq!(s.dur.as_secs(), 1_296_122); } #[test] diff --git a/crates/stackable-shared/src/time/jiff_impl.rs b/crates/stackable-shared/src/time/jiff_impl.rs index a655d8eb7..6b8af0866 100644 --- a/crates/stackable-shared/src/time/jiff_impl.rs +++ b/crates/stackable-shared/src/time/jiff_impl.rs @@ -15,6 +15,6 @@ impl TryFrom for Span { type Error = jiff::Error; fn try_from(value: Duration) -> Result { - Span::try_from(Into::::into(value)) + Self::try_from(Into::::into(value)) } } diff --git a/crates/stackable-shared/src/time/time_impl.rs b/crates/stackable-shared/src/time/time_impl.rs index b8e608d30..66f6936bb 100644 --- a/crates/stackable-shared/src/time/time_impl.rs +++ b/crates/stackable-shared/src/time/time_impl.rs @@ -12,7 +12,7 @@ impl Add for time::OffsetDateTime { impl AddAssign for time::OffsetDateTime { fn add_assign(&mut self, rhs: Duration) { - self.add_assign(*rhs) + self.add_assign(*rhs); } } @@ -26,7 +26,7 @@ impl Sub for time::OffsetDateTime { impl SubAssign for time::OffsetDateTime { fn sub_assign(&mut self, rhs: Duration) { - self.sub_assign(*rhs) + self.sub_assign(*rhs); } } @@ -42,7 +42,8 @@ mod test { assert!(now < later); assert_eq!( later.unix_timestamp() - now.unix_timestamp(), - 
Duration::from_minutes_unchecked(10).as_secs() as i64 + i64::try_from(Duration::from_minutes_unchecked(10).as_secs()) + .expect("10 minutes as seconds fits in i64") ); } @@ -54,7 +55,8 @@ mod test { assert!(now > earlier); assert_eq!( now.unix_timestamp() - earlier.unix_timestamp(), - Duration::from_minutes_unchecked(10).as_secs() as i64 + i64::try_from(Duration::from_minutes_unchecked(10).as_secs()) + .expect("10 minutes as seconds fits in i64") ); } } diff --git a/crates/stackable-shared/src/yaml.rs b/crates/stackable-shared/src/yaml.rs index 04ef83770..4199cec62 100644 --- a/crates/stackable-shared/src/yaml.rs +++ b/crates/stackable-shared/src/yaml.rs @@ -98,7 +98,7 @@ pub trait YamlSchema: Sized + serde::Serialize { fn generate_yaml_schema( &self, operator_version: &str, - options: SerializeOptions, + options: &SerializeOptions, ) -> Result { let mut buffer = Vec::new(); @@ -118,7 +118,7 @@ pub trait YamlSchema: Sized + serde::Serialize { &self, path: P, operator_version: &str, - options: SerializeOptions, + options: &SerializeOptions, ) -> Result<()> { let schema = self.generate_yaml_schema(operator_version, options)?; std::fs::write(path, schema).context(WriteToFileSnafu) @@ -126,7 +126,7 @@ pub trait YamlSchema: Sized + serde::Serialize { /// Generates and prints the YAML schema of `self` to stdout at `path` using the provided /// [`SerializeOptions`]. - fn print_yaml_schema(&self, operator_version: &str, options: SerializeOptions) -> Result<()> { + fn print_yaml_schema(&self, operator_version: &str, options: &SerializeOptions) -> Result<()> { let schema = self.generate_yaml_schema(operator_version, options)?; let mut writer = std::io::stdout(); @@ -139,7 +139,7 @@ pub trait YamlSchema: Sized + serde::Serialize { impl YamlSchema for T where T: serde::ser::Serialize {} /// Serializes the given data structure and writes it to a [`Writer`](Write). 
-pub fn serialize(value: &T, mut writer: W, options: SerializeOptions) -> Result<()> +pub fn serialize(value: &T, mut writer: W, options: &SerializeOptions) -> Result<()> where T: serde::Serialize, W: std::io::Write, diff --git a/crates/stackable-telemetry/src/instrumentation/axum/injector.rs b/crates/stackable-telemetry/src/instrumentation/axum/injector.rs index a42cfa231..7035b2975 100644 --- a/crates/stackable-telemetry/src/instrumentation/axum/injector.rs +++ b/crates/stackable-telemetry/src/instrumentation/axum/injector.rs @@ -41,7 +41,7 @@ impl<'a> HeaderInjector<'a> { /// [1]: opentelemetry::propagation::TextMapPropagator pub fn inject_context(&mut self, cx: &Context) { opentelemetry::global::get_text_map_propagator(|propagator| { - propagator.inject_context(cx, self) - }) + propagator.inject_context(cx, self); + }); } } diff --git a/crates/stackable-telemetry/src/instrumentation/axum/mod.rs b/crates/stackable-telemetry/src/instrumentation/axum/mod.rs index c258c2e13..869523090 100644 --- a/crates/stackable-telemetry/src/instrumentation/axum/mod.rs +++ b/crates/stackable-telemetry/src/instrumentation/axum/mod.rs @@ -463,7 +463,7 @@ impl SpanExt for Span { "set parent span context based on context extracted from request headers" ); - Span::current().add_link(new_parent.span().span_context().clone()); + Self::current().add_link(new_parent.span().span_context().clone()); span.add_link(Context::current().span().span_context().to_owned()); let _ = span.set_parent(new_parent); } @@ -480,7 +480,7 @@ impl SpanExt for Span { // will NOT be recorded as a number but as a string. This is likely // an issue in the tracing-opentelemetry crate. span.record(semconv::trace::SERVER_ADDRESS, host) - .record(semconv::trace::SERVER_PORT, port as i64); + .record(semconv::trace::SERVER_PORT, i64::from(port)); } // Setting fields according to the HTTP server semantic conventions @@ -498,7 +498,7 @@ impl SpanExt for Span { // likely an issue in the tracing-opentelemetry crate. 
span.record( semconv::trace::CLIENT_PORT, - client_socket_address.port() as i64, + i64::from(client_socket_address.port()), ); } } @@ -544,7 +544,7 @@ impl SpanExt for Span { // in the tracing-opentelemetry crate. self.record( semconv::trace::HTTP_RESPONSE_STATUS_CODE, - status_code.as_u16() as i64, + i64::from(status_code.as_u16()), ); // Only set the span status to "Error" when we encountered an server @@ -558,7 +558,7 @@ impl SpanExt for Span { } let mut injector = HeaderInjector::new(response.headers_mut()); - injector.inject_context(&Span::current().context()); + injector.inject_context(&Self::current().context()); } fn finalize_with_error(&self, error: &mut E) diff --git a/crates/stackable-telemetry/src/tracing/mod.rs b/crates/stackable-telemetry/src/tracing/mod.rs index c681a32bf..9ab820b9f 100644 --- a/crates/stackable-telemetry/src/tracing/mod.rs +++ b/crates/stackable-telemetry/src/tracing/mod.rs @@ -17,10 +17,13 @@ use opentelemetry_sdk::{ }; use snafu::{ResultExt as _, Snafu}; use tracing::{level_filters::LevelFilter, subscriber::SetGlobalDefaultError}; -use tracing_appender::rolling::{InitError, RollingFileAppender}; +use tracing_appender::rolling::{InitError, RollingFileAppender, Rotation}; use tracing_subscriber::{EnvFilter, Layer, Registry, filter::Directive, layer::SubscriberExt}; -use crate::tracing::settings::*; +use crate::tracing::settings::{ + ConsoleLogSettings, FileLogSettings, Format, OtlpLogSettings, OtlpTraceSettings, Settings, + SettingsToggle, +}; pub mod settings; @@ -417,7 +420,8 @@ impl Tracing { // once we bump the toolchain to 1.91.0. 
// See https://github.com/rust-lang/rust-clippy/pull/15445 #[allow(clippy::unwrap_in_result)] - pub fn init(mut self) -> Result { + #[allow(clippy::too_many_lines)] + pub fn init(mut self) -> Result { let mut layers: Vec + Sync + Send>> = Vec::new(); if let ConsoleLogSettings::Enabled { @@ -450,7 +454,7 @@ impl Tracing { .with_filter(env_filter_layer); layers.push(console_output_layer.boxed()); } - }; + } } if let FileLogSettings::Enabled { @@ -588,7 +592,7 @@ impl Drop for Tracing { if let Some(tracer_provider) = &self.tracer_provider && let Err(error) = tracer_provider.shutdown() { - tracing::error!(%error, "unable to shutdown TracerProvider") + tracing::error!(%error, "unable to shutdown TracerProvider"); } if let Some(logger_provider) = &self.logger_provider @@ -616,12 +620,12 @@ pub trait BuilderState: private::Sealed {} /// [1]: private::Sealed #[doc(hidden)] mod private { - use super::*; + use crate::tracing::builder_state::{Config, PreServiceName}; pub trait Sealed {} - impl Sealed for builder_state::PreServiceName {} - impl Sealed for builder_state::Config {} + impl Sealed for PreServiceName {} + impl Sealed for Config {} } /// This module holds the possible states that the builder is in. @@ -682,11 +686,8 @@ impl TracingBuilder { /// Enable the console output tracing subscriber and set the default /// [`LevelFilter`] which is overridable through the given environment /// variable. - pub fn with_console_output( - self, - console_log_settings: impl Into, - ) -> TracingBuilder { - TracingBuilder { + pub fn with_console_output(self, console_log_settings: impl Into) -> Self { + Self { service_name: self.service_name, console_log_settings: console_log_settings.into(), otlp_log_settings: self.otlp_log_settings, @@ -699,11 +700,8 @@ impl TracingBuilder { /// Enable the file output tracing subscriber and set the default /// [`LevelFilter`] which is overridable through the given environment /// variable. 
- pub fn with_file_output( - self, - file_log_settings: impl Into, - ) -> TracingBuilder { - TracingBuilder { + pub fn with_file_output(self, file_log_settings: impl Into) -> Self { + Self { service_name: self.service_name, console_log_settings: self.console_log_settings, file_log_settings: file_log_settings.into(), @@ -718,11 +716,8 @@ impl TracingBuilder { /// /// You can configure the OTLP log exports through the variables defined /// in the opentelemetry crates. See [`Tracing`]. - pub fn with_otlp_log_exporter( - self, - otlp_log_settings: impl Into, - ) -> TracingBuilder { - TracingBuilder { + pub fn with_otlp_log_exporter(self, otlp_log_settings: impl Into) -> Self { + Self { service_name: self.service_name, console_log_settings: self.console_log_settings, otlp_log_settings: otlp_log_settings.into(), @@ -740,8 +735,8 @@ impl TracingBuilder { pub fn with_otlp_trace_exporter( self, otlp_trace_settings: impl Into, - ) -> TracingBuilder { - TracingBuilder { + ) -> Self { + Self { service_name: self.service_name, console_log_settings: self.console_log_settings, otlp_log_settings: self.otlp_log_settings, @@ -784,7 +779,7 @@ fn env_filter_builder(env_var: &str, default_directive: impl Into) -> /// available if the feature `clap` is enabled. 
#[cfg_attr( feature = "clap", - doc = r#" + doc = r" ``` # use stackable_telemetry::tracing::TelemetryOptions; use clap::Parser; @@ -798,7 +793,7 @@ struct Cli { telemetry_arguments: TelemetryOptions, } ``` -"# +" )] #[cfg_attr( feature = "clap", @@ -916,7 +911,7 @@ mod test { environment_variable: "ABC_B", default_level: LevelFilter::DEBUG }, - log_format: Default::default() + log_format: Format::default() } ); @@ -939,9 +934,9 @@ mod test { environment_variable: "ABC_A", default_level: LevelFilter::TRACE, }, - log_format: Default::default() + log_format: Format::default() } - ) + ); } #[rstest] @@ -953,18 +948,19 @@ mod test { .with_console_output(("ABC_A", LevelFilter::TRACE, enabled)) .build(); - let expected = match enabled { - true => ConsoleLogSettings::Enabled { + let expected = if enabled { + ConsoleLogSettings::Enabled { common_settings: Settings { environment_variable: "ABC_A", default_level: LevelFilter::TRACE, }, - log_format: Default::default(), - }, - false => ConsoleLogSettings::Disabled, + log_format: Format::default(), + } + } else { + ConsoleLogSettings::Disabled }; - assert_eq!(trace_guard.console_log_settings, expected) + assert_eq!(trace_guard.console_log_settings, expected); } #[test] @@ -1005,7 +1001,7 @@ mod test { environment_variable: "ABC_CONSOLE", default_level: LevelFilter::INFO }, - log_format: Default::default() + log_format: Format::default() } ); assert_eq!( @@ -1085,7 +1081,7 @@ mod test { "test", TelemetryOptions { console_log_disabled: false, - console_log_format: Default::default(), + console_log_format: Format::default(), file_log_directory: None, file_log_rotation_period: None, file_log_max_files: None, diff --git a/crates/stackable-telemetry/src/tracing/settings/console_log.rs b/crates/stackable-telemetry/src/tracing/settings/console_log.rs index 18a8aa2bb..6652ec5f9 100644 --- a/crates/stackable-telemetry/src/tracing/settings/console_log.rs +++ b/crates/stackable-telemetry/src/tracing/settings/console_log.rs @@ -5,7 +5,7 @@ 
use tracing::level_filters::LevelFilter; use super::{Settings, SettingsBuilder, SettingsToggle}; /// Configure specific settings for the console log subscriber. -#[derive(Debug, Default, PartialEq)] +#[derive(Debug, Default, PartialEq, Eq)] pub enum ConsoleLogSettings { /// Console subscriber disabled. #[default] @@ -45,8 +45,8 @@ pub enum Format { impl SettingsToggle for ConsoleLogSettings { fn is_enabled(&self) -> bool { match self { - ConsoleLogSettings::Disabled => false, - ConsoleLogSettings::Enabled { .. } => true, + Self::Disabled => false, + Self::Enabled { .. } => true, } } } @@ -90,21 +90,21 @@ impl From for ConsoleLogSettingsBuilder { impl From for ConsoleLogSettings { fn from(common_settings: Settings) -> Self { - ConsoleLogSettings::Enabled { + Self::Enabled { common_settings, - log_format: Default::default(), + log_format: Format::default(), } } } impl From> for ConsoleLogSettings where - T: Into, + T: Into, { fn from(settings: Option) -> Self { match settings { Some(settings) => settings.into(), - None => ConsoleLogSettings::default(), + None => Self::default(), } } } @@ -116,22 +116,23 @@ impl From<(&'static str, LevelFilter)> for ConsoleLogSettings { environment_variable: value.0, default_level: value.1, }, - log_format: Default::default(), + log_format: Format::default(), } } } impl From<(&'static str, LevelFilter, bool)> for ConsoleLogSettings { fn from(value: (&'static str, LevelFilter, bool)) -> Self { - match value.2 { - true => Self::Enabled { + if value.2 { + Self::Enabled { common_settings: Settings { environment_variable: value.0, default_level: value.1, }, - log_format: Default::default(), - }, - false => Self::Disabled, + log_format: Format::default(), + } + } else { + Self::Disabled } } } diff --git a/crates/stackable-telemetry/src/tracing/settings/file_log.rs b/crates/stackable-telemetry/src/tracing/settings/file_log.rs index ad345c4b7..3a005faf9 100644 --- a/crates/stackable-telemetry/src/tracing/settings/file_log.rs +++ 
b/crates/stackable-telemetry/src/tracing/settings/file_log.rs @@ -8,7 +8,7 @@ pub use tracing_appender::rolling::Rotation; use super::{Settings, SettingsToggle}; /// Configure specific settings for the File Log subscriber. -#[derive(Debug, Default, PartialEq)] +#[derive(Debug, Default, PartialEq, Eq)] pub enum FileLogSettings { /// File Log subscriber disabled. #[default] @@ -36,8 +36,8 @@ pub enum FileLogSettings { impl SettingsToggle for FileLogSettings { fn is_enabled(&self) -> bool { match self { - FileLogSettings::Disabled => false, - FileLogSettings::Enabled { .. } => true, + Self::Disabled => false, + Self::Enabled { .. } => true, } } } @@ -82,12 +82,12 @@ impl FileLogSettingsBuilder { impl From> for FileLogSettings where - T: Into, + T: Into, { fn from(settings: Option) -> Self { match settings { Some(settings) => settings.into(), - None => FileLogSettings::default(), + None => Self::default(), } } } diff --git a/crates/stackable-telemetry/src/tracing/settings/mod.rs b/crates/stackable-telemetry/src/tracing/settings/mod.rs index 6eb19e509..d7d43fc46 100644 --- a/crates/stackable-telemetry/src/tracing/settings/mod.rs +++ b/crates/stackable-telemetry/src/tracing/settings/mod.rs @@ -28,7 +28,7 @@ pub trait SettingsToggle { } /// General settings that apply to any subscriber. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub struct Settings { /// The environment variable used to set the [`LevelFilter`]. /// diff --git a/crates/stackable-telemetry/src/tracing/settings/otlp_log.rs b/crates/stackable-telemetry/src/tracing/settings/otlp_log.rs index c9394c513..a5dbc7e09 100644 --- a/crates/stackable-telemetry/src/tracing/settings/otlp_log.rs +++ b/crates/stackable-telemetry/src/tracing/settings/otlp_log.rs @@ -5,7 +5,7 @@ use tracing::level_filters::LevelFilter; use super::{Settings, SettingsBuilder, SettingsToggle}; /// Configure specific settings for the OpenTelemetry log subscriber. 
-#[derive(Debug, Default, PartialEq)] +#[derive(Debug, Default, PartialEq, Eq)] pub enum OtlpLogSettings { /// OpenTelemetry log subscriber disabled. #[default] @@ -21,8 +21,8 @@ pub enum OtlpLogSettings { impl SettingsToggle for OtlpLogSettings { fn is_enabled(&self) -> bool { match self { - OtlpLogSettings::Disabled => false, - OtlpLogSettings::Enabled { .. } => true, + Self::Disabled => false, + Self::Enabled { .. } => true, } } } @@ -65,12 +65,12 @@ impl From for OtlpLogSettings { impl From> for OtlpLogSettings where - T: Into, + T: Into, { fn from(settings: Option) -> Self { match settings { Some(settings) => settings.into(), - None => OtlpLogSettings::default(), + None => Self::default(), } } } @@ -88,14 +88,15 @@ impl From<(&'static str, LevelFilter)> for OtlpLogSettings { impl From<(&'static str, LevelFilter, bool)> for OtlpLogSettings { fn from(value: (&'static str, LevelFilter, bool)) -> Self { - match value.2 { - true => Self::Enabled { + if value.2 { + Self::Enabled { common_settings: Settings { environment_variable: value.0, default_level: value.1, }, - }, - false => Self::Disabled, + } + } else { + Self::Disabled } } } diff --git a/crates/stackable-telemetry/src/tracing/settings/otlp_trace.rs b/crates/stackable-telemetry/src/tracing/settings/otlp_trace.rs index 93dab8aff..19a5ff653 100644 --- a/crates/stackable-telemetry/src/tracing/settings/otlp_trace.rs +++ b/crates/stackable-telemetry/src/tracing/settings/otlp_trace.rs @@ -5,7 +5,7 @@ use tracing::level_filters::LevelFilter; use super::{Settings, SettingsBuilder, SettingsToggle}; /// Configure specific settings for the OpenTelemetry trace subscriber. -#[derive(Debug, Default, PartialEq)] +#[derive(Debug, Default, PartialEq, Eq)] pub enum OtlpTraceSettings { /// OpenTelemetry trace subscriber disabled. 
#[default] @@ -21,8 +21,8 @@ pub enum OtlpTraceSettings { impl SettingsToggle for OtlpTraceSettings { fn is_enabled(&self) -> bool { match self { - OtlpTraceSettings::Disabled => false, - OtlpTraceSettings::Enabled { .. } => true, + Self::Disabled => false, + Self::Enabled { .. } => true, } } } @@ -65,12 +65,12 @@ impl From for OtlpTraceSettings { impl From> for OtlpTraceSettings where - T: Into, + T: Into, { fn from(settings: Option) -> Self { match settings { Some(settings) => settings.into(), - None => OtlpTraceSettings::default(), + None => Self::default(), } } } @@ -88,14 +88,15 @@ impl From<(&'static str, LevelFilter)> for OtlpTraceSettings { impl From<(&'static str, LevelFilter, bool)> for OtlpTraceSettings { fn from(value: (&'static str, LevelFilter, bool)) -> Self { - match value.2 { - true => Self::Enabled { + if value.2 { + Self::Enabled { common_settings: Settings { environment_variable: value.0, default_level: value.1, }, - }, - false => Self::Disabled, + } + } else { + Self::Disabled } } } diff --git a/crates/stackable-versioned-macros/src/attrs/item/field.rs b/crates/stackable-versioned-macros/src/attrs/item/field.rs index 2fd1e1340..47811b5b9 100644 --- a/crates/stackable-versioned-macros/src/attrs/item/field.rs +++ b/crates/stackable-versioned-macros/src/attrs/item/field.rs @@ -66,7 +66,7 @@ impl FieldAttributes { })?; self.common - .validate(FieldIdents::from(ident), &self.attrs)?; + .validate(&FieldIdents::from(ident), &self.attrs)?; Ok(self) } diff --git a/crates/stackable-versioned-macros/src/attrs/item/mod.rs b/crates/stackable-versioned-macros/src/attrs/item/mod.rs index b09f6cb89..4c8e2dfcf 100644 --- a/crates/stackable-versioned-macros/src/attrs/item/mod.rs +++ b/crates/stackable-versioned-macros/src/attrs/item/mod.rs @@ -50,15 +50,15 @@ pub struct CommonItemAttributes { // it contains functions which can only be called after the initial parsing and validation because // they need additional context, namely the list of versions defined on 
the container or module. impl CommonItemAttributes { - pub fn validate(&self, item_idents: impl ItemIdents, item_attrs: &[Attribute]) -> Result<()> { + pub fn validate(&self, item_idents: &impl ItemIdents, item_attrs: &[Attribute]) -> Result<()> { let mut errors = Error::accumulator(); - errors.handle(self.validate_action_combinations(&item_idents)); - errors.handle(self.validate_action_order(&item_idents)); - errors.handle(self.validate_item_name(&item_idents)); + errors.handle(self.validate_action_combinations(item_idents)); + errors.handle(self.validate_action_order(item_idents)); + errors.handle(self.validate_item_name(item_idents)); errors.handle(self.validate_added_action()); - errors.handle(self.validate_changed_action(&item_idents)); - errors.handle(self.validate_item_attributes(item_attrs)); + errors.handle(self.validate_changed_action(item_idents)); + errors.handle(Self::validate_item_attributes(item_attrs)); errors.finish() } @@ -274,7 +274,7 @@ impl CommonItemAttributes { /// /// - `deprecated` must not be set on items. Instead, use the `deprecated()` /// action of the `#[versioned()]` macro. 
- fn validate_item_attributes(&self, item_attrs: &[Attribute]) -> Result<()> { + fn validate_item_attributes(item_attrs: &[Attribute]) -> Result<()> { for attr in item_attrs { for segment in &attr.path().segments { if segment.ident == "deprecated" { @@ -288,6 +288,7 @@ impl CommonItemAttributes { } impl CommonItemAttributes { + #[expect(clippy::too_many_lines)] pub fn into_changeset( self, idents: &impl ItemIdents, @@ -326,8 +327,7 @@ impl CommonItemAttributes { let from_ty = change .from_type .as_ref() - .map(|sv| sv.deref().clone()) - .unwrap_or(ty.clone()); + .map_or_else(|| ty.clone(), |sv| sv.deref().clone()); actions.insert( *change.since, @@ -377,8 +377,7 @@ impl CommonItemAttributes { let from_ty = change .from_type .as_ref() - .map(|sv| sv.deref().clone()) - .unwrap_or(ty.clone()); + .map_or_else(|| ty.clone(), |sv| sv.deref().clone()); actions.insert( *change.since, diff --git a/crates/stackable-versioned-macros/src/attrs/item/variant.rs b/crates/stackable-versioned-macros/src/attrs/item/variant.rs index 087d113df..01f1f4bc2 100644 --- a/crates/stackable-versioned-macros/src/attrs/item/variant.rs +++ b/crates/stackable-versioned-macros/src/attrs/item/variant.rs @@ -52,11 +52,9 @@ impl VariantAttributes { /// Internally, it calls out to other specialized validation functions. 
fn validate(self) -> Result { let mut errors = Error::accumulator(); + let variant_idents = VariantIdents::from(self.ident.clone()); - errors.handle( - self.common - .validate(VariantIdents::from(self.ident.clone()), &self.attrs), - ); + errors.handle(self.common.validate(&variant_idents, &self.attrs)); // Validate names of renames for change in &self.common.changes { @@ -66,7 +64,7 @@ impl VariantAttributes { errors.push( Error::custom("renamed variant must use PascalCase") .with_span(&from_name.span()), - ) + ); } } diff --git a/crates/stackable-versioned-macros/src/attrs/module.rs b/crates/stackable-versioned-macros/src/attrs/module.rs index 71d61d34b..a7b69cd6f 100644 --- a/crates/stackable-versioned-macros/src/attrs/module.rs +++ b/crates/stackable-versioned-macros/src/attrs/module.rs @@ -172,7 +172,7 @@ impl ToTokens for CrateArguments { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { let mut crate_overrides = TokenStream::new(); - let CrateArguments { + let Self { kube_client: _, k8s_openapi, serde_json, @@ -257,8 +257,7 @@ impl Deref for Override { fn deref(&self) -> &Self::Target { match &self { - Override::Default(inner) => inner, - Override::Explicit(inner) => inner, + Self::Default(inner) | Self::Explicit(inner) => inner, } } } diff --git a/crates/stackable-versioned-macros/src/codegen/changes.rs b/crates/stackable-versioned-macros/src/codegen/changes.rs index b36efc6f7..38f1112b5 100644 --- a/crates/stackable-versioned-macros/src/codegen/changes.rs +++ b/crates/stackable-versioned-macros/src/codegen/changes.rs @@ -190,7 +190,7 @@ impl ChangesetExt for BTreeMap { } // TODO (@NickLarsenNZ): Explain why it is unreachable, as it can be reached during testing. // To reproduce, use an invalid version, eg: #[versioned(deprecated(since = "v99"))] - _ => unreachable!(), + ItemStatus::Deprecation { .. 
} => unreachable!(), }; self.insert( diff --git a/crates/stackable-versioned-macros/src/codegen/container/mod.rs b/crates/stackable-versioned-macros/src/codegen/container/mod.rs index 326e10a5b..4896ded56 100644 --- a/crates/stackable-versioned-macros/src/codegen/container/mod.rs +++ b/crates/stackable-versioned-macros/src/codegen/container/mod.rs @@ -151,16 +151,16 @@ impl Container { ctx: ModuleGenerationContext<'a>, ) -> ContainerTokens<'a> { match self { - Container::Struct(s) => s.generate_tokens(versions, ctx), - Container::Enum(e) => e.generate_tokens(versions, ctx), + Self::Struct(s) => s.generate_tokens(versions, ctx), + Self::Enum(e) => e.generate_tokens(versions, ctx), } } /// Returns the original ident of the container. pub fn get_original_ident(&self) -> &Ident { match &self { - Container::Struct(s) => s.common.idents.original.as_ident(), - Container::Enum(e) => e.common.idents.original.as_ident(), + Self::Struct(s) => s.common.idents.original.as_ident(), + Self::Enum(e) => e.common.idents.original.as_ident(), } } } @@ -217,15 +217,16 @@ impl KubernetesIdents { let parameter = kind.as_parameter_ident(); Self { - parameter, - version, - status, kind, + status, + version, + parameter, } } } #[derive(Debug)] +#[allow(clippy::struct_excessive_bools)] pub struct ContainerOptions { pub skip_from: bool, pub skip_object_from: bool, diff --git a/crates/stackable-versioned-macros/src/codegen/container/struct/conversion.rs b/crates/stackable-versioned-macros/src/codegen/container/struct/conversion.rs index 7d6c5e593..9afff4f01 100644 --- a/crates/stackable-versioned-macros/src/codegen/container/struct/conversion.rs +++ b/crates/stackable-versioned-macros/src/codegen/container/struct/conversion.rs @@ -292,14 +292,11 @@ impl Struct { &self, next_version: &VersionDefinition, mod_gen_ctx: ModuleGenerationContext<'_>, - ) -> Option { - let json_paths = self - .fields + ) -> TokenStream { + self.fields .iter() .filter_map(|f| f.generate_for_json_path(next_version, 
mod_gen_ctx)) - .collect(); - - Some(json_paths) + .collect() } pub(super) fn needs_tracking(&self, version: &VersionDefinition) -> bool { @@ -341,7 +338,7 @@ impl Struct { let convert_object_error = quote! { #versioned_path::ConvertObjectError }; // Generate conversion paths and the match arms for these paths - let match_arms = self.generate_conversion_match_arms(versions, mod_gen_ctx, spec_gen_ctx); + let match_arms = Self::generate_conversion_match_arms(versions, mod_gen_ctx, spec_gen_ctx); // TODO (@Techassi): Make this a feature, drop the option from the macro arguments // Generate tracing attributes and events if tracing is enabled @@ -350,7 +347,7 @@ impl Struct { convert_objects_instrumentation, invalid_conversion_review_event, try_convert_instrumentation, - } = self.generate_conversion_tracing(mod_gen_ctx, spec_gen_ctx); + } = Self::generate_conversion_tracing(mod_gen_ctx, spec_gen_ctx); // Generate doc comments let conversion_review_reference = @@ -626,7 +623,6 @@ impl Struct { } fn generate_conversion_match_arms( - &self, versions: &[VersionDefinition], mod_gen_ctx: ModuleGenerationContext<'_>, spec_gen_ctx: &SpecGenerationContext<'_>, @@ -699,7 +695,6 @@ impl Struct { } fn generate_conversion_tracing( - &self, mod_gen_ctx: ModuleGenerationContext<'_>, spec_gen_ctx: &SpecGenerationContext<'_>, ) -> TracingTokens { diff --git a/crates/stackable-versioned-macros/src/codegen/container/struct/mod.rs b/crates/stackable-versioned-macros/src/codegen/container/struct/mod.rs index 021f523fa..81022c022 100644 --- a/crates/stackable-versioned-macros/src/codegen/container/struct/mod.rs +++ b/crates/stackable-versioned-macros/src/codegen/container/struct/mod.rs @@ -159,7 +159,7 @@ impl Struct { // Generate code which is only needed for the top-level CRD spec if let Some(spec_gen_ctx) = spec_gen_ctx { - let entry_enum = self.generate_entry_enum(mod_gen_ctx, &spec_gen_ctx); + let entry_enum = Self::generate_entry_enum(mod_gen_ctx, &spec_gen_ctx); let entry_enum_impl = 
self.generate_entry_impl_block(versions, mod_gen_ctx, &spec_gen_ctx); let version_enum = self.generate_version_enum(mod_gen_ctx, &spec_gen_ctx); @@ -195,9 +195,8 @@ impl Struct { .filter_map(|field| field.generate_for_container(ver_ctx.version)) .collect(); - let kube_attribute = spec_gen_ctx.and_then(|spec_gen_ctx| { - self.generate_kube_attribute(ver_ctx, mod_gen_ctx, spec_gen_ctx) - }); + let kube_attribute = spec_gen_ctx + .map(|spec_gen_ctx| self.generate_kube_attribute(ver_ctx, mod_gen_ctx, spec_gen_ctx)); quote! { #(#[doc = #version_docs])* @@ -214,17 +213,14 @@ impl Struct { ver_ctx: VersionContext<'_>, mod_gen_ctx: ModuleGenerationContext<'_>, spec_gen_ctx: &SpecGenerationContext<'_>, - ) -> Option { + ) -> TokenStream { // Required arguments let group = &spec_gen_ctx.kubernetes_arguments.group; let version = ver_ctx.version.inner.to_string(); - let kind = spec_gen_ctx - .kubernetes_arguments - .kind - .as_ref() - .map_or(spec_gen_ctx.kubernetes_idents.kind.to_string(), |kind| { - kind.clone() - }); + let kind = spec_gen_ctx.kubernetes_arguments.kind.as_ref().map_or_else( + || spec_gen_ctx.kubernetes_idents.kind.to_string(), + Clone::clone, + ); // Optional arguments let singular = spec_gen_ctx @@ -279,7 +275,7 @@ impl Struct { .map(|s| quote! { , shortname = #s }) .collect(); - Some(quote! { + quote! { // The end-developer needs to derive CustomResource and JsonSchema. // This is because we don't know if they want to use a re-exported or renamed import. 
#[kube( @@ -288,11 +284,10 @@ impl Struct { // These fields are optional, and therefore the token stream must prefix each with a comma: #singular #plural #namespaced #crates #status #shortnames )] - }) + } } fn generate_entry_enum( - &self, mod_gen_ctx: ModuleGenerationContext<'_>, spec_gen_ctx: &SpecGenerationContext<'_>, ) -> TokenStream { diff --git a/crates/stackable-versioned-macros/src/codegen/item/field.rs b/crates/stackable-versioned-macros/src/codegen/item/field.rs index 9c51b196d..c59ea29e0 100644 --- a/crates/stackable-versioned-macros/src/codegen/item/field.rs +++ b/crates/stackable-versioned-macros/src/codegen/item/field.rs @@ -83,6 +83,7 @@ impl VersionedField { pub fn generate_for_container(&self, version: &VersionDefinition) -> Option { let original_attributes = &self.original_attributes; + #[allow(clippy::single_match_else)] match &self.changes { Some(changes) => { // Check if the provided container version is present in the map of actions. If it @@ -196,6 +197,7 @@ impl VersionedField { next_version: &VersionDefinition, from_struct_ident: &IdentString, ) -> Option { + #[allow(clippy::single_match_else)] match &self.changes { Some(changes) => { let next_change = changes.get_expect(&next_version.inner); @@ -413,7 +415,7 @@ impl VersionedField { None => { if self.nested { let json_path_ident = lhs_field_ident.json_path_ident(); - let func = self.generate_tracking_conversion_function(json_path_ident); + let func = self.generate_tracking_conversion_function(&json_path_ident); quote! { #lhs_field_ident: #rhs_struct_ident.#rhs_field_ident.#func, @@ -430,28 +432,30 @@ impl VersionedField { } /// Generates tracking conversion functions used by field definitions in `From` impl blocks. 
- fn generate_tracking_conversion_function(&self, json_path_ident: IdentString) -> TokenStream { - match &self.hint { - Some(hint) => match hint { + fn generate_tracking_conversion_function(&self, json_path_ident: &IdentString) -> TokenStream { + if let Some(hint) = &self.hint { + match hint { Hint::Option => { quote! { map(|v| v.tracking_into(status, &#json_path_ident)) } } Hint::Vec => { quote! { into_iter().map(|v| v.tracking_into(status, &#json_path_ident)).collect() } } - }, - None => quote! { tracking_into(status, &#json_path_ident) }, + } + } else { + quote! { tracking_into(status, &#json_path_ident) } } } /// Generates conversion functions used by field definitions in `From` impl blocks. fn generate_conversion_function(&self) -> TokenStream { - match &self.hint { - Some(hint) => match hint { + if let Some(hint) = &self.hint { + match hint { Hint::Option => quote! { map(Into::into) }, Hint::Vec => quote! { into_iter().map(Into::into).collect() }, - }, - None => quote! { into() }, + } + } else { + quote! { into() } } } } @@ -491,9 +495,9 @@ impl From for FieldIdents { let json_path = cleaned.json_path_ident(); Self { - json_path, original, cleaned, + json_path, } } } diff --git a/crates/stackable-versioned-macros/src/codegen/item/mod.rs b/crates/stackable-versioned-macros/src/codegen/item/mod.rs index 0cc055531..46abd7df8 100644 --- a/crates/stackable-versioned-macros/src/codegen/item/mod.rs +++ b/crates/stackable-versioned-macros/src/codegen/item/mod.rs @@ -7,7 +7,7 @@ pub use field::*; mod variant; pub use variant::*; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum ItemStatus { Addition { ident: IdentString, @@ -40,11 +40,13 @@ pub enum ItemStatus { impl ItemStatus { pub fn get_ident(&self) -> &IdentString { match &self { - ItemStatus::Addition { ident, .. } => ident, - ItemStatus::Change { to_ident, .. } => to_ident, - ItemStatus::Deprecation { ident, .. } => ident, - ItemStatus::NoChange { ident, .. 
} => ident, - ItemStatus::NotPresent => unreachable!("ItemStatus::NotPresent does not have an ident"), + Self::Addition { ident, .. } + | Self::Change { + to_ident: ident, .. + } + | Self::Deprecation { ident, .. } + | Self::NoChange { ident, .. } => ident, + Self::NotPresent => unreachable!("ItemStatus::NotPresent does not have an ident"), } } } diff --git a/crates/stackable-versioned-macros/src/codegen/item/variant.rs b/crates/stackable-versioned-macros/src/codegen/item/variant.rs index 2055fed19..b100aceb1 100644 --- a/crates/stackable-versioned-macros/src/codegen/item/variant.rs +++ b/crates/stackable-versioned-macros/src/codegen/item/variant.rs @@ -64,6 +64,7 @@ impl VersionedVariant { let original_attributes = &self.original_attributes; let fields = &self.fields; + #[allow(clippy::single_match_else)] match &self.changes { // NOTE (@Techassi): `unwrap_or_else` used instead of `expect`. // See: https://rust-lang.github.io/rust-clippy/master/index.html#expect_fun_call @@ -146,6 +147,7 @@ impl VersionedVariant { let from_fields = self.generate_from_fields(); let for_fields = self.generate_for_fields(); + #[allow(clippy::single_match_else)] match &self.changes { Some(changes) => { let next_change = changes.get_expect(&next_version.inner); diff --git a/crates/stackable-versioned-macros/src/codegen/module.rs b/crates/stackable-versioned-macros/src/codegen/module.rs index 7aeb8bc35..681321850 100644 --- a/crates/stackable-versioned-macros/src/codegen/module.rs +++ b/crates/stackable-versioned-macros/src/codegen/module.rs @@ -57,7 +57,7 @@ impl Module { errors.handle(Container::new_enum(item_enum, &versions)) { containers.push(container); - }; + } } Item::Struct(item_struct) => { let experimental_conversion_tracking = module_attributes @@ -123,7 +123,7 @@ impl Module { ) .with_span(&disallowed_item), ), - }; + } } errors.finish_with(Self { diff --git a/crates/stackable-versioned-macros/src/test_utils.rs b/crates/stackable-versioned-macros/src/test_utils.rs index 
95598eafa..4acffb2d9 100644 --- a/crates/stackable-versioned-macros/src/test_utils.rs +++ b/crates/stackable-versioned-macros/src/test_utils.rs @@ -42,7 +42,7 @@ pub enum Error { pub fn expand_from_file(path: &Path) -> Result { let input = std::fs::read_to_string(path).context(ReadFileSnafu)?; - let (attrs, input) = prepare_from_string(input)?; + let (attrs, input) = prepare_from_str(&input)?; let expanded = versioned_impl(attrs, input).to_string(); let parsed = syn::parse_file(&expanded).context(ParseOutputFileSnafu)?; @@ -50,7 +50,7 @@ pub fn expand_from_file(path: &Path) -> Result { Ok(prettyplease::unparse(&parsed)) } -fn prepare_from_string(input: String) -> Result<(TokenStream, Item), Error> { +fn prepare_from_str(input: &str) -> Result<(TokenStream, Item), Error> { let parts: [&str; 4] = input .split(DELIMITER) .collect::>() diff --git a/crates/stackable-versioned-macros/src/utils/doc_comments.rs b/crates/stackable-versioned-macros/src/utils/doc_comments.rs index bc4cbfe77..6bd8b5c96 100644 --- a/crates/stackable-versioned-macros/src/utils/doc_comments.rs +++ b/crates/stackable-versioned-macros/src/utils/doc_comments.rs @@ -20,6 +20,6 @@ impl DocComments for &str { impl DocComments for Option<&str> { fn into_doc_comments(self) -> Vec { - self.map_or(vec![], |s| s.into_doc_comments()) + self.map_or(vec![], DocComments::into_doc_comments) } } diff --git a/crates/stackable-versioned/src/lib.rs b/crates/stackable-versioned/src/lib.rs index 7b7445a4b..c5dab454b 100644 --- a/crates/stackable-versioned/src/lib.rs +++ b/crates/stackable-versioned/src/lib.rs @@ -166,7 +166,7 @@ impl ConvertObjectError { // two solutions performs better needs to evaluated. // self.iter_chain().join(": ") self.iter_chain() - .map(|err| err.to_string()) + .map(ToString::to_string) .collect::>() .join(": ") } @@ -174,12 +174,11 @@ impl ConvertObjectError { /// Returns a HTTP status code based on the underlying error. 
pub fn http_status_code(&self) -> u16 { match self { - ConvertObjectError::Parse { .. } => 400, - ConvertObjectError::Serialize { .. } => 500, - + Self::Serialize { .. } => 500, + Self::Parse { .. } // This is likely the clients fault, as it is requesting a unsupported version - ConvertObjectError::ParseDesiredApiVersion { - source: UnknownDesiredApiVersionError { .. }, + | Self::ParseDesiredApiVersion { + source: UnknownDesiredApiVersionError { .. } } => 400, } } diff --git a/crates/stackable-versioned/tests/conversions.rs b/crates/stackable-versioned/tests/conversions.rs index c04a767da..4f3dbfb4f 100644 --- a/crates/stackable-versioned/tests/conversions.rs +++ b/crates/stackable-versioned/tests/conversions.rs @@ -22,7 +22,7 @@ fn pass() { "File {path:?} should be converted successfully" ); assert_eq!(request.request.unwrap().uid, response.uid); - }) + }); } #[test] @@ -46,5 +46,5 @@ fn fail() { if let Some(request) = &request.request { assert_eq!(request.uid, response.uid); } - }) + }); } diff --git a/crates/stackable-webhook/src/lib.rs b/crates/stackable-webhook/src/lib.rs index c74b9c73c..10b52b0aa 100644 --- a/crates/stackable-webhook/src/lib.rs +++ b/crates/stackable-webhook/src/lib.rs @@ -206,7 +206,7 @@ impl WebhookServer { .context(EncodeCertificateAuthorityAsPemSnafu)?; let ca_bundle = ByteString(ca_bundle.as_bytes().to_vec()); - for webhook in webhooks.iter_mut() { + for webhook in &mut webhooks { if webhook.ignore_certificate_rotation() { continue; } diff --git a/crates/stackable-webhook/src/tls/cert_resolver.rs b/crates/stackable-webhook/src/tls/cert_resolver.rs index 632f13051..6601e06be 100644 --- a/crates/stackable-webhook/src/tls/cert_resolver.rs +++ b/crates/stackable-webhook/src/tls/cert_resolver.rs @@ -151,7 +151,7 @@ impl CertificateResolver { .generate_ecdsa_leaf_certificate( "Leaf", "webhook", - subject_alternative_dns_names.iter().map(|san| san.as_str()), + subject_alternative_dns_names.iter().map(String::as_str), 
WEBHOOK_CERTIFICATE_LIFETIME, ) .context(GenerateLeafCertificateSnafu)?; diff --git a/crates/stackable-webhook/src/tls/mod.rs b/crates/stackable-webhook/src/tls/mod.rs index a905203a1..4b9e4aa24 100644 --- a/crates/stackable-webhook/src/tls/mod.rs +++ b/crates/stackable-webhook/src/tls/mod.rs @@ -214,7 +214,7 @@ impl TlsServer { // Once a shutdown signal is received (this future becomes `Poll::Ready`), break out // of the main loop which cancels the certification rotation interval and stops // accepting new TCP connections. - _ = &mut shutdown_signal => { + () = &mut shutdown_signal => { tracing::trace!("received shutdown signal"); break; } @@ -251,7 +251,7 @@ impl TlsServer { async move { Self::handle_request(tcp_stream, remote_addr, tls_acceptor, tower_service, self.socket_addr) .instrument(span) - .await + .await; } ); } @@ -274,11 +274,11 @@ impl TlsServer { { semconv::attribute::OTEL_STATUS_CODE } = Empty, { semconv::attribute::OTEL_STATUS_DESCRIPTION } = Empty, { semconv::trace::CLIENT_ADDRESS } = remote_addr.ip().to_string(), - { semconv::trace::CLIENT_PORT } = remote_addr.port() as i64, + { semconv::trace::CLIENT_PORT } = i64::from(remote_addr.port()), { semconv::trace::SERVER_ADDRESS } = Empty, { semconv::trace::SERVER_PORT } = Empty, { semconv::trace::NETWORK_PEER_ADDRESS } = remote_addr.ip().to_string(), - { semconv::trace::NETWORK_PEER_PORT } = remote_addr.port() as i64, + { semconv::trace::NETWORK_PEER_PORT } = i64::from(remote_addr.port()), { semconv::trace::NETWORK_LOCAL_ADDRESS } = Empty, { semconv::trace::NETWORK_LOCAL_PORT } = Empty, { semconv::trace::NETWORK_TRANSPORT } = "tcp", @@ -289,9 +289,9 @@ impl TlsServer { let addr = &local_addr.ip().to_string(); let port = local_addr.port(); span.record(semconv::trace::SERVER_ADDRESS, addr) - .record(semconv::trace::SERVER_PORT, port as i64) + .record(semconv::trace::SERVER_PORT, i64::from(port)) .record(semconv::trace::NETWORK_LOCAL_ADDRESS, addr) - .record(semconv::trace::NETWORK_LOCAL_PORT, port as 
i64); + .record(semconv::trace::NETWORK_LOCAL_PORT, i64::from(port)); } // Wait for tls handshake to happen @@ -335,7 +335,7 @@ impl TlsServer { span.record(semconv::attribute::OTEL_STATUS_CODE, "Error") .record(semconv::attribute::OTEL_STATUS_DESCRIPTION, err.to_string()); tracing::warn!(%err, %remote_addr, "failed to serve connection"); - }) + }); } } @@ -346,8 +346,8 @@ pub trait SocketAddrExt { impl SocketAddrExt for SocketAddr { fn semantic_convention_network_type(&self) -> &'static str { match self { - SocketAddr::V4(_) => "ipv4", - SocketAddr::V6(_) => "ipv6", + Self::V4(_) => "ipv4", + Self::V6(_) => "ipv6", } } } diff --git a/crates/stackable-webhook/src/webhooks/conversion_webhook.rs b/crates/stackable-webhook/src/webhooks/conversion_webhook.rs index 64cd58f2d..62b1a7e11 100644 --- a/crates/stackable-webhook/src/webhooks/conversion_webhook.rs +++ b/crates/stackable-webhook/src/webhooks/conversion_webhook.rs @@ -171,8 +171,8 @@ impl ConversionWebhook { conversion_review_versions: vec!["v1".to_owned()], client_config: Some(WebhookClientConfig { service: Some(ServiceReference { - name: options.webhook_service_name.to_owned(), - namespace: options.webhook_namespace.to_owned(), + name: options.webhook_service_name.clone(), + namespace: options.webhook_namespace.clone(), path: Some(format!("/convert/{crd_name}")), port: Some(options.socket_addr.port().into()), }), diff --git a/crates/stackable-webhook/src/webhooks/mod.rs b/crates/stackable-webhook/src/webhooks/mod.rs index 47125a3f0..889c9d29e 100644 --- a/crates/stackable-webhook/src/webhooks/mod.rs +++ b/crates/stackable-webhook/src/webhooks/mod.rs @@ -64,8 +64,8 @@ fn create_webhook_client_config( ) -> WebhookClientConfig { WebhookClientConfig { service: Some(ServiceReference { - name: options.webhook_service_name.to_owned(), - namespace: options.webhook_namespace.to_owned(), + name: options.webhook_service_name.clone(), + namespace: options.webhook_namespace.clone(), path: Some(http_path.into()), port: 
Some(options.socket_addr.port().into()), }), diff --git a/crates/stackable-webhook/src/webhooks/mutating_webhook.rs b/crates/stackable-webhook/src/webhooks/mutating_webhook.rs index 9ff9192fd..9f2b18a28 100644 --- a/crates/stackable-webhook/src/webhooks/mutating_webhook.rs +++ b/crates/stackable-webhook/src/webhooks/mutating_webhook.rs @@ -116,6 +116,7 @@ pub struct MutatingWebhook { /// All webhooks need to set the `admissionReviewVersions` to `["v1"]`, as this mutating webhook /// only supports that version! A failure to do so will result in a panic during the /// [`MutatingWebhook`] creation. + #[allow(clippy::struct_field_names)] mutating_webhook_configuration: MutatingWebhookConfiguration, /// The async handler that get's a [`AdmissionRequest`] and returns an [`AdmissionResponse`] diff --git a/crates/xtask/src/crd/mod.rs b/crates/xtask/src/crd/mod.rs index 0b8ca9282..e47a4bd4e 100644 --- a/crates/xtask/src/crd/mod.rs +++ b/crates/xtask/src/crd/mod.rs @@ -54,7 +54,7 @@ macro_rules! write_crd { &merged, &path, "0.0.0-dev", - stackable_operator::shared::yaml::SerializeOptions::default(), + &stackable_operator::shared::yaml::SerializeOptions::default(), ) .with_context(|_| WriteCrdSnafu { path: path.clone() })?; } From 7bc556422c92f98d053a02e39a88ee5f2de14ffc Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 1 Apr 2026 09:45:49 +0200 Subject: [PATCH 09/12] changelog --- crates/stackable-operator/CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/stackable-operator/CHANGELOG.md b/crates/stackable-operator/CHANGELOG.md index 0ff145a27..733a34848 100644 --- a/crates/stackable-operator/CHANGELOG.md +++ b/crates/stackable-operator/CHANGELOG.md @@ -12,7 +12,13 @@ All notable changes to this project will be documented in this file. ### Changed -- BREAKING: `OpaConfig::full_document_url` now takes `&OpaApiVersion` instead of `OpaApiVersion` ([#XXXX]). 
+- BREAKING: `OpaConfig::full_document_url` now takes `&OpaApiVersion` instead of `OpaApiVersion` ([#1186]). +- BREAKING: `EndOfSupportChecker::new` now takes `&EndOfSupportOptions` instead of `EndOfSupportOptions` ([#1186]). +- BREAKING: `Labels::recommended` now takes `&ObjectLabels` instead of `ObjectLabels` ([#1186]). +- BREAKING: `transform_all_roles_to_config` now takes `&HashMap<..., S>` by reference and requires generic `S: BuildHasher` ([#1186]). +- BREAKING: `env_vars_from_rolegroup_config` now requires generic `S: BuildHasher` for the HashMap parameter ([#1186]). +- BREAKING: `FromFragment` impl for `HashMap` now requires `S: BuildHasher + Default` ([#1186]). +- BREAKING: `Merge` impl for `HashMap` now requires `S: BuildHasher` ([#1186]). ### Removed @@ -21,6 +27,7 @@ All notable changes to this project will be documented in this file. [#1178]: https://github.com/stackabletech/operator-rs/pull/1178 [#1182]: https://github.com/stackabletech/operator-rs/pull/1182 +[#1186]: https://github.com/stackabletech/operator-rs/pull/1186 ## [0.108.0] - 2026-03-10 From 0c5e028d9ecc646f290738dffcbdbc92c239c94b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 1 Apr 2026 09:55:31 +0200 Subject: [PATCH 10/12] Move restriction lints --- Cargo.toml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 12c4345fe..713216229 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,10 +90,6 @@ x509-cert = { version = "0.2.5", features = ["builder"] } zeroize = "1.8.1" [workspace.lints.clippy] -unwrap_in_result = "deny" -unwrap_used = "deny" -panic = "deny" - # Enable all pedantic lints (with lower priority so individual lints can override) pedantic = { level = "deny", priority = -1 } @@ -117,6 +113,11 @@ or_fun_call = "deny" derive_partial_eq_without_eq = "deny" unnecessary_struct_initialization = "deny" +# Additional restriction lints we enforce +unwrap_in_result = "deny" +unwrap_used = "deny" +panic = "deny" + # Use O3 in 
tests to improve the RSA key generation speed in the stackable-certs crate [profile.test.package] stackable-certs.opt-level = 3 From 5520d88d62ddaba8a192f6dfa7b59bf119a051e4 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 1 Apr 2026 10:14:57 +0200 Subject: [PATCH 11/12] Silence explicit_deref_methods at calling location --- Cargo.toml | 1 - crates/stackable-operator/src/kvp/key.rs | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 713216229..ab6a20153 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -99,7 +99,6 @@ missing_errors_doc = "allow" must_use_candidate = "allow" return_self_not_must_use = "allow" missing_panics_doc = "allow" -explicit_deref_methods = "allow" cast_possible_truncation = "allow" float_cmp = "allow" cast_sign_loss = "allow" diff --git a/crates/stackable-operator/src/kvp/key.rs b/crates/stackable-operator/src/kvp/key.rs index 0c9b538dd..321cbfde6 100644 --- a/crates/stackable-operator/src/kvp/key.rs +++ b/crates/stackable-operator/src/kvp/key.rs @@ -248,6 +248,8 @@ impl PartialEq for KeyPrefix where T: AsRef, { + // Not sure how to write that differently without getting a warning about recursion + #[expect(clippy::explicit_deref_methods)] fn eq(&self, other: &T) -> bool { self.deref() == other.as_ref() } @@ -332,6 +334,8 @@ impl PartialEq for KeyName where T: AsRef, { + // Not sure how to write that differently without getting a warning about recursion + #[expect(clippy::explicit_deref_methods)] fn eq(&self, other: &T) -> bool { self.deref() == other.as_ref() } From 34b527b2cabeb35f0507b10decdd40fe60b75405 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 1 Apr 2026 10:50:51 +0200 Subject: [PATCH 12/12] Document why we can't deny needless_continue --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index ab6a20153..cc412db64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,8 +103,9 @@ cast_possible_truncation = "allow" 
float_cmp = "allow" cast_sign_loss = "allow" cast_precision_loss = "allow" -needless_continue = "allow" unchecked_time_subtraction = "allow" +# We should be able to deny this, but it lints on code generated by darling, raised https://github.com/TedDriggs/darling/pull/429 +needless_continue = "allow" # Additional nursery lints we enforce use_self = "deny"