Skip to content

Commit f9bea09

Browse files
committed
chore: update references in the crates
The versioned module is imported rather than the individual structs and enums (when there is no conflict, e.g. when also importing a versioned shared struct) so that usages show the version explicitly. There might be times where this isn't possible; for example, once structs and enums are versioned in stackable-operator, there could be multiple modules with the same name. In this case, user-info-fetcher is also versioned with v1alpha1, so it is referred to as user_info_fetcher::v1alpha1 in crd/mod.rs so as not to conflict with the CRD's v1alpha1.
1 parent 6d862a3 commit f9bea09

File tree

8 files changed

+104
-83
lines changed

8 files changed

+104
-83
lines changed

rust/operator-binary/src/controller.rs

Lines changed: 57 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,7 @@ use serde::{Deserialize, Serialize};
1010
use serde_json::json;
1111
use snafu::{OptionExt, ResultExt, Snafu};
1212
use stackable_opa_operator::crd::{
13-
user_info_fetcher, Container, OpaCluster, OpaClusterStatus, OpaConfig, OpaRole, APP_NAME,
14-
DEFAULT_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, OPERATOR_NAME,
13+
user_info_fetcher, v1alpha1, APP_NAME, DEFAULT_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, OPERATOR_NAME,
1514
};
1615
use stackable_operator::{
1716
builder::{
@@ -175,31 +174,31 @@ pub enum Error {
175174
#[snafu(display("failed to apply Service for [{rolegroup}]"))]
176175
ApplyRoleGroupService {
177176
source: stackable_operator::cluster_resources::Error,
178-
rolegroup: RoleGroupRef<OpaCluster>,
177+
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
179178
},
180179

181180
#[snafu(display("failed to build ConfigMap for [{rolegroup}]"))]
182181
BuildRoleGroupConfig {
183182
source: stackable_operator::builder::configmap::Error,
184-
rolegroup: RoleGroupRef<OpaCluster>,
183+
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
185184
},
186185

187186
#[snafu(display("failed to apply ConfigMap for [{rolegroup}]"))]
188187
ApplyRoleGroupConfig {
189188
source: stackable_operator::cluster_resources::Error,
190-
rolegroup: RoleGroupRef<OpaCluster>,
189+
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
191190
},
192191

193192
#[snafu(display("failed to apply DaemonSet for [{rolegroup}]"))]
194193
ApplyRoleGroupDaemonSet {
195194
source: stackable_operator::cluster_resources::Error,
196-
rolegroup: RoleGroupRef<OpaCluster>,
195+
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
197196
},
198197

199198
#[snafu(display("failed to apply patch for DaemonSet for [{rolegroup}]"))]
200199
ApplyPatchRoleGroupDaemonSet {
201200
source: stackable_operator::client::Error,
202-
rolegroup: RoleGroupRef<OpaCluster>,
201+
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
203202
},
204203

205204
#[snafu(display("failed to patch service account"))]
@@ -388,7 +387,7 @@ pub struct OpaClusterConfigDecisionLog {
388387
}
389388

390389
pub async fn reconcile_opa(
391-
opa: Arc<DeserializeGuard<OpaCluster>>,
390+
opa: Arc<DeserializeGuard<v1alpha1::OpaCluster>>,
392391
ctx: Arc<Ctx>,
393392
) -> Result<Action> {
394393
tracing::info!("Starting reconcile");
@@ -404,7 +403,7 @@ pub async fn reconcile_opa(
404403
.spec
405404
.image
406405
.resolve(DOCKER_IMAGE_BASE_NAME, crate::built_info::PKG_VERSION);
407-
let opa_role = OpaRole::Server;
406+
let opa_role = v1alpha1::OpaRole::Server;
408407

409408
let mut cluster_resources = ClusterResources::new(
410409
APP_NAME,
@@ -563,7 +562,7 @@ pub async fn reconcile_opa(
563562
let cluster_operation_cond_builder =
564563
ClusterOperationsConditionBuilder::new(&opa.spec.cluster_operation);
565564

566-
let status = OpaClusterStatus {
565+
let status = v1alpha1::OpaClusterStatus {
567566
conditions: compute_conditions(opa, &[&ds_cond_builder, &cluster_operation_cond_builder]),
568567
};
569568

@@ -583,10 +582,10 @@ pub async fn reconcile_opa(
583582
/// The server-role service is the primary endpoint that should be used by clients that do not perform internal load balancing,
584583
/// including targets outside of the cluster.
585584
pub fn build_server_role_service(
586-
opa: &OpaCluster,
585+
opa: &v1alpha1::OpaCluster,
587586
resolved_product_image: &ResolvedProductImage,
588587
) -> Result<Service> {
589-
let role_name = OpaRole::Server.to_string();
588+
let role_name = v1alpha1::OpaRole::Server.to_string();
590589
let role_svc_name = opa
591590
.server_role_service_name()
592591
.context(RoleServiceNameNotFoundSnafu)?;
@@ -632,9 +631,9 @@ pub fn build_server_role_service(
632631
///
633632
/// This is mostly useful for internal communication between peers, or for clients that perform client-side load balancing.
634633
fn build_rolegroup_service(
635-
opa: &OpaCluster,
634+
opa: &v1alpha1::OpaCluster,
636635
resolved_product_image: &ResolvedProductImage,
637-
rolegroup: &RoleGroupRef<OpaCluster>,
636+
rolegroup: &RoleGroupRef<v1alpha1::OpaCluster>,
638637
) -> Result<Service> {
639638
let prometheus_label =
640639
Label::try_from(("prometheus.io/scrape", "true")).context(BuildLabelSnafu)?;
@@ -677,10 +676,10 @@ fn build_rolegroup_service(
677676

678677
/// The rolegroup [`ConfigMap`] configures the rolegroup based on the configuration given by the administrator
679678
fn build_server_rolegroup_config_map(
680-
opa: &OpaCluster,
679+
opa: &v1alpha1::OpaCluster,
681680
resolved_product_image: &ResolvedProductImage,
682-
rolegroup: &RoleGroupRef<OpaCluster>,
683-
merged_config: &OpaConfig,
681+
rolegroup: &RoleGroupRef<v1alpha1::OpaCluster>,
682+
merged_config: &v1alpha1::OpaConfig,
684683
vector_aggregator_address: Option<&str>,
685684
) -> Result<ConfigMap> {
686685
let mut cm_builder = ConfigMapBuilder::new();
@@ -736,12 +735,12 @@ fn build_server_rolegroup_config_map(
736735
/// policy queries (which are often chained in serial, and block other tasks in the products).
737736
#[allow(clippy::too_many_arguments)]
738737
fn build_server_rolegroup_daemonset(
739-
opa: &OpaCluster,
738+
opa: &v1alpha1::OpaCluster,
740739
resolved_product_image: &ResolvedProductImage,
741-
opa_role: &OpaRole,
742-
rolegroup_ref: &RoleGroupRef<OpaCluster>,
740+
opa_role: &v1alpha1::OpaRole,
741+
rolegroup_ref: &RoleGroupRef<v1alpha1::OpaCluster>,
743742
server_config: &HashMap<PropertyNameKind, BTreeMap<String, String>>,
744-
merged_config: &OpaConfig,
743+
merged_config: &v1alpha1::OpaConfig,
745744
opa_bundle_builder_image: &str,
746745
user_info_fetcher_image: &str,
747746
service_account: &ServiceAccount,
@@ -764,15 +763,15 @@ fn build_server_rolegroup_daemonset(
764763

765764
let mut pb = PodBuilder::new();
766765

767-
let prepare_container_name = Container::Prepare.to_string();
766+
let prepare_container_name = v1alpha1::Container::Prepare.to_string();
768767
let mut cb_prepare =
769768
ContainerBuilder::new(&prepare_container_name).context(IllegalContainerNameSnafu)?;
770769

771-
let bundle_builder_container_name = Container::BundleBuilder.to_string();
770+
let bundle_builder_container_name = v1alpha1::Container::BundleBuilder.to_string();
772771
let mut cb_bundle_builder =
773772
ContainerBuilder::new(&bundle_builder_container_name).context(IllegalContainerNameSnafu)?;
774773

775-
let opa_container_name = Container::Opa.to_string();
774+
let opa_container_name = v1alpha1::Container::Opa.to_string();
776775
let mut cb_opa =
777776
ContainerBuilder::new(&opa_container_name).context(IllegalContainerNameSnafu)?;
778777

@@ -967,9 +966,9 @@ fn build_server_rolegroup_daemonset(
967966
);
968967

969968
match &user_info.backend {
970-
user_info_fetcher::Backend::None {} => {}
971-
user_info_fetcher::Backend::ExperimentalXfscAas(_) => {}
972-
user_info_fetcher::Backend::ActiveDirectory(ad) => {
969+
user_info_fetcher::v1alpha1::Backend::None {} => {}
970+
user_info_fetcher::v1alpha1::Backend::ExperimentalXfscAas(_) => {}
971+
user_info_fetcher::v1alpha1::Backend::ActiveDirectory(ad) => {
973972
pb.add_volume(
974973
SecretClassVolume::new(
975974
ad.kerberos_secret_class_name.clone(),
@@ -1003,7 +1002,7 @@ fn build_server_rolegroup_daemonset(
10031002
.add_volumes_and_mounts(&mut pb, vec![&mut cb_user_info_fetcher])
10041003
.context(UserInfoFetcherTlsVolumeAndMountsSnafu)?;
10051004
}
1006-
user_info_fetcher::Backend::Keycloak(keycloak) => {
1005+
user_info_fetcher::v1alpha1::Backend::Keycloak(keycloak) => {
10071006
pb.add_volume(
10081007
VolumeBuilder::new(USER_INFO_FETCHER_CREDENTIALS_VOLUME_NAME)
10091008
.secret(SecretVolumeSource {
@@ -1035,7 +1034,10 @@ fn build_server_rolegroup_daemonset(
10351034
resolved_product_image,
10361035
CONFIG_VOLUME_NAME,
10371036
LOG_VOLUME_NAME,
1038-
merged_config.logging.containers.get(&Container::Vector),
1037+
merged_config
1038+
.logging
1039+
.containers
1040+
.get(&v1alpha1::Container::Vector),
10391041
ResourceRequirementsBuilder::new()
10401042
.with_cpu_request("250m")
10411043
.with_cpu_limit("500m")
@@ -1092,7 +1094,7 @@ fn build_server_rolegroup_daemonset(
10921094
}
10931095

10941096
pub fn error_policy(
1095-
_obj: Arc<DeserializeGuard<OpaCluster>>,
1097+
_obj: Arc<DeserializeGuard<v1alpha1::OpaCluster>>,
10961098
error: &Error,
10971099
_ctx: Arc<Ctx>,
10981100
) -> Action {
@@ -1104,12 +1106,15 @@ pub fn error_policy(
11041106
}
11051107
}
11061108

1107-
fn build_config_file(merged_config: &OpaConfig) -> String {
1109+
fn build_config_file(merged_config: &v1alpha1::OpaConfig) -> String {
11081110
let mut decision_logging_enabled = DEFAULT_DECISION_LOGGING_ENABLED;
11091111

11101112
if let Some(ContainerLogConfig {
11111113
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
1112-
}) = merged_config.logging.containers.get(&Container::Opa)
1114+
}) = merged_config
1115+
.logging
1116+
.containers
1117+
.get(&v1alpha1::Container::Opa)
11131118
{
11141119
if let Some(config) = log_config.loggers.get("decision") {
11151120
decision_logging_enabled = config.level != LogLevel::NONE;
@@ -1129,15 +1134,18 @@ fn build_config_file(merged_config: &OpaConfig) -> String {
11291134
serde_json::to_string_pretty(&json!(config)).unwrap()
11301135
}
11311136

1132-
fn build_opa_start_command(merged_config: &OpaConfig, container_name: &str) -> String {
1137+
fn build_opa_start_command(merged_config: &v1alpha1::OpaConfig, container_name: &str) -> String {
11331138
let mut file_log_level = DEFAULT_FILE_LOG_LEVEL;
11341139
let mut console_log_level = DEFAULT_CONSOLE_LOG_LEVEL;
11351140
let mut server_log_level = DEFAULT_SERVER_LOG_LEVEL;
11361141
let mut decision_log_level = DEFAULT_DECISION_LOG_LEVEL;
11371142

11381143
if let Some(ContainerLogConfig {
11391144
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
1140-
}) = merged_config.logging.containers.get(&Container::Opa)
1145+
}) = merged_config
1146+
.logging
1147+
.containers
1148+
.get(&v1alpha1::Container::Opa)
11411149
{
11421150
if let Some(AppenderConfig {
11431151
level: Some(log_level),
@@ -1198,7 +1206,10 @@ fn build_opa_start_command(merged_config: &OpaConfig, container_name: &str) -> S
11981206
}
11991207
}
12001208

1201-
fn build_bundle_builder_start_command(merged_config: &OpaConfig, container_name: &str) -> String {
1209+
fn build_bundle_builder_start_command(
1210+
merged_config: &v1alpha1::OpaConfig,
1211+
container_name: &str,
1212+
) -> String {
12021213
let mut console_logging_off = false;
12031214

12041215
// We need to check if the console logging is deactivated (NONE)
@@ -1208,7 +1219,7 @@ fn build_bundle_builder_start_command(merged_config: &OpaConfig, container_name:
12081219
}) = merged_config
12091220
.logging
12101221
.containers
1211-
.get(&Container::BundleBuilder)
1222+
.get(&v1alpha1::Container::BundleBuilder)
12121223
{
12131224
if let Some(AppenderConfig {
12141225
level: Some(log_level),
@@ -1233,13 +1244,13 @@ fn build_bundle_builder_start_command(merged_config: &OpaConfig, container_name:
12331244
}
12341245
}
12351246

1236-
fn bundle_builder_log_level(merged_config: &OpaConfig) -> BundleBuilderLogLevel {
1247+
fn bundle_builder_log_level(merged_config: &v1alpha1::OpaConfig) -> BundleBuilderLogLevel {
12371248
if let Some(ContainerLogConfig {
12381249
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
12391250
}) = merged_config
12401251
.logging
12411252
.containers
1242-
.get(&Container::BundleBuilder)
1253+
.get(&v1alpha1::Container::BundleBuilder)
12431254
{
12441255
if let Some(logger) = log_config
12451256
.loggers
@@ -1252,11 +1263,17 @@ fn bundle_builder_log_level(merged_config: &OpaConfig) -> BundleBuilderLogLevel
12521263
BundleBuilderLogLevel::Info
12531264
}
12541265

1255-
fn build_prepare_start_command(merged_config: &OpaConfig, container_name: &str) -> Vec<String> {
1266+
fn build_prepare_start_command(
1267+
merged_config: &v1alpha1::OpaConfig,
1268+
container_name: &str,
1269+
) -> Vec<String> {
12561270
let mut prepare_container_args = vec![];
12571271
if let Some(ContainerLogConfig {
12581272
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
1259-
}) = merged_config.logging.containers.get(&Container::Prepare)
1273+
}) = merged_config
1274+
.logging
1275+
.containers
1276+
.get(&v1alpha1::Container::Prepare)
12601277
{
12611278
prepare_container_args.push(product_logging::framework::capture_shell_output(
12621279
STACKABLE_LOG_DIR,

rust/operator-binary/src/discovery.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
use crate::controller::{build_recommended_labels, APP_PORT};
22

33
use snafu::{OptionExt, ResultExt, Snafu};
4-
use stackable_opa_operator::crd::{OpaCluster, OpaRole};
4+
use stackable_opa_operator::crd::v1alpha1;
55
use stackable_operator::{
66
builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder},
77
commons::product_image_selection::ResolvedProductImage,
@@ -15,7 +15,7 @@ pub enum Error {
1515
#[snafu(display("object {} is missing metadata to build owner reference", opa))]
1616
ObjectMissingMetadataForOwnerRef {
1717
source: stackable_operator::builder::meta::Error,
18-
opa: ObjectRef<OpaCluster>,
18+
opa: ObjectRef<v1alpha1::OpaCluster>,
1919
},
2020

2121
#[snafu(display("object has no name associated"))]
@@ -38,7 +38,7 @@ pub enum Error {
3838
/// Builds discovery [`ConfigMap`]s for connecting to a [`OpaCluster`] for all expected scenarios
3939
pub fn build_discovery_configmaps(
4040
owner: &impl Resource<DynamicType = ()>,
41-
opa: &OpaCluster,
41+
opa: &v1alpha1::OpaCluster,
4242
resolved_product_image: &ResolvedProductImage,
4343
svc: &Service,
4444
cluster_info: &KubernetesClusterInfo,
@@ -58,7 +58,7 @@ pub fn build_discovery_configmaps(
5858
fn build_discovery_configmap(
5959
name: &str,
6060
owner: &impl Resource<DynamicType = ()>,
61-
opa: &OpaCluster,
61+
opa: &v1alpha1::OpaCluster,
6262
resolved_product_image: &ResolvedProductImage,
6363
svc: &Service,
6464
cluster_info: &KubernetesClusterInfo,
@@ -85,7 +85,7 @@ fn build_discovery_configmap(
8585
.with_recommended_labels(build_recommended_labels(
8686
opa,
8787
&resolved_product_image.app_version_label,
88-
&OpaRole::Server.to_string(),
88+
&v1alpha1::OpaRole::Server.to_string(),
8989
"discovery",
9090
))
9191
.context(ObjectMetaSnafu)?

rust/operator-binary/src/main.rs

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ use std::sync::Arc;
33
use clap::{crate_description, crate_version, Parser};
44
use futures::StreamExt;
55
use product_config::ProductConfigManager;
6-
use stackable_opa_operator::crd::{OpaCluster, APP_NAME, OPERATOR_NAME};
6+
use stackable_opa_operator::crd::{v1alpha1, OpaCluster, APP_NAME, OPERATOR_NAME};
77
use stackable_operator::{
88
cli::{Command, ProductOperatorRun},
99
client::{self, Client},
@@ -18,7 +18,8 @@ use stackable_operator::{
1818
},
1919
logging::controller::report_controller_reconciled,
2020
namespace::WatchNamespace,
21-
CustomResourceExt,
21+
shared::yaml::SerializeOptions,
22+
YamlSchema,
2223
};
2324

2425
use crate::controller::OPA_CONTROLLER_NAME;
@@ -54,7 +55,8 @@ async fn main() -> anyhow::Result<()> {
5455
let opts = Opts::parse();
5556
match opts.cmd {
5657
Command::Crd => {
57-
OpaCluster::print_yaml_schema(built_info::PKG_VERSION)?;
58+
OpaCluster::merged_crd(OpaCluster::V1Alpha1)?
59+
.print_yaml_schema(built_info::PKG_VERSION, SerializeOptions::default())?;
5860
}
5961
Command::Run(OpaRun {
6062
operator_image,
@@ -112,7 +114,7 @@ async fn create_controller(
112114
opa_bundle_builder_image: String,
113115
user_info_fetcher_image: String,
114116
) {
115-
let opa_api: Api<DeserializeGuard<OpaCluster>> = watch_namespace.get_api(&client);
117+
let opa_api: Api<DeserializeGuard<v1alpha1::OpaCluster>> = watch_namespace.get_api(&client);
116118
let daemonsets_api: Api<DeserializeGuard<DaemonSet>> = watch_namespace.get_api(&client);
117119
let configmaps_api: Api<DeserializeGuard<ConfigMap>> = watch_namespace.get_api(&client);
118120
let services_api: Api<DeserializeGuard<Service>> = watch_namespace.get_api(&client);

rust/operator-binary/src/operations/graceful_shutdown.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
use snafu::{ResultExt, Snafu};
2-
use stackable_opa_operator::crd::{OpaConfig, SERVER_GRACEFUL_SHUTDOWN_SAFETY_OVERHEAD};
2+
use stackable_opa_operator::crd::{v1alpha1, SERVER_GRACEFUL_SHUTDOWN_SAFETY_OVERHEAD};
33
use stackable_operator::builder::pod::PodBuilder;
44

55
#[derive(Debug, Snafu)]
@@ -11,7 +11,7 @@ pub enum Error {
1111
}
1212

1313
pub fn add_graceful_shutdown_config(
14-
merged_config: &OpaConfig,
14+
merged_config: &v1alpha1::OpaConfig,
1515
pod_builder: &mut PodBuilder,
1616
) -> Result<(), Error> {
1717
// This must be always set by the merge mechanism, as we provide a default value,

0 commit comments

Comments
 (0)