From bf157a372ec9ed448ba6810852e0e132cea277b9 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 8 May 2026 18:49:08 +0200 Subject: [PATCH 1/5] feat: add framework macros and validated type definitions Introduce the framework module with validated type wrappers that enforce constraints at parse time rather than at each use site. Includes attributed_string_type macro for generating validated newtypes, and typed wrappers for Kubernetes (NamespaceName, Uid) and operator (ProductName, OperatorName, etc.) concepts. Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 208 +++- Cargo.toml | 2 + rust/operator-binary/Cargo.toml | 2 + rust/operator-binary/src/framework.rs | 46 + rust/operator-binary/src/framework/macros.rs | 2 + .../macros/attributed_string_type.rs | 927 ++++++++++++++++++ .../src/framework/macros/constant.rs | 17 + rust/operator-binary/src/framework/types.rs | 3 + .../src/framework/types/common.rs | 68 ++ .../src/framework/types/kubernetes.rs | 191 ++++ .../src/framework/types/operator.rs | 91 ++ rust/operator-binary/src/main.rs | 1 + 12 files changed, 1555 insertions(+), 3 deletions(-) create mode 100644 rust/operator-binary/src/framework.rs create mode 100644 rust/operator-binary/src/framework/macros.rs create mode 100644 rust/operator-binary/src/framework/macros/attributed_string_type.rs create mode 100644 rust/operator-binary/src/framework/macros/constant.rs create mode 100644 rust/operator-binary/src/framework/types.rs create mode 100644 rust/operator-binary/src/framework/types/common.rs create mode 100644 rust/operator-binary/src/framework/types/kubernetes.rs create mode 100644 rust/operator-binary/src/framework/types/operator.rs diff --git a/Cargo.lock b/Cargo.lock index d6e8310c..9f534a8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -830,6 +830,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" 
+version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "foldhash" version = "0.2.0" @@ -977,10 +983,23 @@ checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", ] +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + [[package]] name = "git2" version = "0.20.4" @@ -1042,6 +1061,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash 0.1.5", +] + [[package]] name = "hashbrown" version = "0.16.1" @@ -1050,7 +1078,7 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.2.0", ] [[package]] @@ -1317,6 +1345,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -1352,6 +1386,8 @@ checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" dependencies = [ "equivalent", "hashbrown 0.17.0", + "serde", + "serde_core", ] [[package]] @@ -1664,6 +1700,12 @@ dependencies = [ "spin", ] +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + 
[[package]] name = "libc" version = "0.2.185" @@ -2152,6 +2194,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + [[package]] name = "primeorder" version = "0.13.6" @@ -2233,6 +2285,12 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + [[package]] name = "rand" version = "0.8.6" @@ -2906,6 +2964,7 @@ dependencies = [ "futures 0.3.32", "indoc", "product-config", + "regex", "rstest", "serde", "serde_json", @@ -2915,6 +2974,7 @@ dependencies = [ "strum", "tokio", "tracing", + "uuid", ] [[package]] @@ -3650,6 +3710,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76" +dependencies = [ + "getrandom 0.4.2", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -3689,7 +3760,16 @@ version = "1.0.3+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" dependencies = [ - "wit-bindgen", + "wit-bindgen 0.57.1", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", ] [[package]] @@ -3747,6 +3827,40 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" version = "0.3.95" @@ -3917,12 +4031,100 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + [[package]] name = "wit-bindgen" version = "0.57.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + [[package]] name = "writeable" version = "0.6.3" diff --git a/Cargo.toml b/Cargo.toml index d04c1f1e..119abf53 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ const_format = "0.2" fnv = "1.0" futures = { version = "0.3", features = ["compat"] } indoc = "2.0" +regex = "1" rstest = "0.26" semver = "1.0" serde = { version = "1.0", features = ["derive"] } @@ -32,6 +33,7 @@ snafu = "0.9" strum = { version = "0.28", features = ["derive"] } tokio = { version = "1.40", features = ["full"] } tracing = "0.1" +uuid = { version = "1.16", features = ["v4"] } [patch."https://github.com/stackabletech/operator-rs.git"] # stackable-operator = { git = 
"https://github.com/stackabletech//operator-rs.git", branch = "main" } diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index 2734bb4e..095324c8 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -21,10 +21,12 @@ indoc.workspace = true serde.workspace = true serde_json.workspace = true serde_yaml.workspace = true +regex.workspace = true snafu.workspace = true strum.workspace = true tokio.workspace = true tracing.workspace = true +uuid.workspace = true [build-dependencies] built.workspace = true diff --git a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs new file mode 100644 index 00000000..56600b95 --- /dev/null +++ b/rust/operator-binary/src/framework.rs @@ -0,0 +1,46 @@ +//! Additions to stackable-operator +//! +//! Functions in stackable-operator usually accept generic types like strings and validate the +//! parameters as late as possible. Therefore, nearly all functions have to return a [`Result`] and +//! errors are returned along the call chain. That makes error handling complex because every +//! module re-packages the received error. Also, the validation is repeated if the value is used in +//! different function calls. Sometimes, validation is not necessary if constant values are used, +//! e.g. the name of the operator. +//! +//! This operator uses a different approach. The incoming values are validated as early as possible +//! and wrapped in a fail-safe type. This type is then used along the call chain, validation is not +//! necessary anymore and functions without side effects do not need to return a [`Result`]. +//! +//! However, this operator uses stackable-operator and at the interface, the fail-safe types must +//! be unwrapped and the [`Result`] returned by the stackable-operator function must be handled. +//! This is done by calling [`Result::expect`] which requires thorough testing. +//! +//! 
When the development of this module has progressed and changes become less frequent, then this +//! module can be incorporated into stackable-operator. The module structure should already +//! resemble the one of stackable-operator. + +use types::kubernetes::Uid; + +pub mod macros; +pub mod types; + +/// Has a non-empty name +/// +/// Useful as an object reference; Should not be used to create an object because the name could +/// violate the naming constraints (e.g. maximum length) of the object. +pub trait HasName { + #[allow(dead_code)] + fn to_name(&self) -> String; +} + +/// Has a Kubernetes UID +#[allow(dead_code)] +pub trait HasUid { + fn to_uid(&self) -> Uid; +} + +/// The name is a valid label value +#[allow(dead_code)] +pub trait NameIsValidLabelValue { + fn to_label_value(&self) -> String; +} diff --git a/rust/operator-binary/src/framework/macros.rs b/rust/operator-binary/src/framework/macros.rs new file mode 100644 index 00000000..c25def95 --- /dev/null +++ b/rust/operator-binary/src/framework/macros.rs @@ -0,0 +1,2 @@ +pub mod attributed_string_type; +pub mod constant; diff --git a/rust/operator-binary/src/framework/macros/attributed_string_type.rs b/rust/operator-binary/src/framework/macros/attributed_string_type.rs new file mode 100644 index 00000000..aee756a3 --- /dev/null +++ b/rust/operator-binary/src/framework/macros/attributed_string_type.rs @@ -0,0 +1,927 @@ +use snafu::Snafu; +use strum::{EnumDiscriminants, IntoStaticStr}; + +/// Maximum length of label values +/// +/// Duplicates the private constant [`stackable_operator::kvp::LABEL_VALUE_MAX_LEN`] +pub const MAX_LABEL_VALUE_LENGTH: usize = 63; + +#[derive(Debug, EnumDiscriminants, Snafu)] +#[snafu(visibility(pub))] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("minimum length not met"))] + MinimumLengthNotMet { length: usize, min_length: usize }, + + #[snafu(display("maximum length exceeded"))] + LengthExceeded { length: usize, max_length: usize }, + + 
#[snafu(display("invalid regular expression"))] + InvalidRegex { source: regex::Error }, + + #[snafu(display("regular expression not matched"))] + RegexNotMatched { value: String, regex: &'static str }, + + #[snafu(display("not a valid label value"))] + InvalidLabelValue { + source: stackable_operator::kvp::LabelValueError, + }, + + #[snafu(display("not a valid label name as defined in RFC 1035"))] + InvalidRfc1035LabelName { + source: stackable_operator::validation::Errors, + }, + + #[snafu(display("not a valid DNS subdomain name as defined in RFC 1123"))] + InvalidRfc1123DnsSubdomainName { + source: stackable_operator::validation::Errors, + }, + + #[snafu(display("not a valid label name as defined in RFC 1123"))] + InvalidRfc1123LabelName { + source: stackable_operator::validation::Errors, + }, + + #[snafu(display("not a valid UUID"))] + InvalidUid { source: uuid::Error }, +} + +/// Helper data type to determine combined regular expressions +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Regex { + /// There is a regular expression but it is unknown (because it was too complicated to + /// calculate it). + Unknown, + + /// `MatchAll` equals `Expression(".*")`, but `MatchAll` can be pattern matched in a const + /// context, whereas `Expression(...)` cannot. + MatchAll, + + /// A regular expression + Expression(&'static str), +} + +impl Regex { + /// Combine this regular expression with the given one. + pub const fn combine(self, other: Regex) -> Regex { + match (self, other) { + (_, Regex::MatchAll) => self, + (Regex::MatchAll, _) => other, + // It is hard to combine two regular expressions and nearly impossible to do this in a + // const context. Fortunately, for most of the data types, only one regular expression + // is set. + _ => Regex::Unknown, + } + } +} + +/// Restricted string type with attributes like maximum length. +/// +/// Fully-qualified types are used to ease the import into other modules. 
+/// +/// # Examples +/// +/// ```rust +/// attributed_string_type! { +/// ConfigMapName, +/// "The name of a ConfigMap", +/// "airflow-webserver-default", +/// is_rfc_1123_dns_subdomain_name +/// } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! attributed_string_type { + ($name:ident, $description:literal, $example:literal $(, $attribute:tt)*) => { + #[doc = std::concat!($description, ", e.g. \"", $example, "\"")] + #[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] + pub struct $name(String); + + impl $name { + /// The minimum length + pub const MIN_LENGTH: usize = attributed_string_type!(@min_length $($attribute)*); + + /// The maximum length + pub const MAX_LENGTH: usize = attributed_string_type!(@max_length $($attribute)*); + + /// The regular expression + /// + /// This field is not meant to be used outside of this macro. + pub const REGEX: $crate::framework::macros::attributed_string_type::Regex = attributed_string_type!(@regex $($attribute)*); + } + + impl stackable_operator::config::merge::Atomic for $name {} + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } + } + + impl From<$name> for String { + fn from(value: $name) -> Self { + value.0 + } + } + + impl From<&$name> for String { + fn from(value: &$name) -> Self { + value.0.clone() + } + } + + impl AsRef for $name { + fn as_ref(&self) -> &str { + &self.0 + } + } + + impl std::str::FromStr for $name { + type Err = $crate::framework::macros::attributed_string_type::Error; + + fn from_str(s: &str) -> std::result::Result { + // ResultExt::context is used on most but not all usages of this macro + #[allow(unused_imports)] + use snafu::ResultExt; + + $(attributed_string_type!(@from_str $name, s, $attribute);)* + + Ok(Self(s.to_owned())) + } + } + + impl<'de> serde::Deserialize<'de> for $name { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let string: String = 
serde::Deserialize::deserialize(deserializer)?; + $name::from_str(&string).map_err(|err| serde::de::Error::custom(&err)) + } + } + + impl serde::Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.serialize(serializer) + } + } + + // The JsonSchema implementation requires `max_length`. + impl stackable_operator::schemars::JsonSchema for $name { + fn schema_name() -> std::borrow::Cow<'static, str> { + std::stringify!($name).into() + } + + fn json_schema(_generator: &mut stackable_operator::schemars::generate::SchemaGenerator) -> stackable_operator::schemars::Schema { + stackable_operator::schemars::json_schema!({ + "type": "string", + "minLength": $name::MIN_LENGTH, + "maxLength": if $name::MAX_LENGTH != usize::MAX { + Some($name::MAX_LENGTH) + } else { + // Do not set maxLength if it is usize::MAX. + None + }, + "pattern": match $name::REGEX { + $crate::framework::macros::attributed_string_type::Regex::Expression(regex) => Some(regex), + _ => None + } + }) + } + } + + impl $name { + /// Converts a string to this type, panicking if the string is invalid. + /// + /// Only use this for compile-time constants or pre-validated values. + pub fn from_str_unsafe(s: &str) -> Self { + std::str::FromStr::from_str(s).expect("should be a valid {name}") + } + } + + #[cfg(test)] + impl $name { + // A dead_code warning is emitted if there is no unit test that calls this function. 
+ pub fn test_example() { + Self::from_str_unsafe($example); + } + } + + $(attributed_string_type!(@trait_impl $name, $attribute);)* + }; + + // std::str::FromStr + + (@from_str $name:ident, $s:expr, (min_length = $min_length:expr)) => { + let length = $s.len() as usize; + snafu::ensure!( + length >= $name::MIN_LENGTH, + $crate::framework::macros::attributed_string_type::MinimumLengthNotMetSnafu { + length, + min_length: $name::MIN_LENGTH, + } + ); + }; + (@from_str $name:ident, $s:expr, (max_length = $max_length:expr)) => { + let length = $s.len() as usize; + snafu::ensure!( + length <= $name::MAX_LENGTH, + $crate::framework::macros::attributed_string_type::LengthExceededSnafu { + length, + max_length: $name::MAX_LENGTH, + } + ); + }; + (@from_str $name:ident, $s:expr, (regex = $regex:expr)) => { + let regex = regex::Regex::new($regex).context($crate::framework::macros::attributed_string_type::InvalidRegexSnafu)?; + snafu::ensure!( + regex.is_match($s), + $crate::framework::macros::attributed_string_type::RegexNotMatchedSnafu { + value: $s, + regex: $regex + } + ); + }; + (@from_str $name:ident, $s:expr, is_rfc_1035_label_name) => { + stackable_operator::validation::is_lowercase_rfc_1035_label($s).context($crate::framework::macros::attributed_string_type::InvalidRfc1035LabelNameSnafu)?; + }; + (@from_str $name:ident, $s:expr, is_rfc_1123_dns_subdomain_name) => { + stackable_operator::validation::is_lowercase_rfc_1123_subdomain($s).context($crate::framework::macros::attributed_string_type::InvalidRfc1123DnsSubdomainNameSnafu)?; + }; + (@from_str $name:ident, $s:expr, is_rfc_1123_label_name) => { + stackable_operator::validation::is_lowercase_rfc_1123_label($s).context($crate::framework::macros::attributed_string_type::InvalidRfc1123LabelNameSnafu)?; + }; + (@from_str $name:ident, $s:expr, is_valid_label_value) => { + stackable_operator::kvp::LabelValue::from_str($s).context($crate::framework::macros::attributed_string_type::InvalidLabelValueSnafu)?; + }; + 
(@from_str $name:ident, $s:expr, is_uid) => { + uuid::Uuid::try_parse($s).context($crate::framework::macros::attributed_string_type::InvalidUidSnafu)?; + }; + + // MIN_LENGTH + + (@min_length) => { + // The minimum String length is 0. + 0 + }; + (@min_length (min_length = $min_length:expr) $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + $min_length, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length (max_length = $max_length:expr) $($attribute:tt)*) => { + // max_length has no opinion on the min_length. + attributed_string_type!(@min_length $($attribute)*) + }; + (@min_length (regex = $regex:expr) $($attribute:tt)*) => { + // regex has no influence on the min_length. + attributed_string_type!(@min_length $($attribute)*) + }; + (@min_length is_rfc_1035_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_rfc_1123_dns_subdomain_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_rfc_1123_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_valid_label_value $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_uid $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + uuid::fmt::Hyphenated::LENGTH, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + + // MAX_LENGTH + + (@max_length) => { + // If there is no other max_length defined, then the upper bound is usize::MAX. 
+ usize::MAX + }; + (@max_length (min_length = $min_length:expr) $($attribute:tt)*) => { + // min_length has no opinion on the max_length. + attributed_string_type!(@max_length $($attribute)*) + }; + (@max_length (max_length = $max_length:expr) $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + $max_length, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length (regex = $regex:expr) $($attribute:tt)*) => { + // regex has no influence on the max_length. + attributed_string_type!(@max_length $($attribute)*) + }; + (@max_length is_rfc_1035_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + stackable_operator::validation::RFC_1035_LABEL_MAX_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_rfc_1123_dns_subdomain_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + stackable_operator::validation::RFC_1123_SUBDOMAIN_MAX_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_rfc_1123_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + stackable_operator::validation::RFC_1123_LABEL_MAX_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_valid_label_value $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + $crate::framework::macros::attributed_string_type::MAX_LABEL_VALUE_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_uid $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + uuid::fmt::Hyphenated::LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + + // REGEX + + (@regex) => { + // Everything is allowed if there is no other regular expression. 
+ $crate::framework::macros::attributed_string_type::Regex::MatchAll + }; + (@regex (min_length = $min_length:expr) $($attribute:tt)*) => { + // min_length has no influence on the regular expression. + attributed_string_type!(@regex $($attribute)*) + }; + (@regex (max_length = $max_length:expr) $($attribute:tt)*) => { + // max_length has no influence on the regular expression. + attributed_string_type!(@regex $($attribute)*) + }; + (@regex (regex = $regex:expr) $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::Regex::Expression($regex) + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_rfc_1035_label_name $($attribute:tt)*) => { + // see https://github.com/kubernetes/kubernetes/blob/v1.35.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L228 + $crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z]([-a-z0-9]*[a-z0-9])?$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_rfc_1123_dns_subdomain_name $($attribute:tt)*) => { + // see https://github.com/kubernetes/kubernetes/blob/v1.35.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L193 + $crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_rfc_1123_label_name $($attribute:tt)*) => { + // see https://github.com/kubernetes/kubernetes/blob/v1.35.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L163 + $crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_valid_label_value $($attribute:tt)*) => { + // regular expression from stackable_operator::kvp::label::LABEL_VALUE_REGEX + 
$crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z0-9A-Z]([a-z0-9A-Z-_.]*[a-z0-9A-Z]+)?$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_uid $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::Regex::Expression("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + + // additional constants and trait implementations + + (@trait_impl $name:ident, (min_length = $max_length:expr)) => { + }; + (@trait_impl $name:ident, (max_length = $max_length:expr)) => { + }; + (@trait_impl $name:ident, (regex = $regex:expr)) => { + }; + (@trait_impl $name:ident, is_rfc_1035_label_name) => { + impl $name { + pub const IS_RFC_1035_LABEL_NAME: bool = true; + pub const IS_RFC_1123_LABEL_NAME: bool = true; + pub const IS_RFC_1123_SUBDOMAIN_NAME: bool = true; + } + }; + (@trait_impl $name:ident, is_rfc_1123_dns_subdomain_name) => { + impl $name { + pub const IS_RFC_1123_SUBDOMAIN_NAME: bool = true; + } + }; + (@trait_impl $name:ident, is_rfc_1123_label_name) => { + impl $name { + pub const IS_RFC_1123_LABEL_NAME: bool = true; + pub const IS_RFC_1123_SUBDOMAIN_NAME: bool = true; + } + }; + (@trait_impl $name:ident, is_valid_label_value) => { + impl $name { + pub const IS_VALID_LABEL_VALUE: bool = true; + } + + impl $crate::framework::NameIsValidLabelValue for $name { + fn to_label_value(&self) -> String { + self.0.clone() + } + } + }; + (@trait_impl $name:ident, is_uid) => { + impl From for $name { + fn from(value: uuid::Uuid) -> Self { + Self(value.to_string()) + } + } + + impl From<&uuid::Uuid> for $name { + fn from(value: &uuid::Uuid) -> Self { + Self(value.to_string()) + } + } + }; +} + +/// Returns the minimum of the given values. +/// +/// As opposed to [`std::cmp::min`], this function can be used at compile-time. 
+/// +/// # Examples +/// +/// ```rust +/// assert_eq!(2, min(2, 3)); +/// assert_eq!(4, min(5, 4)); +/// assert_eq!(1, min(1, 1)); +/// ``` +pub const fn min(x: usize, y: usize) -> usize { + if x < y { x } else { y } +} + +/// Returns the maximum of the given values. +/// +/// As opposed to [`std::cmp::max`], this function can be used at compile-time. +/// +/// # Examples +/// +/// ```rust +/// assert_eq!(3, max(2, 3)); +/// assert_eq!(5, max(5, 4)); +/// assert_eq!(1, max(1, 1)); +/// ``` +pub const fn max(x: usize, y: usize) -> usize { + if x < y { y } else { x } +} + +#[cfg(test)] +// `InvalidRegexTest` intentionally contains an invalid regular expression. +#[allow(clippy::invalid_regex)] +mod tests { + use std::str::FromStr; + + use serde_json::{Number, Value, json}; + use stackable_operator::schemars::{JsonSchema, SchemaGenerator}; + use uuid::uuid; + + use super::{ErrorDiscriminants, Regex}; + use crate::framework::NameIsValidLabelValue; + + attributed_string_type! { + MinLengthWithoutConstraintsTest, + "min_length test without constraints", + "" + } + + #[test] + fn test_attributed_string_type_min_length_without_constraints() { + type T = MinLengthWithoutConstraintsTest; + + T::test_example(); + assert_eq!(0, T::MIN_LENGTH); + } + + attributed_string_type! 
{ + MinLengthWithConstraintsTest, + "min_length test with constraints", + "test", + (min_length = 2), // should set the minimum length to 2 + (max_length = 8), // should not affect the minimum length + (regex = "^.{4}$"), // should not affect the minimum length + is_rfc_1035_label_name, // should be overruled by the greater min_length + is_valid_label_value // should be overruled by the greater min_length + } + + #[test] + fn test_attributed_string_type_min_length_with_constraints() { + type T = MinLengthWithConstraintsTest; + + T::test_example(); + assert_eq!(2, T::MIN_LENGTH); + assert_eq!( + Err(ErrorDiscriminants::MinimumLengthNotMet), + T::from_str("a").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + MaxLengthWithoutConstraintsTest, + "max_length test without constraints", + "" + } + + #[test] + fn test_attributed_string_type_max_length_without_constraints() { + type T = MaxLengthWithoutConstraintsTest; + + T::test_example(); + assert_eq!(usize::MAX, T::MAX_LENGTH); + } + + attributed_string_type! { + MaxLengthWithConstraintsTest, + "max_length test with constraints", + "test", + (min_length = 2), // should not affect the maximum length + (max_length = 8), // should set the maximum length to 8 + (regex = "^.{4}$"), // should not affect the maximum length + is_rfc_1035_label_name, // should be overruled by the lower max_length + is_valid_label_value // should be overruled by the lower max_length + } + + #[test] + fn test_attributed_string_type_max_length_with_constraints() { + type T = MaxLengthWithConstraintsTest; + + T::test_example(); + assert_eq!(8, T::MAX_LENGTH); + assert_eq!( + Err(ErrorDiscriminants::LengthExceeded), + T::from_str("test-12345").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! 
{ + RegexWithoutConstraintsTest, + "regex test without constraints", + "" + } + + #[test] + fn test_attributed_string_type_regex_without_constraints() { + type T = RegexWithoutConstraintsTest; + + T::test_example(); + assert_eq!(Regex::MatchAll, T::REGEX); + } + + attributed_string_type! { + RegexWithOneConstraintTest, + "regex test with one constraint", + "test", + (min_length = 2), // should not affect the regular expression + (max_length = 8), // should not affect the regular expression + (regex = "^[est]{4}$") // should set the regular expression to "[est]{4}" + } + + #[test] + fn test_attributed_string_type_regex_with_one_constraint() { + type T = RegexWithOneConstraintTest; + + T::test_example(); + assert_eq!(Regex::Expression("^[est]{4}$"), T::REGEX); + assert_eq!( + Err(ErrorDiscriminants::RegexNotMatched), + T::from_str("t-st").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + RegexWithMultipleConstraintsTest, + "regex test with multiple constraints", + "test", + (min_length = 2), // should not affect the regular expression + (max_length = 8), // should not affect the regular expression + (regex = "^[est]{4}$"), // should not be combinable with is_rfc_1123_dns_subdomain_name + is_rfc_1123_dns_subdomain_name // should not be combinable with regex + } + + #[test] + fn test_attributed_string_type_regex_with_multiple_constraints() { + type T = RegexWithMultipleConstraintsTest; + + T::test_example(); + assert_eq!(Regex::Unknown, T::REGEX); + assert_eq!( + Err(ErrorDiscriminants::RegexNotMatched), + T::from_str("t-st").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! 
{ + InvalidRegexTest, + "regex test with invalid expression", + "test", + (min_length = 2), // should not affect the regular expression + (max_length = 8), // should not affect the regular expression + (regex = "{") // should throw an error at runtime + } + + #[test] + fn test_attributed_string_type_regex_with_invalid_expression() { + type T = InvalidRegexTest; + + // It is not known yet at compile-time that this expression is invalid. + assert_eq!(Regex::Expression("{"), T::REGEX); + assert_eq!( + Err(ErrorDiscriminants::InvalidRegex), + T::from_str("test").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + DisplayFmtTest, + "Display::fmt test", + "test" + } + + #[test] + fn test_attributed_string_type_display_fmt() { + type T = DisplayFmtTest; + + assert_eq!("test", format!("{}", T::from_str_unsafe("test"))); + } + + attributed_string_type! { + StringFromTest, + "String::from test", + "test" + } + + #[test] + fn test_attributed_string_type_string_from() { + type T = StringFromTest; + + T::test_example(); + assert_eq!("test", String::from(T::from_str_unsafe("test"))); + assert_eq!("test", String::from(&T::from_str_unsafe("test"))); + } + + attributed_string_type! 
{
+    DeserializeTest,
+    "serde::Deserialize test",
+    "test",
+    (min_length = 2),
+    (max_length = 4),
+    (regex = "^[est-]+$"),
+    is_rfc_1035_label_name
+}
+
+#[test]
+fn test_attributed_string_type_deserialize() {
+    type T = DeserializeTest;
+
+    T::test_example();
+    assert_eq!(
+        T::from_str_unsafe("test"),
+        serde_json::from_value(Value::String("test".to_owned()))
+            .expect("should be deserializable")
+    );
+    assert_eq!(
+        Err("minimum length not met".to_owned()),
+        serde_json::from_value::<T>(Value::String("e".to_owned()))
+            .map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("maximum length exceeded".to_owned()),
+        serde_json::from_value::<T>(Value::String("testt".to_owned()))
+            .map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("regular expression not matched".to_owned()),
+        serde_json::from_value::<T>(Value::String("abc".to_owned()))
+            .map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("not a valid label name as defined in RFC 1035".to_owned()),
+        serde_json::from_value::<T>(Value::String("-tst".to_owned()))
+            .map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("invalid type: null, expected a string".to_owned()),
+        serde_json::from_value::<T>(Value::Null).map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("invalid type: boolean `true`, expected a string".to_owned()),
+        serde_json::from_value::<T>(Value::Bool(true)).map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("invalid type: integer `1`, expected a string".to_owned()),
+        serde_json::from_value::<T>(Value::Number(
+            Number::from_i128(1).expect("should be a valid number")
+        ))
+        .map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("invalid type: sequence, expected a string".to_owned()),
+        serde_json::from_value::<T>(Value::Array(vec![])).map_err(|err| err.to_string())
+    );
+    assert_eq!(
+        Err("invalid type: map, expected a string".to_owned()),
+        serde_json::from_value::<T>(Value::Object(serde_json::Map::new()))
+            .map_err(|err| err.to_string())
+    );
+}
+
+attributed_string_type!
{
+    SerializeTest,
+    "serde::Serialize test",
+    "test"
+}
+
+#[test]
+fn test_attributed_string_type_serialize() {
+    type T = SerializeTest;
+
+    T::test_example();
+    assert_eq!(
+        "\"test\"".to_owned(),
+        serde_json::to_string(&T::from_str_unsafe("test")).expect("should be serializable")
+    );
+}
+
+attributed_string_type! {
+    JsonSchemaWithoutConstraintsTest,
+    "JsonSchema test without constraints",
+    "test"
+}
+
+#[test]
+fn test_attributed_string_type_json_schema_without_constraints() {
+    type T = JsonSchemaWithoutConstraintsTest;
+
+    T::test_example();
+    assert_eq!("JsonSchemaWithoutConstraintsTest", T::schema_name());
+    assert_eq!(
+        json!({
+            "type": "string",
+            "minLength": 0,
+            "maxLength": None::<usize>,
+            "pattern": None::<String>
+        }),
+        T::json_schema(&mut SchemaGenerator::default())
+    );
+}
+
+attributed_string_type! {
+    JsonSchemaWithConstraintsTest,
+    "JsonSchema test with constraints",
+    "test",
+    (min_length = 4),
+    (max_length = 8),
+    (regex = "^[est]+$")
+}
+
+#[test]
+fn test_attributed_string_type_json_schema_with_constraints() {
+    type T = JsonSchemaWithConstraintsTest;
+
+    T::test_example();
+    assert_eq!("JsonSchemaWithConstraintsTest", T::schema_name());
+    assert_eq!(
+        json!({
+            "type": "string",
+            "minLength": 4,
+            "maxLength": 8,
+            "pattern": "^[est]+$"
+        }),
+        T::json_schema(&mut SchemaGenerator::default())
+    );
+}
+
+attributed_string_type! {
+    IsRfc1035LabelNameTest,
+    "is_rfc_1035_label_name test",
+    "a-b",
+    is_rfc_1035_label_name
+}
+
+#[test]
+fn test_attributed_string_type_is_rfc_1035_label_name() {
+    type T = IsRfc1035LabelNameTest;
+
+    let _ = T::IS_RFC_1035_LABEL_NAME;
+    let _ = T::IS_RFC_1123_LABEL_NAME;
+    let _ = T::IS_RFC_1123_SUBDOMAIN_NAME;
+
+    T::test_example();
+    assert_eq!(
+        Err(ErrorDiscriminants::InvalidRfc1035LabelName),
+        T::from_str("A").map_err(ErrorDiscriminants::from)
+    );
+}
+
+attributed_string_type!
{ + IsRfc1123DnsSubdomainNameTest, + "is_rfc_1123_dns_subdomain_name test", + "a-b.c", + is_rfc_1123_dns_subdomain_name + } + + #[test] + fn test_attributed_string_type_is_rfc_1123_dns_subdomain_name() { + type T = IsRfc1123DnsSubdomainNameTest; + + let _ = T::IS_RFC_1123_SUBDOMAIN_NAME; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidRfc1123DnsSubdomainName), + T::from_str("A").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + IsRfc1123LabelNameTest, + "is_rfc_1123_label_name test", + "1-a", + is_rfc_1123_label_name + } + + #[test] + fn test_attributed_string_type_is_rfc_1123_label_name() { + type T = IsRfc1123LabelNameTest; + + let _ = T::IS_RFC_1123_LABEL_NAME; + let _ = T::IS_RFC_1123_SUBDOMAIN_NAME; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidRfc1123LabelName), + T::from_str("A").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + IsValidLabelValueTest, + "is_valid_label_value test", + "a-_.1", + is_valid_label_value + } + + #[test] + fn test_attributed_string_type_is_valid_label_value() { + type T = IsValidLabelValueTest; + + let _ = T::IS_VALID_LABEL_VALUE; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidLabelValue), + T::from_str("invalid label value").map_err(ErrorDiscriminants::from) + ); + assert_eq!( + "label-value", + T::from_str_unsafe("label-value").to_label_value() + ); + } + + attributed_string_type! 
{ + IsUidTest, + "is_uid test", + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + is_uid + } + + #[test] + fn test_attributed_string_type_is_uid() { + type T = IsUidTest; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidUid), + T::from_str("invalid UID").map_err(ErrorDiscriminants::from) + ); + assert_eq!( + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + T::from(uuid!("c27b3971-ca72-42c1-80a4-abdfc1db0ddd")).to_string() + ); + assert_eq!( + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + T::from(&uuid!("c27b3971-ca72-42c1-80a4-abdfc1db0ddd")).to_string() + ); + } +} diff --git a/rust/operator-binary/src/framework/macros/constant.rs b/rust/operator-binary/src/framework/macros/constant.rs new file mode 100644 index 00000000..ae4e9c69 --- /dev/null +++ b/rust/operator-binary/src/framework/macros/constant.rs @@ -0,0 +1,17 @@ +/// Use [`std::sync::LazyLock`] to define a static "constant" from a string. +/// +/// The string is converted into the given type with [`std::str::FromStr::from_str`]. +/// +/// # Examples +/// +/// ```rust +/// constant!(DATA_VOLUME_NAME: VolumeName = "data"); +/// constant!(pub CONFIG_VOLUME_NAME: VolumeName = "config"); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! constant { + ($qualifier:vis $name:ident: $type:ident = $value:literal) => { + $qualifier static $name: std::sync::LazyLock<$type> = + std::sync::LazyLock::new(|| $type::from_str($value).expect("should be a valid $name")); + }; +} diff --git a/rust/operator-binary/src/framework/types.rs b/rust/operator-binary/src/framework/types.rs new file mode 100644 index 00000000..65f61166 --- /dev/null +++ b/rust/operator-binary/src/framework/types.rs @@ -0,0 +1,3 @@ +pub mod common; +pub mod kubernetes; +pub mod operator; diff --git a/rust/operator-binary/src/framework/types/common.rs b/rust/operator-binary/src/framework/types/common.rs new file mode 100644 index 00000000..3d7326ef --- /dev/null +++ b/rust/operator-binary/src/framework/types/common.rs @@ -0,0 +1,68 @@ +//! 
Common types that do not belong (yet) to a more specific module +use snafu::{ResultExt, Snafu}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to convert to port number"))] + ConvertToPortNumber { source: std::num::TryFromIntError }, +} + +/// A port number +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Port(pub u16); + +impl std::fmt::Display for Port { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl From for Port { + fn from(value: u16) -> Self { + Port(value) + } +} + +impl From for i32 { + fn from(value: Port) -> Self { + value.0 as i32 + } +} + +impl TryFrom for Port { + type Error = Error; + + fn try_from(value: i32) -> Result { + Ok(Port( + u16::try_from(value).context(ConvertToPortNumberSnafu)?, + )) + } +} + +#[cfg(test)] +mod tests { + + use super::{ErrorDiscriminants, Port}; + + #[test] + fn test_port_fmt() { + assert_eq!("0".to_owned(), Port(0).to_string()); + assert_eq!("65535".to_owned(), Port(65535).to_string()); + } + + #[test] + fn test_port_try_from_i32() { + assert_eq!(Some(Port(0)), Port::try_from(0).ok()); + assert_eq!(Some(Port(65535)), Port::try_from(65535).ok()); + assert_eq!( + Err(ErrorDiscriminants::ConvertToPortNumber), + Port::try_from(-1).map_err(ErrorDiscriminants::from) + ); + assert_eq!( + Err(ErrorDiscriminants::ConvertToPortNumber), + Port::try_from(65536).map_err(ErrorDiscriminants::from) + ); + } +} diff --git a/rust/operator-binary/src/framework/types/kubernetes.rs b/rust/operator-binary/src/framework/types/kubernetes.rs new file mode 100644 index 00000000..3902e5a0 --- /dev/null +++ b/rust/operator-binary/src/framework/types/kubernetes.rs @@ -0,0 +1,191 @@ +//! 
Kubernetes (resource) names +use std::str::FromStr; + +use stackable_operator::validation::{RFC_1123_LABEL_MAX_LENGTH, RFC_1123_SUBDOMAIN_MAX_LENGTH}; + +use crate::attributed_string_type; + +attributed_string_type! { + ConfigMapName, + "The name of a ConfigMap", + "airflow-webserver-default", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ConfigMapKey, + "The key for a ConfigMap", + "webserver_config.py", + (min_length = 1), + // see https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L435-L451 + (max_length = RFC_1123_SUBDOMAIN_MAX_LENGTH), + (regex = "^[-._a-zA-Z0-9]+$") +} + +attributed_string_type! { + ContainerName, + "The name of a container in a Pod", + "airflow", + is_rfc_1123_label_name +} + +attributed_string_type! { + ClusterRoleName, + "The name of a ClusterRole", + "airflow-clusterrole", + // On the one hand, ClusterRoles must only contain characters that are allowed for DNS + // subdomain names, on the other hand, their length does not seem to be restricted – at least + // on Kind. However, 253 characters are sufficient for the Stackable operators, and to avoid + // problems on other Kubernetes providers, the length is restricted here. + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + Hostname, + "A hostname", + "example.com", + (min_length = 1), + (max_length = 253), + // see https://en.wikipedia.org/wiki/Hostname#Syntax + (regex = "^[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?(\\.[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?)*\\.?$") +} + +attributed_string_type! { + ListenerName, + "The name of a Listener", + "airflow-webserver-default", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ListenerClassName, + "The name of a ListenerClass", + "external-stable", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! 
{ + NamespaceName, + "The name of a Namespace", + "stackable-operators", + is_rfc_1123_label_name, + is_valid_label_value +} + +attributed_string_type! { + PersistentVolumeClaimName, + "The name of a PersistentVolumeClaim", + "config", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + RoleBindingName, + "The name of a RoleBinding", + "airflow-rolebinding", + // On the one hand, RoleBindings must only contain characters that are allowed for DNS + // subdomain names, on the other hand, their length does not seem to be restricted – at least + // on Kind. However, 253 characters are sufficient for the Stackable operators, and to avoid + // problems on other Kubernetes providers, the length is restricted here. + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + SecretClassName, + "The name of a SecretClass", + "tls", + // The secret class name is used in an annotation on the tls volume. + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + SecretKey, + "The key for a Secret", + "accessKey", + (min_length = 1), + // see https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L435-L451 + (max_length = RFC_1123_SUBDOMAIN_MAX_LENGTH), + (regex = "^[-._a-zA-Z0-9]+$") +} + +attributed_string_type! { + SecretName, + "The name of a Secret", + "airflow-internal-secret", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ServiceAccountName, + "The name of a ServiceAccount", + "airflow-serviceaccount", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ServiceName, + "The name of a Service", + "airflow-webserver-default", + is_rfc_1035_label_name, + is_valid_label_value +} + +attributed_string_type! 
{ + StatefulSetName, + "The name of a StatefulSet", + "airflow-webserver-default", + (max_length = + // see https://github.com/kubernetes/kubernetes/issues/64023 + RFC_1123_LABEL_MAX_LENGTH + - 1 /* dash */ + - 10 /* digits for the controller-revision-hash label */), + is_rfc_1123_label_name, + is_valid_label_value +} + +attributed_string_type! { + Uid, + "A UID", + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + is_uid, + is_valid_label_value +} + +attributed_string_type! { + VolumeName, + "The name of a Volume", + "config", + is_rfc_1123_label_name, + is_valid_label_value +} + +#[cfg(test)] +mod tests { + use super::{ + ClusterRoleName, ConfigMapKey, ConfigMapName, ContainerName, Hostname, ListenerClassName, + ListenerName, NamespaceName, PersistentVolumeClaimName, RoleBindingName, SecretClassName, + SecretKey, SecretName, ServiceAccountName, ServiceName, StatefulSetName, Uid, VolumeName, + }; + + #[test] + fn test_attributed_string_type_examples() { + ConfigMapName::test_example(); + ConfigMapKey::test_example(); + ContainerName::test_example(); + ClusterRoleName::test_example(); + Hostname::test_example(); + ListenerName::test_example(); + ListenerClassName::test_example(); + NamespaceName::test_example(); + PersistentVolumeClaimName::test_example(); + RoleBindingName::test_example(); + SecretClassName::test_example(); + SecretKey::test_example(); + SecretName::test_example(); + ServiceAccountName::test_example(); + ServiceName::test_example(); + StatefulSetName::test_example(); + Uid::test_example(); + VolumeName::test_example(); + } +} diff --git a/rust/operator-binary/src/framework/types/operator.rs b/rust/operator-binary/src/framework/types/operator.rs new file mode 100644 index 00000000..21e71c7e --- /dev/null +++ b/rust/operator-binary/src/framework/types/operator.rs @@ -0,0 +1,91 @@ +//! Names for operators + +use std::str::FromStr; + +use crate::attributed_string_type; + +attributed_string_type! 
{
+    ProductName,
+    "The name of a product",
+    "airflow",
+    // A suffix is added to produce a label value. A corresponding compile-time check ensures that
+    // max_length cannot be set higher.
+    (max_length = 54),
+    is_rfc_1123_dns_subdomain_name,
+    is_valid_label_value
+}
+
+attributed_string_type! {
+    ProductVersion,
+    "The version of a product",
+    "2.10.4",
+    is_valid_label_value
+}
+
+attributed_string_type! {
+    ClusterName,
+    "The name of a cluster/stacklet",
+    "my-airflow-cluster",
+    // Suffixes are added to produce resource names. Corresponding compile-time checks ensure that
+    // max_length cannot be set higher.
+    (max_length = 24),
+    is_rfc_1035_label_name,
+    is_valid_label_value
+}
+
+attributed_string_type! {
+    ControllerName,
+    "The name of a controller in an operator",
+    "airflowcluster",
+    is_valid_label_value
+}
+
+attributed_string_type! {
+    OperatorName,
+    "The name of an operator",
+    "airflow.stackable.tech",
+    is_valid_label_value
+}
+
+attributed_string_type! {
+    RoleGroupName,
+    "The name of a role group",
+    "default",
+    // The role-group name is used to produce resource names. To make sure that all resource names
+    // are valid, max_length is restricted. Compile-time checks ensure that max_length cannot be
+    // set higher if not other names like the RoleName are set lower accordingly.
+    (max_length = 16),
+    is_rfc_1123_label_name,
+    is_valid_label_value
+}
+
+attributed_string_type! {
+    RoleName,
+    "The name of a role",
+    "webserver",
+    // The role name is used to produce resource names. To make sure that all resource names are
+    // valid, max_length is restricted. Compile-time checks ensure that max_length cannot be set
+    // higher if not other names like the RoleGroupName are set lower accordingly.
+ (max_length = 10), + is_rfc_1123_label_name, + is_valid_label_value +} + +#[cfg(test)] +mod tests { + use super::{ + ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, + RoleName, + }; + + #[test] + fn test_attributed_string_type_examples() { + ProductName::test_example(); + ProductVersion::test_example(); + ClusterName::test_example(); + ControllerName::test_example(); + OperatorName::test_example(); + RoleGroupName::test_example(); + RoleName::test_example(); + } +} diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 6cc89e99..eb2ebc1c 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -43,6 +43,7 @@ mod config; mod controller_commons; mod crd; mod env_vars; +mod framework; mod operations; mod product_logging; mod service; From f2bcb5ce59c441803c52a15f7ef8da41999c3287 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 8 May 2026 18:50:10 +0200 Subject: [PATCH 2/5] feat: add framework builder, kvp, and utility modules Add the remaining framework infrastructure: builder helpers for metadata, PDB, pod containers, and statefulsets; KVP label generation for recommended and selector labels; product logging configuration; and role/role-group utility functions. All modules are marked #[allow(dead_code)] pending controller integration. 
Co-Authored-By: Claude Opus 4.6 --- rust/operator-binary/src/framework.rs | 14 + rust/operator-binary/src/framework/builder.rs | 8 + .../src/framework/builder/meta.rs | 108 +++++ .../src/framework/builder/pdb.rs | 161 ++++++++ .../src/framework/builder/pod.rs | 2 + .../src/framework/builder/pod/container.rs | 367 +++++++++++++++++ .../src/framework/builder/pod/volume.rs | 48 +++ .../src/framework/builder/statefulset.rs | 118 ++++++ .../src/framework/cluster_resources.rs | 50 +++ .../src/framework/controller_utils.rs | 211 ++++++++++ rust/operator-binary/src/framework/kvp.rs | 1 + .../src/framework/kvp/label.rs | 196 ++++++++++ .../src/framework/product_logging.rs | 1 + .../framework/product_logging/framework.rs | 127 ++++++ .../src/framework/role_group_utils.rs | 151 +++++++ .../src/framework/role_utils.rs | 368 ++++++++++++++++++ 16 files changed, 1931 insertions(+) create mode 100644 rust/operator-binary/src/framework/builder.rs create mode 100644 rust/operator-binary/src/framework/builder/meta.rs create mode 100644 rust/operator-binary/src/framework/builder/pdb.rs create mode 100644 rust/operator-binary/src/framework/builder/pod.rs create mode 100644 rust/operator-binary/src/framework/builder/pod/container.rs create mode 100644 rust/operator-binary/src/framework/builder/pod/volume.rs create mode 100644 rust/operator-binary/src/framework/builder/statefulset.rs create mode 100644 rust/operator-binary/src/framework/cluster_resources.rs create mode 100644 rust/operator-binary/src/framework/controller_utils.rs create mode 100644 rust/operator-binary/src/framework/kvp.rs create mode 100644 rust/operator-binary/src/framework/kvp/label.rs create mode 100644 rust/operator-binary/src/framework/product_logging.rs create mode 100644 rust/operator-binary/src/framework/product_logging/framework.rs create mode 100644 rust/operator-binary/src/framework/role_group_utils.rs create mode 100644 rust/operator-binary/src/framework/role_utils.rs diff --git 
a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs index 56600b95..8bc3c995 100644 --- a/rust/operator-binary/src/framework.rs +++ b/rust/operator-binary/src/framework.rs @@ -21,7 +21,21 @@ use types::kubernetes::Uid; +#[allow(dead_code)] +pub mod builder; +#[allow(dead_code)] +pub mod cluster_resources; +#[allow(dead_code)] +pub mod controller_utils; +#[allow(dead_code)] +pub mod kvp; pub mod macros; +#[allow(dead_code)] +pub mod product_logging; +#[allow(dead_code)] +pub mod role_group_utils; +#[allow(dead_code)] +pub mod role_utils; pub mod types; /// Has a non-empty name diff --git a/rust/operator-binary/src/framework/builder.rs b/rust/operator-binary/src/framework/builder.rs new file mode 100644 index 00000000..a6530b5d --- /dev/null +++ b/rust/operator-binary/src/framework/builder.rs @@ -0,0 +1,8 @@ +#[allow(dead_code)] +pub mod meta; +#[allow(dead_code)] +pub mod pdb; +#[allow(dead_code)] +pub mod pod; +#[allow(dead_code)] +pub mod statefulset; diff --git a/rust/operator-binary/src/framework/builder/meta.rs b/rust/operator-binary/src/framework/builder/meta.rs new file mode 100644 index 00000000..7ebb3cc7 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/meta.rs @@ -0,0 +1,108 @@ +use stackable_operator::{ + builder::meta::OwnerReferenceBuilder, + k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference, kube::Resource, +}; + +use crate::framework::{HasName, HasUid}; + +/// Infallible variant of +/// [`stackable_operator::builder::meta::ObjectMetaBuilder::ownerreference_from_resource`] +pub fn ownerreference_from_resource( + resource: &(impl Resource + HasName + HasUid), + block_owner_deletion: Option, + controller: Option, +) -> OwnerReference { + OwnerReferenceBuilder::new() + // Set api_version, kind, name and additionally the UID if it exists. + .initialize_from_resource(resource) + // Ensure that the name is set. + .name(resource.to_name()) + // Ensure that the UID is set. 
+ .uid(resource.to_uid().to_string()) + .block_owner_deletion_opt(block_owner_deletion) + .controller_opt(controller) + .build() + .expect( + "OwnerReference should be created because the resource has an api_version, kind, name \ + and uid.", + ) +} + +#[cfg(test)] +mod tests { + use std::borrow::Cow; + + use stackable_operator::{ + k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta, kube::Resource, + }; + + use crate::framework::{ + HasName, HasUid, builder::meta::ownerreference_from_resource, types::kubernetes::Uid, + }; + + struct TestCluster { + object_meta: ObjectMeta, + } + + impl TestCluster { + fn new() -> Self { + TestCluster { + object_meta: ObjectMeta { + name: Some("test-cluster".to_owned()), + uid: Some("a6b89911-d48e-4328-88d6-b9251226583d".to_owned()), + ..ObjectMeta::default() + }, + } + } + } + + impl Resource for TestCluster { + type DynamicType = (); + type Scope = (); + + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("AirflowCluster") + } + + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("airflow.stackable.tech") + } + + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("v1alpha2") + } + + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("airflowclusters") + } + + fn meta(&self) -> &ObjectMeta { + &self.object_meta + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.object_meta + } + } + + impl HasName for TestCluster { + fn to_name(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + impl HasUid for TestCluster { + fn to_uid(&self) -> Uid { + Uid::from_str_unsafe(&self.object_meta.uid.clone().expect("set in new()")) + } + } + + #[test] + fn test_ownerreference_from_resource() { + let owner_ref = ownerreference_from_resource(&TestCluster::new(), Some(true), Some(true)); + assert_eq!(owner_ref.name, "test-cluster"); + assert_eq!(owner_ref.uid, "a6b89911-d48e-4328-88d6-b9251226583d"); + assert_eq!(owner_ref.controller, 
Some(true)); + assert_eq!(owner_ref.block_owner_deletion, Some(true)); + } +} diff --git a/rust/operator-binary/src/framework/builder/pdb.rs b/rust/operator-binary/src/framework/builder/pdb.rs new file mode 100644 index 00000000..5fca4d1e --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pdb.rs @@ -0,0 +1,161 @@ +use stackable_operator::{ + builder::pdb::PodDisruptionBudgetBuilder, + k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector, + kube::{Resource, api::ObjectMeta}, +}; + +use crate::framework::{ + HasName, HasUid, NameIsValidLabelValue, + types::operator::{ControllerName, OperatorName, ProductName, RoleName}, +}; + +/// Infallible variant of +/// [`stackable_operator::builder::pdb::PodDisruptionBudgetBuilder::new_with_role`] +pub fn pod_disruption_budget_builder_with_role( + owner: &(impl Resource + HasName + NameIsValidLabelValue + HasUid), + product_name: &ProductName, + role_name: &RoleName, + operator_name: &OperatorName, + controller_name: &ControllerName, +) -> PodDisruptionBudgetBuilder { + PodDisruptionBudgetBuilder::new_with_role( + owner, + &product_name.to_label_value(), + &role_name.to_label_value(), + &operator_name.to_label_value(), + &controller_name.to_label_value(), + ) + .expect( + "PodDisruptionBudgetBuilder should be created because the owner has an object name and UID \ + and all given parameters produce valid label values.", + ) +} + +#[cfg(test)] +mod tests { + use std::borrow::Cow; + + use stackable_operator::{ + k8s_openapi::{ + api::policy::v1::{PodDisruptionBudget, PodDisruptionBudgetSpec}, + apimachinery::pkg::{ + apis::meta::v1::{LabelSelector, ObjectMeta, OwnerReference}, + util::intstr::IntOrString, + }, + }, + kube::Resource, + }; + + use crate::framework::{ + HasName, HasUid, NameIsValidLabelValue, + builder::pdb::pod_disruption_budget_builder_with_role, + types::{ + kubernetes::Uid, + operator::{ControllerName, OperatorName, ProductName, RoleName}, + }, + }; + + struct Cluster { + object_meta: 
ObjectMeta, + } + + impl Cluster { + fn new() -> Self { + Cluster { + object_meta: ObjectMeta { + name: Some("cluster-name".to_owned()), + uid: Some("a6b89911-d48e-4328-88d6-b9251226583d".to_owned()), + ..ObjectMeta::default() + }, + } + } + } + + impl Resource for Cluster { + type DynamicType = (); + type Scope = (); + + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("AirflowCluster") } + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflow.stackable.tech") } + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha2") } + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflowclusters") } + + fn meta(&self) -> &ObjectMeta { &self.object_meta } + fn meta_mut(&mut self) -> &mut ObjectMeta { &mut self.object_meta } + } + + impl HasName for Cluster { + fn to_name(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + impl HasUid for Cluster { + fn to_uid(&self) -> Uid { + Uid::from_str_unsafe(&self.object_meta.uid.clone().expect("set in new()")) + } + } + + impl NameIsValidLabelValue for Cluster { + fn to_label_value(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + #[test] + fn test_pod_disruption_budget_builder_with_role() { + let actual_pdb = pod_disruption_budget_builder_with_role( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &RoleName::from_str_unsafe("my-role"), + &OperatorName::from_str_unsafe("my-operator"), + &ControllerName::from_str_unsafe("my-controller"), + ) + .with_max_unavailable(2) + .build(); + + let expected_pdb = PodDisruptionBudget { + metadata: ObjectMeta { + labels: Some( + [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/managed-by", "my-operator_my-controller"), + ("app.kubernetes.io/name", "my-product"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(), + ), + name: Some("cluster-name-my-role".to_owned()), + 
owner_references: Some(vec![OwnerReference { + api_version: "airflow.stackable.tech/v1alpha2".to_owned(), + controller: Some(true), + kind: "AirflowCluster".to_owned(), + name: "cluster-name".to_owned(), + uid: "a6b89911-d48e-4328-88d6-b9251226583d".to_owned(), + ..OwnerReference::default() + }]), + ..ObjectMeta::default() + }, + spec: Some(PodDisruptionBudgetSpec { + max_unavailable: Some(IntOrString::Int(2)), + selector: Some(LabelSelector { + match_labels: Some( + [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/name", "my-product"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(), + ), + ..LabelSelector::default() + }), + ..PodDisruptionBudgetSpec::default() + }), + ..PodDisruptionBudget::default() + }; + + assert_eq!(expected_pdb, actual_pdb); + } +} diff --git a/rust/operator-binary/src/framework/builder/pod.rs b/rust/operator-binary/src/framework/builder/pod.rs new file mode 100644 index 00000000..df93bd44 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pod.rs @@ -0,0 +1,2 @@ +pub mod container; +pub mod volume; diff --git a/rust/operator-binary/src/framework/builder/pod/container.rs b/rust/operator-binary/src/framework/builder/pod/container.rs new file mode 100644 index 00000000..244bf003 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pod/container.rs @@ -0,0 +1,367 @@ +use std::{collections::BTreeMap, fmt::Display, str::FromStr}; + +use snafu::Snafu; +use stackable_operator::{ + builder::pod::container::{ContainerBuilder, FieldPathEnvVar}, + k8s_openapi::api::core::v1::{ConfigMapKeySelector, EnvVar, EnvVarSource, ObjectFieldSelector}, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::framework::types::kubernetes::{ConfigMapKey, ConfigMapName, ContainerName}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display( + "invalid environment variable name: a 
valid environment variable name must not be empty \ + and must consist only of printable ASCII characters other than '='" + ))] + ParseEnvVarName { env_var_name: String }, +} + +/// Infallible variant of [`stackable_operator::builder::pod::container::ContainerBuilder::new`] +pub fn new_container_builder(container_name: &ContainerName) -> ContainerBuilder { + ContainerBuilder::new(container_name.as_ref()).expect("should be a valid container name") +} + +// TODO Use attributed_string_type instead +/// Validated environment variable name +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct EnvVarName(String); + +impl EnvVarName { + /// Creates an [`EnvVarName`] from the given string and panics if the validation failed + /// + /// Use this only with constant names that are also tested in unit tests! + pub fn from_str_unsafe(s: &str) -> Self { + EnvVarName::from_str(s).expect("should be a valid environment variable name") + } +} + +impl Display for EnvVarName { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl FromStr for EnvVarName { + type Err = Error; + + fn from_str(s: &str) -> Result { + // The length of environment variable names seems not to be restricted. + + if !s.is_empty() && s.chars().all(|c| matches!(c, ' '..='<' | '>'..='~')) { + Ok(Self(s.to_owned())) + } else { + Err(Error::ParseEnvVarName { + env_var_name: s.to_owned(), + }) + } + } +} + +/// A set of [`EnvVar`]s +/// +/// The environment variable names in the set are unique. +#[derive(Clone, Debug, Default, PartialEq)] +pub struct EnvVarSet(BTreeMap); + +impl EnvVarSet { + /// Creates an empty [`EnvVarSet`] + pub fn new() -> Self { + Self::default() + } + + /// Returns a reference to the [`EnvVar`] with the given name + pub fn get(&self, env_var_name: &EnvVarName) -> Option<&EnvVar> { + self.0.get(env_var_name) + } + + /// Moves all [`EnvVar`]s from the given set into this one. 
+ /// + /// [`EnvVar`]s with the same name are overridden. + pub fn merge(mut self, mut env_var_set: EnvVarSet) -> Self { + self.0.append(&mut env_var_set.0); + + self + } + + /// Adds the given [`EnvVar`]s to this set + /// + /// [`EnvVar`]s with the same name are overridden. + pub fn with_values(self, env_vars: I) -> Self + where + I: IntoIterator, + V: Into, + { + env_vars + .into_iter() + .fold(self, |extended_env_vars, (name, value)| { + extended_env_vars.with_value(&name, value) + }) + } + + /// Adds an environment variable with the given name and string value to this set + /// + /// An [`EnvVar`] with the same name is overridden. + pub fn with_value(mut self, name: &EnvVarName, value: impl Into) -> Self { + self.0.insert( + name.clone(), + EnvVar { + name: name.to_string(), + value: Some(value.into()), + value_from: None, + }, + ); + + self + } + + /// Adds an environment variable with the given name and field path to this set + /// + /// An [`EnvVar`] with the same name is overridden. + pub fn with_field_path(mut self, name: &EnvVarName, field_path: FieldPathEnvVar) -> Self { + self.0.insert( + name.clone(), + EnvVar { + name: name.to_string(), + value: None, + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: field_path.to_string(), + ..ObjectFieldSelector::default() + }), + ..EnvVarSource::default() + }), + }, + ); + + self + } + + /// Adds an environment variable with the given ConfigMap key reference to this set + /// + /// An [`EnvVar`] with the same name is overridden. 
+ pub fn with_config_map_key_ref( + mut self, + name: &EnvVarName, + config_map_name: &ConfigMapName, + config_map_key: &ConfigMapKey, + ) -> Self { + self.0.insert( + name.clone(), + EnvVar { + name: name.to_string(), + value: None, + value_from: Some(EnvVarSource { + config_map_key_ref: Some(ConfigMapKeySelector { + key: config_map_key.to_string(), + name: config_map_name.to_string(), + ..ConfigMapKeySelector::default() + }), + ..EnvVarSource::default() + }), + }, + ); + + self + } +} + +impl From for Vec { + fn from(value: EnvVarSet) -> Self { + value.0.values().cloned().collect() + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use stackable_operator::{ + builder::pod::container::FieldPathEnvVar, + k8s_openapi::api::core::v1::{ + ConfigMapKeySelector, EnvVar, EnvVarSource, ObjectFieldSelector, + }, + }; + + use super::{EnvVarName, EnvVarSet}; + use crate::framework::{ + builder::pod::container::new_container_builder, + types::kubernetes::{ConfigMapKey, ConfigMapName, ContainerName}, + }; + + #[test] + fn test_envvarname_fromstr() { + // actually accepted by Kubernetes + assert!(EnvVarName::from_str(" !\"#$%&'()*+,-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~").is_ok()); + + // empty string + assert!(EnvVarName::from_str("").is_err()); + // non-printable ASCII characters + assert!(EnvVarName::from_str("\n").is_err()); + assert!(EnvVarName::from_str("€").is_err()); + // equals sign + assert!(EnvVarName::from_str("=").is_err()); + } + + #[test] + fn test_new_container_builder() { + // Test that the function does not panic + new_container_builder(&ContainerName::from_str_unsafe("valid-container-name")); + } + + #[test] + fn test_envvarname_format() { + assert_eq!( + "TEST".to_owned(), + format!("{}", EnvVarName::from_str_unsafe("TEST")) + ); + } + + #[test] + fn test_envvarset_merge() { + let env_var_set1 = EnvVarSet::new().with_values([ + ( + EnvVarName::from_str_unsafe("ENV1"), + "value1 from env_var_set1", + 
), + ( + EnvVarName::from_str_unsafe("ENV2"), + "value2 from env_var_set1", + ), + ( + EnvVarName::from_str_unsafe("ENV3"), + "value3 from env_var_set1", + ), + ]); + let env_var_set2 = EnvVarSet::new() + .with_value( + &EnvVarName::from_str_unsafe("ENV2"), + "value2 from env_var_set2", + ) + .with_field_path(&EnvVarName::from_str_unsafe("ENV3"), FieldPathEnvVar::Name) + .with_value( + &EnvVarName::from_str_unsafe("ENV4"), + "value4 from env_var_set2", + ); + + let merged_env_var_set = env_var_set1.merge(env_var_set2); + + assert_eq!( + vec![ + EnvVar { + name: "ENV1".to_owned(), + value: Some("value1 from env_var_set1".to_owned()), + value_from: None + }, + EnvVar { + name: "ENV2".to_owned(), + value: Some("value2 from env_var_set2".to_owned()), + value_from: None + }, + EnvVar { + name: "ENV3".to_owned(), + value: None, + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "metadata.name".to_owned(), + ..ObjectFieldSelector::default() + }), + ..EnvVarSource::default() + }), + }, + EnvVar { + name: "ENV4".to_owned(), + value: Some("value4 from env_var_set2".to_owned()), + value_from: None + } + ], + Vec::from(merged_env_var_set) + ); + } + + #[test] + fn test_envvarset_with_values() { + let env_var_set = EnvVarSet::new().with_values([ + (EnvVarName::from_str_unsafe("ENV1"), "value1"), + (EnvVarName::from_str_unsafe("ENV2"), "value2"), + ]); + + assert_eq!( + vec![ + EnvVar { + name: "ENV1".to_owned(), + value: Some("value1".to_owned()), + value_from: None + }, + EnvVar { + name: "ENV2".to_owned(), + value: Some("value2".to_owned()), + value_from: None + } + ], + Vec::from(env_var_set) + ); + } + + #[test] + fn test_envvarset_with_value() { + let env_var_set = EnvVarSet::new().with_value(&EnvVarName::from_str_unsafe("ENV"), "value"); + + assert_eq!( + Some(&EnvVar { + name: "ENV".to_owned(), + value: Some("value".to_owned()), + value_from: None + }), + env_var_set.get(&EnvVarName::from_str_unsafe("ENV")) + ); + } + + #[test] + fn 
test_envvarset_with_field_path() { + let env_var_set = EnvVarSet::new() + .with_field_path(&EnvVarName::from_str_unsafe("ENV"), FieldPathEnvVar::Name); + + assert_eq!( + Some(&EnvVar { + name: "ENV".to_owned(), + value: None, + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "metadata.name".to_owned(), + ..ObjectFieldSelector::default() + }), + ..EnvVarSource::default() + }), + }), + env_var_set.get(&EnvVarName::from_str_unsafe("ENV")) + ); + } + + #[test] + fn test_envvarset_with_config_map_key_ref() { + let env_var_set = EnvVarSet::new().with_config_map_key_ref( + &EnvVarName::from_str_unsafe("ENV"), + &ConfigMapName::from_str_unsafe("config-map"), + &ConfigMapKey::from_str_unsafe("key"), + ); + + assert_eq!( + Some(&EnvVar { + name: "ENV".to_owned(), + value: None, + value_from: Some(EnvVarSource { + config_map_key_ref: Some(ConfigMapKeySelector { + key: "key".to_owned(), + name: "config-map".to_owned(), + ..ConfigMapKeySelector::default() + }), + ..EnvVarSource::default() + }), + }), + env_var_set.get(&EnvVarName::from_str_unsafe("ENV")) + ); + } +} diff --git a/rust/operator-binary/src/framework/builder/pod/volume.rs b/rust/operator-binary/src/framework/builder/pod/volume.rs new file mode 100644 index 00000000..06dc4846 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pod/volume.rs @@ -0,0 +1,48 @@ +use stackable_operator::{ + builder::pod::volume::ListenerOperatorVolumeSourceBuilder, + k8s_openapi::api::core::v1::PersistentVolumeClaim, kvp::Labels, +}; + +use crate::framework::types::kubernetes::{ + ListenerClassName, ListenerName, PersistentVolumeClaimName, +}; + +/// Infallible variant of [`stackable_operator::builder::pod::volume::ListenerReference`] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ListenerReference { + ListenerClass(ListenerClassName), + Listener(ListenerName), +} + +impl From<&ListenerReference> for stackable_operator::builder::pod::volume::ListenerReference { + fn from(value: 
&ListenerReference) -> Self { + match value { + ListenerReference::ListenerClass(listener_class_name) => { + stackable_operator::builder::pod::volume::ListenerReference::ListenerClass( + listener_class_name.to_string(), + ) + } + ListenerReference::Listener(listener_name) => { + stackable_operator::builder::pod::volume::ListenerReference::ListenerName( + listener_name.to_string(), + ) + } + } + } +} + +/// Infallible variant of +/// [`stackable_operator::builder::pod::volume::ListenerOperatorVolumeSourceBuilder::build_pvc`] +pub fn listener_operator_volume_source_builder_build_pvc( + listener_reference: &ListenerReference, + labels: &Labels, + pvc_name: &PersistentVolumeClaimName, +) -> PersistentVolumeClaim { + ListenerOperatorVolumeSourceBuilder::new(&listener_reference.into(), labels) + .build_pvc(pvc_name.to_string()) + .expect( + "should return a PersistentVolumeClaim, because the only check is that \ + listener_reference is a valid annotation value and there are no restrictions on single \ + annotation values", + ) +} diff --git a/rust/operator-binary/src/framework/builder/statefulset.rs b/rust/operator-binary/src/framework/builder/statefulset.rs new file mode 100644 index 00000000..904d333b --- /dev/null +++ b/rust/operator-binary/src/framework/builder/statefulset.rs @@ -0,0 +1,118 @@ +use std::collections::BTreeMap; + +use stackable_operator::kvp::Annotations; + +use crate::framework::types::kubernetes::{ConfigMapName, SecretName}; + +/// Creates `restarter.stackable.tech/ignore-configmap.{i}` annotations for each given ConfigMap. +/// +/// The restarter uses these annotations to skip restarting Pods when specific ConfigMaps change. +/// Indices start at 0 and are assigned in iteration order, so **do not merge the result with +/// annotations from another call** — duplicate indices would overwrite each other. 
+pub fn restarter_ignore_configmap_annotations( + ignored_config_maps: impl IntoIterator, +) -> Annotations { + let annotation_key_values = ignored_config_maps + .into_iter() + .enumerate() + .map(|(i, config_map_name)| { + ( + format!("restarter.stackable.tech/ignore-configmap.{i}"), + config_map_name.to_string(), + ) + }) + .collect::>(); + + Annotations::try_from(annotation_key_values).expect( + "should contain only valid annotations because the annotation keys are statically \ + defined apart from the index number and the names of ConfigMaps are valid annotation \ + values.", + ) +} + +/// Creates `restarter.stackable.tech/ignore-secret.{i}` annotations for each given Secret. +/// +/// The restarter uses these annotations to skip restarting Pods when specific Secrets change. +/// Indices start at 0 and are assigned in iteration order, so **do not merge the result with +/// annotations from another call** — duplicate indices would overwrite each other. +pub fn restarter_ignore_secret_annotations( + ignored_secrets: impl IntoIterator, +) -> Annotations { + let annotation_key_values = ignored_secrets + .into_iter() + .enumerate() + .map(|(i, secret_name)| { + ( + format!("restarter.stackable.tech/ignore-secret.{i}"), + secret_name.to_string(), + ) + }) + .collect::>(); + + Annotations::try_from(annotation_key_values).expect( + "should contain only valid annotations because the annotation keys are statically \ + defined apart from the index number and the names of Secrets are valid annotation \ + values.", + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn multiple_config_maps_produce_indexed_annotations() { + let ignored_config_maps = [ + ConfigMapName::from_str_unsafe("first-config"), + ConfigMapName::from_str_unsafe("second-config"), + ConfigMapName::from_str_unsafe("third-config"), + ]; + + let actual_annotations = restarter_ignore_configmap_annotations(ignored_config_maps); + + let expected_annotations = BTreeMap::from([ + ( + 
"restarter.stackable.tech/ignore-configmap.0".to_owned(), + "first-config".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-configmap.1".to_owned(), + "second-config".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-configmap.2".to_owned(), + "third-config".to_owned(), + ), + ]); + + assert_eq!(expected_annotations, actual_annotations.into()); + } + + #[test] + fn multiple_secrets_produce_indexed_annotations() { + let ignored_secrets = [ + SecretName::from_str_unsafe("first-secret"), + SecretName::from_str_unsafe("second-secret"), + SecretName::from_str_unsafe("third-secret"), + ]; + + let actual_annotations = restarter_ignore_secret_annotations(ignored_secrets); + + let expected_annotations = BTreeMap::from([ + ( + "restarter.stackable.tech/ignore-secret.0".to_owned(), + "first-secret".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-secret.1".to_owned(), + "second-secret".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-secret.2".to_owned(), + "third-secret".to_owned(), + ), + ]); + + assert_eq!(expected_annotations, actual_annotations.into()); + } +} diff --git a/rust/operator-binary/src/framework/cluster_resources.rs b/rust/operator-binary/src/framework/cluster_resources.rs new file mode 100644 index 00000000..430b534f --- /dev/null +++ b/rust/operator-binary/src/framework/cluster_resources.rs @@ -0,0 +1,50 @@ +use stackable_operator::{ + cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, + deep_merger::ObjectOverrides, + k8s_openapi::api::core::v1::ObjectReference, +}; + +use super::types::{ + kubernetes::{NamespaceName, Uid}, + operator::{ClusterName, ControllerName, OperatorName, ProductName}, +}; +use crate::framework::{ + NameIsValidLabelValue, macros::attributed_string_type::MAX_LABEL_VALUE_LENGTH, +}; + +/// Infallible variant of [`stackable_operator::cluster_resources::ClusterResources::new`] +#[allow(clippy::too_many_arguments)] +pub fn cluster_resources_new<'a>( + product_name: &ProductName, + 
operator_name: &OperatorName, + controller_name: &ControllerName, + cluster_name: &ClusterName, + cluster_namespace: &NamespaceName, + cluster_uid: &Uid, + apply_strategy: ClusterResourceApplyStrategy, + object_overrides: &'a ObjectOverrides, +) -> ClusterResources<'a> { + // compile-time check + // ClusterResources::new creates a label value from the given app name by appending + // `-operator`. For the resulting label value to be valid, it must not exceed + // MAX_LABEL_VALUE_LENGTH. + const _: () = assert!( + ProductName::MAX_LENGTH + "-operator".len() <= MAX_LABEL_VALUE_LENGTH, + "The string `-operator` must not exceed the limit of Label names." + ); + + ClusterResources::new( + &product_name.to_label_value(), + &operator_name.to_label_value(), + &controller_name.to_label_value(), + &ObjectReference { + name: Some(cluster_name.to_string()), + namespace: Some(cluster_namespace.to_string()), + uid: Some(cluster_uid.to_string()), + ..Default::default() + }, + apply_strategy, + object_overrides, + ) + .expect("ClusterResources should be created because the cluster object reference contains name, namespace and uid.") +} diff --git a/rust/operator-binary/src/framework/controller_utils.rs b/rust/operator-binary/src/framework/controller_utils.rs new file mode 100644 index 00000000..d15e53f5 --- /dev/null +++ b/rust/operator-binary/src/framework/controller_utils.rs @@ -0,0 +1,211 @@ +//! 
Helper functions which are not tied to a specific controller step + +use std::str::FromStr; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::kube::runtime::reflector::Lookup; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::framework::types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get the cluster name"))] + GetClusterName {}, + + #[snafu(display("failed to get the namespace"))] + GetNamespace {}, + + #[snafu(display("failed to get the UID"))] + GetUid {}, + + #[snafu(display("failed to set the cluster name"))] + ParseClusterName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to set the namespace"))] + ParseNamespace { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to set the UID"))] + ParseUid { + source: crate::framework::macros::attributed_string_type::Error, + }, +} + +type Result = std::result::Result; + +/// Get the cluster name from the given resource +pub fn get_cluster_name(cluster: &impl Lookup) -> Result { + let raw_cluster_name = cluster.name().context(GetClusterNameSnafu)?; + let cluster_name = ClusterName::from_str(&raw_cluster_name).context(ParseClusterNameSnafu)?; + + Ok(cluster_name) +} + +/// Get the namespace from the given resource +pub fn get_namespace(resource: &impl Lookup) -> Result { + let raw_namespace = resource.namespace().context(GetNamespaceSnafu)?; + let namespace = NamespaceName::from_str(&raw_namespace).context(ParseNamespaceSnafu)?; + + Ok(namespace) +} + +/// Get the UID from the given resource +pub fn get_uid(resource: &impl Lookup) -> Result { + let raw_uid = resource.uid().context(GetUidSnafu)?; + let uid = Uid::from_str(&raw_uid).context(ParseUidSnafu)?; + + Ok(uid) +} + +#[cfg(test)] +mod tests { + use 
stackable_operator::kube::runtime::reflector::Lookup; + use uuid::uuid; + + use super::{ErrorDiscriminants, get_cluster_name, get_namespace, get_uid}; + use crate::framework::types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, + }; + + #[derive(Debug, Default)] + struct TestResource { + name: Option<&'static str>, + namespace: Option<&'static str>, + uid: Option<&'static str>, + } + + impl Lookup for TestResource { + type DynamicType = (); + + fn kind(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "TestResource".into() + } + + fn group(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "stackable.tech".into() + } + + fn version(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "v1".into() + } + + fn plural(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "testresources".into() + } + + fn name(&self) -> Option> { + self.name.map(std::borrow::Cow::Borrowed) + } + + fn namespace(&self) -> Option> { + self.namespace.map(std::borrow::Cow::Borrowed) + } + + fn resource_version(&self) -> Option> { + Some("1".into()) + } + + fn uid(&self) -> Option> { + self.uid.map(std::borrow::Cow::Borrowed) + } + } + + #[test] + fn test_get_cluster_name() { + assert_eq!( + ClusterName::from_str_unsafe("test-cluster"), + get_cluster_name(&TestResource { + name: Some("test-cluster"), + ..TestResource::default() + }) + .expect("should contain a valid cluster name") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetClusterName), + get_cluster_name(&TestResource { + name: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseClusterName), + get_cluster_name(&TestResource { + name: Some("invalid cluster name"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } + + #[test] + fn test_get_namespace() { + assert_eq!( + NamespaceName::from_str_unsafe("test-namespace"), + get_namespace(&TestResource { + namespace: 
Some("test-namespace"), + ..TestResource::default() + }) + .expect("should contain a valid namespace") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetNamespace), + get_namespace(&TestResource { + namespace: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseNamespace), + get_namespace(&TestResource { + namespace: Some("invalid namespace"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } + + #[test] + fn test_get_uid() { + assert_eq!( + Uid::from(uuid!("e6ac237d-a6d4-43a1-8135-f36506110912")), + get_uid(&TestResource { + uid: Some("e6ac237d-a6d4-43a1-8135-f36506110912"), + ..TestResource::default() + }) + .expect("should contain a valid UID") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetUid), + get_uid(&TestResource { + uid: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseUid), + get_uid(&TestResource { + uid: Some("invalid UID"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } +} diff --git a/rust/operator-binary/src/framework/kvp.rs b/rust/operator-binary/src/framework/kvp.rs new file mode 100644 index 00000000..0006163a --- /dev/null +++ b/rust/operator-binary/src/framework/kvp.rs @@ -0,0 +1 @@ +pub mod label; diff --git a/rust/operator-binary/src/framework/kvp/label.rs b/rust/operator-binary/src/framework/kvp/label.rs new file mode 100644 index 00000000..88098bf2 --- /dev/null +++ b/rust/operator-binary/src/framework/kvp/label.rs @@ -0,0 +1,196 @@ +use stackable_operator::{ + kube::Resource, + kvp::{Labels, ObjectLabels}, +}; + +use crate::framework::{ + HasName, NameIsValidLabelValue, + types::operator::{ + ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, RoleName, + }, +}; + +/// Infallible variant of [`stackable_operator::kvp::Labels::recommended`] +pub fn recommended_labels( + owner: &(impl Resource + HasName + 
NameIsValidLabelValue), + product_name: &ProductName, + product_version: &ProductVersion, + operator_name: &OperatorName, + controller_name: &ControllerName, + role_name: &RoleName, + role_group_name: &RoleGroupName, +) -> Labels { + let object_labels = ObjectLabels { + owner, + app_name: &product_name.to_label_value(), + app_version: &product_version.to_label_value(), + operator_name: &operator_name.to_label_value(), + controller_name: &controller_name.to_label_value(), + role: &role_name.to_label_value(), + role_group: &role_group_name.to_label_value(), + }; + Labels::recommended(&object_labels).expect( + "Labels should be created because the owner has an object name and all given parameters \ + produce valid label values.", + ) +} + +/// Infallible variant of [`stackable_operator::kvp::Labels::role_selector`] +#[allow(dead_code)] +pub fn role_selector( + owner: &(impl Resource + HasName + NameIsValidLabelValue), + product_name: &ProductName, + role_name: &RoleName, +) -> Labels { + Labels::role_selector( + owner, + &product_name.to_label_value(), + &role_name.to_label_value(), + ) + .expect("Labels should be created because all given parameters produce valid label values") +} + +/// Infallible variant of [`stackable_operator::kvp::Labels::role_group_selector`] +pub fn role_group_selector( + owner: &(impl Resource + HasName + NameIsValidLabelValue), + product_name: &ProductName, + role_name: &RoleName, + role_group_name: &RoleGroupName, +) -> Labels { + Labels::role_group_selector( + owner, + &product_name.to_label_value(), + &role_name.to_label_value(), + &role_group_name.to_label_value(), + ) + .expect("Labels should be created because all given parameters produce valid label values") +} + +#[cfg(test)] +mod tests { + use std::{borrow::Cow, collections::BTreeMap}; + + use stackable_operator::{ + k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta, kube::Resource, + }; + + use crate::framework::{ + HasName, NameIsValidLabelValue, + 
kvp::label::{recommended_labels, role_group_selector, role_selector}, + types::operator::{ + ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, RoleName, + }, + }; + + struct Cluster { + object_meta: ObjectMeta, + } + + impl Cluster { + fn new() -> Self { + Cluster { + object_meta: ObjectMeta { + name: Some("cluster-name".to_owned()), + ..ObjectMeta::default() + }, + } + } + } + + impl Resource for Cluster { + type DynamicType = (); + type Scope = (); + + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("AirflowCluster") } + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflow.stackable.tech") } + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha2") } + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflowclusters") } + + fn meta(&self) -> &ObjectMeta { + &self.object_meta + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.object_meta + } + } + + impl HasName for Cluster { + fn to_name(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + impl NameIsValidLabelValue for Cluster { + fn to_label_value(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + #[test] + fn test_recommended_labels() { + let actual_labels = recommended_labels( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &ProductVersion::from_str_unsafe("1.0.0"), + &OperatorName::from_str_unsafe("my-operator"), + &ControllerName::from_str_unsafe("my-controller"), + &RoleName::from_str_unsafe("my-role"), + &RoleGroupName::from_str_unsafe("my-role-group"), + ); + + let expected_labels: BTreeMap = [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/managed-by", "my-operator_my-controller"), + ("app.kubernetes.io/name", "my-product"), + ("app.kubernetes.io/role-group", "my-role-group"), + ("app.kubernetes.io/version", "1.0.0"), + ("stackable.tech/vendor", 
"Stackable"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(); + + assert_eq!(expected_labels, actual_labels.into()); + } + + #[test] + fn test_role_selector() { + let actual_labels = role_selector( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &RoleName::from_str_unsafe("my-role"), + ); + + let expected_labels: BTreeMap = [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/name", "my-product"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(); + + assert_eq!(expected_labels, actual_labels.into()); + } + + #[test] + fn test_role_group_selector() { + let actual_labels = role_group_selector( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &RoleName::from_str_unsafe("my-role"), + &RoleGroupName::from_str_unsafe("my-role-group"), + ); + + let expected_labels: BTreeMap = [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/name", "my-product"), + ("app.kubernetes.io/role-group", "my-role-group"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(); + + assert_eq!(expected_labels, actual_labels.into()); + } +} diff --git a/rust/operator-binary/src/framework/product_logging.rs b/rust/operator-binary/src/framework/product_logging.rs new file mode 100644 index 00000000..0c717499 --- /dev/null +++ b/rust/operator-binary/src/framework/product_logging.rs @@ -0,0 +1 @@ +pub mod framework; diff --git a/rust/operator-binary/src/framework/product_logging/framework.rs b/rust/operator-binary/src/framework/product_logging/framework.rs new file mode 100644 index 00000000..76a5c04b --- /dev/null +++ b/rust/operator-binary/src/framework/product_logging/framework.rs @@ -0,0 +1,127 @@ +use std::fmt::Display; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::product_logging::spec::{ + AutomaticContainerLogConfig, ConfigMapLogConfig, ContainerLogConfig, 
ContainerLogConfigChoice, + CustomContainerLogConfig, Logging, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::framework::types::kubernetes::ConfigMapName; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get container log configuration for container {container}"))] + GetContainerLogConfiguration { container: String }, + + #[snafu(display("failed to parse ConfigMap name for custom log configuration"))] + ParseConfigMapName { + source: crate::framework::macros::attributed_string_type::Error, + }, +} + +#[derive(Clone, Debug)] +pub enum ValidatedContainerLogConfigChoice { + Automatic(AutomaticContainerLogConfig), + Custom(ConfigMapName), +} + +impl ValidatedContainerLogConfigChoice { + /// Converts back to the raw upstream type for use at API boundaries + /// (e.g. calling `product_logging::framework::vector_container`). + pub fn to_raw_container_log_config(&self) -> ContainerLogConfig { + match self { + Self::Automatic(auto) => ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic(auto.clone())), + }, + Self::Custom(name) => ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { + custom: ConfigMapLogConfig { + config_map: name.to_string(), + }, + })), + }, + } + } +} + +#[derive(Clone, Debug)] +pub struct VectorContainerLogConfig { + pub log_config: ValidatedContainerLogConfigChoice, +} + +pub fn validate_logging_configuration_for_container( + logging: &Logging, + container: T, +) -> Result +where + T: Clone + Display + Ord, +{ + use std::str::FromStr; + + let config = logging + .containers + .get(&container) + .and_then(|c| c.choice.as_ref()) + .context(GetContainerLogConfigurationSnafu { + container: container.to_string(), + })?; + + match config { + ContainerLogConfigChoice::Automatic(automatic) => Ok( + ValidatedContainerLogConfigChoice::Automatic(automatic.clone()), + ), + 
ContainerLogConfigChoice::Custom(custom) => { + let config_map_name = ConfigMapName::from_str(&custom.custom.config_map) + .context(ParseConfigMapNameSnafu)?; + Ok(ValidatedContainerLogConfigChoice::Custom(config_map_name)) + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use stackable_operator::product_logging::spec::{ + AutomaticContainerLogConfig, ContainerLogConfig, ContainerLogConfigChoice, Logging, + }; + + use super::*; + use crate::crd::Container; + + fn logging_with_automatic_config() -> Logging { + let mut containers = BTreeMap::new(); + containers.insert( + Container::Airflow, + ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic( + AutomaticContainerLogConfig::default(), + )), + }, + ); + Logging { + enable_vector_agent: false, + containers, + } + } + + #[test] + fn test_validate_automatic_log_config() { + let logging = logging_with_automatic_config(); + let result = validate_logging_configuration_for_container(&logging, Container::Airflow); + assert!(result.is_ok()); + assert!(matches!( + result.unwrap(), + ValidatedContainerLogConfigChoice::Automatic(_) + )); + } + + #[test] + fn test_validate_missing_container_config() { + let logging = logging_with_automatic_config(); + let result = validate_logging_configuration_for_container(&logging, Container::Vector); + assert!(result.is_err()); + } +} diff --git a/rust/operator-binary/src/framework/role_group_utils.rs b/rust/operator-binary/src/framework/role_group_utils.rs new file mode 100644 index 00000000..61ea8637 --- /dev/null +++ b/rust/operator-binary/src/framework/role_group_utils.rs @@ -0,0 +1,151 @@ +use std::str::FromStr; + +use super::types::{ + kubernetes::{ConfigMapName, ListenerName, ServiceName, StatefulSetName}, + operator::{ClusterName, RoleGroupName, RoleName}, +}; +use crate::attributed_string_type; + +attributed_string_type! 
{ + QualifiedRoleGroupName, + "A qualified role group name consisting of the cluster name, role name and role-group name. It is a valid label name as defined in RFC 1035 that can be used e.g. as a name for a Service or a StatefulSet.", + "airflow-webserver-default", + // Suffixes are added to produce resource names. According compile-time checks ensure that + // max_length cannot be set higher. + (max_length = 52), + is_rfc_1035_label_name, + is_valid_label_value +} + +/// Type-safe names for role-group resources +pub struct ResourceNames { + pub cluster_name: ClusterName, + pub role_name: RoleName, + pub role_group_name: RoleGroupName, +} + +impl ResourceNames { + /// Creates a qualified role group name in the format + /// `--` + fn qualified_role_group_name(&self) -> QualifiedRoleGroupName { + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH + + 1 // dash + + RoleName::MAX_LENGTH + + 1 // dash + + RoleGroupName::MAX_LENGTH + <= QualifiedRoleGroupName::MAX_LENGTH, + "The string `--` must not exceed the limit \ + of RFC 1035 label names." + ); + // qualified_role_group_name is only an RFC 1035 label name if it starts with an + // alphabetic character, therefore cluster_name must also be an RFC 1035 label name. + // role_name and role_group_name and the middle of the qualified_role_group_name can + // be RFC 1123 label names because digits are allowed there. + let _ = ClusterName::IS_RFC_1035_LABEL_NAME; + let _ = RoleName::IS_RFC_1123_LABEL_NAME; + let _ = RoleGroupName::IS_RFC_1123_LABEL_NAME; + + QualifiedRoleGroupName::from_str(&format!( + "{}-{}-{}", + self.cluster_name, self.role_name, self.role_group_name, + )) + .expect("should be a valid QualifiedRoleGroupName") + } + + pub fn role_group_config_map(&self) -> ConfigMapName { + // compile-time check + const _: () = assert!( + QualifiedRoleGroupName::MAX_LENGTH <= ConfigMapName::MAX_LENGTH, + "The string `--` must not exceed the limit of \ + ConfigMap names." 
+        );
+        let _ = QualifiedRoleGroupName::IS_RFC_1123_SUBDOMAIN_NAME;
+
+        ConfigMapName::from_str(self.qualified_role_group_name().as_ref())
+            .expect("should be a valid ConfigMap name")
+    }
+
+    pub fn stateful_set_name(&self) -> StatefulSetName {
+        // compile-time checks
+        const _: () = assert!(
+            QualifiedRoleGroupName::MAX_LENGTH <= StatefulSetName::MAX_LENGTH,
+            "The string `<cluster name>-<role name>-<role group name>` must not exceed the \
+            limit of StatefulSet names."
+        );
+        let _ = QualifiedRoleGroupName::IS_RFC_1123_LABEL_NAME;
+        let _ = QualifiedRoleGroupName::IS_VALID_LABEL_VALUE;
+
+        StatefulSetName::from_str(self.qualified_role_group_name().as_ref())
+            .expect("should be a valid StatefulSet name")
+    }
+
+    pub fn headless_service_name(&self) -> ServiceName {
+        const SUFFIX: &str = "-headless";
+
+        const _: () = assert!(
+            QualifiedRoleGroupName::MAX_LENGTH + SUFFIX.len() <= ServiceName::MAX_LENGTH,
+            "The string `<cluster name>-<role name>-<role group name>-headless` must not exceed the \
+            limit of Service names."
+        );
+        let _ = QualifiedRoleGroupName::IS_RFC_1035_LABEL_NAME;
+        let _ = QualifiedRoleGroupName::IS_VALID_LABEL_VALUE;
+
+        ServiceName::from_str(&format!("{}{SUFFIX}", self.qualified_role_group_name()))
+            .expect("should be a valid Service name")
+    }
+
+    pub fn listener_name(&self) -> ListenerName {
+        const _: () = assert!(
+            QualifiedRoleGroupName::MAX_LENGTH <= ListenerName::MAX_LENGTH,
+            "The string `<cluster name>-<role name>-<role group name>` must not exceed the limit of \
+            Listener names."
+ ); + let _ = QualifiedRoleGroupName::IS_RFC_1123_SUBDOMAIN_NAME; + + ListenerName::from_str(self.qualified_role_group_name().as_ref()) + .expect("should be a valid Listener name") + } +} + +#[cfg(test)] +mod tests { + use super::{ClusterName, RoleGroupName, RoleName}; + use crate::framework::{ + role_group_utils::{QualifiedRoleGroupName, ResourceNames}, + types::kubernetes::{ConfigMapName, ListenerName, ServiceName, StatefulSetName}, + }; + + #[test] + fn test_resource_names() { + QualifiedRoleGroupName::test_example(); + + let resource_names = ResourceNames { + cluster_name: ClusterName::from_str_unsafe("test-cluster"), + role_name: RoleName::from_str_unsafe("webserver"), + role_group_name: RoleGroupName::from_str_unsafe("default"), + }; + + assert_eq!( + QualifiedRoleGroupName::from_str_unsafe("test-cluster-webserver-default"), + resource_names.qualified_role_group_name() + ); + assert_eq!( + ConfigMapName::from_str_unsafe("test-cluster-webserver-default"), + resource_names.role_group_config_map() + ); + assert_eq!( + StatefulSetName::from_str_unsafe("test-cluster-webserver-default"), + resource_names.stateful_set_name() + ); + assert_eq!( + ServiceName::from_str_unsafe("test-cluster-webserver-default-headless"), + resource_names.headless_service_name() + ); + assert_eq!( + ListenerName::from_str_unsafe("test-cluster-webserver-default"), + resource_names.listener_name() + ); + } +} diff --git a/rust/operator-binary/src/framework/role_utils.rs b/rust/operator-binary/src/framework/role_utils.rs new file mode 100644 index 00000000..cb009721 --- /dev/null +++ b/rust/operator-binary/src/framework/role_utils.rs @@ -0,0 +1,368 @@ +use std::{ + collections::{BTreeMap, HashMap}, + str::FromStr, +}; + +use serde::{Deserialize, Serialize}; +use stackable_operator::{ + config::{ + fragment::{self, FromFragment}, + merge::{Merge, merge}, + }, + k8s_openapi::{DeepMerge, api::core::v1::PodTemplateSpec}, + role_utils::{CommonConfiguration, Role, RoleGroup}, + schemars::{self, 
JsonSchema}, +}; + +use super::{ + builder::pod::container::EnvVarSet, + types::{ + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, + operator::{ClusterName, ProductName}, + }, +}; + +/// Variant of [`stackable_operator::role_utils::GenericProductSpecificCommonConfig`] that +/// implements [`Merge`] +#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct GenericProductSpecificCommonConfig {} + +impl Merge for GenericProductSpecificCommonConfig { + fn merge(&mut self, _defaults: &Self) {} +} + +/// Variant of [`stackable_operator::role_utils::RoleGroup`] that is easier to work with +/// +/// Differences are: +/// * `replicas` is non-optional. +/// * `config` is flattened. +/// * The [`HashMap`] in `env_overrides` is replaced with an [`EnvVarSet`]. +#[derive(Clone, Debug, PartialEq)] +pub struct RoleGroupConfig { + pub replicas: u16, + pub config: T, + pub config_overrides: HashMap>, + pub env_overrides: EnvVarSet, + pub cli_overrides: BTreeMap, + pub pod_overrides: PodTemplateSpec, + // allow(dead_code) is not necessary anymore when moved to operator-rs + #[allow(dead_code)] + pub product_specific_common_config: ProductSpecificCommonConfig, +} + +impl RoleGroupConfig { + pub fn cli_overrides_to_vec(&self) -> Vec { + self.cli_overrides + .clone() + .into_iter() + .flat_map(|(option, value)| [option, value]) + .collect() + } +} + +/// Variant of [`stackable_operator::role_utils::RoleGroup::validate_config`] with fixed types +/// +/// The `role` parameter takes the `ProductSpecificCommonConfig` into account. 
+pub fn validate_config( + role_group: &RoleGroup, + role: &Role, + default_config: &T, +) -> Result +where + C: FromFragment, + CommonConfig: Default + JsonSchema + Serialize, + ConfigOverrides: Default + JsonSchema + Serialize, + T: Merge + Clone, + RoleConfig: Default + JsonSchema + Serialize, +{ + let mut role_config = role.config.config.clone(); + role_config.merge(default_config); + let mut rolegroup_config = role_group.config.config.clone(); + rolegroup_config.merge(&role_config); + fragment::validate(rolegroup_config) +} + +/// Merges and validates the [`RoleGroup`] with the given `role` and `default_config` +pub fn with_validated_config( + role_group: &RoleGroup, + role: &Role, + default_config: &T, +) -> Result, fragment::ValidationError> +where + C: FromFragment, + CommonConfig: Clone + Default + JsonSchema + Merge + Serialize, + ConfigOverrides: Clone + Default + JsonSchema + Merge + Serialize, + T: Clone + Merge, + RoleConfig: Default + JsonSchema + Serialize, +{ + let validated_config = validate_config(role_group, role, default_config)?; + Ok(RoleGroup { + config: CommonConfiguration { + config: validated_config, + config_overrides: merge( + role_group.config.config_overrides.clone(), + &role.config.config_overrides, + ), + env_overrides: merged_env_overrides( + role.config.env_overrides.clone(), + role_group.config.env_overrides.clone(), + ), + cli_overrides: merged_cli_overrides( + role.config.cli_overrides.clone(), + role_group.config.cli_overrides.clone(), + ), + pod_overrides: merged_pod_overrides( + role.config.pod_overrides.clone(), + role_group.config.pod_overrides.clone(), + ), + product_specific_common_config: merge( + role_group.config.product_specific_common_config.clone(), + &role.config.product_specific_common_config, + ), + }, + replicas: role_group.replicas, + }) +} + +fn merged_env_overrides( + role_env_overrides: HashMap, + role_group_env_overrides: HashMap, +) -> HashMap { + let mut merged_env_overrides = role_env_overrides; + 
merged_env_overrides.extend(role_group_env_overrides);
+    merged_env_overrides
+}
+
+fn merged_cli_overrides(
+    role_cli_overrides: BTreeMap<String, String>,
+    role_group_cli_overrides: BTreeMap<String, String>,
+) -> BTreeMap<String, String> {
+    let mut merged_cli_overrides = role_cli_overrides;
+    merged_cli_overrides.extend(role_group_cli_overrides);
+    merged_cli_overrides
+}
+
+fn merged_pod_overrides(
+    role_pod_overrides: PodTemplateSpec,
+    role_group_pod_overrides: PodTemplateSpec,
+) -> PodTemplateSpec {
+    let mut merged_pod_overrides = role_pod_overrides;
+    merged_pod_overrides.merge_from(role_group_pod_overrides);
+    merged_pod_overrides
+}
+
+/// Type-safe names for role resources
+pub struct ResourceNames {
+    pub cluster_name: ClusterName,
+    pub product_name: ProductName,
+}
+
+impl ResourceNames {
+    pub fn service_account_name(&self) -> ServiceAccountName {
+        const SUFFIX: &str = "-serviceaccount";
+
+        // compile-time checks
+        const _: () = assert!(
+            ClusterName::MAX_LENGTH + SUFFIX.len() <= ServiceAccountName::MAX_LENGTH,
+            "The string `<cluster name>-serviceaccount` must not exceed the limit of ServiceAccount names."
+        );
+        let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME;
+
+        ServiceAccountName::from_str(&format!("{}{SUFFIX}", self.cluster_name))
+            .expect("should be a valid ServiceAccount name")
+    }
+
+    pub fn role_binding_name(&self) -> RoleBindingName {
+        const SUFFIX: &str = "-rolebinding";
+
+        // compile-time checks
+        const _: () = assert!(
+            ClusterName::MAX_LENGTH + SUFFIX.len() <= RoleBindingName::MAX_LENGTH,
+            "The string `<cluster name>-rolebinding` must not exceed the limit of RoleBinding names."
+ ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + RoleBindingName::from_str(&format!("{}{SUFFIX}", self.cluster_name)) + .expect("should be a valid RoleBinding name") + } + + pub fn cluster_role_name(&self) -> ClusterRoleName { + const SUFFIX: &str = "-clusterrole"; + + // compile-time checks + const _: () = assert!( + ProductName::MAX_LENGTH + SUFFIX.len() <= ClusterRoleName::MAX_LENGTH, + "The string `-clusterrole` must not exceed the limit of cluster role names." + ); + let _ = ProductName::IS_RFC_1123_SUBDOMAIN_NAME; + + ClusterRoleName::from_str(&format!("{}{SUFFIX}", self.product_name)) + .expect("should be a valid cluster role name") + } +} + +#[cfg(test)] +mod tests { + use std::collections::{BTreeMap, HashMap}; + + use rstest::*; + use serde::Serialize; + use stackable_operator::{ + config::{fragment::Fragment, merge::Merge}, + k8s_openapi::api::core::v1::PodTemplateSpec, + kube::api::ObjectMeta, + role_utils::{CommonConfiguration, GenericRoleConfig, Role, RoleGroup}, + schemars::{self, JsonSchema}, + }; + + use super::ResourceNames; + use crate::framework::{ + role_utils::with_validated_config, + types::{ + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, + operator::{ClusterName, ProductName}, + }, + }; + + #[derive(Debug, Fragment, PartialEq)] + #[fragment_attrs(derive(Clone, Debug, Default, Merge, PartialEq))] + struct Config { + property: String, + } + + impl Config { + fn new(value: &str) -> Self { + Self { + property: value.to_owned(), + } + } + } + + impl ConfigFragment { + fn new(value: Option<&str>) -> Self { + Self { + property: value.map(str::to_owned), + } + } + } + + #[derive(Clone, Debug, Default, JsonSchema, Merge, PartialEq, Serialize)] + struct ProductCommonConfig { + property: Option, + } + + #[derive(Clone, Debug, Default, JsonSchema, Merge, PartialEq, Serialize)] + struct TestConfigOverrides { + property: Option, + } + + fn new_common_config( + config: T, + override_value: Option<&str>, + ) -> 
CommonConfiguration { + let mut env_overrides = HashMap::new(); + let mut cli_overrides = BTreeMap::new(); + + if let Some(value) = override_value { + env_overrides.insert("PROPERTY".to_owned(), value.to_owned()); + cli_overrides.insert("--property".to_owned(), value.to_owned()); + } + + CommonConfiguration { + config, + config_overrides: TestConfigOverrides { + property: override_value.map(str::to_owned), + }, + env_overrides, + cli_overrides, + pod_overrides: PodTemplateSpec { + metadata: Some(ObjectMeta { + name: override_value.map(str::to_owned), + ..ObjectMeta::default() + }), + ..PodTemplateSpec::default() + }, + product_specific_common_config: ProductCommonConfig { + property: override_value.map(str::to_owned), + }, + } + } + + #[rstest] + #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), Some("default"))] + #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), None)] + #[case("role-group", Some("role-group"), Some("role-group"), None, Some("default"))] + #[case("role-group", Some("role-group"), Some("role-group"), None, None)] + #[case("role", Some("role"), None, Some("role"), Some("default"))] + #[case("role", Some("role"), None, Some("role"), None)] + #[case("default", None, None, None, Some("default"))] + fn test_with_validated_config_and_result_ok( + #[case] expected_config_value: &str, + #[case] expected_override_value: Option<&str>, + #[case] role_group_value: Option<&str>, + #[case] role_value: Option<&str>, + #[case] default_value: Option<&str>, + ) { + let role_group = RoleGroup { + config: new_common_config(ConfigFragment::new(role_group_value), role_group_value), + replicas: Some(3), + }; + let role = Role::<_, _, GenericRoleConfig, _> { + config: new_common_config(ConfigFragment::new(role_value), role_value), + ..Role::default() + }; + let default_config = ConfigFragment::new(default_value); + + let result = with_validated_config(&role_group, &role, &default_config); + + assert_eq!( + Some(RoleGroup 
{ + config: new_common_config( + Config::new(expected_config_value), + expected_override_value + ), + replicas: Some(3) + }), + result.ok() + ) + } + + #[test] + fn test_with_validated_config_and_result_err() { + let role_group = RoleGroup { + config: new_common_config(ConfigFragment::new(None), None), + replicas: None, + }; + let role = Role::<_, _, GenericRoleConfig, _> { + config: new_common_config(ConfigFragment::new(None), None), + ..Role::default() + }; + let default_config = ConfigFragment::new(None); + + let result: Result, _> = + with_validated_config(&role_group, &role, &default_config); + + assert!(result.is_err()) + } + + #[test] + fn test_resource_names() { + let resource_names = ResourceNames { + cluster_name: ClusterName::from_str_unsafe("my-cluster"), + product_name: ProductName::from_str_unsafe("my-product"), + }; + + assert_eq!( + ServiceAccountName::from_str_unsafe("my-cluster-serviceaccount"), + resource_names.service_account_name() + ); + assert_eq!( + RoleBindingName::from_str_unsafe("my-cluster-rolebinding"), + resource_names.role_binding_name() + ); + assert_eq!( + ClusterRoleName::from_str_unsafe("my-product-clusterrole"), + resource_names.cluster_role_name() + ); + } +} From bb4fe0994060b2d4c3265927435c485667836a1f Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 8 May 2026 18:57:06 +0200 Subject: [PATCH 3/5] feat: rewrite controller pipeline with validated config types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the monolithic airflow_controller with a structured pipeline of dereference → validate → build → apply → update_status stages. Each stage operates on validated, type-safe data rather than raw CRD types. Remove airflow_controller.rs and operations/ modules whose logic has been absorbed into the new pipeline. Update product_logging.rs, controller_commons.rs, and service.rs signatures for the new validated types. 
Co-Authored-By: Claude Opus 4.6 --- .../operator-binary/src/airflow_controller.rs | 1526 ----------------- rust/operator-binary/src/controller.rs | 335 ++++ rust/operator-binary/src/controller/apply.rs | 88 + rust/operator-binary/src/controller/build.rs | 506 ++++++ .../controller/build/role_group_builder.rs | 417 +++++ .../src/controller/dereference.rs | 113 ++ .../src/controller/update_status.rs | 54 + .../src/controller/validate.rs | 1285 ++++++++++++++ .../operator-binary/src/controller_commons.rs | 25 +- rust/operator-binary/src/crd/mod.rs | 5 +- rust/operator-binary/src/env_vars.rs | 8 +- rust/operator-binary/src/framework.rs | 5 - rust/operator-binary/src/framework/builder.rs | 2 - .../src/framework/builder/pdb.rs | 28 +- .../src/framework/kvp/label.rs | 19 +- .../src/framework/role_utils.rs | 26 +- rust/operator-binary/src/main.rs | 11 +- .../src/operations/graceful_shutdown.rs | 42 - rust/operator-binary/src/operations/mod.rs | 2 - rust/operator-binary/src/operations/pdb.rs | 89 - rust/operator-binary/src/product_logging.rs | 67 +- rust/operator-binary/src/service.rs | 9 +- 22 files changed, 2904 insertions(+), 1758 deletions(-) delete mode 100644 rust/operator-binary/src/airflow_controller.rs create mode 100644 rust/operator-binary/src/controller.rs create mode 100644 rust/operator-binary/src/controller/apply.rs create mode 100644 rust/operator-binary/src/controller/build.rs create mode 100644 rust/operator-binary/src/controller/build/role_group_builder.rs create mode 100644 rust/operator-binary/src/controller/dereference.rs create mode 100644 rust/operator-binary/src/controller/update_status.rs create mode 100644 rust/operator-binary/src/controller/validate.rs delete mode 100644 rust/operator-binary/src/operations/graceful_shutdown.rs delete mode 100644 rust/operator-binary/src/operations/mod.rs delete mode 100644 rust/operator-binary/src/operations/pdb.rs diff --git a/rust/operator-binary/src/airflow_controller.rs 
b/rust/operator-binary/src/airflow_controller.rs deleted file mode 100644 index 25a5bc71..00000000 --- a/rust/operator-binary/src/airflow_controller.rs +++ /dev/null @@ -1,1526 +0,0 @@ -//! Ensures that `Pod`s are configured and running for each [`v1alpha2::AirflowCluster`] -use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, - io::Write, - str::FromStr, - sync::Arc, -}; - -use const_format::concatcp; -use product_config::{ - ProductConfigManager, - flask_app_config_writer::{self, FlaskAppConfigWriterError}, - types::PropertyNameKind, -}; -use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_operator::{ - builder::{ - self, - configmap::ConfigMapBuilder, - meta::ObjectMetaBuilder, - pod::{ - PodBuilder, - container::ContainerBuilder, - resources::ResourceRequirementsBuilder, - security::PodSecurityContextBuilder, - volume::{ - ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError, - ListenerReference, VolumeBuilder, - }, - }, - }, - cli::OperatorEnvironmentOptions, - cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, - commons::{ - product_image_selection::{self, ResolvedProductImage}, - random_secret_creation, - rbac::build_rbac_resources, - }, - crd::{ - authentication::{core as auth_core, ldap}, - git_sync, listener, - }, - database_connections::{ - TemplatingMechanism, - drivers::{ - celery::CeleryDatabaseConnectionDetails, - sqlalchemy::SqlAlchemyDatabaseConnectionDetails, - }, - }, - k8s_openapi::{ - self, DeepMerge, - api::{ - apps::v1::{StatefulSet, StatefulSetSpec}, - core::v1::{ - ConfigMap, PersistentVolumeClaim, PodTemplateSpec, Probe, ServiceAccount, - TCPSocketAction, - }, - }, - apimachinery::pkg::{apis::meta::v1::LabelSelector, util::intstr::IntOrString}, - }, - kube::{ - Resource, ResourceExt, - api::ObjectMeta, - core::{DeserializeGuard, error_boundary}, - runtime::{controller::Action, reflector::ObjectRef}, - }, - kvp::{Annotation, Label, LabelError, Labels, ObjectLabels}, - 
logging::controller::ReconcilerError, - product_config_utils::{ - CONFIG_OVERRIDE_FILE_FOOTER_KEY, CONFIG_OVERRIDE_FILE_HEADER_KEY, env_vars_from, - env_vars_from_rolegroup_config, transform_all_roles_to_config, - validate_all_roles_and_groups_config, - }, - product_logging::{ - self, - framework::LoggingError, - spec::{ContainerLogConfig, Logging}, - }, - role_utils::{GenericRoleConfig, RoleGroupRef}, - shared::time::Duration, - status::condition::{ - compute_conditions, operations::ClusterOperationsConditionBuilder, - statefulset::StatefulSetConditionBuilder, - }, - utils::COMMON_BASH_TRAP_FUNCTIONS, -}; -use strum::{EnumDiscriminants, IntoEnumIterator, IntoStaticStr}; - -use crate::{ - config::{self, PYTHON_IMPORTS}, - controller_commons::{self, CONFIG_VOLUME_NAME, LOG_CONFIG_VOLUME_NAME, LOG_VOLUME_NAME}, - crd::{ - self, AIRFLOW_CONFIG_FILENAME, APP_NAME, AirflowClusterStatus, AirflowConfig, - AirflowConfigOptions, AirflowExecutor, AirflowExecutorCommonConfiguration, AirflowRole, - CONFIG_PATH, Container, ExecutorConfig, HTTP_PORT, HTTP_PORT_NAME, LISTENER_VOLUME_DIR, - LISTENER_VOLUME_NAME, LOG_CONFIG_DIR, METRICS_PORT, METRICS_PORT_NAME, OPERATOR_NAME, - STACKABLE_LOG_DIR, TEMPLATE_LOCATION, TEMPLATE_NAME, TEMPLATE_VOLUME_NAME, - authentication::{ - AirflowAuthenticationClassResolved, AirflowClientAuthenticationDetailsResolved, - }, - authorization::AirflowAuthorizationResolved, - build_recommended_labels, - internal_secret::{ - FERNET_KEY_SECRET_KEY, INTERNAL_SECRET_SECRET_KEY, JWT_SECRET_SECRET_KEY, - }, - v1alpha2, - }, - env_vars::{self, build_airflow_template_envs}, - operations::{ - graceful_shutdown::{ - add_airflow_graceful_shutdown_config, add_executor_graceful_shutdown_config, - }, - pdb::add_pdbs, - }, - product_logging::extend_config_map_with_log_config, - service::{ - build_rolegroup_headless_service, build_rolegroup_metrics_service, - stateful_set_service_name, - }, -}; - -pub const AIRFLOW_CONTROLLER_NAME: &str = "airflowcluster"; -pub const 
CONTAINER_IMAGE_BASE_NAME: &str = "airflow"; -pub const AIRFLOW_FULL_CONTROLLER_NAME: &str = - concatcp!(AIRFLOW_CONTROLLER_NAME, '.', OPERATOR_NAME); - -pub struct Ctx { - pub client: stackable_operator::client::Client, - pub product_config: ProductConfigManager, - pub operator_environment: OperatorEnvironmentOptions, -} - -#[derive(Snafu, Debug, EnumDiscriminants)] -#[strum_discriminants(derive(IntoStaticStr))] -pub enum Error { - #[snafu(display("object defines no airflow config role"))] - NoAirflowRole, - - #[snafu(display("failed to apply global Service"))] - ApplyRoleService { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to apply Service for {rolegroup}"))] - ApplyRoleGroupService { - source: stackable_operator::cluster_resources::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to apply ConfigMap for {rolegroup}"))] - ApplyRoleGroupConfig { - source: stackable_operator::cluster_resources::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to apply StatefulSet for {rolegroup}"))] - ApplyRoleGroupStatefulSet { - source: stackable_operator::cluster_resources::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("invalid product config"))] - InvalidProductConfig { - source: stackable_operator::product_config_utils::Error, - }, - - #[snafu(display("object is missing metadata to build owner reference"))] - ObjectMissingMetadataForOwnerRef { - source: stackable_operator::builder::meta::Error, - }, - - #[snafu(display("Failed to transform configs"))] - ProductConfigTransform { - source: stackable_operator::product_config_utils::Error, - }, - - #[snafu(display("failed to patch service account"))] - ApplyServiceAccount { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to patch role binding: {source}"))] - ApplyRoleBinding { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to build RBAC objects"))] - 
BuildRBACObjects { - source: stackable_operator::commons::rbac::Error, - }, - - #[snafu(display("failed to retrieve AuthenticationClass {authentication_class}"))] - AuthenticationClassRetrieval { - source: stackable_operator::cluster_resources::Error, - authentication_class: ObjectRef, - }, - - #[snafu(display( - "Airflow doesn't support the AuthenticationClass provider - {authentication_class_provider} from AuthenticationClass {authentication_class}" - ))] - AuthenticationClassProviderNotSupported { - authentication_class_provider: String, - authentication_class: ObjectRef, - }, - - #[snafu(display("failed to build config file for {rolegroup}"))] - BuildRoleGroupConfigFile { - source: FlaskAppConfigWriterError, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to build ConfigMap for {rolegroup}"))] - BuildRoleGroupConfig { - source: stackable_operator::builder::configmap::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to resolve and merge config for role and role group"))] - FailedToResolveConfig { source: crd::Error }, - - #[snafu(display("could not parse Airflow role [{role}]"))] - UnidentifiedAirflowRole { - source: strum::ParseError, - role: String, - }, - - #[snafu(display("invalid container name"))] - InvalidContainerName { - source: stackable_operator::builder::pod::container::Error, - }, - - #[snafu(display("invalid git-sync specification"))] - InvalidGitSyncSpec { source: git_sync::v1alpha2::Error }, - - #[snafu(display("failed to create cluster resources"))] - CreateClusterResources { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to delete orphaned resources"))] - DeleteOrphanedResources { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("vector agent is enabled but vector aggregator ConfigMap is missing"))] - VectorAggregatorConfigMapMissing, - - #[snafu(display("failed to add the logging configuration to the ConfigMap [{cm_name}]"))] - 
InvalidLoggingConfig { - source: crate::product_logging::Error, - cm_name: String, - }, - - #[snafu(display("failed to update status"))] - ApplyStatus { - source: stackable_operator::client::Error, - }, - - #[snafu(display("failed to apply authentication configuration"))] - InvalidAuthenticationConfig { source: crd::authentication::Error }, - - #[snafu(display("pod template serialization"))] - PodTemplateSerde { source: serde_yaml::Error }, - - #[snafu(display("failed to build the pod template config map"))] - PodTemplateConfigMap { - source: stackable_operator::builder::configmap::Error, - }, - - #[snafu(display("failed to apply executor template ConfigMap"))] - ApplyExecutorTemplateConfig { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to create PodDisruptionBudget"))] - FailedToCreatePdb { - source: crate::operations::pdb::Error, - }, - - #[snafu(display("failed to configure graceful shutdown"))] - GracefulShutdown { - source: crate::operations::graceful_shutdown::Error, - }, - - #[snafu(display("failed to build label"))] - BuildLabel { source: LabelError }, - - #[snafu(display("failed to build object meta data"))] - ObjectMeta { - source: stackable_operator::builder::meta::Error, - }, - - #[snafu(display( - "failed to build volume or volume mount spec for the LDAP backend TLS config" - ))] - VolumeAndMounts { source: ldap::v1alpha1::Error }, - - #[snafu(display("failed to construct config"))] - ConstructConfig { source: config::Error }, - - #[snafu(display( - "failed to write to String (Vec to be precise) containing Airflow config" - ))] - WriteToConfigFileString { source: std::io::Error }, - - #[snafu(display("failed to configure logging"))] - ConfigureLogging { source: LoggingError }, - - #[snafu(display("failed to add needed volume"))] - AddVolume { source: builder::pod::Error }, - - #[snafu(display("failed to add needed volumeMount"))] - AddVolumeMount { - source: builder::pod::container::Error, - }, - - 
#[snafu(display("failed to add LDAP Volumes and VolumeMounts"))] - AddLdapVolumesAndVolumeMounts { source: ldap::v1alpha1::Error }, - - #[snafu(display("failed to add TLS Volumes and VolumeMounts"))] - AddTlsVolumesAndVolumeMounts { - source: stackable_operator::commons::tls_verification::TlsClientDetailsError, - }, - - #[snafu(display("AirflowCluster object is invalid"))] - InvalidAirflowCluster { - source: error_boundary::InvalidObject, - }, - - #[snafu(display("failed to build Statefulset environmental variables"))] - BuildStatefulsetEnvVars { source: env_vars::Error }, - - #[snafu(display("failed to build Labels"))] - LabelBuild { - source: stackable_operator::kvp::LabelError, - }, - - #[snafu(display("failed to build listener volume"))] - BuildListenerVolume { - source: ListenerOperatorVolumeSourceBuilderError, - }, - - #[snafu(display("failed to apply group listener"))] - ApplyGroupListener { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to configure service"))] - ServiceConfiguration { source: crate::service::Error }, - - #[snafu(display("invalid authorization config"))] - InvalidAuthorizationConfig { - source: stackable_operator::commons::opa::Error, - }, - - #[snafu(display("failed to resolve product image"))] - ResolveProductImage { - source: product_image_selection::Error, - }, - - #[snafu(display("failed to create internal secret"))] - InvalidInternalSecret { - source: random_secret_creation::Error, - }, -} - -type Result = std::result::Result; - -impl ReconcilerError for Error { - fn category(&self) -> &'static str { - ErrorDiscriminants::from(self).into() - } -} - -pub async fn reconcile_airflow( - airflow: Arc>, - ctx: Arc, -) -> Result { - tracing::info!("Starting reconcile"); - - let airflow = airflow - .0 - .as_ref() - .map_err(error_boundary::InvalidObject::clone) - .context(InvalidAirflowClusterSnafu)?; - - let client = &ctx.client; - let resolved_product_image = airflow - .spec - .image - .resolve( - 
CONTAINER_IMAGE_BASE_NAME, - &ctx.operator_environment.image_repository, - crate::built_info::PKG_VERSION, - ) - .context(ResolveProductImageSnafu)?; - - let cluster_operation_cond_builder = - ClusterOperationsConditionBuilder::new(&airflow.spec.cluster_operation); - - let authentication_config = AirflowClientAuthenticationDetailsResolved::from( - &airflow.spec.cluster_config.authentication, - client, - ) - .await - .context(InvalidAuthenticationConfigSnafu)?; - - let authorization_config = AirflowAuthorizationResolved::from_authorization_config( - client, - airflow, - &airflow.spec.cluster_config.authorization, - ) - .await - .context(InvalidAuthorizationConfigSnafu)?; - // We don't have a config file, but do everything via env substitution - - let templating_mechanism = TemplatingMechanism::BashEnvSubstitution; - let metadata_database_connection_details = airflow - .spec - .cluster_config - .metadata_database - .sqlalchemy_connection_details_with_templating("METADATA", &templating_mechanism); - let celery_database_connection_details = match &airflow.spec.executor { - AirflowExecutor::CeleryExecutors { - result_backend: celery_result_backend, - broker: celery_broker, - .. 
- } => { - let celery_result_backend = celery_result_backend - .celery_connection_details_with_templating( - "CELERY_RESULT_BACKEND", - &templating_mechanism, - ); - let celery_broker = celery_broker - .celery_connection_details_with_templating("CELERY_BROKER", &templating_mechanism); - Some((celery_result_backend, celery_broker)) - } - _ => None, - }; - - let mut roles = HashMap::new(); - - // if the kubernetes executor is specified there will be no worker role as the pods - // are provisioned by airflow as defined by the task (default: one pod per task) - for role in AirflowRole::iter() { - if let Some(resolved_role) = airflow.get_role(&role) { - roles.insert( - role.to_string(), - ( - vec![ - PropertyNameKind::Env, - PropertyNameKind::File(AIRFLOW_CONFIG_FILENAME.into()), - ], - resolved_role.clone(), - ), - ); - } - } - - let role_config = transform_all_roles_to_config(airflow, &roles); - let validated_role_config = validate_all_roles_and_groups_config( - &resolved_product_image.product_version, - &role_config.context(ProductConfigTransformSnafu)?, - &ctx.product_config, - false, - false, - ) - .context(InvalidProductConfigSnafu)?; - - let mut cluster_resources = ClusterResources::new( - APP_NAME, - OPERATOR_NAME, - AIRFLOW_CONTROLLER_NAME, - &airflow.object_ref(&()), - ClusterResourceApplyStrategy::from(&airflow.spec.cluster_operation), - &airflow.spec.object_overrides, - ) - .context(CreateClusterResourcesSnafu)?; - - let required_labels = cluster_resources - .get_required_labels() - .context(BuildLabelSnafu)?; - - let (rbac_sa, rbac_rolebinding) = - build_rbac_resources(airflow, APP_NAME, required_labels).context(BuildRBACObjectsSnafu)?; - - let rbac_sa = cluster_resources - .add(client, rbac_sa.clone()) - .await - .context(ApplyServiceAccountSnafu)?; - cluster_resources - .add(client, rbac_rolebinding) - .await - .context(ApplyRoleBindingSnafu)?; - - let mut ss_cond_builder = StatefulSetConditionBuilder::default(); - - let airflow_executor = 
&airflow.spec.executor; - - // if the kubernetes executor is specified, in place of a worker role that will be in the role - // collection there will be a pod template created to be used for pod provisioning - if let AirflowExecutor::KubernetesExecutors { - common_configuration, - } = &airflow_executor - { - build_executor_template( - airflow, - common_configuration, - &metadata_database_connection_details, - &resolved_product_image, - &authentication_config, - &authorization_config, - &mut cluster_resources, - client, - &rbac_sa, - ) - .await?; - } - - random_secret_creation::create_random_secret_if_not_exists( - &airflow.shared_internal_secret_secret_name(), - INTERNAL_SECRET_SECRET_KEY, - 256, - airflow, - client, - ) - .await - .context(InvalidInternalSecretSnafu)?; - - random_secret_creation::create_random_secret_if_not_exists( - &airflow.shared_jwt_secret_secret_name(), - JWT_SECRET_SECRET_KEY, - 256, - airflow, - client, - ) - .await - .context(InvalidInternalSecretSnafu)?; - - random_secret_creation::create_random_secret_if_not_exists( - &airflow.shared_fernet_key_secret_name(), - FERNET_KEY_SECRET_KEY, - // https://airflow.apache.org/docs/apache-airflow/stable/security/secrets/fernet.html#security-fernet - // does not document how long the fernet key should be, but recommends using - // python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())" - // which returns `jUm21LuA76YZmrIa9u4eXRg0h0P24MDC9IDOmDvJbfw=`, which has 44 characters, which makes 32 bytes. 
- 32, - airflow, - client, - ) - .await - .context(InvalidInternalSecretSnafu)?; - - for (role_name, role_config) in validated_role_config.iter() { - let airflow_role = - AirflowRole::from_str(role_name).context(UnidentifiedAirflowRoleSnafu { - role: role_name.to_string(), - })?; - - if let Some(GenericRoleConfig { - pod_disruption_budget: pdb, - }) = airflow.role_config(&airflow_role) - { - add_pdbs(&pdb, airflow, &airflow_role, client, &mut cluster_resources) - .await - .context(FailedToCreatePdbSnafu)?; - } - - if let Some(listener_class) = airflow_role.listener_class_name(airflow) { - if let Some(listener_group_name) = airflow.group_listener_name(&airflow_role) { - let rg_group_listener = build_group_listener( - airflow, - build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - role_name, - "none", - ), - listener_class.to_string(), - listener_group_name, - )?; - cluster_resources - .add(client, rg_group_listener) - .await - .context(ApplyGroupListenerSnafu)?; - } - } - - for (rolegroup_name, rolegroup_config) in role_config.iter() { - let rolegroup = RoleGroupRef { - cluster: ObjectRef::from_obj(airflow), - role: role_name.into(), - role_group: rolegroup_name.into(), - }; - - let merged_airflow_config = airflow - .merged_config(&airflow_role, &rolegroup) - .context(FailedToResolveConfigSnafu)?; - - let git_sync_resources = git_sync::v1alpha2::GitSyncResources::new( - &airflow.spec.cluster_config.dags_git_sync, - &resolved_product_image, - &env_vars_from_rolegroup_config(rolegroup_config), - &airflow.volume_mounts(), - LOG_VOLUME_NAME, - &merged_airflow_config - .logging - .for_container(&Container::GitSync), - ) - .context(InvalidGitSyncSpecSnafu)?; - - let role_group_service_recommended_labels = build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - &rolegroup.role, - &rolegroup.role_group, - ); - - let role_group_service_selector = 
Labels::role_group_selector( - airflow, - APP_NAME, - &rolegroup.role, - &rolegroup.role_group, - ) - .context(LabelBuildSnafu)?; - - let rg_headless_service = build_rolegroup_headless_service( - airflow, - &rolegroup, - role_group_service_recommended_labels.clone(), - role_group_service_selector.clone().into(), - ) - .context(ServiceConfigurationSnafu)?; - - cluster_resources - .add(client, rg_headless_service) - .await - .context(ApplyRoleGroupServiceSnafu { - rolegroup: rolegroup.clone(), - })?; - - let rg_metrics_service = build_rolegroup_metrics_service( - airflow, - &rolegroup, - role_group_service_recommended_labels, - role_group_service_selector.into(), - ) - .context(ServiceConfigurationSnafu)?; - cluster_resources - .add(client, rg_metrics_service) - .await - .context(ApplyRoleGroupServiceSnafu { - rolegroup: rolegroup.clone(), - })?; - - let rg_configmap = build_rolegroup_config_map( - airflow, - &resolved_product_image, - &rolegroup, - rolegroup_config, - &authentication_config, - &authorization_config, - &merged_airflow_config.logging, - &Container::Airflow, - )?; - cluster_resources - .add(client, rg_configmap) - .await - .with_context(|_| ApplyRoleGroupConfigSnafu { - rolegroup: rolegroup.clone(), - })?; - - // Note: The StatefulSet needs to be applied after all ConfigMaps and Secrets it mounts - // to prevent unnecessary Pod restarts. - // See https://github.com/stackabletech/commons-operator/issues/111 for details. 
- let rg_statefulset = build_server_rolegroup_statefulset( - airflow, - &resolved_product_image, - &airflow_role, - &rolegroup, - rolegroup_config, - &authentication_config, - &authorization_config, - &metadata_database_connection_details, - &celery_database_connection_details, - &rbac_sa, - &merged_airflow_config, - airflow_executor, - &git_sync_resources, - )?; - - ss_cond_builder.add( - cluster_resources - .add(client, rg_statefulset) - .await - .context(ApplyRoleGroupStatefulSetSnafu { - rolegroup: rolegroup.clone(), - })?, - ); - } - } - - cluster_resources - .delete_orphaned_resources(client) - .await - .context(DeleteOrphanedResourcesSnafu)?; - - let status = AirflowClusterStatus { - conditions: compute_conditions( - airflow, - &[&ss_cond_builder, &cluster_operation_cond_builder], - ), - }; - - client - .apply_patch_status(OPERATOR_NAME, airflow, &status) - .await - .context(ApplyStatusSnafu)?; - - Ok(Action::await_change()) -} - -#[allow(clippy::too_many_arguments)] -async fn build_executor_template( - airflow: &v1alpha2::AirflowCluster, - common_config: &AirflowExecutorCommonConfiguration, - metadata_database_connection_details: &SqlAlchemyDatabaseConnectionDetails, - resolved_product_image: &ResolvedProductImage, - authentication_config: &AirflowClientAuthenticationDetailsResolved, - authorization_config: &AirflowAuthorizationResolved, - cluster_resources: &mut ClusterResources<'_>, - client: &stackable_operator::client::Client, - rbac_sa: &stackable_operator::k8s_openapi::api::core::v1::ServiceAccount, -) -> Result<(), Error> { - let merged_executor_config = airflow - .merged_executor_config(&common_config.config) - .context(FailedToResolveConfigSnafu)?; - let rolegroup = RoleGroupRef { - cluster: ObjectRef::from_obj(airflow), - role: "executor".into(), - role_group: "kubernetes".into(), - }; - - let rg_configmap = build_rolegroup_config_map( - airflow, - resolved_product_image, - &rolegroup, - &HashMap::new(), - authentication_config, - 
authorization_config, - &merged_executor_config.logging, - &Container::Base, - )?; - cluster_resources - .add(client, rg_configmap) - .await - .with_context(|_| ApplyRoleGroupConfigSnafu { - rolegroup: rolegroup.clone(), - })?; - - let git_sync_resources = git_sync::v1alpha2::GitSyncResources::new( - &airflow.spec.cluster_config.dags_git_sync, - resolved_product_image, - &env_vars_from(&common_config.env_overrides), - &airflow.volume_mounts(), - LOG_VOLUME_NAME, - &merged_executor_config - .logging - .for_container(&Container::GitSync), - ) - .context(InvalidGitSyncSpecSnafu)?; - - let worker_pod_template_config_map = build_executor_template_config_map( - airflow, - resolved_product_image, - authentication_config, - metadata_database_connection_details, - &rbac_sa.name_unchecked(), - &merged_executor_config, - &common_config.env_overrides, - &common_config.pod_overrides, - &rolegroup, - &git_sync_resources, - )?; - cluster_resources - .add(client, worker_pod_template_config_map) - .await - .with_context(|_| ApplyExecutorTemplateConfigSnafu {})?; - Ok(()) -} - -/// The rolegroup [`ConfigMap`] configures the rolegroup based on the configuration given by the administrator -#[allow(clippy::too_many_arguments)] -fn build_rolegroup_config_map( - airflow: &v1alpha2::AirflowCluster, - resolved_product_image: &ResolvedProductImage, - rolegroup: &RoleGroupRef, - rolegroup_config: &HashMap>, - authentication_config: &AirflowClientAuthenticationDetailsResolved, - authorization_config: &AirflowAuthorizationResolved, - logging: &Logging, - container: &Container, -) -> Result { - let mut config: BTreeMap = BTreeMap::new(); - - // this will call default values from AirflowClientAuthenticationDetails - config::add_airflow_config( - &mut config, - authentication_config, - authorization_config, - &resolved_product_image.product_version, - ) - .context(ConstructConfigSnafu)?; - - tracing::debug!( - "Default config for {}: {:?}", - rolegroup.object_name(), - config - ); - - let mut 
file_config = rolegroup_config - .get(&PropertyNameKind::File(AIRFLOW_CONFIG_FILENAME.to_string())) - .cloned() - .unwrap_or_default(); - - tracing::debug!( - "Config overrides for {}: {:?}", - rolegroup.object_name(), - file_config - ); - - // now add any overrides, replacing any defaults - config.append(&mut file_config); - - tracing::debug!( - "Merged config for {}: {:?}", - rolegroup.object_name(), - config - ); - - let mut config_file = Vec::new(); - - // By removing the keys from `config_properties`, we avoid pasting the Python code into a Python variable as well - // (which would be bad) - if let Some(header) = config.remove(CONFIG_OVERRIDE_FILE_HEADER_KEY) { - writeln!(config_file, "{}", header).context(WriteToConfigFileStringSnafu)?; - } - - let temp_file_footer: Option = config.remove(CONFIG_OVERRIDE_FILE_FOOTER_KEY); - - flask_app_config_writer::write::( - &mut config_file, - config.iter(), - PYTHON_IMPORTS, - ) - .with_context(|_| BuildRoleGroupConfigFileSnafu { - rolegroup: rolegroup.clone(), - })?; - - if let Some(footer) = temp_file_footer { - writeln!(config_file, "{}", footer).context(WriteToConfigFileStringSnafu)?; - } - - let mut cm_builder = ConfigMapBuilder::new(); - - cm_builder - .metadata( - ObjectMetaBuilder::new() - .name_and_namespace(airflow) - .name(rolegroup.object_name()) - .ownerreference_from_resource(airflow, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - &rolegroup.role, - &rolegroup.role_group, - )) - .context(ObjectMetaSnafu)? 
- .build(), - ) - .add_data( - AIRFLOW_CONFIG_FILENAME, - String::from_utf8(config_file).unwrap(), - ); - - extend_config_map_with_log_config( - rolegroup, - logging, - container, - &Container::Vector, - &mut cm_builder, - resolved_product_image, - ) - .context(InvalidLoggingConfigSnafu { - cm_name: rolegroup.object_name(), - })?; - - cm_builder - .build() - .with_context(|_| BuildRoleGroupConfigSnafu { - rolegroup: rolegroup.clone(), - }) -} - -fn build_rolegroup_metadata( - airflow: &v1alpha2::AirflowCluster, - resolved_product_image: &&ResolvedProductImage, - rolegroup: &&RoleGroupRef, - prometheus_label: Label, - name: String, -) -> Result { - let metadata = ObjectMetaBuilder::new() - .name_and_namespace(airflow) - .name(name) - .ownerreference_from_resource(airflow, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - &rolegroup.role, - &rolegroup.role_group, - )) - .context(ObjectMetaSnafu)? - .with_label(prometheus_label) - .build(); - Ok(metadata) -} - -pub fn build_group_listener( - airflow: &v1alpha2::AirflowCluster, - object_labels: ObjectLabels, - listener_class: String, - listener_group_name: String, -) -> Result { - Ok(listener::v1alpha1::Listener { - metadata: ObjectMetaBuilder::new() - .name_and_namespace(airflow) - .name(listener_group_name) - .ownerreference_from_resource(airflow, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&object_labels) - .context(ObjectMetaSnafu)? - .build(), - spec: listener::v1alpha1::ListenerSpec { - class_name: Some(listener_class), - ports: Some(listener_ports()), - ..listener::v1alpha1::ListenerSpec::default() - }, - status: None, - }) -} - -/// We only use the http port here and intentionally omit -/// the metrics one. 
-fn listener_ports() -> Vec { - vec![listener::v1alpha1::ListenerPort { - name: HTTP_PORT_NAME.to_string(), - port: HTTP_PORT.into(), - protocol: Some("TCP".to_string()), - }] -} - -/// The rolegroup [`StatefulSet`] runs the rolegroup, as configured by the administrator. -#[allow(clippy::too_many_arguments)] -fn build_server_rolegroup_statefulset( - airflow: &v1alpha2::AirflowCluster, - resolved_product_image: &ResolvedProductImage, - airflow_role: &AirflowRole, - rolegroup_ref: &RoleGroupRef, - rolegroup_config: &HashMap>, - authentication_config: &AirflowClientAuthenticationDetailsResolved, - authorization_config: &AirflowAuthorizationResolved, - metadata_database_connection_details: &SqlAlchemyDatabaseConnectionDetails, - celery_database_connection_details: &Option<( - CeleryDatabaseConnectionDetails, - CeleryDatabaseConnectionDetails, - )>, - service_account: &ServiceAccount, - merged_airflow_config: &AirflowConfig, - executor: &AirflowExecutor, - git_sync_resources: &git_sync::v1alpha2::GitSyncResources, -) -> Result { - let binding = airflow.get_role(airflow_role); - let role = binding.as_ref().context(NoAirflowRoleSnafu)?; - - let rolegroup = role.role_groups.get(&rolegroup_ref.role_group); - - let mut pb = PodBuilder::new(); - let recommended_object_labels = build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - ); - // Used for PVC templates that cannot be modified once they are deployed - let unversioned_recommended_labels = Labels::recommended(&build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - // A version value is required, and we do want to use the "recommended" format for the other desired labels - "none", - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) - .context(LabelBuildSnafu)?; - - let pb_metadata = ObjectMetaBuilder::new() - .with_recommended_labels(&recommended_object_labels) - .context(ObjectMetaSnafu)? 
- .with_annotation( - Annotation::try_from(( - "kubectl.kubernetes.io/default-container", - format!("{}", Container::Airflow), - )) - .expect("static annotation is always valid"), - ) - .build(); - - pb.metadata(pb_metadata) - .image_pull_secrets_from_product_image(resolved_product_image) - .affinity(&merged_airflow_config.affinity) - .service_account_name(service_account.name_any()) - .security_context(PodSecurityContextBuilder::new().fs_group(1000).build()); - - let mut airflow_container = ContainerBuilder::new(&Container::Airflow.to_string()) - .context(InvalidContainerNameSnafu)?; - - add_authentication_volumes_and_volume_mounts( - authentication_config, - &mut airflow_container, - &mut pb, - )?; - - add_airflow_graceful_shutdown_config(merged_airflow_config, &mut pb) - .context(GracefulShutdownSnafu)?; - - let mut airflow_container_args = Vec::new(); - airflow_container_args.extend(airflow_role.get_commands( - airflow, - authentication_config, - resolved_product_image, - )); - - airflow_container - .image_from_product_image(resolved_product_image) - .resources(merged_airflow_config.resources.clone().into()) - .command(vec![ - "/bin/bash".to_string(), - "-x".to_string(), - "-euo".to_string(), - "pipefail".to_string(), - "-c".to_string(), - ]) - .args(vec![airflow_container_args.join("\n")]); - - airflow_container.add_env_vars( - env_vars::build_airflow_statefulset_envs( - airflow, - airflow_role, - rolegroup_config, - executor, - authentication_config, - authorization_config, - metadata_database_connection_details, - celery_database_connection_details, - git_sync_resources, - resolved_product_image, - ) - .context(BuildStatefulsetEnvVarsSnafu)?, - ); - - let volume_mounts = airflow.volume_mounts(); - airflow_container - .add_volume_mounts(volume_mounts) - .context(AddVolumeMountSnafu)?; - airflow_container - .add_volume_mount(CONFIG_VOLUME_NAME, CONFIG_PATH) - .context(AddVolumeMountSnafu)?; - airflow_container - .add_volume_mount(LOG_CONFIG_VOLUME_NAME, 
LOG_CONFIG_DIR) - .context(AddVolumeMountSnafu)?; - airflow_container - .add_volume_mount(LOG_VOLUME_NAME, STACKABLE_LOG_DIR) - .context(AddVolumeMountSnafu)?; - - if let AirflowExecutor::KubernetesExecutors { .. } = executor { - airflow_container - .add_volume_mount(TEMPLATE_VOLUME_NAME, TEMPLATE_LOCATION) - .context(AddVolumeMountSnafu)?; - } - - // for roles with an http endpoint - if let Some(http_port) = airflow_role.get_http_port() { - let probe = Probe { - tcp_socket: Some(TCPSocketAction { - port: IntOrString::Int(http_port.into()), - ..TCPSocketAction::default() - }), - initial_delay_seconds: Some(60), - period_seconds: Some(10), - failure_threshold: Some(6), - ..Probe::default() - }; - airflow_container.readiness_probe(probe.clone()); - airflow_container.liveness_probe(probe); - airflow_container.add_container_port(HTTP_PORT_NAME, http_port.into()); - } - - let mut pvcs: Option> = None; - - if let Some(listener_group_name) = airflow.group_listener_name(airflow_role) { - // Listener endpoints for the Webserver role will use persistent volumes - // so that load balancers can hard-code the target addresses. This will - // be the case even when no class is set (and the value defaults to - // cluster-internal) as the address should still be consistent. - let pvc = ListenerOperatorVolumeSourceBuilder::new( - &ListenerReference::ListenerName(listener_group_name), - &unversioned_recommended_labels, - ) - .build_pvc(LISTENER_VOLUME_NAME.to_string()) - .context(BuildListenerVolumeSnafu)?; - pvcs = Some(vec![pvc]); - - airflow_container - .add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR) - .context(AddVolumeMountSnafu)?; - } - - // If the DAG is modularized we may encounter a timing issue whereby the celery worker - // has started *before* all modules referenced by the DAG have been fetched by gitsync - // and registered. This will result in ModuleNotFoundError errors. 
This can be avoided - // by running a one-off git-sync process in an init-container so that all DAG - // dependencies are fully loaded. The sidecar git-sync is then used for regular updates. - let use_git_sync_init_containers = matches!(executor, AirflowExecutor::CeleryExecutors { .. }); - add_git_sync_resources( - &mut pb, - &mut airflow_container, - git_sync_resources, - true, - use_git_sync_init_containers, - )?; - - metadata_database_connection_details.add_to_container(&mut airflow_container); - if let Some((celery_result_backend, celery_broker)) = celery_database_connection_details { - celery_result_backend.add_to_container(&mut airflow_container); - celery_broker.add_to_container(&mut airflow_container); - } - - pb.add_container(airflow_container.build()); - - let metrics_container = ContainerBuilder::new("metrics") - .context(InvalidContainerNameSnafu)? - .image_from_product_image(resolved_product_image) - .command(vec![ - "/bin/bash".to_string(), - "-x".to_string(), - "-euo".to_string(), - "pipefail".to_string(), - "-c".to_string(), - ]) - .args(vec![ - [ - COMMON_BASH_TRAP_FUNCTIONS.to_string(), - "prepare_signal_handlers".to_string(), - "/stackable/statsd_exporter &".to_string(), - "wait_for_termination $!".to_string(), - ] - .join("\n"), - ]) - .add_container_port(METRICS_PORT_NAME, METRICS_PORT.into()) - .resources( - ResourceRequirementsBuilder::new() - .with_cpu_request("100m") - .with_cpu_limit("200m") - .with_memory_request("64Mi") - .with_memory_limit("64Mi") - .build(), - ) - .build(); - pb.add_container(metrics_container); - - pb.add_volumes(airflow.volumes().clone()) - .context(AddVolumeSnafu)?; - pb.add_volumes(controller_commons::create_volumes( - &rolegroup_ref.object_name(), - merged_airflow_config - .logging - .containers - .get(&Container::Airflow), - )) - .context(AddVolumeSnafu)?; - - if let AirflowExecutor::KubernetesExecutors { .. 
} = executor { - pb.add_volume( - VolumeBuilder::new(TEMPLATE_VOLUME_NAME) - .with_config_map(airflow.executor_template_configmap_name()) - .build(), - ) - .context(AddVolumeSnafu)?; - } - - if merged_airflow_config.logging.enable_vector_agent { - match &airflow - .spec - .cluster_config - .vector_aggregator_config_map_name - { - Some(vector_aggregator_config_map_name) => { - pb.add_container(build_logging_container( - resolved_product_image, - merged_airflow_config - .logging - .containers - .get(&Container::Vector), - vector_aggregator_config_map_name, - )?); - } - None => { - VectorAggregatorConfigMapMissingSnafu.fail()?; - } - } - } - let mut pod_template = pb.build_template(); - pod_template.merge_from(role.config.pod_overrides.clone()); - if let Some(rolegroup) = rolegroup { - pod_template.merge_from(rolegroup.config.pod_overrides.clone()); - } - - let restarter_label = - Label::try_from(("restarter.stackable.tech/enabled", "true")).context(BuildLabelSnafu)?; - - let metadata = build_rolegroup_metadata( - airflow, - &resolved_product_image, - &rolegroup_ref, - restarter_label, - rolegroup_ref.object_name(), - )?; - - let statefulset_match_labels = Labels::role_group_selector( - airflow, - APP_NAME, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - ) - .context(BuildLabelSnafu)?; - - let statefulset_spec = StatefulSetSpec { - pod_management_policy: Some( - match airflow_role { - AirflowRole::Scheduler => { - "OrderedReady" // Scheduler pods should start after another, since part of their startup phase is initializing the database, see crd/src/lib.rs - } - AirflowRole::Webserver - | AirflowRole::Worker - | AirflowRole::DagProcessor - | AirflowRole::Triggerer => "Parallel", - } - .to_string(), - ), - replicas: rolegroup.and_then(|rg| rg.replicas).map(i32::from), - selector: LabelSelector { - match_labels: Some(statefulset_match_labels.into()), - ..LabelSelector::default() - }, - service_name: stateful_set_service_name(rolegroup_ref), - template: 
pod_template, - volume_claim_templates: pvcs, - ..StatefulSetSpec::default() - }; - - Ok(StatefulSet { - metadata, - spec: Some(statefulset_spec), - status: None, - }) -} - -fn build_logging_container( - resolved_product_image: &ResolvedProductImage, - log_config: Option<&ContainerLogConfig>, - vector_aggregator_config_map_name: &str, -) -> Result { - product_logging::framework::vector_container( - resolved_product_image, - CONFIG_VOLUME_NAME, - LOG_VOLUME_NAME, - log_config, - ResourceRequirementsBuilder::new() - .with_cpu_request("250m") - .with_cpu_limit("500m") - .with_memory_request("128Mi") - .with_memory_limit("128Mi") - .build(), - vector_aggregator_config_map_name, - ) - .context(ConfigureLoggingSnafu) -} - -#[allow(clippy::too_many_arguments)] -fn build_executor_template_config_map( - airflow: &v1alpha2::AirflowCluster, - resolved_product_image: &ResolvedProductImage, - authentication_config: &AirflowClientAuthenticationDetailsResolved, - metadata_database_connection_details: &SqlAlchemyDatabaseConnectionDetails, - sa_name: &str, - merged_executor_config: &ExecutorConfig, - env_overrides: &HashMap, - pod_overrides: &PodTemplateSpec, - rolegroup_ref: &RoleGroupRef, - git_sync_resources: &git_sync::v1alpha2::GitSyncResources, -) -> Result { - let mut pb = PodBuilder::new(); - let pb_metadata = ObjectMetaBuilder::new() - .with_recommended_labels(&build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - "executor", - "executor-template", - )) - .context(ObjectMetaSnafu)? - .build(); - - pb.metadata(pb_metadata) - .image_pull_secrets_from_product_image(resolved_product_image) - .affinity(&merged_executor_config.affinity) - .service_account_name(sa_name) - .restart_policy("Never") - .security_context(PodSecurityContextBuilder::new().fs_group(1000).build()); - - add_executor_graceful_shutdown_config(merged_executor_config, &mut pb) - .context(GracefulShutdownSnafu)?; - - // N.B. 
this "base" name is an airflow requirement and should not be changed! - // See https://airflow.apache.org/docs/apache-airflow-providers-cncf-kubernetes/8.4.0/kubernetes_executor.html#base-image - let mut airflow_container = - ContainerBuilder::new(&Container::Base.to_string()).context(InvalidContainerNameSnafu)?; - - // Works too, had been changed - add_authentication_volumes_and_volume_mounts( - authentication_config, - &mut airflow_container, - &mut pb, - )?; - airflow_container - .image_from_product_image(resolved_product_image) - .resources(merged_executor_config.resources.clone().into()) - .add_env_vars(build_airflow_template_envs( - airflow, - env_overrides, - merged_executor_config, - metadata_database_connection_details, - git_sync_resources, - resolved_product_image, - )) - .add_volume_mounts(airflow.volume_mounts()) - .context(AddVolumeMountSnafu)? - .add_volume_mount(CONFIG_VOLUME_NAME, CONFIG_PATH) - .context(AddVolumeMountSnafu)? - .add_volume_mount(LOG_CONFIG_VOLUME_NAME, LOG_CONFIG_DIR) - .context(AddVolumeMountSnafu)? 
- .add_volume_mount(LOG_VOLUME_NAME, STACKABLE_LOG_DIR) - .context(AddVolumeMountSnafu)?; - - add_git_sync_resources( - &mut pb, - &mut airflow_container, - git_sync_resources, - false, - true, - )?; - - metadata_database_connection_details.add_to_container(&mut airflow_container); - - pb.add_container(airflow_container.build()); - pb.add_volumes(airflow.volumes().clone()) - .context(AddVolumeSnafu)?; - pb.add_volumes(controller_commons::create_volumes( - &rolegroup_ref.object_name(), - merged_executor_config - .logging - .containers - .get(&Container::Airflow), - )) - .context(AddVolumeSnafu)?; - - if merged_executor_config.logging.enable_vector_agent { - match &airflow - .spec - .cluster_config - .vector_aggregator_config_map_name - { - Some(vector_aggregator_config_map_name) => { - pb.add_container(build_logging_container( - resolved_product_image, - merged_executor_config - .logging - .containers - .get(&Container::Vector), - vector_aggregator_config_map_name, - )?); - } - None => { - VectorAggregatorConfigMapMissingSnafu.fail()?; - } - } - } - - let mut pod_template = pb.build_template(); - pod_template.merge_from(pod_overrides.clone()); - - let mut cm_builder = ConfigMapBuilder::new(); - - let restarter_label = - Label::try_from(("restarter.stackable.tech/enabled", "true")).context(BuildLabelSnafu)?; - - cm_builder - .metadata( - ObjectMetaBuilder::new() - .name_and_namespace(airflow) - .name(airflow.executor_template_configmap_name()) - .ownerreference_from_resource(airflow, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - airflow, - AIRFLOW_CONTROLLER_NAME, - &resolved_product_image.app_version_label_value, - "executor", - "executor-template", - )) - .context(ObjectMetaSnafu)? 
- .with_label(restarter_label) - .build(), - ) - .add_data( - TEMPLATE_NAME, - serde_yaml::to_string(&pod_template).context(PodTemplateSerdeSnafu)?, - ); - - cm_builder.build().context(PodTemplateConfigMapSnafu) -} - -pub fn error_policy( - _obj: Arc>, - error: &Error, - _ctx: Arc, -) -> Action { - match error { - // root object is invalid, will be requeued when modified anyway - Error::InvalidAirflowCluster { .. } => Action::await_change(), - - _ => Action::requeue(*Duration::from_secs(10)), - } -} - -fn add_authentication_volumes_and_volume_mounts( - authentication_config: &AirflowClientAuthenticationDetailsResolved, - cb: &mut ContainerBuilder, - pb: &mut PodBuilder, -) -> Result<()> { - // Different authentication entries can reference the same secret - // class or TLS certificate. It must be ensured that the volumes - // and volume mounts are only added once in such a case. - - let mut ldap_authentication_providers = BTreeSet::new(); - let mut tls_client_credentials = BTreeSet::new(); - - for auth_class_resolved in &authentication_config.authentication_classes_resolved { - match auth_class_resolved { - AirflowAuthenticationClassResolved::Ldap { provider } => { - ldap_authentication_providers.insert(provider); - } - AirflowAuthenticationClassResolved::Oidc { provider, .. 
} => { - tls_client_credentials.insert(&provider.tls); - } - } - } - - for provider in ldap_authentication_providers { - provider - .add_volumes_and_mounts(pb, vec![cb]) - .context(AddLdapVolumesAndVolumeMountsSnafu)?; - } - - for tls in tls_client_credentials { - tls.add_volumes_and_mounts(pb, vec![cb]) - .context(AddTlsVolumesAndVolumeMountsSnafu)?; - } - Ok(()) -} - -fn add_git_sync_resources( - pb: &mut PodBuilder, - cb: &mut ContainerBuilder, - git_sync_resources: &git_sync::v1alpha2::GitSyncResources, - add_sidecar_containers: bool, - add_init_containers: bool, -) -> Result<()> { - if add_sidecar_containers { - for container in git_sync_resources.git_sync_containers.iter().cloned() { - pb.add_container(container); - } - } - if add_init_containers { - for container in git_sync_resources.git_sync_init_containers.iter().cloned() { - pb.add_init_container(container); - } - } - pb.add_volumes(git_sync_resources.git_content_volumes.to_owned()) - .context(AddVolumeSnafu)?; - pb.add_volumes(git_sync_resources.git_ssh_volumes.to_owned()) - .context(AddVolumeSnafu)?; - pb.add_volumes(git_sync_resources.git_ca_cert_volumes.to_owned()) - .context(AddVolumeSnafu)?; - cb.add_volume_mounts(git_sync_resources.git_content_volume_mounts.to_owned()) - .context(AddVolumeMountSnafu)?; - - Ok(()) -} diff --git a/rust/operator-binary/src/controller.rs b/rust/operator-binary/src/controller.rs new file mode 100644 index 00000000..275073bb --- /dev/null +++ b/rust/operator-binary/src/controller.rs @@ -0,0 +1,335 @@ +use std::{collections::BTreeMap, marker::PhantomData, sync::Arc}; + +use const_format::concatcp; +use product_config::ProductConfigManager; +use snafu::{ResultExt, Snafu}; +use stackable_operator::{ + cli::OperatorEnvironmentOptions, + cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, + commons::{ + affinity::StackableAffinity, + product_image_selection::ResolvedProductImage, + resources::{NoRuntimeLimits, Resources}, + }, + crd::listener, + 
k8s_openapi::api::{ + apps::v1::StatefulSet, + core::v1::{ + ConfigMap, Container as K8sContainer, EnvVar, PersistentVolumeClaim, PodTemplateSpec, + Service, ServiceAccount, Volume, VolumeMount, + }, + policy::v1::PodDisruptionBudget, + rbac::v1::RoleBinding, + }, + kube::{ + Resource, + api::ObjectMeta, + core::{DeserializeGuard, error_boundary}, + runtime::{controller::Action, reflector::ObjectRef}, + }, + logging::controller::ReconcilerError, + product_logging::spec::ContainerLogConfig, + role_utils::RoleGroupRef, + shared::time::Duration, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::{ + crd::{APP_NAME, AirflowExecutor, AirflowRole, AirflowStorageConfig, OPERATOR_NAME, v1alpha2}, + framework::{ + HasName, HasUid, NameIsValidLabelValue, + product_logging::framework::{ValidatedContainerLogConfigChoice, VectorContainerLogConfig}, + types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, + }, + }, +}; + +pub mod apply; +pub mod build; +pub mod dereference; +pub mod update_status; +pub mod validate; + +pub const AIRFLOW_CONTROLLER_NAME: &str = "airflowcluster"; +pub const CONTAINER_IMAGE_BASE_NAME: &str = "airflow"; +pub const AIRFLOW_FULL_CONTROLLER_NAME: &str = + concatcp!(AIRFLOW_CONTROLLER_NAME, '.', OPERATOR_NAME); + +pub struct Ctx { + pub client: stackable_operator::client::Client, + pub product_config: ProductConfigManager, + pub operator_environment: OperatorEnvironmentOptions, +} + +pub(crate) struct Prepared; +pub(crate) struct Applied; + +pub(crate) struct KubernetesResources { + pub stateful_sets: Vec, + pub config_maps: Vec, + pub services: Vec, + pub service_accounts: Vec, + pub role_bindings: Vec, + pub pod_disruption_budgets: Vec, + pub listeners: Vec, + pub _status: PhantomData, +} + +#[derive(Clone, Debug)] +pub struct ValidatedRoleConfig { + pub pdb_enabled: bool, + pub pdb_max_unavailable: Option, + pub listener_class: Option, + pub group_listener_name: Option, +} + +#[derive(Clone, Debug)] +pub struct 
ValidatedRoleGroupConfig { + pub resources: Resources, + pub logging: ValidatedLogging, + pub affinity: StackableAffinity, + pub graceful_shutdown_timeout: Duration, + pub config_file_content: String, +} + +#[derive(Clone)] +pub struct PrecomputedPodData { + pub env_vars: Vec, + pub airflow_commands: Vec, + pub auth_volumes: Vec, + pub auth_volume_mounts: Vec, + pub extra_volumes: Vec, + pub extra_volume_mounts: Vec, + pub git_sync_containers: Vec, + pub git_sync_init_containers: Vec, + pub git_sync_volumes: Vec, + pub git_sync_volume_mounts: Vec, + pub vector_container: Option, + pub service_account_name: String, + pub replicas: Option, + pub pod_overrides: PodTemplateSpec, + pub executor: AirflowExecutor, + pub executor_template_configmap_name: Option, + pub listener_volume_claim_template: Option, +} + +#[derive(Clone, Debug)] +pub struct ValidatedLogging { + pub airflow_container: ValidatedContainerLogConfigChoice, + pub vector_container: Option, + pub git_sync_container_log_config: ContainerLogConfig, +} + +impl ValidatedLogging { + pub fn is_vector_agent_enabled(&self) -> bool { + self.vector_container.is_some() + } +} + +#[derive(Clone)] +pub struct ValidatedAirflowCluster { + metadata: ObjectMeta, + pub image: ResolvedProductImage, + pub name: ClusterName, + pub namespace: NamespaceName, + pub uid: Uid, + pub role_groups: BTreeMap>, + pub precomputed_pod_data: BTreeMap>, + pub executor_template_config_maps: Vec, + pub role_configs: BTreeMap, + pub executor: AirflowExecutor, +} + +impl ValidatedAirflowCluster { + #[allow(clippy::too_many_arguments)] + pub fn new( + image: ResolvedProductImage, + name: ClusterName, + namespace: NamespaceName, + uid: Uid, + role_groups: BTreeMap>, + precomputed_pod_data: BTreeMap>, + executor_template_config_maps: Vec, + role_configs: BTreeMap, + executor: AirflowExecutor, + ) -> Self { + Self { + metadata: ObjectMeta { + name: Some(name.to_string()), + namespace: Some(namespace.to_string()), + uid: Some(uid.to_string()), + 
..ObjectMeta::default() + }, + image, + name, + namespace, + uid, + role_groups, + precomputed_pod_data, + executor_template_config_maps, + role_configs, + executor, + } + } + + pub fn rolegroup_ref(&self, role: &AirflowRole, role_group: &str) -> RoleGroupRef { + RoleGroupRef { + cluster: ObjectRef::from_obj(self), + role: role.to_string(), + role_group: role_group.to_string(), + } + } +} + +impl HasName for ValidatedAirflowCluster { + fn to_name(&self) -> String { + self.name.to_string() + } +} + +impl HasUid for ValidatedAirflowCluster { + fn to_uid(&self) -> Uid { + self.uid.clone() + } +} + +impl Resource for ValidatedAirflowCluster { + type DynamicType = + ::DynamicType; + type Scope = ::Scope; + + fn kind(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha2::AirflowCluster::kind(dt) + } + + fn group(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha2::AirflowCluster::group(dt) + } + + fn version(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha2::AirflowCluster::version(dt) + } + + fn plural(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha2::AirflowCluster::plural(dt) + } + + fn meta(&self) -> &ObjectMeta { + &self.metadata + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.metadata + } +} + +impl NameIsValidLabelValue for ValidatedAirflowCluster { + fn to_label_value(&self) -> String { + self.name.to_label_value() + } +} + +// --------------------------------------------------------------------------- +// Reconcile +// --------------------------------------------------------------------------- + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("AirflowCluster object is invalid"))] + InvalidAirflowCluster { + source: error_boundary::InvalidObject, + }, + + #[snafu(display("failed to dereference resources"))] + Dereference { source: dereference::Error }, + + #[snafu(display("failed to validate cluster"))] + 
Validate { source: validate::Error }, + + #[snafu(display("failed to create cluster resources"))] + CreateClusterResources { + source: stackable_operator::cluster_resources::Error, + }, + + #[snafu(display("failed to apply resources"))] + Apply { source: apply::Error }, + + #[snafu(display("failed to update status"))] + UpdateStatus { source: update_status::Error }, +} + +type Result = std::result::Result; + +impl ReconcilerError for Error { + fn category(&self) -> &'static str { + ErrorDiscriminants::from(self).into() + } +} + +pub async fn reconcile( + airflow: Arc>, + ctx: Arc, +) -> Result { + tracing::info!("Starting reconcile"); + + let airflow = airflow + .0 + .as_ref() + .map_err(error_boundary::InvalidObject::clone) + .context(InvalidAirflowClusterSnafu)?; + + // --- dereference (async, fallible) --- + let dereferenced = dereference::dereference( + &ctx.client, + airflow, + CONTAINER_IMAGE_BASE_NAME, + &ctx.operator_environment.image_repository, + crate::built_info::PKG_VERSION, + ) + .await + .context(DereferenceSnafu)?; + + // --- validate (sync, fallible) --- + let validated = validate::validate_cluster(airflow, &dereferenced, &ctx.product_config) + .context(ValidateSnafu)?; + + // --- build (sync, infallible) --- + let prepared = build::build(&validated); + + // --- apply (async, fallible) --- + let cluster_resources = ClusterResources::new( + APP_NAME, + OPERATOR_NAME, + AIRFLOW_CONTROLLER_NAME, + &airflow.object_ref(&()), + ClusterResourceApplyStrategy::from(&airflow.spec.cluster_operation), + &airflow.spec.object_overrides, + ) + .context(CreateClusterResourcesSnafu)?; + + let applied = apply::Applier::new(&ctx.client, cluster_resources) + .apply(prepared) + .await + .context(ApplySnafu)?; + + // --- update status (async, fallible) --- + update_status::update_status(&ctx.client, airflow, applied) + .await + .context(UpdateStatusSnafu)?; + + Ok(Action::await_change()) +} + +pub fn error_policy( + _obj: Arc>, + error: &Error, + _ctx: Arc, +) -> Action 
{ + match error { + Error::InvalidAirflowCluster { .. } => Action::await_change(), + _ => Action::requeue(*Duration::from_secs(10)), + } +} diff --git a/rust/operator-binary/src/controller/apply.rs b/rust/operator-binary/src/controller/apply.rs new file mode 100644 index 00000000..c27a0e84 --- /dev/null +++ b/rust/operator-binary/src/controller/apply.rs @@ -0,0 +1,88 @@ +//! The apply step in the AirflowCluster controller +//! +//! Takes `KubernetesResources` and applies them to the cluster, +//! producing `KubernetesResources`. + +use std::marker::PhantomData; + +use snafu::{ResultExt, Snafu}; +use stackable_operator::{ + client::Client, + cluster_resources::{ClusterResource, ClusterResources}, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use super::{Applied, KubernetesResources, Prepared}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to apply resource"))] + ApplyResource { + source: stackable_operator::cluster_resources::Error, + }, + + #[snafu(display("failed to delete orphaned resources"))] + DeleteOrphanedResources { + source: stackable_operator::cluster_resources::Error, + }, +} + +pub struct Applier<'a> { + client: &'a Client, + cluster_resources: ClusterResources<'a>, +} + +impl<'a> Applier<'a> { + pub fn new(client: &'a Client, cluster_resources: ClusterResources<'a>) -> Self { + Applier { + client, + cluster_resources, + } + } + + pub async fn apply( + mut self, + resources: KubernetesResources, + ) -> Result, Error> { + let config_maps = self.add_resources(resources.config_maps).await?; + let service_accounts = self.add_resources(resources.service_accounts).await?; + let services = self.add_resources(resources.services).await?; + let role_bindings = self.add_resources(resources.role_bindings).await?; + let listeners = self.add_resources(resources.listeners).await?; + let stateful_sets = self.add_resources(resources.stateful_sets).await?; + let 
pod_disruption_budgets = self.add_resources(resources.pod_disruption_budgets).await?; + + self.cluster_resources + .delete_orphaned_resources(self.client) + .await + .context(DeleteOrphanedResourcesSnafu)?; + + Ok(KubernetesResources { + stateful_sets, + config_maps, + services, + service_accounts, + role_bindings, + pod_disruption_budgets, + listeners, + _status: PhantomData, + }) + } + + async fn add_resources( + &mut self, + resources: Vec, + ) -> Result, Error> { + let mut applied = vec![]; + for resource in resources { + let applied_resource = self + .cluster_resources + .add(self.client, resource) + .await + .context(ApplyResourceSnafu)?; + applied.push(applied_resource); + } + Ok(applied) + } +} diff --git a/rust/operator-binary/src/controller/build.rs b/rust/operator-binary/src/controller/build.rs new file mode 100644 index 00000000..5a77c752 --- /dev/null +++ b/rust/operator-binary/src/controller/build.rs @@ -0,0 +1,506 @@ +//! The build step in the AirflowCluster controller +//! +//! Takes a [`ValidatedAirflowCluster`] and produces [`KubernetesResources`]. +//! All methods in this module are infallible — validation happens in the validate stage. 
+ +pub mod role_group_builder; + +use std::marker::PhantomData; + +use role_group_builder::RoleGroupBuilder; +use stackable_operator::{ + builder::meta::ObjectMetaBuilder, + commons::rbac::build_rbac_resources, + crd::listener, + k8s_openapi::api::core::v1::{Service, ServicePort, ServiceSpec}, + kvp::{Annotations, Labels}, + role_utils::RoleGroupRef, +}; + +use super::{ + AIRFLOW_CONTROLLER_NAME, KubernetesResources, Prepared, ValidatedAirflowCluster, + ValidatedRoleConfig, +}; +use crate::{ + crd::{ + APP_NAME, AirflowExecutor, AirflowRole, Container, HTTP_PORT, HTTP_PORT_NAME, METRICS_PORT, + METRICS_PORT_NAME, OPERATOR_NAME, + }, + framework::{self, builder::meta::ownerreference_from_resource}, +}; + +fn main_container_for_role(_role: &AirflowRole) -> Container { + Container::Airflow +} + +pub fn build(validated: &ValidatedAirflowCluster) -> KubernetesResources { + let mut stateful_sets = Vec::new(); + let mut config_maps = Vec::new(); + let mut services = Vec::new(); + let mut pod_disruption_budgets = Vec::new(); + let mut listeners = Vec::new(); + + // --- RBAC --- + let rbac_labels = recommended_labels(validated, "rbac", "rbac"); + + let (rbac_sa, rbac_rolebinding) = build_rbac_resources(validated, APP_NAME, rbac_labels) + .expect( + "RBAC resources should be created because the validated cluster has valid metadata", + ); + + // --- Executor template ConfigMaps (pre-built in validate stage) --- + config_maps.extend(validated.executor_template_config_maps.clone()); + + // --- Per-role/rolegroup resources --- + for (airflow_role, role_groups) in &validated.role_groups { + // PDBs + if let Some(role_config) = validated.role_configs.get(airflow_role) { + if let Some(pdb) = build_pdb(validated, airflow_role, role_config) { + pod_disruption_budgets.push(pdb); + } + } + + // Group listeners (only Webserver) + if let Some(role_config) = validated.role_configs.get(airflow_role) { + if let (Some(listener_class), Some(listener_name)) = ( + &role_config.listener_class, + 
&role_config.group_listener_name, + ) { + listeners.push(build_group_listener( + validated, + airflow_role, + listener_class.clone(), + listener_name.clone(), + )); + } + } + + for (rolegroup_name, role_group_config) in role_groups { + let rolegroup_ref = validated.rolegroup_ref(airflow_role, rolegroup_name); + + let main_container = main_container_for_role(airflow_role); + + // Services + services.push(build_headless_service(validated, &rolegroup_ref)); + services.push(build_metrics_service(validated, &rolegroup_ref)); + + // ConfigMap + StatefulSet via RoleGroupBuilder + let pod_data = validated + .precomputed_pod_data + .get(airflow_role) + .and_then(|groups| groups.get(rolegroup_name)) + .expect( + "PrecomputedPodData should exist for every role group \ + because validate_cluster computes it for each one", + ); + + let builder = RoleGroupBuilder::new( + validated, + role_group_config, + rolegroup_ref, + airflow_role.clone(), + main_container, + pod_data, + ); + + config_maps.push(builder.build_config_map()); + stateful_sets.push(builder.build_stateful_set()); + } + } + + KubernetesResources { + stateful_sets, + config_maps, + services, + service_accounts: vec![rbac_sa], + role_bindings: vec![rbac_rolebinding], + pod_disruption_budgets, + listeners, + _status: PhantomData, + } +} + +fn build_pdb( + cluster: &ValidatedAirflowCluster, + role: &AirflowRole, + role_config: &ValidatedRoleConfig, +) -> Option { + if !role_config.pdb_enabled { + return None; + } + + let max_unavailable = role_config.pdb_max_unavailable.unwrap_or(match role { + AirflowRole::Worker => match &cluster.executor { + AirflowExecutor::KubernetesExecutors { .. 
} => return None, + _ => 1, + }, + _ => 1, + }); + + Some({ + use crate::framework::types::operator::*; + framework::builder::pdb::pod_disruption_budget_builder_with_role( + cluster, + &ProductName::from_str_unsafe(APP_NAME), + &RoleName::from_str_unsafe(&role.to_string()), + &OperatorName::from_str_unsafe(OPERATOR_NAME), + &ControllerName::from_str_unsafe(AIRFLOW_CONTROLLER_NAME), + ) + .with_max_unavailable(max_unavailable) + .build() + }) +} + +fn build_headless_service( + cluster: &ValidatedAirflowCluster, + rolegroup_ref: &RoleGroupRef, +) -> Service { + let metadata = ObjectMetaBuilder::new() + .name(format!("{}-headless", rolegroup_ref.object_name())) + .namespace(&cluster.namespace) + .ownerreference(ownerreference_from_resource(cluster, None, Some(true))) + .with_labels(recommended_labels( + cluster, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + )) + .build(); + + Service { + metadata, + spec: Some(ServiceSpec { + type_: Some("ClusterIP".to_string()), + cluster_ip: Some("None".to_string()), + ports: Some(vec![ServicePort { + name: Some(HTTP_PORT_NAME.to_string()), + port: HTTP_PORT.into(), + protocol: Some("TCP".to_string()), + ..ServicePort::default() + }]), + selector: Some( + role_group_selector_labels(cluster, &rolegroup_ref.role, &rolegroup_ref.role_group) + .into(), + ), + publish_not_ready_addresses: Some(true), + ..ServiceSpec::default() + }), + status: None, + } +} + +fn build_metrics_service( + cluster: &ValidatedAirflowCluster, + rolegroup_ref: &RoleGroupRef, +) -> Service { + let metadata = ObjectMetaBuilder::new() + .name(format!("{}-metrics", rolegroup_ref.object_name())) + .namespace(&cluster.namespace) + .ownerreference(ownerreference_from_resource(cluster, None, Some(true))) + .with_labels(recommended_labels( + cluster, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + )) + .with_labels(prometheus_labels()) + .with_annotations(prometheus_annotations()) + .build(); + + Service { + metadata, + spec: Some(ServiceSpec { + type_: 
Some("ClusterIP".to_string()), + cluster_ip: Some("None".to_string()), + ports: Some(vec![ServicePort { + name: Some(METRICS_PORT_NAME.to_string()), + port: METRICS_PORT.into(), + protocol: Some("TCP".to_string()), + ..ServicePort::default() + }]), + selector: Some( + role_group_selector_labels(cluster, &rolegroup_ref.role, &rolegroup_ref.role_group) + .into(), + ), + publish_not_ready_addresses: Some(true), + ..ServiceSpec::default() + }), + status: None, + } +} + +fn recommended_labels(cluster: &ValidatedAirflowCluster, role: &str, role_group: &str) -> Labels { + use crate::framework::types::operator::*; + framework::kvp::label::recommended_labels( + cluster, + &ProductName::from_str_unsafe(APP_NAME), + &ProductVersion::from_str_unsafe(&cluster.image.app_version_label_value.to_string()), + &OperatorName::from_str_unsafe(OPERATOR_NAME), + &ControllerName::from_str_unsafe(AIRFLOW_CONTROLLER_NAME), + &RoleName::from_str_unsafe(role), + &RoleGroupName::from_str_unsafe(role_group), + ) +} + +fn role_group_selector_labels( + cluster: &ValidatedAirflowCluster, + role: &str, + role_group: &str, +) -> Labels { + use crate::framework::types::operator::*; + framework::kvp::label::role_group_selector( + cluster, + &ProductName::from_str_unsafe(APP_NAME), + &RoleName::from_str_unsafe(role), + &RoleGroupName::from_str_unsafe(role_group), + ) +} + +fn prometheus_labels() -> Labels { + Labels::try_from([("prometheus.io/scrape", "true")]).expect("should be a valid label") +} + +fn prometheus_annotations() -> Annotations { + Annotations::try_from([ + ("prometheus.io/path".to_owned(), "/metrics".to_owned()), + ("prometheus.io/port".to_owned(), METRICS_PORT.to_string()), + ("prometheus.io/scheme".to_owned(), "http".to_owned()), + ("prometheus.io/scrape".to_owned(), "true".to_owned()), + ]) + .expect("should be valid annotations") +} + +fn build_group_listener( + cluster: &ValidatedAirflowCluster, + role: &AirflowRole, + listener_class: String, + listener_group_name: String, +) -> 
listener::v1alpha1::Listener { + let metadata = ObjectMetaBuilder::new() + .name(&listener_group_name) + .namespace(&cluster.namespace) + .ownerreference(ownerreference_from_resource(cluster, None, Some(true))) + .with_labels(recommended_labels(cluster, &role.to_string(), "none")) + .build(); + + listener::v1alpha1::Listener { + metadata, + spec: listener::v1alpha1::ListenerSpec { + class_name: Some(listener_class), + ports: Some(listener_ports()), + ..listener::v1alpha1::ListenerSpec::default() + }, + status: None, + } +} + +fn listener_ports() -> Vec { + vec![listener::v1alpha1::ListenerPort { + name: HTTP_PORT_NAME.to_string(), + port: HTTP_PORT.into(), + protocol: Some("TCP".to_string()), + }] +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, str::FromStr}; + + use stackable_operator::{ + commons::{ + affinity::StackableAffinity, + product_image_selection::ResolvedProductImage, + resources::{NoRuntimeLimits, Resources}, + }, + k8s_openapi::api::core::v1::PodTemplateSpec, + kube::Resource, + kvp::LabelValue, + product_logging::spec::{ + AutomaticContainerLogConfig, ContainerLogConfig, ContainerLogConfigChoice, + }, + shared::time::Duration, + }; + + use super::*; + use crate::{ + controller::{ + PrecomputedPodData, ValidatedAirflowCluster, ValidatedLogging, ValidatedRoleConfig, + ValidatedRoleGroupConfig, + }, + crd::{AirflowExecutor, AirflowRole, AirflowStorageConfig}, + framework::{ + product_logging::framework::ValidatedContainerLogConfigChoice, + types::{kubernetes::Uid, operator::ClusterName}, + }, + }; + + // Note: airflow_cluster() helper removed — build() no longer takes a raw CRD argument + + #[test] + fn test_build() { + let validated = validated_cluster(); + + let resources = build(&validated); + + assert_eq!( + vec![ + "my-airflow-scheduler-default", + "my-airflow-webserver-default", + ], + extract_resource_names(&resources.stateful_sets) + ); + assert_eq!( + vec![ + "my-airflow-scheduler-default-headless", + 
"my-airflow-scheduler-default-metrics", + "my-airflow-webserver-default-headless", + "my-airflow-webserver-default-metrics", + ], + extract_resource_names(&resources.services) + ); + assert_eq!( + vec![ + "my-airflow-scheduler-default", + "my-airflow-webserver-default", + ], + extract_resource_names(&resources.config_maps) + ); + assert_eq!( + vec!["my-airflow-serviceaccount"], + extract_resource_names(&resources.service_accounts) + ); + assert_eq!( + vec!["my-airflow-rolebinding"], + extract_resource_names(&resources.role_bindings) + ); + assert_eq!( + vec!["my-airflow-scheduler", "my-airflow-webserver"], + extract_resource_names(&resources.pod_disruption_budgets) + ); + assert_eq!( + vec!["my-airflow-webserver"], + extract_resource_names(&resources.listeners) + ); + } + + fn extract_resource_names(resources: &[impl Resource]) -> Vec<&str> { + let mut names: Vec<&str> = resources + .iter() + .filter_map(|r| r.meta().name.as_ref()) + .map(|n| n.as_str()) + .collect(); + names.sort(); + names + } + + fn validated_cluster() -> ValidatedAirflowCluster { + use crate::framework::types::kubernetes::NamespaceName; + + let image = ResolvedProductImage { + product_version: "2.10.4".to_owned(), + app_version_label_value: LabelValue::from_str("2.10.4-stackable0.0.0-dev") + .expect("valid label value"), + image: "oci.stackable.tech/sdp/airflow:2.10.4-stackable0.0.0-dev".to_string(), + image_pull_policy: "Always".to_owned(), + pull_secrets: None, + }; + + let logging = ValidatedLogging { + airflow_container: ValidatedContainerLogConfigChoice::Automatic( + AutomaticContainerLogConfig::default(), + ), + vector_container: None, + git_sync_container_log_config: ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic( + AutomaticContainerLogConfig::default(), + )), + }, + }; + + let role_group_config = ValidatedRoleGroupConfig { + resources: Resources::::default(), + logging: logging.clone(), + affinity: StackableAffinity::default(), + graceful_shutdown_timeout: 
Duration::from_secs(120), + config_file_content: String::new(), + }; + + let pod_data = PrecomputedPodData { + env_vars: vec![], + airflow_commands: vec!["airflow webserver".to_string()], + auth_volumes: vec![], + auth_volume_mounts: vec![], + extra_volumes: vec![], + extra_volume_mounts: vec![], + git_sync_containers: vec![], + git_sync_init_containers: vec![], + git_sync_volumes: vec![], + git_sync_volume_mounts: vec![], + vector_container: None, + service_account_name: "my-airflow-serviceaccount".to_string(), + replicas: Some(1), + pod_overrides: PodTemplateSpec::default(), + executor: AirflowExecutor::KubernetesExecutors { + common_configuration: Box::default(), + }, + executor_template_configmap_name: None, + listener_volume_claim_template: None, + }; + + let role_groups = BTreeMap::from([ + ( + AirflowRole::Webserver, + BTreeMap::from([("default".to_string(), role_group_config.clone())]), + ), + ( + AirflowRole::Scheduler, + BTreeMap::from([("default".to_string(), role_group_config)]), + ), + ]); + + let precomputed_pod_data = BTreeMap::from([ + ( + AirflowRole::Webserver, + BTreeMap::from([("default".to_string(), pod_data.clone())]), + ), + ( + AirflowRole::Scheduler, + BTreeMap::from([("default".to_string(), pod_data)]), + ), + ]); + + // Role configs: PDB enabled for both roles; Webserver also gets a listener + let role_configs = BTreeMap::from([ + ( + AirflowRole::Scheduler, + ValidatedRoleConfig { + pdb_enabled: true, + pdb_max_unavailable: None, + listener_class: None, + group_listener_name: None, + }, + ), + ( + AirflowRole::Webserver, + ValidatedRoleConfig { + pdb_enabled: true, + pdb_max_unavailable: None, + listener_class: Some("cluster-internal".to_string()), + group_listener_name: Some("my-airflow-webserver".to_string()), + }, + ), + ]); + + ValidatedAirflowCluster::new( + image, + ClusterName::from_str_unsafe("my-airflow"), + NamespaceName::from_str_unsafe("default"), + Uid::from_str_unsafe("e6ac237d-a6d4-43a1-8135-f36506110912"), + role_groups, 
+ precomputed_pod_data, + vec![], + role_configs, + AirflowExecutor::KubernetesExecutors { + common_configuration: Box::default(), + }, + ) + } +} diff --git a/rust/operator-binary/src/controller/build/role_group_builder.rs b/rust/operator-binary/src/controller/build/role_group_builder.rs new file mode 100644 index 00000000..130367af --- /dev/null +++ b/rust/operator-binary/src/controller/build/role_group_builder.rs @@ -0,0 +1,417 @@ +//! Builder for role group Kubernetes resources +//! +//! The [`RoleGroupBuilder`] constructs ConfigMaps and StatefulSets for a single role +//! group. All build methods are infallible — validation and config generation happen in +//! the validate stage. + +use std::collections::BTreeMap; + +use stackable_operator::{ + builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder}, + k8s_openapi::{ + DeepMerge, + api::{ + apps::v1::{StatefulSet, StatefulSetSpec}, + core::v1::{ + Affinity, ConfigMap, Container as K8sContainer, ContainerPort, PodSecurityContext, + PodSpec, PodTemplateSpec, Probe, ResourceRequirements, TCPSocketAction, Volume, + VolumeMount, + }, + }, + apimachinery::pkg::{ + api::resource::Quantity, apis::meta::v1::LabelSelector, util::intstr::IntOrString, + }, + }, + kvp::{Annotation, Label, Labels}, + role_utils::RoleGroupRef, + utils::COMMON_BASH_TRAP_FUNCTIONS, +}; + +use crate::{ + controller::{ + AIRFLOW_CONTROLLER_NAME, PrecomputedPodData, ValidatedAirflowCluster, + ValidatedRoleGroupConfig, + }, + controller_commons::{self, CONFIG_VOLUME_NAME, LOG_CONFIG_VOLUME_NAME, LOG_VOLUME_NAME}, + crd::{ + AIRFLOW_CONFIG_FILENAME, APP_NAME, AirflowExecutor, AirflowRole, CONFIG_PATH, Container, + HTTP_PORT_NAME, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME, LOG_CONFIG_DIR, METRICS_PORT, + METRICS_PORT_NAME, OPERATOR_NAME, STACKABLE_LOG_DIR, TEMPLATE_LOCATION, + TEMPLATE_VOLUME_NAME, + }, + framework::builder::meta::ownerreference_from_resource, + product_logging::extend_config_map_with_log_config, + 
service::stateful_set_service_name, +}; + +pub struct RoleGroupBuilder<'a> { + cluster: &'a ValidatedAirflowCluster, + role_group_config: &'a ValidatedRoleGroupConfig, + rolegroup_ref: RoleGroupRef, + airflow_role: AirflowRole, + main_container: Container, + pod_data: &'a PrecomputedPodData, +} + +impl<'a> RoleGroupBuilder<'a> { + pub fn new( + cluster: &'a ValidatedAirflowCluster, + role_group_config: &'a ValidatedRoleGroupConfig, + rolegroup_ref: RoleGroupRef, + airflow_role: AirflowRole, + main_container: Container, + pod_data: &'a PrecomputedPodData, + ) -> Self { + Self { + cluster, + role_group_config, + rolegroup_ref, + airflow_role, + main_container, + pod_data, + } + } + + pub fn build_config_map(&self) -> ConfigMap { + let metadata = self + .common_metadata(self.rolegroup_ref.object_name()) + .build(); + + let mut cm_builder = ConfigMapBuilder::new(); + cm_builder.metadata(metadata); + + cm_builder.add_data( + AIRFLOW_CONFIG_FILENAME, + self.role_group_config.config_file_content.clone(), + ); + + extend_config_map_with_log_config( + &self.rolegroup_ref, + &self.main_container, + &self.role_group_config.logging.airflow_container, + self.role_group_config.logging.vector_container.as_ref(), + &mut cm_builder, + &self.cluster.image, + ); + + cm_builder + .build() + .expect("ConfigMap should build because metadata is set") + } + + pub fn build_stateful_set(&self) -> StatefulSet { + let restarter_label = Label::try_from(("restarter.stackable.tech/enabled", "true")) + .expect("static label is always valid"); + + let metadata = self + .common_metadata(self.rolegroup_ref.object_name()) + .with_label(restarter_label) + .build(); + + let template = self.build_pod_template(); + + let match_labels = { + use crate::framework::types::operator::*; + crate::framework::kvp::label::role_group_selector( + self.cluster, + &ProductName::from_str_unsafe(APP_NAME), + &RoleName::from_str_unsafe(&self.rolegroup_ref.role), + 
&RoleGroupName::from_str_unsafe(&self.rolegroup_ref.role_group), + ) + }; + + let pod_management_policy = match self.airflow_role { + AirflowRole::Scheduler => "OrderedReady", + AirflowRole::Webserver + | AirflowRole::Worker + | AirflowRole::DagProcessor + | AirflowRole::Triggerer => "Parallel", + } + .to_string(); + + let spec = StatefulSetSpec { + pod_management_policy: Some(pod_management_policy), + replicas: self.pod_data.replicas.map(i32::from), + selector: LabelSelector { + match_labels: Some(match_labels.into()), + ..LabelSelector::default() + }, + service_name: stateful_set_service_name(&self.rolegroup_ref), + template, + volume_claim_templates: self + .pod_data + .listener_volume_claim_template + .clone() + .map(|pvc| vec![pvc]), + ..StatefulSetSpec::default() + }; + + StatefulSet { + metadata, + spec: Some(spec), + status: None, + } + } + + fn build_pod_template(&self) -> PodTemplateSpec { + let pod_metadata = ObjectMetaBuilder::new() + .with_labels(self.recommended_labels()) + .with_annotation( + Annotation::try_from(( + "kubectl.kubernetes.io/default-container", + format!("{}", self.main_container), + )) + .expect("static annotation is always valid"), + ) + .build(); + + let airflow_container = self.build_airflow_container(); + let metrics_container = self.build_metrics_container(); + + let mut containers = vec![airflow_container, metrics_container]; + containers.extend(self.pod_data.git_sync_containers.clone()); + if let Some(vector_container) = &self.pod_data.vector_container { + containers.push(vector_container.clone()); + } + + let init_containers = if self.pod_data.git_sync_init_containers.is_empty() { + None + } else { + Some(self.pod_data.git_sync_init_containers.clone()) + }; + + let volumes = self.build_volumes(); + + let termination_grace_period_seconds = self + .role_group_config + .graceful_shutdown_timeout + .as_secs() + .try_into() + .ok(); + + let mut pod_template = PodTemplateSpec { + metadata: Some(pod_metadata), + spec: Some(PodSpec { 
+ affinity: { + let a = &self.role_group_config.affinity; + if a.pod_affinity.is_some() + || a.pod_anti_affinity.is_some() + || a.node_affinity.is_some() + { + Some(Affinity { + pod_affinity: a.pod_affinity.clone(), + pod_anti_affinity: a.pod_anti_affinity.clone(), + node_affinity: a.node_affinity.clone(), + }) + } else { + None + } + }, + containers, + init_containers, + service_account_name: Some(self.pod_data.service_account_name.clone()), + termination_grace_period_seconds, + security_context: Some(PodSecurityContext { + fs_group: Some(1000), + ..PodSecurityContext::default() + }), + image_pull_secrets: self.cluster.image.pull_secrets.clone(), + volumes: if volumes.is_empty() { + None + } else { + Some(volumes) + }, + ..PodSpec::default() + }), + }; + + pod_template.merge_from(self.pod_data.pod_overrides.clone()); + pod_template + } + + fn build_airflow_container(&self) -> K8sContainer { + let mut volume_mounts = vec![ + VolumeMount { + name: CONFIG_VOLUME_NAME.to_string(), + mount_path: CONFIG_PATH.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + name: LOG_CONFIG_VOLUME_NAME.to_string(), + mount_path: LOG_CONFIG_DIR.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + name: LOG_VOLUME_NAME.to_string(), + mount_path: STACKABLE_LOG_DIR.to_string(), + ..VolumeMount::default() + }, + ]; + + volume_mounts.extend(self.pod_data.extra_volume_mounts.clone()); + volume_mounts.extend(self.pod_data.auth_volume_mounts.clone()); + volume_mounts.extend(self.pod_data.git_sync_volume_mounts.clone()); + + if matches!( + self.pod_data.executor, + AirflowExecutor::KubernetesExecutors { .. 
} + ) { + volume_mounts.push(VolumeMount { + name: TEMPLATE_VOLUME_NAME.to_string(), + mount_path: TEMPLATE_LOCATION.to_string(), + ..VolumeMount::default() + }); + } + + if self.airflow_role.get_http_port().is_some() + && self.pod_data.listener_volume_claim_template.is_some() + { + volume_mounts.push(VolumeMount { + name: LISTENER_VOLUME_NAME.to_string(), + mount_path: LISTENER_VOLUME_DIR.to_string(), + ..VolumeMount::default() + }); + } + + let mut ports = Vec::new(); + if let Some(http_port) = self.airflow_role.get_http_port() { + ports.push(ContainerPort { + name: Some(HTTP_PORT_NAME.to_string()), + container_port: http_port.into(), + ..ContainerPort::default() + }); + } + + let (readiness_probe, liveness_probe) = + if let Some(http_port) = self.airflow_role.get_http_port() { + let probe = Probe { + tcp_socket: Some(TCPSocketAction { + port: IntOrString::Int(http_port.into()), + ..TCPSocketAction::default() + }), + initial_delay_seconds: Some(60), + period_seconds: Some(10), + failure_threshold: Some(6), + ..Probe::default() + }; + (Some(probe.clone()), Some(probe)) + } else { + (None, None) + }; + + K8sContainer { + name: self.main_container.to_string(), + image: Some(self.cluster.image.image.clone()), + image_pull_policy: Some(self.cluster.image.image_pull_policy.clone()), + command: Some(vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + ]), + args: Some(vec![self.pod_data.airflow_commands.join("\n")]), + env: Some(self.pod_data.env_vars.clone()), + ports: if ports.is_empty() { None } else { Some(ports) }, + volume_mounts: Some(volume_mounts), + resources: Some(self.role_group_config.resources.clone().into()), + readiness_probe, + liveness_probe, + ..K8sContainer::default() + } + } + + fn build_metrics_container(&self) -> K8sContainer { + let args = [ + COMMON_BASH_TRAP_FUNCTIONS.to_string(), + "prepare_signal_handlers".to_string(), + "/stackable/statsd_exporter &".to_string(), + 
"wait_for_termination $!".to_string(), + ] + .join("\n"); + + K8sContainer { + name: "metrics".to_string(), + image: Some(self.cluster.image.image.clone()), + image_pull_policy: Some(self.cluster.image.image_pull_policy.clone()), + command: Some(vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + ]), + args: Some(vec![args]), + ports: Some(vec![ContainerPort { + name: Some(METRICS_PORT_NAME.to_string()), + container_port: METRICS_PORT.into(), + ..ContainerPort::default() + }]), + resources: Some(ResourceRequirements { + requests: Some(BTreeMap::from([ + ("cpu".to_string(), Quantity("100m".to_string())), + ("memory".to_string(), Quantity("64Mi".to_string())), + ])), + limits: Some(BTreeMap::from([ + ("cpu".to_string(), Quantity("200m".to_string())), + ("memory".to_string(), Quantity("64Mi".to_string())), + ])), + ..ResourceRequirements::default() + }), + ..K8sContainer::default() + } + } + + fn build_volumes(&self) -> Vec { + let mut volumes = controller_commons::create_volumes( + &self.rolegroup_ref.object_name(), + &self.role_group_config.logging.airflow_container, + ); + + volumes.extend(self.pod_data.extra_volumes.clone()); + volumes.extend(self.pod_data.auth_volumes.clone()); + volumes.extend(self.pod_data.git_sync_volumes.clone()); + + if let Some(template_cm_name) = &self.pod_data.executor_template_configmap_name { + volumes.push(Volume { + name: TEMPLATE_VOLUME_NAME.to_string(), + config_map: Some( + stackable_operator::k8s_openapi::api::core::v1::ConfigMapVolumeSource { + name: template_cm_name.clone(), + ..Default::default() + }, + ), + ..Volume::default() + }); + } + + volumes + } + + fn common_metadata(&self, resource_name: impl Into) -> ObjectMetaBuilder { + let mut builder = ObjectMetaBuilder::new(); + + builder + .name(resource_name) + .namespace(&self.cluster.namespace) + .ownerreference(ownerreference_from_resource(self.cluster, None, Some(true))) + 
.with_labels(self.recommended_labels()); + + builder + } + + fn recommended_labels(&self) -> Labels { + use crate::framework::types::operator::*; + crate::framework::kvp::label::recommended_labels( + self.cluster, + &ProductName::from_str_unsafe(APP_NAME), + &ProductVersion::from_str_unsafe( + &self.cluster.image.app_version_label_value.to_string(), + ), + &OperatorName::from_str_unsafe(OPERATOR_NAME), + &ControllerName::from_str_unsafe(AIRFLOW_CONTROLLER_NAME), + &RoleName::from_str_unsafe(&self.rolegroup_ref.role), + &RoleGroupName::from_str_unsafe(&self.rolegroup_ref.role_group), + ) + } +} diff --git a/rust/operator-binary/src/controller/dereference.rs b/rust/operator-binary/src/controller/dereference.rs new file mode 100644 index 00000000..9441a140 --- /dev/null +++ b/rust/operator-binary/src/controller/dereference.rs @@ -0,0 +1,113 @@ +//! The dereference step in the AirflowCluster controller +//! +//! Fetches and resolves all external resources referenced by the AirflowCluster spec: +//! product image, authentication classes, authorization config, and internal secrets. 
+ +use snafu::{ResultExt, Snafu}; +use stackable_operator::{ + client::Client, + commons::{product_image_selection::ResolvedProductImage, random_secret_creation}, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::crd::{ + authentication::AirflowClientAuthenticationDetailsResolved, + authorization::AirflowAuthorizationResolved, + internal_secret::{FERNET_KEY_SECRET_KEY, INTERNAL_SECRET_SECRET_KEY, JWT_SECRET_SECRET_KEY}, + v1alpha2, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to resolve product image"))] + ResolveProductImage { + source: stackable_operator::commons::product_image_selection::Error, + }, + + #[snafu(display("failed to apply authentication configuration"))] + InvalidAuthenticationConfig { + source: crate::crd::authentication::Error, + }, + + #[snafu(display("invalid authorization config"))] + InvalidAuthorizationConfig { + source: stackable_operator::commons::opa::Error, + }, + + #[snafu(display("failed to create internal secret"))] + InvalidInternalSecret { + source: random_secret_creation::Error, + }, +} + +pub struct DereferencedObjects { + pub resolved_product_image: ResolvedProductImage, + pub authentication_config: AirflowClientAuthenticationDetailsResolved, + pub authorization_config: AirflowAuthorizationResolved, +} + +pub async fn dereference( + client: &Client, + airflow: &v1alpha2::AirflowCluster, + image_base_name: &str, + image_repository: &str, + pkg_version: &str, +) -> Result { + let resolved_product_image = airflow + .spec + .image + .resolve(image_base_name, image_repository, pkg_version) + .context(ResolveProductImageSnafu)?; + + let authentication_config = AirflowClientAuthenticationDetailsResolved::from( + &airflow.spec.cluster_config.authentication, + client, + ) + .await + .context(InvalidAuthenticationConfigSnafu)?; + + let authorization_config = AirflowAuthorizationResolved::from_authorization_config( + client, + 
airflow, + &airflow.spec.cluster_config.authorization, + ) + .await + .context(InvalidAuthorizationConfigSnafu)?; + + random_secret_creation::create_random_secret_if_not_exists( + &airflow.shared_internal_secret_secret_name(), + INTERNAL_SECRET_SECRET_KEY, + 256, + airflow, + client, + ) + .await + .context(InvalidInternalSecretSnafu)?; + + random_secret_creation::create_random_secret_if_not_exists( + &airflow.shared_jwt_secret_secret_name(), + JWT_SECRET_SECRET_KEY, + 256, + airflow, + client, + ) + .await + .context(InvalidInternalSecretSnafu)?; + + random_secret_creation::create_random_secret_if_not_exists( + &airflow.shared_fernet_key_secret_name(), + FERNET_KEY_SECRET_KEY, + 32, + airflow, + client, + ) + .await + .context(InvalidInternalSecretSnafu)?; + + Ok(DereferencedObjects { + resolved_product_image, + authentication_config, + authorization_config, + }) +} diff --git a/rust/operator-binary/src/controller/update_status.rs b/rust/operator-binary/src/controller/update_status.rs new file mode 100644 index 00000000..43883460 --- /dev/null +++ b/rust/operator-binary/src/controller/update_status.rs @@ -0,0 +1,54 @@ +//! The update status step in the AirflowCluster controller +//! +//! Computes the cluster status from the applied resources and patches it onto +//! the AirflowCluster object. 
+ +use snafu::{ResultExt, Snafu}; +use stackable_operator::{ + client::Client, + status::condition::{ + compute_conditions, operations::ClusterOperationsConditionBuilder, + statefulset::StatefulSetConditionBuilder, + }, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use super::{Applied, KubernetesResources}; +use crate::crd::{AirflowClusterStatus, OPERATOR_NAME, v1alpha2}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to update status"))] + UpdateStatus { + source: stackable_operator::client::Error, + }, +} + +pub async fn update_status( + client: &Client, + airflow: &v1alpha2::AirflowCluster, + applied_resources: KubernetesResources, +) -> Result<(), Error> { + let mut ss_cond_builder = StatefulSetConditionBuilder::default(); + for stateful_set in applied_resources.stateful_sets { + ss_cond_builder.add(stateful_set); + } + + let cluster_operation_cond_builder = + ClusterOperationsConditionBuilder::new(&airflow.spec.cluster_operation); + + let status = AirflowClusterStatus { + conditions: compute_conditions( + airflow, + &[&ss_cond_builder, &cluster_operation_cond_builder], + ), + }; + + client + .apply_patch_status(OPERATOR_NAME, airflow, &status) + .await + .context(UpdateStatusSnafu)?; + + Ok(()) +} diff --git a/rust/operator-binary/src/controller/validate.rs b/rust/operator-binary/src/controller/validate.rs new file mode 100644 index 00000000..e8bf287a --- /dev/null +++ b/rust/operator-binary/src/controller/validate.rs @@ -0,0 +1,1285 @@ +//! The validate step in the AirflowCluster controller +//! +//! Validates the AirflowCluster spec and produces a [`ValidatedAirflowCluster`] where +//! all optional-after-merge fields are unwrapped and logging is pre-validated. 
+ +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + str::FromStr, +}; + +use product_config::{ProductConfigManager, types::PropertyNameKind}; +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::{ + builder::{ + configmap::ConfigMapBuilder, + meta::ObjectMetaBuilder, + pod::{ + PodBuilder, + container::ContainerBuilder, + resources::ResourceRequirementsBuilder, + security::PodSecurityContextBuilder, + volume::{ListenerOperatorVolumeSourceBuilder, ListenerReference}, + }, + }, + commons::product_image_selection::ResolvedProductImage, + crd::git_sync, + database_connections::drivers::{ + celery::CeleryDatabaseConnectionDetails, sqlalchemy::SqlAlchemyDatabaseConnectionDetails, + }, + k8s_openapi::{ + DeepMerge, + api::core::v1::{ConfigMap, PodTemplateSpec, Volume, VolumeMount}, + }, + kube::{Resource, ResourceExt}, + kvp::{Label, Labels, ObjectLabels}, + product_config_utils::{ + env_vars_from, env_vars_from_rolegroup_config, transform_all_roles_to_config, + validate_all_roles_and_groups_config, + }, + product_logging::{self, framework::LoggingError, spec::Logging}, + role_utils::RoleGroupRef, +}; +use strum::{EnumDiscriminants, IntoEnumIterator, IntoStaticStr}; + +use super::{ + AIRFLOW_CONTROLLER_NAME, PrecomputedPodData, ValidatedAirflowCluster, ValidatedLogging, + ValidatedRoleConfig, ValidatedRoleGroupConfig, dereference::DereferencedObjects, +}; +use crate::{ + controller_commons::{self, CONFIG_VOLUME_NAME, LOG_CONFIG_VOLUME_NAME, LOG_VOLUME_NAME}, + crd::{ + AIRFLOW_CONFIG_FILENAME, APP_NAME, AirflowConfig, AirflowExecutor, AirflowRole, + CONFIG_PATH, Container, ExecutorConfig, LISTENER_VOLUME_NAME, LOG_CONFIG_DIR, + OPERATOR_NAME, STACKABLE_LOG_DIR, TEMPLATE_NAME, + authentication::{ + AirflowAuthenticationClassResolved, AirflowClientAuthenticationDetailsResolved, + }, + authorization::AirflowAuthorizationResolved, + v1alpha2, + }, + env_vars, + framework::{ + product_logging::framework::{ + VectorContainerLogConfig, 
validate_logging_configuration_for_container, + }, + types::{ + kubernetes::{ConfigMapName, NamespaceName, Uid}, + operator::ClusterName, + }, + }, + product_logging::extend_config_map_with_log_config, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to validate cluster name"))] + InvalidClusterName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("object has no associated namespace"))] + ObjectHasNoNamespace, + + #[snafu(display("failed to validate cluster namespace"))] + InvalidClusterNamespace { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("object has no UID"))] + ObjectHasNoUid, + + #[snafu(display("failed to validate cluster UID"))] + InvalidClusterUid { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to validate logging configuration"))] + ValidateLoggingConfig { + source: crate::framework::product_logging::framework::Error, + }, + + #[snafu(display("vectorAggregatorConfigMapName must be set when vector agent is enabled"))] + MissingVectorAggregatorConfigMapName, + + #[snafu(display("failed to parse vector aggregator ConfigMap name"))] + ParseVectorAggregatorConfigMapName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("graceful shutdown timeout is not configured"))] + MissingGracefulShutdownTimeout, + + #[snafu(display("failed to resolve and merge config for role and role group"))] + FailedToResolveConfig { source: crate::crd::Error }, + + #[snafu(display("failed to construct Airflow configuration"))] + ConstructConfig { source: crate::config::Error }, + + #[snafu(display("failed to write config file"))] + BuildConfigFile { + source: product_config::flask_app_config_writer::FlaskAppConfigWriterError, + }, + + #[snafu(display("Failed to transform configs"))] + ProductConfigTransform 
{ + source: stackable_operator::product_config_utils::Error, + }, + + #[snafu(display("invalid product config"))] + InvalidProductConfig { + source: stackable_operator::product_config_utils::Error, + }, + + #[snafu(display("could not parse Airflow role [{role}]"))] + UnidentifiedAirflowRole { + source: strum::ParseError, + role: String, + }, + + #[snafu(display("object defines no airflow config role"))] + NoAirflowRole, + + #[snafu(display("failed to build environment variables"))] + BuildEnvVars { source: crate::env_vars::Error }, + + #[snafu(display("invalid git-sync specification"))] + InvalidGitSyncSpec { source: git_sync::v1alpha2::Error }, + + #[snafu(display("failed to configure logging"))] + ConfigureLogging { source: LoggingError }, + + #[snafu(display("failed to add LDAP volumes and volume mounts"))] + AddLdapVolumesAndVolumeMounts { + source: stackable_operator::crd::authentication::ldap::v1alpha1::Error, + }, + + #[snafu(display("failed to add TLS volumes and volume mounts"))] + AddTlsVolumesAndVolumeMounts { + source: stackable_operator::commons::tls_verification::TlsClientDetailsError, + }, + + #[snafu(display("failed to build listener volume"))] + BuildListenerVolume { + source: stackable_operator::builder::pod::volume::ListenerOperatorVolumeSourceBuilderError, + }, + + #[snafu(display("failed to build labels"))] + BuildLabels { + source: stackable_operator::kvp::LabelError, + }, + + #[snafu(display("invalid container name"))] + InvalidContainerName { + source: stackable_operator::builder::pod::container::Error, + }, + + #[snafu(display("failed to add volume mount"))] + AddVolumeMount { + source: stackable_operator::builder::pod::container::Error, + }, + + #[snafu(display("failed to add volume"))] + AddVolume { + source: stackable_operator::builder::pod::Error, + }, + + #[snafu(display("failed to serialize pod template"))] + PodTemplateSerde { source: serde_yaml::Error }, + + #[snafu(display("failed to build pod template ConfigMap"))] + 
PodTemplateConfigMap { + source: stackable_operator::builder::configmap::Error, + }, + + #[snafu(display("failed to build object metadata"))] + ObjectMeta { + source: stackable_operator::builder::meta::Error, + }, + + #[snafu(display("failed to build graceful shutdown config"))] + GracefulShutdown { + source: stackable_operator::builder::pod::Error, + }, +} + +type Result = std::result::Result; + +pub fn validate_logging( + logging: &Logging, + main_container: Container, + vector_aggregator_config_map_name: Option<&str>, +) -> Result { + let airflow_container = validate_logging_configuration_for_container(logging, main_container) + .context(ValidateLoggingConfigSnafu)?; + + let vector_container = if logging.enable_vector_agent { + let aggregator_name = + vector_aggregator_config_map_name.context(MissingVectorAggregatorConfigMapNameSnafu)?; + ConfigMapName::from_str(aggregator_name) + .context(ParseVectorAggregatorConfigMapNameSnafu)?; + let log_config = validate_logging_configuration_for_container(logging, Container::Vector) + .context(ValidateLoggingConfigSnafu)?; + Some(VectorContainerLogConfig { log_config }) + } else { + None + }; + + let git_sync_container_log_config = logging.for_container(&Container::GitSync).into_owned(); + + Ok(ValidatedLogging { + airflow_container, + vector_container, + git_sync_container_log_config, + }) +} + +pub fn validate_airflow_config( + config: &AirflowConfig, + vector_aggregator_config_map_name: Option<&str>, + config_file_content: String, +) -> Result { + let logging = validate_logging( + &config.logging, + Container::Airflow, + vector_aggregator_config_map_name, + )?; + + let graceful_shutdown_timeout = config + .graceful_shutdown_timeout + .context(MissingGracefulShutdownTimeoutSnafu)?; + + Ok(ValidatedRoleGroupConfig { + resources: config.resources.clone(), + logging, + affinity: config.affinity.clone(), + graceful_shutdown_timeout, + config_file_content, + }) +} + +pub fn validate_executor_config( + config: &ExecutorConfig, 
+ vector_aggregator_config_map_name: Option<&str>, + config_file_content: String, +) -> Result { + let logging = validate_logging( + &config.logging, + Container::Base, + vector_aggregator_config_map_name, + )?; + + let graceful_shutdown_timeout = config + .graceful_shutdown_timeout + .context(MissingGracefulShutdownTimeoutSnafu)?; + + Ok(ValidatedRoleGroupConfig { + resources: config.resources.clone(), + logging, + affinity: config.affinity.clone(), + graceful_shutdown_timeout, + config_file_content, + }) +} + +/// Generates the `webserver_config.py` content for a role group. +/// +/// This function is called during the validate stage so that the build stage can +/// construct ConfigMaps infallibly. +pub fn generate_config_file_content( + authentication_config: &crate::crd::authentication::AirflowClientAuthenticationDetailsResolved, + authorization_config: &crate::crd::authorization::AirflowAuthorizationResolved, + product_version: &str, + rolegroup_config_overrides: &std::collections::HashMap< + product_config::types::PropertyNameKind, + BTreeMap, + >, +) -> Result { + use std::io::Write; + + use product_config::flask_app_config_writer; + use stackable_operator::product_config_utils::{ + CONFIG_OVERRIDE_FILE_FOOTER_KEY, CONFIG_OVERRIDE_FILE_HEADER_KEY, + }; + + use crate::{ + config::{self, PYTHON_IMPORTS}, + crd::{AIRFLOW_CONFIG_FILENAME, AirflowConfigOptions}, + }; + + let mut config = BTreeMap::new(); + config::add_airflow_config( + &mut config, + authentication_config, + authorization_config, + product_version, + ) + .context(ConstructConfigSnafu)?; + + let mut file_overrides = rolegroup_config_overrides + .get(&product_config::types::PropertyNameKind::File( + AIRFLOW_CONFIG_FILENAME.to_string(), + )) + .cloned() + .unwrap_or_default(); + + config.append(&mut file_overrides); + + let mut config_file = Vec::new(); + + if let Some(header) = config.remove(CONFIG_OVERRIDE_FILE_HEADER_KEY) { + writeln!(config_file, "{}", header).expect("writing to Vec is 
infallible"); + } + + let temp_file_footer: Option = config.remove(CONFIG_OVERRIDE_FILE_FOOTER_KEY); + + flask_app_config_writer::write::( + &mut config_file, + config.iter(), + PYTHON_IMPORTS, + ) + .context(BuildConfigFileSnafu)?; + + if let Some(footer) = temp_file_footer { + writeln!(config_file, "{}", footer).expect("writing to Vec is infallible"); + } + + Ok(String::from_utf8(config_file).expect("flask_app_config_writer produces valid UTF-8")) +} + +/// Top-level validation: runs product_config, merges/validates per-rolegroup configs, +/// generates config file contents, and assembles a [`ValidatedAirflowCluster`]. +pub fn validate_cluster( + airflow: &v1alpha2::AirflowCluster, + dereferenced: &DereferencedObjects, + product_config_manager: &ProductConfigManager, +) -> Result { + let vector_aggregator_config_map_name = airflow + .spec + .cluster_config + .vector_aggregator_config_map_name + .as_deref(); + + // --- product_config transform + validate --- + let mut roles = HashMap::new(); + for role in AirflowRole::iter() { + if let Some(resolved_role) = airflow.get_role(&role) { + roles.insert( + role.to_string(), + ( + vec![ + PropertyNameKind::Env, + PropertyNameKind::File(AIRFLOW_CONFIG_FILENAME.into()), + ], + resolved_role.clone(), + ), + ); + } + } + + let role_config = transform_all_roles_to_config(airflow, &roles); + let validated_role_config = validate_all_roles_and_groups_config( + &dereferenced.resolved_product_image.product_version, + &role_config.context(ProductConfigTransformSnafu)?, + product_config_manager, + false, + false, + ) + .context(InvalidProductConfigSnafu)?; + + // --- compute database connection details (infallible) --- + let templating_mechanism = + stackable_operator::database_connections::TemplatingMechanism::BashEnvSubstitution; + let metadata_database_connection_details = airflow + .spec + .cluster_config + .metadata_database + .sqlalchemy_connection_details_with_templating("METADATA", &templating_mechanism); + let 
celery_database_connection_details = match &airflow.spec.executor { + AirflowExecutor::CeleryExecutors { + result_backend: celery_result_backend, + broker: celery_broker, + .. + } => { + let celery_result_backend = celery_result_backend + .celery_connection_details_with_templating( + "CELERY_RESULT_BACKEND", + &templating_mechanism, + ); + let celery_broker = celery_broker + .celery_connection_details_with_templating("CELERY_BROKER", &templating_mechanism); + Some((celery_result_backend, celery_broker)) + } + _ => None, + }; + + // --- compute auth volumes/mounts (fallible) --- + let (auth_volumes, auth_volume_mounts) = + compute_auth_volumes_and_mounts(&dereferenced.authentication_config)?; + + // --- service account name (matches build_rbac_resources output) --- + let service_account_name = airflow.name_any(); + + // --- per-role/rolegroup validation --- + let mut validated_role_groups = BTreeMap::new(); + let mut all_precomputed_pod_data = BTreeMap::new(); + + for (role_name, role_config) in validated_role_config.iter() { + let airflow_role = + AirflowRole::from_str(role_name).context(UnidentifiedAirflowRoleSnafu { + role: role_name.to_string(), + })?; + + let mut validated_groups = BTreeMap::new(); + let mut pod_data_groups = BTreeMap::new(); + + for (rolegroup_name, rolegroup_config) in role_config.iter() { + let rolegroup_ref = RoleGroupRef { + cluster: stackable_operator::kube::runtime::reflector::ObjectRef::from_obj(airflow), + role: role_name.into(), + role_group: rolegroup_name.into(), + }; + + let merged_airflow_config = airflow + .merged_config(&airflow_role, &rolegroup_ref) + .context(FailedToResolveConfigSnafu)?; + + let config_file_content = generate_config_file_content( + &dereferenced.authentication_config, + &dereferenced.authorization_config, + &dereferenced.resolved_product_image.product_version, + rolegroup_config, + )?; + + let validated_config = validate_airflow_config( + &merged_airflow_config, + vector_aggregator_config_map_name, + 
config_file_content, + )?; + + let pod_data = compute_precomputed_pod_data( + airflow, + &airflow_role, + &rolegroup_ref, + rolegroup_config, + &dereferenced.resolved_product_image, + &dereferenced.authentication_config, + &dereferenced.authorization_config, + &metadata_database_connection_details, + &celery_database_connection_details, + &validated_config.logging, + &auth_volumes, + &auth_volume_mounts, + &service_account_name, + )?; + + validated_groups.insert(rolegroup_name.clone(), validated_config); + pod_data_groups.insert(rolegroup_name.clone(), pod_data); + } + + validated_role_groups.insert(airflow_role.clone(), validated_groups); + all_precomputed_pod_data.insert(airflow_role, pod_data_groups); + } + + // --- per-role config (PDB, listeners) --- + let mut validated_role_configs_map = BTreeMap::new(); + for role in AirflowRole::iter() { + if let Some(role_config) = airflow.role_config(&role) { + let pdb = &role_config.pod_disruption_budget; + let listener_class = role.listener_class_name(airflow); + let group_listener_name = airflow.group_listener_name(&role); + validated_role_configs_map.insert( + role, + ValidatedRoleConfig { + pdb_enabled: pdb.enabled, + pdb_max_unavailable: pdb.max_unavailable, + listener_class, + group_listener_name, + }, + ); + } + } + + // --- executor template config maps --- + let executor_template_config_maps = if let AirflowExecutor::KubernetesExecutors { + common_configuration, + } = &airflow.spec.executor + { + let merged_executor_config = airflow + .merged_executor_config(&common_configuration.config) + .context(FailedToResolveConfigSnafu)?; + + let config_file_content = generate_config_file_content( + &dereferenced.authentication_config, + &dereferenced.authorization_config, + &dereferenced.resolved_product_image.product_version, + &HashMap::new(), + )?; + + let validated_config = validate_executor_config( + &merged_executor_config, + vector_aggregator_config_map_name, + config_file_content, + )?; + + 
build_executor_template_config_maps( + airflow, + &dereferenced.resolved_product_image, + &dereferenced.authentication_config, + &metadata_database_connection_details, + &service_account_name, + &validated_config, + common_configuration, + )? + } else { + Vec::new() + }; + + // --- assemble --- + validate( + airflow, + &dereferenced.resolved_product_image, + validated_role_groups, + all_precomputed_pod_data, + executor_template_config_maps, + validated_role_configs_map, + ) +} + +/// Validates the AirflowCluster and produces a [`ValidatedAirflowCluster`] containing +/// all role groups with their validated configs. +pub fn validate( + airflow: &v1alpha2::AirflowCluster, + resolved_product_image: &stackable_operator::commons::product_image_selection::ResolvedProductImage, + validated_role_configs: BTreeMap>, + precomputed_pod_data: BTreeMap>, + executor_template_config_maps: Vec, + role_configs: BTreeMap, +) -> Result { + let cluster_name = + ClusterName::from_str(&airflow.name_any()).context(InvalidClusterNameSnafu)?; + let namespace = + NamespaceName::from_str(&airflow.namespace().context(ObjectHasNoNamespaceSnafu)?) + .context(InvalidClusterNamespaceSnafu)?; + let uid = Uid::from_str(airflow.meta().uid.as_deref().context(ObjectHasNoUidSnafu)?) + .context(InvalidClusterUidSnafu)?; + + Ok(ValidatedAirflowCluster::new( + resolved_product_image.clone(), + cluster_name, + namespace, + uid, + validated_role_configs, + precomputed_pod_data, + executor_template_config_maps, + role_configs, + airflow.spec.executor.clone(), + )) +} + +/// Extracts auth volumes and volume mounts using temporary builders. +/// +/// The upstream LDAP/TLS provider APIs require `PodBuilder`/`ContainerBuilder` references. +/// We create temporary builders, call the auth methods, then extract the raw volumes and mounts. 
+fn compute_auth_volumes_and_mounts( + authentication_config: &AirflowClientAuthenticationDetailsResolved, +) -> Result<(Vec, Vec)> { + let mut pb = PodBuilder::new(); + let mut cb = ContainerBuilder::new("dummy").expect("'dummy' is a valid container name"); + + let mut ldap_providers = BTreeSet::new(); + let mut tls_credentials = BTreeSet::new(); + + for auth_class in &authentication_config.authentication_classes_resolved { + match auth_class { + AirflowAuthenticationClassResolved::Ldap { provider } => { + ldap_providers.insert(provider); + } + AirflowAuthenticationClassResolved::Oidc { provider, .. } => { + tls_credentials.insert(&provider.tls); + } + } + } + + for provider in ldap_providers { + provider + .add_volumes_and_mounts(&mut pb, vec![&mut cb]) + .context(AddLdapVolumesAndVolumeMountsSnafu)?; + } + for tls in tls_credentials { + tls.add_volumes_and_mounts(&mut pb, vec![&mut cb]) + .context(AddTlsVolumesAndVolumeMountsSnafu)?; + } + + let container = cb.build(); + let pod_template = pb.build_template(); + + let volumes = pod_template + .spec + .and_then(|s| s.volumes) + .unwrap_or_default(); + let mounts = container.volume_mounts.unwrap_or_default(); + + Ok((volumes, mounts)) +} + +/// Builds the executor template ConfigMaps for KubernetesExecutor mode. +/// +/// Produces two ConfigMaps: +/// 1. A logging/config ConfigMap for the executor pods (equivalent to a rolegroup ConfigMap) +/// 2. A pod template ConfigMap containing a serialised PodTemplate that Airflow uses to +/// launch executor pods +/// +/// This is done in the validate stage because it uses PodBuilder/ContainerBuilder which +/// are fallible. The build stage then just passes these through to KubernetesResources. 
+#[allow(clippy::too_many_arguments)] +fn build_executor_template_config_maps( + airflow: &v1alpha2::AirflowCluster, + resolved_product_image: &ResolvedProductImage, + authentication_config: &AirflowClientAuthenticationDetailsResolved, + metadata_database_connection_details: &SqlAlchemyDatabaseConnectionDetails, + service_account_name: &str, + validated_config: &super::ValidatedRoleGroupConfig, + common_configuration: &crate::crd::AirflowExecutorCommonConfiguration, +) -> Result> { + let executor_rolegroup_ref = RoleGroupRef { + cluster: stackable_operator::kube::runtime::reflector::ObjectRef::from_obj(airflow), + role: "executor".into(), + role_group: "kubernetes".into(), + }; + + // 1. Build the executor logging/config ConfigMap + let executor_config_cm = { + let metadata = ObjectMetaBuilder::new() + .name(executor_rolegroup_ref.object_name()) + .namespace_opt(airflow.namespace()) + .ownerreference_from_resource(airflow, None, Some(true)) + .context(ObjectMetaSnafu)? + .with_recommended_labels(&build_object_labels( + airflow, + resolved_product_image, + "executor", + "executor-template", + )) + .context(ObjectMetaSnafu)? + .build(); + + let mut cm_builder = ConfigMapBuilder::new(); + cm_builder.metadata(metadata); + cm_builder.add_data( + AIRFLOW_CONFIG_FILENAME, + validated_config.config_file_content.clone(), + ); + + extend_config_map_with_log_config( + &executor_rolegroup_ref, + &Container::Base, + &validated_config.logging.airflow_container, + validated_config.logging.vector_container.as_ref(), + &mut cm_builder, + resolved_product_image, + ); + + cm_builder.build().context(PodTemplateConfigMapSnafu)? + }; + + // 2. 
Build the executor pod template ConfigMap + let executor_template_cm = { + // git-sync resources for the executor template + let git_sync_resources = git_sync::v1alpha2::GitSyncResources::new( + &airflow.spec.cluster_config.dags_git_sync, + resolved_product_image, + &env_vars_from(&common_configuration.env_overrides), + &airflow.volume_mounts(), + LOG_VOLUME_NAME, + &validated_config.logging.git_sync_container_log_config, + ) + .context(InvalidGitSyncSpecSnafu)?; + + let mut pb = PodBuilder::new(); + let pb_metadata = ObjectMetaBuilder::new() + .with_recommended_labels(&build_object_labels( + airflow, + resolved_product_image, + "executor", + "executor-template", + )) + .context(ObjectMetaSnafu)? + .build(); + + pb.metadata(pb_metadata) + .image_pull_secrets_from_product_image(resolved_product_image) + .affinity(&validated_config.affinity) + .service_account_name(service_account_name) + .restart_policy("Never") + .security_context(PodSecurityContextBuilder::new().fs_group(1000).build()); + + pb.termination_grace_period(&validated_config.graceful_shutdown_timeout) + .context(GracefulShutdownSnafu)?; + + // Container name "base" is an Airflow requirement + let mut airflow_container = ContainerBuilder::new(&Container::Base.to_string()) + .context(InvalidContainerNameSnafu)?; + + // Auth volumes and mounts + add_authentication_volumes_and_volume_mounts( + authentication_config, + &mut airflow_container, + &mut pb, + )?; + + airflow_container + .image_from_product_image(resolved_product_image) + .resources(validated_config.resources.clone().into()) + .add_env_vars(env_vars::build_airflow_template_envs( + airflow, + &common_configuration.env_overrides, + validated_config.logging.is_vector_agent_enabled(), + metadata_database_connection_details, + &git_sync_resources, + resolved_product_image, + )) + .add_volume_mounts(airflow.volume_mounts()) + .context(AddVolumeMountSnafu)? + .add_volume_mount(CONFIG_VOLUME_NAME, CONFIG_PATH) + .context(AddVolumeMountSnafu)? 
+ .add_volume_mount(LOG_CONFIG_VOLUME_NAME, LOG_CONFIG_DIR) + .context(AddVolumeMountSnafu)? + .add_volume_mount(LOG_VOLUME_NAME, STACKABLE_LOG_DIR) + .context(AddVolumeMountSnafu)?; + + // Git-sync resources (init containers only, no sidecars for executor template) + for container in git_sync_resources.git_sync_init_containers.iter().cloned() { + pb.add_init_container(container); + } + pb.add_volumes(git_sync_resources.git_content_volumes.clone()) + .context(AddVolumeSnafu)?; + pb.add_volumes(git_sync_resources.git_ssh_volumes.clone()) + .context(AddVolumeSnafu)?; + pb.add_volumes(git_sync_resources.git_ca_cert_volumes.clone()) + .context(AddVolumeSnafu)?; + airflow_container + .add_volume_mounts(git_sync_resources.git_content_volume_mounts.clone()) + .context(AddVolumeMountSnafu)?; + + // Database connection env vars + metadata_database_connection_details.add_to_container(&mut airflow_container); + + pb.add_container(airflow_container.build()); + pb.add_volumes(airflow.volumes().clone()) + .context(AddVolumeSnafu)?; + pb.add_volumes(controller_commons::create_volumes( + &executor_rolegroup_ref.object_name(), + &validated_config.logging.airflow_container, + )) + .context(AddVolumeSnafu)?; + + if let Some(vector_config) = &validated_config.logging.vector_container { + let vector_aggregator_config_map_name = airflow + .spec + .cluster_config + .vector_aggregator_config_map_name + .as_deref() + .context(MissingVectorAggregatorConfigMapNameSnafu)?; + pb.add_container(build_logging_container( + resolved_product_image, + vector_config, + vector_aggregator_config_map_name, + )?); + } + + let mut pod_template = pb.build_template(); + pod_template.merge_from(common_configuration.pod_overrides.clone()); + + let restarter_label = Label::try_from(("restarter.stackable.tech/enabled", "true")) + .expect("static label is always valid"); + + let mut cm_builder = ConfigMapBuilder::new(); + cm_builder + .metadata( + ObjectMetaBuilder::new() + .name_and_namespace(airflow) + 
.name(airflow.executor_template_configmap_name()) + .ownerreference_from_resource(airflow, None, Some(true)) + .context(ObjectMetaSnafu)? + .with_recommended_labels(&build_object_labels( + airflow, + resolved_product_image, + "executor", + "executor-template", + )) + .context(ObjectMetaSnafu)? + .with_label(restarter_label) + .build(), + ) + .add_data( + TEMPLATE_NAME, + serde_yaml::to_string(&pod_template).context(PodTemplateSerdeSnafu)?, + ); + + cm_builder.build().context(PodTemplateConfigMapSnafu)? + }; + + Ok(vec![executor_config_cm, executor_template_cm]) +} + +/// Helper to add authentication volumes and volume mounts directly to builders. +/// Used by the executor template where we build a PodTemplate using PodBuilder/ContainerBuilder. +fn add_authentication_volumes_and_volume_mounts( + authentication_config: &AirflowClientAuthenticationDetailsResolved, + cb: &mut ContainerBuilder, + pb: &mut PodBuilder, +) -> Result<()> { + let mut ldap_providers = BTreeSet::new(); + let mut tls_credentials = BTreeSet::new(); + + for auth_class in &authentication_config.authentication_classes_resolved { + match auth_class { + AirflowAuthenticationClassResolved::Ldap { provider } => { + ldap_providers.insert(provider); + } + AirflowAuthenticationClassResolved::Oidc { provider, .. 
} => { + tls_credentials.insert(&provider.tls); + } + } + } + + for provider in ldap_providers { + provider + .add_volumes_and_mounts(pb, vec![cb]) + .context(AddLdapVolumesAndVolumeMountsSnafu)?; + } + for tls in tls_credentials { + tls.add_volumes_and_mounts(pb, vec![cb]) + .context(AddTlsVolumesAndVolumeMountsSnafu)?; + } + Ok(()) +} + +fn build_object_labels<'a>( + airflow: &'a v1alpha2::AirflowCluster, + resolved_product_image: &'a ResolvedProductImage, + role: &'a str, + role_group: &'a str, +) -> ObjectLabels<'a, v1alpha2::AirflowCluster> { + ObjectLabels { + owner: airflow, + app_name: APP_NAME, + app_version: &resolved_product_image.app_version_label_value, + operator_name: OPERATOR_NAME, + controller_name: AIRFLOW_CONTROLLER_NAME, + role, + role_group, + } +} + +fn build_logging_container( + resolved_product_image: &ResolvedProductImage, + vector_config: &VectorContainerLogConfig, + vector_aggregator_config_map_name: &str, +) -> Result { + let raw_log_config = vector_config.log_config.to_raw_container_log_config(); + + product_logging::framework::vector_container( + resolved_product_image, + CONFIG_VOLUME_NAME, + LOG_VOLUME_NAME, + Some(&raw_log_config), + ResourceRequirementsBuilder::new() + .with_cpu_request("250m") + .with_cpu_limit("500m") + .with_memory_request("128Mi") + .with_memory_limit("128Mi") + .build(), + vector_aggregator_config_map_name, + ) + .context(ConfigureLoggingSnafu) +} + +/// Computes all pod-level data needed by the build stage to construct StatefulSets infallibly. 
+#[allow(clippy::too_many_arguments)] +fn compute_precomputed_pod_data( + airflow: &v1alpha2::AirflowCluster, + airflow_role: &AirflowRole, + rolegroup_ref: &RoleGroupRef, + rolegroup_config: &HashMap>, + resolved_product_image: &ResolvedProductImage, + authentication_config: &AirflowClientAuthenticationDetailsResolved, + authorization_config: &AirflowAuthorizationResolved, + metadata_database_connection_details: &SqlAlchemyDatabaseConnectionDetails, + celery_database_connection_details: &Option<( + CeleryDatabaseConnectionDetails, + CeleryDatabaseConnectionDetails, + )>, + validated_logging: &ValidatedLogging, + auth_volumes: &[Volume], + auth_volume_mounts: &[VolumeMount], + service_account_name: &str, +) -> Result { + let executor = &airflow.spec.executor; + + // --- git-sync resources --- + let git_sync_resources = git_sync::v1alpha2::GitSyncResources::new( + &airflow.spec.cluster_config.dags_git_sync, + resolved_product_image, + &env_vars_from_rolegroup_config(rolegroup_config), + &airflow.volume_mounts(), + LOG_VOLUME_NAME, + &validated_logging.git_sync_container_log_config, + ) + .context(InvalidGitSyncSpecSnafu)?; + + // --- env vars --- + let mut env_vars = env_vars::build_airflow_statefulset_envs( + airflow, + airflow_role, + rolegroup_config, + executor, + authentication_config, + authorization_config, + metadata_database_connection_details, + celery_database_connection_details, + &git_sync_resources, + resolved_product_image, + ) + .context(BuildEnvVarsSnafu)?; + + // Database connection details add secret-referenced env vars via ContainerBuilder. + // Extract them using a temp builder. 
+ let db_env_vars = { + let mut cb = ContainerBuilder::new("dummy").expect("'dummy' is a valid container name"); + metadata_database_connection_details.add_to_container(&mut cb); + if let Some((celery_result_backend, celery_broker)) = celery_database_connection_details { + celery_result_backend.add_to_container(&mut cb); + celery_broker.add_to_container(&mut cb); + } + cb.build().env.unwrap_or_default() + }; + env_vars.extend(db_env_vars); + + // --- commands --- + let airflow_commands = + airflow_role.get_commands(airflow, authentication_config, resolved_product_image); + + // --- git-sync containers/volumes --- + let use_git_sync_init_containers = matches!(executor, AirflowExecutor::CeleryExecutors { .. }); + let git_sync_containers = git_sync_resources.git_sync_containers.clone(); + let git_sync_init_containers = if use_git_sync_init_containers { + git_sync_resources.git_sync_init_containers.clone() + } else { + Vec::new() + }; + let mut git_sync_volumes = git_sync_resources.git_content_volumes.clone(); + git_sync_volumes.extend(git_sync_resources.git_ssh_volumes.clone()); + git_sync_volumes.extend(git_sync_resources.git_ca_cert_volumes.clone()); + let git_sync_volume_mounts = git_sync_resources.git_content_volume_mounts.clone(); + + // --- vector container --- + let vector_container = if let Some(vector_config) = &validated_logging.vector_container { + let vector_aggregator_config_map_name = airflow + .spec + .cluster_config + .vector_aggregator_config_map_name + .as_deref() + .context(MissingVectorAggregatorConfigMapNameSnafu)?; + Some(build_logging_container( + resolved_product_image, + vector_config, + vector_aggregator_config_map_name, + )?) 
+ } else { + None + }; + + // --- replicas --- + let binding = airflow.get_role(airflow_role); + let role = binding.as_ref().context(NoAirflowRoleSnafu)?; + let rolegroup = role.role_groups.get(&rolegroup_ref.role_group); + let replicas = rolegroup.and_then(|rg| rg.replicas); + + // --- pod overrides --- + let mut pod_overrides = PodTemplateSpec::default(); + pod_overrides.merge_from(role.config.pod_overrides.clone()); + if let Some(rg) = rolegroup { + pod_overrides.merge_from(rg.config.pod_overrides.clone()); + } + + // --- executor template configmap name --- + let executor_template_configmap_name = + if matches!(executor, AirflowExecutor::KubernetesExecutors { .. }) { + Some(airflow.executor_template_configmap_name()) + } else { + None + }; + + // --- listener PVC --- + let listener_volume_claim_template = if airflow_role.get_http_port().is_some() { + if let Some(listener_group_name) = airflow.group_listener_name(airflow_role) { + let unversioned_labels = Labels::recommended(&ObjectLabels { + owner: airflow, + app_name: APP_NAME, + app_version: "none", + operator_name: OPERATOR_NAME, + controller_name: AIRFLOW_CONTROLLER_NAME, + role: &rolegroup_ref.role, + role_group: &rolegroup_ref.role_group, + }) + .context(BuildLabelsSnafu)?; + + let pvc = ListenerOperatorVolumeSourceBuilder::new( + &ListenerReference::ListenerName(listener_group_name), + &unversioned_labels, + ) + .build_pvc(LISTENER_VOLUME_NAME.to_string()) + .context(BuildListenerVolumeSnafu)?; + Some(pvc) + } else { + None + } + } else { + None + }; + + // --- user-defined extra volumes/mounts from CRD --- + let extra_volumes = airflow.volumes().clone(); + let extra_volume_mounts = airflow.volume_mounts(); + + Ok(PrecomputedPodData { + env_vars, + airflow_commands, + auth_volumes: auth_volumes.to_vec(), + auth_volume_mounts: auth_volume_mounts.to_vec(), + extra_volumes, + extra_volume_mounts, + git_sync_containers, + git_sync_init_containers, + git_sync_volumes, + git_sync_volume_mounts, + 
vector_container, + service_account_name: service_account_name.to_string(), + replicas, + pod_overrides, + executor: executor.clone(), + executor_template_configmap_name, + listener_volume_claim_template, + }) +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, str::FromStr}; + + use stackable_operator::{ + commons::product_image_selection::ResolvedProductImage, + kvp::LabelValue, + product_logging::spec::{ + AutomaticContainerLogConfig, ContainerLogConfig, ContainerLogConfigChoice, Logging, + }, + shared::time::Duration, + }; + + use super::*; + use crate::crd::{AirflowConfig, Container}; + + fn airflow_config_with_logging(enable_vector: bool) -> AirflowConfig { + let mut containers = BTreeMap::new(); + containers.insert( + Container::Airflow, + ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic( + AutomaticContainerLogConfig::default(), + )), + }, + ); + if enable_vector { + containers.insert( + Container::Vector, + ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic( + AutomaticContainerLogConfig::default(), + )), + }, + ); + } + AirflowConfig { + resources: Default::default(), + logging: Logging { + enable_vector_agent: enable_vector, + containers, + }, + affinity: Default::default(), + graceful_shutdown_timeout: Some(Duration::from_secs(120)), + } + } + + #[test] + fn test_validate_airflow_config_without_vector() { + let config = airflow_config_with_logging(false); + let result = validate_airflow_config(&config, None, String::new()); + assert!(result.is_ok()); + let validated = result.unwrap(); + assert!(validated.logging.vector_container.is_none()); + assert!(!validated.logging.is_vector_agent_enabled()); + assert_eq!( + validated.graceful_shutdown_timeout, + Duration::from_secs(120) + ); + } + + #[test] + fn test_validate_airflow_config_with_vector() { + let config = airflow_config_with_logging(true); + let result = + validate_airflow_config(&config, Some("vector-aggregator-discovery"), String::new()); 
+ assert!(result.is_ok()); + let validated = result.unwrap(); + assert!(validated.logging.vector_container.is_some()); + assert!(validated.logging.is_vector_agent_enabled()); + } + + #[test] + fn test_validate_vector_enabled_missing_config_map() { + let config = airflow_config_with_logging(true); + let result = validate_airflow_config(&config, None, String::new()); + assert!(result.is_err()); + } + + #[test] + fn test_validate_missing_graceful_shutdown() { + let mut config = airflow_config_with_logging(false); + config.graceful_shutdown_timeout = None; + let result = validate_airflow_config(&config, None, String::new()); + assert!(result.is_err()); + } + + #[test] + fn test_validate_ok() { + let (airflow, image) = test_objects(); + let result = validate( + &airflow, + &image, + BTreeMap::new(), + BTreeMap::new(), + vec![], + BTreeMap::new(), + ); + assert!(result.is_ok()); + let validated = result.unwrap(); + assert_eq!(validated.name.to_string(), "my-airflow"); + assert_eq!(validated.namespace.to_string(), "default"); + } + + #[test] + fn test_validate_err_missing_name() { + test_validate_err( + |airflow, _| airflow.metadata.name = None, + ErrorDiscriminants::InvalidClusterName, + ); + } + + #[test] + fn test_validate_err_missing_namespace() { + test_validate_err( + |airflow, _| airflow.metadata.namespace = None, + ErrorDiscriminants::ObjectHasNoNamespace, + ); + } + + #[test] + fn test_validate_err_missing_uid() { + test_validate_err( + |airflow, _| airflow.metadata.uid = None, + ErrorDiscriminants::ObjectHasNoUid, + ); + } + + #[test] + fn test_validate_err_invalid_cluster_name() { + test_validate_err( + |airflow, _| { + airflow.metadata.name = + Some("THIS-IS-NOT-A-VALID-DNS-LABEL-NAME-BECAUSE-UPPERCASE".to_string()) + }, + ErrorDiscriminants::InvalidClusterName, + ); + } + + #[test] + fn test_validate_err_invalid_namespace() { + test_validate_err( + |airflow, _| airflow.metadata.namespace = Some("INVALID NAMESPACE".to_string()), + 
ErrorDiscriminants::InvalidClusterNamespace, + ); + } + + #[test] + fn test_validate_err_invalid_uid() { + test_validate_err( + |airflow, _| airflow.metadata.uid = Some("not-a-uuid".to_string()), + ErrorDiscriminants::InvalidClusterUid, + ); + } + + fn test_validate_err( + mutate: fn(&mut v1alpha2::AirflowCluster, &mut ResolvedProductImage), + expected: ErrorDiscriminants, + ) { + let (mut airflow, mut image) = test_objects(); + mutate(&mut airflow, &mut image); + let result = validate( + &airflow, + &image, + BTreeMap::new(), + BTreeMap::new(), + vec![], + BTreeMap::new(), + ); + match result { + Err(err) => assert_eq!(expected, ErrorDiscriminants::from(err)), + Ok(_) => panic!("validate should have failed with {expected:?}"), + } + } + + fn test_objects() -> (v1alpha2::AirflowCluster, ResolvedProductImage) { + use stackable_operator::kube::api::ObjectMeta; + + let airflow = v1alpha2::AirflowCluster { + metadata: ObjectMeta { + name: Some("my-airflow".to_string()), + namespace: Some("default".to_string()), + uid: Some("e6ac237d-a6d4-43a1-8135-f36506110912".to_string()), + ..ObjectMeta::default() + }, + spec: serde_json::from_value(serde_json::json!({ + "image": { "productVersion": "2.10.4" }, + "clusterConfig": { + "credentialsSecretName": "airflow-admin-credentials", + "metadataDatabase": { + "postgresql": { + "host": "airflow-postgresql", + "database": "airflow", + "credentialsSecretName": "airflow-postgresql-credentials" + } + } + }, + "kubernetesExecutors": { "config": {} }, + "webservers": { "roleGroups": { "default": { "config": {} } } }, + "schedulers": { "roleGroups": { "default": { "config": {} } } } + })) + .expect("test spec JSON should be valid"), + status: None, + }; + + let image = ResolvedProductImage { + product_version: "2.10.4".to_owned(), + app_version_label_value: LabelValue::from_str("2.10.4-stackable0.0.0-dev") + .expect("valid label value"), + image: "oci.stackable.tech/sdp/airflow:2.10.4-stackable0.0.0-dev".to_string(), + image_pull_policy: 
"Always".to_owned(), + pull_secrets: None, + }; + + (airflow, image) + } +} diff --git a/rust/operator-binary/src/controller_commons.rs b/rust/operator-binary/src/controller_commons.rs index 7d16c41b..45766bf4 100644 --- a/rust/operator-binary/src/controller_commons.rs +++ b/rust/operator-binary/src/controller_commons.rs @@ -1,16 +1,13 @@ use stackable_operator::{ builder::pod::volume::VolumeBuilder, k8s_openapi::api::core::v1::{ConfigMapVolumeSource, EmptyDirVolumeSource, Volume}, - product_logging::{ - self, - spec::{ - ConfigMapLogConfig, ContainerLogConfig, ContainerLogConfigChoice, - CustomContainerLogConfig, - }, - }, + product_logging, }; -use crate::crd::MAX_LOG_FILES_SIZE; +use crate::{ + crd::MAX_LOG_FILES_SIZE, + framework::product_logging::framework::ValidatedContainerLogConfigChoice, +}; pub const CONFIG_VOLUME_NAME: &str = "config"; pub const LOG_CONFIG_VOLUME_NAME: &str = "log-config"; @@ -18,7 +15,7 @@ pub const LOG_VOLUME_NAME: &str = "log"; pub fn create_volumes( config_map_name: &str, - log_config: Option<&ContainerLogConfig>, + log_config: &ValidatedContainerLogConfigChoice, ) -> Vec { let mut volumes = Vec::new(); @@ -38,17 +35,11 @@ pub fn create_volumes( ..Volume::default() }); - if let Some(ContainerLogConfig { - choice: - Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { - custom: ConfigMapLogConfig { config_map }, - })), - }) = log_config - { + if let ValidatedContainerLogConfigChoice::Custom(custom_config_map) = log_config { volumes.push(Volume { name: LOG_CONFIG_VOLUME_NAME.into(), config_map: Some(ConfigMapVolumeSource { - name: config_map.into(), + name: custom_config_map.as_ref().into(), ..ConfigMapVolumeSource::default() }), ..Volume::default() diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index f0fc0f27..78ecd660 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -573,7 +573,9 @@ pub struct AirflowOpaConfig { Eq, Hash, JsonSchema, + 
Ord, PartialEq, + PartialOrd, Serialize, EnumString, )] @@ -981,6 +983,7 @@ pub struct AirflowConfig { } impl AirflowConfig { + #[allow(dead_code)] pub const GIT_CREDENTIALS_SECRET_PROPERTY: &'static str = "gitCredentialsSecret"; fn default_config(cluster_name: &str, role: &AirflowRole) -> AirflowConfigFragment { @@ -1089,7 +1092,7 @@ fn default_resources(role: &AirflowRole) -> ResourcesFragment( owner: &'a T, controller_name: &'a str, diff --git a/rust/operator-binary/src/env_vars.rs b/rust/operator-binary/src/env_vars.rs index b4828dbe..c006f4af 100644 --- a/rust/operator-binary/src/env_vars.rs +++ b/rust/operator-binary/src/env_vars.rs @@ -18,8 +18,8 @@ use stackable_operator::{ use crate::{ crd::{ - AirflowExecutor, AirflowRole, ExecutorConfig, LOG_CONFIG_DIR, STACKABLE_LOG_DIR, - TEMPLATE_LOCATION, TEMPLATE_NAME, + AirflowExecutor, AirflowRole, LOG_CONFIG_DIR, STACKABLE_LOG_DIR, TEMPLATE_LOCATION, + TEMPLATE_NAME, authentication::{ AirflowAuthenticationClassResolved, AirflowClientAuthenticationDetailsResolved, }, @@ -376,7 +376,7 @@ fn static_envs( pub fn build_airflow_template_envs( airflow: &v1alpha2::AirflowCluster, env_overrides: &HashMap, - config: &ExecutorConfig, + vector_agent_enabled: bool, metadata_database_connection_details: &SqlAlchemyDatabaseConnectionDetails, git_sync_resources: &git_sync::v1alpha2::GitSyncResources, resolved_product_image: &ResolvedProductImage, @@ -434,7 +434,7 @@ pub fn build_airflow_template_envs( // _STACKABLE_POST_HOOK will contain a command to create a shutdown hook that will be // evaluated in the wrapper for each stackable spark container: this is necessary for pods // that are created and then terminated (we do a similar thing for spark-k8s). 
- if config.logging.enable_vector_agent { + if vector_agent_enabled { env.insert( "_STACKABLE_POST_HOOK".into(), EnvVar { diff --git a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs index 8bc3c995..1a98b8c3 100644 --- a/rust/operator-binary/src/framework.rs +++ b/rust/operator-binary/src/framework.rs @@ -21,16 +21,13 @@ use types::kubernetes::Uid; -#[allow(dead_code)] pub mod builder; #[allow(dead_code)] pub mod cluster_resources; #[allow(dead_code)] pub mod controller_utils; -#[allow(dead_code)] pub mod kvp; pub mod macros; -#[allow(dead_code)] pub mod product_logging; #[allow(dead_code)] pub mod role_group_utils; @@ -48,13 +45,11 @@ pub trait HasName { } /// Has a Kubernetes UID -#[allow(dead_code)] pub trait HasUid { fn to_uid(&self) -> Uid; } /// The name is a valid label value -#[allow(dead_code)] pub trait NameIsValidLabelValue { fn to_label_value(&self) -> String; } diff --git a/rust/operator-binary/src/framework/builder.rs b/rust/operator-binary/src/framework/builder.rs index a6530b5d..5d02a0b0 100644 --- a/rust/operator-binary/src/framework/builder.rs +++ b/rust/operator-binary/src/framework/builder.rs @@ -1,6 +1,4 @@ -#[allow(dead_code)] pub mod meta; -#[allow(dead_code)] pub mod pdb; #[allow(dead_code)] pub mod pod; diff --git a/rust/operator-binary/src/framework/builder/pdb.rs b/rust/operator-binary/src/framework/builder/pdb.rs index 5fca4d1e..9cf22af8 100644 --- a/rust/operator-binary/src/framework/builder/pdb.rs +++ b/rust/operator-binary/src/framework/builder/pdb.rs @@ -75,13 +75,29 @@ mod tests { type DynamicType = (); type Scope = (); - fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("AirflowCluster") } - fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflow.stackable.tech") } - fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha2") } - fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflowclusters") } + fn kind(_dt: &Self::DynamicType) -> Cow<'_, 
str> { + Cow::from("AirflowCluster") + } + + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("airflow.stackable.tech") + } + + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("v1alpha2") + } - fn meta(&self) -> &ObjectMeta { &self.object_meta } - fn meta_mut(&mut self) -> &mut ObjectMeta { &mut self.object_meta } + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("airflowclusters") + } + + fn meta(&self) -> &ObjectMeta { + &self.object_meta + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.object_meta + } } impl HasName for Cluster { diff --git a/rust/operator-binary/src/framework/kvp/label.rs b/rust/operator-binary/src/framework/kvp/label.rs index 88098bf2..101e239e 100644 --- a/rust/operator-binary/src/framework/kvp/label.rs +++ b/rust/operator-binary/src/framework/kvp/label.rs @@ -101,10 +101,21 @@ mod tests { type DynamicType = (); type Scope = (); - fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("AirflowCluster") } - fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflow.stackable.tech") } - fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha2") } - fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("airflowclusters") } + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("AirflowCluster") + } + + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("airflow.stackable.tech") + } + + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("v1alpha2") + } + + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("airflowclusters") + } fn meta(&self) -> &ObjectMeta { &self.object_meta diff --git a/rust/operator-binary/src/framework/role_utils.rs b/rust/operator-binary/src/framework/role_utils.rs index cb009721..de61ac54 100644 --- a/rust/operator-binary/src/framework/role_utils.rs +++ b/rust/operator-binary/src/framework/role_utils.rs @@ -22,7 +22,7 @@ use super::{ }, }; -/// Variant of 
[`stackable_operator::role_utils::GenericProductSpecificCommonConfig`] that +/// Variant of `stackable_operator::role_utils::GenericCommonConfig` that /// implements [`Merge`] #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct GenericProductSpecificCommonConfig {} @@ -289,9 +289,27 @@ mod tests { } #[rstest] - #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), Some("default"))] - #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), None)] - #[case("role-group", Some("role-group"), Some("role-group"), None, Some("default"))] + #[case( + "role-group", + Some("role-group"), + Some("role-group"), + Some("role"), + Some("default") + )] + #[case( + "role-group", + Some("role-group"), + Some("role-group"), + Some("role"), + None + )] + #[case( + "role-group", + Some("role-group"), + Some("role-group"), + None, + Some("default") + )] #[case("role-group", Some("role-group"), Some("role-group"), None, None)] #[case("role", Some("role"), None, Some("role"), Some("default"))] #[case("role", Some("role"), None, Some("role"), None)] diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index eb2ebc1c..74e19f1e 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -33,18 +33,17 @@ use stackable_operator::{ }; use crate::{ - airflow_controller::AIRFLOW_FULL_CONTROLLER_NAME, + controller::AIRFLOW_FULL_CONTROLLER_NAME, crd::{AirflowCluster, AirflowClusterVersion, OPERATOR_NAME, v1alpha1, v1alpha2}, webhooks::conversion::create_webhook_server, }; -mod airflow_controller; mod config; +mod controller; mod controller_commons; mod crd; mod env_vars; mod framework; -mod operations; mod product_logging; mod service; mod util; @@ -178,9 +177,9 @@ async fn main() -> anyhow::Result<()> { ) .graceful_shutdown_on(sigterm_watcher.handle()) .run( - airflow_controller::reconcile_airflow, - airflow_controller::error_policy, - 
Arc::new(airflow_controller::Ctx { + controller::reconcile, + controller::error_policy, + Arc::new(controller::Ctx { client: client.clone(), operator_environment, product_config, diff --git a/rust/operator-binary/src/operations/graceful_shutdown.rs b/rust/operator-binary/src/operations/graceful_shutdown.rs deleted file mode 100644 index dde8e074..00000000 --- a/rust/operator-binary/src/operations/graceful_shutdown.rs +++ /dev/null @@ -1,42 +0,0 @@ -use snafu::{ResultExt, Snafu}; -use stackable_operator::builder::pod::PodBuilder; - -use crate::crd::{AirflowConfig, ExecutorConfig}; - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("Failed to set terminationGracePeriod"))] - SetTerminationGracePeriod { - source: stackable_operator::builder::pod::Error, - }, -} - -pub fn add_airflow_graceful_shutdown_config( - merged_config: &AirflowConfig, - pod_builder: &mut PodBuilder, -) -> Result<(), Error> { - // This must be always set by the merge mechanism, as we provide a default value, - // users can not disable graceful shutdown. - if let Some(graceful_shutdown_timeout) = merged_config.graceful_shutdown_timeout { - pod_builder - .termination_grace_period(&graceful_shutdown_timeout) - .context(SetTerminationGracePeriodSnafu)?; - } - - Ok(()) -} - -pub fn add_executor_graceful_shutdown_config( - merged_config: &ExecutorConfig, - pod_builder: &mut PodBuilder, -) -> Result<(), Error> { - // This must be always set by the merge mechanism, as we provide a default value, - // users can not disable graceful shutdown. 
- if let Some(graceful_shutdown_timeout) = merged_config.graceful_shutdown_timeout { - pod_builder - .termination_grace_period(&graceful_shutdown_timeout) - .context(SetTerminationGracePeriodSnafu)?; - } - - Ok(()) -} diff --git a/rust/operator-binary/src/operations/mod.rs b/rust/operator-binary/src/operations/mod.rs deleted file mode 100644 index 92ca2ec7..00000000 --- a/rust/operator-binary/src/operations/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod graceful_shutdown; -pub mod pdb; diff --git a/rust/operator-binary/src/operations/pdb.rs b/rust/operator-binary/src/operations/pdb.rs deleted file mode 100644 index b3261678..00000000 --- a/rust/operator-binary/src/operations/pdb.rs +++ /dev/null @@ -1,89 +0,0 @@ -use snafu::{ResultExt, Snafu}; -use stackable_operator::{ - builder::pdb::PodDisruptionBudgetBuilder, client::Client, cluster_resources::ClusterResources, - commons::pdb::PdbConfig, kube::ResourceExt, -}; - -use crate::{ - airflow_controller::AIRFLOW_CONTROLLER_NAME, - crd::{APP_NAME, AirflowExecutor, AirflowRole, OPERATOR_NAME, v1alpha2}, -}; - -#[derive(Snafu, Debug)] -pub enum Error { - #[snafu(display("Cannot create PodDisruptionBudget for role [{role}]"))] - CreatePdb { - source: stackable_operator::builder::pdb::Error, - role: String, - }, - #[snafu(display("Cannot apply PodDisruptionBudget [{name}]"))] - ApplyPdb { - source: stackable_operator::cluster_resources::Error, - name: String, - }, -} - -pub async fn add_pdbs( - pdb: &PdbConfig, - airflow: &v1alpha2::AirflowCluster, - role: &AirflowRole, - client: &Client, - cluster_resources: &mut ClusterResources<'_>, -) -> Result<(), Error> { - if !pdb.enabled { - return Ok(()); - } - - let max_unavailable = pdb.max_unavailable.unwrap_or(match role { - AirflowRole::Scheduler => max_unavailable_schedulers(), - AirflowRole::Webserver => max_unavailable_webservers(), - AirflowRole::DagProcessor => max_unavailable_dag_processors(), - AirflowRole::Triggerer => max_unavailable_triggerers(), - 
AirflowRole::Worker => match airflow.spec.executor { - AirflowExecutor::CeleryExecutors { .. } => max_unavailable_workers(), - AirflowExecutor::KubernetesExecutors { .. } => { - // In case Airflow creates the Pods, we don't want to influence that. - return Ok(()); - } - }, - }); - let pdb = PodDisruptionBudgetBuilder::new_with_role( - airflow, - APP_NAME, - &role.to_string(), - OPERATOR_NAME, - AIRFLOW_CONTROLLER_NAME, - ) - .with_context(|_| CreatePdbSnafu { - role: role.to_string(), - })? - .with_max_unavailable(max_unavailable) - .build(); - let pdb_name = pdb.name_any(); - cluster_resources - .add(client, pdb) - .await - .with_context(|_| ApplyPdbSnafu { name: pdb_name })?; - - Ok(()) -} - -fn max_unavailable_schedulers() -> u16 { - 1 -} - -fn max_unavailable_workers() -> u16 { - 1 -} - -fn max_unavailable_webservers() -> u16 { - 1 -} - -fn max_unavailable_dag_processors() -> u16 { - 1 -} - -fn max_unavailable_triggerers() -> u16 { - 1 -} diff --git a/rust/operator-binary/src/product_logging.rs b/rust/operator-binary/src/product_logging.rs index 51572729..4bafb481 100644 --- a/rust/operator-binary/src/product_logging.rs +++ b/rust/operator-binary/src/product_logging.rs @@ -1,59 +1,35 @@ use std::fmt::{Display, Write}; -use snafu::Snafu; use stackable_operator::{ builder::configmap::ConfigMapBuilder, commons::product_image_selection::ResolvedProductImage, kube::Resource, - product_logging::{ - self, - spec::{ - AutomaticContainerLogConfig, ContainerLogConfig, ContainerLogConfigChoice, Logging, - }, - }, + product_logging::{self, spec::AutomaticContainerLogConfig}, role_utils::RoleGroupRef, }; -use crate::crd::STACKABLE_LOG_DIR; - -#[derive(Snafu, Debug)] -pub enum Error { - #[snafu(display("failed to retrieve the ConfigMap [{cm_name}]"))] - ConfigMapNotFound { - source: stackable_operator::client::Error, - cm_name: String, - }, - #[snafu(display("failed to retrieve the entry [{entry}] for ConfigMap [{cm_name}]"))] - MissingConfigMapEntry { - entry: &'static str, 
- cm_name: String, +use crate::{ + crd::STACKABLE_LOG_DIR, + framework::product_logging::framework::{ + ValidatedContainerLogConfigChoice, VectorContainerLogConfig, }, - #[snafu(display("vectorAggregatorConfigMapName must be set"))] - MissingVectorAggregatorAddress, -} - -type Result = std::result::Result; +}; const LOG_CONFIG_FILE: &str = "log_config.py"; const LOG_FILE: &str = "airflow.py.json"; /// Extend the ConfigMap with logging and Vector configurations -pub fn extend_config_map_with_log_config( +pub fn extend_config_map_with_log_config( rolegroup: &RoleGroupRef, - logging: &Logging, - main_container: &C, - vector_container: &C, + main_container: &impl Display, + main_container_log_config: &ValidatedContainerLogConfigChoice, + vector_config: Option<&VectorContainerLogConfig>, cm_builder: &mut ConfigMapBuilder, resolved_product_image: &ResolvedProductImage, -) -> Result<()> -where - C: Clone + Ord + Display, +) where K: Resource, { - if let Some(ContainerLogConfig { - choice: Some(ContainerLogConfigChoice::Automatic(log_config)), - }) = logging.containers.get(main_container) - { + if let ValidatedContainerLogConfigChoice::Automatic(log_config) = main_container_log_config { let log_dir = format!("{STACKABLE_LOG_DIR}/{main_container}"); cm_builder.add_data( LOG_CONFIG_FILE, @@ -61,23 +37,20 @@ where ); } - let vector_log_config = if let Some(ContainerLogConfig { - choice: Some(ContainerLogConfigChoice::Automatic(log_config)), - }) = logging.containers.get(vector_container) - { - Some(log_config) - } else { - None - }; + if let Some(vector_config) = vector_config { + let vector_log_config = if let ValidatedContainerLogConfigChoice::Automatic(log_config) = + &vector_config.log_config + { + Some(log_config) + } else { + None + }; - if logging.enable_vector_agent { cm_builder.add_data( product_logging::framework::VECTOR_CONFIG_FILE, product_logging::framework::create_vector_config(rolegroup, vector_log_config), ); } - - Ok(()) } fn create_airflow_config( diff --git 
a/rust/operator-binary/src/service.rs b/rust/operator-binary/src/service.rs index b9dbe325..410430f0 100644 --- a/rust/operator-binary/src/service.rs +++ b/rust/operator-binary/src/service.rs @@ -4,15 +4,18 @@ use snafu::{ResultExt, Snafu}; use stackable_operator::{ builder::meta::ObjectMetaBuilder, k8s_openapi::api::core::v1::{Service, ServicePort, ServiceSpec}, + kube::Resource, kvp::{Annotations, Labels, ObjectLabels}, role_utils::RoleGroupRef, }; use crate::crd::{HTTP_PORT, HTTP_PORT_NAME, METRICS_PORT, METRICS_PORT_NAME, v1alpha2}; +#[allow(dead_code)] pub const METRICS_SERVICE_SUFFIX: &str = "metrics"; pub const HEADLESS_SERVICE_SUFFIX: &str = "headless"; +#[allow(dead_code)] #[derive(Snafu, Debug)] pub enum Error { #[snafu(display("object is missing metadata to build owner reference"))] @@ -33,6 +36,7 @@ pub enum Error { /// The rolegroup headless [`Service`] is a service that allows direct access to the instances of a certain rolegroup /// This is mostly useful for internal communication between peers, or for clients that perform client-side load balancing. +#[allow(dead_code)] pub fn build_rolegroup_headless_service( airflow: &v1alpha2::AirflowCluster, rolegroup_ref: &RoleGroupRef, @@ -70,6 +74,7 @@ pub fn build_rolegroup_headless_service( } /// The rolegroup metrics [`Service`] is a service that exposes metrics and a prometheus scraping label. 
+#[allow(dead_code)] pub fn build_rolegroup_metrics_service( airflow: &v1alpha2::AirflowCluster, rolegroup_ref: &RoleGroupRef, @@ -106,9 +111,7 @@ pub fn build_rolegroup_metrics_service( }) } -pub fn stateful_set_service_name( - rolegroup_ref: &RoleGroupRef, -) -> Option { +pub fn stateful_set_service_name(rolegroup_ref: &RoleGroupRef) -> Option { Some(rolegroup_headless_service_name( &rolegroup_ref.object_name(), )) From 247cf1e9a2862c5b2584c18046381fe51fbfe8e0 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Mon, 11 May 2026 13:27:43 +0200 Subject: [PATCH 4/5] fixes due to assumed max name lengths --- Cargo.nix | 790 +++++++++++++++++- crate-hashes.json | 18 +- .../src/controller/validate.rs | 4 +- .../src/framework/types/operator.rs | 7 +- 4 files changed, 783 insertions(+), 36 deletions(-) diff --git a/Cargo.nix b/Cargo.nix index 32f0dd35..59184a81 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -2589,7 +2589,19 @@ rec { }; resolvedDefaultFeatures = [ "default" "std" ]; }; - "foldhash" = rec { + "foldhash 0.1.5" = rec { + crateName = "foldhash"; + version = "0.1.5"; + edition = "2021"; + sha256 = "1wisr1xlc2bj7hk4rgkcjkz3j2x4dhd1h9lwk7mj8p71qpdgbi6r"; + authors = [ + "Orson Peters " + ]; + features = { + "default" = [ "std" ]; + }; + }; + "foldhash 0.2.0" = rec { crateName = "foldhash"; version = "0.2.0"; edition = "2021"; @@ -3072,7 +3084,7 @@ rec { } { name = "r-efi"; - packageId = "r-efi"; + packageId = "r-efi 5.3.0"; usesDefaultFeatures = false; target = { target, features }: (("uefi" == target."os" or null) && ("efi_rng" == target."getrandom_backend" or null)); } @@ -3088,6 +3100,90 @@ rec { }; resolvedDefaultFeatures = [ "std" ]; }; + "getrandom 0.4.2" = rec { + crateName = "getrandom"; + version = "0.4.2"; + edition = "2024"; + sha256 = "0mb5833hf9pvn9dhvxjgfg5dx0m77g8wavvjdpvpnkp9fil1xr8d"; + authors = [ + "The Rand Project Developers" + ]; + dependencies = [ + { + name = "cfg-if"; + packageId = "cfg-if"; + } + { + name = "libc"; + packageId = "libc"; + 
usesDefaultFeatures = false; + target = { target, features }: ((("linux" == target."os" or null) || ("android" == target."os" or null)) && (!((("linux" == target."os" or null) && ("" == target."env" or null)) || ("custom" == target."getrandom_backend" or null) || ("linux_raw" == target."getrandom_backend" or null) || ("rdrand" == target."getrandom_backend" or null) || ("rndr" == target."getrandom_backend" or null)))); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: (("dragonfly" == target."os" or null) || ("freebsd" == target."os" or null) || ("hurd" == target."os" or null) || ("illumos" == target."os" or null) || ("cygwin" == target."os" or null) || (("horizon" == target."os" or null) && ("arm" == target."arch" or null))); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: (("haiku" == target."os" or null) || ("redox" == target."os" or null) || ("nto" == target."os" or null) || ("aix" == target."os" or null)); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: (("ios" == target."os" or null) || ("visionos" == target."os" or null) || ("watchos" == target."os" or null) || ("tvos" == target."os" or null)); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: (("macos" == target."os" or null) || ("openbsd" == target."os" or null) || ("vita" == target."os" or null) || ("emscripten" == target."os" or null)); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: ("netbsd" == target."os" or null); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: ("solaris" == target."os" or null); + } + { + name = "libc"; + packageId = "libc"; + usesDefaultFeatures = false; + target = { target, features }: ("vxworks" == target."os" or 
null); + } + { + name = "r-efi"; + packageId = "r-efi 6.0.0"; + usesDefaultFeatures = false; + target = { target, features }: (("uefi" == target."os" or null) && ("efi_rng" == target."getrandom_backend" or null)); + } + { + name = "wasip2"; + packageId = "wasip2"; + usesDefaultFeatures = false; + target = { target, features }: (("wasm32" == target."arch" or null) && ("wasi" == target."os" or null) && ("p2" == target."env" or null)); + } + { + name = "wasip3"; + packageId = "wasip3"; + target = { target, features }: (("wasm32" == target."arch" or null) && ("wasi" == target."os" or null) && ("p3" == target."env" or null)); + } + ]; + features = { + "sys_rng" = [ "dep:rand_core" ]; + "wasm_js" = [ "dep:wasm-bindgen" "dep:js-sys" ]; + }; + }; "git2" = rec { crateName = "git2"; version = "0.20.4"; @@ -3284,6 +3380,36 @@ rec { features = { }; }; + "hashbrown 0.15.5" = rec { + crateName = "hashbrown"; + version = "0.15.5"; + edition = "2021"; + sha256 = "189qaczmjxnikm9db748xyhiw04kpmhm9xj9k9hg0sgx7pjwyacj"; + authors = [ + "Amanieu d'Antras " + ]; + dependencies = [ + { + name = "foldhash"; + packageId = "foldhash 0.1.5"; + optional = true; + usesDefaultFeatures = false; + } + ]; + features = { + "alloc" = [ "dep:alloc" ]; + "allocator-api2" = [ "dep:allocator-api2" ]; + "core" = [ "dep:core" ]; + "default" = [ "default-hasher" "inline-more" "allocator-api2" "equivalent" "raw-entry" ]; + "default-hasher" = [ "dep:foldhash" ]; + "equivalent" = [ "dep:equivalent" ]; + "nightly" = [ "bumpalo/allocator_api" ]; + "rayon" = [ "dep:rayon" ]; + "rustc-dep-of-std" = [ "nightly" "core" "alloc" "rustc-internal-api" ]; + "serde" = [ "dep:serde" ]; + }; + resolvedDefaultFeatures = [ "default-hasher" ]; + }; "hashbrown 0.16.1" = rec { crateName = "hashbrown"; version = "0.16.1"; @@ -3308,7 +3434,7 @@ rec { } { name = "foldhash"; - packageId = "foldhash"; + packageId = "foldhash 0.2.0"; optional = true; usesDefaultFeatures = false; } @@ -4259,6 +4385,22 @@ rec { }; 
resolvedDefaultFeatures = [ "baked" ]; }; + "id-arena" = rec { + crateName = "id-arena"; + version = "2.3.0"; + edition = "2021"; + sha256 = "0m6rs0jcaj4mg33gkv98d71w3hridghp5c4yr928hplpkgbnfc1x"; + libName = "id_arena"; + authors = [ + "Nick Fitzgerald " + "Aleksey Kladov " + ]; + features = { + "default" = [ "std" ]; + "rayon" = [ "dep:rayon" ]; + }; + resolvedDefaultFeatures = [ "default" "std" ]; + }; "ident_case" = rec { crateName = "ident_case"; version = "1.0.1"; @@ -4340,6 +4482,27 @@ rec { packageId = "hashbrown 0.17.0"; usesDefaultFeatures = false; } + { + name = "serde"; + packageId = "serde"; + optional = true; + usesDefaultFeatures = false; + target = { target, features }: false; + } + { + name = "serde_core"; + packageId = "serde_core"; + optional = true; + usesDefaultFeatures = false; + } + ]; + devDependencies = [ + { + name = "serde"; + packageId = "serde"; + usesDefaultFeatures = false; + features = [ "derive" ]; + } ]; features = { "arbitrary" = [ "dep:arbitrary" ]; @@ -4350,7 +4513,7 @@ rec { "serde" = [ "dep:serde_core" "dep:serde" ]; "sval" = [ "dep:sval" ]; }; - resolvedDefaultFeatures = [ "default" "std" ]; + resolvedDefaultFeatures = [ "default" "serde" "std" ]; }; "indoc" = rec { crateName = "indoc"; @@ -4868,7 +5031,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "k8s_version"; authors = [ @@ -5516,6 +5679,18 @@ rec { }; resolvedDefaultFeatures = [ "spin" "spin_no_std" ]; }; + "leb128fmt" = rec { + crateName = "leb128fmt"; + version = "0.1.0"; + edition = "2021"; + sha256 = "1chxm1484a0bly6anh6bd7a99sn355ymlagnwj3yajafnpldkv89"; + authors = [ + "Bryant Luk " + ]; + features = { + "default" = [ "std" ]; + }; + }; "libc" = rec { crateName = "libc"; version = "0.2.185"; @@ -7100,6 +7275,45 @@ rec { }; 
resolvedDefaultFeatures = [ "simd" "std" ]; }; + "prettyplease" = rec { + crateName = "prettyplease"; + version = "0.2.37"; + edition = "2021"; + links = "prettyplease02"; + sha256 = "0azn11i1kh0byabhsgab6kqs74zyrg69xkirzgqyhz6xmjnsi727"; + authors = [ + "David Tolnay " + ]; + dependencies = [ + { + name = "proc-macro2"; + packageId = "proc-macro2"; + usesDefaultFeatures = false; + } + { + name = "syn"; + packageId = "syn 2.0.117"; + usesDefaultFeatures = false; + features = [ "full" ]; + } + ]; + devDependencies = [ + { + name = "proc-macro2"; + packageId = "proc-macro2"; + usesDefaultFeatures = false; + } + { + name = "syn"; + packageId = "syn 2.0.117"; + usesDefaultFeatures = false; + features = [ "clone-impls" "extra-traits" "parsing" "printing" "visit-mut" ]; + } + ]; + features = { + "verbatim" = [ "syn/parsing" ]; + }; + }; "primeorder" = rec { crateName = "primeorder"; version = "0.13.6"; @@ -7306,7 +7520,7 @@ rec { }; resolvedDefaultFeatures = [ "default" "proc-macro" ]; }; - "r-efi" = rec { + "r-efi 5.3.0" = rec { crateName = "r-efi"; version = "5.3.0"; edition = "2018"; @@ -7318,6 +7532,17 @@ rec { "rustc-dep-of-std" = [ "core" ]; }; }; + "r-efi 6.0.0" = rec { + crateName = "r-efi"; + version = "6.0.0"; + edition = "2018"; + sha256 = "1gyrl2k5fyzj9k7kchg2n296z5881lg7070msabid09asp3wkp7q"; + libName = "r_efi"; + features = { + "core" = [ "dep:core" ]; + "rustc-dep-of-std" = [ "core" ]; + }; + }; "rand 0.8.6" = rec { crateName = "rand"; version = "0.8.6"; @@ -9579,6 +9804,10 @@ rec { name = "product-config"; packageId = "product-config"; } + { + name = "regex"; + packageId = "regex"; + } { name = "serde"; packageId = "serde"; @@ -9615,6 +9844,11 @@ rec { name = "tracing"; packageId = "tracing"; } + { + name = "uuid"; + packageId = "uuid"; + features = [ "v4" ]; + } ]; buildDependencies = [ { @@ -9643,7 +9877,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 
= "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "stackable_certs"; authors = [ @@ -9746,7 +9980,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "stackable_operator"; authors = [ @@ -9926,7 +10160,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; procMacro = true; libName = "stackable_operator_derive"; @@ -9961,7 +10195,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "stackable_shared"; authors = [ @@ -10042,7 +10276,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "stackable_telemetry"; authors = [ @@ -10152,7 +10386,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "stackable_versioned"; authors = [ @@ -10202,7 +10436,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = 
"b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; procMacro = true; libName = "stackable_versioned_macros"; @@ -10270,7 +10504,7 @@ rec { src = pkgs.fetchgit { url = "https://github.com/stackabletech/operator-rs.git"; rev = "b7c8a3a5483b4d35d0abfa11f6db6c153bda8a51"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + sha256 = "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc"; }; libName = "stackable_webhook"; authors = [ @@ -12293,6 +12527,72 @@ rec { }; resolvedDefaultFeatures = [ "default" ]; }; + "uuid" = rec { + crateName = "uuid"; + version = "1.23.1"; + edition = "2021"; + sha256 = "0xlwg23rmsfl3gx98qsyzpl24pf4bs9wi3mqx5c6i319hyb4mmyx"; + authors = [ + "Ashley Mannix" + "Dylan DPC" + "Hunar Roop Kahlon" + ]; + dependencies = [ + { + name = "getrandom"; + packageId = "getrandom 0.4.2"; + optional = true; + target = { target, features }: (!(("wasm32" == target."arch" or null) && (("unknown" == target."os" or null) || ("none" == target."os" or null)))); + } + { + name = "js-sys"; + packageId = "js-sys"; + optional = true; + usesDefaultFeatures = false; + target = { target, features }: (("wasm32" == target."arch" or null) && (("unknown" == target."os" or null) || ("none" == target."os" or null)) && (builtins.elem "atomics" targetFeatures)); + } + { + name = "wasm-bindgen"; + packageId = "wasm-bindgen"; + optional = true; + usesDefaultFeatures = false; + target = { target, features }: (("wasm32" == target."arch" or null) && (("unknown" == target."os" or null) || ("none" == target."os" or null))); + } + ]; + devDependencies = [ + { + name = "wasm-bindgen"; + packageId = "wasm-bindgen"; + target = { target, features }: (("wasm32" == target."arch" or null) && (("unknown" == target."os" or null) || ("none" == target."os" or null))); + } + ]; + features = { + "arbitrary" = [ "dep:arbitrary" ]; + "atomic" = [ 
"dep:atomic" ]; + "borsh" = [ "dep:borsh" "dep:borsh-derive" ]; + "bytemuck" = [ "dep:bytemuck" ]; + "default" = [ "std" ]; + "fast-rng" = [ "rng" "dep:rand" ]; + "js" = [ "dep:wasm-bindgen" "dep:js-sys" ]; + "md5" = [ "dep:md-5" ]; + "rng" = [ "dep:getrandom" ]; + "rng-getrandom" = [ "rng" "dep:getrandom" "uuid-rng-internal-lib" "uuid-rng-internal-lib/getrandom" ]; + "rng-rand" = [ "rng" "dep:rand" "uuid-rng-internal-lib" "uuid-rng-internal-lib/rand" ]; + "serde" = [ "dep:serde_core" ]; + "sha1" = [ "dep:sha1_smol" ]; + "slog" = [ "dep:slog" ]; + "std" = [ "wasm-bindgen?/std" "js-sys?/std" ]; + "uuid-rng-internal-lib" = [ "dep:uuid-rng-internal-lib" ]; + "v1" = [ "atomic" ]; + "v3" = [ "md5" ]; + "v4" = [ "rng" ]; + "v5" = [ "sha1" ]; + "v6" = [ "atomic" ]; + "v7" = [ "rng" ]; + "zerocopy" = [ "dep:zerocopy" ]; + }; + resolvedDefaultFeatures = [ "default" "rng" "std" "v4" ]; + }; "valuable" = rec { crateName = "valuable"; version = "0.1.1"; @@ -12366,7 +12666,7 @@ rec { dependencies = [ { name = "wit-bindgen"; - packageId = "wit-bindgen"; + packageId = "wit-bindgen 0.57.1"; usesDefaultFeatures = false; } ]; @@ -12378,6 +12678,31 @@ rec { "rustc-dep-of-std" = [ "core" "alloc" "wit-bindgen/rustc-dep-of-std" ]; }; }; + "wasip3" = rec { + crateName = "wasip3"; + version = "0.4.0+wasi-0.3.0-rc-2026-01-06"; + edition = "2021"; + sha256 = "19dc8p0y2mfrvgk3qw3c3240nfbylv22mvyxz84dqpgai2zzha2l"; + dependencies = [ + { + name = "wit-bindgen"; + packageId = "wit-bindgen 0.51.0"; + usesDefaultFeatures = false; + features = [ "async" ]; + } + ]; + devDependencies = [ + { + name = "wit-bindgen"; + packageId = "wit-bindgen 0.51.0"; + usesDefaultFeatures = false; + features = [ "async-spawn" ]; + } + ]; + features = { + "http-compat" = [ "dep:bytes" "dep:http-body" "dep:http" "dep:thiserror" "wit-bindgen/async-spawn" ]; + }; + }; "wasm-bindgen" = rec { crateName = "wasm-bindgen"; version = "0.2.118"; @@ -12536,14 +12861,124 @@ rec { ]; }; - "web-sys" = rec { - crateName = 
"web-sys"; - version = "0.3.95"; + "wasm-encoder" = rec { + crateName = "wasm-encoder"; + version = "0.244.0"; edition = "2021"; - sha256 = "0zfr2jy5bpkkggl88i43yy37p538hg20i56kwn421yj9g6qznbag"; - libName = "web_sys"; + sha256 = "06c35kv4h42vk3k51xjz1x6hn3mqwfswycmr6ziky033zvr6a04r"; + libName = "wasm_encoder"; authors = [ - "The wasm-bindgen Developers" + "Nick Fitzgerald " + ]; + dependencies = [ + { + name = "leb128fmt"; + packageId = "leb128fmt"; + usesDefaultFeatures = false; + } + { + name = "wasmparser"; + packageId = "wasmparser"; + optional = true; + usesDefaultFeatures = false; + features = [ "simd" "simd" ]; + } + ]; + features = { + "component-model" = [ "wasmparser?/component-model" ]; + "default" = [ "std" "component-model" ]; + "std" = [ "wasmparser?/std" ]; + "wasmparser" = [ "dep:wasmparser" ]; + }; + resolvedDefaultFeatures = [ "component-model" "std" "wasmparser" ]; + }; + "wasm-metadata" = rec { + crateName = "wasm-metadata"; + version = "0.244.0"; + edition = "2021"; + sha256 = "02f9dhlnryd2l7zf03whlxai5sv26x4spfibjdvc3g9gd8z3a3mv"; + libName = "wasm_metadata"; + dependencies = [ + { + name = "anyhow"; + packageId = "anyhow"; + } + { + name = "indexmap"; + packageId = "indexmap"; + usesDefaultFeatures = false; + features = [ "serde" ]; + } + { + name = "wasm-encoder"; + packageId = "wasm-encoder"; + usesDefaultFeatures = false; + features = [ "std" "component-model" ]; + } + { + name = "wasmparser"; + packageId = "wasmparser"; + usesDefaultFeatures = false; + features = [ "simd" "std" "component-model" "hash-collections" ]; + } + ]; + features = { + "clap" = [ "dep:clap" ]; + "default" = [ "oci" "serde" ]; + "oci" = [ "dep:auditable-serde" "dep:flate2" "dep:url" "dep:spdx" "dep:serde_json" "serde" ]; + "serde" = [ "dep:serde_derive" "dep:serde" ]; + }; + }; + "wasmparser" = rec { + crateName = "wasmparser"; + version = "0.244.0"; + edition = "2021"; + sha256 = "1zi821hrlsxfhn39nqpmgzc0wk7ax3dv6vrs5cw6kb0v5v3hgf27"; + authors = [ + "Yury 
Delendik " + ]; + dependencies = [ + { + name = "bitflags"; + packageId = "bitflags"; + } + { + name = "hashbrown"; + packageId = "hashbrown 0.15.5"; + optional = true; + usesDefaultFeatures = false; + features = [ "default-hasher" ]; + } + { + name = "indexmap"; + packageId = "indexmap"; + optional = true; + usesDefaultFeatures = false; + } + { + name = "semver"; + packageId = "semver"; + optional = true; + usesDefaultFeatures = false; + } + ]; + features = { + "component-model" = [ "dep:semver" ]; + "default" = [ "std" "validate" "serde" "features" "component-model" "hash-collections" "simd" ]; + "hash-collections" = [ "dep:hashbrown" "dep:indexmap" ]; + "serde" = [ "dep:serde" "indexmap?/serde" "hashbrown?/serde" ]; + "std" = [ "indexmap?/std" ]; + }; + resolvedDefaultFeatures = [ "component-model" "features" "hash-collections" "simd" "std" "validate" ]; + }; + "web-sys" = rec { + crateName = "web-sys"; + version = "0.3.95"; + edition = "2021"; + sha256 = "0zfr2jy5bpkkggl88i43yy37p538hg20i56kwn421yj9g6qznbag"; + libName = "web_sys"; + authors = [ + "The wasm-bindgen Developers" ]; dependencies = [ { @@ -13860,7 +14295,34 @@ rec { }; resolvedDefaultFeatures = [ "alloc" "ascii" "binary" "default" "parser" "std" ]; }; - "wit-bindgen" = rec { + "wit-bindgen 0.51.0" = rec { + crateName = "wit-bindgen"; + version = "0.51.0"; + edition = "2024"; + sha256 = "19fazgch8sq5cvjv3ynhhfh5d5x08jq2pkw8jfb05vbcyqcr496p"; + libName = "wit_bindgen"; + authors = [ + "Alex Crichton " + ]; + dependencies = [ + { + name = "wit-bindgen-rust-macro"; + packageId = "wit-bindgen-rust-macro"; + optional = true; + } + ]; + features = { + "async" = [ "std" "wit-bindgen-rust-macro?/async" ]; + "async-spawn" = [ "async" "dep:futures" ]; + "bitflags" = [ "dep:bitflags" ]; + "default" = [ "macros" "realloc" "async" "std" "bitflags" ]; + "inter-task-wakeup" = [ "async" ]; + "macros" = [ "dep:wit-bindgen-rust-macro" ]; + "rustc-dep-of-std" = [ "dep:core" "dep:alloc" ]; + }; + 
resolvedDefaultFeatures = [ "async" "std" ]; + }; + "wit-bindgen 0.57.1" = rec { crateName = "wit-bindgen"; version = "0.57.1"; edition = "2024"; @@ -13880,6 +14342,290 @@ rec { "rustc-dep-of-std" = [ "dep:core" "dep:alloc" ]; }; }; + "wit-bindgen-core" = rec { + crateName = "wit-bindgen-core"; + version = "0.51.0"; + edition = "2024"; + sha256 = "1p2jszqsqbx8k7y8nwvxg65wqzxjm048ba5phaq8r9iy9ildwqga"; + libName = "wit_bindgen_core"; + authors = [ + "Alex Crichton " + ]; + dependencies = [ + { + name = "anyhow"; + packageId = "anyhow"; + } + { + name = "heck"; + packageId = "heck"; + } + { + name = "wit-parser"; + packageId = "wit-parser"; + } + ]; + features = { + "clap" = [ "dep:clap" ]; + "serde" = [ "dep:serde" ]; + }; + }; + "wit-bindgen-rust" = rec { + crateName = "wit-bindgen-rust"; + version = "0.51.0"; + edition = "2024"; + sha256 = "08bzn5fsvkb9x9wyvyx98qglknj2075xk1n7c5jxv15jykh6didp"; + libName = "wit_bindgen_rust"; + authors = [ + "Alex Crichton " + ]; + dependencies = [ + { + name = "anyhow"; + packageId = "anyhow"; + } + { + name = "heck"; + packageId = "heck"; + } + { + name = "indexmap"; + packageId = "indexmap"; + } + { + name = "prettyplease"; + packageId = "prettyplease"; + } + { + name = "syn"; + packageId = "syn 2.0.117"; + features = [ "printing" ]; + } + { + name = "wasm-metadata"; + packageId = "wasm-metadata"; + usesDefaultFeatures = false; + } + { + name = "wit-bindgen-core"; + packageId = "wit-bindgen-core"; + } + { + name = "wit-component"; + packageId = "wit-component"; + } + ]; + features = { + "clap" = [ "dep:clap" "wit-bindgen-core/clap" ]; + "serde" = [ "dep:serde" "wit-bindgen-core/serde" ]; + }; + }; + "wit-bindgen-rust-macro" = rec { + crateName = "wit-bindgen-rust-macro"; + version = "0.51.0"; + edition = "2024"; + sha256 = "0ymizapzv2id89igxsz2n587y2hlfypf6n8kyp68x976fzyrn3qc"; + procMacro = true; + libName = "wit_bindgen_rust_macro"; + authors = [ + "Alex Crichton " + ]; + dependencies = [ + { + name = "anyhow"; + packageId = 
"anyhow"; + } + { + name = "prettyplease"; + packageId = "prettyplease"; + } + { + name = "proc-macro2"; + packageId = "proc-macro2"; + } + { + name = "quote"; + packageId = "quote"; + } + { + name = "syn"; + packageId = "syn 2.0.117"; + features = [ "printing" ]; + } + { + name = "wit-bindgen-core"; + packageId = "wit-bindgen-core"; + } + { + name = "wit-bindgen-rust"; + packageId = "wit-bindgen-rust"; + } + ]; + features = { + }; + resolvedDefaultFeatures = [ "async" ]; + }; + "wit-component" = rec { + crateName = "wit-component"; + version = "0.244.0"; + edition = "2021"; + sha256 = "1clwxgsgdns3zj2fqnrjcp8y5gazwfa1k0sy5cbk0fsmx4hflrlx"; + libName = "wit_component"; + authors = [ + "Peter Huene " + ]; + dependencies = [ + { + name = "anyhow"; + packageId = "anyhow"; + } + { + name = "bitflags"; + packageId = "bitflags"; + } + { + name = "indexmap"; + packageId = "indexmap"; + usesDefaultFeatures = false; + } + { + name = "log"; + packageId = "log"; + } + { + name = "serde"; + packageId = "serde"; + usesDefaultFeatures = false; + features = [ "alloc" ]; + } + { + name = "serde_derive"; + packageId = "serde_derive"; + } + { + name = "serde_json"; + packageId = "serde_json"; + } + { + name = "wasm-encoder"; + packageId = "wasm-encoder"; + usesDefaultFeatures = false; + features = [ "std" "wasmparser" ]; + } + { + name = "wasm-metadata"; + packageId = "wasm-metadata"; + usesDefaultFeatures = false; + } + { + name = "wasmparser"; + packageId = "wasmparser"; + usesDefaultFeatures = false; + features = [ "simd" "std" "component-model" "simd" ]; + } + { + name = "wit-parser"; + packageId = "wit-parser"; + features = [ "decoding" "serde" ]; + } + ]; + devDependencies = [ + { + name = "wasm-metadata"; + packageId = "wasm-metadata"; + usesDefaultFeatures = false; + features = [ "oci" ]; + } + { + name = "wasmparser"; + packageId = "wasmparser"; + usesDefaultFeatures = false; + features = [ "simd" "std" "component-model" "features" ]; + } + ]; + features = { + 
"dummy-module" = [ "dep:wat" ]; + "semver-check" = [ "dummy-module" ]; + "wat" = [ "dep:wast" "dep:wat" ]; + }; + }; + "wit-parser" = rec { + crateName = "wit-parser"; + version = "0.244.0"; + edition = "2021"; + sha256 = "0dm7avvdxryxd5b02l0g5h6933z1cw5z0d4wynvq2cywq55srj7c"; + libName = "wit_parser"; + authors = [ + "Alex Crichton " + ]; + dependencies = [ + { + name = "anyhow"; + packageId = "anyhow"; + } + { + name = "id-arena"; + packageId = "id-arena"; + } + { + name = "indexmap"; + packageId = "indexmap"; + usesDefaultFeatures = false; + features = [ "std" ]; + } + { + name = "log"; + packageId = "log"; + } + { + name = "semver"; + packageId = "semver"; + usesDefaultFeatures = false; + } + { + name = "serde"; + packageId = "serde"; + optional = true; + usesDefaultFeatures = false; + features = [ "alloc" ]; + } + { + name = "serde_derive"; + packageId = "serde_derive"; + optional = true; + } + { + name = "serde_json"; + packageId = "serde_json"; + optional = true; + } + { + name = "unicode-xid"; + packageId = "unicode-xid"; + } + { + name = "wasmparser"; + packageId = "wasmparser"; + optional = true; + usesDefaultFeatures = false; + features = [ "simd" "std" "validate" "component-model" "features" ]; + } + ]; + devDependencies = [ + { + name = "serde_json"; + packageId = "serde_json"; + } + ]; + features = { + "decoding" = [ "dep:wasmparser" ]; + "default" = [ "serde" "decoding" ]; + "serde" = [ "dep:serde" "dep:serde_derive" "indexmap/serde" "serde_json" ]; + "serde_json" = [ "dep:serde_json" ]; + "wat" = [ "decoding" "dep:wat" ]; + }; + resolvedDefaultFeatures = [ "decoding" "default" "serde" "serde_json" ]; + }; "writeable" = rec { crateName = "writeable"; version = "0.6.3"; diff --git a/crate-hashes.json b/crate-hashes.json index 4ab79fa6..71fbc1c3 100644 --- a/crate-hashes.json +++ b/crate-hashes.json @@ -1,12 +1,12 @@ { - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#k8s-version@0.1.3": 
"0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-certs@0.4.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-operator-derive@0.3.1": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-operator@0.111.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-shared@0.1.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-telemetry@0.6.3": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-versioned-macros@0.10.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-versioned@0.10.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-webhook@0.9.1": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#k8s-version@0.1.3": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-certs@0.4.0": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-operator-derive@0.3.1": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + 
"git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-operator@0.111.0": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-shared@0.1.0": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-telemetry@0.6.3": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-versioned-macros@0.10.0": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-versioned@0.10.0": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", + "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.111.0#stackable-webhook@0.9.1": "14q10sppdjdf3vbcbxz12rlgm1g9l6p87nk9wr707w2a71z8vgxc", "git+https://github.com/stackabletech/product-config.git?tag=0.8.0#product-config@0.8.0": "1dz70kapm2wdqcr7ndyjji0lhsl98bsq95gnb2lw487wf6yr7987" } \ No newline at end of file diff --git a/rust/operator-binary/src/controller/validate.rs b/rust/operator-binary/src/controller/validate.rs index e8bf287a..c950ee70 100644 --- a/rust/operator-binary/src/controller/validate.rs +++ b/rust/operator-binary/src/controller/validate.rs @@ -418,8 +418,8 @@ pub fn validate_cluster( let (auth_volumes, auth_volume_mounts) = compute_auth_volumes_and_mounts(&dereferenced.authentication_config)?; - // --- service account name (matches build_rbac_resources output) --- - let service_account_name = airflow.name_any(); + // --- service account name (matches build_rbac_resources output: "{cluster}-serviceaccount") --- + let service_account_name = format!("{}-serviceaccount", airflow.name_any()); // --- per-role/rolegroup validation --- let mut validated_role_groups = 
BTreeMap::new(); diff --git a/rust/operator-binary/src/framework/types/operator.rs b/rust/operator-binary/src/framework/types/operator.rs index 21e71c7e..6a663be6 100644 --- a/rust/operator-binary/src/framework/types/operator.rs +++ b/rust/operator-binary/src/framework/types/operator.rs @@ -27,8 +27,9 @@ attributed_string_type! { "The name of a cluster/stacklet", "my-airflow-cluster", // Suffixes are added to produce resource names. According compile-time checks ensure that - // max_length cannot be set higher. - (max_length = 24), + // max_length cannot be set higher. Reduced from opensearch's 24 to 22 because airflow's + // longest role name ("dagprocessor") is 12 chars vs opensearch's 10. + (max_length = 22), is_rfc_1035_label_name, is_valid_label_value } @@ -66,7 +67,7 @@ attributed_string_type! { // The role name is used to produce resource names. To make sure that all resource names are // valid, max_length is restricted. Compile-time checks ensure that max_length cannot be set // higher if not other names like the RoleGroupName are set lower accordingly. 
- (max_length = 10), + (max_length = 12), is_rfc_1123_label_name, is_valid_label_value } From 2ee1e67a59706b966dcf97df21fb0b8dd5ccacda Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Mon, 11 May 2026 17:57:34 +0200 Subject: [PATCH 5/5] test fixes: name lengths --- .../templates/kuttl/logging/41-assert.yaml.j2 | 12 +++---- .../41-install-airflow-cluster.yaml.j2 | 16 +++++----- .../templates/kuttl/logging/52-assert.yaml.j2 | 8 ++--- .../templates/kuttl/logging/70-assert.yaml.j2 | 8 ++--- .../airflow-vector-aggregator-values.yaml.j2 | 32 +++++++++---------- .../kuttl/resources/30-assert.yaml.j2 | 6 ++-- .../30-install-airflow-cluster.yaml.j2 | 6 ++-- 7 files changed, 44 insertions(+), 44 deletions(-) diff --git a/tests/templates/kuttl/logging/41-assert.yaml.j2 b/tests/templates/kuttl/logging/41-assert.yaml.j2 index 77370e89..9f3ded31 100644 --- a/tests/templates/kuttl/logging/41-assert.yaml.j2 +++ b/tests/templates/kuttl/logging/41-assert.yaml.j2 @@ -8,7 +8,7 @@ timeout: 1200 apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-webserver-automatic-log-config + name: airflow-webserver-auto-log-config status: readyReplicas: 1 replicas: 1 @@ -16,7 +16,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-webserver-custom-log-config + name: airflow-webserver-custom-log-cfg status: readyReplicas: 1 replicas: 1 @@ -25,7 +25,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-worker-automatic-log-config + name: airflow-worker-auto-log-config status: readyReplicas: 1 replicas: 1 @@ -33,7 +33,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-worker-custom-log-config + name: airflow-worker-custom-log-cfg status: readyReplicas: 1 replicas: 1 @@ -42,7 +42,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-scheduler-automatic-log-config + name: airflow-scheduler-auto-log-config status: readyReplicas: 1 replicas: 1 @@ -50,7 +50,7 @@ status: apiVersion: apps/v1 kind: 
StatefulSet metadata: - name: airflow-scheduler-custom-log-config + name: airflow-scheduler-custom-log-cfg status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/logging/41-install-airflow-cluster.yaml.j2 b/tests/templates/kuttl/logging/41-install-airflow-cluster.yaml.j2 index d93a44da..3865b546 100644 --- a/tests/templates/kuttl/logging/41-install-airflow-cluster.yaml.j2 +++ b/tests/templates/kuttl/logging/41-install-airflow-cluster.yaml.j2 @@ -121,7 +121,7 @@ spec: memory: limit: 3Gi roleGroups: - automatic-log-config: + auto-log-config: replicas: 1 config: logging: @@ -162,7 +162,7 @@ spec: - name: prepared-logs configMap: name: prepared-logs - custom-log-config: + custom-log-cfg: replicas: 1 config: logging: @@ -181,7 +181,7 @@ spec: memory: limit: 3Gi roleGroups: - automatic-log-config: + auto-log-config: replicas: 1 config: logging: @@ -215,7 +215,7 @@ spec: loggers: ROOT: level: INFO - custom-log-config: + custom-log-cfg: replicas: 1 config: logging: @@ -233,7 +233,7 @@ spec: max: 250m memory: limit: 512Mi - # automatic-log-config + # auto-log-config logging: enableVectorAgent: true containers: @@ -261,7 +261,7 @@ spec: loggers: ROOT: level: INFO - # custom-log-config is not tested for kubernetesExecutors because + # custom-log-cfg is not tested for kubernetesExecutors because # there are no roleGroups to test both {% endif %} schedulers: @@ -273,7 +273,7 @@ spec: memory: limit: 1Gi roleGroups: - automatic-log-config: + auto-log-config: replicas: 1 config: logging: @@ -303,7 +303,7 @@ spec: loggers: ROOT: level: INFO - custom-log-config: + custom-log-cfg: replicas: 1 config: logging: diff --git a/tests/templates/kuttl/logging/52-assert.yaml.j2 b/tests/templates/kuttl/logging/52-assert.yaml.j2 index 35442128..2b3b339d 100644 --- a/tests/templates/kuttl/logging/52-assert.yaml.j2 +++ b/tests/templates/kuttl/logging/52-assert.yaml.j2 @@ -7,10 +7,10 @@ timeout: 600 commands: {% if test_scenario['values']['airflow'].find(",") > 0 %} - script: | - 
kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group automatic-log-config --airflow-version "{{ test_scenario['values']['airflow'].split(',')[0] }}" - kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group custom-log-config --airflow-version "{{ test_scenario['values']['airflow'].split(',')[0] }}" + kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group auto-log-config --airflow-version "{{ test_scenario['values']['airflow'].split(',')[0] }}" + kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group custom-log-cfg --airflow-version "{{ test_scenario['values']['airflow'].split(',')[0] }}" {% else %} - script: | - kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group automatic-log-config --airflow-version "{{ test_scenario['values']['airflow'] }}" - kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group custom-log-config --airflow-version "{{ test_scenario['values']['airflow'] }}" + kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group auto-log-config --airflow-version "{{ test_scenario['values']['airflow'] }}" + kubectl exec -n $NAMESPACE test-airflow-python-0 -- python /tmp/metrics.py --role-group custom-log-cfg --airflow-version "{{ test_scenario['values']['airflow'] }}" {% endif %} diff --git a/tests/templates/kuttl/logging/70-assert.yaml.j2 b/tests/templates/kuttl/logging/70-assert.yaml.j2 index 36ff53a9..71a83224 100644 --- a/tests/templates/kuttl/logging/70-assert.yaml.j2 +++ b/tests/templates/kuttl/logging/70-assert.yaml.j2 @@ -19,17 +19,17 @@ commands: # has to be declared. # See https://github.com/apache/airflow/pull/52581. 
- # Rolegroup custom-log-config + # Rolegroup custom-log-cfg CURL_RESPONSE_CUSTOM=$( - kubectl -n $NAMESPACE exec airflow-webserver-custom-log-config-0 -- sh -c 'CODE=$(curl -s -o /dev/null -w "%{http_code}" http://airflow-worker-custom-log-config-headless:8793/log 2>/dev/null || true);echo "$CODE"' + kubectl -n $NAMESPACE exec airflow-webserver-custom-log-cfg-0 -- sh -c 'CODE=$(curl -s -o /dev/null -w "%{http_code}" http://airflow-worker-custom-log-cfg-headless:8793/log 2>/dev/null || true);echo "$CODE"' ) # Log-Endpoint Test Assertions: echo "The HTTP Code is $CURL_RESPONSE_CUSTOM (an internal JWT token is needed for full access)" - # Rolegroup automatic-log-config + # Rolegroup auto-log-config CURL_RESPONSE_AUTO=$( - kubectl -n $NAMESPACE exec airflow-webserver-automatic-log-config-0 -- sh -c 'CODE=$(curl -s -o /dev/null -w "%{http_code}" http://airflow-worker-automatic-log-config-headless:8793/log 2>/dev/null || true);echo "$CODE"' + kubectl -n $NAMESPACE exec airflow-webserver-auto-log-config-0 -- sh -c 'CODE=$(curl -s -o /dev/null -w "%{http_code}" http://airflow-worker-auto-log-config-headless:8793/log 2>/dev/null || true);echo "$CODE"' ) echo "The HTTP Code is $CURL_RESPONSE_AUTO (an internal JWT token is needed for full access)" [ "$CURL_RESPONSE_CUSTOM" -eq 403 ] && [ "$CURL_RESPONSE_AUTO" -eq 403 ] diff --git a/tests/templates/kuttl/logging/airflow-vector-aggregator-values.yaml.j2 b/tests/templates/kuttl/logging/airflow-vector-aggregator-values.yaml.j2 index 714e3476..2abce6aa 100644 --- a/tests/templates/kuttl/logging/airflow-vector-aggregator-values.yaml.j2 +++ b/tests/templates/kuttl/logging/airflow-vector-aggregator-values.yaml.j2 @@ -28,98 +28,98 @@ customConfig: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-webserver-automatic-log-config-0" && + .pod == "airflow-webserver-auto-log-config-0" && .container == "airflow" filteredAutomaticLogConfigWebserverGitSync: type: filter inputs: [validEvents] condition: >- - .pod == 
"airflow-webserver-automatic-log-config-0" && + .pod == "airflow-webserver-auto-log-config-0" && .container == "git-sync-0" filteredAutomaticLogConfigWebserverVector: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-webserver-automatic-log-config-0" && + .pod == "airflow-webserver-auto-log-config-0" && .container == "vector" filteredCustomLogConfigWebserverAirflow: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-webserver-custom-log-config-0" && + .pod == "airflow-webserver-custom-log-cfg-0" && .container == "airflow" filteredCustomLogConfigWebserverVector: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-webserver-custom-log-config-0" && + .pod == "airflow-webserver-custom-log-cfg-0" && .container == "vector" filteredAutomaticLogConfigSchedulerAirflow: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-scheduler-automatic-log-config-0" && + .pod == "airflow-scheduler-auto-log-config-0" && .container == "airflow" filteredAutomaticLogConfigSchedulerGitSync: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-scheduler-automatic-log-config-0" && + .pod == "airflow-scheduler-auto-log-config-0" && .container == "git-sync-0" filteredAutomaticLogConfigSchedulerVector: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-scheduler-automatic-log-config-0" && + .pod == "airflow-scheduler-auto-log-config-0" && .container == "vector" filteredCustomLogConfigSchedulerAirflow: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-scheduler-custom-log-config-0" && + .pod == "airflow-scheduler-custom-log-cfg-0" && .container == "airflow" filteredCustomLogConfigSchedulerVector: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-scheduler-custom-log-config-0" && + .pod == "airflow-scheduler-custom-log-cfg-0" && .container == "vector" {% if test_scenario['values']['executor'] == 'celery' %} filteredAutomaticLogConfigWorkerAirflow: type: filter 
inputs: [validEvents] condition: >- - .pod == "airflow-worker-automatic-log-config-0" && + .pod == "airflow-worker-auto-log-config-0" && .container == "airflow" filteredAutomaticLogConfigWorkerGitSync: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-worker-automatic-log-config-0" && + .pod == "airflow-worker-auto-log-config-0" && .container == "git-sync-0" filteredAutomaticLogConfigWorkerGitSyncInit: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-worker-automatic-log-config-0" && + .pod == "airflow-worker-auto-log-config-0" && .container == "git-sync-0-init" filteredAutomaticLogConfigWorkerVector: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-worker-automatic-log-config-0" && + .pod == "airflow-worker-auto-log-config-0" && .container == "vector" filteredCustomLogConfigWorkerAirflow: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-worker-custom-log-config-0" && + .pod == "airflow-worker-custom-log-cfg-0" && .container == "airflow" filteredCustomLogConfigWorkerVector: type: filter inputs: [validEvents] condition: >- - .pod == "airflow-worker-custom-log-config-0" && + .pod == "airflow-worker-custom-log-cfg-0" && .container == "vector" {% elif test_scenario['values']['executor'] == 'kubernetes' %} filteredExampleTriggerTargetDagBashTaskBase: diff --git a/tests/templates/kuttl/resources/30-assert.yaml.j2 b/tests/templates/kuttl/resources/30-assert.yaml.j2 index 3a8e17bf..09c49a3a 100644 --- a/tests/templates/kuttl/resources/30-assert.yaml.j2 +++ b/tests/templates/kuttl/resources/30-assert.yaml.j2 @@ -16,7 +16,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-worker-resources-from-role + name: airflow-worker-from-role spec: template: spec: @@ -40,7 +40,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: airflow-worker-resources-from-role-group + name: airflow-worker-from-role-group spec: template: spec: @@ -64,7 +64,7 @@ status: apiVersion: 
apps/v1 kind: StatefulSet metadata: - name: airflow-worker-resources-from-pod-overrides + name: airflow-worker-from-overrides spec: template: spec: diff --git a/tests/templates/kuttl/resources/30-install-airflow-cluster.yaml.j2 b/tests/templates/kuttl/resources/30-install-airflow-cluster.yaml.j2 index 0bdcdc99..a3f11110 100644 --- a/tests/templates/kuttl/resources/30-install-airflow-cluster.yaml.j2 +++ b/tests/templates/kuttl/resources/30-install-airflow-cluster.yaml.j2 @@ -84,9 +84,9 @@ spec: memory: limit: 3001Mi roleGroups: - resources-from-role: + from-role: replicas: 1 - resources-from-role-group: + from-role-group: config: resources: cpu: @@ -95,7 +95,7 @@ spec: memory: limit: 3002Mi replicas: 1 - resources-from-pod-overrides: + from-overrides: podOverrides: spec: containers: