Skip to content

Commit 23e1120

Browse files
authored
chore: Version CRDs and SparkHistoryServerClusterConfig (#525)
* chore: Remove separate CRD crate
* chore: Remove unused items
* chore: Version SparkApplication
* chore: Version SparkHistoryServer
* docs: Fix invalid rustdoc reference
* chore: Version SparkHistoryServerConfigCluster
* chore: Remove redundant kind argument
1 parent cefde43 commit 23e1120

File tree

18 files changed

+495
-291
lines changed

18 files changed

+495
-291
lines changed

Cargo.lock

Lines changed: 279 additions & 49 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
[workspace]
2-
members = ["rust/crd", "rust/operator-binary"]
2+
members = ["rust/operator-binary"]
33
resolver = "2"
44

55
[workspace.package]
@@ -10,19 +10,21 @@ edition = "2021"
1010
repository = "https://github.com/stackabletech/spark-k8s-operator"
1111

1212
[workspace.dependencies]
13+
stackable-versioned = { git = "https://github.com/stackabletech/operator-rs.git", features = ["k8s"], tag = "stackable-versioned-0.5.0" }
14+
stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.85.0" }
15+
product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
16+
1317
anyhow = "1.0"
1418
built = { version = "0.7", features = ["chrono", "git2"] }
1519
clap = "4.5"
1620
const_format = "0.2"
1721
futures = { version = "0.3", features = ["compat"] }
18-
product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
1922
rstest = "0.24"
2023
semver = "1.0"
2124
serde = { version = "1.0", features = ["derive"] }
2225
serde_json = "1.0"
2326
serde_yaml = "0.9"
2427
snafu = "0.8"
25-
stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.85.0" }
2628
strum = { version = "0.26", features = ["derive"] }
2729
tokio = { version = "1.39", features = ["full"] }
2830
tracing = "0.1"

deploy/helm/spark-k8s-operator/crds/crds.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ spec:
1212
kind: SparkApplication
1313
plural: sparkapplications
1414
shortNames:
15-
- sc
15+
- sparkapp
1616
singular: sparkapplication
1717
scope: Namespaced
1818
versions:
@@ -998,7 +998,7 @@ spec:
998998
kind: SparkHistoryServer
999999
plural: sparkhistoryservers
10001000
shortNames:
1001-
- shs
1001+
- sparkhist
10021002
singular: sparkhistoryserver
10031003
scope: Namespaced
10041004
versions:

rust/crd/Cargo.toml

Lines changed: 0 additions & 25 deletions
This file was deleted.

rust/operator-binary/Cargo.toml

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,22 +9,27 @@ repository.workspace = true
99
publish = false
1010

1111
[dependencies]
12-
stackable-spark-k8s-crd = { path = "../crd" }
12+
stackable-versioned.workspace = true
13+
stackable-operator.workspace = true
14+
product-config.workspace = true
1315

1416
anyhow.workspace = true
15-
product-config.workspace = true
17+
const_format.workspace = true
1618
semver.workspace = true
1719
serde.workspace = true
1820
serde_json.workspace = true
1921
serde_yaml.workspace = true
2022
snafu.workspace = true
21-
stackable-operator.workspace = true
2223
strum.workspace = true
2324
tracing.workspace = true
2425
tracing-futures.workspace = true
2526
clap.workspace = true
2627
futures.workspace = true
2728
tokio.workspace = true
2829

30+
[dev-dependencies]
31+
indoc.workspace = true
32+
rstest.workspace = true
33+
2934
[build-dependencies]
3035
built.workspace = true

rust/crd/src/affinity.rs renamed to rust/operator-binary/src/crd/affinity.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ use stackable_operator::{
33
k8s_openapi::api::core::v1::PodAntiAffinity,
44
};
55

6-
use crate::constants::{APP_NAME, HISTORY_ROLE_NAME};
6+
use crate::crd::constants::{APP_NAME, HISTORY_ROLE_NAME};
77

88
pub fn history_affinity(cluster_name: &str) -> StackableAffinityFragment {
99
let affinity_between_role_pods =
@@ -36,7 +36,7 @@ mod test {
3636
role_utils::RoleGroupRef,
3737
};
3838

39-
use crate::{constants::HISTORY_ROLE_NAME, history::SparkHistoryServer};
39+
use crate::crd::{constants::HISTORY_ROLE_NAME, history::v1alpha1};
4040

4141
#[test]
4242
pub fn test_history_affinity_defaults() {
@@ -62,7 +62,7 @@ mod test {
6262
"#;
6363

6464
let deserializer = serde_yaml::Deserializer::from_str(input);
65-
let history: SparkHistoryServer =
65+
let history: v1alpha1::SparkHistoryServer =
6666
serde_yaml::with::singleton_map_recursive::deserialize(deserializer).unwrap();
6767
let expected: StackableAffinity = StackableAffinity {
6868
node_affinity: None,

rust/crd/src/constants.rs renamed to rust/operator-binary/src/crd/constants.rs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,6 @@ pub const STACKABLE_TRUST_STORE_NAME: &str = "stackable-truststore";
4343
pub const STACKABLE_TLS_STORE_PASSWORD: &str = "changeit";
4444
pub const SYSTEM_TRUST_STORE_PASSWORD: &str = "changeit";
4545
pub const STACKABLE_MOUNT_PATH_TLS: &str = "/stackable/mount_server_tls";
46-
pub const STACKABLE_MOUNT_NAME_TLS: &str = "servertls";
4746

4847
pub const MIN_MEMORY_OVERHEAD: u32 = 384;
4948
pub const JVM_OVERHEAD_FACTOR: f32 = 0.1;

rust/crd/src/history.rs renamed to rust/operator-binary/src/crd/history.rs

Lines changed: 65 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -28,9 +28,10 @@ use stackable_operator::{
2828
schemars::{self, JsonSchema},
2929
time::Duration,
3030
};
31+
use stackable_versioned::versioned;
3132
use strum::{Display, EnumIter};
3233

33-
use crate::{affinity::history_affinity, constants::*, logdir::ResolvedLogDir};
34+
use crate::crd::{affinity::history_affinity, constants::*, logdir::ResolvedLogDir};
3435

3536
#[derive(Snafu, Debug)]
3637
pub enum Error {
@@ -48,62 +49,63 @@ pub enum Error {
4849
CannotRetrieveRoleGroup { role_group: String },
4950
}
5051

51-
/// A Spark cluster history server component. This resource is managed by the Stackable operator
52-
/// for Apache Spark. Find more information on how to use it in the
53-
/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
54-
#[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
55-
#[kube(
56-
group = "spark.stackable.tech",
57-
version = "v1alpha1",
58-
kind = "SparkHistoryServer",
59-
shortname = "shs",
60-
namespaced,
61-
crates(
62-
kube_core = "stackable_operator::kube::core",
63-
k8s_openapi = "stackable_operator::k8s_openapi",
64-
schemars = "stackable_operator::schemars"
65-
)
66-
)]
67-
#[serde(rename_all = "camelCase")]
68-
pub struct SparkHistoryServerSpec {
69-
pub image: ProductImage,
70-
71-
/// Global Spark history server configuration that applies to all roles and role groups.
72-
#[serde(default)]
73-
pub cluster_config: SparkHistoryServerClusterConfig,
74-
75-
/// Name of the Vector aggregator discovery ConfigMap.
76-
/// It must contain the key `ADDRESS` with the address of the Vector aggregator.
77-
#[serde(skip_serializing_if = "Option::is_none")]
78-
pub vector_aggregator_config_map_name: Option<String>,
79-
80-
/// The log file directory definition used by the Spark history server.
81-
pub log_file_directory: LogFileDirectorySpec,
82-
83-
/// A map of key/value strings that will be passed directly to Spark when deploying the history server.
84-
#[serde(default)]
85-
pub spark_conf: BTreeMap<String, String>,
86-
87-
/// A history server node role definition.
88-
pub nodes: Role<HistoryConfigFragment>,
89-
}
52+
#[versioned(version(name = "v1alpha1"))]
53+
pub mod versioned {
54+
/// A Spark cluster history server component. This resource is managed by the Stackable operator
55+
/// for Apache Spark. Find more information on how to use it in the
56+
/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
57+
#[versioned(k8s(
58+
group = "spark.stackable.tech",
59+
shortname = "sparkhist",
60+
namespaced,
61+
crates(
62+
kube_core = "stackable_operator::kube::core",
63+
k8s_openapi = "stackable_operator::k8s_openapi",
64+
schemars = "stackable_operator::schemars"
65+
)
66+
))]
67+
#[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
68+
#[serde(rename_all = "camelCase")]
69+
pub struct SparkHistoryServerSpec {
70+
pub image: ProductImage,
71+
72+
/// Global Spark history server configuration that applies to all roles and role groups.
73+
#[serde(default)]
74+
pub cluster_config: v1alpha1::SparkHistoryServerClusterConfig,
75+
76+
/// Name of the Vector aggregator discovery ConfigMap.
77+
/// It must contain the key `ADDRESS` with the address of the Vector aggregator.
78+
#[serde(skip_serializing_if = "Option::is_none")]
79+
pub vector_aggregator_config_map_name: Option<String>,
80+
81+
/// The log file directory definition used by the Spark history server.
82+
pub log_file_directory: LogFileDirectorySpec,
83+
84+
/// A map of key/value strings that will be passed directly to Spark when deploying the history server.
85+
#[serde(default)]
86+
pub spark_conf: BTreeMap<String, String>,
87+
88+
/// A history server node role definition.
89+
pub nodes: Role<HistoryConfigFragment>,
90+
}
9091

91-
#[derive(Clone, Deserialize, Debug, Default, Eq, JsonSchema, PartialEq, Serialize)]
92-
#[serde(rename_all = "camelCase")]
93-
pub struct SparkHistoryServerClusterConfig {
94-
/// This field controls which type of Service the Operator creates for this HistoryServer:
95-
///
96-
/// * cluster-internal: Use a ClusterIP service
97-
///
98-
/// * external-unstable: Use a NodePort service
99-
///
100-
/// * external-stable: Use a LoadBalancer service
101-
///
102-
/// This is a temporary solution with the goal to keep yaml manifests forward compatible.
103-
/// In the future, this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
104-
/// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change.
105-
#[serde(default)]
106-
pub listener_class: CurrentlySupportedListenerClasses,
92+
#[derive(Clone, Deserialize, Debug, Default, Eq, JsonSchema, PartialEq, Serialize)]
93+
#[serde(rename_all = "camelCase")]
94+
pub struct SparkHistoryServerClusterConfig {
95+
/// This field controls which type of Service the Operator creates for this HistoryServer:
96+
///
97+
/// * cluster-internal: Use a ClusterIP service
98+
///
99+
/// * external-unstable: Use a NodePort service
100+
///
101+
/// * external-stable: Use a LoadBalancer service
102+
///
103+
/// This is a temporary solution with the goal to keep yaml manifests forward compatible.
104+
/// In the future, this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
105+
/// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change.
106+
#[serde(default)]
107+
pub listener_class: CurrentlySupportedListenerClasses,
108+
}
107109
}
108110

109111
// TODO: Temporary solution until listener-operator is finished
@@ -129,7 +131,7 @@ impl CurrentlySupportedListenerClasses {
129131
}
130132
}
131133

132-
impl SparkHistoryServer {
134+
impl v1alpha1::SparkHistoryServer {
133135
/// Returns a reference to the role. Raises an error if the role is not defined.
134136
pub fn role(&self) -> &Role<HistoryConfigFragment> {
135137
&self.spec.nodes
@@ -138,7 +140,7 @@ impl SparkHistoryServer {
138140
/// Returns a reference to the role group. Raises an error if the role or role group are not defined.
139141
pub fn rolegroup(
140142
&self,
141-
rolegroup_ref: &RoleGroupRef<SparkHistoryServer>,
143+
rolegroup_ref: &RoleGroupRef<Self>,
142144
) -> Result<RoleGroup<HistoryConfigFragment, GenericProductSpecificCommonConfig>, Error> {
143145
self.spec
144146
.nodes
@@ -152,7 +154,7 @@ impl SparkHistoryServer {
152154

153155
pub fn merged_config(
154156
&self,
155-
rolegroup_ref: &RoleGroupRef<SparkHistoryServer>,
157+
rolegroup_ref: &RoleGroupRef<Self>,
156158
) -> Result<HistoryConfig, Error> {
157159
// Initialize the result with all default values as baseline
158160
let conf_defaults = HistoryConfig::default_config(&self.name_any());
@@ -184,7 +186,7 @@ impl SparkHistoryServer {
184186
.map(i32::from)
185187
}
186188

187-
pub fn cleaner_rolegroups(&self) -> Vec<RoleGroupRef<SparkHistoryServer>> {
189+
pub fn cleaner_rolegroups(&self) -> Vec<RoleGroupRef<Self>> {
188190
let mut rgs = vec![];
189191
for (rg_name, rg_config) in &self.spec.nodes.role_groups {
190192
if let Some(true) = rg_config.config.config.cleaner {
@@ -444,7 +446,7 @@ impl HistoryConfig {
444446
}
445447

446448
impl Configuration for HistoryConfigFragment {
447-
type Configurable = SparkHistoryServer;
449+
type Configurable = v1alpha1::SparkHistoryServer;
448450

449451
fn compute_env(
450452
&self,
@@ -484,7 +486,7 @@ mod test {
484486
};
485487

486488
use super::*;
487-
use crate::logdir::S3LogDir;
489+
use crate::crd::logdir::S3LogDir;
488490

489491
#[test]
490492
pub fn test_env_overrides() {
@@ -515,7 +517,7 @@ mod test {
515517
"#};
516518

517519
let deserializer = serde_yaml::Deserializer::from_str(input);
518-
let history: SparkHistoryServer =
520+
let history: v1alpha1::SparkHistoryServer =
519521
serde_yaml::with::singleton_map_recursive::deserialize(deserializer).unwrap();
520522

521523
let log_dir = ResolvedLogDir::S3(S3LogDir {

rust/crd/src/logdir.rs renamed to rust/operator-binary/src/crd/logdir.rs

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ use stackable_operator::{
1515
};
1616
use strum::{EnumDiscriminants, IntoStaticStr};
1717

18-
use crate::{
18+
use crate::crd::{
1919
constants::*,
2020
history::{
2121
LogFileDirectorySpec::{self, S3},
@@ -133,13 +133,6 @@ impl ResolvedLogDir {
133133
}
134134
}
135135

136-
pub fn credentials(&self) -> Option<SecretClassVolume> {
137-
match self {
138-
ResolvedLogDir::S3(s3_log_dir) => s3_log_dir.credentials(),
139-
ResolvedLogDir::Custom(_) => None,
140-
}
141-
}
142-
143136
pub fn credentials_mount_path(&self) -> Option<String> {
144137
match self {
145138
ResolvedLogDir::S3(s3_log_dir) => s3_log_dir.credentials_mount_path(),

0 commit comments

Comments (0)