@@ -28,9 +28,10 @@ use stackable_operator::{
     schemars::{self, JsonSchema},
     time::Duration,
 };
+use stackable_versioned::versioned;
 use strum::{Display, EnumIter};
 
-use crate::{affinity::history_affinity, constants::*, logdir::ResolvedLogDir};
+use crate::crd::{affinity::history_affinity, constants::*, logdir::ResolvedLogDir};
 
 #[derive(Snafu, Debug)]
 pub enum Error {
@@ -48,62 +49,63 @@ pub enum Error {
     CannotRetrieveRoleGroup { role_group: String },
 }
 
-/// A Spark cluster history server component. This resource is managed by the Stackable operator
-/// for Apache Spark. Find more information on how to use it in the
-/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
-#[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
-#[kube(
-    group = "spark.stackable.tech",
-    version = "v1alpha1",
-    kind = "SparkHistoryServer",
-    shortname = "shs",
-    namespaced,
-    crates(
-        kube_core = "stackable_operator::kube::core",
-        k8s_openapi = "stackable_operator::k8s_openapi",
-        schemars = "stackable_operator::schemars"
-    )
-)]
-#[serde(rename_all = "camelCase")]
-pub struct SparkHistoryServerSpec {
-    pub image: ProductImage,
-
-    /// Global Spark history server configuration that applies to all roles and role groups.
-    #[serde(default)]
-    pub cluster_config: SparkHistoryServerClusterConfig,
-
-    /// Name of the Vector aggregator discovery ConfigMap.
-    /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub vector_aggregator_config_map_name: Option<String>,
-
-    /// The log file directory definition used by the Spark history server.
-    pub log_file_directory: LogFileDirectorySpec,
-
-    /// A map of key/value strings that will be passed directly to Spark when deploying the history server.
-    #[serde(default)]
-    pub spark_conf: BTreeMap<String, String>,
-
-    /// A history server node role definition.
-    pub nodes: Role<HistoryConfigFragment>,
-}
+#[versioned(version(name = "v1alpha1"))]
+pub mod versioned {
+    /// A Spark cluster history server component. This resource is managed by the Stackable operator
+    /// for Apache Spark. Find more information on how to use it in the
+    /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server).
+    #[versioned(k8s(
+        group = "spark.stackable.tech",
+        shortname = "sparkhist",
+        namespaced,
+        crates(
+            kube_core = "stackable_operator::kube::core",
+            k8s_openapi = "stackable_operator::k8s_openapi",
+            schemars = "stackable_operator::schemars"
+        )
+    ))]
+    #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct SparkHistoryServerSpec {
+        pub image: ProductImage,
+
+        /// Global Spark history server configuration that applies to all roles and role groups.
+        #[serde(default)]
+        pub cluster_config: v1alpha1::SparkHistoryServerClusterConfig,
+
+        /// Name of the Vector aggregator discovery ConfigMap.
+        /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        pub vector_aggregator_config_map_name: Option<String>,
+
+        /// The log file directory definition used by the Spark history server.
+        pub log_file_directory: LogFileDirectorySpec,
+
+        /// A map of key/value strings that will be passed directly to Spark when deploying the history server.
+        #[serde(default)]
+        pub spark_conf: BTreeMap<String, String>,
+
+        /// A history server node role definition.
+        pub nodes: Role<HistoryConfigFragment>,
+    }
 
-#[derive(Clone, Deserialize, Debug, Default, Eq, JsonSchema, PartialEq, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct SparkHistoryServerClusterConfig {
-    /// This field controls which type of Service the Operator creates for this HistoryServer:
-    ///
-    /// * cluster-internal: Use a ClusterIP service
-    ///
-    /// * external-unstable: Use a NodePort service
-    ///
-    /// * external-stable: Use a LoadBalancer service
-    ///
-    /// This is a temporary solution with the goal to keep yaml manifests forward compatible.
-    /// In the future, this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
-    /// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change.
-    #[serde(default)]
-    pub listener_class: CurrentlySupportedListenerClasses,
+    #[derive(Clone, Deserialize, Debug, Default, Eq, JsonSchema, PartialEq, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct SparkHistoryServerClusterConfig {
+        /// This field controls which type of Service the Operator creates for this HistoryServer:
+        ///
+        /// * cluster-internal: Use a ClusterIP service
+        ///
+        /// * external-unstable: Use a NodePort service
+        ///
+        /// * external-stable: Use a LoadBalancer service
+        ///
+        /// This is a temporary solution with the goal to keep yaml manifests forward compatible.
+        /// In the future, this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
+        /// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change.
+        #[serde(default)]
+        pub listener_class: CurrentlySupportedListenerClasses,
+    }
 }
 
 // TODO: Temporary solution until listener-operator is finished
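The hunk above is the heart of this change: instead of a single `#[kube(...)]` CRD struct, the definitions now live in a `pub mod versioned` annotated with the `stackable_versioned::versioned` macro, which emits one module per declared version. A rough sketch of the shape this generates, assuming the macro behaves here as it does in other Stackable operators (illustrative, not the literal expansion):

```rust
// Sketch only: the approximate shape `#[versioned(version(name = "v1alpha1"))]`
// produces from `pub mod versioned` above. Not the literal generated code.
pub mod v1alpha1 {
    // Each struct declared in `mod versioned` is emitted once per version...
    pub struct SparkHistoryServerSpec { /* fields as declared above */ }
    pub struct SparkHistoryServerClusterConfig { /* as declared above */ }

    // ...and `#[versioned(k8s(...))]` applies the CustomResource machinery,
    // which is why the rest of this diff refers to `v1alpha1::SparkHistoryServer`.
}
```

This also explains the mechanical `RoleGroupRef<SparkHistoryServer>` to `RoleGroupRef<Self>` edits in the hunks below: once the `impl` block targets `v1alpha1::SparkHistoryServer`, `Self` already names the versioned type, so the version path does not need to be repeated.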
@@ -129,7 +131,7 @@ impl CurrentlySupportedListenerClasses {
     }
 }
 
-impl SparkHistoryServer {
+impl v1alpha1::SparkHistoryServer {
     /// Returns a reference to the role. Raises an error if the role is not defined.
     pub fn role(&self) -> &Role<HistoryConfigFragment> {
         &self.spec.nodes
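For readers without the full file: the context lines at the top of that hunk close `impl CurrentlySupportedListenerClasses`, which this diff leaves untouched. Judging from the doc comment on `listener_class` and the pattern shared across Stackable operators, the enum and its Service-type mapping presumably look roughly like this (a reconstruction, not part of the diff; names and derives are assumptions):

```rust
// Reconstructed sketch of the untouched enum, based on the doc comment above.
#[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)]
pub enum CurrentlySupportedListenerClasses {
    #[default]
    #[serde(rename = "cluster-internal")]
    ClusterInternal,
    #[serde(rename = "external-unstable")]
    ExternalUnstable,
    #[serde(rename = "external-stable")]
    ExternalStable,
}

impl CurrentlySupportedListenerClasses {
    /// Maps each listener class to the Kubernetes Service type it stands in for.
    pub fn k8s_service_type(&self) -> String {
        match self {
            CurrentlySupportedListenerClasses::ClusterInternal => "ClusterIP".to_string(),
            CurrentlySupportedListenerClasses::ExternalUnstable => "NodePort".to_string(),
            CurrentlySupportedListenerClasses::ExternalStable => "LoadBalancer".to_string(),
        }
    }
}
```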
@@ -138,7 +140,7 @@ impl SparkHistoryServer {
     /// Returns a reference to the role group. Raises an error if the role or role group are not defined.
     pub fn rolegroup(
         &self,
-        rolegroup_ref: &RoleGroupRef<SparkHistoryServer>,
+        rolegroup_ref: &RoleGroupRef<Self>,
     ) -> Result<RoleGroup<HistoryConfigFragment, GenericProductSpecificCommonConfig>, Error> {
         self.spec
             .nodes
@@ -152,7 +154,7 @@ impl SparkHistoryServer {
 
     pub fn merged_config(
         &self,
-        rolegroup_ref: &RoleGroupRef<SparkHistoryServer>,
+        rolegroup_ref: &RoleGroupRef<Self>,
     ) -> Result<HistoryConfig, Error> {
         // Initialize the result with all default values as baseline
         let conf_defaults = HistoryConfig::default_config(&self.name_any());
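The hunk cuts off right after the defaults baseline. In the usual Stackable merge pattern, the rest of `merged_config` layers rolegroup config over role config over those defaults; the continuation below is sketched from memory of sibling operators, so details such as the error variant name are assumptions:

```rust
// Hedged sketch of how merged_config typically continues (not shown in the diff).
// Merge hierarchy: rolegroup overrides role, role overrides defaults.
let role = &self.spec.nodes.config.config;
let rolegroup = self.rolegroup(rolegroup_ref)?;

let mut conf_role = role.to_owned();
conf_role.merge(&conf_defaults);

let mut conf_rolegroup = rolegroup.config.config.to_owned();
conf_rolegroup.merge(&conf_role);

// Validate the fully merged fragment into a concrete HistoryConfig.
// `FragmentValidationFailureSnafu` is an assumed error variant.
fragment::validate(conf_rolegroup).context(FragmentValidationFailureSnafu)
```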
@@ -184,7 +186,7 @@ impl SparkHistoryServer {
         .map(i32::from)
     }
 
-    pub fn cleaner_rolegroups(&self) -> Vec<RoleGroupRef<SparkHistoryServer>> {
+    pub fn cleaner_rolegroups(&self) -> Vec<RoleGroupRef<Self>> {
         let mut rgs = vec![];
         for (rg_name, rg_config) in &self.spec.nodes.role_groups {
             if let Some(true) = rg_config.config.config.cleaner {
@@ -444,7 +446,7 @@ impl HistoryConfig {
 }
 
 impl Configuration for HistoryConfigFragment {
-    type Configurable = SparkHistoryServer;
+    type Configurable = v1alpha1::SparkHistoryServer;
 
     fn compute_env(
         &self,
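`Configuration` is the trait from `stackable_operator::product_config_utils` that drives product-config generation; it is generic over the resource through `type Configurable`, so pointing it at `v1alpha1::SparkHistoryServer` means every `compute_*` hook now receives the versioned resource. Its rough shape, recalled from the stackable_operator API (signatures may differ in detail):

```rust
// Approximate shape of the trait being implemented above; recalled from
// stackable_operator::product_config_utils, so treat signatures as indicative.
pub trait Configuration {
    type Configurable;

    fn compute_env(
        &self,
        resource: &Self::Configurable, // now v1alpha1::SparkHistoryServer
        role_name: &str,
    ) -> Result<BTreeMap<String, Option<String>>, ConfigError>;

    // compute_cli and compute_files follow the same pattern, with
    // compute_files additionally receiving the target file name.
}
```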
@@ -484,7 +486,7 @@ mod test {
     };
 
     use super::*;
-    use crate::logdir::S3LogDir;
+    use crate::crd::logdir::S3LogDir;
 
     #[test]
     pub fn test_env_overrides() {
@@ -515,7 +517,7 @@ mod test {
         "#};
 
         let deserializer = serde_yaml::Deserializer::from_str(input);
-        let history: SparkHistoryServer =
+        let history: v1alpha1::SparkHistoryServer =
             serde_yaml::with::singleton_map_recursive::deserialize(deserializer).unwrap();
 
         let log_dir = ResolvedLogDir::S3(S3LogDir {
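Beyond the test, any call site that named the CRD type directly has to pick up the `v1alpha1::` path. A minimal hypothetical example of such a caller (the function and its placement are made up for illustration, assuming `v1alpha1` is in scope; the kube `Api` calls themselves are standard):

```rust
use stackable_operator::kube::{self, Api, Client};

// Hypothetical caller: only the type path changes with this diff.
async fn fetch_history_server(
    client: Client,
    namespace: &str,
    name: &str,
) -> Result<v1alpha1::SparkHistoryServer, kube::Error> {
    let api: Api<v1alpha1::SparkHistoryServer> = Api::namespaced(client, namespace);
    api.get(name).await
}
```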