diff --git a/CHANGELOG.md b/CHANGELOG.md index 78272614..6d68f6ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,9 +13,12 @@ All notable changes to this project will be documented in this file. - Revert openshift settings ([#207]) - BUGFIX: assign service account to history pods ([#207]) +- Merging and validation of the configuration refactored ([#223]) +- `operator-rs` `0.36.0` → `0.38.0` ([#223]) [#207]: https://github.com/stackabletech/spark-k8s-operator/pull/207 [#217]: https://github.com/stackabletech/spark-k8s-operator/pull/217 +[#223]: https://github.com/stackabletech/spark-k8s-operator/pull/223 ## [23.1.0] - 2023-01-23 diff --git a/Cargo.lock b/Cargo.lock index e053685c..cc916518 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1806,8 +1806,8 @@ dependencies = [ [[package]] name = "stackable-operator" -version = "0.36.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.36.0#e7796ee75dcba3904646e2b7a3ed2029b88651d6" +version = "0.38.0" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.38.0#bc443fb82390de45ecbf36b927fe7e92988ee7b2" dependencies = [ "chrono", "clap", @@ -1840,8 +1840,8 @@ dependencies = [ [[package]] name = "stackable-operator-derive" -version = "0.36.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.36.0#e7796ee75dcba3904646e2b7a3ed2029b88651d6" +version = "0.38.0" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.38.0#bc443fb82390de45ecbf36b927fe7e92988ee7b2" dependencies = [ "darling", "proc-macro2", diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml index bdcb9e5a..f6ba5565 100644 --- a/deploy/helm/spark-k8s-operator/crds/crds.yaml +++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml @@ -87,9 +87,13 @@ spec: nullable: true properties: affinity: - nullable: true + default: + podAffinity: null + podAntiAffinity: null + nodeAffinity: null + nodeSelector: null properties: - node_affinity: + nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. nullable: true properties: @@ -217,10 +221,10 @@ spec: - nodeSelectorTerms type: object type: object - node_selector: + nodeSelector: nullable: true type: object - pod_affinity: + podAffinity: description: Pod affinity is a group of inter pod affinity scheduling rules. nullable: true properties: @@ -390,7 +394,7 @@ spec: type: object type: array type: object - pod_anti_affinity: + podAntiAffinity: description: Pod anti affinity is a group of inter pod anti affinity scheduling rules. nullable: true properties: @@ -562,7 +566,14 @@ spec: type: object type: object resources: - nullable: true + default: + memory: + limit: null + runtimeLimits: {} + cpu: + min: null + max: null + storage: {} properties: cpu: default: @@ -704,9 +715,13 @@ spec: nullable: true properties: affinity: - nullable: true + default: + podAffinity: null + podAntiAffinity: null + nodeAffinity: null + nodeSelector: null properties: - node_affinity: + nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. nullable: true properties: @@ -834,10 +849,10 @@ spec: - nodeSelectorTerms type: object type: object - node_selector: + nodeSelector: nullable: true type: object - pod_affinity: + podAffinity: description: Pod affinity is a group of inter pod affinity scheduling rules. 
nullable: true properties: @@ -1007,7 +1022,7 @@ spec: type: object type: array type: object - pod_anti_affinity: + podAntiAffinity: description: Pod anti affinity is a group of inter pod anti affinity scheduling rules. nullable: true properties: @@ -1189,7 +1204,14 @@ spec: nullable: true type: object resources: - nullable: true + default: + memory: + limit: null + runtimeLimits: {} + cpu: + min: null + max: null + storage: {} properties: cpu: default: @@ -1253,7 +1275,14 @@ spec: nullable: true properties: resources: - nullable: true + default: + memory: + limit: null + runtimeLimits: {} + cpu: + min: null + max: null + storage: {} properties: cpu: default: diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml index e1724147..9d3bb7cc 100644 --- a/rust/crd/Cargo.toml +++ b/rust/crd/Cargo.toml @@ -9,7 +9,7 @@ version = "0.0.0-dev" publish = false [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag="0.36.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag="0.38.0" } semver = "1.0" serde = "1.0" diff --git a/rust/crd/src/history.rs b/rust/crd/src/history.rs index 48309495..955ef4c5 100644 --- a/rust/crd/src/history.rs +++ b/rust/crd/src/history.rs @@ -1,33 +1,33 @@ -use crate::affinity::history_affinity; -use crate::constants::*; -use stackable_operator::commons::affinity::StackableAffinity; -use stackable_operator::commons::product_image_selection::{ProductImage, ResolvedProductImage}; -use stackable_operator::commons::resources::{ - CpuLimitsFragment, MemoryLimitsFragment, NoRuntimeLimitsFragment, -}; -use stackable_operator::commons::s3::S3BucketDef; -use stackable_operator::config::fragment::ValidationError; -use stackable_operator::k8s_openapi::apimachinery::pkg::api::resource::Quantity; -use stackable_operator::kube::runtime::reflector::ObjectRef; -use stackable_operator::kube::ResourceExt; -use stackable_operator::product_config::types::PropertyNameKind; -use stackable_operator::product_config::ProductConfigManager; -use stackable_operator::product_config_utils::{ - transform_all_roles_to_config, validate_all_roles_and_groups_config, Configuration, - ValidatedRoleConfigByPropertyKind, -}; -use stackable_operator::role_utils::{Role, RoleGroupRef}; +use crate::{affinity::history_affinity, constants::*}; use std::collections::{BTreeMap, HashMap}; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use stackable_operator::{ - commons::resources::{NoRuntimeLimits, Resources, ResourcesFragment}, - config::{fragment, fragment::Fragment, merge::Merge}, -}; -use stackable_operator::{ + commons::{ + affinity::StackableAffinity, + product_image_selection::{ProductImage, ResolvedProductImage}, + resources::{ + CpuLimitsFragment, MemoryLimitsFragment, NoRuntimeLimits, NoRuntimeLimitsFragment, + Resources, ResourcesFragment, + }, + s3::S3BucketDef, + }, + config::{ + fragment, + fragment::{Fragment, ValidationError}, + merge::Merge, + }, + k8s_openapi::apimachinery::pkg::api::resource::Quantity, kube::CustomResource, + kube::{runtime::reflector::ObjectRef, ResourceExt}, + product_config::{types::PropertyNameKind, ProductConfigManager}, + product_config_utils::{ + transform_all_roles_to_config, validate_all_roles_and_groups_config, Configuration, + ValidatedRoleConfigByPropertyKind, + }, + role_utils::{Role, RoleGroupRef}, schemars::{self, JsonSchema}, }; use strum::Display; @@ -91,7 +91,7 @@ impl SparkHistoryServer { conf_role.merge(&conf_defaults); 
conf_rolegroup.merge(&conf_role); - fragment::validate(conf_defaults).context(FragmentValidationFailureSnafu) + fragment::validate(conf_rolegroup).context(FragmentValidationFailureSnafu) } pub fn replicas(&self, rolegroup_ref: &RoleGroupRef) -> Option { diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 0e877cc8..693332cb 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -5,34 +5,41 @@ pub mod constants; pub mod history; pub mod s3logdir; +use std::{ + cmp::max, + collections::{BTreeMap, HashMap}, + slice, +}; + use constants::*; use history::LogFileDirectorySpec; use s3logdir::S3LogDir; -use stackable_operator::builder::VolumeBuilder; -use stackable_operator::commons::affinity::StackableAffinity; -use stackable_operator::commons::s3::{S3AccessStyle, S3ConnectionDef, S3ConnectionSpec}; -use stackable_operator::k8s_openapi::api::core::v1::{ - EmptyDirVolumeSource, EnvVar, LocalObjectReference, Volume, VolumeMount, -}; -use stackable_operator::memory::{BinaryMultiple, MemoryQuantity}; -use std::cmp::max; - -use std::collections::{BTreeMap, HashMap}; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_operator::kube::ResourceExt; -use stackable_operator::labels::ObjectLabels; use stackable_operator::{ - commons::resources::{ - CpuLimits, CpuLimitsFragment, MemoryLimits, MemoryLimitsFragment, NoRuntimeLimits, - NoRuntimeLimitsFragment, Resources, ResourcesFragment, + builder::VolumeBuilder, + commons::{ + affinity::{StackableAffinity, StackableAffinityFragment}, + resources::{ + CpuLimits, CpuLimitsFragment, MemoryLimits, MemoryLimitsFragment, NoRuntimeLimits, + NoRuntimeLimitsFragment, Resources, ResourcesFragment, + }, + s3::{S3AccessStyle, S3ConnectionDef, S3ConnectionSpec}, }, - config::{fragment, fragment::Fragment, fragment::ValidationError, merge::Merge}, -}; -use stackable_operator::{ - k8s_openapi::apimachinery::pkg::api::resource::Quantity, - kube::CustomResource, + config::{ + fragment, + fragment::Fragment, + fragment::ValidationError, + merge::{Atomic, Merge}, + }, + k8s_openapi::{ + api::core::v1::{EmptyDirVolumeSource, EnvVar, LocalObjectReference, Volume, VolumeMount}, + apimachinery::pkg::api::resource::Quantity, + }, + kube::{CustomResource, ResourceExt}, + labels::ObjectLabels, + memory::{BinaryMultiple, MemoryQuantity}, role_utils::CommonConfiguration, schemars::{self, JsonSchema}, }; @@ -44,12 +51,8 @@ pub enum Error { NoNamespace, #[snafu(display("object defines no deploy mode"))] ObjectHasNoDeployMode, - #[snafu(display("object defines no main class"))] - ObjectHasNoMainClass, #[snafu(display("object defines no application artifact"))] ObjectHasNoArtifact, - #[snafu(display("object defines no pod image"))] - ObjectHasNoImage, #[snafu(display("object has no name"))] ObjectHasNoName, #[snafu(display("application has no Spark image"))] @@ -97,24 +100,39 @@ pub struct SparkApplicationStatus { )] pub struct SparkStorageConfig {} -#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] +#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] +#[fragment_attrs( + derive( + Clone, + Debug, + Default, + Deserialize, + Merge, + JsonSchema, + PartialEq, + Serialize + ), + serde(rename_all = "camelCase") +)] pub struct SparkConfig { - pub resources: Option>, + #[fragment_attrs(serde(default))] + pub resources: Resources, } impl SparkConfig { - fn default_resources() -> ResourcesFragment { - ResourcesFragment { - cpu: CpuLimitsFragment { - min: 
Some(Quantity("500m".to_owned())), - max: Some(Quantity("1".to_owned())), - }, - memory: MemoryLimitsFragment { - limit: Some(Quantity("1Gi".to_owned())), - runtime_limits: NoRuntimeLimitsFragment {}, + fn default_config() -> SparkConfigFragment { + SparkConfigFragment { + resources: ResourcesFragment { + cpu: CpuLimitsFragment { + min: Some(Quantity("500m".to_owned())), + max: Some(Quantity("1".to_owned())), + }, + memory: MemoryLimitsFragment { + limit: Some(Quantity("1Gi".to_owned())), + runtime_limits: NoRuntimeLimitsFragment {}, + }, + storage: SparkStorageConfigFragment {}, }, - storage: SparkStorageConfigFragment {}, } } } @@ -152,11 +170,11 @@ pub struct SparkApplicationSpec { #[serde(default, skip_serializing_if = "Option::is_none")] pub spark_image_pull_secrets: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] - pub job: Option, + pub job: Option, #[serde(default, skip_serializing_if = "Option::is_none")] - pub driver: Option, + pub driver: Option, #[serde(default, skip_serializing_if = "Option::is_none")] - pub executor: Option, + pub executor: Option, #[serde(flatten)] pub config: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -287,40 +305,37 @@ impl SparkApplication { result } - pub fn executor_volume_mounts( + pub fn spark_job_volume_mounts( &self, s3conn: &Option, s3logdir: &Option, ) -> Vec { - let result: Vec = self - .spec - .executor - .as_ref() - .and_then(|executor_conf| executor_conf.volume_mounts.clone()) - .iter() - .flat_map(|v| v.iter()) - .cloned() - .collect(); + let volume_mounts = vec![VolumeMount { + name: VOLUME_MOUNT_NAME_POD_TEMPLATES.into(), + mount_path: VOLUME_MOUNT_PATH_POD_TEMPLATES.into(), + ..VolumeMount::default() + }]; + self.add_common_volume_mounts(volume_mounts, s3conn, s3logdir) + } - self.add_common_volume_mounts(result, s3conn, s3logdir) + pub fn executor_volume_mounts( + &self, + config: &ExecutorConfig, + s3conn: &Option, + s3logdir: &Option, + ) -> Vec { + let volume_mounts = config.volume_mounts.clone().unwrap_or_default().into(); + self.add_common_volume_mounts(volume_mounts, s3conn, s3logdir) } pub fn driver_volume_mounts( &self, + config: &DriverConfig, s3conn: &Option, s3logdir: &Option, ) -> Vec { - let result: Vec = self - .spec - .driver - .as_ref() - .and_then(|driver_conf| driver_conf.volume_mounts.clone()) - .iter() - .flat_map(|v| v.iter()) - .cloned() - .collect(); - - self.add_common_volume_mounts(result, s3conn, s3logdir) + let volume_mounts = config.volume_mounts.clone().unwrap_or_default().into(); + self.add_common_volume_mounts(volume_mounts, s3conn, s3logdir) } fn add_common_volume_mounts( @@ -451,11 +466,14 @@ impl SparkApplication { // then added to the vector once complete. let mut submit_conf: BTreeMap = BTreeMap::new(); + let driver_config = self.driver_config()?; + let executor_config = self.executor_config()?; + // resource limits, either declared or taken from defaults if let Resources { cpu: CpuLimits { max: Some(max), .. }, .. - } = &self.driver_resources()? + } = &driver_config.resources { submit_conf.insert( "spark.kubernetes.driver.limit.cores".to_string(), @@ -469,7 +487,7 @@ impl SparkApplication { if let Resources { cpu: CpuLimits { min: Some(min), .. }, .. - } = &self.driver_resources()? + } = &driver_config.resources { submit_conf.insert( "spark.kubernetes.driver.request.cores".to_string(), @@ -481,7 +499,7 @@ impl SparkApplication { limit: Some(limit), .. }, .. - } = &self.driver_resources()? 
+ } = &driver_config.resources { let memory = self .subtract_spark_memory_overhead(limit) @@ -492,7 +510,7 @@ impl SparkApplication { if let Resources { cpu: CpuLimits { max: Some(max), .. }, .. - } = &self.executor_resources()? + } = &executor_config.resources { submit_conf.insert( "spark.kubernetes.executor.limit.cores".to_string(), @@ -506,7 +524,7 @@ impl SparkApplication { if let Resources { cpu: CpuLimits { min: Some(min), .. }, .. - } = &self.executor_resources()? + } = &executor_config.resources { submit_conf.insert( "spark.kubernetes.executor.request.cores".to_string(), @@ -518,7 +536,7 @@ impl SparkApplication { limit: Some(limit), .. }, .. - } = &self.executor_resources()? + } = &executor_config.resources { let memory = self .subtract_spark_memory_overhead(limit) @@ -617,55 +635,33 @@ impl SparkApplication { e } - pub fn affinity(&self, role: SparkApplicationRole) -> Option { + pub fn affinity(&self, role: SparkApplicationRole) -> Result { match role { SparkApplicationRole::Driver => self - .spec - .driver - .as_ref() - .and_then(|driver_config| driver_config.affinity.clone()), + .driver_config() + .map(|driver_config| driver_config.affinity), SparkApplicationRole::Executor => self - .spec - .executor - .as_ref() - .and_then(|executor_config| executor_config.affinity.clone()), + .executor_config() + .map(|executor_config| executor_config.affinity), } } - pub fn job_resources(&self) -> Result, Error> { - let conf = SparkConfig::default_resources(); - - let mut resources = self - .spec - .job - .clone() - .and_then(|spark_config| spark_config.resources) - .unwrap_or_default(); - - resources.merge(&conf); - fragment::validate(resources).context(FragmentValidationFailureSnafu) + pub fn job_config(&self) -> Result { + let mut config = self.spec.job.clone().unwrap_or_default(); + config.merge(&SparkConfig::default_config()); + fragment::validate(config).context(FragmentValidationFailureSnafu) } - pub fn driver_resources( - &self, - ) -> Result, Error> { - let resources = if let Some(driver_config) = self.spec.driver.clone() { - driver_config.spark_config() - } else { - DriverConfig::default_resources() - }; - fragment::validate(resources).context(FragmentValidationFailureSnafu) + pub fn driver_config(&self) -> Result { + let mut config = self.spec.driver.clone().unwrap_or_default(); + config.merge(&DriverConfig::default_config()); + fragment::validate(config).context(FragmentValidationFailureSnafu) } - pub fn executor_resources( - &self, - ) -> Result, Error> { - let resources = if let Some(executor_config) = self.spec.executor.clone() { - executor_config.spark_config() - } else { - ExecutorConfig::default_resources() - }; - fragment::validate(resources).context(FragmentValidationFailureSnafu) + pub fn executor_config(&self) -> Result { + let mut config = self.spec.executor.clone().unwrap_or_default(); + config.merge(&ExecutorConfig::default_config()); + fragment::validate(config).context(FragmentValidationFailureSnafu) } } @@ -691,6 +687,37 @@ fn cores_from_quantity(q: String) -> Result { Ok((cores as u32).to_string()) } +#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct VolumeMounts { + pub volume_mounts: Option>, +} + +impl Atomic for VolumeMounts {} + +impl<'a> IntoIterator for &'a VolumeMounts { + type Item = &'a VolumeMount; + type IntoIter = slice::Iter<'a, VolumeMount>; + + fn into_iter(self) -> Self::IntoIter { + self.volume_mounts.as_deref().unwrap_or_default().iter() + } +} + +impl From for Vec { 
+ fn from(value: VolumeMounts) -> Self { + value.volume_mounts.unwrap_or_default() + } +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct NodeSelector { + pub node_selector: Option>, +} + +impl Atomic for NodeSelector {} + #[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct CommonConfig { @@ -700,79 +727,96 @@ pub struct CommonConfig { pub enable_monitoring: Option, } -#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] +#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] +#[fragment_attrs( + derive( + Clone, + Debug, + Default, + Deserialize, + Merge, + JsonSchema, + PartialEq, + Serialize + ), + serde(rename_all = "camelCase") +)] pub struct DriverConfig { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub resources: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub volume_mounts: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub affinity: Option, + #[fragment_attrs(serde(default))] + pub resources: Resources, + #[fragment_attrs(serde(default, flatten))] + pub volume_mounts: Option, + #[fragment_attrs(serde(default))] + pub affinity: StackableAffinity, } impl DriverConfig { - fn default_resources() -> ResourcesFragment { - ResourcesFragment { - cpu: CpuLimitsFragment { - min: Some(Quantity("1".to_owned())), - max: Some(Quantity("2".to_owned())), - }, - memory: MemoryLimitsFragment { - limit: Some(Quantity("2Gi".to_owned())), - runtime_limits: NoRuntimeLimitsFragment {}, + fn default_config() -> DriverConfigFragment { + DriverConfigFragment { + resources: ResourcesFragment { + cpu: CpuLimitsFragment { + min: Some(Quantity("1".to_owned())), + max: Some(Quantity("2".to_owned())), + }, + memory: MemoryLimitsFragment { + limit: Some(Quantity("2Gi".to_owned())), + runtime_limits: NoRuntimeLimitsFragment {}, + }, + storage: SparkStorageConfigFragment {}, }, - storage: SparkStorageConfigFragment {}, + volume_mounts: Some(VolumeMounts::default()), + affinity: StackableAffinityFragment::default(), } } - - fn spark_config(&self) -> ResourcesFragment { - let default_resources = DriverConfig::default_resources(); - - let mut resources = self.resources.clone().unwrap_or_default(); - - resources.merge(&default_resources); - resources - } } -#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] +#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] +#[fragment_attrs( + derive( + Clone, + Debug, + Default, + Deserialize, + Merge, + JsonSchema, + PartialEq, + Serialize + ), + serde(rename_all = "camelCase") +)] pub struct ExecutorConfig { + #[fragment_attrs(serde(default))] pub instances: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub resources: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub volume_mounts: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub node_selector: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub affinity: Option, + #[fragment_attrs(serde(default))] + pub resources: Resources, + #[fragment_attrs(serde(default, flatten))] + pub volume_mounts: Option, + #[fragment_attrs(serde(default, flatten))] + pub node_selector: Option, + #[fragment_attrs(serde(default))] + pub affinity: StackableAffinity, } impl 
ExecutorConfig { - fn default_resources() -> ResourcesFragment { - ResourcesFragment { - cpu: CpuLimitsFragment { - min: Some(Quantity("1".to_owned())), - max: Some(Quantity("4".to_owned())), + fn default_config() -> ExecutorConfigFragment { + ExecutorConfigFragment { + instances: None, + resources: ResourcesFragment { + cpu: CpuLimitsFragment { + min: Some(Quantity("1".to_owned())), + max: Some(Quantity("4".to_owned())), + }, + memory: MemoryLimitsFragment { + limit: Some(Quantity("4Gi".to_owned())), + runtime_limits: NoRuntimeLimitsFragment {}, + }, + storage: SparkStorageConfigFragment {}, }, - memory: MemoryLimitsFragment { - limit: Some(Quantity("4Gi".to_owned())), - runtime_limits: NoRuntimeLimitsFragment {}, - }, - storage: SparkStorageConfigFragment {}, + volume_mounts: Default::default(), + node_selector: Default::default(), + affinity: Default::default(), } } - - fn spark_config(&self) -> ResourcesFragment { - let default_resources = ExecutorConfig::default_resources(); - - let mut resources = self.resources.clone().unwrap_or_default(); - - resources.merge(&default_resources); - resources - } } #[cfg(test)] @@ -1077,63 +1121,17 @@ spec: ) .unwrap(); - let job_resources = &spark_application.job_resources(); - assert_eq!( - "500m", - job_resources.as_ref().unwrap().clone().cpu.min.unwrap().0 - ); - assert_eq!( - "1", - job_resources.as_ref().unwrap().clone().cpu.max.unwrap().0 - ); + let job_resources = &spark_application.job_config().unwrap().resources; + assert_eq!("500m", job_resources.cpu.min.as_ref().unwrap().0); + assert_eq!("1", job_resources.cpu.max.as_ref().unwrap().0); - let driver_resources = &spark_application.driver_resources(); - assert_eq!( - "1", - driver_resources - .as_ref() - .unwrap() - .clone() - .cpu - .min - .unwrap() - .0 - ); - assert_eq!( - "2", - driver_resources - .as_ref() - .unwrap() - .clone() - .cpu - .max - .unwrap() - .0 - ); + let driver_resources = &spark_application.driver_config().unwrap().resources; + assert_eq!("1", driver_resources.cpu.min.as_ref().unwrap().0); + assert_eq!("2", driver_resources.cpu.max.as_ref().unwrap().0); - let executor_resources = &spark_application.executor_resources(); - assert_eq!( - "1", - executor_resources - .as_ref() - .unwrap() - .clone() - .cpu - .min - .unwrap() - .0 - ); - assert_eq!( - "4", - executor_resources - .as_ref() - .unwrap() - .clone() - .cpu - .max - .unwrap() - .0 - ); + let executor_resources = &spark_application.executor_config().unwrap().resources; + assert_eq!("1", executor_resources.cpu.min.as_ref().unwrap().0); + assert_eq!("4", executor_resources.cpu.max.as_ref().unwrap().0); } #[test] @@ -1177,8 +1175,9 @@ spec: assert_eq!( "1300m", &spark_application - .driver_resources() + .driver_config() .unwrap() + .resources .cpu .max .unwrap() @@ -1187,8 +1186,9 @@ spec: assert_eq!( "500m", &spark_application - .executor_resources() + .executor_config() .unwrap() + .resources .cpu .min .unwrap() diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index 361afe6a..b4cf76d2 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -9,10 +9,10 @@ version = "0.0.0-dev" publish = false [dependencies] -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag="0.36.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag="0.38.0" } stackable-spark-k8s-crd = { path = "../crd" } anyhow = "1.0" -clap = "4.0" +clap = "4.1" futures = "0.3" serde = "1.0" serde_yaml = "0.8" @@ 
-24,5 +24,5 @@ tracing-futures = { version = "0.2", features = ["futures-03"] } [build-dependencies] built = { version = "0.5", features = ["chrono", "git2"] } -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag="0.36.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag="0.38.0" } stackable-spark-k8s-crd = { path = "../crd" } diff --git a/rust/operator-binary/src/spark_k8s_controller.rs b/rust/operator-binary/src/spark_k8s_controller.rs index 6a3a608c..0837e409 100644 --- a/rust/operator-binary/src/spark_k8s_controller.rs +++ b/rust/operator-binary/src/spark_k8s_controller.rs @@ -1,27 +1,33 @@ -use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_operator::builder::{ - ConfigMapBuilder, ContainerBuilder, ObjectMetaBuilder, PodBuilder, +use std::{sync::Arc, time::Duration}; + +use stackable_spark_k8s_crd::{ + constants::*, s3logdir::S3LogDir, SparkApplication, SparkApplicationRole, }; -use stackable_operator::commons::affinity::StackableAffinity; -use stackable_operator::commons::s3::S3ConnectionSpec; -use stackable_operator::commons::tls::{CaCert, TlsVerification}; -use stackable_operator::k8s_openapi::api::batch::v1::{Job, JobSpec}; -use stackable_operator::k8s_openapi::api::core::v1::{ - ConfigMap, ConfigMapVolumeSource, Container, EnvVar, Pod, PodSecurityContext, PodSpec, - PodTemplateSpec, ServiceAccount, Volume, VolumeMount, +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::{ + builder::{ConfigMapBuilder, ContainerBuilder, ObjectMetaBuilder, PodBuilder}, + commons::{ + affinity::StackableAffinity, + s3::S3ConnectionSpec, + tls::{CaCert, TlsVerification}, + }, + k8s_openapi::{ + api::{ + batch::v1::{Job, JobSpec}, + core::v1::{ + ConfigMap, ConfigMapVolumeSource, Container, EnvVar, Pod, PodSecurityContext, + PodSpec, PodTemplateSpec, ServiceAccount, Volume, VolumeMount, + }, + rbac::v1::{ClusterRole, RoleBinding, RoleRef, Subject}, + }, + Resource, + }, + kube::runtime::controller::Action, + logging::controller::ReconcilerError, }; -use stackable_operator::k8s_openapi::api::rbac::v1::{ClusterRole, RoleBinding, RoleRef, Subject}; -use stackable_operator::k8s_openapi::Resource; -use stackable_operator::kube::runtime::controller::Action; -use stackable_operator::logging::controller::ReconcilerError; -use stackable_spark_k8s_crd::SparkApplication; -use stackable_spark_k8s_crd::{constants::*, SparkApplicationRole}; -use std::{sync::Arc, time::Duration}; use strum::{EnumDiscriminants, IntoStaticStr}; -use stackable_spark_k8s_crd::s3logdir::S3LogDir; - pub struct Ctx { pub client: stackable_operator::client::Client, } @@ -46,10 +52,6 @@ pub enum Error { ApplyApplication { source: stackable_operator::error::Error, }, - #[snafu(display("failed to update status"))] - ApplyStatus { - source: stackable_operator::error::Error, - }, #[snafu(display("failed to build stark-submit command"))] BuildCommand { source: stackable_spark_k8s_crd::Error, @@ -58,14 +60,8 @@ pub enum Error { PodTemplateConfigMap { source: stackable_operator::error::Error, }, - #[snafu(display("no job image specified"))] - ObjectHasNoImage, #[snafu(display("no spark base image specified"))] ObjectHasNoSparkImage, - #[snafu(display("invalid pod template"))] - PodTemplate { - source: stackable_operator::error::Error, - }, #[snafu(display("driver pod template serialization"))] DriverPodTemplateSerde { source: serde_yaml::Error }, #[snafu(display("executor pod template serialization"))] @@ -78,8 +74,8 @@ pub enum Error { 
S3TlsNoVerificationNotSupported, #[snafu(display("ca-cert verification not supported"))] S3TlsCaVerificationNotSupported, - #[snafu(display("failed to resolve and merge resource config"))] - FailedToResolveResourceConfig { + #[snafu(display("failed to resolve and merge config"))] + FailedToResolveConfig { source: stackable_spark_k8s_crd::Error, }, #[snafu(display("failed to recognise the container name"))] @@ -256,12 +252,18 @@ fn pod_template( // N.B. this may be ignored by spark as preference is given to spark // configuration settings. let resources = match container_name { - CONTAINER_NAME_DRIVER => spark_application - .driver_resources() - .context(FailedToResolveResourceConfigSnafu)?, - CONTAINER_NAME_EXECUTOR => spark_application - .executor_resources() - .context(FailedToResolveResourceConfigSnafu)?, + CONTAINER_NAME_DRIVER => { + spark_application + .driver_config() + .context(FailedToResolveConfigSnafu)? + .resources + } + CONTAINER_NAME_EXECUTOR => { + spark_application + .executor_config() + .context(FailedToResolveConfigSnafu)? + .resources + } _ => return UnrecognisedContainerNameSnafu.fail(), }; @@ -320,27 +322,37 @@ fn pod_template_config_map( ) -> Result { let volumes = spark_application.volumes(s3conn, s3logdir); + let driver_config = spark_application + .driver_config() + .context(FailedToResolveConfigSnafu)?; let driver_template = pod_template( spark_application, CONTAINER_NAME_DRIVER, init_containers, volumes.as_ref(), spark_application - .driver_volume_mounts(s3conn, s3logdir) + .driver_volume_mounts(&driver_config, s3conn, s3logdir) .as_ref(), env, - spark_application.affinity(SparkApplicationRole::Driver), + spark_application + .affinity(SparkApplicationRole::Driver) + .ok(), )?; + let executor_config = spark_application + .executor_config() + .context(FailedToResolveConfigSnafu)?; let executor_template = pod_template( spark_application, CONTAINER_NAME_EXECUTOR, init_containers, volumes.as_ref(), spark_application - .executor_volume_mounts(s3conn, s3logdir) + .executor_volume_mounts(&executor_config, s3conn, s3logdir) .as_ref(), env, - spark_application.affinity(SparkApplicationRole::Executor), + spark_application + .affinity(SparkApplicationRole::Executor) + .ok(), )?; ConfigMapBuilder::new() @@ -378,26 +390,19 @@ fn spark_job( s3conn: &Option, s3logdir: &Option, ) -> Result { - let mut volume_mounts = vec![VolumeMount { - name: VOLUME_MOUNT_NAME_POD_TEMPLATES.into(), - mount_path: VOLUME_MOUNT_PATH_POD_TEMPLATES.into(), - ..VolumeMount::default() - }]; - volume_mounts.extend(spark_application.driver_volume_mounts(s3conn, s3logdir)); - let mut cb = ContainerBuilder::new("spark-submit").with_context(|_| IllegalContainerNameSnafu { container_name: APP_NAME.to_string(), })?; - let resources = spark_application - .job_resources() - .context(FailedToResolveResourceConfigSnafu)?; + let job_config = spark_application + .job_config() + .context(FailedToResolveConfigSnafu)?; cb.image(spark_image) .command(vec!["/bin/sh".to_string()]) - .resources(resources.into()) + .resources(job_config.resources.into()) .args(vec!["-c".to_string(), job_commands.join(" ")]) - .add_volume_mounts(volume_mounts) + .add_volume_mounts(spark_application.spark_job_volume_mounts(s3conn, s3logdir)) .add_env_vars(env.to_vec()) // TODO: move this to the image .add_env_vars(vec![EnvVar { diff --git a/tests/templates/kuttl/pyspark-ny-public-s3-image/00-assert.yaml b/tests/templates/kuttl/pyspark-ny-public-s3-image/00-assert.yaml index 863f6070..5baf8caa 100644 --- 
a/tests/templates/kuttl/pyspark-ny-public-s3-image/00-assert.yaml +++ b/tests/templates/kuttl/pyspark-ny-public-s3-image/00-assert.yaml @@ -3,22 +3,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 900 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-minio -status: - readyReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: minio-client - labels: - app: minio-client -status: - phase: Running ---- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/tests/templates/kuttl/pyspark-ny-public-s3-image/02-assert.yaml b/tests/templates/kuttl/pyspark-ny-public-s3-image/02-assert.yaml new file mode 100644 index 00000000..fbbea3bd --- /dev/null +++ b/tests/templates/kuttl/pyspark-ny-public-s3-image/02-assert.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 900 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-minio +status: + readyReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: minio-client + labels: + app: minio-client +status: + phase: Running diff --git a/tests/templates/kuttl/pyspark-ny-public-s3-image/00-setup-minio.yaml b/tests/templates/kuttl/pyspark-ny-public-s3-image/02-setup-minio.yaml similarity index 100% rename from tests/templates/kuttl/pyspark-ny-public-s3-image/00-setup-minio.yaml rename to tests/templates/kuttl/pyspark-ny-public-s3-image/02-setup-minio.yaml diff --git a/tests/templates/kuttl/pyspark-ny-public-s3-image/01-prepare-bucket.yaml.j2 b/tests/templates/kuttl/pyspark-ny-public-s3-image/03-prepare-bucket.yaml.j2 similarity index 96% rename from tests/templates/kuttl/pyspark-ny-public-s3-image/01-prepare-bucket.yaml.j2 rename to tests/templates/kuttl/pyspark-ny-public-s3-image/03-prepare-bucket.yaml.j2 index 8a90c2c5..4a3b1aae 100644 --- a/tests/templates/kuttl/pyspark-ny-public-s3-image/01-prepare-bucket.yaml.j2 +++ b/tests/templates/kuttl/pyspark-ny-public-s3-image/03-prepare-bucket.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: # give minio enough time to start - - command: sleep 5 + - command: sleep 10 - command: kubectl cp -n $NAMESPACE yellow_tripdata_2021-07.csv minio-client:/tmp - command: kubectl exec -n $NAMESPACE minio-client -- sh -c 'mc alias set test-minio http://test-minio:9000 $$MINIO_SERVER_ACCESS_KEY $$MINIO_SERVER_SECRET_KEY' - command: kubectl exec -n $NAMESPACE minio-client -- mc mb test-minio/my-bucket diff --git a/tests/templates/kuttl/pyspark-ny-public-s3/00-assert.yaml b/tests/templates/kuttl/pyspark-ny-public-s3/00-assert.yaml index 863f6070..5baf8caa 100644 --- a/tests/templates/kuttl/pyspark-ny-public-s3/00-assert.yaml +++ b/tests/templates/kuttl/pyspark-ny-public-s3/00-assert.yaml @@ -3,22 +3,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 900 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-minio -status: - readyReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: minio-client - labels: - app: minio-client -status: - phase: Running ---- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/tests/templates/kuttl/pyspark-ny-public-s3/02-assert.yaml b/tests/templates/kuttl/pyspark-ny-public-s3/02-assert.yaml new file mode 100644 index 00000000..fbbea3bd --- /dev/null +++ b/tests/templates/kuttl/pyspark-ny-public-s3/02-assert.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 900 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-minio +status: + readyReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + 
name: minio-client + labels: + app: minio-client +status: + phase: Running diff --git a/tests/templates/kuttl/pyspark-ny-public-s3/00-setup-minio.yaml b/tests/templates/kuttl/pyspark-ny-public-s3/02-setup-minio.yaml similarity index 100% rename from tests/templates/kuttl/pyspark-ny-public-s3/00-setup-minio.yaml rename to tests/templates/kuttl/pyspark-ny-public-s3/02-setup-minio.yaml diff --git a/tests/templates/kuttl/pyspark-ny-public-s3/01-prepare-bucket.yaml.j2 b/tests/templates/kuttl/pyspark-ny-public-s3/03-prepare-bucket.yaml.j2 similarity index 97% rename from tests/templates/kuttl/pyspark-ny-public-s3/01-prepare-bucket.yaml.j2 rename to tests/templates/kuttl/pyspark-ny-public-s3/03-prepare-bucket.yaml.j2 index 0750d450..9fc99079 100644 --- a/tests/templates/kuttl/pyspark-ny-public-s3/01-prepare-bucket.yaml.j2 +++ b/tests/templates/kuttl/pyspark-ny-public-s3/03-prepare-bucket.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: # give minio enough time to start - - command: sleep 5 + - command: sleep 10 - command: kubectl cp -n $NAMESPACE ny_tlc_report.py minio-client:/tmp - command: kubectl cp -n $NAMESPACE yellow_tripdata_2021-07.csv minio-client:/tmp - command: kubectl exec -n $NAMESPACE minio-client -- sh -c 'mc alias set test-minio http://test-minio:9000 $$MINIO_SERVER_ACCESS_KEY $$MINIO_SERVER_SECRET_KEY' diff --git a/tests/templates/kuttl/spark-history-server/00-assert.yaml b/tests/templates/kuttl/spark-history-server/00-assert.yaml index 32a197af..5baf8caa 100644 --- a/tests/templates/kuttl/spark-history-server/00-assert.yaml +++ b/tests/templates/kuttl/spark-history-server/00-assert.yaml @@ -3,38 +3,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 900 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-minio -status: - readyReplicas: 1 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: eventlog-minio -status: - readyReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: minio-client - labels: - app: minio-client -status: - phase: Running ---- -apiVersion: v1 -kind: Pod -metadata: - name: eventlog-minio-client - labels: - app: eventlog-minio-client -status: - phase: Running ---- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/tests/templates/kuttl/spark-history-server/00-s3-secret.yaml b/tests/templates/kuttl/spark-history-server/02-s3-secret.yaml similarity index 89% rename from tests/templates/kuttl/spark-history-server/00-s3-secret.yaml rename to tests/templates/kuttl/spark-history-server/02-s3-secret.yaml index 2e3675f7..aa1eb9d7 100644 --- a/tests/templates/kuttl/spark-history-server/00-s3-secret.yaml +++ b/tests/templates/kuttl/spark-history-server/02-s3-secret.yaml @@ -4,7 +4,7 @@ kind: Secret metadata: name: minio-credentials labels: - secrets.stackable.tech/class: s3-credentials-class + secrets.stackable.tech/class: spark-history-server-s3-credentials-class stringData: accessKey: minioAccessKey secretKey: minioSecretKey @@ -16,7 +16,7 @@ stringData: apiVersion: secrets.stackable.tech/v1alpha1 kind: SecretClass metadata: - name: s3-credentials-class + name: spark-history-server-s3-credentials-class spec: backend: k8sSearch: diff --git a/tests/templates/kuttl/spark-history-server/03-assert.yaml b/tests/templates/kuttl/spark-history-server/03-assert.yaml new file mode 100644 index 00000000..34cce1da --- /dev/null +++ b/tests/templates/kuttl/spark-history-server/03-assert.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 900 +--- +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: test-minio +status: + readyReplicas: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: eventlog-minio +status: + readyReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: minio-client + labels: + app: minio-client +status: + phase: Running +--- +apiVersion: v1 +kind: Pod +metadata: + name: eventlog-minio-client + labels: + app: eventlog-minio-client +status: + phase: Running diff --git a/tests/templates/kuttl/spark-history-server/00-setup-minio.yaml b/tests/templates/kuttl/spark-history-server/03-setup-minio.yaml similarity index 100% rename from tests/templates/kuttl/spark-history-server/00-setup-minio.yaml rename to tests/templates/kuttl/spark-history-server/03-setup-minio.yaml diff --git a/tests/templates/kuttl/spark-history-server/01-prepare-bucket.yaml.j2 b/tests/templates/kuttl/spark-history-server/04-prepare-bucket.yaml.j2 similarity index 100% rename from tests/templates/kuttl/spark-history-server/01-prepare-bucket.yaml.j2 rename to tests/templates/kuttl/spark-history-server/04-prepare-bucket.yaml.j2 diff --git a/tests/templates/kuttl/spark-history-server/00-s3-connection.yaml b/tests/templates/kuttl/spark-history-server/05-s3-connection.yaml similarity index 90% rename from tests/templates/kuttl/spark-history-server/00-s3-connection.yaml rename to tests/templates/kuttl/spark-history-server/05-s3-connection.yaml index 6a4d513e..097ac6d8 100644 --- a/tests/templates/kuttl/spark-history-server/00-s3-connection.yaml +++ b/tests/templates/kuttl/spark-history-server/05-s3-connection.yaml @@ -8,7 +8,7 @@ spec: port: 9000 accessStyle: Path credentials: - secretClass: s3-credentials-class + secretClass: spark-history-server-s3-credentials-class --- apiVersion: s3.stackable.tech/v1alpha1 kind: S3Connection diff --git a/tests/templates/kuttl/spark-history-server/05-assert.yaml b/tests/templates/kuttl/spark-history-server/06-assert.yaml similarity index 100% rename from tests/templates/kuttl/spark-history-server/05-assert.yaml rename to tests/templates/kuttl/spark-history-server/06-assert.yaml diff --git a/tests/templates/kuttl/spark-history-server/05-deploy-history-server.yaml.j2 b/tests/templates/kuttl/spark-history-server/06-deploy-history-server.yaml.j2 similarity index 100% rename from tests/templates/kuttl/spark-history-server/05-deploy-history-server.yaml.j2 rename to tests/templates/kuttl/spark-history-server/06-deploy-history-server.yaml.j2 diff --git a/tests/templates/kuttl/spark-ny-public-s3/00-assert.yaml b/tests/templates/kuttl/spark-ny-public-s3/00-assert.yaml index 863f6070..5baf8caa 100644 --- a/tests/templates/kuttl/spark-ny-public-s3/00-assert.yaml +++ b/tests/templates/kuttl/spark-ny-public-s3/00-assert.yaml @@ -3,22 +3,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 900 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-minio -status: - readyReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: minio-client - labels: - app: minio-client -status: - phase: Running ---- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/tests/templates/kuttl/spark-ny-public-s3/02-assert.yaml b/tests/templates/kuttl/spark-ny-public-s3/02-assert.yaml new file mode 100644 index 00000000..fbbea3bd --- /dev/null +++ b/tests/templates/kuttl/spark-ny-public-s3/02-assert.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 900 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-minio +status: + readyReplicas: 1 +--- +apiVersion: v1 +kind: Pod 
+metadata: + name: minio-client + labels: + app: minio-client +status: + phase: Running diff --git a/tests/templates/kuttl/spark-ny-public-s3/00-setup-minio.yaml b/tests/templates/kuttl/spark-ny-public-s3/02-setup-minio.yaml similarity index 100% rename from tests/templates/kuttl/spark-ny-public-s3/00-setup-minio.yaml rename to tests/templates/kuttl/spark-ny-public-s3/02-setup-minio.yaml diff --git a/tests/templates/kuttl/spark-ny-public-s3/01-prepare-bucket.yaml.j2 b/tests/templates/kuttl/spark-ny-public-s3/03-prepare-bucket.yaml.j2 similarity index 97% rename from tests/templates/kuttl/spark-ny-public-s3/01-prepare-bucket.yaml.j2 rename to tests/templates/kuttl/spark-ny-public-s3/03-prepare-bucket.yaml.j2 index a3278cfa..491f9a6b 100644 --- a/tests/templates/kuttl/spark-ny-public-s3/01-prepare-bucket.yaml.j2 +++ b/tests/templates/kuttl/spark-ny-public-s3/03-prepare-bucket.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: # give minio enough time to start - - command: sleep 5 + - command: sleep 10 - command: kubectl cp -n $NAMESPACE ny-tlc-report-1.1.0-{{ test_scenario['values']['examples'] }}.jar minio-client:/tmp - command: kubectl cp -n $NAMESPACE yellow_tripdata_2021-07.csv minio-client:/tmp - command: kubectl exec -n $NAMESPACE minio-client -- sh -c 'mc alias set test-minio http://test-minio:9000 $$MINIO_SERVER_ACCESS_KEY $$MINIO_SERVER_SECRET_KEY' diff --git a/tests/templates/kuttl/spark-pi-private-s3/00-assert.yaml b/tests/templates/kuttl/spark-pi-private-s3/00-assert.yaml index 863f6070..5baf8caa 100644 --- a/tests/templates/kuttl/spark-pi-private-s3/00-assert.yaml +++ b/tests/templates/kuttl/spark-pi-private-s3/00-assert.yaml @@ -3,22 +3,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 900 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-minio -status: - readyReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: minio-client - labels: - app: minio-client -status: - phase: Running ---- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/tests/templates/kuttl/spark-pi-private-s3/00-s3-secret.yaml b/tests/templates/kuttl/spark-pi-private-s3/02-s3-secret.yaml similarity index 81% rename from tests/templates/kuttl/spark-pi-private-s3/00-s3-secret.yaml rename to tests/templates/kuttl/spark-pi-private-s3/02-s3-secret.yaml index 5c78faeb..2898ff1a 100644 --- a/tests/templates/kuttl/spark-pi-private-s3/00-s3-secret.yaml +++ b/tests/templates/kuttl/spark-pi-private-s3/02-s3-secret.yaml @@ -4,7 +4,7 @@ kind: Secret metadata: name: minio-credentials labels: - secrets.stackable.tech/class: s3-credentials-class + secrets.stackable.tech/class: spark-pi-private-s3-credentials-class timeout: 240 stringData: accessKey: minioAccessKey @@ -17,7 +17,7 @@ stringData: apiVersion: secrets.stackable.tech/v1alpha1 kind: SecretClass metadata: - name: s3-credentials-class + name: spark-pi-private-s3-credentials-class spec: backend: k8sSearch: diff --git a/tests/templates/kuttl/spark-pi-private-s3/03-assert.yaml b/tests/templates/kuttl/spark-pi-private-s3/03-assert.yaml new file mode 100644 index 00000000..fbbea3bd --- /dev/null +++ b/tests/templates/kuttl/spark-pi-private-s3/03-assert.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 900 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-minio +status: + readyReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: minio-client + labels: + app: minio-client +status: + phase: Running diff --git 
a/tests/templates/kuttl/spark-pi-private-s3/00-setup-minio.yaml b/tests/templates/kuttl/spark-pi-private-s3/03-setup-minio.yaml similarity index 100% rename from tests/templates/kuttl/spark-pi-private-s3/00-setup-minio.yaml rename to tests/templates/kuttl/spark-pi-private-s3/03-setup-minio.yaml diff --git a/tests/templates/kuttl/spark-pi-private-s3/01-prepare-bucket.yaml.j2 b/tests/templates/kuttl/spark-pi-private-s3/04-prepare-bucket.yaml.j2 similarity index 96% rename from tests/templates/kuttl/spark-pi-private-s3/01-prepare-bucket.yaml.j2 rename to tests/templates/kuttl/spark-pi-private-s3/04-prepare-bucket.yaml.j2 index af4043f6..565d4e1c 100644 --- a/tests/templates/kuttl/spark-pi-private-s3/01-prepare-bucket.yaml.j2 +++ b/tests/templates/kuttl/spark-pi-private-s3/04-prepare-bucket.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: # give minio enough time to start - - command: sleep 5 + - command: sleep 10 - command: kubectl cp -n $NAMESPACE spark-examples_2.12-{{ test_scenario['values']['examples'] }}.jar minio-client:/tmp - command: kubectl exec -n $NAMESPACE minio-client -- sh -c 'mc alias set test-minio http://test-minio:9000 $$MINIO_SERVER_ACCESS_KEY $$MINIO_SERVER_SECRET_KEY' - command: kubectl exec -n $NAMESPACE minio-client -- mc mb test-minio/my-bucket diff --git a/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 index cb0eb87e..dc9d4b31 100644 --- a/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 @@ -16,6 +16,6 @@ spec: port: 9000 accessStyle: Path credentials: - secretClass: s3-credentials-class + secretClass: spark-pi-private-s3-credentials-class executor: instances: 1 diff --git a/tests/templates/kuttl/spark-pi-public-s3/00-assert.yaml b/tests/templates/kuttl/spark-pi-public-s3/00-assert.yaml index 863f6070..5baf8caa 100644 --- a/tests/templates/kuttl/spark-pi-public-s3/00-assert.yaml +++ b/tests/templates/kuttl/spark-pi-public-s3/00-assert.yaml @@ -3,22 +3,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 900 --- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test-minio -status: - readyReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: minio-client - labels: - app: minio-client -status: - phase: Running ---- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/tests/templates/kuttl/spark-pi-public-s3/02-assert.yaml b/tests/templates/kuttl/spark-pi-public-s3/02-assert.yaml new file mode 100644 index 00000000..fbbea3bd --- /dev/null +++ b/tests/templates/kuttl/spark-pi-public-s3/02-assert.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 900 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-minio +status: + readyReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: minio-client + labels: + app: minio-client +status: + phase: Running diff --git a/tests/templates/kuttl/spark-pi-public-s3/00-setup-minio.yaml b/tests/templates/kuttl/spark-pi-public-s3/02-setup-minio.yaml similarity index 100% rename from tests/templates/kuttl/spark-pi-public-s3/00-setup-minio.yaml rename to tests/templates/kuttl/spark-pi-public-s3/02-setup-minio.yaml diff --git a/tests/templates/kuttl/spark-pi-public-s3/01-prepare-bucket.yaml.j2 b/tests/templates/kuttl/spark-pi-public-s3/03-prepare-bucket.yaml.j2 similarity index 97% rename from 
tests/templates/kuttl/spark-pi-public-s3/01-prepare-bucket.yaml.j2 rename to tests/templates/kuttl/spark-pi-public-s3/03-prepare-bucket.yaml.j2 index 9aaf943f..94ce5723 100644 --- a/tests/templates/kuttl/spark-pi-public-s3/01-prepare-bucket.yaml.j2 +++ b/tests/templates/kuttl/spark-pi-public-s3/03-prepare-bucket.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: # give minio enough time to start - - command: sleep 5 + - command: sleep 10 - command: kubectl cp -n $NAMESPACE spark-examples_2.12-{{ test_scenario['values']['examples'] }}.jar minio-client:/tmp - command: kubectl exec -n $NAMESPACE minio-client -- sh -c 'mc alias set test-minio http://test-minio:9000 $$MINIO_SERVER_ACCESS_KEY $$MINIO_SERVER_SECRET_KEY' - command: kubectl exec -n $NAMESPACE minio-client -- mc mb test-minio/my-bucket
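
The refactoring in this diff replaces the per-role `Option<Resources<...>>` plumbing with operator-rs config fragments: user-supplied role config is merged with operator defaults (`config.merge(&DriverConfig::default_config())`) and then checked once via `fragment::validate(...)`. The sketch below is a minimal, standalone illustration of that merge-then-validate flow under simplified assumptions; the struct names and the hand-written `merge`/`validate` functions are stand-ins for the real operator-rs `Fragment`/`Merge` derives and `fragment::validate`, not the actual API.

```rust
// Simplified, standalone sketch of the merge-then-validate pattern used in this PR.
// The real code uses operator-rs `Fragment`/`Merge` derives; these types are
// illustrative stand-ins only.

#[derive(Clone, Debug, Default)]
struct ResourcesFragment {
    cpu_min: Option<String>,
    cpu_max: Option<String>,
    memory_limit: Option<String>,
}

#[derive(Debug)]
struct Resources {
    cpu_min: String,
    cpu_max: String,
    memory_limit: String,
}

impl ResourcesFragment {
    // Fill any field still `None` from `defaults`, mirroring
    // `config.merge(&DriverConfig::default_config())`.
    fn merge(&mut self, defaults: &ResourcesFragment) {
        self.cpu_min = self.cpu_min.take().or_else(|| defaults.cpu_min.clone());
        self.cpu_max = self.cpu_max.take().or_else(|| defaults.cpu_max.clone());
        self.memory_limit = self
            .memory_limit
            .take()
            .or_else(|| defaults.memory_limit.clone());
    }
}

// Counterpart of `fragment::validate`: reject fragments that are still incomplete.
fn validate(fragment: ResourcesFragment) -> Result<Resources, String> {
    Ok(Resources {
        cpu_min: fragment.cpu_min.ok_or("cpu.min missing")?,
        cpu_max: fragment.cpu_max.ok_or("cpu.max missing")?,
        memory_limit: fragment.memory_limit.ok_or("memory.limit missing")?,
    })
}

fn main() -> Result<(), String> {
    // Operator defaults, analogous to `DriverConfig::default_config()`.
    let defaults = ResourcesFragment {
        cpu_min: Some("1".into()),
        cpu_max: Some("2".into()),
        memory_limit: Some("2Gi".into()),
    };

    // What the user supplied in the custom resource (only cpu.max overridden).
    let mut user = ResourcesFragment {
        cpu_max: Some("1300m".into()),
        ..ResourcesFragment::default()
    };

    user.merge(&defaults);
    let resolved = validate(user)?;
    // cpu_min = "1", cpu_max = "1300m", memory_limit = "2Gi"
    println!("{resolved:?}");
    Ok(())
}
```

Note the same layering appears in `history.rs`, where the rolegroup fragment is merged over the role fragment and the defaults before validation; the one-line fix there (`fragment::validate(conf_rolegroup)` instead of `conf_defaults`) ensures the merged rolegroup config, not the untouched defaults, is what gets validated and returned.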