diff --git a/CHANGELOG.md b/CHANGELOG.md
index 053ba3e7..4cc81015 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,15 +7,18 @@ All notable changes to this project will be documented in this file.
 
 ### Added
 
 - Default stackableVersion to operator version. It is recommended to remove `spec.image.stackableVersion` from your custom resources ([#267], [#268]).
+- Configuration overrides for the JVM security properties, such as DNS caching ([#272]).
 
 ### Changed
 
 - `vector` `0.26.0` -> `0.31.0` ([#269]).
 - `operator-rs` `0.44.0` -> `0.45.1` ([#267]).
+- Removed usages of SPARK_DAEMON_JAVA_OPTS since it's not a reliable way to pass extra JVM options ([#272]).
 
 [#267]: https://github.com/stackabletech/spark-k8s-operator/pull/267
 [#268]: https://github.com/stackabletech/spark-k8s-operator/pull/268
 [#269]: https://github.com/stackabletech/spark-k8s-operator/pull/269
+[#272]: https://github.com/stackabletech/spark-k8s-operator/pull/272
 
 ## [23.7.0] - 2023-07-14
diff --git a/deploy/config-spec/properties.yaml b/deploy/config-spec/properties.yaml
index c8bdb8de..8bb23059 100644
--- a/deploy/config-spec/properties.yaml
+++ b/deploy/config-spec/properties.yaml
@@ -3,4 +3,41 @@ version: 0.1.0
 spec:
   units: []
 
-properties: []
+properties:
+  - property: &jvmDnsCacheTtl
+      propertyNames:
+        - name: "networkaddress.cache.ttl"
+          kind:
+            type: "file"
+            file: "security.properties"
+      datatype:
+        type: "integer"
+        min: "0"
+      recommendedValues:
+        - fromVersion: "0.0.0"
+          value: "30"
+      roles:
+        - name: "node"
+          required: true
+      asOfVersion: "0.0.0"
+      comment: "History server - TTL for successfully resolved domain names."
+      description: "History server - TTL for successfully resolved domain names."
+
+  - property: &jvmDnsCacheNegativeTtl
+      propertyNames:
+        - name: "networkaddress.cache.negative.ttl"
+          kind:
+            type: "file"
+            file: "security.properties"
+      datatype:
+        type: "integer"
+        min: "0"
+      recommendedValues:
+        - fromVersion: "0.0.0"
+          value: "0"
+      roles:
+        - name: "node"
+          required: true
+      asOfVersion: "0.0.0"
+      comment: "History server - TTL for domain names that cannot be resolved."
+      description: "History server - TTL for domain names that cannot be resolved."
diff --git a/deploy/helm/spark-k8s-operator/configs/properties.yaml b/deploy/helm/spark-k8s-operator/configs/properties.yaml
index c8bdb8de..8bb23059 100644
--- a/deploy/helm/spark-k8s-operator/configs/properties.yaml
+++ b/deploy/helm/spark-k8s-operator/configs/properties.yaml
@@ -3,4 +3,41 @@ version: 0.1.0
 spec:
   units: []
 
-properties: []
+properties:
+  - property: &jvmDnsCacheTtl
+      propertyNames:
+        - name: "networkaddress.cache.ttl"
+          kind:
+            type: "file"
+            file: "security.properties"
+      datatype:
+        type: "integer"
+        min: "0"
+      recommendedValues:
+        - fromVersion: "0.0.0"
+          value: "30"
+      roles:
+        - name: "node"
+          required: true
+      asOfVersion: "0.0.0"
+      comment: "History server - TTL for successfully resolved domain names."
+      description: "History server - TTL for successfully resolved domain names."
+
+  - property: &jvmDnsCacheNegativeTtl
+      propertyNames:
+        - name: "networkaddress.cache.negative.ttl"
+          kind:
+            type: "file"
+            file: "security.properties"
+      datatype:
+        type: "integer"
+        min: "0"
+      recommendedValues:
+        - fromVersion: "0.0.0"
+          value: "0"
+      roles:
+        - name: "node"
+          required: true
+      asOfVersion: "0.0.0"
+      comment: "History server - TTL for domain names that cannot be resolved."
+      description: "History server - TTL for domain names that cannot be resolved."
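The recommended values above become the operator's DNS-cache defaults for the history server role. On a SparkApplication, the same settings can be overridden per role through the new `jvmSecurity` maps. A hedged sketch (field paths as documented in crd-reference.adoc further below; the override values are purely illustrative):

[source,yaml]
----
spec:
  driver:
    jvmSecurity:
      networkaddress.cache.ttl: "10"
  executor:
    jvmSecurity:
      networkaddress.cache.negative.ttl: "5"
----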
diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml
index 498ea45b..c519f368 100644
--- a/deploy/helm/spark-k8s-operator/crds/crds.yaml
+++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml
@@ -527,6 +527,12 @@ spec:
                     type: array
                 type: object
             type: object
+          jvmSecurity:
+            additionalProperties:
+              nullable: true
+              type: string
+            default: {}
+            type: object
           logging:
             default:
               enableVectorAgent: null
@@ -4015,6 +4021,12 @@ spec:
             minimum: 0.0
             nullable: true
             type: integer
+          jvmSecurity:
+            additionalProperties:
+              nullable: true
+              type: string
+            default: {}
+            type: object
           logging:
             default:
               enableVectorAgent: null
diff --git a/docs/modules/spark-k8s/pages/crd-reference.adoc b/docs/modules/spark-k8s/pages/crd-reference.adoc
index 682b36e1..58bcbd89 100644
--- a/docs/modules/spark-k8s/pages/crd-reference.adoc
+++ b/docs/modules/spark-k8s/pages/crd-reference.adoc
@@ -116,4 +116,10 @@ Below are listed the CRD fields that can be defined by the user:
 |`spec.logFileDirectory.prefix`
 |Prefix to use when storing events for the Spark History server.
 
+|`spec.driver.jvmSecurity`
+|A list of JVM security properties to pass on to the driver VM. The TTL of DNS caches is especially important.
+
+|`spec.executor.jvmSecurity`
+|A list of JVM security properties to pass on to the executor VM. The TTL of DNS caches is especially important.
+
 |===
diff --git a/docs/modules/spark-k8s/pages/usage-guide/history-server.adoc b/docs/modules/spark-k8s/pages/usage-guide/history-server.adoc
index f89464c7..4392d50b 100644
--- a/docs/modules/spark-k8s/pages/usage-guide/history-server.adoc
+++ b/docs/modules/spark-k8s/pages/usage-guide/history-server.adoc
@@ -31,7 +31,7 @@ The secret with S3 credentials must contain at least the following two keys:
 
 Any other entries of the Secret are ignored by the operator.
 
-== Application configuration
+== Spark application configuration
 
 The example below demonstrates how to configure Spark applications to write log events to an S3 bucket.
 
@@ -65,3 +65,28 @@ spark-history-node-cleaner NodePort 10.96.203.43 18080:325
 By setting up port forwarding on 18080 the UI can be opened by pointing your browser to `http://localhost:18080`:
 
 image::history-server-ui.png[History Server Console]
+
+== Configuration Properties
+
+For a role group of the Spark history server, you can specify `configOverrides` for the following files:
+
+- `security.properties`
+
+=== The security.properties file
+
+The `security.properties` file is used to configure JVM security properties. Users seldom need to tweak any of these, but one use case stands out that users should be aware of: the JVM DNS cache.
+
+The JVM manages its own cache of successfully resolved host names, as well as a cache of host names that cannot be resolved. Some products of the Stackable platform are very sensitive to the contents of these caches, and their performance is heavily affected by them. As of version 3.4.0, Apache Spark may perform poorly if the positive cache is disabled. To cache resolved host names, and thus speed up queries, you can configure the TTL of entries in the positive cache like this:
+
+[source,yaml]
+----
+  nodes:
+    configOverrides:
+      security.properties:
+        networkaddress.cache.ttl: "30"
+        networkaddress.cache.negative.ttl: "0"
+----
+
+NOTE: The operator configures DNS caching by default as shown in the example above.
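+
+Given these overrides, the `security.properties` file rendered into the ConfigMap is a plain Java properties file. A sketch of the expected output for the values above (keys are written in sorted order by the operator's properties writer):
+
+[source,properties]
+----
+networkaddress.cache.negative.ttl=0
+networkaddress.cache.ttl=30
+----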
+
+For details on JVM security, see https://docs.oracle.com/en/java/javase/11/security/java-security-overview1.html
diff --git a/rust/crd/src/constants.rs b/rust/crd/src/constants.rs
index bd677a35..d9ca862a 100644
--- a/rust/crd/src/constants.rs
+++ b/rust/crd/src/constants.rs
@@ -12,6 +12,7 @@ pub const VOLUME_MOUNT_PATH_EXECUTOR_POD_TEMPLATES: &str =
 pub const POD_TEMPLATE_FILE: &str = "template.yaml";
 
 pub const VOLUME_MOUNT_NAME_CONFIG: &str = "config";
+pub const VOLUME_MOUNT_PATH_CONFIG: &str = "/stackable/spark/conf";
 
 pub const VOLUME_MOUNT_NAME_JOB: &str = "job-files";
 pub const VOLUME_MOUNT_PATH_JOB: &str = "/stackable/spark/jobs";
@@ -27,6 +28,8 @@ pub const VOLUME_MOUNT_PATH_LOG: &str = "/stackable/log";
 
 pub const LOG4J2_CONFIG_FILE: &str = "log4j2.properties";
 
+pub const JVM_SECURITY_PROPERTIES_FILE: &str = "security.properties";
+
 pub const ACCESS_KEY_ID: &str = "accessKey";
 pub const SECRET_ACCESS_KEY: &str = "secretKey";
 pub const S3_SECRET_DIR_NAME: &str = "/stackable/secrets";
@@ -67,8 +70,7 @@ pub const HISTORY_ROLE_NAME: &str = "node";
 
 pub const HISTORY_IMAGE_BASE_NAME: &str = "spark-k8s";
 
-pub const HISTORY_CONFIG_FILE_NAME: &str = "spark-defaults.conf";
-pub const HISTORY_CONFIG_FILE_NAME_FULL: &str = "/stackable/spark/conf/spark-defaults.conf";
+pub const SPARK_DEFAULTS_FILE_NAME: &str = "spark-defaults.conf";
 
 pub const SPARK_CLUSTER_ROLE: &str = "spark-k8s-clusterrole";
 pub const SPARK_UID: i64 = 1000;
diff --git a/rust/crd/src/history.rs b/rust/crd/src/history.rs
index 2b872c47..28c72eb8 100644
--- a/rust/crd/src/history.rs
+++ b/rust/crd/src/history.rs
@@ -200,7 +200,10 @@ impl SparkHistoryServer {
         > = vec![(
             HISTORY_ROLE_NAME.to_string(),
             (
-                vec![PropertyNameKind::File(HISTORY_CONFIG_FILE_NAME.to_string())],
+                vec![
+                    PropertyNameKind::File(SPARK_DEFAULTS_FILE_NAME.to_string()),
+                    PropertyNameKind::File(JVM_SECURITY_PROPERTIES_FILE.to_string()),
+                ],
                 self.spec.nodes.clone(),
             ),
         )]
diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs
index b4029a5b..05f63574 100644
--- a/rust/crd/src/lib.rs
+++ b/rust/crd/src/lib.rs
@@ -538,11 +538,27 @@ impl SparkApplication {
             }
         }
 
-        // s3 with TLS
+        // Extra JVM opts:
+        // - java security properties
+        // - s3 with TLS
+        let mut extra_java_opts = vec![format!(
+            "-Djava.security.properties={VOLUME_MOUNT_PATH_LOG_CONFIG}/{JVM_SECURITY_PROPERTIES_FILE}"
+        )];
         if tlscerts::tls_secret_names(s3conn, s3_log_dir).is_some() {
-            submit_cmd.push(format!("--conf spark.driver.extraJavaOptions=\"-Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE}/truststore.p12 -Djavax.net.ssl.trustStorePassword={STACKABLE_TLS_STORE_PASSWORD} -Djavax.net.ssl.trustStoreType=pkcs12 -Djavax.net.debug=ssl,handshake\""));
-            submit_cmd.push(format!("--conf spark.executor.extraJavaOptions=\"-Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE}/truststore.p12 -Djavax.net.ssl.trustStorePassword={STACKABLE_TLS_STORE_PASSWORD} -Djavax.net.ssl.trustStoreType=pkcs12 -Djavax.net.debug=ssl,handshake\""));
+            extra_java_opts.extend(
+                vec![
+                    format!("-Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE}/truststore.p12"),
+                    format!("-Djavax.net.ssl.trustStorePassword={STACKABLE_TLS_STORE_PASSWORD}"),
+                    format!("-Djavax.net.ssl.trustStoreType=pkcs12"),
+                ]
+                .into_iter(),
+            );
         }
+        let str_extra_java_opts = extra_java_opts.join(" ");
+        submit_cmd.extend(vec![
+            format!("--conf spark.driver.extraJavaOptions=\"{str_extra_java_opts}\""),
+            format!("--conf spark.executor.extraJavaOptions=\"{str_extra_java_opts}\""),
+        ]);
 
         // repositories and packages arguments
         if let Some(deps) = self.spec.deps.clone() {
@@ -642,18 +658,6 @@ impl SparkApplication {
                 value_from: None,
             });
         }
-        if let Some(s3logdir) = s3logdir {
-            if tlscerts::tls_secret_name(&s3logdir.bucket.connection).is_some() {
-                e.push(EnvVar {
-                    name: "SPARK_DAEMON_JAVA_OPTS".to_string(),
-                    value: Some(format!(
-                        "-Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE}/truststore.p12 -Djavax.net.ssl.trustStorePassword={STACKABLE_TLS_STORE_PASSWORD} -Djavax.net.ssl.trustStoreType=pkcs12"
-                    )),
-                    value_from: None,
-                });
-            }
-        }
-
         e
     }
@@ -957,6 +961,8 @@ pub struct DriverConfig {
     #[fragment_attrs(serde(default))]
     #[fragment_attrs(schemars(schema_with = "pod_overrides_schema"))]
     pub pod_overrides: PodTemplateSpec,
+    #[fragment_attrs(serde(default))]
+    pub jvm_security: HashMap<String, Option<String>>,
 }
 
 impl DriverConfig {
@@ -977,6 +983,18 @@ impl DriverConfig {
             volume_mounts: Some(VolumeMounts::default()),
             affinity: StackableAffinityFragment::default(),
             pod_overrides: PodTemplateSpec::default(),
+            jvm_security: vec![
+                (
+                    "networkaddress.cache.ttl".to_string(),
+                    Some("30".to_string()),
+                ),
+                (
+                    "networkaddress.cache.negative.ttl".to_string(),
+                    Some("0".to_string()),
+                ),
+            ]
+            .into_iter()
+            .collect(),
         }
     }
 }
@@ -1011,6 +1029,8 @@ pub struct ExecutorConfig {
     #[fragment_attrs(serde(default))]
     #[fragment_attrs(schemars(schema_with = "pod_overrides_schema"))]
     pub pod_overrides: PodTemplateSpec,
+    #[fragment_attrs(serde(default))]
+    pub jvm_security: HashMap<String, Option<String>>,
 }
 
 impl ExecutorConfig {
@@ -1033,6 +1053,18 @@ impl ExecutorConfig {
             node_selector: Default::default(),
             affinity: Default::default(),
             pod_overrides: PodTemplateSpec::default(),
+            jvm_security: vec![
+                (
+                    "networkaddress.cache.ttl".to_string(),
+                    Some("30".to_string()),
+                ),
+                (
+                    "networkaddress.cache.negative.ttl".to_string(),
+                    Some("0".to_string()),
+                ),
+            ]
+            .into_iter()
+            .collect(),
         }
     }
 }
@@ -1053,7 +1085,7 @@ mod tests {
     };
     use stackable_operator::k8s_openapi::api::core::v1::PodTemplateSpec;
     use stackable_operator::product_logging::spec::Logging;
-    use std::collections::BTreeMap;
+    use std::collections::{BTreeMap, HashMap};
     use std::str::FromStr;
 
     #[test]
@@ -1419,6 +1451,7 @@ spec:
             volume_mounts: None,
             affinity: StackableAffinity::default(),
             pod_overrides: PodTemplateSpec::default(),
+            jvm_security: HashMap::new(),
         };
 
         let mut props = BTreeMap::new();
@@ -1474,6 +1507,7 @@ spec:
             node_selector: None,
             affinity: StackableAffinity::default(),
             pod_overrides: PodTemplateSpec::default(),
+            jvm_security: HashMap::new(),
         };
 
         let mut props = BTreeMap::new();
diff --git a/rust/crd/src/s3logdir.rs b/rust/crd/src/s3logdir.rs
index dfeafbdf..1a6c3970 100644
--- a/rust/crd/src/s3logdir.rs
+++ b/rust/crd/src/s3logdir.rs
@@ -95,7 +95,7 @@ impl S3LogDir {
     }
 
     /// Constructs the properties needed for loading event logs from S3.
-    /// These properties are later written in the `HISTORY_CONFIG_FILE_NAME_FULL` file.
+    /// These properties are later written in the `SPARK_DEFAULTS_FILE_NAME` file.
     ///
     /// The following properties related to credentials are not included:
     /// * spark.hadoop.fs.s3a.aws.credentials.provider
diff --git a/rust/operator-binary/src/history_controller.rs b/rust/operator-binary/src/history_controller.rs
index 4efa7778..f72d2f1a 100644
--- a/rust/operator-binary/src/history_controller.rs
+++ b/rust/operator-binary/src/history_controller.rs
@@ -19,7 +19,9 @@ use stackable_operator::{
         Resource, ResourceExt,
     },
     labels::{role_group_selector_labels, role_selector_labels, ObjectLabels},
-    product_config::ProductConfigManager,
+    product_config::{
+        types::PropertyNameKind, writer::to_java_properties_string, ProductConfigManager,
+    },
     product_logging::{
         framework::{calculate_log_volume_size_limit, vector_container},
         spec::{
@@ -30,14 +32,21 @@ use stackable_operator::{
     role_utils::RoleGroupRef,
 };
 use stackable_spark_k8s_crd::{
-    constants::*,
+    constants::{
+        ACCESS_KEY_ID, APP_NAME, HISTORY_CONTROLLER_NAME, HISTORY_IMAGE_BASE_NAME,
+        HISTORY_ROLE_NAME, JVM_SECURITY_PROPERTIES_FILE, LOG4J2_CONFIG_FILE,
+        MAX_SPARK_LOG_FILES_SIZE, OPERATOR_NAME, SECRET_ACCESS_KEY, SPARK_CLUSTER_ROLE,
+        SPARK_DEFAULTS_FILE_NAME, SPARK_UID, STACKABLE_TLS_STORE_PASSWORD, STACKABLE_TRUST_STORE,
+        VOLUME_MOUNT_NAME_CONFIG, VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_NAME_LOG_CONFIG,
+        VOLUME_MOUNT_PATH_CONFIG, VOLUME_MOUNT_PATH_LOG, VOLUME_MOUNT_PATH_LOG_CONFIG,
+    },
     history,
     history::{HistoryConfig, SparkHistoryServer, SparkHistoryServerContainer},
     s3logdir::S3LogDir,
     tlscerts,
 };
-use std::time::Duration;
 use std::{collections::BTreeMap, sync::Arc};
+use std::{collections::HashMap, time::Duration};
 
 use snafu::{OptionExt, ResultExt, Snafu};
 use stackable_operator::builder::resources::ResourceRequirementsBuilder;
@@ -122,6 +131,14 @@ pub enum Error {
     },
     #[snafu(display("cannot retrieve role group"))]
     CannotRetrieveRoleGroup { source: history::Error },
+    #[snafu(display(
+        "History server: failed to serialize [{JVM_SECURITY_PROPERTIES_FILE}] for group {}",
+        rolegroup
+    ))]
+    JvmSecurityProperties {
+        source: stackable_operator::product_config::writer::PropertiesWriterError,
+        rolegroup: String,
+    },
 }
 
 type Result<T, E = Error> = std::result::Result<T, E>;
@@ -197,14 +214,14 @@ pub async fn reconcile(shs: Arc<SparkHistoryServer>, ctx: Arc<Ctx>) -> Result
@@ … @@ pub async fn reconcile(shs: Arc<SparkHistoryServer>, ctx: Arc<Ctx>) -> Result
@@ … @@ pub fn error_policy(_object: Arc<SparkHistoryServer>, _error: &Error, _ctx: Arc<Ctx>)
 fn build_config_map(
     shs: &SparkHistoryServer,
-    config: &HistoryConfig,
+    config: &HashMap<PropertyNameKind, BTreeMap<String, String>>,
+    merged_config: &HistoryConfig,
     app_version_label: &str,
     rolegroupref: &RoleGroupRef<SparkHistoryServer>,
     s3_log_dir: &S3LogDir,
@@ -269,7 +288,17 @@ fn build_config_map(
 ) -> Result<ConfigMap> {
     let cm_name = rolegroupref.object_name();
 
-    let spark_config = spark_config(shs, s3_log_dir, rolegroupref)?;
+    let spark_defaults = spark_defaults(shs, s3_log_dir, rolegroupref)?;
+
+    let jvm_sec_props: BTreeMap<String, Option<String>> = config
+        .get(&PropertyNameKind::File(
+            JVM_SECURITY_PROPERTIES_FILE.to_string(),
+        ))
+        .cloned()
+        .unwrap_or_default()
+        .into_iter()
+        .map(|(k, v)| (k, Some(v)))
+        .collect();
 
     let mut cm_builder = ConfigMapBuilder::new();
 
@@ -283,12 +312,20 @@ fn build_config_map(
         .with_recommended_labels(labels(shs, app_version_label, &rolegroupref.role_group))
         .build(),
     )
-    .add_data(HISTORY_CONFIG_FILE_NAME, spark_config);
+    .add_data(SPARK_DEFAULTS_FILE_NAME, spark_defaults)
+    .add_data(
+        JVM_SECURITY_PROPERTIES_FILE,
+        to_java_properties_string(jvm_sec_props.iter()).with_context(|_| {
+            JvmSecurityPropertiesSnafu {
+                rolegroup: rolegroupref.role_group.clone(),
+            }
+        })?,
+    );
 
     product_logging::extend_config_map(
         rolegroupref,
         vector_aggregator_address,
-        &config.logging,
+        &merged_config.logging,
         SparkHistoryServerContainer::SparkHistory,
         SparkHistoryServerContainer::Vector,
         &mut cm_builder,
     )
@@ -328,7 +365,7 @@ fn build_stateful_set(
     pb.service_account_name(serviceaccount.name_unchecked())
         .image_pull_secrets_from_product_image(resolved_product_image)
        .add_volume(
-            VolumeBuilder::new("config")
+            VolumeBuilder::new(VOLUME_MOUNT_NAME_CONFIG)
                 .with_config_map(rolegroupref.object_name())
                 .build(),
         )
@@ -370,7 +407,7 @@ fn build_stateful_set(
         .add_container_port("http", 18080)
         .add_env_vars(env_vars(s3_log_dir))
         .add_volume_mounts(s3_log_dir.volume_mounts())
-        .add_volume_mount("config", "/stackable/spark/conf")
+        .add_volume_mount(VOLUME_MOUNT_NAME_CONFIG, VOLUME_MOUNT_PATH_CONFIG)
         .add_volume_mount(VOLUME_MOUNT_NAME_LOG_CONFIG, VOLUME_MOUNT_PATH_LOG_CONFIG)
         .add_volume_mount(VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_PATH_LOG)
         .build();
@@ -522,7 +559,7 @@ fn build_history_role_serviceaccount(
     Ok((sa, binding))
 }
 
-fn spark_config(
+fn spark_defaults(
     shs: &SparkHistoryServer,
     s3_log_dir: &S3LogDir,
     rolegroupref: &RoleGroupRef<SparkHistoryServer>,
@@ -560,7 +597,7 @@ fn command_args(s3logdir: &S3LogDir) -> Vec<String> {
     }
 
     command.extend(vec![
-        format!("/stackable/spark/sbin/start-history-server.sh --properties-file {HISTORY_CONFIG_FILE_NAME_FULL}"),
+        format!("/stackable/spark/sbin/start-history-server.sh --properties-file {VOLUME_MOUNT_PATH_CONFIG}/{SPARK_DEFAULTS_FILE_NAME}"),
     ]);
 
     vec![String::from("-c"), command.join(" && ")]
@@ -581,24 +618,30 @@ fn env_vars(s3logdir: &S3LogDir) -> Vec<EnvVar> {
         value: Some("/stackable/spark/extra-jars/*".into()),
         value_from: None,
     });
+
+    let mut history_opts = vec![
+        format!("-Dlog4j.configurationFile={VOLUME_MOUNT_PATH_LOG_CONFIG}/{LOG4J2_CONFIG_FILE}"),
+        format!(
+            "-Djava.security.properties={VOLUME_MOUNT_PATH_CONFIG}/{JVM_SECURITY_PROPERTIES_FILE}"
+        ),
+    ];
+    if tlscerts::tls_secret_name(&s3logdir.bucket.connection).is_some() {
+        history_opts.extend(
+            vec![
+                format!("-Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE}/truststore.p12"),
+                format!("-Djavax.net.ssl.trustStorePassword={STACKABLE_TLS_STORE_PASSWORD}"),
+                format!("-Djavax.net.ssl.trustStoreType=pkcs12"),
+            ]
+            .into_iter(),
+        );
+    }
+
     vars.push(EnvVar {
         name: "SPARK_HISTORY_OPTS".to_string(),
-        value: Some(format!(
-            "-Dlog4j.configurationFile={VOLUME_MOUNT_PATH_LOG_CONFIG}/{LOG4J2_CONFIG_FILE}"
-        )),
+        value: Some(history_opts.join(" ")),
         value_from: None,
     });
 
     // if TLS is enabled build truststore
-    if tlscerts::tls_secret_name(&s3logdir.bucket.connection).is_some() {
-        vars.push(EnvVar {
-            name: "SPARK_DAEMON_JAVA_OPTS".to_string(),
-            value: Some(format!(
-                "-Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE}/truststore.p12 -Djavax.net.ssl.trustStorePassword={STACKABLE_TLS_STORE_PASSWORD} -Djavax.net.ssl.trustStoreType=pkcs12 -Djavax.net.debug=ssl,handshake"
-            )),
-            value_from: None,
-        });
-    }
-
     vars
 }
diff --git a/rust/operator-binary/src/spark_k8s_controller.rs b/rust/operator-binary/src/spark_k8s_controller.rs
index 85473c48..553f8d72 100644
--- a/rust/operator-binary/src/spark_k8s_controller.rs
+++ b/rust/operator-binary/src/spark_k8s_controller.rs
@@ -1,5 +1,6 @@
 use std::{sync::Arc, time::Duration, vec};
 
+use stackable_operator::product_config::writer::to_java_properties_string;
 use stackable_spark_k8s_crd::{
     constants::*, s3logdir::S3LogDir, tlscerts, SparkApplication, SparkApplicationRole,
     SparkContainer, SparkStorageConfig, SubmitJobContainer,
@@ -112,6 +113,11 @@ pub enum Error {
         source: product_logging::Error,
         cm_name: String,
     },
+    #[snafu(display("failed to serialize [{JVM_SECURITY_PROPERTIES_FILE}] for {}", role))]
+    JvmSecurityProperties {
+        source: stackable_operator::product_config::writer::PropertiesWriterError,
+        role: SparkApplicationRole,
+    },
 }
 
 type Result<T, E = Error> = std::result::Result<T, E>;
@@ -129,6 +135,7 @@ pub struct PodTemplateConfig {
     pub volume_mounts: Vec<VolumeMount>,
     pub affinity: StackableAffinity,
     pub pod_overrides: PodTemplateSpec,
+    pub jvm_security: String,
 }
 
 pub async fn reconcile(spark_application: Arc<SparkApplication>, ctx: Arc<Ctx>) -> Result<Action> {
@@ -217,6 +224,11 @@ pub async fn reconcile(spark_application: Arc<SparkApplication>, ctx: Arc<Ctx>)
         ),
         affinity: driver_config.affinity,
         pod_overrides: driver_config.pod_overrides.clone(),
+        jvm_security: to_java_properties_string(driver_config.jvm_security.iter()).with_context(
+            |_| JvmSecurityPropertiesSnafu {
+                role: SparkApplicationRole::Driver,
+            },
+        )?,
     };
     let driver_pod_template_config_map = pod_template_config_map(
         &spark_application,
@@ -249,6 +261,11 @@ pub async fn reconcile(spark_application: Arc<SparkApplication>, ctx: Arc<Ctx>)
         ),
         affinity: executor_config.affinity,
         pod_overrides: executor_config.pod_overrides.clone(),
+        jvm_security: to_java_properties_string(executor_config.jvm_security.iter()).with_context(
+            |_| JvmSecurityPropertiesSnafu {
+                role: SparkApplicationRole::Executor,
+            },
+        )?,
     };
     let executor_pod_template_config_map = pod_template_config_map(
         &spark_application,
@@ -591,6 +608,7 @@ fn pod_template_config_map(
     )
     .context(InvalidLoggingConfigSnafu { cm_name })?;
 
+    cm_builder.add_data(JVM_SECURITY_PROPERTIES_FILE, config.jvm_security.clone());
     cm_builder.build().context(PodTemplateConfigMapSnafu)
 }
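For reference, a minimal self-contained sketch of the serialization step used throughout this patch: `to_java_properties_string` turns a `jvm_security`-style map into the body of `security.properties`. The call shape mirrors the controller code above; the `main` wrapper and sample values are illustrative only.

[source,rust]
----
use std::collections::BTreeMap;

use stackable_operator::product_config::writer::to_java_properties_string;

fn main() {
    // Same defaults the operator applies for the driver and executor jvm_security maps.
    let jvm_security: BTreeMap<String, Option<String>> = BTreeMap::from([
        (
            "networkaddress.cache.ttl".to_string(),
            Some("30".to_string()),
        ),
        (
            "networkaddress.cache.negative.ttl".to_string(),
            Some("0".to_string()),
        ),
    ]);

    // Renders one `key=value` line per entry; this string is what the
    // controllers add to the ConfigMap under the security.properties key.
    let rendered = to_java_properties_string(jvm_security.iter())
        .expect("properties are serializable");
    print!("{rendered}");
}
----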