Skip to content

Commit c17aa71

Browse files
committed
1 parent 5a3eaa5 commit c17aa71

File tree

11 files changed

+197
-12
lines changed

11 files changed

+197
-12
lines changed

CHANGELOG.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,10 @@ All notable changes to this project will be documented in this file.
77
### Changed
88

99
- Include chart name when installing with a custom release name ([#205])
10+
- Added OpenShift compatibility ([#225])
1011

1112
[#205]: https://github.com/stackabletech/hdfs-operator/pull/205
13+
[#225]: https://github.com/stackabletech/hdfs-operator/pull/225
1214

1315
## [0.4.0] - 2022-06-30
1416

@@ -30,7 +32,6 @@ All notable changes to this project will be documented in this file.
3032
- `HADOOP_OPTS` for jmx exporter specified to `HADOOP_NAMENODE_OPTS`, `HADOOP_DATANODE_OPTS` and `HADOOP_JOURNALNODE_OPTS` to fix cli tool ([#148]).
3133
- [BREAKING] Specifying the product version has been changed to adhere to [ADR018](https://docs.stackable.tech/home/contributor/adr/ADR018-product_image_versioning.html) instead of just specifying the product version you will now have to add the Stackable image version as well, so `version: 3.5.8` becomes (for example) `version: 3.5.8-stackable0.1.0` ([#180])
3234

33-
3435
[#122]: https://github.com/stackabletech/hdfs-operator/pull/122
3536
[#130]: https://github.com/stackabletech/hdfs-operator/pull/130
3637
[#134]: https://github.com/stackabletech/hdfs-operator/pull/134

deploy/helm/hdfs-operator/templates/roles.yaml

Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,3 +89,83 @@ rules:
8989
- {{ include "operator.name" . }}clusters/status
9090
verbs:
9191
- patch
92+
- apiGroups:
93+
- rbac.authorization.k8s.io
94+
resources:
95+
- clusterroles
96+
verbs:
97+
- bind
98+
resourceNames:
99+
- {{ include "operator.name" . }}-clusterrole
100+
{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
101+
---
102+
apiVersion: security.openshift.io/v1
103+
kind: SecurityContextConstraints
104+
metadata:
105+
name: hdfs-scc
106+
annotations:
107+
kubernetes.io/description: |-
108+
This resource is derived from hostmount-anyuid. It provides all the features of the
109+
restricted SCC but allows host mounts and any UID by a pod. This is primarily
110+
used by the persistent volume recycler. WARNING: this SCC allows host file
111+
system access as any UID, including UID 0. Grant with caution.
112+
release.openshift.io/create-only: "true"
113+
allowHostDirVolumePlugin: true
114+
allowHostIPC: false
115+
allowHostNetwork: false
116+
allowHostPID: false
117+
allowHostPorts: false
118+
allowPrivilegeEscalation: true
119+
allowPrivilegedContainer: false
120+
allowedCapabilities: null
121+
defaultAddCapabilities: null
122+
fsGroup:
123+
type: RunAsAny
124+
readOnlyRootFilesystem: false
125+
runAsUser:
126+
type: RunAsAny
127+
seLinuxContext:
128+
type: MustRunAs
129+
supplementalGroups:
130+
type: RunAsAny
131+
volumes:
132+
- configMap
133+
- downwardAPI
134+
- emptyDir
135+
- hostPath
136+
- nfs
137+
- persistentVolumeClaim
138+
- projected
139+
- secret
140+
- ephemeral
141+
{{ end }}
142+
---
143+
apiVersion: rbac.authorization.k8s.io/v1
144+
kind: ClusterRole
145+
metadata:
146+
name: {{ include "operator.name" . }}-clusterrole
147+
rules:
148+
- apiGroups:
149+
- ""
150+
resources:
151+
- configmaps
152+
- secrets
153+
- serviceaccounts
154+
verbs:
155+
- get
156+
- apiGroups:
157+
- events.k8s.io
158+
resources:
159+
- events
160+
verbs:
161+
- create
162+
{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
163+
- apiGroups:
164+
- security.openshift.io
165+
resources:
166+
- securitycontextconstraints
167+
resourceNames:
168+
- hdfs-scc
169+
verbs:
170+
- use
171+
{{ end }}

deploy/manifests/roles.yaml

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,3 +89,31 @@ rules:
8989
- hdfsclusters/status
9090
verbs:
9191
- patch
92+
- apiGroups:
93+
- rbac.authorization.k8s.io
94+
resources:
95+
- clusterroles
96+
verbs:
97+
- bind
98+
resourceNames:
99+
- hdfs-clusterrole
100+
---
101+
apiVersion: rbac.authorization.k8s.io/v1
102+
kind: ClusterRole
103+
metadata:
104+
name: hdfs-clusterrole
105+
rules:
106+
- apiGroups:
107+
- ""
108+
resources:
109+
- configmaps
110+
- secrets
111+
- serviceaccounts
112+
verbs:
113+
- get
114+
- apiGroups:
115+
- events.k8s.io
116+
resources:
117+
- events
118+
verbs:
119+
- create

rust/crd/src/error.rs

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,17 @@ pub enum Error {
126126
JournalnodeJavaHeapConfig {
127127
source: stackable_operator::error::Error,
128128
},
129+
130+
#[error("failed to patch service account: {source}")]
131+
ApplyServiceAccount {
132+
name: String,
133+
source: stackable_operator::error::Error,
134+
},
135+
#[error("failed to patch role binding: {source}")]
136+
ApplyRoleBinding {
137+
name: String,
138+
source: stackable_operator::error::Error,
139+
},
129140
}
130141
pub type HdfsOperatorResult<T> = std::result::Result<T, Error>;
131142

rust/operator/src/hdfs_controller.rs

Lines changed: 29 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,11 @@ use crate::config::{
22
CoreSiteConfigBuilder, HdfsNodeDataDirectory, HdfsSiteConfigBuilder, ROOT_DATA_DIR,
33
};
44
use crate::discovery::build_discovery_configmap;
5-
use crate::OPERATOR_NAME;
5+
use crate::{rbac, OPERATOR_NAME};
66
use stackable_hdfs_crd::error::{Error, HdfsOperatorResult};
77
use stackable_hdfs_crd::{constants::*, ROLE_PORTS};
88
use stackable_hdfs_crd::{HdfsCluster, HdfsPodRef, HdfsRole};
9-
use stackable_operator::builder::{ConfigMapBuilder, ObjectMetaBuilder};
9+
use stackable_operator::builder::{ConfigMapBuilder, ObjectMetaBuilder, PodSecurityContextBuilder};
1010
use stackable_operator::client::Client;
1111
use stackable_operator::k8s_openapi::api::core::v1::{
1212
Container, ContainerPort, ObjectFieldSelector, PodSpec, PodTemplateSpec, Probe,
@@ -25,6 +25,7 @@ use stackable_operator::kube::api::ObjectMeta;
2525
use stackable_operator::kube::runtime::controller::Action;
2626
use stackable_operator::kube::runtime::events::{Event, EventType, Recorder, Reporter};
2727
use stackable_operator::kube::runtime::reflector::ObjectRef;
28+
use stackable_operator::kube::ResourceExt;
2829
use stackable_operator::labels::role_group_selector_labels;
2930
use stackable_operator::memory::to_java_heap;
3031
use stackable_operator::product_config::{types::PropertyNameKind, ProductConfigManager};
@@ -72,6 +73,22 @@ pub async fn reconcile_hdfs(hdfs: Arc<HdfsCluster>, ctx: Arc<Ctx>) -> HdfsOperat
7273

7374
let dfs_replication = hdfs.spec.dfs_replication;
7475

76+
let (rbac_sa, rbac_rolebinding) = rbac::build_rbac_resources(hdfs.as_ref(), "hdfs");
77+
client
78+
.apply_patch(FIELD_MANAGER_SCOPE, &rbac_sa, &rbac_sa)
79+
.await
80+
.map_err(|source| Error::ApplyServiceAccount {
81+
source,
82+
name: rbac_sa.name_any(),
83+
})?;
84+
client
85+
.apply_patch(FIELD_MANAGER_SCOPE, &rbac_rolebinding, &rbac_rolebinding)
86+
.await
87+
.map_err(|source| Error::ApplyRoleBinding {
88+
source,
89+
name: rbac_rolebinding.name_any(),
90+
})?;
91+
7592
for (role_name, group_config) in validated_config.iter() {
7693
let role: HdfsRole = serde_yaml::from_str(role_name).unwrap();
7794
let role_ports = ROLE_PORTS.get(&role).unwrap().as_slice();
@@ -111,6 +128,7 @@ pub async fn reconcile_hdfs(hdfs: Arc<HdfsCluster>, ctx: Arc<Ctx>) -> HdfsOperat
111128
&rolegroup_ref,
112129
&namenode_podrefs,
113130
&hadoop_container,
131+
&rbac_sa.name_any(),
114132
)?;
115133

116134
client
@@ -295,6 +313,7 @@ fn rolegroup_statefulset(
295313
rolegroup_ref: &RoleGroupRef<HdfsCluster>,
296314
namenode_podrefs: &[HdfsPodRef],
297315
hadoop_container: &Container,
316+
rbac_sa: &str,
298317
) -> HdfsOperatorResult<StatefulSet> {
299318
tracing::info!("Setting up StatefulSet for {:?}", rolegroup_ref);
300319
let service_name = rolegroup_ref.object_name();
@@ -342,6 +361,14 @@ fn rolegroup_statefulset(
342361
}),
343362
..Volume::default()
344363
}]),
364+
service_account: Some(rbac_sa.to_string()),
365+
security_context: Some(
366+
PodSecurityContextBuilder::new()
367+
.run_as_user(rbac::HDFS_UID)
368+
.run_as_group(0)
369+
.fs_group(1000) // Needed for secret-operator
370+
.build(),
371+
),
345372
..PodSpec::default()
346373
}),
347374
};

rust/operator/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ mod config;
22
mod discovery;
33
mod hdfs_controller;
44
mod pod_svc_controller;
5+
mod rbac;
56

67
use std::sync::Arc;
78

rust/operator/src/rbac.rs

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
use stackable_operator::builder::ObjectMetaBuilder;
2+
use stackable_operator::k8s_openapi::api::core::v1::ServiceAccount;
3+
use stackable_operator::k8s_openapi::api::rbac::v1::{RoleBinding, RoleRef, Subject};
4+
use stackable_operator::kube::{Resource, ResourceExt};
5+
6+
/// Used as runAsUser in the pod security context. This is specified in the Hadoop image file
7+
pub const HDFS_UID: i64 = 1000;
8+
9+
/// Build RBAC objects for the product workloads.
10+
/// The `rbac_prefix` is meant to be the product name, for example: zookeeper, airflow, etc.
11+
/// and it is assumed that a ClusterRole named `{rbac_prefix}-clusterrole` exists.
12+
pub fn build_rbac_resources<T: Resource>(
13+
resource: &T,
14+
rbac_prefix: &str,
15+
) -> (ServiceAccount, RoleBinding) {
16+
let sa_name = format!("{rbac_prefix}-sa");
17+
let service_account = ServiceAccount {
18+
metadata: ObjectMetaBuilder::new()
19+
.name_and_namespace(resource)
20+
.name(sa_name.clone())
21+
.build(),
22+
..ServiceAccount::default()
23+
};
24+
25+
let role_binding = RoleBinding {
26+
metadata: ObjectMetaBuilder::new()
27+
.name_and_namespace(resource)
28+
.name(format!("{rbac_prefix}-rolebinding"))
29+
.build(),
30+
role_ref: RoleRef {
31+
kind: "ClusterRole".to_string(),
32+
name: format!("{rbac_prefix}-clusterrole"),
33+
api_group: "rbac.authorization.k8s.io".to_string(),
34+
},
35+
subjects: Some(vec![Subject {
36+
kind: "ServiceAccount".to_string(),
37+
name: sa_name,
38+
namespace: resource.namespace(),
39+
..Subject::default()
40+
}]),
41+
};
42+
43+
(service_account, role_binding)
44+
}

tests/templates/kuttl/fs-ops/01-assert.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ apiVersion: kuttl.dev/v1beta1
33
kind: TestAssert
44
metadata:
55
name: install-hdfs
6-
timeout: 300
6+
timeout: 600
77
---
88
apiVersion: apps/v1
99
kind: StatefulSet

tests/templates/kuttl/fs-ops/02-webhdfs.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,6 @@ spec:
1717
spec:
1818
containers:
1919
- name: webhdfs
20-
image: python:3.10-slim
20+
image: docker.stackable.tech/stackable/testing-tools:0.1.0-stackable0.1.0
2121
stdin: true
2222
tty: true

tests/templates/kuttl/fs-ops/03-create-file.yaml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,4 @@ kind: TestStep
44
commands:
55
- script: kubectl cp -n $NAMESPACE ./webhdfs.py webhdfs-0:/tmp
66
- script: kubectl cp -n $NAMESPACE ./testdata.txt webhdfs-0:/tmp
7-
- script: kubectl cp -n $NAMESPACE ./requirements.txt webhdfs-0:/tmp
8-
- script: kubectl exec -n $NAMESPACE webhdfs-0 -- pip install --user -r /tmp/requirements.txt
97
- script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py create

tests/templates/kuttl/fs-ops/requirements.txt

Lines changed: 0 additions & 5 deletions
This file was deleted.

0 commit comments

Comments
 (0)