From 06cc269ce4ed9b0b220bd9af86ea13ba860248d2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 6 May 2022 14:55:27 +0200 Subject: [PATCH 001/177] Add stacks describe command --- src/operator.rs | 1 + src/release.rs | 13 +++++---- src/stack.rs | 72 +++++++++++++++++++++++++++++++++++++++++++------ stacks.yaml | 14 +++++++--- 4 files changed, 82 insertions(+), 18 deletions(-) diff --git a/src/operator.rs b/src/operator.rs index cb73c830..30f31828 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -19,6 +19,7 @@ pub enum CliCommandOperator { #[clap(alias("desc"))] Describe { operator: String, + #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, diff --git a/src/release.rs b/src/release.rs index 6b925507..ed7aa8f7 100644 --- a/src/release.rs +++ b/src/release.rs @@ -14,7 +14,7 @@ use std::sync::Mutex; lazy_static! { pub static ref RELEASE_FILES: Mutex> = Mutex::new(vec![ "https://raw.githubusercontent.com/stackabletech/stackablectl/main/releases.yaml" - .to_string() + .to_string(), ]); } @@ -30,6 +30,7 @@ pub enum CliCommandRelease { #[clap(alias("desc"))] Describe { release: String, + #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, @@ -191,12 +192,10 @@ fn get_releases() -> Releases { for release_file in RELEASE_FILES.lock().unwrap().deref() { let yaml = helpers::read_from_url_or_file(release_file); match yaml { - Ok(yaml) => { - let releases: Releases = serde_yaml::from_str(&yaml).unwrap_or_else(|err| { - panic!("Failed to parse release list from {release_file}: {err}") - }); - all_releases.extend(releases.releases.clone()); - } + Ok(yaml) => match serde_yaml::from_str::(&yaml) { + Ok(releases) => all_releases.extend(releases.releases), + Err(err) => warn!("Failed to parse release list from {release_file}: {err}"), + }, Err(err) => { warn!("Could not read from releases file \"{release_file}\": {err}"); } diff --git a/src/stack.rs b/src/stack.rs index 218923eb..cc9fcc51 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -4,14 +4,15 @@ use cached::proc_macro::cached; use clap::Parser; use indexmap::IndexMap; use lazy_static::lazy_static; -use log::warn; +use log::{error, warn}; use serde::{Deserialize, Serialize}; use std::ops::Deref; +use std::process::exit; use std::sync::Mutex; lazy_static! 
{ pub static ref STACK_FILES: Mutex> = Mutex::new(vec![ - "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml".to_string() + "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml".to_string(), ]); } @@ -23,12 +24,21 @@ pub enum CliCommandStack { #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, + /// Show details of a specific stack + #[clap(alias("desc"))] + Describe { + stack: String, + + #[clap(short, long, arg_enum, default_value = "text")] + output: OutputType, + }, } impl CliCommandStack { pub fn handle(&self) { match self { CliCommandStack::List { output } => list_stacks(output), + CliCommandStack::Describe { stack, output } => describe_stack(stack, output), } } } @@ -49,6 +59,8 @@ struct Stacks { struct Stack { description: String, stackable_release: String, + labels: Vec, + manifests: String, } fn list_stacks(output_type: &OutputType) { @@ -72,6 +84,42 @@ fn list_stacks(output_type: &OutputType) { } } +fn describe_stack(stack_name: &str, output_type: &OutputType) { + #[derive(Serialize)] + #[serde(rename_all = "camelCase")] + struct Output { + stack: String, + description: String, + stackable_release: String, + labels: Vec, + manifests: String, + } + + let stack = get_stack(stack_name); + let output = Output { + stack: stack_name.to_string(), + description: stack.description, + stackable_release: stack.stackable_release, + labels: stack.labels, + manifests: stack.manifests, + }; + + match output_type { + OutputType::Text => { + println!("Stack: {}", output.stack); + println!("Description: {}", output.description); + println!("Manifests: {}", output.manifests); + println!("Labels: {}", output.labels.join(", ")); + } + OutputType::Json => { + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } + OutputType::Yaml => { + println!("{}", serde_yaml::to_string(&output).unwrap()); + } + } +} + /// Cached because of potential slow network calls #[cached] fn get_stacks() -> Stacks { @@ -79,12 +127,10 @@ fn get_stacks() -> Stacks { for stack_file in STACK_FILES.lock().unwrap().deref() { let yaml = helpers::read_from_url_or_file(stack_file); match yaml { - Ok(yaml) => { - let stacks: Stacks = serde_yaml::from_str(&yaml).unwrap_or_else(|err| { - panic!("Failed to parse stack list from {stack_file}: {err}") - }); - all_stacks.extend(stacks.stacks.clone()); - } + Ok(yaml) => match serde_yaml::from_str::(&yaml) { + Ok(stacks) => all_stacks.extend(stacks.stacks), + Err(err) => warn!("Failed to parse stack list from {stack_file}: {err}"), + }, Err(err) => { warn!("Could not read from stacks file \"{stack_file}\": {err}"); } @@ -93,3 +139,13 @@ fn get_stacks() -> Stacks { Stacks { stacks: all_stacks } } + +fn get_stack(stack_name: &str) -> Stack { + get_stacks() + .stacks + .remove(stack_name) // We need to remove to take ownership + .unwrap_or_else(|| { + error!("Stack {stack_name} not found. 
Use `stackablectl stack list` to list the available stacks."); + exit(1); + }) +} diff --git a/stacks.yaml b/stacks.yaml index e927b4fe..b3ac98c2 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -3,10 +3,18 @@ stacks: trino-superset: description: A modern data-analysis stack with S3, Trino and Superset stackableRelease: 22.04-sbernauer - # TODO manifests + labels: + - trino + - superset + manifests: stacks/trino-superset.yaml # TODO additional services kafka-nifi-druid-superset: - description: Streaming applications and peristant storage with Druid on S3 and Superset for visualization + description: Streaming applications and persistent storage with Druid on S3 and Superset for visualization stackableRelease: 22.04-sbernauer - # TODO manifests + labels: + - kafka + - nifi + - druid + - superset + manifests: stacks/kafka-nifi-druid-superset.yaml # TODO additional services From 245212157df2298dee3a7d3397d9e835e32c1caa Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 6 May 2022 16:17:44 +0200 Subject: [PATCH 002/177] Add stack install command --- README.md | 1 + releases.yaml | 2 +- src/helm.rs | 3 +-- src/kube.rs | 16 +++++++++----- src/main.rs | 16 ++++++++++++++ src/release.rs | 2 +- src/stack.rs | 54 +++++++++++++++++++++++++++++++++++++++++++-- stacks.yaml | 15 +++++++++---- stacks/trino.yaml | 56 +++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 150 insertions(+), 15 deletions(-) create mode 100644 stacks/trino.yaml diff --git a/README.md b/README.md index 01559b43..0608af19 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ When the interface is stable we will write docs on how to use stackablectl and p Until than this document is mean for internal usage and will be replaced by the new docs. ## TODOs +* Switch from `kubectl` shell calls to proper library (best case in Rust) * Check if CRD resources still exist when uninstalling the operators. If so warn the user. * Use Result instead of panic!() in multiple places diff --git a/releases.yaml b/releases.yaml index 371bd461..77812da4 100644 --- a/releases.yaml +++ b/releases.yaml @@ -31,7 +31,7 @@ releases: superset: operatorVersion: 0.4.0 trino: - operatorVersion: 0.3.1 + operatorVersion: 0.3.2-nightly # Trino < 0.4.0 requires regorule-operator and 0.4.0 will be released soon-ish. zookeeper: operatorVersion: 0.9.0 alpha-3: diff --git a/src/helm.rs b/src/helm.rs index 03e44f11..28085221 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -1,5 +1,5 @@ use crate::helpers::{c_str_ptr_to_str, GoString}; -use crate::CliArgs; +use crate::{CliArgs, NAMESPACE}; use cached::proc_macro::cached; use lazy_static::lazy_static; use log::{debug, error, info, warn, LevelFilter}; @@ -10,7 +10,6 @@ use std::process::exit; use std::sync::Mutex; lazy_static! { - pub static ref NAMESPACE: Mutex = Mutex::new(String::new()); pub static ref HELM_REPOS: Mutex> = Mutex::new(HashMap::new()); pub static ref LOG_LEVEL: Mutex = Mutex::new(LevelFilter::Trace); } diff --git a/src/kube.rs b/src/kube.rs index 000b7967..403f4cd2 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,8 +1,14 @@ -// /// This function currently uses `kubectl apply`. -// /// In the future we want to switch to kube-rs or something else to not require the user to install kubectl. -// pub fn deploy_manifest(yaml: &str) { -// helpers::execute_command_with_stdin(vec!["kubectl", "apply", "-f", "-"], yaml); -// } +use crate::{helpers, NAMESPACE}; + +/// This function currently uses `kubectl apply`. 
+/// In the future we want to switch to kube-rs or something else to not require the user to install kubectl. +pub fn deploy_manifest(yaml: &str) { + let namespace = NAMESPACE.lock().unwrap(); + helpers::execute_command_with_stdin( + vec!["kubectl", "apply", "-n", &namespace, "-f", "-"], + yaml, + ); +} // use crate::kube::Error::TypelessManifest; // use kube::api::{DynamicObject, GroupVersionKind, TypeMeta}; diff --git a/src/main.rs b/src/main.rs index b411a557..7a91f8f2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,10 @@ +use std::sync::Mutex; + use crate::arguments::CliCommand; use arguments::CliArgs; use clap::Parser; +use lazy_static::lazy_static; +use log::info; mod arguments; mod helm; @@ -32,6 +36,10 @@ const AVAILABLE_OPERATORS: &[&str] = &[ "monitoring", ]; +lazy_static! { + pub static ref NAMESPACE: Mutex = Mutex::new(String::new()); +} + fn main() { let args = CliArgs::parse(); env_logger::builder() @@ -39,6 +47,14 @@ fn main() { .format_target(false) .filter_level(args.log_level) .init(); + + let namespace = &args.namespace; + if namespace != "default" { + info!("Deploying into non-default namespace.\ + Please make sure not to deploy the same operator multiple times in different namespaces unless you know what you are doing (TM)."); + } + *(NAMESPACE.lock().unwrap()) = namespace.to_string(); + helm::handle_common_cli_args(&args); release::handle_common_cli_args(&args); stack::handle_common_cli_args(&args); diff --git a/src/release.rs b/src/release.rs index ed7aa8f7..a7442c1d 100644 --- a/src/release.rs +++ b/src/release.rs @@ -167,7 +167,7 @@ fn describe_release(release_name: &str, output_type: &OutputType) { } } -fn install_release(release_name: &str) { +pub fn install_release(release_name: &str) { info!("Installing release {release_name}"); let release = get_release(release_name); diff --git a/src/stack.rs b/src/stack.rs index cc9fcc51..83e1c8ad 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -1,10 +1,10 @@ use crate::arguments::OutputType; -use crate::{helpers, CliArgs}; +use crate::{helpers, kind, kube, release, CliArgs}; use cached::proc_macro::cached; use clap::Parser; use indexmap::IndexMap; use lazy_static::lazy_static; -use log::{error, warn}; +use log::{error, info, warn}; use serde::{Deserialize, Serialize}; use std::ops::Deref; use std::process::exit; @@ -32,6 +32,27 @@ pub enum CliCommandStack { #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, + /// Install a specific stack + #[clap(alias("in"))] + Install { + /// Name of the stack to install + #[clap(required = true)] + stack: String, + + /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. + /// You need to have `docker` and `kind` installed. Have a look at the README at on how to install them. 
+ #[clap(short, long)] + kind_cluster: bool, + + /// Name of the kind cluster created if `--kind-cluster` is specified + #[clap( + long, + default_value = "stackable-data-platform", + requires = "kind-cluster" + )] + kind_cluster_name: String, + }, } impl CliCommandStack { @@ -39,6 +60,14 @@ impl CliCommandStack { match self { CliCommandStack::List { output } => list_stacks(output), CliCommandStack::Describe { stack, output } => describe_stack(stack, output), + CliCommandStack::Install { + stack, + kind_cluster, + kind_cluster_name, + } => { + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); + install_stack(stack); + } } } } @@ -120,6 +149,27 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { } } +fn install_stack(stack_name: &str) { + info!("Installing stack {stack_name}"); + let stack = get_stack(stack_name); + + release::install_release(&stack.stackable_release); + + info!("Installing products of stack {stack_name}"); + match helpers::read_from_url_or_file(&stack.manifests) { + Ok(manifests) => kube::deploy_manifest(&manifests), + Err(err) => { + panic!( + "Could not read stack manifests from file \"{}\": {err}", + &stack.manifests + ); + } + } + + info!(""); + info!("Installed stack {stack_name}. Have a nice day!"); +} + /// Cached because of potential slow network calls #[cached] fn get_stacks() -> Stacks { diff --git a/stacks.yaml b/stacks.yaml index b3ac98c2..7b3b2cec 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -1,16 +1,23 @@ --- stacks: + trino: + description: Simply stack only containing Trino + stackableRelease: 22.05-sbernauer + labels: + - trino + manifests: stacks/trino.yaml + # TODO additional services trino-superset: - description: A modern data-analysis stack with S3, Trino and Superset - stackableRelease: 22.04-sbernauer + description: NOT READY! A modern data-analysis stack with S3, Trino and Superset + stackableRelease: 22.05-sbernauer labels: - trino - superset manifests: stacks/trino-superset.yaml # TODO additional services kafka-nifi-druid-superset: - description: Streaming applications and persistent storage with Druid on S3 and Superset for visualization - stackableRelease: 22.04-sbernauer + description: NOT READY! 
Streaming applications and persistent storage with Druid on S3 and Superset for visualization + stackableRelease: 22.05-sbernauer labels: - kafka - nifi diff --git a/stacks/trino.yaml b/stacks/trino.yaml new file mode 100644 index 00000000..71281d21 --- /dev/null +++ b/stacks/trino.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: hive.stackable.tech/v1alpha1 +kind: HiveCluster +metadata: + name: simple-hive-derby +spec: + version: 2.3.9 + metastore: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + replicas: 1 + config: + database: + connString: jdbc:derby:;databaseName=/stackable/metastore_db;create=true + user: APP + password: mine + dbType: derby + s3Connection: + endPoint: changeme + accessKey: changeme + secretKey: changeme + sslEnabled: false + pathStyleAccess: true +--- +apiVersion: trino.stackable.tech/v1alpha1 +kind: TrinoCluster +metadata: + name: simple-trino +spec: + version: 0.0.377 + hiveConfigMapName: simple-hive-derby + s3: + endPoint: changeme + accessKey: changeme + secretKey: changeme + sslEnabled: false + pathStyleAccess: true + coordinators: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + replicas: 1 + config: {} + workers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + replicas: 1 + config: {} From 4d1c80d3bee9b18a9fbe97fa35c2e750a692800f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 6 May 2022 16:22:05 +0200 Subject: [PATCH 003/177] Add missing Stackable release output --- src/stack.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/stack.rs b/src/stack.rs index 83e1c8ad..5314e07c 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -137,6 +137,7 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { OutputType::Text => { println!("Stack: {}", output.stack); println!("Description: {}", output.description); + println!("Stackable release: {}", output.stackable_release); println!("Manifests: {}", output.manifests); println!("Labels: {}", output.labels.join(", ")); } From 7db372c687c0b7f10c52c75d1e4687361566cb72 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 8 Jun 2022 10:57:54 +0200 Subject: [PATCH 004/177] Fix merge conflicts --- releases.yaml | 6 +++--- src/release.rs | 2 +- src/stack.rs | 2 +- stacks/trino.yaml | 16 ++-------------- 4 files changed, 7 insertions(+), 19 deletions(-) diff --git a/releases.yaml b/releases.yaml index 77812da4..81332c66 100644 --- a/releases.yaml +++ b/releases.yaml @@ -2,10 +2,10 @@ releases: 22.05-sbernauer: releaseDate: 2022-05-06 - description: Non-offical release from sbernauer to demonstrate stackablectl. It includes the latest stable versions. + description: Non-official release from sbernauer to demonstrate stackablectl. It includes the latest stable versions. products: airflow: - operatorVersion: 0.2.0 + operatorVersion: 0.3.0 commons: operatorVersion: 0.1.0 druid: @@ -23,7 +23,7 @@ releases: opa: operatorVersion: 0.8.0 secret: - operatorVersion: 0.3.0 + operatorVersion: 0.4.0 spark: operatorVersion: 0.5.0 spark-k8s: diff --git a/src/release.rs b/src/release.rs index d253eef5..43aadd30 100644 --- a/src/release.rs +++ b/src/release.rs @@ -186,7 +186,7 @@ fn describe_release(release_name: &str, output_type: &OutputType) { /// If include_operators is an non-empty list only the whitelisted product operators will be installed. /// If exclude_operators is an non-empty list the blacklisted product operators will be skipped. 
-fn install_release(release_name: &str, include_products: &[String], exclude_products: &[String]) { +pub fn install_release(release_name: &str, include_products: &[String], exclude_products: &[String]) { info!("Installing release {release_name}"); let release = get_release(release_name); diff --git a/src/stack.rs b/src/stack.rs index 5314e07c..453b0400 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -154,7 +154,7 @@ fn install_stack(stack_name: &str) { info!("Installing stack {stack_name}"); let stack = get_stack(stack_name); - release::install_release(&stack.stackable_release); + release::install_release(&stack.stackable_release, &[], &[]); info!("Installing products of stack {stack_name}"); match helpers::read_from_url_or_file(&stack.manifests) { diff --git a/stacks/trino.yaml b/stacks/trino.yaml index 71281d21..43a2ce04 100644 --- a/stacks/trino.yaml +++ b/stacks/trino.yaml @@ -14,30 +14,18 @@ spec: replicas: 1 config: database: - connString: jdbc:derby:;databaseName=/stackable/metastore_db;create=true + connString: jdbc:derby:;databaseName=/tmp/metastore_db;create=true user: APP password: mine dbType: derby - s3Connection: - endPoint: changeme - accessKey: changeme - secretKey: changeme - sslEnabled: false - pathStyleAccess: true --- apiVersion: trino.stackable.tech/v1alpha1 kind: TrinoCluster metadata: name: simple-trino spec: - version: 0.0.377 + version: 377-stackable0 hiveConfigMapName: simple-hive-derby - s3: - endPoint: changeme - accessKey: changeme - secretKey: changeme - sslEnabled: false - pathStyleAccess: true coordinators: roleGroups: default: From 46b2edb2a3585a12e508764f0244e583a4fc7a0d Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 8 Jun 2022 14:13:30 +0200 Subject: [PATCH 005/177] Add support for plain yaml and helm charts inside stacks --- go-helm-wrapper/main.go | 3 +- src/helm.rs | 14 ++++- src/operator.rs | 1 + src/release.rs | 6 +- src/stack.rs | 73 +++++++++++++++++----- stacks.yaml | 48 ++++++++------ stacks/{trino.yaml => trino-superset.yaml} | 28 +++++++++ 7 files changed, 134 insertions(+), 39 deletions(-) rename stacks/{trino.yaml => trino-superset.yaml} (58%) diff --git a/go-helm-wrapper/main.go b/go-helm-wrapper/main.go index f0b57560..048110c6 100644 --- a/go-helm-wrapper/main.go +++ b/go-helm-wrapper/main.go @@ -19,7 +19,7 @@ func main() { } //export go_install_helm_release -func go_install_helm_release(releaseName string, chartName string, chartVersion string, namespace string, suppressOutput bool) { +func go_install_helm_release(releaseName string, chartName string, chartVersion string, valuesYaml string, namespace string, suppressOutput bool) { helmClient := getHelmClient(namespace, suppressOutput) timeout, _ := time.ParseDuration("10m") @@ -27,6 +27,7 @@ func go_install_helm_release(releaseName string, chartName string, chartVersion ReleaseName: releaseName, ChartName: chartName, Version: chartVersion, + ValuesYaml: valuesYaml, Namespace: namespace, UpgradeCRDs: true, Wait: true, diff --git a/src/helm.rs b/src/helm.rs index fb894126..f0a84599 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -19,6 +19,7 @@ extern "C" { release_name: GoString, chart_name: GoString, chart_version: GoString, + values_yaml: GoString, namespace: GoString, supress_output: bool, ); @@ -67,6 +68,7 @@ pub fn install_helm_release_from_repo( repo_name: &str, chart_name: &str, chart_version: Option<&str>, + values_yaml: Option<&str>, ) { if helm_release_exists(release_name) { let helm_release = get_helm_release(release_name).unwrap_or_else(|| { @@ -108,7 +110,7 @@ pub 
fn install_helm_release_from_repo( let full_chart_name = format!("{repo_name}/{chart_name}"); let chart_version = chart_version.unwrap_or(">0.0.0-0"); debug!("Installing helm release {repo_name} from chart {full_chart_name} in version {chart_version}"); - install_helm_release(release_name, &full_chart_name, chart_version); + install_helm_release(release_name, &full_chart_name, chart_version, values_yaml); } /// Cached because of slow network calls @@ -140,12 +142,18 @@ pub fn uninstall_helm_release(release_name: &str) { } } -fn install_helm_release(release_name: &str, chart_name: &str, chart_version: &str) { +fn install_helm_release( + release_name: &str, + chart_name: &str, + chart_version: &str, + values_yaml: Option<&str>, +) { unsafe { go_install_helm_release( GoString::from(release_name), GoString::from(chart_name), GoString::from(chart_version), + GoString::from(values_yaml.unwrap_or("")), GoString::from(NAMESPACE.lock().unwrap().as_str()), *LOG_LEVEL.lock().unwrap() < LevelFilter::Debug, ) @@ -187,7 +195,7 @@ pub fn get_helm_release(release_name: &str) -> Option { .find(|release| release.name == release_name) } -fn add_helm_repo(name: &str, url: &str) { +pub fn add_helm_repo(name: &str, url: &str) { unsafe { go_add_helm_repo(GoString::from(name), GoString::from(url)) } } diff --git a/src/operator.rs b/src/operator.rs index cdb8a322..702c866b 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -288,6 +288,7 @@ impl Operator { helm_repo_name, &helm_release_name, self.version.as_deref(), + None, ); } } diff --git a/src/release.rs b/src/release.rs index 43aadd30..d1d3272a 100644 --- a/src/release.rs +++ b/src/release.rs @@ -186,7 +186,11 @@ fn describe_release(release_name: &str, output_type: &OutputType) { /// If include_operators is an non-empty list only the whitelisted product operators will be installed. /// If exclude_operators is an non-empty list the blacklisted product operators will be skipped. 
-pub fn install_release(release_name: &str, include_products: &[String], exclude_products: &[String]) { +pub fn install_release( + release_name: &str, + include_products: &[String], + exclude_products: &[String], +) { info!("Installing release {release_name}"); let release = get_release(release_name); diff --git a/src/stack.rs b/src/stack.rs index 453b0400..e5fa0853 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -1,10 +1,10 @@ use crate::arguments::OutputType; -use crate::{helpers, kind, kube, release, CliArgs}; +use crate::{helm, helpers, kind, kube, release, CliArgs}; use cached::proc_macro::cached; use clap::Parser; use indexmap::IndexMap; use lazy_static::lazy_static; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use serde::{Deserialize, Serialize}; use std::ops::Deref; use std::process::exit; @@ -89,7 +89,28 @@ struct Stack { description: String, stackable_release: String, labels: Vec, - manifests: String, + manifests: Vec, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +enum StackManifest { + #[serde(rename_all = "camelCase")] + HelmChart { + release_name: String, + name: String, + repo: HelmChartRepo, + version: String, + options: serde_yaml::Value, + }, + PlainYaml(String), +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +struct HelmChartRepo { + name: String, + url: String, } fn list_stacks(output_type: &OutputType) { @@ -121,7 +142,6 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { description: String, stackable_release: String, labels: Vec, - manifests: String, } let stack = get_stack(stack_name); @@ -130,7 +150,6 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { description: stack.description, stackable_release: stack.stackable_release, labels: stack.labels, - manifests: stack.manifests, }; match output_type { @@ -138,7 +157,6 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { println!("Stack: {}", output.stack); println!("Description: {}", output.description); println!("Stackable release: {}", output.stackable_release); - println!("Manifests: {}", output.manifests); println!("Labels: {}", output.labels.join(", ")); } OutputType::Json => { @@ -156,14 +174,41 @@ fn install_stack(stack_name: &str) { release::install_release(&stack.stackable_release, &[], &[]); - info!("Installing products of stack {stack_name}"); - match helpers::read_from_url_or_file(&stack.manifests) { - Ok(manifests) => kube::deploy_manifest(&manifests), - Err(err) => { - panic!( - "Could not read stack manifests from file \"{}\": {err}", - &stack.manifests - ); + info!("Installing components of stack {stack_name}"); + for manifest in stack.manifests { + match manifest { + StackManifest::HelmChart { + release_name, + name, + repo, + version, + options, + } => { + debug!("Installing helm chart {name} as {release_name}"); + helm::add_helm_repo(&repo.name, &repo.url); + + let values_yaml = serde_yaml::to_string(&options).unwrap(); + helm::install_helm_release_from_repo( + &release_name, + &release_name, + &repo.name, + &name, + Some(&version), + Some(&values_yaml), + ) + } + StackManifest::PlainYaml(yaml_url_or_file) => { + debug!("Installing yaml manifest from {yaml_url_or_file}"); + match helpers::read_from_url_or_file(&yaml_url_or_file) { + Ok(manifests) => kube::deploy_manifest(&manifests), + Err(err) => { + panic!( + "Could not read stack manifests from file \"{}\": {err}", + &yaml_url_or_file + ); + } + } + } } } diff --git a/stacks.yaml 
b/stacks.yaml index 7b3b2cec..bee0cea1 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -1,27 +1,35 @@ --- stacks: - trino: - description: Simply stack only containing Trino - stackableRelease: 22.05-sbernauer - labels: - - trino - manifests: stacks/trino.yaml - # TODO additional services trino-superset: - description: NOT READY! A modern data-analysis stack with S3, Trino and Superset + description: BROKEN, ONLY FOR DEMONSTRATION PURPOSE! Stack containing MinIO, Trino and Superset for Data visualization stackableRelease: 22.05-sbernauer labels: + - minio - trino - superset - manifests: stacks/trino-superset.yaml - # TODO additional services - kafka-nifi-druid-superset: - description: NOT READY! Streaming applications and persistent storage with Druid on S3 and Superset for visualization - stackableRelease: 22.05-sbernauer - labels: - - kafka - - nifi - - druid - - superset - manifests: stacks/kafka-nifi-druid-superset.yaml - # TODO additional services + - s3 + manifests: + - helmChart: + releaseName: minio-trino + name: minio/minio + repo: + name: minio + url: https://charts.min.io/ + version: 4.0.2 + options: + rootUser: accessKey + rootPassword: secretKey + mode: standalone + - helmChart: + releaseName: postgresql-superset + name: bitnami/postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 11.0.0 + options: + auth: + username: superset + password: superset + database: superset + - plainYaml: stacks/trino-superset.yaml diff --git a/stacks/trino.yaml b/stacks/trino-superset.yaml similarity index 58% rename from stacks/trino.yaml rename to stacks/trino-superset.yaml index 43a2ce04..010709b7 100644 --- a/stacks/trino.yaml +++ b/stacks/trino-superset.yaml @@ -1,4 +1,32 @@ --- +apiVersion: v1 +kind: Secret +metadata: + name: superset-credentials +type: Opaque +stringData: + adminUser.username: admin + adminUser.firstname: Superset + adminUser.lastname: Admin + adminUser.email: admin@superset.com + adminUser.password: admin + connections.secretKey: thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: SupersetCluster +metadata: + name: superset +spec: + version: 1.4.1 + statsdExporterVersion: v0.22.4 + credentialsSecret: superset-credentials + loadExamplesOnInit: true + nodes: + roleGroups: + default: + replicas: 1 +--- apiVersion: hive.stackable.tech/v1alpha1 kind: HiveCluster metadata: From 9c1c3fa88f857270e82c1316ff60d7f12beb7b0b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 8 Jun 2022 14:17:58 +0200 Subject: [PATCH 006/177] bump release name to 22.06-sbernauer --- releases.yaml | 4 ++-- src/stack.rs | 3 +-- stacks.yaml | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/releases.yaml b/releases.yaml index 81332c66..9f4ccdbd 100644 --- a/releases.yaml +++ b/releases.yaml @@ -1,7 +1,7 @@ --- releases: - 22.05-sbernauer: - releaseDate: 2022-05-06 + 22.06-sbernauer: + releaseDate: 2022-06-08 description: Non-official release from sbernauer to demonstrate stackablectl. It includes the latest stable versions. products: airflow: diff --git a/src/stack.rs b/src/stack.rs index e5fa0853..aeb0fa8e 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -212,8 +212,7 @@ fn install_stack(stack_name: &str) { } } - info!(""); - info!("Installed stack {stack_name}. 
Have a nice day!"); + info!("Installed stack {stack_name}"); } /// Cached because of potential slow network calls diff --git a/stacks.yaml b/stacks.yaml index bee0cea1..8e5c3d15 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -2,7 +2,7 @@ stacks: trino-superset: description: BROKEN, ONLY FOR DEMONSTRATION PURPOSE! Stack containing MinIO, Trino and Superset for Data visualization - stackableRelease: 22.05-sbernauer + stackableRelease: 22.06-sbernauer labels: - minio - trino From 560077ad17d463ca3d7eacba6073436b3973834b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 8 Jun 2022 14:25:56 +0200 Subject: [PATCH 007/177] typo --- stacks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks.yaml b/stacks.yaml index 8e5c3d15..d0a24b44 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -1,7 +1,7 @@ --- stacks: trino-superset: - description: BROKEN, ONLY FOR DEMONSTRATION PURPOSE! Stack containing MinIO, Trino and Superset for Data visualization + description: BROKEN, ONLY FOR DEMONSTRATION PURPOSE! Stack containing MinIO, Trino and Superset for data visualization stackableRelease: 22.06-sbernauer labels: - minio From 63341446030cd77efdec266a1a3f0c92ebc0e49f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 9 Jun 2022 12:25:17 +0200 Subject: [PATCH 008/177] Fix wrong addition of additional helm repos when installing stack --- src/helm.rs | 2 +- src/stack.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/helm.rs b/src/helm.rs index f0a84599..15c58a43 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -59,7 +59,7 @@ pub fn handle_common_cli_args(args: &CliArgs) { } /// Installs the specified helm chart with the release_name -/// If the release is already running it errors out (maybe in the future prompt the user if it should be deleted) +/// If the release is already running in a different version it errors out (maybe in the future prompt the user if it should be deleted) /// If the chart_version is None the version `>0.0.0-0` will be used. /// This is equivalent to the `helm install` flag `--devel`. 
pub fn install_helm_release_from_repo( diff --git a/src/stack.rs b/src/stack.rs index aeb0fa8e..98010b1c 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -1,4 +1,5 @@ use crate::arguments::OutputType; +use crate::helm::HELM_REPOS; use crate::{helm, helpers, kind, kube, release, CliArgs}; use cached::proc_macro::cached; use clap::Parser; @@ -185,7 +186,7 @@ fn install_stack(stack_name: &str) { options, } => { debug!("Installing helm chart {name} as {release_name}"); - helm::add_helm_repo(&repo.name, &repo.url); + HELM_REPOS.lock().unwrap().insert(repo.name.clone(), repo.url); let values_yaml = serde_yaml::to_string(&options).unwrap(); helm::install_helm_release_from_repo( From ff79db1d5fb001576ae8034128ee2b0a28e532b2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 9 Jun 2022 16:00:57 +0200 Subject: [PATCH 009/177] First working stack: druid-superset-s3 :) --- releases.yaml | 8 +-- stacks.yaml | 29 ++++++--- stacks/druid-superset/druid.yaml | 94 ++++++++++++++++++++++++++++ stacks/druid-superset/superset.yaml | 40 ++++++++++++ stacks/druid-superset/zookeeper.yaml | 11 ++++ stacks/trino-superset.yaml | 72 --------------------- 6 files changed, 168 insertions(+), 86 deletions(-) create mode 100644 stacks/druid-superset/druid.yaml create mode 100644 stacks/druid-superset/superset.yaml create mode 100644 stacks/druid-superset/zookeeper.yaml delete mode 100644 stacks/trino-superset.yaml diff --git a/releases.yaml b/releases.yaml index 9f4ccdbd..edbd01a1 100644 --- a/releases.yaml +++ b/releases.yaml @@ -9,7 +9,7 @@ releases: commons: operatorVersion: 0.1.0 druid: - operatorVersion: 0.5.0 + operatorVersion: 0.6.0-pr245 hbase: operatorVersion: 0.2.0 hdfs: @@ -29,11 +29,11 @@ releases: spark-k8s: operatorVersion: 0.1.0 superset: - operatorVersion: 0.4.0 + operatorVersion: 0.5.0-nightly trino: - operatorVersion: 0.3.2-nightly # Trino < 0.4.0 requires regorule-operator and 0.4.0 will be released soon-ish. + operatorVersion: 0.3.2-pr213 # Trino < 0.4.0 requires regorule-operator and 0.4.0 will be released soon-ish. Picking a fixed version anyway zookeeper: - operatorVersion: 0.9.0 + operatorVersion: 0.10.0-nightly alpha-3: releaseDate: 2022-02-14 description: Second release which added Airflow, Druid and Superset diff --git a/stacks.yaml b/stacks.yaml index d0a24b44..2e325245 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -1,28 +1,35 @@ --- stacks: - trino-superset: - description: BROKEN, ONLY FOR DEMONSTRATION PURPOSE! 
Stack containing MinIO, Trino and Superset for data visualization + druid-superset-s3: + description: Stack containing MinIO, Druid and Superset for data visualization stackableRelease: 22.06-sbernauer labels: - - minio - - trino + - druid - superset + - minio - s3 manifests: - helmChart: - releaseName: minio-trino - name: minio/minio + releaseName: minio-druid + name: minio repo: name: minio url: https://charts.min.io/ version: 4.0.2 options: - rootUser: accessKey - rootPassword: secretKey + rootUser: root + rootPassword: rootroot mode: standalone + users: + - accessKey: druid + secretKey: druiddruid + policy: readwrite + buckets: + - name: druid + policy: public - helmChart: releaseName: postgresql-superset - name: bitnami/postgresql + name: postgresql repo: name: bitnami url: https://charts.bitnami.com/bitnami/ @@ -32,4 +39,6 @@ stacks: username: superset password: superset database: superset - - plainYaml: stacks/trino-superset.yaml + - plainYaml: stacks/druid-superset/zookeeper.yaml + - plainYaml: stacks/druid-superset/druid.yaml + - plainYaml: stacks/druid-superset/superset.yaml diff --git a/stacks/druid-superset/druid.yaml b/stacks/druid-superset/druid.yaml new file mode 100644 index 00000000..d5da68e7 --- /dev/null +++ b/stacks/druid-superset/druid.yaml @@ -0,0 +1,94 @@ +--- +apiVersion: druid.stackable.tech/v1alpha1 +kind: DruidCluster +metadata: + name: druid +spec: + version: 0.22.1-authorizer0.1.0-stackable0.2.0 + zookeeperConfigMapName: druid-znode + metadataStorageDatabase: + dbType: derby + connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true + host: localhost # TODO why do i need to specify this? + port: 1527 # TODO why do i need to specify this? + deepStorage: + s3: + bucket: + inline: + bucketName: druid + connection: + inline: + host: minio-druid + port: 9000 + accessStyle: Path + credentials: + secretClass: druid-s3-credentials + baseKey: data # TODO Rename to prefix or so + brokers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + config: {} + replicas: 1 + coordinators: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + config: {} + replicas: 1 + historicals: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + config: {} + replicas: 1 + middleManagers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + config: {} + replicas: 1 + routers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + config: {} + replicas: 1 +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: druid-znode +spec: + clusterRef: + name: druid-zookeeper +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: druid-s3-credentials +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: druid-s3-credentials + labels: + secrets.stackable.tech/class: druid-s3-credentials +stringData: + accessKey: druid + secretKey: druiddruid diff --git a/stacks/druid-superset/superset.yaml b/stacks/druid-superset/superset.yaml new file mode 100644 index 00000000..de49e61a --- /dev/null +++ b/stacks/druid-superset/superset.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: superset-credentials +type: Opaque +stringData: + adminUser.username: admin + adminUser.firstname: Superset + adminUser.lastname: Admin + adminUser.email: admin@superset.com + adminUser.password: admin + connections.secretKey: 
thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: SupersetCluster +metadata: + name: superset +spec: + version: 1.4.1-stackable2.1.0 + statsdExporterVersion: v0.22.4 + credentialsSecret: superset-credentials + loadExamplesOnInit: true + nodes: + roleGroups: + default: + replicas: 1 +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: DruidConnection +metadata: + name: superset-druid-connection +spec: + superset: + name: superset + namespace: default # TODO this brakes the demo in non-default namespace. Why do i need to specify this? Why not search in the Namespace of the DruidConnection? + druid: + name: druid + namespace: default # TODO this brakes the demo in non-default namespace. Why do i need to specify this? Why not search in the Namespace of the DruidConnection? diff --git a/stacks/druid-superset/zookeeper.yaml b/stacks/druid-superset/zookeeper.yaml new file mode 100644 index 00000000..8ccad09f --- /dev/null +++ b/stacks/druid-superset/zookeeper.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: druid-zookeeper +spec: + version: 3.8.0-stackable0.7.1 + servers: + roleGroups: + default: + replicas: 1 diff --git a/stacks/trino-superset.yaml b/stacks/trino-superset.yaml deleted file mode 100644 index 010709b7..00000000 --- a/stacks/trino-superset.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: superset-credentials -type: Opaque -stringData: - adminUser.username: admin - adminUser.firstname: Superset - adminUser.lastname: Admin - adminUser.email: admin@superset.com - adminUser.password: admin - connections.secretKey: thisISaSECRET_1234 - connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset ---- -apiVersion: superset.stackable.tech/v1alpha1 -kind: SupersetCluster -metadata: - name: superset -spec: - version: 1.4.1 - statsdExporterVersion: v0.22.4 - credentialsSecret: superset-credentials - loadExamplesOnInit: true - nodes: - roleGroups: - default: - replicas: 1 ---- -apiVersion: hive.stackable.tech/v1alpha1 -kind: HiveCluster -metadata: - name: simple-hive-derby -spec: - version: 2.3.9 - metastore: - roleGroups: - default: - selector: - matchLabels: - kubernetes.io/os: linux - replicas: 1 - config: - database: - connString: jdbc:derby:;databaseName=/tmp/metastore_db;create=true - user: APP - password: mine - dbType: derby ---- -apiVersion: trino.stackable.tech/v1alpha1 -kind: TrinoCluster -metadata: - name: simple-trino -spec: - version: 377-stackable0 - hiveConfigMapName: simple-hive-derby - coordinators: - roleGroups: - default: - selector: - matchLabels: - kubernetes.io/os: linux - replicas: 1 - config: {} - workers: - roleGroups: - default: - selector: - matchLabels: - kubernetes.io/os: linux - replicas: 1 - config: {} From b1202fefbf6f2055afa3d11be8aa2f8a5803c8e1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 10 Jun 2022 14:23:51 +0200 Subject: [PATCH 010/177] First version that simply lists services --- Cargo.toml | 3 ++ src/arguments.rs | 9 +++- src/helm.rs | 7 ++- src/helpers.rs | 6 +-- src/kube.rs | 117 +++++++++++++++++++++++------------------------ src/main.rs | 15 ++++-- src/operator.rs | 47 +++++++++---------- src/release.rs | 37 ++++++++------- src/services.rs | 67 +++++++++++++++++++++++++++ src/stack.rs | 33 +++++++------ 10 files changed, 216 insertions(+), 125 deletions(-) create 
mode 100644 src/services.rs diff --git a/Cargo.toml b/Cargo.toml index 3b9da341..6b29f524 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,8 @@ cached = "0.34" clap = { version = "3.1", features = ["derive", "cargo"] } env_logger = "0.9" indexmap = { version = "1.8", features = ["serde"] } +k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } +kube = "0.73.1" lazy_static = "1.4" log = "0.4" which = "4.2" @@ -19,6 +21,7 @@ serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" reqwest = { version = "0.11", features = ["blocking"] } +tokio = "1.19.2" [profile.release] # strip = true # By default on Linux and macOS, symbol information is included in the compiled .elf file. diff --git a/src/arguments.rs b/src/arguments.rs index 8b37794b..5d4da6e1 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -1,4 +1,7 @@ -use crate::{operator::CliCommandOperator, release::CliCommandRelease, stack::CliCommandStack}; +use crate::{ + operator::CliCommandOperator, release::CliCommandRelease, services::CliCommandServices, + stack::CliCommandStack, +}; use clap::{ArgEnum, Parser}; use log::LevelFilter; @@ -68,6 +71,10 @@ pub enum CliCommand { /// This subcommand interacts with stacks, which are ready-to-use combinations of products. #[clap(subcommand, alias("s"), alias("st"))] Stack(CliCommandStack), + + /// This subcommand interacts with deployed services of products. + #[clap(subcommand, alias("se"))] + Services(CliCommandServices), } #[derive(Clone, Parser, ArgEnum)] diff --git a/src/helm.rs b/src/helm.rs index e203ca72..25d49e48 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -113,14 +113,17 @@ pub fn install_helm_release_from_repo( } /// Cached because of slow network calls +/// Not returning an Result because i couldn't get it to work with #[cached] #[cached] -pub fn get_repo_index(repo_url: String) -> HelmRepo { +pub async fn get_repo_index(repo_url: String) -> HelmRepo { let index_url = format!("{repo_url}/index.yaml"); debug!("Fetching helm repo index from {index_url}"); - let resp = reqwest::blocking::get(&index_url) + let resp = reqwest::get(&index_url) + .await .unwrap_or_else(|_| panic!("Failed to download helm repo index from {index_url}")) .text() + .await .unwrap_or_else(|_| panic!("Failed to get text from {index_url}")); serde_yaml::from_str(&resp) diff --git a/src/helpers.rs b/src/helpers.rs index 9871ce5d..d8f9656d 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -29,13 +29,13 @@ pub fn c_str_ptr_to_str(ptr: *const c_char) -> &'static str { c_str.to_str().unwrap() } -pub fn read_from_url_or_file(url_or_file: &str) -> Result { +pub async fn read_from_url_or_file(url_or_file: &str) -> Result { if let Ok(str) = fs::read_to_string(url_or_file) { return Ok(str); } - match reqwest::blocking::get(url_or_file) { - Ok(response) => Ok(response.text().unwrap()), + match reqwest::get(url_or_file).await { + Ok(response) => Ok(response.text().await.unwrap()), Err(err) => Err(format!( "Couldn't read a file or a URL with the name \"{url_or_file}\": {err}" )), diff --git a/src/kube.rs b/src/kube.rs index 403f4cd2..080210e1 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,3 +1,8 @@ +use indexmap::IndexMap; +use kube::{api::ListParams, Api, Client, ResourceExt}; +use serde::{Deserialize, Serialize}; +use std::{error::Error, vec}; + use crate::{helpers, NAMESPACE}; /// This function currently uses `kubectl apply`. 
@@ -10,63 +15,55 @@ pub fn deploy_manifest(yaml: &str) { ); } -// use crate::kube::Error::TypelessManifest; -// use kube::api::{DynamicObject, GroupVersionKind, TypeMeta}; -// use kube::{Client, Discovery}; -// use snafu::{OptionExt, ResultExt, Snafu}; -// -// pub const TEST: &str = r#" -// apiVersion: monitoring.coreos.com/v1 -// kind: ServiceMonitor -// foo: -// metadata: -// name: scrape-label -// labels: -// release: prometheus-operator -// spec: -// endpoints: -// - port: metrics -// jobLabel: app.kubernetes.io/instance -// selector: -// matchLabels: -// prometheus.io/scrape: "true" -// namespaceSelector: -// any: true -// "#; -// -// #[derive(Snafu, Debug)] -// pub enum Error { -// #[snafu(display("failed to create kubernetes client"))] -// CreateClient { source: kube::Error }, -// #[snafu(display("failed to parse manifest {manifest}"))] -// ParseManifest { -// source: serde_yaml::Error, -// manifest: String, -// }, -// #[snafu(display("manifest {manifest} has no type"))] -// TypelessManifest { manifest: String }, -// } -// -// // see https://gitlab.com/teozkr/thruster/-/blob/35b6291788fa209c52dd47fe6c96e1b483071793/src/apply.rs#L121-145 -// pub async fn deploy_manifest(yaml: &str) -> Result<(), Error> { -// let manifest = serde_yaml::from_str::(yaml).context(ParseManifestSnafu { -// manifest: yaml.to_string(), -// })?; -// let manifest_type = manifest.types.as_ref().context(TypelessManifestSnafu {manifest: yaml})?; -// let gvk = gvk_of_typemeta(manifest_type); -// -// let client = create_client().await?; -// -// Ok(()) -// } -// -// async fn create_client() -> Result { -// Client::try_default().await.context(CreateClientSnafu) -// } -// -// fn gvk_of_typemeta(tpe: &TypeMeta) -> GroupVersionKind { -// match tpe.api_version.split_once('/') { -// Some((group, version)) => GroupVersionKind::gvk(&group, &version, &tpe.kind), -// None => GroupVersionKind::gvk("", &tpe.api_version, &tpe.kind), -// } -// } +pub async fn get_services(namespaced: bool) -> Result, Box> { + let client = get_client().await?; + + let services: Api = match namespaced { + true => Api::default_namespaced(client), + false => Api::all(client), + }; + + Ok(services + .list(&ListParams::default()) + .await? + .iter() + .map(|service| { + let ports = service + .spec + .as_ref() + .unwrap() + .ports + .as_ref() + .unwrap() + .iter() + .map(|port| ServicePort { + name: port.name.clone(), + }) + .collect::>(); + ( + service.name(), + Service { + namespace: service.namespace().unwrap(), + ports, + }, + ) + }) + .collect()) +} + +async fn get_client() -> Result> { + Ok(Client::try_default().await?) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Service { + pub namespace: String, + pub ports: Vec, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ServicePort { + pub name: Option, +} diff --git a/src/main.rs b/src/main.rs index 52b5ca36..9bd53b51 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ use arguments::CliArgs; use clap::Parser; use lazy_static::lazy_static; use log::info; -use std::sync::Mutex; +use std::{error::Error, sync::Mutex}; mod arguments; mod helm; @@ -12,6 +12,7 @@ mod kind; mod kube; mod operator; mod release; +mod services; mod stack; const AVAILABLE_OPERATORS: &[&str] = &[ @@ -39,7 +40,8 @@ lazy_static! 
{ pub static ref NAMESPACE: Mutex = Mutex::new(String::new()); } -fn main() { +#[tokio::main] +async fn main() -> Result<(), Box> { let args = CliArgs::parse(); env_logger::builder() .format_timestamp(None) @@ -59,8 +61,11 @@ fn main() { stack::handle_common_cli_args(&args); match &args.cmd { - CliCommand::Operator(command) => command.handle(), - CliCommand::Release(command) => command.handle(), - CliCommand::Stack(command) => command.handle(), + CliCommand::Operator(command) => command.handle().await, + CliCommand::Release(command) => command.handle().await, + CliCommand::Stack(command) => command.handle().await, + CliCommand::Services(command) => command.handle().await?, } + + Ok(()) } diff --git a/src/operator.rs b/src/operator.rs index de55bc6e..16b76bfe 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -16,6 +16,8 @@ pub enum CliCommandOperator { /// Show details of a specific operator #[clap(alias("desc"))] Describe { + /// Name of the operator to describe + #[clap(required = true)] operator: String, #[clap(short, long, arg_enum, default_value = "text")] @@ -59,11 +61,11 @@ pub enum CliCommandOperator { } impl CliCommandOperator { - pub fn handle(&self) { + pub async fn handle(&self) { match self { - CliCommandOperator::List { output } => list_operators(output), + CliCommandOperator::List { output } => list_operators(output).await, CliCommandOperator::Describe { operator, output } => { - describe_operator(operator, output) + describe_operator(operator, output).await } CliCommandOperator::Install { operators, @@ -81,7 +83,7 @@ impl CliCommandOperator { } } -fn list_operators(output_type: &OutputType) { +async fn list_operators(output_type: &OutputType) { type Output = IndexMap; #[derive(Serialize)] @@ -92,19 +94,17 @@ fn list_operators(output_type: &OutputType) { dev_versions: Vec, } - let output: Output = AVAILABLE_OPERATORS - .iter() - .map(|operator| { - ( - operator.to_string(), - OutputOperatorEntry { - stable_versions: get_versions_from_repo(operator, "stackable-stable"), - test_versions: get_versions_from_repo(operator, "stackable-test"), - dev_versions: get_versions_from_repo(operator, "stackable-dev"), - }, - ) - }) - .collect(); + let mut output: Output = IndexMap::new(); + for operator in AVAILABLE_OPERATORS { + output.insert( + operator.to_string(), + OutputOperatorEntry { + stable_versions: get_versions_from_repo(operator, "stackable-stable").await, + test_versions: get_versions_from_repo(operator, "stackable-test").await, + dev_versions: get_versions_from_repo(operator, "stackable-dev").await, + }, + ); + } match output_type { OutputType::Text => { @@ -126,7 +126,7 @@ fn list_operators(output_type: &OutputType) { } } -fn describe_operator(operator: &str, output_type: &OutputType) { +async fn describe_operator(operator: &str, output_type: &OutputType) { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -137,9 +137,9 @@ fn describe_operator(operator: &str, output_type: &OutputType) { } let output = Output { operator: operator.to_string(), - stable_versions: get_versions_from_repo(operator, "stackable-stable"), - test_versions: get_versions_from_repo(operator, "stackable-test"), - dev_versions: get_versions_from_repo(operator, "stackable-dev"), + stable_versions: get_versions_from_repo(operator, "stackable-stable").await, + test_versions: get_versions_from_repo(operator, "stackable-test").await, + dev_versions: get_versions_from_repo(operator, "stackable-dev").await, }; match output_type { @@ -158,7 +158,7 @@ fn describe_operator(operator: &str, 
output_type: &OutputType) { } } -fn get_versions_from_repo(operator: &str, helm_repo_name: &str) -> Vec { +async fn get_versions_from_repo(operator: &str, helm_repo_name: &str) -> Vec { let chart_name = format!("{operator}-operator"); let repo = helm::get_repo_index( @@ -168,7 +168,8 @@ fn get_versions_from_repo(operator: &str, helm_repo_name: &str) -> Vec { .get(helm_repo_name) .unwrap_or_else(|| panic!("Could not find a helm repo with the name {helm_repo_name}")) .to_string(), - ); + ) + .await; match repo.entries.get(&chart_name) { None => { warn!("Could not find {operator} operator (chart name {chart_name}) in helm repo {helm_repo_name}"); diff --git a/src/release.rs b/src/release.rs index ec046764..0e17f05b 100644 --- a/src/release.rs +++ b/src/release.rs @@ -24,6 +24,8 @@ pub enum CliCommandRelease { /// Show details of a specific release #[clap(alias("desc"))] Describe { + /// Name of the release to describe + #[clap(required = true)] release: String, #[clap(short, long, arg_enum, default_value = "text")] @@ -75,10 +77,12 @@ pub enum CliCommandRelease { } impl CliCommandRelease { - pub fn handle(&self) { + pub async fn handle(&self) { match self { - CliCommandRelease::List { output } => list_releases(output), - CliCommandRelease::Describe { release, output } => describe_release(release, output), + CliCommandRelease::List { output } => list_releases(output).await, + CliCommandRelease::Describe { release, output } => { + describe_release(release, output).await + } CliCommandRelease::Install { release, include_products, @@ -87,9 +91,9 @@ impl CliCommandRelease { kind_cluster_name, } => { kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); - install_release(release, include_products, exclude_products); + install_release(release, include_products, exclude_products).await; } - CliCommandRelease::Uninstall { release } => uninstall_release(release), + CliCommandRelease::Uninstall { release } => uninstall_release(release).await, } } } @@ -119,8 +123,8 @@ struct ReleaseProduct { operator_version: String, } -fn list_releases(output_type: &OutputType) { - let output = get_releases(); +async fn list_releases(output_type: &OutputType) { + let output = get_releases().await; match output_type { OutputType::Text => { println!("RELEASE RELEASE DATE DESCRIPTION"); @@ -140,7 +144,7 @@ fn list_releases(output_type: &OutputType) { } } -fn describe_release(release_name: &str, output_type: &OutputType) { +async fn describe_release(release_name: &str, output_type: &OutputType) { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -150,7 +154,7 @@ fn describe_release(release_name: &str, output_type: &OutputType) { products: IndexMap, } - let release = get_release(release_name); + let release = get_release(release_name).await; let output = Output { release: release_name.to_string(), release_date: release.release_date, @@ -181,13 +185,13 @@ fn describe_release(release_name: &str, output_type: &OutputType) { /// If include_operators is an non-empty list only the whitelisted product operators will be installed. /// If exclude_operators is an non-empty list the blacklisted product operators will be skipped. 
-pub fn install_release( +pub async fn install_release( release_name: &str, include_products: &[String], exclude_products: &[String], ) { info!("Installing release {release_name}"); - let release = get_release(release_name); + let release = get_release(release_name).await; for (product_name, product) in release.products.into_iter() { let included = include_products.is_empty() || include_products.contains(&product_name); @@ -201,19 +205,19 @@ pub fn install_release( } } -fn uninstall_release(release_name: &str) { +async fn uninstall_release(release_name: &str) { info!("Uninstalling release {release_name}"); - let release = get_release(release_name); + let release = get_release(release_name).await; operator::uninstall_operators(&release.products.into_keys().collect()); } /// Cached because of potential slow network calls #[cached] -fn get_releases() -> Releases { +async fn get_releases() -> Releases { let mut all_releases: IndexMap = IndexMap::new(); for release_file in RELEASE_FILES.lock().unwrap().deref() { - let yaml = helpers::read_from_url_or_file(release_file); + let yaml = helpers::read_from_url_or_file(release_file).await; match yaml { Ok(yaml) => match serde_yaml::from_str::(&yaml) { Ok(releases) => all_releases.extend(releases.releases), @@ -230,8 +234,9 @@ fn get_releases() -> Releases { } } -fn get_release(release_name: &str) -> Release { +async fn get_release(release_name: &str) -> Release { get_releases() + .await .releases .remove(release_name) // We need to remove to take ownership .unwrap_or_else(|| { diff --git a/src/services.rs b/src/services.rs new file mode 100644 index 00000000..3e22cd99 --- /dev/null +++ b/src/services.rs @@ -0,0 +1,67 @@ +use std::error::Error; + +use clap::Parser; + +use crate::{arguments::OutputType, kube}; + +#[derive(Parser)] +pub enum CliCommandServices { + /// List deployed services + #[clap(alias("ls"))] + List { + /// If specified services of all namespaces will be shown, not only the namespace you're currently in + #[clap(short, long)] + all_namespaces: bool, + + #[clap(short, long, arg_enum, default_value = "text")] + output: OutputType, + }, +} + +impl CliCommandServices { + pub async fn handle(&self) -> Result<(), Box> { + match self { + CliCommandServices::List { + all_namespaces, + output, + } => list_services(*all_namespaces, output).await?, + } + Ok(()) + } +} + +async fn list_services( + all_namespaces: bool, + output_type: &OutputType, +) -> Result<(), Box> { + let output = kube::get_services(!all_namespaces).await?; + + match output_type { + OutputType::Text => { + println!( + "SERVICE NAMESPACE PORTS" + ); + for (service_name, service_entry) in output.iter() { + println!( + "{:40} {:30} {}", + service_name, + service_entry.namespace, + service_entry + .ports + .iter() + .filter_map(|port| port.name.clone()) + .collect::>() + .join(", ") + ); + } + } + OutputType::Json => { + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } + OutputType::Yaml => { + println!("{}", serde_yaml::to_string(&output).unwrap()); + } + } + + Ok(()) +} diff --git a/src/stack.rs b/src/stack.rs index e379eeee..27af38e3 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -24,6 +24,8 @@ pub enum CliCommandStack { /// Show details of a specific stack #[clap(alias("desc"))] Describe { + /// Name of the stack to describe + #[clap(required = true)] stack: String, #[clap(short, long, arg_enum, default_value = "text")] @@ -53,17 +55,17 @@ pub enum CliCommandStack { } impl CliCommandStack { - pub fn handle(&self) { + pub async fn handle(&self) { match 
self { - CliCommandStack::List { output } => list_stacks(output), - CliCommandStack::Describe { stack, output } => describe_stack(stack, output), + CliCommandStack::List { output } => list_stacks(output).await, + CliCommandStack::Describe { stack, output } => describe_stack(stack, output).await, CliCommandStack::Install { stack, kind_cluster, kind_cluster_name, } => { kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); - install_stack(stack); + install_stack(stack).await; } } } @@ -110,8 +112,8 @@ struct HelmChartRepo { url: String, } -fn list_stacks(output_type: &OutputType) { - let output = get_stacks(); +async fn list_stacks(output_type: &OutputType) { + let output = get_stacks().await; match output_type { OutputType::Text => { println!("STACK STACKABLE RELEASE DESCRIPTION"); @@ -131,7 +133,7 @@ fn list_stacks(output_type: &OutputType) { } } -fn describe_stack(stack_name: &str, output_type: &OutputType) { +async fn describe_stack(stack_name: &str, output_type: &OutputType) { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -141,7 +143,7 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { labels: Vec, } - let stack = get_stack(stack_name); + let stack = get_stack(stack_name).await; let output = Output { stack: stack_name.to_string(), description: stack.description, @@ -165,11 +167,11 @@ fn describe_stack(stack_name: &str, output_type: &OutputType) { } } -fn install_stack(stack_name: &str) { +async fn install_stack(stack_name: &str) { info!("Installing stack {stack_name}"); - let stack = get_stack(stack_name); + let stack = get_stack(stack_name).await; - release::install_release(&stack.stackable_release, &[], &[]); + release::install_release(&stack.stackable_release, &[], &[]).await; info!("Installing components of stack {stack_name}"); for manifest in stack.manifests { @@ -199,7 +201,7 @@ fn install_stack(stack_name: &str) { } StackManifest::PlainYaml(yaml_url_or_file) => { debug!("Installing yaml manifest from {yaml_url_or_file}"); - match helpers::read_from_url_or_file(&yaml_url_or_file) { + match helpers::read_from_url_or_file(&yaml_url_or_file).await { Ok(manifests) => kube::deploy_manifest(&manifests), Err(err) => { panic!( @@ -217,10 +219,10 @@ fn install_stack(stack_name: &str) { /// Cached because of potential slow network calls #[cached] -fn get_stacks() -> Stacks { +async fn get_stacks() -> Stacks { let mut all_stacks: IndexMap = IndexMap::new(); for stack_file in STACK_FILES.lock().unwrap().deref() { - let yaml = helpers::read_from_url_or_file(stack_file); + let yaml = helpers::read_from_url_or_file(stack_file).await; match yaml { Ok(yaml) => match serde_yaml::from_str::(&yaml) { Ok(stacks) => all_stacks.extend(stacks.stacks), @@ -235,8 +237,9 @@ fn get_stacks() -> Stacks { Stacks { stacks: all_stacks } } -fn get_stack(stack_name: &str) -> Stack { +async fn get_stack(stack_name: &str) -> Stack { get_stacks() + .await .stacks .remove(stack_name) // We need to remove to take ownership .unwrap_or_else(|| { From c3d632ace667e49a8dbeab7f6ca7592c1d94c163 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 10 Jun 2022 14:54:57 +0200 Subject: [PATCH 011/177] fix clippy warnings --- src/operator.rs | 18 +++++++++--------- src/release.rs | 5 +++-- src/stack.rs | 5 +++-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/operator.rs b/src/operator.rs index 16b76bfe..65ef8964 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -161,15 +161,15 @@ async fn describe_operator(operator: &str, output_type: 
&OutputType) { async fn get_versions_from_repo(operator: &str, helm_repo_name: &str) -> Vec { let chart_name = format!("{operator}-operator"); - let repo = helm::get_repo_index( - HELM_REPOS - .lock() - .unwrap() - .get(helm_repo_name) - .unwrap_or_else(|| panic!("Could not find a helm repo with the name {helm_repo_name}")) - .to_string(), - ) - .await; + let helm_repo_url = HELM_REPOS + .lock() + .unwrap() + .get(helm_repo_name) + .unwrap_or_else(|| panic!("Could not find a helm repo with the name {helm_repo_name}")) + .to_string(); + + let repo = helm::get_repo_index(helm_repo_url).await; + match repo.entries.get(&chart_name) { None => { warn!("Could not find {operator} operator (chart name {chart_name}) in helm repo {helm_repo_name}"); diff --git a/src/release.rs b/src/release.rs index 0e17f05b..bbcff626 100644 --- a/src/release.rs +++ b/src/release.rs @@ -216,8 +216,9 @@ async fn uninstall_release(release_name: &str) { #[cached] async fn get_releases() -> Releases { let mut all_releases: IndexMap = IndexMap::new(); - for release_file in RELEASE_FILES.lock().unwrap().deref() { - let yaml = helpers::read_from_url_or_file(release_file).await; + let release_files = RELEASE_FILES.lock().unwrap().deref().clone(); + for release_file in release_files { + let yaml = helpers::read_from_url_or_file(&release_file).await; match yaml { Ok(yaml) => match serde_yaml::from_str::(&yaml) { Ok(releases) => all_releases.extend(releases.releases), diff --git a/src/stack.rs b/src/stack.rs index 27af38e3..43d7e846 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -221,8 +221,9 @@ async fn install_stack(stack_name: &str) { #[cached] async fn get_stacks() -> Stacks { let mut all_stacks: IndexMap = IndexMap::new(); - for stack_file in STACK_FILES.lock().unwrap().deref() { - let yaml = helpers::read_from_url_or_file(stack_file).await; + let stack_files = STACK_FILES.lock().unwrap().deref().clone(); + for stack_file in stack_files { + let yaml = helpers::read_from_url_or_file(&stack_file).await; match yaml { Ok(yaml) => match serde_yaml::from_str::(&yaml) { Ok(stacks) => all_stacks.extend(stacks.stacks), From 2b8110d48cf18566e25795caf9950541db0d0d51 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 13 Jun 2022 13:16:32 +0200 Subject: [PATCH 012/177] First version that lists installed Product CRDs --- src/helm.rs | 2 +- src/kube.rs | 113 ++++++++++++++++++++++++++++++++++-------------- src/main.rs | 5 --- src/services.rs | 41 +++++++++++------- 4 files changed, 107 insertions(+), 54 deletions(-) diff --git a/src/helm.rs b/src/helm.rs index 25d49e48..9b8d8659 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -20,7 +20,7 @@ extern "C" { chart_version: GoString, values_yaml: GoString, namespace: GoString, - supress_output: bool, + suppress_output: bool, ); fn go_uninstall_helm_release( release_name: GoString, diff --git a/src/kube.rs b/src/kube.rs index 080210e1..e8747eae 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,9 +1,44 @@ use indexmap::IndexMap; -use kube::{api::ListParams, Api, Client, ResourceExt}; +use kube::{ + api::{DynamicObject, GroupVersionKind, ListParams}, + core::ErrorResponse, + Api, Client, ResourceExt, +}; +use lazy_static::{__Deref, lazy_static}; +use log::debug; use serde::{Deserialize, Serialize}; use std::{error::Error, vec}; -use crate::{helpers, NAMESPACE}; +use crate::{helpers, services::InstalledProduct, NAMESPACE}; + +lazy_static! 
{ + pub static ref PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = IndexMap::from([ + ( + "hive", + GroupVersionKind { + group: "hive.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HiveCluster".to_string(), + } + ), + ( + "opa", + GroupVersionKind { + group: "opa.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "OpaCluster".to_string(), + } + ), + ( + "doesnotexist", + GroupVersionKind { + group: "doesnotexist.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "DoesnotexistCluster".to_string(), + } + ), + ]); +} /// This function currently uses `kubectl apply`. /// In the future we want to switch to kube-rs or something else to not require the user to install kubectl. @@ -15,40 +50,52 @@ pub fn deploy_manifest(yaml: &str) { ); } -pub async fn get_services(namespaced: bool) -> Result, Box> { +pub async fn get_services( + namespaced: bool, +) -> Result>, Box> { + let mut result = IndexMap::new(); + let client = get_client().await?; - let services: Api = match namespaced { - true => Api::default_namespaced(client), - false => Api::all(client), - }; + for (product_name, product_gvk) in PRODUCT_CRDS.iter() { + let api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); + let api: Api = match namespaced { + true => Api::namespaced_with( + client.clone(), + NAMESPACE.lock().unwrap().deref(), + &api_resource, + ), + false => Api::all_with(client.clone(), &api_resource), + }; + let objects = api.list(&ListParams::default()).await; + match objects { + Ok(objects) => { + let installed_products = objects + .iter() + .map(|o| { + let endpoints = IndexMap::from([ + ("web-ui".to_string(), "http://todo.com".to_string()), + ("rpc".to_string(), "http://todo.com".to_string()), + ]); + InstalledProduct { + name: o.name(), + namespace: o.namespace(), + endpoints, + } + }) + .collect::>(); + result.insert(product_name.to_string(), installed_products); + } + Err(kube::Error::Api(ErrorResponse { code: 404, .. })) => { + debug!("ProductCRD for product {product_name} not installed"); + } + Err(err) => { + return Err(Box::new(err)); + } + } + } - Ok(services - .list(&ListParams::default()) - .await? 
- .iter()
- .map(|service| {
- let ports = service
- .spec
- .as_ref()
- .unwrap()
- .ports
- .as_ref()
- .unwrap()
- .iter()
- .map(|port| ServicePort {
- name: port.name.clone(),
- })
- .collect::>();
- (
- service.name(),
- Service {
- namespace: service.namespace().unwrap(),
- ports,
- },
- )
- })
- .collect())
+ Ok(result)
 }
 async fn get_client() -> Result> {
diff --git a/src/main.rs b/src/main.rs
index 9bd53b51..ee43351f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,7 +2,6 @@ use crate::arguments::CliCommand;
 use arguments::CliArgs;
 use clap::Parser;
 use lazy_static::lazy_static;
-use log::info;
 use std::{error::Error, sync::Mutex};
 mod arguments;
@@ -50,10 +49,6 @@ async fn main() -> Result<(), Box> {
 .init();
 let namespace = &args.namespace;
- if namespace != "default" {
- info!("Deploying into non-default namespace.\
- Please make sure not to deploy the same operator multiple times in different namespaces unless you know what you are doing (TM).");
- }
 *(NAMESPACE.lock().unwrap()) = namespace.to_string();
 helm::handle_common_cli_args(&args);
diff --git a/src/services.rs b/src/services.rs
index 3e22cd99..ca96e3e5 100644
--- a/src/services.rs
+++ b/src/services.rs
@@ -1,6 +1,8 @@ use std::error::Error;
 use clap::Parser;
+use indexmap::IndexMap;
+use serde::Serialize;
 use crate::{arguments::OutputType, kube};
@@ -30,6 +32,14 @@ impl CliCommandServices {
 }
 }
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct InstalledProduct {
+ pub name: String,
+ pub namespace: Option, // Some CRDs are cluster scoped
+ pub endpoints: IndexMap, // key: service name (e.g. web-ui), value: url
+}
+
 async fn list_services(
 all_namespaces: bool,
 output_type: &OutputType,
@@ -38,21 +48,22 @@ async fn list_services(
 match output_type {
 OutputType::Text => {
- println!(
- "SERVICE NAMESPACE PORTS"
- );
- for (service_name, service_entry) in output.iter() {
- println!(
- "{:40} {:30} {}",
- service_name,
- service_entry.namespace,
- service_entry
- .ports
- .iter()
- .filter_map(|port| port.name.clone())
- .collect::>()
- .join(", ")
- );
+ println!("PRODUCT NAMESPACE NAME ENDPOINTS");
+ for (product_name, installed_products) in output.iter() {
+ for installed_product in installed_products {
+ println!(
+ "{:20} {:30} {:40} {}",
+ product_name,
+ installed_product
+ .namespace
+ .as_ref()
+ .unwrap_or(&"~".to_string()),
+ installed_product.name,
+ installed_product.endpoints.iter().map(|(name, url)| {
+ format!("{:10} {url}", format!("{name}:"))
+ }).collect::>().join("\n ")
+ );
+ }
 }
 }
 OutputType::Json => {
 println!("{}", serde_json::to_string_pretty(&output).unwrap());
 }
 OutputType::Yaml => {
 println!("{}", serde_yaml::to_string(&output).unwrap());
 }
 }
 Ok(())
 }
From 370dbaca005e2df4be2b9ba6953e661bce53555f Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer 
Date: Mon, 13 Jun 2022 16:27:00 +0200
Subject: [PATCH 013/177] Implement looking up service ports and endpoints and
 node ips
---
 src/kube.rs | 195 ++++++++++++++++++++++++++++++++----------------
 src/services.rs | 119 ++++++++++++++++++++++++++++-
 2 files changed, 247 insertions(+), 67 deletions(-)
diff --git a/src/kube.rs b/src/kube.rs
index e8747eae..d785472f 100644
--- a/src/kube.rs
+++ b/src/kube.rs
@@ -1,44 +1,19 @@
+use crate::{
+ helpers,
+ services::{get_service_names, InstalledProduct, PRODUCT_CRDS},
+ NAMESPACE,
+};
+use cached::proc_macro::cached;
+use core::panic;
 use indexmap::IndexMap;
+use k8s_openapi::api::core::v1::{Endpoints, Node};
 use kube::{
- api::{DynamicObject, GroupVersionKind, ListParams},
+ api::{DynamicObject, ListParams},
 core::ErrorResponse,
 Api, Client, ResourceExt,
 };
-use lazy_static::{__Deref, lazy_static};
-use log::debug;
-use serde::{Deserialize,
Serialize}; -use std::{error::Error, vec}; - -use crate::{helpers, services::InstalledProduct, NAMESPACE}; - -lazy_static! { - pub static ref PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = IndexMap::from([ - ( - "hive", - GroupVersionKind { - group: "hive.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "HiveCluster".to_string(), - } - ), - ( - "opa", - GroupVersionKind { - group: "opa.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "OpaCluster".to_string(), - } - ), - ( - "doesnotexist", - GroupVersionKind { - group: "doesnotexist.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "DoesnotexistCluster".to_string(), - } - ), - ]); -} +use log::{debug, warn}; +use std::{collections::HashMap, error::Error, vec}; /// This function currently uses `kubectl apply`. /// In the future we want to switch to kube-rs or something else to not require the user to install kubectl. @@ -54,36 +29,44 @@ pub async fn get_services( namespaced: bool, ) -> Result>, Box> { let mut result = IndexMap::new(); + let namespace = NAMESPACE.lock().unwrap().clone(); let client = get_client().await?; for (product_name, product_gvk) in PRODUCT_CRDS.iter() { let api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); let api: Api = match namespaced { - true => Api::namespaced_with( - client.clone(), - NAMESPACE.lock().unwrap().deref(), - &api_resource, - ), + true => Api::namespaced_with(client.clone(), &namespace, &api_resource), false => Api::all_with(client.clone(), &api_resource), }; let objects = api.list(&ListParams::default()).await; match objects { Ok(objects) => { - let installed_products = objects - .iter() - .map(|o| { - let endpoints = IndexMap::from([ - ("web-ui".to_string(), "http://todo.com".to_string()), - ("rpc".to_string(), "http://todo.com".to_string()), - ]); - InstalledProduct { - name: o.name(), - namespace: o.namespace(), - endpoints, + let mut installed_products = Vec::new(); + for object in objects { + let object_name = object.name(); + let object_namespace = object.namespace(); + + let service_names = get_service_names(&object_name, product_name); + let mut endpoints = IndexMap::new(); + for service_name in service_names { + let service_endpoint_urls = + get_service_endpoint_urls(&service_name, object_namespace.as_ref().expect("Failed to get Namespace of object {object_name} besides it having an service"), client.clone()) + .await; + match service_endpoint_urls { + Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), + Err(err) => warn!( + "Failed to get endpoint_urls of service {service_name}: {err}" + ), } - }) - .collect::>(); + } + let product = InstalledProduct { + name: object_name, + namespace: object_namespace, + endpoints, + }; + installed_products.push(product); + } result.insert(product_name.to_string(), installed_products); } Err(kube::Error::Api(ErrorResponse { code: 404, .. })) => { @@ -98,19 +81,101 @@ pub async fn get_services( Ok(result) } -async fn get_client() -> Result> { - Ok(Client::try_default().await?) 
+pub async fn get_service_endpoint_urls( + service_name: &str, + namespace: &str, + client: Client, +) -> Result, Box> { + let service_api: Api = + Api::namespaced(client.clone(), namespace); + let service = service_api.get(service_name).await?; + + let endpoints_api: Api = Api::namespaced(client.clone(), namespace); + let endpoints = endpoints_api.get(service_name).await?; + + let node_name = match &endpoints.subsets { + Some(subsets) if subsets.len() == 1 => match &subsets[0].addresses { + Some(addresses) if addresses.len() == 1 => match &addresses[0].node_name { + Some(node_name) => node_name, + None => { + warn!("Could not determine the node the endpoint is running on because the address of the subset didn't had a node name"); + return Ok(IndexMap::new()); + } + }, + Some(_) => { + warn!("Could not determine the node the endpoint is running on because subset had multiple addresses"); + return Ok(IndexMap::new()); + } + None => { + warn!("Could not determine the node the endpoint is running on because subset had no addresses"); + return Ok(IndexMap::new()); + } + }, + Some(_) => { + warn!("Could not determine the node the endpoint is running on because endpoints consists of multiple subsets"); + return Ok(IndexMap::new()); + } + None => { + warn!("Could not determine the node the endpoint is running on because the endpoint has no subset"); + return Ok(IndexMap::new()); + } + }; + + let node_ip = get_node_ip(node_name).await; + + let mut result = IndexMap::new(); + for service_port in service.spec.unwrap().ports.unwrap_or_default() { + let port_name = service_port.name.unwrap_or_else(|| "".to_string()); + let port_number = service_port.port; + let node_port_number = service_port.node_port.unwrap_or(port_number); // TODO: Is this correct behavior? + + result.insert(port_name, format!("http://{node_ip}:{node_port_number} ")); + } + + Ok(result) +} + +async fn get_node_ip(node_name: &str) -> String { + let node_name_ip_mapping = get_node_name_ip_mapping().await; + match node_name_ip_mapping.get(node_name) { + Some(node_ip) => node_ip.to_string(), + None => panic!("Failed to find node {node_name} in node_name_ip_mapping"), + } } -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct Service { - pub namespace: String, - pub ports: Vec, +/// Not returning an Result, Error> because i couldn't get it to work with #[cached] +#[cached] +async fn get_node_name_ip_mapping() -> HashMap { + let client = get_client() + .await + .expect("Failed to create kubernetes client"); + let node_api: Api = Api::all(client); + let nodes = node_api + .list(&ListParams::default()) + .await + .expect("Failed to list kubernetes nodes"); + + let mut result = HashMap::new(); + for node in nodes { + let node_name = node.name(); + let preferred_node_ip = node + .status + .unwrap() + .addresses + .unwrap_or_else(|| panic!("Failed to get address of node {node_name}")) + .iter() + .filter(|address| address.type_ == "InternalIP" || address.type_ == "ExternalIP") + .max_by_key(|address| &address.type_) + .map(|address| address.address.clone()) // InternalIP is lower than ExternalIP + .unwrap_or_else(|| { + panic!("Could not find a InternalIP or ExternalIP for node {node_name}") + }); + result.insert(node_name, preferred_node_ip); + } + + result } -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct ServicePort { - pub name: Option, +async fn get_client() -> Result> { + Ok(Client::try_default().await?) 
} diff --git a/src/services.rs b/src/services.rs index ca96e3e5..9b49978e 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,11 +1,117 @@ +use core::panic; use std::error::Error; +use ::kube::api::GroupVersionKind; use clap::Parser; use indexmap::IndexMap; +use lazy_static::lazy_static; use serde::Serialize; use crate::{arguments::OutputType, kube}; +// Additional services we need to think of in the future +// * MinIO +lazy_static! { + pub static ref PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = IndexMap::from([ + ( + "airflow", + GroupVersionKind { + group: "airflow.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "AirflowCluster".to_string(), + } + ), + ( + "druid", + GroupVersionKind { + group: "druid.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "DruidCluster".to_string(), + } + ), + ( + "hbase", + GroupVersionKind { + group: "hbase.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HbaseCluster".to_string(), + } + ), + ( + "hdfs", + GroupVersionKind { + group: "hdfs.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HdfsCluster".to_string(), + } + ), + ( + "hive", + GroupVersionKind { + group: "hive.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HiveCluster".to_string(), + } + ), + ( + "kafka", + GroupVersionKind { + group: "kafka.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "KafkaCluster".to_string(), + } + ), + ( + "nifi", + GroupVersionKind { + group: "nifi.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "NifiCluster".to_string(), + } + ), + ( + "opa", + GroupVersionKind { + group: "opa.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "OpenPolicyAgent".to_string(), + } + ), + ( + "spark", + GroupVersionKind { + group: "spark.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "SparkCluster".to_string(), + } + ), + ( + "superset", + GroupVersionKind { + group: "superset.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "SupersetCluster".to_string(), + } + ), + ( + "trino", + GroupVersionKind { + group: "trino.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "TrinoCluster".to_string(), + } + ), + ( + "zookeeper", + GroupVersionKind { + group: "zookeeper.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "ZookeeperCluster".to_string(), + } + ), + ]); +} + #[derive(Parser)] pub enum CliCommandServices { /// List deployed services @@ -48,11 +154,11 @@ async fn list_services( match output_type { OutputType::Text => { - println!("PRODUCT NAMESPACE NAME ENDPOINTS"); + println!("PRODUCT NAMESPACE NAME ENDPOINTS"); for (product_name, installed_products) in output.iter() { for installed_product in installed_products { println!( - "{:20} {:30} {:40} {}", + "{:15} {:30} {:40} {}", product_name, installed_product .namespace @@ -76,3 +182,12 @@ async fn list_services( Ok(()) } + +pub fn get_service_names(product_name: &str, product: &str) -> Vec { + match product { + "druid" => vec![format!("{product_name}-middlemanager")], + "superset" => vec![format!("{product_name}-external")], + "zookeeper" => vec![product_name.to_string()], + _ => panic!("product {product} not known"), + } +} From 5538c322f726da2fbcfcd3dfa2a10091739a66f1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 14 Jun 2022 08:20:04 +0200 Subject: [PATCH 014/177] Update druid and zookeeper service names --- src/kube.rs | 10 
+++++----- src/services.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index d785472f..8d609794 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -98,25 +98,25 @@ pub async fn get_service_endpoint_urls( Some(addresses) if addresses.len() == 1 => match &addresses[0].node_name { Some(node_name) => node_name, None => { - warn!("Could not determine the node the endpoint is running on because the address of the subset didn't had a node name"); + warn!("Could not determine the node the endpoint {service_name} is running on because the address of the subset didn't had a node name"); return Ok(IndexMap::new()); } }, Some(_) => { - warn!("Could not determine the node the endpoint is running on because subset had multiple addresses"); + warn!("Could not determine the node the endpoint {service_name} is running on because subset had multiple addresses"); return Ok(IndexMap::new()); } None => { - warn!("Could not determine the node the endpoint is running on because subset had no addresses"); + warn!("Could not determine the node the endpoint {service_name} is running on because subset had no addresses"); return Ok(IndexMap::new()); } }, Some(_) => { - warn!("Could not determine the node the endpoint is running on because endpoints consists of multiple subsets"); + warn!("Could not determine the node the endpoint {service_name} is running on because endpoints consists of multiple subsets"); return Ok(IndexMap::new()); } None => { - warn!("Could not determine the node the endpoint is running on because the endpoint has no subset"); + warn!("Could not determine the node the endpoint {service_name} is running on because the endpoint has no subset"); return Ok(IndexMap::new()); } }; diff --git a/src/services.rs b/src/services.rs index 9b49978e..1aa656db 100644 --- a/src/services.rs +++ b/src/services.rs @@ -185,9 +185,9 @@ async fn list_services( pub fn get_service_names(product_name: &str, product: &str) -> Vec { match product { - "druid" => vec![format!("{product_name}-middlemanager")], + "druid" => vec![format!("{product_name}-router")], "superset" => vec![format!("{product_name}-external")], - "zookeeper" => vec![product_name.to_string()], + "zookeeper" => vec![], _ => panic!("product {product} not known"), } } From 998dd16600dcfb04cccc8c9dc898c8340718c5dd Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 14 Jun 2022 11:12:56 +0200 Subject: [PATCH 015/177] Add printing of extra infos --- src/kube.rs | 7 +++- src/services.rs | 97 +++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 91 insertions(+), 13 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 8d609794..cb74fa10 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,6 +1,6 @@ use crate::{ helpers, - services::{get_service_names, InstalledProduct, PRODUCT_CRDS}, + services::{get_extra_infos, get_service_names, InstalledProduct, PRODUCT_CRDS}, NAMESPACE, }; use cached::proc_macro::cached; @@ -48,6 +48,8 @@ pub async fn get_services( let object_namespace = object.namespace(); let service_names = get_service_names(&object_name, product_name); + let extra_infos = get_extra_infos(product_name, &object).await?; + let mut endpoints = IndexMap::new(); for service_name in service_names { let service_endpoint_urls = @@ -64,6 +66,7 @@ pub async fn get_services( name: object_name, namespace: object_namespace, endpoints, + extra_infos, }; installed_products.push(product); } @@ -176,6 +179,6 @@ async fn get_node_name_ip_mapping() -> HashMap { result } -async fn get_client() -> Result> 
{ +pub async fn get_client() -> Result> { Ok(Client::try_default().await?) } diff --git a/src/services.rs b/src/services.rs index 1aa656db..d8f76390 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,13 +1,20 @@ -use core::panic; use std::error::Error; -use ::kube::api::GroupVersionKind; +use ::kube::{ + api::{DynamicObject, GroupVersionKind}, + Api, ResourceExt, +}; use clap::Parser; use indexmap::IndexMap; +use k8s_openapi::api::core::v1::Secret; use lazy_static::lazy_static; +use log::warn; use serde::Serialize; -use crate::{arguments::OutputType, kube}; +use crate::{ + arguments::OutputType, + kube::{self, get_client}, +}; // Additional services we need to think of in the future // * MinIO @@ -144,6 +151,7 @@ pub struct InstalledProduct { pub name: String, pub namespace: Option, // Some CRDs are cluster scoped pub endpoints: IndexMap, // key: service name (e.g. web-ui), value: url + pub extra_infos: Vec, } async fn list_services( @@ -154,21 +162,49 @@ async fn list_services( match output_type { OutputType::Text => { - println!("PRODUCT NAMESPACE NAME ENDPOINTS"); + println!("PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS"); for (product_name, installed_products) in output.iter() { for installed_product in installed_products { println!( - "{:15} {:30} {:40} {}", + "{:12} {:40} {:30} {:40} {}", product_name, + installed_product.name, installed_product .namespace .as_ref() - .unwrap_or(&"~".to_string()), - installed_product.name, - installed_product.endpoints.iter().map(|(name, url)| { - format!("{:10} {url}", format!("{name}:")) - }).collect::>().join("\n ") + .map(|s| s.to_string()) + .unwrap_or_default(), + installed_product + .endpoints + .first() + .map(|(name, url)| { format!("{:10} {url}", format!("{name}:")) }) + .unwrap_or_default(), + installed_product + .extra_infos + .first() + .map(|s| s.to_string()) + .unwrap_or_default(), ); + + let mut endpoints = installed_product.endpoints.iter().skip(1); + let mut extra_infos = installed_product.extra_infos.iter().skip(1); + + loop { + let endpoint = endpoints.next(); + let extra_info = extra_infos.next(); + + println!( + " {:40} {}", + endpoint + .map(|(name, url)| { format!("{:10} {url}", format!("{name}:")) }) + .unwrap_or_default(), + extra_info.map(|s| s.to_string()).unwrap_or_default(), + ); + + if endpoint.is_none() && extra_info.is_none() { + break; + } + } } } } @@ -186,8 +222,47 @@ async fn list_services( pub fn get_service_names(product_name: &str, product: &str) -> Vec { match product { "druid" => vec![format!("{product_name}-router")], + "hive" => vec![], "superset" => vec![format!("{product_name}-external")], + "trino" => vec![format!("{product_name}-coordinator")], "zookeeper" => vec![], - _ => panic!("product {product} not known"), + _ => { + warn!("Cannot calculated exposed services names as product {product} is not known"); + vec![] + } } } + +pub async fn get_extra_infos( + product: &str, + product_crd: &DynamicObject, +) -> Result, Box> { + let mut result = match product_crd.data["spec"]["version"].as_str() { + Some(version) => Vec::from([format!("Version {version}")]), + None => Vec::new(), + }; + + match product { + "superset" => { + if let Some(secret_name) = product_crd.data["spec"]["credentialsSecret"].as_str() { + let client = get_client().await?; + let secret_api: Api = + Api::namespaced(client, &product_crd.namespace().unwrap()); + let secret = secret_api.get(secret_name).await?; + let secret_data = secret.data.unwrap(); + + if let (Some(username), Some(password)) = ( + 
secret_data.get("adminUser.username"), + secret_data.get("adminUser.password"), + ) { + let username = String::from_utf8(username.0.clone()).unwrap(); + let password = String::from_utf8(password.0.clone()).unwrap(); + result.push(format!("user: {username}, password: {password}")); + } + } + } + _ => (), + } + + Ok(result) +} From aba4fb80274cf10fb2ccb11511fe4887c4a09912 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 11:47:32 +0200 Subject: [PATCH 016/177] Add MinIO to the list of shown services --- releases.yaml | 2 -- src/kube.rs | 31 ++++++++++++++----- src/services.rs | 79 ++++++++++++++++++++++++++++++++++++++++++------- stacks.yaml | 6 ++++ 4 files changed, 98 insertions(+), 20 deletions(-) diff --git a/releases.yaml b/releases.yaml index 4c391b8c..9090a979 100644 --- a/releases.yaml +++ b/releases.yaml @@ -30,8 +30,6 @@ releases: operatorVersion: 0.8.0 secret: operatorVersion: 0.4.0 - spark: - operatorVersion: 0.5.0 spark-k8s: operatorVersion: 0.1.0 superset: diff --git a/src/kube.rs b/src/kube.rs index cb74fa10..75e50f91 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -25,7 +25,7 @@ pub fn deploy_manifest(yaml: &str) { ); } -pub async fn get_services( +pub async fn get_stackable_services( namespaced: bool, ) -> Result>, Box> { let mut result = IndexMap::new(); @@ -53,7 +53,10 @@ pub async fn get_services( let mut endpoints = IndexMap::new(); for service_name in service_names { let service_endpoint_urls = - get_service_endpoint_urls(&service_name, object_namespace.as_ref().expect("Failed to get Namespace of object {object_name} besides it having an service"), client.clone()) + get_service_endpoint_urls(&service_name, &object_name, object_namespace + .as_ref() + .expect("Failed to get the namespace of object {object_name} besides it having an service") + , client.clone()) .await; match service_endpoint_urls { Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), @@ -86,6 +89,7 @@ pub async fn get_services( pub async fn get_service_endpoint_urls( service_name: &str, + object_name: &str, namespace: &str, client: Client, ) -> Result, Box> { @@ -128,11 +132,24 @@ pub async fn get_service_endpoint_urls( let mut result = IndexMap::new(); for service_port in service.spec.unwrap().ports.unwrap_or_default() { - let port_name = service_port.name.unwrap_or_else(|| "".to_string()); - let port_number = service_port.port; - let node_port_number = service_port.node_port.unwrap_or(port_number); // TODO: Is this correct behavior? 
- - result.insert(port_name, format!("http://{node_ip}:{node_port_number} ")); + match service_port.node_port { + Some(node_port) => { + let endpoint_name = service_name + .trim_start_matches(object_name) + .trim_start_matches('-'); + + let port_name = service_port.name.unwrap_or_else(|| node_port.to_string()); + result.insert( + if endpoint_name.is_empty() { + port_name + } else { + format!("{endpoint_name}-{port_name}") + }, + format!("http://{node_ip}:{node_port} "), + ); + } + None => warn!("Could not get endpoint_url as service {service_name} has no nodePort"), + } } Ok(result) diff --git a/src/services.rs b/src/services.rs index d8f76390..63ab4080 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,19 +1,20 @@ use std::error::Error; use ::kube::{ - api::{DynamicObject, GroupVersionKind}, + api::{DynamicObject, GroupVersionKind, ListParams}, Api, ResourceExt, }; use clap::Parser; use indexmap::IndexMap; -use k8s_openapi::api::core::v1::Secret; +use k8s_openapi::api::{apps::v1::Deployment, core::v1::Secret}; use lazy_static::lazy_static; use log::warn; use serde::Serialize; use crate::{ arguments::OutputType, - kube::{self, get_client}, + kube::{self, get_client, get_service_endpoint_urls}, + NAMESPACE, }; // Additional services we need to think of in the future @@ -158,15 +159,19 @@ async fn list_services( all_namespaces: bool, output_type: &OutputType, ) -> Result<(), Box> { - let output = kube::get_services(!all_namespaces).await?; + let mut output = kube::get_stackable_services(!all_namespaces).await?; + output.insert( + "minio".to_string(), + get_minio_services(!all_namespaces).await?, + ); match output_type { OutputType::Text => { - println!("PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS"); + println!("PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS"); for (product_name, installed_products) in output.iter() { for installed_product in installed_products { println!( - "{:12} {:40} {:30} {:40} {}", + "{:12} {:40} {:30} {:50} {}", product_name, installed_product.name, installed_product @@ -177,7 +182,7 @@ async fn list_services( installed_product .endpoints .first() - .map(|(name, url)| { format!("{:10} {url}", format!("{name}:")) }) + .map(|(name, url)| { format!("{:20} {url}", format!("{name}:")) }) .unwrap_or_default(), installed_product .extra_infos @@ -194,9 +199,9 @@ async fn list_services( let extra_info = extra_infos.next(); println!( - " {:40} {}", + " {:50} {}", endpoint - .map(|(name, url)| { format!("{:10} {url}", format!("{name}:")) }) + .map(|(name, url)| { format!("{:20} {url}", format!("{name}:")) }) .unwrap_or_default(), extra_info.map(|s| s.to_string()).unwrap_or_default(), ); @@ -221,11 +226,14 @@ async fn list_services( pub fn get_service_names(product_name: &str, product: &str) -> Vec { match product { - "druid" => vec![format!("{product_name}-router")], + "druid" => vec![ + format!("{product_name}-router"), + format!("{product_name}-coordinator"), + ], "hive" => vec![], "superset" => vec![format!("{product_name}-external")], "trino" => vec![format!("{product_name}-coordinator")], - "zookeeper" => vec![], + "zookeeper" => vec![product_name.to_string()], _ => { warn!("Cannot calculated exposed services names as product {product} is not known"); vec![] @@ -266,3 +274,52 @@ pub async fn get_extra_infos( Ok(result) } + +async fn get_minio_services(namespaced: bool) -> Result, Box> { + let client = get_client().await?; + let deployment_api: Api = match namespaced { + true => Api::namespaced(client.clone(), NAMESPACE.lock().unwrap().as_str()), + false => 
Api::all(client.clone()), + }; + let list_params = ListParams::default().labels("app=minio"); + let minio_deployments = deployment_api.list(&list_params).await?; + + let mut result = Vec::new(); + for minio_deployment in minio_deployments { + let deployment_name = minio_deployment.name(); + let deployment_namespace = minio_deployment.namespace().unwrap(); + + let service_names = vec![ + deployment_name.clone(), + format!("{deployment_name}-console"), + ]; + let extra_infos = vec![ + "This service is not part of the official Stackable Platform".to_string(), + "It is provided as a helper utility".to_string(), + ]; + + let mut endpoints = IndexMap::new(); + for service_name in service_names { + let service_endpoint_urls = get_service_endpoint_urls( + &service_name, + &deployment_name, + &deployment_namespace, + client.clone(), + ) + .await; + match service_endpoint_urls { + Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), + Err(err) => warn!("Failed to get endpoint_urls of service {service_name}: {err}"), + } + } + let product = InstalledProduct { + name: deployment_name, + namespace: Some(deployment_namespace), + endpoints, + extra_infos, + }; + result.push(product); + } + + Ok(result) +} diff --git a/stacks.yaml b/stacks.yaml index 2e325245..a505d395 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -27,6 +27,12 @@ stacks: buckets: - name: druid policy: public + service: + type: NodePort + nodePort: null + consoleService: + type: NodePort + nodePort: null - helmChart: releaseName: postgresql-superset name: postgresql From 2651519f3a69127e56b0da109f8377d7ec75a210 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 11:58:07 +0200 Subject: [PATCH 017/177] Prefer ExternalIP over InternalIP --- src/kube.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 75e50f91..bbd044de 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -185,8 +185,8 @@ async fn get_node_name_ip_mapping() -> HashMap { .unwrap_or_else(|| panic!("Failed to get address of node {node_name}")) .iter() .filter(|address| address.type_ == "InternalIP" || address.type_ == "ExternalIP") - .max_by_key(|address| &address.type_) - .map(|address| address.address.clone()) // InternalIP is lower than ExternalIP + .min_by_key(|address| &address.type_) // ExternalIP (which we want) is lower than InternalIP + .map(|address| address.address.clone()) .unwrap_or_else(|| { panic!("Could not find a InternalIP or ExternalIP for node {node_name}") }); From d26f9f20b37e778a7ed795dd69241fee7156302a Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 12:00:14 +0200 Subject: [PATCH 018/177] Remove old TODO marker --- src/kube.rs | 4 ++-- src/services.rs | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index bbd044de..bf5a7a36 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,6 +1,6 @@ use crate::{ helpers, - services::{get_extra_infos, get_service_names, InstalledProduct, PRODUCT_CRDS}, + services::{get_extra_infos, get_service_names, InstalledProduct, STACKABLE_PRODUCT_CRDS}, NAMESPACE, }; use cached::proc_macro::cached; @@ -33,7 +33,7 @@ pub async fn get_stackable_services( let client = get_client().await?; - for (product_name, product_gvk) in PRODUCT_CRDS.iter() { + for (product_name, product_gvk) in STACKABLE_PRODUCT_CRDS.iter() { let api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); let api: Api = match namespaced { true => Api::namespaced_with(client.clone(), &namespace, 
&api_resource), diff --git a/src/services.rs b/src/services.rs index 63ab4080..3d8c3306 100644 --- a/src/services.rs +++ b/src/services.rs @@ -17,10 +17,8 @@ use crate::{ NAMESPACE, }; -// Additional services we need to think of in the future -// * MinIO lazy_static! { - pub static ref PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = IndexMap::from([ + pub static ref STACKABLE_PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = IndexMap::from([ ( "airflow", GroupVersionKind { From 88da20dea56cd217c3116b3876b7b0645b68dbdb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 12:38:52 +0200 Subject: [PATCH 019/177] Only put http:// in front if port is called http --- src/kube.rs | 20 ++++++++++++-------- src/services.rs | 8 ++++---- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index bf5a7a36..4540ca50 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -139,14 +139,18 @@ pub async fn get_service_endpoint_urls( .trim_start_matches('-'); let port_name = service_port.name.unwrap_or_else(|| node_port.to_string()); - result.insert( - if endpoint_name.is_empty() { - port_name - } else { - format!("{endpoint_name}-{port_name}") - }, - format!("http://{node_ip}:{node_port} "), - ); + let endpoint_name = if endpoint_name.is_empty() { + port_name.clone() + } else { + format!("{endpoint_name}-{port_name}") + }; + let endpoint = match port_name.as_str() { + "http" => format!("http://{node_ip}:{node_port}"), + "https" => format!("https://{node_ip}:{node_port}"), + _ => format!("{node_ip}:{node_port}"), + }; + + result.insert(endpoint_name, endpoint); } None => warn!("Could not get endpoint_url as service {service_name} has no nodePort"), } diff --git a/src/services.rs b/src/services.rs index 3d8c3306..f0161ad3 100644 --- a/src/services.rs +++ b/src/services.rs @@ -196,6 +196,10 @@ async fn list_services( let endpoint = endpoints.next(); let extra_info = extra_infos.next(); + if endpoint.is_none() && extra_info.is_none() { + break; + } + println!( " {:50} {}", endpoint @@ -203,10 +207,6 @@ async fn list_services( .unwrap_or_default(), extra_info.map(|s| s.to_string()).unwrap_or_default(), ); - - if endpoint.is_none() && extra_info.is_none() { - break; - } } } } From 9e2372c2576f9fb9c9d4829d627eafcff2889406 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 12:51:51 +0200 Subject: [PATCH 020/177] Add flag to hide credentials in output --- src/kube.rs | 78 +------------ src/services.rs | 302 ++++++++++++++++++++++++++++++------------------ 2 files changed, 196 insertions(+), 184 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 4540ca50..99476439 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,18 +1,10 @@ -use crate::{ - helpers, - services::{get_extra_infos, get_service_names, InstalledProduct, STACKABLE_PRODUCT_CRDS}, - NAMESPACE, -}; +use crate::{helpers, NAMESPACE}; use cached::proc_macro::cached; use core::panic; use indexmap::IndexMap; use k8s_openapi::api::core::v1::{Endpoints, Node}; -use kube::{ - api::{DynamicObject, ListParams}, - core::ErrorResponse, - Api, Client, ResourceExt, -}; -use log::{debug, warn}; +use kube::{api::ListParams, Api, Client, ResourceExt}; +use log::warn; use std::{collections::HashMap, error::Error, vec}; /// This function currently uses `kubectl apply`. 
@@ -25,68 +17,6 @@ pub fn deploy_manifest(yaml: &str) { ); } -pub async fn get_stackable_services( - namespaced: bool, -) -> Result>, Box> { - let mut result = IndexMap::new(); - let namespace = NAMESPACE.lock().unwrap().clone(); - - let client = get_client().await?; - - for (product_name, product_gvk) in STACKABLE_PRODUCT_CRDS.iter() { - let api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); - let api: Api = match namespaced { - true => Api::namespaced_with(client.clone(), &namespace, &api_resource), - false => Api::all_with(client.clone(), &api_resource), - }; - let objects = api.list(&ListParams::default()).await; - match objects { - Ok(objects) => { - let mut installed_products = Vec::new(); - for object in objects { - let object_name = object.name(); - let object_namespace = object.namespace(); - - let service_names = get_service_names(&object_name, product_name); - let extra_infos = get_extra_infos(product_name, &object).await?; - - let mut endpoints = IndexMap::new(); - for service_name in service_names { - let service_endpoint_urls = - get_service_endpoint_urls(&service_name, &object_name, object_namespace - .as_ref() - .expect("Failed to get the namespace of object {object_name} besides it having an service") - , client.clone()) - .await; - match service_endpoint_urls { - Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), - Err(err) => warn!( - "Failed to get endpoint_urls of service {service_name}: {err}" - ), - } - } - let product = InstalledProduct { - name: object_name, - namespace: object_namespace, - endpoints, - extra_infos, - }; - installed_products.push(product); - } - result.insert(product_name.to_string(), installed_products); - } - Err(kube::Error::Api(ErrorResponse { code: 404, .. })) => { - debug!("ProductCRD for product {product_name} not installed"); - } - Err(err) => { - return Err(Box::new(err)); - } - } - } - - Ok(result) -} - pub async fn get_service_endpoint_urls( service_name: &str, object_name: &str, @@ -144,7 +74,7 @@ pub async fn get_service_endpoint_urls( } else { format!("{endpoint_name}-{port_name}") }; - let endpoint = match port_name.as_str() { + let endpoint = match port_name.as_str() { "http" => format!("http://{node_ip}:{node_port}"), "https" => format!("https://{node_ip}:{node_port}"), _ => format!("{node_ip}:{node_port}"), diff --git a/src/services.rs b/src/services.rs index f0161ad3..6134fb94 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,121 +1,125 @@ use std::error::Error; -use ::kube::{ - api::{DynamicObject, GroupVersionKind, ListParams}, - Api, ResourceExt, -}; use clap::Parser; use indexmap::IndexMap; use k8s_openapi::api::{apps::v1::Deployment, core::v1::Secret}; +use kube::{ + api::{DynamicObject, GroupVersionKind, ListParams}, + core::ErrorResponse, + Api, ResourceExt, +}; use lazy_static::lazy_static; -use log::warn; +use log::{debug, warn}; use serde::Serialize; use crate::{ arguments::OutputType, - kube::{self, get_client, get_service_endpoint_urls}, + kube::{get_client, get_service_endpoint_urls}, NAMESPACE, }; +pub static REDACTED_PASSWORD: &str = ""; + lazy_static! 
{ - pub static ref STACKABLE_PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = IndexMap::from([ - ( - "airflow", - GroupVersionKind { - group: "airflow.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "AirflowCluster".to_string(), - } - ), - ( - "druid", - GroupVersionKind { - group: "druid.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "DruidCluster".to_string(), - } - ), - ( - "hbase", - GroupVersionKind { - group: "hbase.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "HbaseCluster".to_string(), - } - ), - ( - "hdfs", - GroupVersionKind { - group: "hdfs.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "HdfsCluster".to_string(), - } - ), - ( - "hive", - GroupVersionKind { - group: "hive.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "HiveCluster".to_string(), - } - ), - ( - "kafka", - GroupVersionKind { - group: "kafka.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "KafkaCluster".to_string(), - } - ), - ( - "nifi", - GroupVersionKind { - group: "nifi.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "NifiCluster".to_string(), - } - ), - ( - "opa", - GroupVersionKind { - group: "opa.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "OpenPolicyAgent".to_string(), - } - ), - ( - "spark", - GroupVersionKind { - group: "spark.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "SparkCluster".to_string(), - } - ), - ( - "superset", - GroupVersionKind { - group: "superset.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "SupersetCluster".to_string(), - } - ), - ( - "trino", - GroupVersionKind { - group: "trino.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "TrinoCluster".to_string(), - } - ), - ( - "zookeeper", - GroupVersionKind { - group: "zookeeper.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "ZookeeperCluster".to_string(), - } - ), - ]); + pub static ref STACKABLE_PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = + IndexMap::from([ + ( + "airflow", + GroupVersionKind { + group: "airflow.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "AirflowCluster".to_string(), + } + ), + ( + "druid", + GroupVersionKind { + group: "druid.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "DruidCluster".to_string(), + } + ), + ( + "hbase", + GroupVersionKind { + group: "hbase.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HbaseCluster".to_string(), + } + ), + ( + "hdfs", + GroupVersionKind { + group: "hdfs.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HdfsCluster".to_string(), + } + ), + ( + "hive", + GroupVersionKind { + group: "hive.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HiveCluster".to_string(), + } + ), + ( + "kafka", + GroupVersionKind { + group: "kafka.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "KafkaCluster".to_string(), + } + ), + ( + "nifi", + GroupVersionKind { + group: "nifi.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "NifiCluster".to_string(), + } + ), + ( + "opa", + GroupVersionKind { + group: "opa.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "OpenPolicyAgent".to_string(), + } + ), + ( + "spark", + GroupVersionKind { + group: "spark.stackable.tech".to_string(), + version: 
"v1alpha1".to_string(), + kind: "SparkCluster".to_string(), + } + ), + ( + "superset", + GroupVersionKind { + group: "superset.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "SupersetCluster".to_string(), + } + ), + ( + "trino", + GroupVersionKind { + group: "trino.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "TrinoCluster".to_string(), + } + ), + ( + "zookeeper", + GroupVersionKind { + group: "zookeeper.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "ZookeeperCluster".to_string(), + } + ), + ]); } #[derive(Parser)] @@ -127,6 +131,10 @@ pub enum CliCommandServices { #[clap(short, long)] all_namespaces: bool, + /// Don't show credentials in the output + #[clap(long)] + hide_credentials: bool, + #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, @@ -138,7 +146,8 @@ impl CliCommandServices { CliCommandServices::List { all_namespaces, output, - } => list_services(*all_namespaces, output).await?, + hide_credentials, + } => list_services(*all_namespaces, *hide_credentials, output).await?, } Ok(()) } @@ -155,12 +164,13 @@ pub struct InstalledProduct { async fn list_services( all_namespaces: bool, + hide_credentials: bool, output_type: &OutputType, ) -> Result<(), Box> { - let mut output = kube::get_stackable_services(!all_namespaces).await?; + let mut output = get_stackable_services(!all_namespaces, hide_credentials).await?; output.insert( "minio".to_string(), - get_minio_services(!all_namespaces).await?, + get_minio_services(!all_namespaces, hide_credentials).await?, ); match output_type { @@ -222,6 +232,70 @@ async fn list_services( Ok(()) } +pub async fn get_stackable_services( + namespaced: bool, + hide_credentials: bool, +) -> Result>, Box> { + let mut result = IndexMap::new(); + let namespace = NAMESPACE.lock().unwrap().clone(); + + let client = get_client().await?; + + for (product_name, product_gvk) in STACKABLE_PRODUCT_CRDS.iter() { + let api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); + let api: Api = match namespaced { + true => Api::namespaced_with(client.clone(), &namespace, &api_resource), + false => Api::all_with(client.clone(), &api_resource), + }; + let objects = api.list(&ListParams::default()).await; + match objects { + Ok(objects) => { + let mut installed_products = Vec::new(); + for object in objects { + let object_name = object.name(); + let object_namespace = object.namespace(); + + let service_names = get_service_names(&object_name, product_name); + let extra_infos = + get_extra_infos(product_name, &object, hide_credentials).await?; + + let mut endpoints = IndexMap::new(); + for service_name in service_names { + let service_endpoint_urls = + get_service_endpoint_urls(&service_name, &object_name, object_namespace + .as_ref() + .expect("Failed to get the namespace of object {object_name} besides it having an service") + , client.clone()) + .await; + match service_endpoint_urls { + Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), + Err(err) => warn!( + "Failed to get endpoint_urls of service {service_name}: {err}" + ), + } + } + let product = InstalledProduct { + name: object_name, + namespace: object_namespace, + endpoints, + extra_infos, + }; + installed_products.push(product); + } + result.insert(product_name.to_string(), installed_products); + } + Err(kube::Error::Api(ErrorResponse { code: 404, .. 
})) => { + debug!("ProductCRD for product {product_name} not installed"); + } + Err(err) => { + return Err(Box::new(err)); + } + } + } + + Ok(result) +} + pub fn get_service_names(product_name: &str, product: &str) -> Vec { match product { "druid" => vec![ @@ -242,6 +316,7 @@ pub fn get_service_names(product_name: &str, product: &str) -> Vec { pub async fn get_extra_infos( product: &str, product_crd: &DynamicObject, + hide_credentials: bool, ) -> Result, Box> { let mut result = match product_crd.data["spec"]["version"].as_str() { Some(version) => Vec::from([format!("Version {version}")]), @@ -262,8 +337,12 @@ pub async fn get_extra_infos( secret_data.get("adminUser.password"), ) { let username = String::from_utf8(username.0.clone()).unwrap(); - let password = String::from_utf8(password.0.clone()).unwrap(); - result.push(format!("user: {username}, password: {password}")); + let password = if hide_credentials { + REDACTED_PASSWORD.to_string() + } else { + String::from_utf8(password.0.clone()).unwrap() + }; + result.push(format!("admin username: {username}, password: {password}")); } } } @@ -273,7 +352,10 @@ pub async fn get_extra_infos( Ok(result) } -async fn get_minio_services(namespaced: bool) -> Result, Box> { +async fn get_minio_services( + namespaced: bool, + hide_credentials: bool, +) -> Result, Box> { let client = get_client().await?; let deployment_api: Api = match namespaced { true => Api::namespaced(client.clone(), NAMESPACE.lock().unwrap().as_str()), From add769d6720cc1f4e53dd1332c8aa0d477ebd3fd Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 13:46:15 +0200 Subject: [PATCH 021/177] Add showing of S3 admin user for MinIO --- src/services.rs | 102 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 84 insertions(+), 18 deletions(-) diff --git a/src/services.rs b/src/services.rs index 6134fb94..3870ec9f 100644 --- a/src/services.rs +++ b/src/services.rs @@ -329,20 +329,24 @@ pub async fn get_extra_infos( let client = get_client().await?; let secret_api: Api = Api::namespaced(client, &product_crd.namespace().unwrap()); - let secret = secret_api.get(secret_name).await?; - let secret_data = secret.data.unwrap(); - - if let (Some(username), Some(password)) = ( - secret_data.get("adminUser.username"), - secret_data.get("adminUser.password"), - ) { - let username = String::from_utf8(username.0.clone()).unwrap(); - let password = if hide_credentials { - REDACTED_PASSWORD.to_string() - } else { - String::from_utf8(password.0.clone()).unwrap() - }; - result.push(format!("admin username: {username}, password: {password}")); + + if let Ok(Secret { + data: Some(secret_data), + .. 
+ }) = secret_api.get(secret_name).await + { + if let (Some(username), Some(password)) = ( + secret_data.get("adminUser.username"), + secret_data.get("adminUser.password"), + ) { + let username = String::from_utf8(username.0.clone()).unwrap(); + let password = if hide_credentials { + REDACTED_PASSWORD.to_string() + } else { + String::from_utf8(password.0.clone()).unwrap() + }; + result.push(format!("admin user: {username}, password: {password}")); + } } } } @@ -373,10 +377,6 @@ async fn get_minio_services( deployment_name.clone(), format!("{deployment_name}-console"), ]; - let extra_infos = vec![ - "This service is not part of the official Stackable Platform".to_string(), - "It is provided as a helper utility".to_string(), - ]; let mut endpoints = IndexMap::new(); for service_name in service_names { @@ -392,6 +392,72 @@ async fn get_minio_services( Err(err) => warn!("Failed to get endpoint_urls of service {service_name}: {err}"), } } + + let mut extra_infos = vec!["Third party service".to_string()]; + let containers = minio_deployment + .spec + .unwrap() + .template + .spec + .unwrap() + .containers; + if let Some(minio_container) = containers.iter().find(|c| c.name == "minio") { + if let Some(env) = &minio_container.env { + let admin_user = env.iter().find(|e| e.name == "MINIO_ROOT_USER"); + let admin_password = env.iter().find(|e| e.name == "MINIO_ROOT_PASSWORD"); + + if let (Some(admin_user), Some(admin_password)) = (admin_user, admin_password) { + let admin_user = admin_user + .value_from + .as_ref() + .unwrap() + .secret_key_ref + .as_ref() + .unwrap(); + let admin_password = admin_password + .value_from + .as_ref() + .unwrap() + .secret_key_ref + .as_ref() + .unwrap(); + + let api: Api = Api::namespaced(client.clone(), &deployment_namespace); + let admin_user_secret = api.get(admin_user.name.as_ref().unwrap()).await; + let admin_password_secret = + api.get(admin_password.name.as_ref().unwrap()).await; + + if let ( + Ok(Secret { + data: Some(admin_user_secret_data), + .. + }), + Ok(Secret { + data: Some(admin_password_secret_data), + .. 
+ }), + ) = (admin_user_secret, admin_password_secret) + { + let admin_user = admin_user_secret_data + .get(&admin_user.key) + .map(|b| String::from_utf8(b.clone().0).unwrap()) + .unwrap_or_default(); + let admin_password = if hide_credentials { + REDACTED_PASSWORD.to_string() + } else { + admin_password_secret_data + .get(&admin_password.key) + .map(|b| String::from_utf8(b.clone().0).unwrap()) + .unwrap_or_default() + }; + extra_infos.push(format!( + "admin user: {admin_user}, password: {admin_password}" + )); + } + } + } + } + let product = InstalledProduct { name: deployment_name, namespace: Some(deployment_namespace), From fcc9b83c910f161d89b5444457a429240785a61f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 13:49:08 +0200 Subject: [PATCH 022/177] Move version info down --- src/services.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/services.rs b/src/services.rs index 3870ec9f..b3626860 100644 --- a/src/services.rs +++ b/src/services.rs @@ -318,10 +318,7 @@ pub async fn get_extra_infos( product_crd: &DynamicObject, hide_credentials: bool, ) -> Result, Box> { - let mut result = match product_crd.data["spec"]["version"].as_str() { - Some(version) => Vec::from([format!("Version {version}")]), - None => Vec::new(), - }; + let mut result = Vec::new(); match product { "superset" => { @@ -353,6 +350,10 @@ pub async fn get_extra_infos( _ => (), } + if let Some(version) = product_crd.data["spec"]["version"].as_str() { + result.push(format!("version {version}")); + } + Ok(result) } From 3c362aac8d4cb6a6a6f3624e6a4376632845cb63 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 14:05:37 +0200 Subject: [PATCH 023/177] Rename hide_credentials -> redact_credentials --- src/services.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/services.rs b/src/services.rs index b3626860..0110ef87 100644 --- a/src/services.rs +++ b/src/services.rs @@ -132,8 +132,8 @@ pub enum CliCommandServices { all_namespaces: bool, /// Don't show credentials in the output - #[clap(long)] - hide_credentials: bool, + #[clap(short, long)] + redact_credentials: bool, #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, @@ -146,8 +146,8 @@ impl CliCommandServices { CliCommandServices::List { all_namespaces, output, - hide_credentials, - } => list_services(*all_namespaces, *hide_credentials, output).await?, + redact_credentials, + } => list_services(*all_namespaces, *redact_credentials, output).await?, } Ok(()) } @@ -164,13 +164,13 @@ pub struct InstalledProduct { async fn list_services( all_namespaces: bool, - hide_credentials: bool, + redact_credentials: bool, output_type: &OutputType, ) -> Result<(), Box> { - let mut output = get_stackable_services(!all_namespaces, hide_credentials).await?; + let mut output = get_stackable_services(!all_namespaces, redact_credentials).await?; output.insert( "minio".to_string(), - get_minio_services(!all_namespaces, hide_credentials).await?, + get_minio_services(!all_namespaces, redact_credentials).await?, ); match output_type { @@ -234,7 +234,7 @@ async fn list_services( pub async fn get_stackable_services( namespaced: bool, - hide_credentials: bool, + redact_credentials: bool, ) -> Result>, Box> { let mut result = IndexMap::new(); let namespace = NAMESPACE.lock().unwrap().clone(); @@ -257,7 +257,7 @@ pub async fn get_stackable_services( let service_names = get_service_names(&object_name, product_name); let extra_infos = - 
get_extra_infos(product_name, &object, hide_credentials).await?; + get_extra_infos(product_name, &object, redact_credentials).await?; let mut endpoints = IndexMap::new(); for service_name in service_names { @@ -316,7 +316,7 @@ pub fn get_service_names(product_name: &str, product: &str) -> Vec { pub async fn get_extra_infos( product: &str, product_crd: &DynamicObject, - hide_credentials: bool, + redact_credentials: bool, ) -> Result, Box> { let mut result = Vec::new(); @@ -337,7 +337,7 @@ pub async fn get_extra_infos( secret_data.get("adminUser.password"), ) { let username = String::from_utf8(username.0.clone()).unwrap(); - let password = if hide_credentials { + let password = if redact_credentials { REDACTED_PASSWORD.to_string() } else { String::from_utf8(password.0.clone()).unwrap() @@ -359,7 +359,7 @@ pub async fn get_extra_infos( async fn get_minio_services( namespaced: bool, - hide_credentials: bool, + redact_credentials: bool, ) -> Result, Box> { let client = get_client().await?; let deployment_api: Api = match namespaced { @@ -443,7 +443,7 @@ async fn get_minio_services( .get(&admin_user.key) .map(|b| String::from_utf8(b.clone().0).unwrap()) .unwrap_or_default(); - let admin_password = if hide_credentials { + let admin_password = if redact_credentials { REDACTED_PASSWORD.to_string() } else { admin_password_secret_data From a5a0222318bd20f1d7d96d9c403b0b71a52d7eeb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 17:43:28 +0200 Subject: [PATCH 024/177] Update releases.yaml --- releases.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/releases.yaml b/releases.yaml index 9090a979..32f8d872 100644 --- a/releases.yaml +++ b/releases.yaml @@ -15,7 +15,7 @@ releases: commons: operatorVersion: 0.1.0 druid: - operatorVersion: 0.6.0-pr245 + operatorVersion: 0.6.0-nightly hbase: operatorVersion: 0.2.0 hdfs: @@ -35,7 +35,7 @@ releases: superset: operatorVersion: 0.5.0-nightly trino: - operatorVersion: 0.3.2-pr213 # Trino < 0.4.0 requires regorule-operator and 0.4.0 will be released soon-ish. 
Picking a fixed version anyway + operatorVersion: 0.3.2-nightly zookeeper: operatorVersion: 0.10.0-nightly alpha-3: From cc009bf515b8d827139bf40efc69d889e157b074 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 17:44:17 +0200 Subject: [PATCH 025/177] It's okay for endpoints to have multiple addresses --- src/kube.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 99476439..5ed21d62 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -32,17 +32,13 @@ pub async fn get_service_endpoint_urls( let node_name = match &endpoints.subsets { Some(subsets) if subsets.len() == 1 => match &subsets[0].addresses { - Some(addresses) if addresses.len() == 1 => match &addresses[0].node_name { + Some(addresses) => match &addresses[0].node_name { Some(node_name) => node_name, None => { warn!("Could not determine the node the endpoint {service_name} is running on because the address of the subset didn't had a node name"); return Ok(IndexMap::new()); } }, - Some(_) => { - warn!("Could not determine the node the endpoint {service_name} is running on because subset had multiple addresses"); - return Ok(IndexMap::new()); - } None => { warn!("Could not determine the node the endpoint {service_name} is running on because subset had no addresses"); return Ok(IndexMap::new()); From 8fe474f38ca4504a614f4f6e67237c672c7d9bfa Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 15 Jun 2022 18:23:52 +0200 Subject: [PATCH 026/177] Add option to hive versions --- src/services.rs | 38 ++++++++++++++++++++++++-------------- stacks.yaml | 3 +++ 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/src/services.rs b/src/services.rs index 0110ef87..94528195 100644 --- a/src/services.rs +++ b/src/services.rs @@ -87,14 +87,6 @@ lazy_static! { kind: "OpenPolicyAgent".to_string(), } ), - ( - "spark", - GroupVersionKind { - group: "spark.stackable.tech".to_string(), - version: "v1alpha1".to_string(), - kind: "SparkCluster".to_string(), - } - ), ( "superset", GroupVersionKind { @@ -135,6 +127,10 @@ pub enum CliCommandServices { #[clap(short, long)] redact_credentials: bool, + /// Don't show the product versions in the output + #[clap(long)] + hide_versions: bool, + #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, @@ -147,7 +143,10 @@ impl CliCommandServices { all_namespaces, output, redact_credentials, - } => list_services(*all_namespaces, *redact_credentials, output).await?, + hide_versions, + } => { + list_services(*all_namespaces, *redact_credentials, *hide_versions, output).await? 
+ } } Ok(()) } @@ -165,9 +164,11 @@ pub struct InstalledProduct { async fn list_services( all_namespaces: bool, redact_credentials: bool, + hide_versions: bool, output_type: &OutputType, ) -> Result<(), Box> { - let mut output = get_stackable_services(!all_namespaces, redact_credentials).await?; + let mut output = + get_stackable_services(!all_namespaces, redact_credentials, hide_versions).await?; output.insert( "minio".to_string(), get_minio_services(!all_namespaces, redact_credentials).await?, @@ -235,6 +236,7 @@ async fn list_services( pub async fn get_stackable_services( namespaced: bool, redact_credentials: bool, + hide_versions: bool, ) -> Result>, Box> { let mut result = IndexMap::new(); let namespace = NAMESPACE.lock().unwrap().clone(); @@ -257,7 +259,8 @@ pub async fn get_stackable_services( let service_names = get_service_names(&object_name, product_name); let extra_infos = - get_extra_infos(product_name, &object, redact_credentials).await?; + get_extra_infos(product_name, &object, redact_credentials, hide_versions) + .await?; let mut endpoints = IndexMap::new(); for service_name in service_names { @@ -302,7 +305,8 @@ pub fn get_service_names(product_name: &str, product: &str) -> Vec { format!("{product_name}-router"), format!("{product_name}-coordinator"), ], - "hive" => vec![], + "hive" => vec![product_name.to_string()], + "nifi" => vec![product_name.to_string()], "superset" => vec![format!("{product_name}-external")], "trino" => vec![format!("{product_name}-coordinator")], "zookeeper" => vec![product_name.to_string()], @@ -317,10 +321,14 @@ pub async fn get_extra_infos( product: &str, product_crd: &DynamicObject, redact_credentials: bool, + hide_versions: bool, ) -> Result, Box> { let mut result = Vec::new(); match product { + "nifi" => { + result.push("TODO: must implement reading admin user for nifi".to_string()); + } "superset" => { if let Some(secret_name) = product_crd.data["spec"]["credentialsSecret"].as_str() { let client = get_client().await?; @@ -350,8 +358,10 @@ pub async fn get_extra_infos( _ => (), } - if let Some(version) = product_crd.data["spec"]["version"].as_str() { - result.push(format!("version {version}")); + if !hide_versions { + if let Some(version) = product_crd.data["spec"]["version"].as_str() { + result.push(format!("version {version}")); + } } Ok(result) diff --git a/stacks.yaml b/stacks.yaml index a505d395..49cb6df8 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -27,6 +27,9 @@ stacks: buckets: - name: druid policy: public + resources: + requests: + memory: 2Gi service: type: NodePort nodePort: null From 7d5b5c14aab0b11607b554e310ef3b8f410512de Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 11:12:59 +0200 Subject: [PATCH 027/177] Switch from kubectl to kube-rs to deploy manifests --- src/kube.rs | 56 +++++++++++++++++++++++++++++++++++++++++----------- src/main.rs | 2 +- src/stack.rs | 12 ++++++----- 3 files changed, 52 insertions(+), 18 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 5ed21d62..d5f8ef0c 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,20 +1,45 @@ -use crate::{helpers, NAMESPACE}; +use crate::NAMESPACE; use cached::proc_macro::cached; use core::panic; use indexmap::IndexMap; use k8s_openapi::api::core::v1::{Endpoints, Node}; -use kube::{api::ListParams, Api, Client, ResourceExt}; +use kube::{ + api::{DynamicObject, GroupVersionKind, ListParams, Patch, PatchParams, TypeMeta}, + discovery::Scope, + Api, Client, Discovery, ResourceExt, +}; use log::warn; -use std::{collections::HashMap, error::Error, 
vec};
-
-/// This function currently uses `kubectl apply`.
-/// In the future we want to switch to kube-rs or something else to not require the user to install kubectl.
-pub fn deploy_manifest(yaml: &str) {
-    let namespace = NAMESPACE.lock().unwrap();
-    helpers::execute_command_with_stdin(
-        vec!["kubectl", "apply", "-n", &namespace, "-f", "-"],
-        yaml,
-    );
+use serde::Deserialize;
+use std::{collections::HashMap, error::Error};
+
+pub async fn deploy_manifests(yaml: &str) -> Result<(), Box<dyn Error>> {
+    let namespace = NAMESPACE.lock().unwrap().clone();
+    let client = get_client().await?;
+    let discovery = Discovery::new(client.clone()).run().await?;
+
+    for manifest in serde_yaml::Deserializer::from_str(yaml) {
+        let mut object = DynamicObject::deserialize(manifest).unwrap();
+
+        let gvk = gvk_of_typemeta(object.types.as_ref().expect("Failed to get type of object"));
+        let (resource, capabilities) = discovery.resolve_gvk(&gvk).expect("Failed to resolve gvk");
+
+        let api: Api<DynamicObject> = match capabilities.scope {
+            Scope::Cluster => {
+                object.metadata.namespace = None;
+                Api::all_with(client.clone(), &resource)
+            }
+            Scope::Namespaced => Api::namespaced_with(client.clone(), &namespace, &resource),
+        };
+
+        api.patch(
+            &object.name(),
+            &PatchParams::apply("stackablectl"),
+            &Patch::Apply(object),
+        )
+        .await?;
+    }
+
+    Ok(())
 }

 pub async fn get_service_endpoint_urls(
@@ -129,3 +154,10 @@ async fn get_node_name_ip_mapping() -> HashMap<String, String> {
 pub async fn get_client() -> Result<Client, Box<dyn Error>> {
     Ok(Client::try_default().await?)
 }
+
+fn gvk_of_typemeta(type_meta: &TypeMeta) -> GroupVersionKind {
+    match type_meta.api_version.split_once('/') {
+        Some((group, version)) => GroupVersionKind::gvk(group, version, &type_meta.kind),
+        None => GroupVersionKind::gvk("", &type_meta.api_version, &type_meta.kind),
+    }
+}
diff --git a/src/main.rs b/src/main.rs
index ee43351f..dca4c104 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -58,7 +58,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     match &args.cmd {
         CliCommand::Operator(command) => command.handle().await,
         CliCommand::Release(command) => command.handle().await,
-        CliCommand::Stack(command) => command.handle().await,
+        CliCommand::Stack(command) => command.handle().await?,
         CliCommand::Services(command) => command.handle().await?,
     }
diff --git a/src/stack.rs b/src/stack.rs
index 43d7e846..61332785 100644
--- a/src/stack.rs
+++ b/src/stack.rs
@@ -5,7 +5,7 @@ use indexmap::IndexMap;
 use lazy_static::lazy_static;
 use log::{debug, error, info, warn};
 use serde::{Deserialize, Serialize};
-use std::{ops::Deref, process::exit, sync::Mutex};
+use std::{error::Error, ops::Deref, process::exit, sync::Mutex};

 lazy_static!
{ pub static ref STACK_FILES: Mutex> = Mutex::new(vec![ @@ -55,7 +55,7 @@ pub enum CliCommandStack { } impl CliCommandStack { - pub async fn handle(&self) { + pub async fn handle(&self) -> Result<(), Box> { match self { CliCommandStack::List { output } => list_stacks(output).await, CliCommandStack::Describe { stack, output } => describe_stack(stack, output).await, @@ -65,9 +65,10 @@ impl CliCommandStack { kind_cluster_name, } => { kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); - install_stack(stack).await; + install_stack(stack).await?; } } + Ok(()) } } @@ -167,7 +168,7 @@ async fn describe_stack(stack_name: &str, output_type: &OutputType) { } } -async fn install_stack(stack_name: &str) { +async fn install_stack(stack_name: &str) -> Result<(), Box> { info!("Installing stack {stack_name}"); let stack = get_stack(stack_name).await; @@ -202,7 +203,7 @@ async fn install_stack(stack_name: &str) { StackManifest::PlainYaml(yaml_url_or_file) => { debug!("Installing yaml manifest from {yaml_url_or_file}"); match helpers::read_from_url_or_file(&yaml_url_or_file).await { - Ok(manifests) => kube::deploy_manifest(&manifests), + Ok(manifests) => kube::deploy_manifests(&manifests).await?, Err(err) => { panic!( "Could not read stack manifests from file \"{}\": {err}", @@ -215,6 +216,7 @@ async fn install_stack(stack_name: &str) { } info!("Installed stack {stack_name}"); + Ok(()) } /// Cached because of potential slow network calls From 58ffacc604eb5f1c7066f9cab8b1900dd4791a8f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 11:42:33 +0200 Subject: [PATCH 028/177] Adapt to rename OpenPolicyAgent -> OpaCluster --- src/services.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/services.rs b/src/services.rs index 94528195..45e73ecd 100644 --- a/src/services.rs +++ b/src/services.rs @@ -84,7 +84,7 @@ lazy_static! { GroupVersionKind { group: "opa.stackable.tech".to_string(), version: "v1alpha1".to_string(), - kind: "OpenPolicyAgent".to_string(), + kind: "OpaCluster".to_string(), } ), ( From 0fe3a6e28c72140841cb3e7528e80b8e38f2003c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 12:59:19 +0200 Subject: [PATCH 029/177] Switch to nightly versions --- releases.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/releases.yaml b/releases.yaml index 32f8d872..4cf90889 100644 --- a/releases.yaml +++ b/releases.yaml @@ -11,27 +11,27 @@ releases: description: Non-official release from sbernauer to demonstrate stackablectl. It includes the latest stable versions. 
products: airflow: - operatorVersion: 0.3.0 + operatorVersion: 0.4.0-nightly commons: - operatorVersion: 0.1.0 + operatorVersion: 0.2.0-nightly druid: operatorVersion: 0.6.0-nightly hbase: - operatorVersion: 0.2.0 + operatorVersion: 0.3.0-nightly hdfs: - operatorVersion: 0.3.0 + operatorVersion: 0.4.0-nightly hive: - operatorVersion: 0.5.0 + operatorVersion: 0.6.0-nightly kafka: - operatorVersion: 0.5.0 + operatorVersion: 0.6.0-nightly nifi: - operatorVersion: 0.5.0 + operatorVersion: 0.6.0-nightly opa: - operatorVersion: 0.8.0 + operatorVersion: 0.9.0-nightly secret: - operatorVersion: 0.4.0 + operatorVersion: 0.5.0-nightly spark-k8s: - operatorVersion: 0.1.0 + operatorVersion: 0.2.0-nightly superset: operatorVersion: 0.5.0-nightly trino: From b8b3412ad5ab38532d0a88fe7db4294a8c687b0a Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 12:59:55 +0200 Subject: [PATCH 030/177] Add services and infos for various products --- src/kube.rs | 3 +- src/services.rs | 89 ++++++++++++++++++++++++++++++------------------- 2 files changed, 57 insertions(+), 35 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index d5f8ef0c..88199a71 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -96,7 +96,8 @@ pub async fn get_service_endpoint_urls( format!("{endpoint_name}-{port_name}") }; let endpoint = match port_name.as_str() { - "http" => format!("http://{node_ip}:{node_port}"), + // TODO: Consolidate web-ui port names in operators + "http" | "ui" | "airflow" | "superset" => format!("http://{node_ip}:{node_port}"), "https" => format!("https://{node_ip}:{node_port}"), _ => format!("{node_ip}:{node_port}"), }; diff --git a/src/services.rs b/src/services.rs index 45e73ecd..e6a563f2 100644 --- a/src/services.rs +++ b/src/services.rs @@ -129,7 +129,7 @@ pub enum CliCommandServices { /// Don't show the product versions in the output #[clap(long)] - hide_versions: bool, + show_versions: bool, #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, @@ -143,9 +143,9 @@ impl CliCommandServices { all_namespaces, output, redact_credentials, - hide_versions, + show_versions, } => { - list_services(*all_namespaces, *redact_credentials, *hide_versions, output).await? + list_services(*all_namespaces, *redact_credentials, *show_versions, output).await? 
} } Ok(()) @@ -164,11 +164,11 @@ pub struct InstalledProduct { async fn list_services( all_namespaces: bool, redact_credentials: bool, - hide_versions: bool, + show_versions: bool, output_type: &OutputType, ) -> Result<(), Box> { let mut output = - get_stackable_services(!all_namespaces, redact_credentials, hide_versions).await?; + get_stackable_services(!all_namespaces, redact_credentials, show_versions).await?; output.insert( "minio".to_string(), get_minio_services(!all_namespaces, redact_credentials).await?, @@ -236,7 +236,7 @@ async fn list_services( pub async fn get_stackable_services( namespaced: bool, redact_credentials: bool, - hide_versions: bool, + show_versions: bool, ) -> Result>, Box> { let mut result = IndexMap::new(); let namespace = NAMESPACE.lock().unwrap().clone(); @@ -259,7 +259,7 @@ pub async fn get_stackable_services( let service_names = get_service_names(&object_name, product_name); let extra_infos = - get_extra_infos(product_name, &object, redact_credentials, hide_versions) + get_extra_infos(product_name, &object, redact_credentials, show_versions) .await?; let mut endpoints = IndexMap::new(); @@ -301,10 +301,17 @@ pub async fn get_stackable_services( pub fn get_service_names(product_name: &str, product: &str) -> Vec { match product { + "airflow" => vec![format!("{product_name}-webserver")], "druid" => vec![ format!("{product_name}-router"), format!("{product_name}-coordinator"), ], + "hbase" => vec![product_name.to_string()], + "hdfs" => vec![ + format!("{product_name}-datanode-default-0"), + format!("{product_name}-namenode-default-0"), + format!("{product_name}-journalnode-default-0"), + ], "hive" => vec![product_name.to_string()], "nifi" => vec![product_name.to_string()], "superset" => vec![format!("{product_name}-external")], @@ -321,44 +328,31 @@ pub async fn get_extra_infos( product: &str, product_crd: &DynamicObject, redact_credentials: bool, - hide_versions: bool, + show_versions: bool, ) -> Result, Box> { let mut result = Vec::new(); match product { - "nifi" => { - result.push("TODO: must implement reading admin user for nifi".to_string()); - } - "superset" => { + "airflow" | "superset" => { if let Some(secret_name) = product_crd.data["spec"]["credentialsSecret"].as_str() { - let client = get_client().await?; - let secret_api: Api = - Api::namespaced(client, &product_crd.namespace().unwrap()); - - if let Ok(Secret { - data: Some(secret_data), - .. 
- }) = secret_api.get(secret_name).await - { - if let (Some(username), Some(password)) = ( - secret_data.get("adminUser.username"), - secret_data.get("adminUser.password"), - ) { - let username = String::from_utf8(username.0.clone()).unwrap(); - let password = if redact_credentials { - REDACTED_PASSWORD.to_string() - } else { - String::from_utf8(password.0.clone()).unwrap() - }; - result.push(format!("admin user: {username}, password: {password}")); - } + let credentials = get_credentials_from_secret( + secret_name, + product_crd.namespace().unwrap().as_str(), + "adminUser.username", + "adminUser.password", + redact_credentials, + ) + .await?; + + if let Some((username, password)) = credentials { + result.push(format!("admin user: {username}, password: {password}")); } } } _ => (), } - if !hide_versions { + if show_versions { if let Some(version) = product_crd.data["spec"]["version"].as_str() { result.push(format!("version {version}")); } @@ -367,6 +361,33 @@ pub async fn get_extra_infos( Ok(result) } +async fn get_credentials_from_secret( + secret_name: &str, + secret_namespace: &str, + username_key: &str, + password_key: &str, + redact_credentials: bool, +) -> Result, Box> { + let client = get_client().await?; + let secret_api: Api = Api::namespaced(client, secret_namespace); + + let secret = secret_api.get(secret_name).await?; + let secret_data = secret.data.unwrap(); + + match (secret_data.get(username_key), secret_data.get(password_key)) { + (Some(username), Some(password)) => { + let username = String::from_utf8(username.0.clone()).unwrap(); + let password = if redact_credentials { + REDACTED_PASSWORD.to_string() + } else { + String::from_utf8(password.0.clone()).unwrap() + }; + Ok(Some((username, password))) + } + _ => Ok(None), + } +} + async fn get_minio_services( namespaced: bool, redact_credentials: bool, From 7b839290fed0d23afc8d6b7d17828a3b436eab3b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 14:35:21 +0200 Subject: [PATCH 031/177] Uppercase info statement --- src/services.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/services.rs b/src/services.rs index e6a563f2..f2a45016 100644 --- a/src/services.rs +++ b/src/services.rs @@ -345,7 +345,7 @@ pub async fn get_extra_infos( .await?; if let Some((username, password)) = credentials { - result.push(format!("admin user: {username}, password: {password}")); + result.push(format!("Admin user: {username}, password: {password}")); } } } @@ -483,7 +483,7 @@ async fn get_minio_services( .unwrap_or_default() }; extra_infos.push(format!( - "admin user: {admin_user}, password: {admin_password}" + "Admin user: {admin_user}, password: {admin_password}" )); } } From 4ea9db75b40485e40aa19d9ee763710c5e9c878d Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 14:55:54 +0200 Subject: [PATCH 032/177] fmt --- src/kube.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index 88199a71..88f4dcdd 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -97,7 +97,9 @@ pub async fn get_service_endpoint_urls( }; let endpoint = match port_name.as_str() { // TODO: Consolidate web-ui port names in operators - "http" | "ui" | "airflow" | "superset" => format!("http://{node_ip}:{node_port}"), + "http" | "ui" | "airflow" | "superset" => { + format!("http://{node_ip}:{node_port}") + } "https" => format!("https://{node_ip}:{node_port}"), _ => format!("{node_ip}:{node_port}"), }; From f85c3eded8b2fedd7c0afc4b827d46034f3350ae Mon Sep 17 00:00:00 2001 
From: Sebastian Bernauer Date: Fri, 17 Jun 2022 14:57:15 +0200 Subject: [PATCH 033/177] Improve import --- src/kube.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 88f4dcdd..ba048441 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -2,7 +2,7 @@ use crate::NAMESPACE; use cached::proc_macro::cached; use core::panic; use indexmap::IndexMap; -use k8s_openapi::api::core::v1::{Endpoints, Node}; +use k8s_openapi::api::core::v1::{Endpoints, Node, Service}; use kube::{ api::{DynamicObject, GroupVersionKind, ListParams, Patch, PatchParams, TypeMeta}, discovery::Scope, @@ -48,8 +48,7 @@ pub async fn get_service_endpoint_urls( namespace: &str, client: Client, ) -> Result, Box> { - let service_api: Api = - Api::namespaced(client.clone(), namespace); + let service_api: Api = Api::namespaced(client.clone(), namespace); let service = service_api.get(service_name).await?; let endpoints_api: Api = Api::namespaced(client.clone(), namespace); From 75aa212507acb9675241fe8e0676f8eeab900935 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 17 Jun 2022 14:59:29 +0200 Subject: [PATCH 034/177] docs --- src/services.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/services.rs b/src/services.rs index f2a45016..fb3fb3e6 100644 --- a/src/services.rs +++ b/src/services.rs @@ -127,7 +127,7 @@ pub enum CliCommandServices { #[clap(short, long)] redact_credentials: bool, - /// Don't show the product versions in the output + /// Show the product versions in the output #[clap(long)] show_versions: bool, From 2538451e268977db7f0896932385063f9b11f51d Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 08:55:10 +0200 Subject: [PATCH 035/177] Remove uneeded feature from reqwest --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 30c6dcac..780855b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" -reqwest = { version = "0.11", features = ["blocking"] } +reqwest = "0.11" tokio = "1.19.2" [profile.release] From 5278fceea7c623f27d59b9600f5053972259c545 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 09:40:32 +0200 Subject: [PATCH 036/177] Added x86_64-unknown-linux-musl target --- .github/workflows/build.yml | 2 ++ .github/workflows/release.yml | 3 +++ 2 files changed, 5 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d999dc5b..8862e669 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -169,6 +169,8 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + - target: x86_64-unknown-linux-musl + os: ubuntu-latest - target: x86_64-pc-windows-gnu os: windows-latest - target: x86_64-apple-darwin diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dbb238b7..5602916b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,6 +16,9 @@ jobs: - target: x86_64-unknown-linux-gnu os: ubuntu-latest file-suffix: "" + - target: x86_64-unknown-linux-musl + os: ubuntu-latest + file-suffix: "" - target: x86_64-pc-windows-gnu os: windows-latest file-suffix: ".exe" From a81dda20a8efb1c861440b894505e287eb9c3c27 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 09:56:54 +0200 Subject: [PATCH 037/177] Switch from openssl to rustls --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 780855b6..648334a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,14 @@ clap = { version = "3.2", features = ["derive", "cargo"] } env_logger = "0.9" indexmap = { version = "1.8", features = ["serde"] } k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } -kube = "0.73.1" +kube = { version = "0.73.1", default-features = false, features = ["client", "rustls-tls"] } lazy_static = "1.4" log = "0.4" which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" -reqwest = "0.11" +reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] } tokio = "1.19.2" [profile.release] From ca9d51b25d3ce4be4e7435ea50d49dea195c6d30 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 10:23:48 +0200 Subject: [PATCH 038/177] Revert "Added x86_64-unknown-linux-musl target" This reverts commit 5278fceea7c623f27d59b9600f5053972259c545. --- .github/workflows/build.yml | 2 -- .github/workflows/release.yml | 3 --- 2 files changed, 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8862e669..d999dc5b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -169,8 +169,6 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - - target: x86_64-unknown-linux-musl - os: ubuntu-latest - target: x86_64-pc-windows-gnu os: windows-latest - target: x86_64-apple-darwin diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5602916b..dbb238b7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,9 +16,6 @@ jobs: - target: x86_64-unknown-linux-gnu os: ubuntu-latest file-suffix: "" - - target: x86_64-unknown-linux-musl - os: ubuntu-latest - file-suffix: "" - target: x86_64-pc-windows-gnu os: windows-latest file-suffix: ".exe" From d11b9fda16954042daca5bccb87443d16fe9f16c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 10:42:45 +0200 Subject: [PATCH 039/177] cargo-deny: Clarify ring as LicenseRef-ring --- deny.toml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/deny.toml b/deny.toml index fcd0e92c..34686ca5 100644 --- a/deny.toml +++ b/deny.toml @@ -4,6 +4,8 @@ targets = [ { triple = "x86_64-unknown-linux-musl" }, { triple = "aarch64-apple-darwin" }, { triple = "x86_64-apple-darwin" }, + { triple = "x86_64-pc-windows-gnu" }, + { triple = "x86_64-pc-windows-msvc" }, ] [advisories] @@ -33,7 +35,14 @@ allow = [ "Zlib" ] exceptions = [ - { name = "stackablectl", allow = ["OSL-3.0"] } + { name = "stackablectl", allow = ["OSL-3.0"] }, +] + +[[licenses.clarify]] +name = "ring" +expression = "LicenseRef-ring" +license-files = [ + { path = "LICENSE", hash = 0xbd0eed23 }, ] [sources] From ba6cd0dc9600c801878fffb34f71f20b498de3b7 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 10:47:51 +0200 Subject: [PATCH 040/177] cargo-deny: Clarify webpki as LicenseRef-webpki licensed --- deny.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/deny.toml b/deny.toml index 34686ca5..15eaae12 100644 --- a/deny.toml +++ b/deny.toml @@ -45,6 +45,13 @@ license-files = [ { path = "LICENSE", hash = 0xbd0eed23 }, ] +[[licenses.clarify]] +name = "webpki" +expression = "LicenseRef-webpki" +license-files = [ + { path = "LICENSE", hash = 0x001c7e6c }, +] + [sources] unknown-registry = "deny" unknown-git = "deny" From 21be90429733a887484ce067460292540c366641 Mon Sep 
17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 10:51:21 +0200 Subject: [PATCH 041/177] Allow webpki-roots with MPL-2.0 license --- deny.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/deny.toml b/deny.toml index 15eaae12..f3c17507 100644 --- a/deny.toml +++ b/deny.toml @@ -36,6 +36,7 @@ allow = [ ] exceptions = [ { name = "stackablectl", allow = ["OSL-3.0"] }, + { name = "webpki-roots", allow = ["MPL-2.0"] }, ] [[licenses.clarify]] From 547e032f7888a3d6f2245cb4d216310a0ed89ead Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 12:44:18 +0200 Subject: [PATCH 042/177] Build a static binary for Linux --- .github/workflows/build.yml | 6 ++++++ .github/workflows/release.yml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d999dc5b..ac69f849 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -169,12 +169,16 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + rustFlags: "-C target-feature=+crt-static" - target: x86_64-pc-windows-gnu os: windows-latest + rustFlags: "" - target: x86_64-apple-darwin os: macos-latest + rustFlags: "" - target: aarch64-apple-darwin os: macos-latest + rustFlags: "" steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 @@ -193,3 +197,5 @@ jobs: with: command: build args: --target=${{ matrix.target }} + env: + RUSTFLAGS: "${{ matrix.rustFlags }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dbb238b7..de13b929 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,15 +15,19 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + rustFlags: "-C target-feature=+crt-static" file-suffix: "" - target: x86_64-pc-windows-gnu os: windows-latest + rustFlags: "" file-suffix: ".exe" - target: x86_64-apple-darwin os: macos-latest + rustFlags: "" file-suffix: "" - target: aarch64-apple-darwin os: macos-latest + rustFlags: "" file-suffix: "" steps: - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # tag=v3 @@ -40,6 +44,8 @@ jobs: with: command: build args: --release --target=${{ matrix.target }} + env: + RUSTFLAGS: "${{ matrix.rustFlags }}" - name: Rename binary file run: mv target/${{ matrix.target }}/release/stackablectl${{ matrix.file-suffix }} stackablectl-${{ matrix.target }}${{ matrix.file-suffix }} - name: Upload Release binaries From 55136c1f3c207eb67942dfcc401d382ec040f873 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 20 Jun 2022 16:22:10 +0200 Subject: [PATCH 043/177] Revert "Build a static binary for Linux" This caused random segfaults on Ubuntu 22.04 when stacklablectl tries to resolve DNS names. This reverts commit 547e032f7888a3d6f2245cb4d216310a0ed89ead. 
--- .github/workflows/build.yml | 6 ------ .github/workflows/release.yml | 6 ------ 2 files changed, 12 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ac69f849..d999dc5b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -169,16 +169,12 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - rustFlags: "-C target-feature=+crt-static" - target: x86_64-pc-windows-gnu os: windows-latest - rustFlags: "" - target: x86_64-apple-darwin os: macos-latest - rustFlags: "" - target: aarch64-apple-darwin os: macos-latest - rustFlags: "" steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 @@ -197,5 +193,3 @@ jobs: with: command: build args: --target=${{ matrix.target }} - env: - RUSTFLAGS: "${{ matrix.rustFlags }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index de13b929..dbb238b7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,19 +15,15 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - rustFlags: "-C target-feature=+crt-static" file-suffix: "" - target: x86_64-pc-windows-gnu os: windows-latest - rustFlags: "" file-suffix: ".exe" - target: x86_64-apple-darwin os: macos-latest - rustFlags: "" file-suffix: "" - target: aarch64-apple-darwin os: macos-latest - rustFlags: "" file-suffix: "" steps: - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # tag=v3 @@ -44,8 +40,6 @@ jobs: with: command: build args: --release --target=${{ matrix.target }} - env: - RUSTFLAGS: "${{ matrix.rustFlags }}" - name: Rename binary file run: mv target/${{ matrix.target }}/release/stackablectl${{ matrix.file-suffix }} stackablectl-${{ matrix.target }}${{ matrix.file-suffix }} - name: Upload Release binaries From e64072508d49c9513316ac7a8e45a7e4c9ecf6b1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 21 Jun 2022 10:46:43 +0200 Subject: [PATCH 044/177] Switch back from rustls to openssl --- Cargo.toml | 4 ++-- deny.toml | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 39c41635..f5c71343 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,14 @@ clap = { version = "3.2", features = ["derive", "cargo"] } env_logger = "0.9" indexmap = { version = "1.8", features = ["serde"] } k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } -kube = { version = "0.73.1", default-features = false, features = ["client", "rustls-tls"] } +kube = "0.73.1" lazy_static = "1.4" log = "0.4" which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] } +reqwest = "0.11" tokio = "1.19.2" [profile.release] diff --git a/deny.toml b/deny.toml index f3c17507..15eaae12 100644 --- a/deny.toml +++ b/deny.toml @@ -36,7 +36,6 @@ allow = [ ] exceptions = [ { name = "stackablectl", allow = ["OSL-3.0"] }, - { name = "webpki-roots", allow = ["MPL-2.0"] }, ] [[licenses.clarify]] From 2259c961f82772c00b35ea5f2f6e62329f24c6bc Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 21 Jun 2022 11:06:17 +0200 Subject: [PATCH 045/177] Add x86_64-pc-windows-msvc target --- .github/workflows/build.yml | 2 ++ .github/workflows/release.yml | 3 +++ 2 files changed, 5 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d999dc5b..a303d15b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ 
-171,6 +171,8 @@ jobs: os: ubuntu-latest - target: x86_64-pc-windows-gnu os: windows-latest + - target: x86_64-pc-windows-msvc + os: windows-latest - target: x86_64-apple-darwin os: macos-latest - target: aarch64-apple-darwin diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dbb238b7..03b87fb0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,6 +19,9 @@ jobs: - target: x86_64-pc-windows-gnu os: windows-latest file-suffix: ".exe" + - target: x86_64-pc-windows-msvc + os: windows-latest + file-suffix: ".exe" - target: x86_64-apple-darwin os: macos-latest file-suffix: "" From 61ad9c685fd9b0322952b7a521351fdc62fa5de2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 21 Jun 2022 11:19:19 +0200 Subject: [PATCH 046/177] Install openssl for Windows --- .github/workflows/build.yml | 9 +++++++++ .github/workflows/release.yml | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a303d15b..ae8cab55 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -191,6 +191,15 @@ jobs: - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 with: key: build-${{ matrix.target }} + - name: Install openssl + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + echo "openssl should be already installed" + elif [ "$RUNNER_OS" == "Windows" ]; then + export VCPKG_ROOT="$VCPKG_INSTALLATION_ROOT" + vcpkg install openssl:x64-windows + fi + shell: bash - uses: actions-rs/cargo@v1 with: command: build diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 03b87fb0..caddfc68 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,6 +39,14 @@ jobs: toolchain: stable target: ${{ matrix.target }} override: true + - name: Install openssl + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + echo "openssl should be already installed" + elif [ "$RUNNER_OS" == "Windows" ]; then + vcpkg install openssl:x64-windows + fi + shell: bash - uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # tag=v1 with: command: build From 4694ce68d56bf0d738bd003767b5ff038aa00d84 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 21 Jun 2022 12:06:06 +0200 Subject: [PATCH 047/177] Remove aarch64-apple-darwin target --- .github/workflows/build.yml | 4 +--- .github/workflows/release.yml | 3 --- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ae8cab55..80b9f888 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -175,8 +175,6 @@ jobs: os: windows-latest - target: x86_64-apple-darwin os: macos-latest - - target: aarch64-apple-darwin - os: macos-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 @@ -196,8 +194,8 @@ jobs: if [ "$RUNNER_OS" == "Linux" ]; then echo "openssl should be already installed" elif [ "$RUNNER_OS" == "Windows" ]; then - export VCPKG_ROOT="$VCPKG_INSTALLATION_ROOT" vcpkg install openssl:x64-windows + vcpkg integrate install fi shell: bash - uses: actions-rs/cargo@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index caddfc68..99a47f65 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,9 +25,6 @@ jobs: - target: x86_64-apple-darwin os: macos-latest file-suffix: "" - - target: aarch64-apple-darwin - os: macos-latest - file-suffix: "" steps: - uses: 
actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # tag=v3 - uses: actions/setup-go@b22fbbc2921299758641fab08929b4ac52b32923 # tag=v3 From 53c2fd313960f52d1337487bddd16057cdf32342 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Jun 2022 13:09:06 +0200 Subject: [PATCH 048/177] Improved error message --- src/kube.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index ba048441..bc8274f8 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -64,7 +64,7 @@ pub async fn get_service_endpoint_urls( } }, None => { - warn!("Could not determine the node the endpoint {service_name} is running on because subset had no addresses"); + warn!("Could not determine the node the endpoint {service_name} is running on because subset had no addresses. Is the service {service_name} up and running?"); return Ok(IndexMap::new()); } }, From a2446fe5c959ed38f13d0cfb7b692ecee42be7d9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Jun 2022 13:12:29 +0200 Subject: [PATCH 049/177] Retry windows build --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 80b9f888..14c7fd96 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -194,8 +194,8 @@ jobs: if [ "$RUNNER_OS" == "Linux" ]; then echo "openssl should be already installed" elif [ "$RUNNER_OS" == "Windows" ]; then - vcpkg install openssl:x64-windows vcpkg integrate install + vcpkg install openssl:x64-windows-static-md fi shell: bash - uses: actions-rs/cargo@v1 From fb83d8d9ea7bd9290e9d81a819bdcb6d490669e5 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Jun 2022 14:39:54 +0200 Subject: [PATCH 050/177] Rename services alias se -> svc --- src/arguments.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arguments.rs b/src/arguments.rs index 5d4da6e1..2d342607 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -73,7 +73,7 @@ pub enum CliCommand { Stack(CliCommandStack), /// This subcommand interacts with deployed services of products. - #[clap(subcommand, alias("se"))] + #[clap(subcommand, alias("svc"))] Services(CliCommandServices), } From ddc9bd3a56686bb87f198ba22a1ff652809cccf7 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 22 Jun 2022 15:07:58 +0200 Subject: [PATCH 051/177] Improve error message --- src/kube.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index bc8274f8..a9d4a521 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -73,7 +73,7 @@ pub async fn get_service_endpoint_urls( return Ok(IndexMap::new()); } None => { - warn!("Could not determine the node the endpoint {service_name} is running on because the endpoint has no subset"); + warn!("Could not determine the node the endpoint {service_name} is running on because the endpoint has no subset. 
Is the service {service_name} up and running?"); return Ok(IndexMap::new()); } }; From 9736269c08c2c7d99b6bebb9e4df5976939ecf90 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 30 Jun 2022 18:21:09 +0200 Subject: [PATCH 052/177] Update stacks to new release --- stacks.yaml | 8 ++++---- stacks/{druid-superset => druid-superset-s3}/druid.yaml | 0 .../{druid-superset => druid-superset-s3}/superset.yaml | 0 .../{druid-superset => druid-superset-s3}/zookeeper.yaml | 0 4 files changed, 4 insertions(+), 4 deletions(-) rename stacks/{druid-superset => druid-superset-s3}/druid.yaml (100%) rename stacks/{druid-superset => druid-superset-s3}/superset.yaml (100%) rename stacks/{druid-superset => druid-superset-s3}/zookeeper.yaml (100%) diff --git a/stacks.yaml b/stacks.yaml index 49cb6df8..352d072f 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -2,7 +2,7 @@ stacks: druid-superset-s3: description: Stack containing MinIO, Druid and Superset for data visualization - stackableRelease: 22.06-sbernauer + stackableRelease: 22.06 labels: - druid - superset @@ -48,6 +48,6 @@ stacks: username: superset password: superset database: superset - - plainYaml: stacks/druid-superset/zookeeper.yaml - - plainYaml: stacks/druid-superset/druid.yaml - - plainYaml: stacks/druid-superset/superset.yaml + - plainYaml: stacks/druid-superset-s3/zookeeper.yaml + - plainYaml: stacks/druid-superset-s3/druid.yaml + - plainYaml: stacks/druid-superset-s3/superset.yaml diff --git a/stacks/druid-superset/druid.yaml b/stacks/druid-superset-s3/druid.yaml similarity index 100% rename from stacks/druid-superset/druid.yaml rename to stacks/druid-superset-s3/druid.yaml diff --git a/stacks/druid-superset/superset.yaml b/stacks/druid-superset-s3/superset.yaml similarity index 100% rename from stacks/druid-superset/superset.yaml rename to stacks/druid-superset-s3/superset.yaml diff --git a/stacks/druid-superset/zookeeper.yaml b/stacks/druid-superset-s3/zookeeper.yaml similarity index 100% rename from stacks/druid-superset/zookeeper.yaml rename to stacks/druid-superset-s3/zookeeper.yaml From 7a03b79657a09608a681034e1d4c8a44ab376b82 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 30 Jun 2022 18:23:41 +0200 Subject: [PATCH 053/177] Mark the stacks command as experimental --- src/arguments.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arguments.rs b/src/arguments.rs index 2d342607..246e2b75 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -68,7 +68,7 @@ pub enum CliCommand { #[clap(subcommand, alias("r"), alias("re"))] Release(CliCommandRelease), - /// This subcommand interacts with stacks, which are ready-to-use combinations of products. + /// This EXPERIMENTAL subcommand interacts with stacks, which are ready-to-use combinations of products. 
#[clap(subcommand, alias("s"), alias("st"))] Stack(CliCommandStack), From 7810eac09b5a74edd428e788e1b3a4122bab4510 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 30 Jun 2022 18:41:29 +0200 Subject: [PATCH 054/177] try windows build --- .github/workflows/build.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 14c7fd96..87a7c3ac 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -163,6 +163,8 @@ jobs: run_build: name: Build for ${{ matrix.target }} runs-on: ${{ matrix.os }} + env: + VCPKGRS_DYNAMIC: 1 strategy: fail-fast: false matrix: @@ -195,7 +197,7 @@ jobs: echo "openssl should be already installed" elif [ "$RUNNER_OS" == "Windows" ]; then vcpkg integrate install - vcpkg install openssl:x64-windows-static-md + vcpkg install openssl:x64-windows fi shell: bash - uses: actions-rs/cargo@v1 From 81810152cdeeea9277c6a017865fcd75cc0c3e78 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 10:28:32 +0200 Subject: [PATCH 055/177] Add Cargo.lock --- Cargo.lock | 408 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 408 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 87cb0964..751a878d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -117,6 +117,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "serde", + "winapi", +] + [[package]] name = "clap" version = "3.2.8" @@ -207,6 +220,27 @@ dependencies = [ "syn", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "either" version = "1.7.0" @@ -364,6 +398,17 @@ dependencies = [ "slab", ] +[[package]] +name = "getrandom" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "gobuild" version = "0.1.0-alpha.2" @@ -435,6 +480,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.7.1" @@ -477,6 +528,36 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-openssl" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" +dependencies = [ + "http", + "hyper", + "linked_hash_set", + "once_cell", + "openssl", + "openssl-sys", + "parking_lot", + "tokio", + 
"tokio-openssl", + "tower-layer", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -548,6 +629,93 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonpath_lib" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaa63191d68230cccb81c5aa23abd53ed64d83337cacbb25a7b8c7979523774f" +dependencies = [ + "log", + "serde", + "serde_json", +] + +[[package]] +name = "k8s-openapi" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ae2c04fcee6b01b04e3aadd56bb418932c8e0a9d8a93f48bc68c6bdcdb559d" +dependencies = [ + "base64", + "bytes", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "kube" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f68b954ea9ad888de953fb1488bd8f377c4c78d82d4642efa5925189210b50b7" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", +] + +[[package]] +name = "kube-client" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150dc7107d9acf4986088f284a0a6dddc5ae37ef1ffdf142f6811dc5998dd58" +dependencies = [ + "base64", + "bytes", + "chrono", + "dirs-next", + "either", + "futures", + "http", + "http-body", + "hyper", + "hyper-openssl", + "hyper-timeout", + "jsonpath_lib", + "k8s-openapi", + "kube-core", + "openssl", + "pem", + "pin-project", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8c429676abe6a73b374438d5ca02caaf9ae7a635441253c589b779fa5d0622" +dependencies = [ + "chrono", + "form_urlencoded", + "http", + "k8s-openapi", + "once_cell", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -566,6 +734,25 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "lock_api" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.17" @@ -623,6 +810,25 @@ dependencies = [ "tempfile", ] +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + [[package]] name = "num_cpus" version = "1.13.1" @@ -684,18 +890,79 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-float" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" +dependencies = [ + "num-traits", +] + [[package]] name = "os_str_bytes" version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + +[[package]] +name = "pem" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" +dependencies = [ + "base64", +] + [[package]] name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pin-project" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.9" @@ -765,6 +1032,17 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom", + "redox_syscall", + "thiserror", +] + [[package]] name = "regex" version = "1.6.0" @@ -844,6 +1122,22 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + [[package]] name = "security-framework" version = "2.6.1" @@ -876,6 +1170,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + 
[[package]] name = "serde_derive" version = "1.0.138" @@ -893,6 +1197,7 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ + "indexmap", "itoa", "ryu", "serde", @@ -922,12 +1227,27 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + [[package]] name = "slab" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +[[package]] +name = "smallvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" + [[package]] name = "socket2" version = "0.4.4" @@ -947,12 +1267,15 @@ dependencies = [ "env_logger", "gobuild", "indexmap", + "k8s-openapi", + "kube", "lazy_static", "log", "reqwest", "serde", "serde_json", "serde_yaml", + "tokio", "which", ] @@ -1050,11 +1373,22 @@ dependencies = [ "num_cpus", "once_cell", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "winapi", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "1.8.0" @@ -1076,6 +1410,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-openssl" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +dependencies = [ + "futures-util", + "openssl", + "openssl-sys", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.3" @@ -1090,6 +1436,49 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "base64", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.2" @@ -1103,10 +1492,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if", + "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = 
"tracing-attributes" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" version = "0.1.28" @@ -1361,3 +1763,9 @@ checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] + +[[package]] +name = "zeroize" +version = "1.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20b578acffd8516a6c3f2a1bdefc1ec37e547bb4e0fb8b6b01a4cafc886b4442" From 482604ff7950ec4c9c8423f250759747a0ea177b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 11:02:51 +0200 Subject: [PATCH 056/177] Give openssl vendored a try --- Cargo.lock | 97 +++++++----------------------------------------------- Cargo.toml | 4 +-- 2 files changed, 14 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 751a878d..95a0f542 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -528,24 +528,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-openssl" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" -dependencies = [ - "http", - "hyper", - "linked_hash_set", - "once_cell", - "openssl", - "openssl-sys", - "parking_lot", - "tokio", - "tokio-openssl", - "tower-layer", -] - [[package]] name = "hyper-timeout" version = "0.4.1" @@ -680,8 +662,8 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-openssl", "hyper-timeout", + "hyper-tls", "jsonpath_lib", "k8s-openapi", "kube-core", @@ -694,6 +676,7 @@ dependencies = [ "serde_yaml", "thiserror", "tokio", + "tokio-native-tls", "tokio-util", "tower", "tower-http", @@ -734,25 +717,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "linked_hash_set" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "lock_api" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" -dependencies = [ - "autocfg", - "scopeguard", -] - [[package]] name = "log" version = "0.4.17" @@ -877,6 +841,15 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-src" +version = "111.22.0+1.1.1q" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.74" @@ -886,6 +859,7 @@ dependencies = [ "autocfg", "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -905,29 +879,6 @@ version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-sys", -] - [[package]] name = "pem" version = "1.0.2" @@ -1122,12 +1073,6 @@ dependencies = [ "windows-sys", ] -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - [[package]] name = "secrecy" version = "0.8.0" @@ -1242,12 +1187,6 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" -[[package]] -name = "smallvec" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" - [[package]] name = "socket2" version = "0.4.4" @@ -1410,18 +1349,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-openssl" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" -dependencies = [ - "futures-util", - "openssl", - "openssl-sys", - "tokio", -] - [[package]] name = "tokio-util" version = "0.7.3" diff --git a/Cargo.toml b/Cargo.toml index 03a4459b..de3d4e87 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,14 @@ clap = { version = "3.2", features = ["derive", "cargo"] } env_logger = "0.9" indexmap = { version = "1.9", features = ["serde"] } k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } -kube = "0.73.1" +kube = { version = "0.73.1", default-features = false, features = ["client", "native-tls" ] } lazy_static = "1.4" log = "0.4" which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" -reqwest = "0.11" +reqwest = { version = "0.11", default-features = false, features = ["native-tls-vendored" ] } tokio = "1.19.2" [profile.release] From 44cf09e6b82504bf9a96d9cfe9c25082b1982503 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 11:14:51 +0200 Subject: [PATCH 057/177] Give openssl vendored a try --- Cargo.lock | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 5 ++-- 2 files changed, 89 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95a0f542..c357900a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -528,6 +528,24 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-openssl" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" +dependencies = [ + "http", + "hyper", + "linked_hash_set", + "once_cell", + "openssl", + "openssl-sys", + "parking_lot", + "tokio", + "tokio-openssl", + "tower-layer", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -662,8 +680,8 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-openssl", "hyper-timeout", - "hyper-tls", "jsonpath_lib", "k8s-openapi", "kube-core", @@ -676,7 +694,6 @@ dependencies = [ "serde_yaml", "thiserror", "tokio", - "tokio-native-tls", 
"tokio-util", "tower", "tower-http", @@ -717,6 +734,25 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "lock_api" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.17" @@ -879,6 +915,29 @@ version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + [[package]] name = "pem" version = "1.0.2" @@ -1073,6 +1132,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + [[package]] name = "secrecy" version = "0.8.0" @@ -1187,6 +1252,12 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +[[package]] +name = "smallvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" + [[package]] name = "socket2" version = "0.4.4" @@ -1210,6 +1281,7 @@ dependencies = [ "kube", "lazy_static", "log", + "openssl", "reqwest", "serde", "serde_json", @@ -1349,6 +1421,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-openssl" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +dependencies = [ + "futures-util", + "openssl", + "openssl-sys", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.3" diff --git a/Cargo.toml b/Cargo.toml index de3d4e87..c22638be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,15 @@ clap = { version = "3.2", features = ["derive", "cargo"] } env_logger = "0.9" indexmap = { version = "1.9", features = ["serde"] } k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } -kube = { version = "0.73.1", default-features = false, features = ["client", "native-tls" ] } +kube = "0.73.1" lazy_static = "1.4" log = "0.4" +openssl = { version = "0.10.36", features = ["vendored"] } # Must match version from kube which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" 
-reqwest = { version = "0.11", default-features = false, features = ["native-tls-vendored" ] } +reqwest = "0.11" tokio = "1.19.2" [profile.release] From ebd57136eea7ff4d81982ab9d4a4c67e8a188d18 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 11:20:38 +0200 Subject: [PATCH 058/177] Don't install openssl in Github action --- .github/workflows/build.yml | 9 --------- Cargo.toml | 4 ++-- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 87a7c3ac..b9d8273b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -191,15 +191,6 @@ jobs: - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 with: key: build-${{ matrix.target }} - - name: Install openssl - run: | - if [ "$RUNNER_OS" == "Linux" ]; then - echo "openssl should be already installed" - elif [ "$RUNNER_OS" == "Windows" ]; then - vcpkg integrate install - vcpkg install openssl:x64-windows - fi - shell: bash - uses: actions-rs/cargo@v1 with: command: build diff --git a/Cargo.toml b/Cargo.toml index c22638be..6a207b76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ clap = { version = "3.2", features = ["derive", "cargo"] } env_logger = "0.9" indexmap = { version = "1.9", features = ["serde"] } k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } -kube = "0.73.1" +kube = "0.73.1" # Using openssl (and not native-tls) as kube-rs team tries to move away from native-tls lazy_static = "1.4" log = "0.4" openssl = { version = "0.10.36", features = ["vendored"] } # Must match version from kube @@ -21,7 +21,7 @@ which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" -reqwest = "0.11" +reqwest = "0.11" # Using native-tls as openssl does not seem to be supported as of 0.11 tokio = "1.19.2" [profile.release] From b8c4902beb4cd726aba82ee48a964d7e0379f0ab Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 11:32:22 +0200 Subject: [PATCH 059/177] Re-add aarch64-apple-darwin target --- .github/workflows/build.yml | 2 ++ .github/workflows/release.yml | 11 +++-------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b9d8273b..927751d1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -177,6 +177,8 @@ jobs: os: windows-latest - target: x86_64-apple-darwin os: macos-latest + - target: aarch64-apple-darwin + os: macos-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 99a47f65..03b87fb0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,6 +25,9 @@ jobs: - target: x86_64-apple-darwin os: macos-latest file-suffix: "" + - target: aarch64-apple-darwin + os: macos-latest + file-suffix: "" steps: - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # tag=v3 - uses: actions/setup-go@b22fbbc2921299758641fab08929b4ac52b32923 # tag=v3 @@ -36,14 +39,6 @@ jobs: toolchain: stable target: ${{ matrix.target }} override: true - - name: Install openssl - run: | - if [ "$RUNNER_OS" == "Linux" ]; then - echo "openssl should be already installed" - elif [ "$RUNNER_OS" == "Windows" ]; then - vcpkg install openssl:x64-windows - fi - shell: bash - uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # tag=v1 with: command: build From 
eb53bc54867e22f8343b45db98967cbd15038535 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 12:26:44 +0200 Subject: [PATCH 060/177] Try removing go --- .github/workflows/build.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 927751d1..2b187e03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -181,9 +181,6 @@ jobs: os: macos-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '^1.18.1' - uses: actions-rs/toolchain@v1 with: profile: minimal From 4f16d898dbf696220ae9049448075c555de7b243 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 12:33:21 +0200 Subject: [PATCH 061/177] Try cross-compilation --- .github/workflows/build.yml | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2b187e03..631e3f65 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -162,23 +162,16 @@ jobs: run_build: name: Build for ${{ matrix.target }} - runs-on: ${{ matrix.os }} - env: - VCPKGRS_DYNAMIC: 1 + runs-on: ubuntu-latest strategy: fail-fast: false matrix: - include: - - target: x86_64-unknown-linux-gnu - os: ubuntu-latest - - target: x86_64-pc-windows-gnu - os: windows-latest - - target: x86_64-pc-windows-msvc - os: windows-latest - - target: x86_64-apple-darwin - os: macos-latest - - target: aarch64-apple-darwin - os: macos-latest + target: + - x86_64-unknown-linux-gnu + - x86_64-pc-windows-gnu + - x86_64-pc-windows-msvc + - x86_64-apple-darwin + - aarch64-apple-darwin steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 @@ -192,5 +185,6 @@ jobs: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 with: + use-cross: true command: build args: --target=${{ matrix.target }} From cae4ac65057dda60960512f6acc8ce9d892f4371 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 12:42:03 +0200 Subject: [PATCH 062/177] dont use cross --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 631e3f65..e7751c44 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -185,6 +185,6 @@ jobs: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 with: - use-cross: true + # use-cross: true command: build args: --target=${{ matrix.target }} From 99809f23cd4d83627335260fd0c5317e8fc1e1a9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 12:50:32 +0200 Subject: [PATCH 063/177] Revert "dont use cross" This reverts commit cae4ac65057dda60960512f6acc8ce9d892f4371. --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e7751c44..631e3f65 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -185,6 +185,6 @@ jobs: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 with: - # use-cross: true + use-cross: true command: build args: --target=${{ matrix.target }} From 0c200f1e4c3f1352fee92c8a4b5ce26ea9e0be38 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 12:50:42 +0200 Subject: [PATCH 064/177] Revert "Try cross-compilation" This reverts commit 4f16d898dbf696220ae9049448075c555de7b243. 
--- .github/workflows/build.yml | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 631e3f65..2b187e03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -162,16 +162,23 @@ jobs: run_build: name: Build for ${{ matrix.target }} - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} + env: + VCPKGRS_DYNAMIC: 1 strategy: fail-fast: false matrix: - target: - - x86_64-unknown-linux-gnu - - x86_64-pc-windows-gnu - - x86_64-pc-windows-msvc - - x86_64-apple-darwin - - aarch64-apple-darwin + include: + - target: x86_64-unknown-linux-gnu + os: ubuntu-latest + - target: x86_64-pc-windows-gnu + os: windows-latest + - target: x86_64-pc-windows-msvc + os: windows-latest + - target: x86_64-apple-darwin + os: macos-latest + - target: aarch64-apple-darwin + os: macos-latest steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 @@ -185,6 +192,5 @@ jobs: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 with: - use-cross: true command: build args: --target=${{ matrix.target }} From 708f217c67784047fdae6e5b107b6700f17c2834 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:09:23 +0200 Subject: [PATCH 065/177] Install strawberryperl on Windows --- .github/workflows/build.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2b187e03..b7ae7eeb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -190,6 +190,12 @@ jobs: - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 with: key: build-${{ matrix.target }} + - name: Install strawberryperl on Windows + run: | + if [ "$RUNNER_OS" == "Windows" ]; then + choco install strawberryperl + fi + shell: bash - uses: actions-rs/cargo@v1 with: command: build From 7eec1f5cede07f3c3bf90d2eabf61afb961096f1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:11:12 +0200 Subject: [PATCH 066/177] Revert "Install strawberryperl on Windows" as already installed This reverts commit 708f217c67784047fdae6e5b107b6700f17c2834. 
--- .github/workflows/build.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b7ae7eeb..2b187e03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -190,12 +190,6 @@ jobs: - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 with: key: build-${{ matrix.target }} - - name: Install strawberryperl on Windows - run: | - if [ "$RUNNER_OS" == "Windows" ]; then - choco install strawberryperl - fi - shell: bash - uses: actions-rs/cargo@v1 with: command: build From 87eb70d23f92962c5017bdcfcd81b8913287424d Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:14:12 +0200 Subject: [PATCH 067/177] Set OPENSSL_SRC_PERL for windows --- .github/workflows/build.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2b187e03..dbdd9f2d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -171,14 +171,20 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + env: - target: x86_64-pc-windows-gnu os: windows-latest + env: - target: x86_64-pc-windows-msvc os: windows-latest + env: + OPENSSL_SRC_PERL: C:\Strawberry\perl.exe - target: x86_64-apple-darwin os: macos-latest + env: - target: aarch64-apple-darwin os: macos-latest + env: steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 @@ -191,6 +197,7 @@ jobs: with: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 + env: ${{ matrix.env }} with: command: build args: --target=${{ matrix.target }} From a64c7252ebd41a02722899daa427b22b01a1a1ce Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:17:36 +0200 Subject: [PATCH 068/177] Set OPENSSL_SRC_PERL for windows --- .github/workflows/build.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index dbdd9f2d..4555fc38 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -171,20 +171,19 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - env: + perl: "" - target: x86_64-pc-windows-gnu os: windows-latest - env: + perl: "" - target: x86_64-pc-windows-msvc os: windows-latest - env: - OPENSSL_SRC_PERL: C:\Strawberry\perl.exe + perl: "C:\Strawberry\perl.exe" - target: x86_64-apple-darwin os: macos-latest - env: + perl: "" - target: aarch64-apple-darwin os: macos-latest - env: + perl: "" steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 @@ -197,7 +196,8 @@ jobs: with: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 - env: ${{ matrix.env }} + env: + OPENSSL_SRC_PERL: ${{ matrix.perl }} with: command: build args: --target=${{ matrix.target }} From 77fc4741b2806dcb5b2e19dfeb75eff245f696ea Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:19:23 +0200 Subject: [PATCH 069/177] Set OPENSSL_SRC_PERL for windows --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4555fc38..07421912 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -177,7 +177,7 @@ jobs: perl: "" - target: x86_64-pc-windows-msvc os: windows-latest - perl: "C:\Strawberry\perl.exe" + perl: "C:\\Strawberry\\perl.exe" - target: x86_64-apple-darwin os: macos-latest perl: "" From 662db6133dc873c046a9cc98196ae292b9f76039 Mon Sep 17 00:00:00 2001 
From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:42:06 +0200 Subject: [PATCH 070/177] Set OPENSSL_SRC_PERL for windows --- .github/workflows/build.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 07421912..b4d4d608 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -177,7 +177,13 @@ jobs: perl: "" - target: x86_64-pc-windows-msvc os: windows-latest - perl: "C:\\Strawberry\\perl.exe" + perl: "C:\\strawberry\\perl\\bin" + - target: x86_64-pc-windows-msvc + os: windows-latest + perl: "C:\\strawberry\\perl\\bin\\perl" + - target: x86_64-pc-windows-msvc + os: windows-latest + perl: "C:\\strawberry\\perl\\bin\\perl.exe" - target: x86_64-apple-darwin os: macos-latest perl: "" From 78d660c0b7ed387632808dfc7ab56b549da3a37a Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:44:30 +0200 Subject: [PATCH 071/177] Set OPENSSL_SRC_PERL for windows --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b4d4d608..ebb2592b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -174,7 +174,7 @@ jobs: perl: "" - target: x86_64-pc-windows-gnu os: windows-latest - perl: "" + perl: "C:\\strawberry\\perl\\bin" - target: x86_64-pc-windows-msvc os: windows-latest perl: "C:\\strawberry\\perl\\bin" From e1fed9d9dc0fd2ee94948643701cc76fde211672 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 13:44:41 +0200 Subject: [PATCH 072/177] Set OPENSSL_SRC_PERL for windows --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ebb2592b..40689bf0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -174,7 +174,7 @@ jobs: perl: "" - target: x86_64-pc-windows-gnu os: windows-latest - perl: "C:\\strawberry\\perl\\bin" + perl: "C:\\strawberry\\perl\\bin\\perl.exe" - target: x86_64-pc-windows-msvc os: windows-latest perl: "C:\\strawberry\\perl\\bin" From ae9619decf73bc5c117b0be1bed8b2e98218360b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 14:24:36 +0200 Subject: [PATCH 073/177] Remove strawberry installation --- .github/workflows/build.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 40689bf0..2b187e03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -171,25 +171,14 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - perl: "" - target: x86_64-pc-windows-gnu os: windows-latest - perl: "C:\\strawberry\\perl\\bin\\perl.exe" - target: x86_64-pc-windows-msvc os: windows-latest - perl: "C:\\strawberry\\perl\\bin" - - target: x86_64-pc-windows-msvc - os: windows-latest - perl: "C:\\strawberry\\perl\\bin\\perl" - - target: x86_64-pc-windows-msvc - os: windows-latest - perl: "C:\\strawberry\\perl\\bin\\perl.exe" - target: x86_64-apple-darwin os: macos-latest - perl: "" - target: aarch64-apple-darwin os: macos-latest - perl: "" steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 @@ -202,8 +191,6 @@ jobs: with: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 - env: - OPENSSL_SRC_PERL: ${{ matrix.perl }} with: command: build args: --target=${{ matrix.target }} From a19a56c637d5eb622d48706bfd72b454184116a3 Mon Sep 17 00:00:00 2001 From: 
Sebastian Bernauer Date: Thu, 7 Jul 2022 14:24:41 +0200 Subject: [PATCH 074/177] Revert "Try removing go" This reverts commit eb53bc54867e22f8343b45db98967cbd15038535. --- .github/workflows/build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2b187e03..927751d1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -181,6 +181,9 @@ jobs: os: macos-latest steps: - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: '^1.18.1' - uses: actions-rs/toolchain@v1 with: profile: minimal From 00b568ff686f8be0f0c7ff0a1b9914b09e4f39bc Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 15:28:01 +0200 Subject: [PATCH 075/177] Use different perl for windows --- .github/workflows/build.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 927751d1..1e2c1149 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -171,14 +171,19 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + perl: "" - target: x86_64-pc-windows-gnu os: windows-latest + perl: "C:\\msys64\\usr\\bin\\perl.exe" - target: x86_64-pc-windows-msvc os: windows-latest + perl: "" - target: x86_64-apple-darwin os: macos-latest + perl: "" - target: aarch64-apple-darwin os: macos-latest + perl: "" steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 @@ -194,6 +199,8 @@ jobs: with: key: build-${{ matrix.target }} - uses: actions-rs/cargo@v1 + env: + OPENSSL_SRC_PERL: ${{ matrix.perl }} with: command: build args: --target=${{ matrix.target }} From 5412b4892752b8be2430153c8e96999e3cfdcf68 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 16:07:58 +0200 Subject: [PATCH 076/177] Try MSYS2 for Windows --- .github/workflows/build.yml | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1e2c1149..c43ebc5b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -171,19 +171,14 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - perl: "" - target: x86_64-pc-windows-gnu os: windows-latest - perl: "C:\\msys64\\usr\\bin\\perl.exe" - target: x86_64-pc-windows-msvc os: windows-latest - perl: "" - target: x86_64-apple-darwin os: macos-latest - perl: "" - target: aarch64-apple-darwin os: macos-latest - perl: "" steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 @@ -198,9 +193,21 @@ jobs: - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 with: key: build-${{ matrix.target }} - - uses: actions-rs/cargo@v1 - env: - OPENSSL_SRC_PERL: ${{ matrix.perl }} + - name: Prepare Windows env + if: matrix.os == 'windows-latest' + uses: msys2/setup-msys2@v2 + with: + msystem: MINGW64 + update: true + install: base-devel mingw-w64-x86_64-go mingw-w64-x86_64-rust + - name: Build for Windows + if: matrix.os == 'windows-latest' + run: | + echo 'Running in MSYS2!' 
+ cargo build --target=${{ matrix.target }} + - name: Build for non Windows + if: matrix.os != 'windows-latest' + uses: actions-rs/cargo@v1 with: command: build args: --target=${{ matrix.target }} From ea6971afebbb879e3cdbf858251b5755520e5a6c Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 16:09:54 +0200 Subject: [PATCH 077/177] Try MSYS2 for Windows --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c43ebc5b..69b80170 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -205,6 +205,7 @@ jobs: run: | echo 'Running in MSYS2!' cargo build --target=${{ matrix.target }} + shell: msys2 {0} - name: Build for non Windows if: matrix.os != 'windows-latest' uses: actions-rs/cargo@v1 From c570df5970c53243454b304e520f9b2043a668c7 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 16:43:31 +0200 Subject: [PATCH 078/177] Add x86_64-unknown-linux-musl --- .github/workflows/build.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 69b80170..207464a5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -163,18 +163,16 @@ jobs: run_build: name: Build for ${{ matrix.target }} runs-on: ${{ matrix.os }} - env: - VCPKGRS_DYNAMIC: 1 strategy: fail-fast: false matrix: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + - target: x86_64-unknown-linux-musl + os: ubuntu-latest - target: x86_64-pc-windows-gnu os: windows-latest - - target: x86_64-pc-windows-msvc - os: windows-latest - target: x86_64-apple-darwin os: macos-latest - target: aarch64-apple-darwin @@ -183,7 +181,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '^1.18.1' + go-version: '^1.18.3' - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -206,7 +204,7 @@ jobs: echo 'Running in MSYS2!' cargo build --target=${{ matrix.target }} shell: msys2 {0} - - name: Build for non Windows + - name: Build for non-Windows if: matrix.os != 'windows-latest' uses: actions-rs/cargo@v1 with: From 29218f0be4129dc202322ecba14281dd70894950 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 16:51:13 +0200 Subject: [PATCH 079/177] Add musl-tools --- .github/workflows/build.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 207464a5..db116466 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -191,6 +191,9 @@ jobs: - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 with: key: build-${{ matrix.target }} + - name: Prepare Ubuntu env + if: matrix.os == 'ubuntu-latest' + run: sudo apt install -y musl-tools - name: Prepare Windows env if: matrix.os == 'windows-latest' uses: msys2/setup-msys2@v2 @@ -198,15 +201,15 @@ jobs: msystem: MINGW64 update: true install: base-devel mingw-w64-x86_64-go mingw-w64-x86_64-rust - - name: Build for Windows - if: matrix.os == 'windows-latest' - run: | - echo 'Running in MSYS2!' - cargo build --target=${{ matrix.target }} - shell: msys2 {0} - name: Build for non-Windows if: matrix.os != 'windows-latest' uses: actions-rs/cargo@v1 with: command: build args: --target=${{ matrix.target }} + - name: Build for Windows + if: matrix.os == 'windows-latest' + run: | + echo 'Running in MSYS2!' 
+ cargo build --target=${{ matrix.target }} + shell: msys2 {0} From 443b10cb56ef86e325fd5c25a25d7c52741c8621 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 7 Jul 2022 17:02:43 +0200 Subject: [PATCH 080/177] Update release action --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 36 ++++++++++++++++++++++++++--------- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index db116466..a8bb4d03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -188,7 +188,7 @@ jobs: toolchain: stable target: ${{ matrix.target }} override: true - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 + - uses: Swatinem/rust-cache@v1 with: key: build-${{ matrix.target }} - name: Prepare Ubuntu env diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 03b87fb0..fe977bca 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,12 +16,12 @@ jobs: - target: x86_64-unknown-linux-gnu os: ubuntu-latest file-suffix: "" + - target: x86_64-unknown-linux-musl + os: ubuntu-latest + file-suffix: "" - target: x86_64-pc-windows-gnu os: windows-latest file-suffix: ".exe" - - target: x86_64-pc-windows-msvc - os: windows-latest - file-suffix: ".exe" - target: x86_64-apple-darwin os: macos-latest file-suffix: "" @@ -29,23 +29,41 @@ jobs: os: macos-latest file-suffix: "" steps: - - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # tag=v3 - - uses: actions/setup-go@b22fbbc2921299758641fab08929b4ac52b32923 # tag=v3 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 with: - go-version: '^1.18.1' - - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # tag=v1 + go-version: '^1.18.3' + - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable target: ${{ matrix.target }} override: true - - uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # tag=v1 + - name: Prepare Ubuntu env + if: matrix.os == 'ubuntu-latest' + run: sudo apt install -y musl-tools + - name: Prepare Windows env + if: matrix.os == 'windows-latest' + uses: msys2/setup-msys2@v2 + with: + msystem: MINGW64 + update: true + install: base-devel mingw-w64-x86_64-go mingw-w64-x86_64-rust + - name: Build for non-Windows + if: matrix.os != 'windows-latest' + uses: actions-rs/cargo@v1 with: command: build args: --release --target=${{ matrix.target }} + - name: Build for Windows + if: matrix.os == 'windows-latest' + run: | + echo 'Running in MSYS2!' + cargo build --release --target=${{ matrix.target }} + shell: msys2 {0} - name: Rename binary file run: mv target/${{ matrix.target }}/release/stackablectl${{ matrix.file-suffix }} stackablectl-${{ matrix.target }}${{ matrix.file-suffix }} - name: Upload Release binaries - uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5 # tag=v1 + uses: softprops/action-gh-release@v1 with: files: stackablectl-${{ matrix.target }}${{ matrix.file-suffix }} From a5b8eba6b2ccdf0287224828ab917ebe3f46af05 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 8 Jul 2022 08:43:41 +0200 Subject: [PATCH 081/177] Remove target x86_64-unknown-linux-musl as it produces segfaults Thread 2 "stackablectl-x8" received signal SIGSEGV, Segmentation fault. 
[Switching to LWP 19991] 0x00007ffff5033c53 in runtime.argv_index (argv=0x0, i=-156489395) at /opt/hostedtoolcache/go/1.18.3/x64/src/runtime/runtime1.go:58 58 /opt/hostedtoolcache/go/1.18.3/x64/src/runtime/runtime1.go: No such file or directory. --- .github/workflows/build.yml | 2 -- .github/workflows/release.yml | 3 --- 2 files changed, 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a8bb4d03..4b6dfc5d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -169,8 +169,6 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest - - target: x86_64-unknown-linux-musl - os: ubuntu-latest - target: x86_64-pc-windows-gnu os: windows-latest - target: x86_64-apple-darwin diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fe977bca..865aab52 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,9 +16,6 @@ jobs: - target: x86_64-unknown-linux-gnu os: ubuntu-latest file-suffix: "" - - target: x86_64-unknown-linux-musl - os: ubuntu-latest - file-suffix: "" - target: x86_64-pc-windows-gnu os: windows-latest file-suffix: ".exe" From 59246c5639334f6d13d7ed20a5c9d78e62502c31 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Wed, 13 Jul 2022 14:12:11 +0200 Subject: [PATCH 082/177] Remove todo mark --- stacks/druid-superset-s3/druid.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks/druid-superset-s3/druid.yaml b/stacks/druid-superset-s3/druid.yaml index d5da68e7..1254deed 100644 --- a/stacks/druid-superset-s3/druid.yaml +++ b/stacks/druid-superset-s3/druid.yaml @@ -23,7 +23,7 @@ spec: accessStyle: Path credentials: secretClass: druid-s3-credentials - baseKey: data # TODO Rename to prefix or so + baseKey: data brokers: roleGroups: default: From 9792205e39ded10f2a4d051b7f3d2c3615802976 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 14 Jul 2022 08:11:51 +0200 Subject: [PATCH 083/177] Add stack for airflow --- stacks.yaml | 29 +++++++++++++++++++ stacks/airflow/airflow.yaml | 40 ++++++++++++++++++++++++++ stacks/druid-superset-s3/superset.yaml | 28 +++++++++--------- 3 files changed, 83 insertions(+), 14 deletions(-) create mode 100644 stacks/airflow/airflow.yaml diff --git a/stacks.yaml b/stacks.yaml index 352d072f..b4ca37c7 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -51,3 +51,32 @@ stacks: - plainYaml: stacks/druid-superset-s3/zookeeper.yaml - plainYaml: stacks/druid-superset-s3/druid.yaml - plainYaml: stacks/druid-superset-s3/superset.yaml + airflow: + description: Stack containing Airflow scheduling platform + stackableRelease: 22.06 + labels: + - airflow + manifests: + - helmChart: + releaseName: postgresql-airflow + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 11.0.0 + options: + auth: + username: airflow + password: airflow + database: airflow + - helmChart: + releaseName: redis-airflow + name: redis + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 16.13.2 + options: + auth: + password: airflow + - plainYaml: stacks/airflow/airflow.yaml diff --git a/stacks/airflow/airflow.yaml b/stacks/airflow/airflow.yaml new file mode 100644 index 00000000..fdbfd9b1 --- /dev/null +++ b/stacks/airflow/airflow.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: airflow.stackable.tech/v1alpha1 +kind: AirflowCluster +metadata: + name: airflow +spec: + version: 2.2.5-python39-stackable0.3.0 + statsdExporterVersion: v0.22.4 + executor: CeleryExecutor + loadExamples: 
true + exposeConfig: false + credentialsSecret: airflow-credentials + webservers: + roleGroups: + default: + replicas: 1 + workers: + roleGroups: + default: + replicas: 2 + schedulers: + roleGroups: + default: + replicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: airflow-credentials +type: Opaque +stringData: + adminUser.username: airflow + adminUser.firstname: Airflow + adminUser.lastname: Admin + adminUser.email: airflow@airflow.com + adminUser.password: airflow + connections.secretKey: thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql+psycopg2://airflow:airflow@postgresql-airflow/airflow + connections.celeryResultBackend: db+postgresql://airflow:airflow@postgresql-airflow/airflow + connections.celeryBrokerUrl: redis://:airflow@redis-airflow-master:6379/0 diff --git a/stacks/druid-superset-s3/superset.yaml b/stacks/druid-superset-s3/superset.yaml index de49e61a..58bfd5e2 100644 --- a/stacks/druid-superset-s3/superset.yaml +++ b/stacks/druid-superset-s3/superset.yaml @@ -1,4 +1,18 @@ --- +apiVersion: superset.stackable.tech/v1alpha1 +kind: SupersetCluster +metadata: + name: superset +spec: + version: 1.5.1-stackable0.2.0 + statsdExporterVersion: v0.22.4 + credentialsSecret: superset-credentials + loadExamplesOnInit: true + nodes: + roleGroups: + default: + replicas: 1 +--- apiVersion: v1 kind: Secret metadata: @@ -14,20 +28,6 @@ stringData: connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset --- apiVersion: superset.stackable.tech/v1alpha1 -kind: SupersetCluster -metadata: - name: superset -spec: - version: 1.4.1-stackable2.1.0 - statsdExporterVersion: v0.22.4 - credentialsSecret: superset-credentials - loadExamplesOnInit: true - nodes: - roleGroups: - default: - replicas: 1 ---- -apiVersion: superset.stackable.tech/v1alpha1 kind: DruidConnection metadata: name: superset-druid-connection From 3cb3feeec22179b206855026a4a4bbc700557ab1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 14 Jul 2022 08:25:23 +0200 Subject: [PATCH 084/177] Update stacks --- stacks/druid-superset-s3/druid.yaml | 21 +++------------------ stacks/druid-superset-s3/superset.yaml | 2 -- 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/stacks/druid-superset-s3/druid.yaml b/stacks/druid-superset-s3/druid.yaml index 1254deed..fce73f1a 100644 --- a/stacks/druid-superset-s3/druid.yaml +++ b/stacks/druid-superset-s3/druid.yaml @@ -4,13 +4,13 @@ kind: DruidCluster metadata: name: druid spec: - version: 0.22.1-authorizer0.1.0-stackable0.2.0 + version: 0.23.0-stackable0.1.0 zookeeperConfigMapName: druid-znode metadataStorageDatabase: dbType: derby connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost # TODO why do i need to specify this? - port: 1527 # TODO why do i need to specify this? 
+ host: localhost + port: 1527 deepStorage: s3: bucket: @@ -27,41 +27,26 @@ spec: brokers: roleGroups: default: - selector: - matchLabels: - kubernetes.io/os: linux config: {} replicas: 1 coordinators: roleGroups: default: - selector: - matchLabels: - kubernetes.io/os: linux config: {} replicas: 1 historicals: roleGroups: default: - selector: - matchLabels: - kubernetes.io/os: linux config: {} replicas: 1 middleManagers: roleGroups: default: - selector: - matchLabels: - kubernetes.io/os: linux config: {} replicas: 1 routers: roleGroups: default: - selector: - matchLabels: - kubernetes.io/os: linux config: {} replicas: 1 --- diff --git a/stacks/druid-superset-s3/superset.yaml b/stacks/druid-superset-s3/superset.yaml index 58bfd5e2..8f0f9de5 100644 --- a/stacks/druid-superset-s3/superset.yaml +++ b/stacks/druid-superset-s3/superset.yaml @@ -34,7 +34,5 @@ metadata: spec: superset: name: superset - namespace: default # TODO this brakes the demo in non-default namespace. Why do i need to specify this? Why not search in the Namespace of the DruidConnection? druid: name: druid - namespace: default # TODO this brakes the demo in non-default namespace. Why do i need to specify this? Why not search in the Namespace of the DruidConnection? From 03b43ec081497ecc056931c283a5b158a4687e42 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 18 Jul 2022 08:45:30 +0200 Subject: [PATCH 085/177] Writing docs till i need a doc --- README.md | 57 +------ docs/modules/ROOT/images/layers.png | Bin 0 -> 27259 bytes docs/modules/ROOT/nav.adoc | 13 +- docs/modules/ROOT/pages/commands/demo.adoc | 3 + .../modules/ROOT/pages/commands/operator.adoc | 124 ++++++++++++++ docs/modules/ROOT/pages/commands/release.adoc | 151 ++++++++++++++++++ .../modules/ROOT/pages/commands/services.adoc | 44 +++++ docs/modules/ROOT/pages/commands/stack.adoc | 110 +++++++++++++ docs/modules/ROOT/pages/customizability.adoc | 48 ++++++ docs/modules/ROOT/pages/index.adoc | 48 +++++- docs/modules/ROOT/pages/installation.adoc | 81 +++++++--- docs/modules/ROOT/pages/quickstart.adoc | 84 +--------- docs/modules/ROOT/pages/troubleshooting.adoc | 52 ++++++ docs/readme/images/layers.png | Bin 26342 -> 0 bytes src/operator.rs | 1 + 15 files changed, 652 insertions(+), 164 deletions(-) create mode 100644 docs/modules/ROOT/images/layers.png create mode 100644 docs/modules/ROOT/pages/commands/demo.adoc create mode 100644 docs/modules/ROOT/pages/commands/operator.adoc create mode 100644 docs/modules/ROOT/pages/commands/release.adoc create mode 100644 docs/modules/ROOT/pages/commands/services.adoc create mode 100644 docs/modules/ROOT/pages/commands/stack.adoc create mode 100644 docs/modules/ROOT/pages/customizability.adoc create mode 100644 docs/modules/ROOT/pages/troubleshooting.adoc delete mode 100644 docs/readme/images/layers.png diff --git a/README.md b/README.md index ad7bb9f6..48dd59a2 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,6 @@ # stackablectl -# Installing - -See the [docs](https://docs.stackable.tech/stackablectl/stable/installation.html) for detailed instructions. - -# Usage -## List available releases -One good step to start using stackablectl is to list the available Releases with -```bash -$ ./stackablectl release list -``` -You can also ask for the list of currently supported Product operators with -```bash -$ ./stackablectl operator list -``` - -# Building -You need to have Rust and go installed. -To build stackablectl execute `cargo build` or `cargo run` to run it. 
- -We separate the deployed services into 3 layers: - -| Layer | Description | Examples | -|---------------|-------------------------------------------------------------|-----------------------------------------------| -| **Operators** | The operators needed to operate the Stackable Data Platform | `trino-operator`, `superset-operator` | -| **Stack** | The data products | `Trino`, `Apache Superset` | -| **Demo** | The demos that prepare data and run the applications | Demo loading and analyzing New York taxi data | - -![](docs/readme/images/layers.png) - -Each layer gets deployed via its dedicated `stackablectl` command - -# Deploying -## Operators -Operators manage the products of the Stackable Data Platform. -This command can be used as a direct replacement of `create_test_cluster.py`. -We decided to drop dependency resolution (like the superset operator requires the commons-, secret-, druid-, trino-operator and a postgres) for the following reasons: -1. Imagine the situation "install `trino=1.2.3` and `superset`". Superset expresses a dependency on the latest Trino version. -Now the situation gets complicated because we have conflicting version requirements for the trino-operator. -We could try to resolve this using dependency trees and other magic stuff. -2. Even more important: When you deploy the superset-operator `stackablectl` has no way to know to which data products you want integrate with. -Because of this it would need to deploy the operators for **all** the products Superset supports. -As a result it would install like 90% of the operators by simply specifying Superset. -And all of that on possible non-fixed versions. - -We also don't deploy examples any more as that functionality is now provided by the stack layer below. - -## Stack -A Stack contains data products that are managed by Stackable operators. Additional products like MinIO, Prometheus and Grafana can also be included. - -If you deploy a Stack with `stackablectl` it will automatically install the needed operators layer from the provided release. - -## Demo -The highest layer - demo - is not really needed to spin up a Stackable Data Platform. -It enables us to run end-to-end demos with a single command. - -If you deploy a Demo with `stackablectl` it will automatically install the needed stack and operators layers. +The documentation is hosted [here](https://docs.stackable.tech/stackablectl/stable/index.html) # TODOs * Check if CRD resources still exist when uninstalling the operators. If so warn the user. diff --git a/docs/modules/ROOT/images/layers.png b/docs/modules/ROOT/images/layers.png new file mode 100644 index 0000000000000000000000000000000000000000..69e5a5a56c9131c65f525448945e5b32f62052a9 GIT binary patch literal 27259 zcmeFZcT|*3(;=b=UeBZv$d-j~&fA*X`M;z#@ukNbqs_O2lUrn@*w(32?$AmX-+_8Ur>;TAH*Xdtj90PDkLu=0Q@H? z$Sc4v0ROwcr5(cMp8-X9`GEmAj06NY|9*z>vb6hq99?%$h>?$yypWH8kb|2CRLR#t z`|mzJ2oFyOSC_xL34nNod4>Ld;N|az_`B5x;p1S9xNa3v23YxrW?(9Zf2j2I*7T4U zKw28;=()M7!Sz83%JzQ`;$rFi&mcMgp*#YA4+1jq(6n~*^i|Q}x6~BZu{YLJfV;U^ z!fYJiZV(rJXFUUB8&#Nys)wtLniadI~NO z6-67Evm)45SY1)c*w7iGuIMP>rlJQ!+9L!URc-x{YQjG1#`b>R4(>t@f{t!#AOj7! 
zyOM&VpSZ4?9zWPuMA!)oK^TkZ2`h*x1-NTE_*+`qAr%F5`3)T9VXnrC-e5NcaUBmO zHKYm{VJqP6>Blech=A(*02CWJ8U^_Ii96dnC^-BEa9;Nj<<=z{kPfTNSG94?MGU(o|IsaCgyyX}W0Gx+D1Ypb)6Kh`WP|6YBoKqF@IrEo~oL13y|Q`K_yl=t*>xAqWKQvnI^i;CzA1z36L^6SEEEDfyeogwb_`WiN(Du&7s zeSagcqPC-kkw$=#4b0ui*wSCq*3L$sUjwe~W^7~cuI2=c>*peZ)De~UcT6z2Uwg820W9_bnbTig*16mwmhKicr!U#bZT~%9SFA+UgOBY>RVW2w_ z8DIc>$cx+PfPIu~tX%}%tgQuH-1S5q)UCvzDhj^VPC{^CR26q8F_nM-aW7*JBwQpw z!9(8CD?rQGUDLouSKR=tDWc-5>jrlRBNe?gEJ5M|5EVfi;4MQ)YCT3tGAht3y0&475c&l{7(025LT*zBVA38o$3dNKnI790dGmI5=p) z?O}exiU8@px<=aeHjaL_UYY=*DgxR_BNttDHKdWC7F5{*CT0jSgutzY;bOW@5Km92 zzrTZ#x2L|9sx1tQ?3 zZLNwx3V9g{dOQ0#i^6@?)xq{)d2KZvD?dP3`VJmW8p2-sKDuBC#7G@@MMzoS&lULO z7jQ=^8CiK5`uiC9+lWF0?L{<^{MsNdKPMquFIOi~eN`b*BPSs(KX-_Ml@&0Sua6Q` z&Du`f&`1{~E~cm*5OBSYP(lA|R|19$`Z@SIK@6QlwY38f);@Of3Qmv!5s134x{ITa zh@!EsBH)TuY+en!*1-TU;F?%lg_mB&c6TY*11k>P* zWJyvY8+czQe04N_v30Q-*tiwARkxf`-#6vEu{E;QDjf*#lQ{dhw{9o9{5DkoJ|&YJ z-pd>J8E}-I5s=A-1aDf6z3HxCkbt5bHjj|FETTYZ1( zz`6BwnK&a>}3Li&T0(}|%44;RWTcKc{U zK5h`-G&x=-z7+#+-WD2niR;WH93$c2dNlx=?G5h;ydz6V*)r#oq zypmQL6T+At;S;X8sn-DmL*wxhPWzSP{I_hrEMz~Ze(uS0LPcYeH?QV2n{82(TXaA) z#!^jf<1q+J$HU2Uppsu|e2}Pvm5}Ea8%YTLR-r8M<~!x(2|^MeB2G)8o~2eYj+1M0 z*6u?MPXk2clJ;`R@zG>qqSNGd0gc3G3*Ij*+1R2HBjix1=^OE-hZcXfk7h!h)BSSYI#rqSH|4p#Ei!VcAT9F&bBoH~#a%QvVdijxRv28gk zekwbNg~}qJuG)UYY_rJE)%5%EVS0JqZmBYBXqaPOZ|={62xe@#f1@u*$s^R@9Pq;uc9L_boz-_%~Dd z8nbJt_MwL-N3w67W-Ux#Kz=jDjN{1>X;36gWU?mka815W!6*`p0<`VAp3K$l+Ehsl zl+LJenpb}OXk4fCf}?zGKeAjsp4#P&df6szT4ygUeHUuaJbwO-ByZ5g`brJ~Ys?00vKhu{gw|D52FEHZANS`^w$Dt)!iZ+tY) z<&S7(_n3=jZ_8jgQH4?`vRsAURCaDqyR|K0LbqA)UTX>Mf_x)-=t#7c*x6igu$AiB z+bH*iJkRo+v4Mrynb1_G)NHO?_MgcnTSZB;Xbbp(9N>* zxQvh`;IGpO}TMEF9WR=_i%%ZHF;M_owYo7 zqwk9ihNnDv%eiOJz`WI1r5$o|V#gVHW_EhScNol8JLMJGP~l&m;W>Tlk&^SrnvADg zOYmJj_k9CCi#%&b-;GILX7-+>iLH+`RRX)QXi+wGCf@f&-p>ubmfhQ(>ypCUWJW2s z-Vxl&{OY8D4f#MEeGu{?d0m%oFpQa9y|{AGn)qa%*5rV(;-eIX6kV9%k(S}H>L79c zdm7(Sed+~|S3N|yn6o66FKM!FFXHTEFIINJ#(U$-k?hFT;cUult|pKC^-HfpCqKth zn&Y`WO=V{(=eCSj_*8MB(c#Ajh8A6C)HCsh`kF_l-i`~AbtU>8orp0}wM!^g!jeY0 z^g%U_k8>QAVE_{MKSVl)_F3J_U0k^|%RglP>1Dd6&Ynp#;^zLl#H|Tt)3f03VGJNp`-^a=@cp%MmEnf^?$pD?Uqw79N5M~B zA7j^*+CUcc2@QqW@AwU6uhwClp?mt3UK^A@gPVywqC}mypUFhhnQiNwEN6$3Wr1x1 zD|TPJp`C{KI8C5yDXl49-MO_Eo@fPl0ZwEpPa3ndglJ-Cm4TXkvUaC~!%St^`WOag z2ks~)c8Tojp<-7n@8wy1eRNc;_|%S$73tm;yz<;w(D&rjEz|DBA!VLl09p}3TXOdF z=WU7GREyoX7HaR-_TLv)NjIX!W`ElcH;a+PHR&y59%@ zvbe;L=Q&3??Bnrm>tlN@>g0Db+|cJC3!t$NHE&-W25Q74b1>(!^=i0%p4JC8BjRp7 zXHITt*n=^|P&mc8vZ)0Vs~VcplzyPL>PhH)t5QKij`l}#u(lG9uKdO2+5Hu9x9VK? zvC1NL7*;`Up*u7gt1oWLOjLP4BXqVcV1hpCGg8D)3TEldN-t_?ta+rzF5Ww5%HIe95(s^oi? zePywWgqYl4HDKA`D`!3Yj_I)wwJd5OJr$BQ?t{k|Y;jYiW48du9fMDZDXk?ih16bn zpl_0)ArD0Z1I6c435ThQRC1VjBDACVxYPb0{u8;70@V^nGi5V}6>(^r)K)k9nZo^h z&%7Auou(P6K0Mo%QOp`*w=L9qTtc~G;jR7s1u|Yq{Bvmb{yJ76^v6yTY;P5p$T(6` z2?c*rrp8ls(R$?Vu|t;cnku8%-ucDQaCHOYZE_e{tL`4~z&vJ!$b9JJ@bm-!Z9l3@ z{i_nw1Irbx=gaBUVCREwL0co)M$B-ts>!3nb>X@kBThTYt|er`1Z~nUl!Q}E#C`|r zWt1I98DG4)RW$s2^gUhF`3D3M9=@GMN}bu4Lt4hdMQQ2tPkpaqbHqAmV+u(LA1>>! z-gKW)#OOEJq~g@2(cKEtS z9P#FqI$JC~uiqFq+T6{Z58+Ls(jGDn99?2!l*D=@-Z9Hv#y?`a|0yyw#PY2d6otPN z;tltiL=mwXOv}FugnT1~&!68nS0)SMwRfk^JIhpOsd$jCoI&xMxbkZ=#eK&ZSEvP&gG31zGh}eS z>bVdpUeYp+_#S4-yWxdDVdgkrQEw1Opsg0X! 
z)27;xcX8Z$6hM90t8oOr_))r`xOlU*(Y@g`0A?$I`8A7wp>MHOw6Rnc9JMwXKd(xX z0+AUnh*)&2yh1f{_3lVqEOJH=&Mvy|oTu(fzo5c&Y~wmT8ZP%K^ja4%%0xFSQgR4L z$z$|$boF_V){6|U53#nOWi{|bOJEG#_o%#hd?A~2VT_OTTF%BoD`?6nyx-Dk_2(OJ z#qTmAEsQIS%tJ14rLU5~%V5-tp#{v5Pq#mnX`a<5VsOQCyR}v^{G(9D(#In@_>?jv z*{f#!fgcXzY!BX((2}1i`ghrCr582vnVZoPODG-<Mq zw`Ln*Mn1_#M9oY;d&KITA*h=*b+Iocb~Gzf*d^j4{0e%!P1Q6SQ7+a-hWQ-;J@76v z2G1lpmerfX$V|dn2{YNlRkVgDIzG1#H`S_!GOngtgrsOuPY+}pwAtw}D?9J0(hsmt z;1^?ca&$2Y0(E~9x~q#`Cx;8`Vt;(W=R`J|OOKkP3Gn={}uG7`{Uy!8({Yq;v)?N^WxbEi0QPb+_z= z#tJ&so`h4E8PVRJl?ms1bHNav{f?}x9yz8!kv_^&P^6qS5R*@|!gnV3^#!5WglGT5 zxzFo2EUIaezBeS!jCjBv z;_TyDP;`K}W0`x0fpo!pq|*yHbzENsoq=23OExp?9Cd#xcR9Nipp~>en%Ejg14Dn6 z=9geB6KHy)tsHAo$VlQu(4huJZA?B+?k;-U;<+DAgHM^lm&JU=@#`)WjS=7*E8(HlL7&_L<5?B7YwEnFoABv_)lZ$J$@B>=bB3?BOX%Q1oYv-mkQ<)iX%B^)$0p{UG=biF^V{Q|Ij)03Dlt^`vC zujvi04$(46b%q=Kzm+5ji35{)Ju1BZi0XVWC{LHi)*$s13)iXn=juKgBTLTkPBa+* z@;>C4oyqVVZ6{-_)=R>M_iwu%5C_gLvL^5`rb~3@zcSh!jvXSD1XY?$G@9~tG>_W9 z@PROwH*BN9i$BQn^I@@!s^Y0uwSM~){kJUW^xlav`Fvn1|FSzdZ*%g+IbkcNgv?<& zQNqyTtqEmQ<*!tl#hQb^}Qk*^$LF`=pwoIn_rX!$DlfGR!-3Ys%E-+O%KbPA*2-u%5DCT zG-j_bM#83Pf+i+g>T~O?TQ)1I@^TuhmPgP&WUt%HU8~1+{X)pq=~=m^GSA#VJtokUZ~vhz-gZy%1H{+u6VO|hjA^#G&o!3o3u_#u(S zeePFG7!6;@;x+$}F}UP|CZH%-fLXEb0%N_(#FDoV1?IQ?&%o}L zkIb$md~d?iNo!C^YFC$Mt*4|92cjd1h30CF2eSq{xB0$^WnboTZL%i)X~-G9_b9iC z>F^*YT;j6EuhWC8s>ShfE+EF4kUbGv=1}#fb3y0J$I&8o*Sl>ZH55he6~8kR{VD=2 zdLMgUGGAEKrj4lE^^L;6nkaAX%B(dlmEHW6r;KlyNElP}llMqU8dIjZ{-Kf&Jv{Uh z2mXk?j=t2r_T!8Xe=9z#CUNQn>1O|tHPjcxH2entI{LtIMM{>SpJhpdRqQ#`Tij9o zHakMcH8ruGIx|o_z8w}Xg|xe2v9GZ1<8uo4cZYOrhfVD-_ZGgQ zFW!roK}+q2t7scoako8+0`daX^+%@$l)8D6n{b(jxL!+e03%l2>PBJN%=&_>+Y0-%%RI3y@@Hv z!iPC=cV*QUHa#q&(4xe5kcu z71{!d+~u%djw5=}8DQ_rJz&dwxV!EV&F%rom%JjQz>roG+?hOIIZ6W)P<=0B!&@6Z zni-VL9j&N?2H!3CIgdHmdVKmPvGXDy3YGq#+xTEM7TR$R~>`J&fh@9We;o~ z$S<+lAXwb|nXC*sn}R`%7?mj@yHv-Ltj zNqBP)aL_;i(0!5hjUkrTObpE1XCW7=O>rxGyDfFiO^ggpiBCrLF~Bf*w4mrRxVyl2 zA*UX^qA7IzP@Mx`T*0)mn>9-<$WCI9kFvRH58G4{fhhe@+j&+W+HWxzK}9(|rt?ia zY?wB%B$j&!I~o4DZH$~;EaeTX?l&VDBhs)*$|CzdK|ZU}(!=jNOba*Bl;%MVuW}gn zbJp*rYNX1vE{b}y{m^>j_Hw*qa*rTeiSpt0?i`qJkLv2l{so;jd@-vNC$%WSmHMd-lI%(t?xyxP0PM zWElzc%w}-B(z4xf)<`+6I*;P`j*tfVZ)JupOe}UP& zzu-?yVH&!T&))fc1&r|(X92cbvSz(DPf4DYo@OH>1Ur&h8e@;oXDSB!1P<;eh_ZFp zz9E@Iit#j0%Ok0sd+>Xdm7`qO*}S9)8;t`+#ohY85k#C1+YPBgl+9)IOTW;EuE{7dgC?t&aHlO9y}kHP zMQw-f91yWf0pP%$m?~xtB_iQh(jG#3vo!BgwpM{=-+i-$P81m+#wwTF@1kpfAlkmF zdz_)vU#$CDmG@(=IrjrNS1mbSMxd7dyd5VIb&Y!=vK4Q_J;@B>jD6yR7gyV4Sycna zq8ZL_ynCr2UgiFWTA=nFEx9BdTV{wpEZBCt&*Y|9p(}x96oOK_zS;iMIx|YKGKj%^ zochc@NoD1;^IRE9jpDgH*M_c7~K1B?uGof3sMq^u7z~$;FdYjK6%hG09 zu4(QapJqRO!Z!mBUCit!t-L}?^SQ`=O#V;~&YF~!yWEy4kn-|r1l%lC-%e$4uv%F5*O^t`Zpa?y3|!K}4D zSItL20`!0c49w@&bpw=_bY)cXpFJ_(b_%{x3NL*zZatyztjtW*0;do{op-z}Zu9Zl z`euC~U%?BJVY^jUr_H_)+Z*!Cu?mm~f%sr%=$IuX8KyBxiksD?PCxXuTVJ81x77Zu zYcuh3ab&G`+kihDo0TUcBn!&|O71c%d6Y!g9+5|>6Eb=dI+1&B}p!MLysgL~> zk$~SuR@XW?lvCs#mHlhH=1EzBoTi@M+sAxo7bJgg)C}a6T|%#n;C_~8_tM{2z|Xoo z{L$|B{7()_C2DGvTal|XDaEl}Jw%23U0fCwFeo)QpSurQR3cT780Q{!zkXn<>yr@F z;ctu{{jD&!(n71Cvmz|wn6$TL40f>M_uX`knL%udjt5xh!dKW^6f&&zsJfP4&$=oArx>K?}H219wB+M2%CDDc1wlXcLz~fhw`2 zhp|N)XZwB|V^ddc=A1#n0xS9EAqrV~el8Dw-Vf>}t+-vcmU`|PMekUz2z0GeWLz8nCqWmk z>0&FZhI4bE8pW_T9j*S)jr0GLd{i#A=wQp4Vn|H)M<&WW6W(CzXz;Zni-EJ--&zLJ z5MniI?eU|)McxTWjZjsp+k-k53JOK<;HeGrUb`Nf6Y`FdYJG0 zRSnpqK0F)ULK{*5WE=x``^dv555Sjot+ALX;GA+bnL`>?bWnm@v&M&$3s2pv#YLG! 
z6+Kng!gKm9eUh3s*;>@ujKyT$W3CMg4H=VNYL0l-?FVF-?xzzZ)bVHh=lHQ1@DeWPZcX}!Hd0l+yx7zrh%q&+l`(`^F3EWwgua4Tou9$|XF0so>qdPdD%SK%kDKLFx4cg~K1c>x$CbWTyhz zuLd%2+Aag4X`8XxBFC`(vYSzQq-hJl;{L`0d}YV`Ty%!_B>gqf&0Rj;w|?;ufG(Mv z={P@iu?5fK#gv$Sli%cB)xJM}V9q3W=^xED;SqD=WSLwbu&l5M)1H!X=RN`RSLI9_ z{^%Pgc4WbaT9^5J_#DW=k;9C-HUc+j?dWGtevw_iTd!<(NIPXwuc@t(9e zD!&>D1bAC!O>^qz1akMV@9TA0=wUh&4>l!+S2w7<6TbbO(jNnzq&CsKzg{cPB}zaj zQ&#f@U$Gs{vFxc0Ma0P^SKRMZ#a z^T9^nUB0PDPkk;IvW?HR4z;0<*DHh@z9D)!g%Fs*?zS!3Uo>`=2N~ z6F!VqWWmt}2;3wWFyCSYSWdOC2*mu9iRDfl3 zbZ1QlumIx9Jy~m<$c+kCI|aNJK(E;!{k<;P!HJL|q6qU!Q|- zG8;=N?e@o|g$SL}Gl*E?BbaQ7@XlL`4w2J=~>(wRm)j(<}9Q#88+d31d)|Vxn zA9%Eh^8`(_!zd#^ueyNN4oS%FTw*^tYA(dvYF?NL1r%tPZ@su}f~BF_*dKW!xx*-{ zyL|cbP;dKk@glwFEcLwm59jf~AMIoRS-pkO#lCqeFK#286WTDSn>UwQ(`;gs|? z7|D0+=OX-;yAF~ByL`@C^e>|mL&NyPvpmWWlI2 z+yB-X{y($%-!&UjqA>ju1NDQ|A;jn>OWksF={r2Duma8a;Zl?CSC5496e0;L(q^}m z7#M-QCgQ0zYb@Y+rEmtG+j1az_?Zgj@Se$SPS!1AVUT1RagUB~;rNRUd%cO*z@(7l zS^SfWqnSHo_qQo-?)Dk_jrrZh>yXvO18{B$_)1^N@NVCI$T0QAJ5l!P9ELJ^IhG;r zsc~&{utM#Wo`gXHE}R7m9iBwGd8Vs@*4_$Rbtp9v#``P0M<=N>k#%Duxxp08A#I?uCj!9v1h z=6teKc2AUF947{z_{U@tB#l{mMdYfS%4?SAyEVN(kS3%!eyDkwU%G-T;J(X8YRfN5 z5CgaFud-(PO@c!pZI3OAe?>2A{zmrFFI2*^M2UguS|Y9g_{^hH<&8Ipp`Wo4%xVN- z&nfKK^kS2hI?k|&uzL;cR~@o{?^?biwfEMtCGjfDXelwbTphLy>77Q{@q9i~v#|Ki z{kDR#Z#`)5)6@E|50@OsiD4Lwf9J_s4mhcY zs7$cdO?`tvXNhHD80T5(6T2As%~RH^$=UYt&Fh@$rp|i+peUQWkubUWyRgVDD!X{R zW;^R#sExt*A|Z5h+uGR|0OZN~asU|6$n0ML>Jhl3@N3*Fkt6{COgA9c_^aQh;LBU> zlq+In0GKj$BS@OV=TC`izGJo>Vll0Dv3@Hw2|Yp)7s#C)Kl~FJxxDueUXlCy3|}Hn zOe>;q&|D**@{{&aRc0i3r0W4~R999h%F3L2f#|uW%Tlq{EAiwKObToFOx%b$9XrCc zj!}_$hVIh5x9(h9e6442;Ws-sUa0Ke!0zs18HGphnBDhv{?PajyQ8@z9f>^1sPd^g z$8xVNkoNRcwHa|}RNy{W@kzhguKiS?b&cKXb6~CSu*p+l{Cjy>foo*eVp53A@rw*U ztE}xUPpT#+vw+xS+O41NIEr0n;7eB9O4-X}*mg#iU~qJh`)}?#t}9mE4}O-9`l}){ z+!QvyKNd0S=H8g|A_R1HrD48t8U!T@Pvy9JxgfPE=5XM5uP#ST-amA$eV|~Ezu2Or za({hNDYoJd4<^rgEkO4ojBw|T_~04`((K!^KOzA6Ae}@q)AsrPyhU{$|?XGWYTdtY=x3NCTxKF=vu4x`< zog3m_SqFf}jE~e0*0V)IF~0W(S)HMH3pIdel8(Eb3pmn7Eia?bD|e7_L491M8r0R?Q!L|9#bnVO65 z%iB;rIc$sdm#g1~yJJx(AIZ1b6U{gPO*ddx6(|j4ARQ6@|X0iST?*?b| z6G@rr+^ue^9|uXuqnX)i>Y4J|$Rqg$3xgkySafWV{~r@$DMzD%)MLz#6YT;#35F}O z8(R9}Yn8i#pCQ~ESVD?*(ag;9=O5q7YIM!Or7B)LD2hx4z>VkNJF6LYu$Se8%FZqQ zC-7e&=vilBc6YM!R_fxON@wOGID$PaYNbBCMqM4Vp;KdJ?Z4beazOpKM(NDQk&7C% z6<1^SqI-!%CS~4Zp|$gjm@z?h*6g$h&JZ@R%thHCM-o z781G9+p$MUJoF(9-yQ`hun8&Sa-RvC&N3sK1%UUKf112rBCL^gT2PLa(7I(H<=Y0A;88B!BP{&`n-N+qu-Qw$O2 zrFZme+8Hc(uv>$aRT^%*XmSb)MrPQvZB-gISTqJ;lK3oS{Bbot6*BDdl@u$X#w|il zw6rD4tDOR@KS+%?)Jo|rigM-+Te}g?;&U66r}75>=(|0x$DF|!r6Pf@mhEPwBY!HGavhcujqaX32zTeny_3g zQkB@*n{)cjho7>ZO=}QMxn`N5{uR?k_L0#YNzG#pf&|F?dW#^6F8~HPZi&gyw zmQ?^CXtZ)WVhzvQpN+CFT3^74w~xRh z#P@DZm?&!uB{8l#mz%Co+q{C4TlhH?2bIzFBqk9MM8gTXZh_4cy}8+KA3HweME+VD z0!g!k2h?ICUi>Z_$&31ECTh-CO-kmN@+z6Et!!}q#ZjF@EB<0rV3z~L5F}Fe;b@X; zLS~%jwaH;JGP)?d#pc$+Gu!Co_nUwk$`C=K56INr7_uRmyLyjCA|TUI1E z^$8cP-m#H7MyBaJwHR%Bu|^s4bBRZ62;c8@Xf1A6z~fql{L0DY)4MLb|U6uE7-zYAZaS!117^+{nG zF=s`kHuE;(%doFm_?wdkC_uZL;BGz$5g1FE* zN@TcU3lG@(QXi8T=h|i)@5T@2&4~g1poEcO zXDqrVL&6ldeC=$lz>XmBC7z%zcJsukIRqT;E2_uKqi1~TTv$17=J~ zPf~}TRFPp|SR%ctDDD-cks**o@|~JdXgdCbsQZ0hb+APuJ7C-^eWv-Spux<|Pj1r7 zP|}KH`-7$3R&)Ugxa!AH1O7xSWoPsEuMw{+xFtNNH7*)1ciJr$(w?WTJ)B76x=YAP z2PMMSt2gjII2b)?EPBfN2!yVZd9eL7lQ>|Ri#!{ZY4Oo|Cb&su+UTljaSWo%n=&G-wZOi7saI=nUs2-J8Gqy_%)Qp6W)nM$Ti#r!zB<&PiW>Lu1^tSGGeKOV zPM^JBmt62~jC}U?d#%Mg7Ep0%bdQGD@wXylHGv9giXFW&&2_>=vG^8SC9Hd{&PZgg5>dg^*o}6arq9x+tABhkd z#`6i_5Rlup*ZXtNX1S~{Uwhu=k;2XFQ1={RhargPLq6jpeJ(1W&zXU3nO%WrjxTVX 
zNPm$``_^lwk4B+~rSAnfd;A0-CUK2tOiPP%C0p?otr8mH-rQ`fuFo^KHM#0geqn*BZUjG3}p)DRdyplSFMPdA|O46ffa4AN|}F z_$=;5sf0g4zPn%$sEShJnd_zdIA|VI;LHMQ4wdJhb1Iy};YRw#I?aF?2z_*E%Md_r`^@ieG(}5nuRu~zQBE5q2z4FDn=T>k97Maz<8AbqonJH z&f9i*=GN2dpPUY0_C2Z0uk4|y0Jk9JAJ`_$>4}>d!{d@;IdG%)lHpE;Vv;9!wGhSB z{T33sKy>r^M1^oGBv4ki$XDO6)L%8F#K3LPl))u$3X))7gB<404%lB%>SG_ zW|Cc6Qsj>7{4~5c+SI4>7T1#FJDPEfZu_nIPrY{+?@Vm3Cb^w8-qwth9iq11;*E8L zI7^`5z-?#!@HIJ5M9}oan!Pu9fED$}xh9^=d;-8B(BA(ZVNI21ab)}GHcHHMGPSR` zKcc_psa4fZp+N^s)M=D|qgYxy|6X3`$a{Ijo z!cZ%%#LGvX7bvt@b26;28X+$!XeqX(@oviNF?)@jEonKu0aUjv*WAN~RbDB;3x1jw z_`YIN=83Siucc&s>6S1X$Jm0SF}?GWM(1?aiz4RP^D2Tq8IWO04_%oxGFRC-b=rF2 z{$gI2?^r0GZEL$`>V0dwKOeOSq`yD96(RZl>)M&l)EiwB@p{q+n$Wcj{LZ4;^rE)T zb?UC+l&aniZU`n&DY|9P+gu`pMe2AF1i!n~nP{?|Vo-lNHDKBXZ9F{<)Kzb5U}#j0 zUn@&GL0Q1Uro)Gd$H-3gj%GVnk(0%DJ5>vjkHCndlt@)twnIOzvUJomBlFcRCMSg?x0lq3o_8^(A; zEDGnQe|w|W&}KLN?)h7n-9bpR2Ih1%Zg!JT@Fo0vFH;E-am<0@^#XlJ1brTy|F z1@ax*!b1I5PxgPzuhI|;x-r;_3PDkC;WPysGr8}GVSc;Cvk8XcrN0-R)5+Vn0%0WX zb_M7oS%)UNwKu+YYhpWU8*^+TMnQirwmvXy^-E-;=ts`jkOD>#5309Bb=kdCgI2`t zvdbV+Oy=7P$}CBY%of%9pCe`L(2mGx*4NpJ)U2)aZ+rFL6TFT5%=uJgbIdk5oy=FPbP!)iDZS0Xm!Awj zgjHhTJ+(m;$xE9Tw7UnJ`s2(i`9;cS+g(hi%Z)!Lh8TOcFtxd1UChz1X#eoNWGzvq zox>R;(Uw?YIXRqg4Hw(LYfZ>>Rf+S{5sC^?;buhftl)NahG_~hDBY&Ry`mNoU0c=J%wup9&1nd7e=*U{GZ|T(d@k()98P9+XlM3ZQ z{B!Q48q&VKrH$JwIye!``iAdb{&x78ymqXqyb;AE39DkqUl9?FC2OlK=>e-vP)N{7 zI!<%t-^v<1y%p6S=U=Q@ye2fn+X;8$Kvt=a8$#2cFrQ8JNq8g4T%9a}zi^fDflj2e zbk2aNTOLeZ?{j*w8OC}iWaxvwM?3AeGkc|KijHRN0_(|g^AsukW&d-XkW-!54%peO zq!8w-<_H-obR`D-mYFMM#e%oYc<@HpDy{y@mUmojquA(1+&17UkSX2~=zIL`gEJ9O zt_!>CF~WDUOe_n+a3aHlxRpC1;X7sJS_bS zIfYBVkm1O6kZ(zBp}=mp?_R*>D$|lrO9yXQAZ2}eaupD=>Kpqw-<*aP4f%-U%i6io z%ktmF1|vT@!hmp=6WRDZ!8%|s{9nqB1CcMlHUF<>=d;FHNEx*134kEk+X(dV9r(Ap zlyjmtJOl150I_@%(7)#g!gFDo|6O>ltsTz4vn_mpSX23rG2XvR>htLT#vA-f2=O85dezzd-iYaB9!O2gEUm|1 z*X2JI!6BzGNEdKwoBlT}0BAdr`3lFCY34Qnqo=NRdiz)Xeing_US8o;xS-=MA4iJX zr5Fb?G1ecx{}w1bA7~-XV7br#?1pmW&HgdOLjXb$+zAx%H`N?@F9C7sVZ$4vV)xao z@i@2!Mvrg4l{lfg!Tj(su%L{;owtAk^#o7P$^Ed8_um&WNd9}4n;b~++im?-YQF|l z*u{*(+zh_Ia6o)r{{pkxEtudDWrq5yil z*|du90QqGEF9aV9UE|DfdUeeX@O=XR8B1`Dy%>|c7TH$`;K@fJ2Mw*4=U`4xe+i=) zIAqqwni22q#WPa|oE6j!W#!Xx83G~ptLIN|30VVQ6A)g1&?pEgJz!hHiOKTy8B%VC`?cB z8JcP6iS3AAr)>TDijC;CYR+N)5*`!j17O^@q&qnD098SFpx;_B?CnW88oFzS8HNDa zs)?Y+M9g>lcmpNg-McmbKj%pT{xkVWogD?Hr?h~I48If3XH&Y&PpXrmeWF}i=vG-l ziqiuF^OA<`D+6-}JJUzJOAM9&^!%NG4mR!E)T#MOF=qOE2E+XhvYRo43ICRoIn;sz z8T>=->+eqZL<~Gx(@fKSz;vbCWtv5tg8mFx=*urxvuJo$Sr*i=Dq+Z9Fk6M7w^j({w+*zfWq|g z>?NshTS}ngi>Gg8`}$jlq1=(iKtDqI;mAYQ1U&#b4B-u4q~3n@(^QF5gJk^X4W-n^ zKhHwWj0Dzv&mw^veg#)xzf`!i-4T5`lH* z&X!WwSmuRetJ3o&cBZk?epC5gCng4{x;5`Z(%S~>ltnJj&v)+sHMLkX$(m2b=d!HC z%hGx3c?RR}blOL|_Q`aWO)Y(z=M)|Pf2tl>B*lr*%?7%zuFNlZ>;J?z<>Z<<>-5La zgk^v?cOX#=hbVwJ;i@5Y2R3kybOCq1jD=BPmwhlDDA`F3@tqv)b-^RbI-QM(2!$Pm z&u5Guy|N2rEZW}o;d4%7{jv9`Jh_2GnmO};6O)}goO5JE4P>XvX|7#y1iu-?3~ek< z6f|5le5e_0}t!j zRyl|DJUm&&MqU2<1Sm}W06-hrMpqMqu`c&O%QXVR04KK4POX?wd-92c*PjOzF?!M9 zIUWB!=l?w%fpF#fvX67F`Pd!(T=Uu}@BZ;}u_r~Xq;{E^N#5)2bHRIp*vWgvX7_%K zmx}$G33QKr-C|8k=#77$R5W*>E(Dy{!DBA%vA_$#?)A~Bz!eP43-meGzUD>b_JRtF z=;l=Avv*qQefh6~?-*op9xRDRB!#+C_sww)(r5xuOy>|8whUd6FniE&RRe}H9<#Z| zBI`D0IrT8)^j@IHIM_$UW|gUG(a_s3uiD#fOq6>at>tUI&G(&c3K}Ue;IgRpm~R_t z4hgAUd!GeH&oxqEuVwn#G3Af<@7<-PPKJli@dfHSpY^woN8eXC3|$e>e;wL@YojzQ zvr-ANSjQCGw}z8=ZPl*a$&zt}T(PI;Ozy+aP}?3*noRUI9~Qf@+y`w?*j(fH^#k<5yu|g%uopB6WF|<5yZMze9L4SsG3bTNd)(hdncG%of zXXTeH*>q~96Mf3j`S$4k5BEMkaU@n3DJe8~6V;+N-4qmf>VSv^)q3Zb*L*%a*_t-3 za-S6yYy6qIG28r*a_#rtiq_6TrxC;z`iz`40m$&qlw2hqd{v$PKXoDu{zxg5>`7NV 
zlh?Fo_u5PN$z0K=!PKN1N4IvDzX*X=jD|l!OMPiaeyD*su_lwIvp<1_=G7&Mg^hp; z=bhs)dUq-Pp%b-m-#uds9w2o*Sf>wUgt+o^13qkgt2cJ~=fk_k5}iz`6mYiPF1P9Y zTy}kLK(hT7I_T$xy!dcuy&J#nn6BEPh(G%a(1^zoST24rE){K$y7(+$|3{=!qr80| z9XMGNjSKW!H{g@?l>j0Yll1~ER-swrI+!uHu-P8@HQ|@mPV!9rU{YkV3px28$oAmT853OWGS$ z(3>?^I>AK*huLuu1p__hi4xUtwW%q;pUDBctLOT*AoBM8Qp@5<5mkcRpaav%&ySzs z(FEp{)QcBcWE{nu_)iEdCk5|)=o!pWXs7?y;Qxuf+!b5(&smfU1pq&#-eeED8|c6y zUEi8%G{++pYJt9v#!F5O#%xI5ovrVf8h-oauNN2sFDd4oQ>88@3C zJHt=OAUZ}wH!~tT`_w4VSPfkTR`JM#QLzm6u|HibeX7!Vy$-j?slj+<0oB<_DAlnc zVfry{CMCMe|(9HGgq4KxlViY2qSYQ%2 zoPlWVVk#9`C)|X_5Z6k^-b2Fh0M4ZupNw^@*O@EVVBHjJK0$xtP#)uyp?7Tt%XAdV zs7ec~BXAdm2p>f&TbA2I9ysN>BN9zf%7UEY9&^fTQZGNlocqCF2fDxkd#O9{cRsP=>!U95*p3RAKL$6oB!tC%EKs9XD z$~*c>6tx|I{CcF`K=GhJCl2aT_wy!lk8e5_STdh)KH>boS0Ke!7VA7`MVBjYWqwID zBZn8(rY|0N9jFW#tuLKS(9?Lt5vY@lTU7J&HU}q0Efn#cSG>xp zY><)}576B_0OkTL+TK-Gqkh-2F|HbsxR2vKSl?#>3Dj6hQsCRQmPL#jOjK;8hUkj0 z$FDaozj7VY0$qJLsXPOO&K8oCrsYo6x3`4I47FVX;IUCFOMOL(tc(6$$yN+GJFN!i zb-TX?wKL+2(zyh$q>4=H`0H%hCC!%pajCz3+7`2!37o_>;Xg44ZvAyyf8=djz#%5= zHyczv@Ji|7pxk#VhqUVrQ_g=T4;tPU>?gbQy7f7G@dcg)?>k_1elX~|j0=O20&L%B zKXc5tC*NoBX6dlc0v(B#F<4AB2hyrV1WuU)@|QhV{OixVy&1W;~<$dg-Ufzd#@S15OdM&&lg0^}1(? zNugsO?rGgGGT{h0*@>oNyLPvNnX*Lb>0ufE_}iTc++I6c>Q7I9^d+;`z4??E085$y z-b)>Sd^Nu?Za{eQc?9|vkDq+{azWi-o#~%c03ZR1Lkjc;k3ZIgqzVYSbPSDUhXY+` zFxE|139Is(_Kc>_cXYOMndQG6#>z_-{1%O8CKvNH+1lFiLXxTd4>E#v|x z!q&ZU|Gvmu4yWygJzalr*!i4pbP-7?JU#S~pGRJ53pzdCZr<$|YbHlZLF6@Daoe`B zQ*&7UewwL@ngCaTdMl-|4-CeSWxji2v{nZ{mfh{r=078zAAnU!1~ngxWM=1uj+ji% z;oK?HPmk8b!}YaA)P-%V)$ETCHm4{l4b1a(*-jl$ivtwun~lF_gnmtX>m9BXbyawN z*L*2q8>M)5viGv;GE*=PY`Pv|YDHMn)}+I$w2!4t*958~fs$?dGgTKv(v*`Bs0IVa z%xn3j!yVvGy|ZqQ@;aDw;OoqI=@XvyeZn^BpNE~ePe}3vmaS>;q5V%EYp@5I)1H2; zI7-+0+r&`W=yW&{=;$o!cPVTZdtKikeVZIK^ebTfnLOoG)VtC^qPk0*V0CbM%@S`Zlj&A7ftOF|P`c zif~(}9Ojv?`<^jo$z?&u2WA79kH&grx*_}j7+)KNxa)vDO#Mnm`#|xE{~`^Wa`5qH z_3@OnMsX=FU`p;h;FWjkSbK1X6DfH|KV*$m1nSPr!)$9jw4-%Ez&HX5hwpGg`jdW1c`NRJ~mM4+)8NZ?}( z-8j~!93Z{OyqZjey30x%2pHWylZH)1indaIGj+)Ed0z0vP1xANCk^V{vPV2mI~(*X z=Kz*2Vt)VZ`ddQrEuoScPPLv0N%d)qP3be2K+7&3r}?}bMgI!jQbIJ6VkVWt(Qj+I z@H(+&lgJxa)EdBsKI#uu*C-y%_TdG5U=jg3ZhqBf9!|hCkdm1uc4$FPV3p+9o%-lV(h13N{lQ)SOZ6oZN_x9$E>Y|`+vJuC93=CCs{%~&Modoh{ z5k`>c)VZJt6R*0a_X~CFi#Ki~Lv*M70z@{5kE0;mKO*!34>nRP8w2EB2dCB>gSYZ? 
z3NFUT3%@r}mlT?yP)@Z$S)!(lbmcWDrO6E!hijLjuLJtgJdp=j3%HBukNb8y?0OG|hy-U{-fb$2rn=cw`^y5t@R>w>)o zp5+*tjfiPS5kYPx#90AQR=L*$AIX<2f5}?UM&v<;5d?5{_8-d1YKv7)|J?bGd^=q4 zw~aYVB)fb?e?qfWF;uqXc4udrzf-Y=k-S6_eh;fhu_KXYiQE5b`mF8=;`$IOq7@r$cMsfQrHY{icG@HqMZN!SkBNO}TbFm^b!Krtf-L3Rp z!F6Vp*r^TE%cDk=)VxS8RanI}7%5@gY?(N~6h<)b`nE|DZ1Xi#BFqOxsaff{7Bk|g zn4@oAl3!8HjQ}aB$9YsKtf1%?)nwZZZ4815ns}si5xWPNh^G`WsnWXh9s0S1Da*C@ z#}g%q({HT0D*$Z8MpvY=FUldF@5von2_@JNtoYCmp{_rt(?XO$rZbGz*x$}k@PoR4 zv2(al+PmpX7+a*LSB&rZVW9z1v*UH*8jrt~N2Vu&u}9db<=G-?Z=C)dt-MKZ{_*fC zZ8kYSqPzwl>K&SQP|o?&zpmw!D=9Q#>KYo4&@x_MQ1q3EyzXm^f^sL}B9>rxd_(pm z#8a#DGhs|N5;Yj)SYhp`6_o<3Z(UPK#MeB6YkGw;Cz&s40u?$w6a>6*iR+c(f>oNfC$1fL=YasN(+AQCvPHJBq^18+#cYbT1j-O#%VBx<6T9JH8f?mHj(wFV<45c z+Nx&Jf^@Cze!5^YFe&dTwpjBeAq{kuDbAVzjN@?0nq()|OpYG6kN_)KeUR-T3h3F8 zQdSPoT(gFr`d%UCM$PuZ@MkMHVJ;V{ddCTpHmFGhnqDE2{6fS{X0*)Aei|CX^vd7I z3x+$Ybq5iSs6pJ}2Y{k-vSzFmp?2qiz|}cyYH$bC263a&c|(|aq=33u0}(k_k0|v6 zFb0B7j3QJwvTEJ9$Rw{jOo!D?2RC!w)`L^26@C!o%iUo2UOntvmu8P8| zhi5&2IAERxB`u1GqIp9J`t>O~CTuvm4j6UyU+^2K%2MaWZ7L$|TND=ThW9h4duw@4 zw>v?uFr;H5;sPWb?j%84q>;C2QM4vU3A@J1nn0-=+&)aut+v{)g^OR>@#h7qI4)Tl%z{-I~L_aD#sWp zXM}3q*lp2>Gjr)TE_ql#v zTZf5;SO)HBLF9^0auE~O5!Q^;heL$`*yB=jdDrDB3SxSbMYUHu?F+G;>}aOVIeUV~ zy71|>!u?0`Wt+VFq?fB`r67Q2L+);@S~&v1=|y$Mj41gC2jO#_EM%Q9>bQ{hfP{U4 zI%NVl=sWh+G~FTItplgWUm z2h{>yvr12!EhS;1thaDWia;Z}of`Hr@_>enl8_>dYRL6xmid!Il_F01sG))r*&rXr}4h@dGPiQ{YNb?Ms1+NFVNRRiZ4QdSk0bI}-wrn@7(t@#9}j#s%) z2;Lx*J45oGTY4zkF^nd$mGpo=$<^Qg^m=A2^$8|{LSzSqULr2+g=vN+Qxt8kvebt% zUdk5cH#rL<%$6_-d1DkHaWbdm#@p9CF#zqlytByR%jNX_@hmm>ziMMX*5%BKGDDKh zJ@a0Bki1X$@Mr5n;OYMOF;OHw0!-}Rw>8y%eE;w3i`7og;vFIeg?z|tm|_(3RlV3b6i=TM>lpI151tdcS>-8Ibi+K|Q!Wk^H%?m#cW>VfCoNtpuvx(}@cl1U;g3N8FNE z89>qF2S1=~z%Pp`H&+5%+m(ubthNEmNqr7l=1>M*;N!>@YZe{4tvcw zSGh%%X_0G-GJ9(M$@6>HRo5x0oc`<2M=06Hl~r<|zCBgFQ`+Fa_d`#Li<H--CfKYBH zHTDsYtFNBaVnE7k8G4u(%(tUAE2Yab6I}dU*Gqhtyjp=+iCsrV?oMa4%w_7Gyi4zm^pea3~wlz-{FiW6#r) zEvtleW9OGs#Z3`R#;bXTw$yljXMK> z>yIe__t=Xjc9B9d@-d`BUeLeAMk`71nF1N}bniVXP_Mflk$Ul8=m8Cm1bhQ{;N_fu4*|vs0WrGa7-_UQYs^jZ z36PG3P-gx2^@3zV!t+cjca0DJCo>1QF6u9Afha8WJUOfUH8uiA3)0z!V&&H6|CK~l zLB$K?PkE|?R6t$(X;@mqU+@6g+<`n+q1L-F@mUA^4e{-i*Hv`wK<5#tHJtsS=fIJvRL-E?6C>+FH5m z_=+q6jX3grd}eCuHr0PPKq2)9yP?a=zhfU^`a7$&fk#>^Zs4}UmvSQi-WLD&wsxe= zuNf}zx5oQlQZ$KjL{CdcvjWuRmW)-A?16@WSLYewTKFv)g#=_i0r+=YLsz{-)hhgd0CU}y%K!iX literal 0 HcmV?d00001 diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 58673369..5982a250 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,3 +1,10 @@ -* xref:index.adoc[] -** xref:installation.adoc[] -** xref:quickstart.adoc[] \ No newline at end of file +* xref:installation.adoc[] +* xref:quickstart.adoc[] +* Commands +** xref:commands/demo.adoc[] +** xref:commands/operator.adoc[] +** xref:commands/release.adoc[] +** xref:commands/services.adoc[] +** xref:commands/stack.adoc[] +* xref:customizability.adoc[] +* xref:troubleshooting.adoc[] diff --git a/docs/modules/ROOT/pages/commands/demo.adoc b/docs/modules/ROOT/pages/commands/demo.adoc new file mode 100644 index 00000000..f8e1def5 --- /dev/null +++ b/docs/modules/ROOT/pages/commands/demo.adoc @@ -0,0 +1,3 @@ += Demo + +Not implemented yet diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc new file mode 100644 index 00000000..1f03f706 --- /dev/null +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -0,0 +1,124 @@ += Operator + +The `stackable operator` command allows to list, install and uninstall Stackable operators. 
+Operators manage the individual data products the Stackable Data Platform consists of.
+
+This command manages individual operators.
+It is mainly intended for people who already have experience with or are working on the Stackable Data Platform.
+If you just want an easy way to get started or don't know which products and/or which versions to install, it is recommended to use the xref:commands/release.adoc[] command.
+That command installs a bundle of operators from an official Stackable release.
+
+== Browse available operators
+To list the operators that are part of the Stackable Data Platform as well as their stable versions, run the following command
+
+[source,console]
+----
+$ stackablectl operator list
+OPERATOR   STABLE VERSIONS
+airflow    0.4.0, 0.3.0, 0.2.0, 0.1.0
+commons    0.2.0, 0.1.0
+druid      0.6.0, 0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0
+hbase      0.3.0, 0.2.0
+hdfs       0.4.0, 0.3.0
+hive       0.6.0, 0.5.0, 0.3.0
+kafka      0.6.0, 0.5.0, 0.4.0
+nifi       0.6.0, 0.5.0, 0.4.0
+opa        0.9.0, 0.8.0, 0.7.0, 0.6.0
+secret     0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0
+spark      0.5.0, 0.4.0
+spark-k8s  0.3.0, 0.2.0, 0.1.0
+superset   0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0
+trino      0.4.0, 0.3.1, 0.3.0, 0.2.0
+zookeeper  0.9.0, 0.8.0, 0.7.0, 0.6.0, 0.10.0
+----
+
+This command only lists the stable versions of every operator to keep the output short.
+If you're interested in a specific version of an operator, you can use the describe command to get more details for that operator as follows
+
+[source,console]
+----
+$ stackablectl operator describe airflow
+Operator: airflow
+Stable versions: 0.4.0, 0.3.0, 0.2.0, 0.1.0
+Test versions: 0.5.0-pr135, 0.5.0-pr134, 0.5.0-pr133, 0.5.0-pr132, 0.5.0-pr131, 0.5.0-pr130, 0.5.0-pr129, 0.5.0-pr128, 0.5.0-pr127, 0.5.0-pr126, 0.5.0-pr125, 0.5.0-pr122, 0.4.0-pr123, 0.4.0-pr122, 0.4.0-pr121, 0.4.0-pr120, 0.4.0-pr119, 0.4.0-pr118, 0.4.0-pr117
+Dev versions: 0.5.0-nightly, 0.4.0-nightly, 0.3.0-nightly, 0.2.0-nightly, 0.1.0-nightly
+----
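+
+The describe output can also be printed as machine-readable JSON or YAML via the `-o`/`--output` flag, which is useful for scripting.
+The sketch below only shows the invocation; the exact key names of the JSON document are not reproduced here, but it carries the same information as the text output above.
+
+[source,console]
+----
+$ stackablectl operator describe airflow --output json
+----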
+
+== Install operator
+If you have access to a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with that cluster.
+After that run the following command
+
+[source,console]
+----
+$ stackablectl operator install airflow commons secret
+[INFO ] Installing airflow operator
+[INFO ] Installing commons operator
+[INFO ] Installing secret operator
+----
+
+If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you.
+Make sure you have https://kind.sigs.k8s.io/[kind] installed and run the following command
+
+[source,console]
+----
+$ stackablectl operator install airflow commons secret --kind-cluster
+[INFO ] Creating kind cluster stackable-data-platform
+Creating cluster "stackable-data-platform" ...
+ ✓ Ensuring node image (kindest/node:v1.21.1) 🖼
+ ✓ Preparing nodes 📦 📦 📦 📦
+ ✓ Writing configuration 📜
+ ✓ Starting control-plane 🕹️
+ ✓ Installing CNI 🔌
+ ✓ Installing StorageClass 💾
+ ✓ Joining worker nodes 🚜
+Set kubectl context to "kind-stackable-data-platform"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-stackable-data-platform
+
+Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/
+[INFO ] Installing airflow operator
+[INFO ] Installing commons operator
+[INFO ] Installing secret operator
+----
+
+With this command we installed the operator for Apache Airflow as well as two operators needed internally by the Stackable Data Platform (commons and secret).
+
+As we didn't specify a version to install, the operators were installed in the latest nightly version - built from the main branch of each operator.
+
+If you want to install a specific version, you can add the version to each operator as follows
+
+[source,console]
+----
+$ stackablectl operator install airflow=0.4.0 commons=0.2.0 secret=0.5.0
+[INFO ] Installing airflow operator in version 0.4.0
+[INFO ] Installing commons operator in version 0.2.0
+[INFO ] Installing secret operator in version 0.5.0
+----
+
+As you can see, the three operators were installed in the requested versions.
+
+Remember: If you want to install a recommended and tested set of operator versions, have a look at the xref:commands/release.adoc[] command.
+
+== List installed operators
+After installing some operators you can list which operators are installed in your Kubernetes cluster.
+
+[source,console]
+----
+$ stackablectl operator installed
+OPERATOR  VERSION        NAMESPACE  STATUS    LAST UPDATED
+airflow   0.5.0-nightly  default    deployed  2022-07-15 09:44:00.86514992 +0200 CEST
+commons   0.3.0-nightly  default    deployed  2022-07-15 09:44:03.215214235 +0200 CEST
+secret    0.6.0-nightly  default    deployed  2022-07-15 09:44:13.526843785 +0200 CEST
+----
+
+== Uninstall operator
+To uninstall the operators again you can use the `uninstall` command
+
+[source,console]
+----
+$ stackablectl operator uninstall airflow commons secret
+[INFO ] Uninstalling airflow operator
+[INFO ] Uninstalling commons operator
+[INFO ] Uninstalling secret operator
+----
diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc
new file mode 100644
index 00000000..d6a88ff5
--- /dev/null
+++ b/docs/modules/ROOT/pages/commands/release.adoc
@@ -0,0 +1,151 @@
+= Release
+
+A release is a bundle of operators that work well together and is released approximately every 2 months.
+If you want to install a single individual operator, have a look at the xref:commands/operator.adoc[] command.
+
+== Browse available releases
+To list the available Stackable releases run the following command
+
+[source,console]
+----
+$ stackablectl release list
+RELEASE  RELEASE DATE  DESCRIPTION
+22.06    2022-06-30    First official release of the Stackable Data Platform
+----
+
+To show details run
+
+[source,console]
+----
+$ stackablectl release describe 22.06
+Release: 22.06
+Release date: 2022-06-30
+Description: First official release of the Stackable Data Platform
+Included products:
+
+PRODUCT    OPERATOR VERSION
+airflow    0.4.0
+commons    0.2.0
+druid      0.6.0
+hbase      0.3.0
+hdfs       0.4.0
+hive       0.6.0
+kafka      0.6.0
+nifi       0.6.0
+opa        0.9.0
+secret     0.5.0
+spark-k8s  0.3.0
+superset   0.5.0
+trino      0.4.0
+zookeeper  0.10.0
+----
+
+In the output you can see which product operators are included in the specific release.
+
+== Install release
+If you have access to a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with that cluster.
+After that run the following command + +[source,console] +---- +$ stackablectl release install 22.06 +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +---- + +If you don't have an Kubernetes cluster available `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have https://kind.sigs.k8s.io/[kind] installed and run the following command + +[source,console] +---- +$ stackablectl release install 22.06 --kind-cluster +[INFO ] Creating kind cluster stackable-data-platform +Creating cluster "stackable-data-platform" ... + ✓ Ensuring node image (kindest/node:v1.21.1) 🖼 + ✓ Preparing nodes 📦 📦 📦 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 + ✓ Joining worker nodes 🚜 +Set kubectl context to "kind-stackable-data-platform" +You can now use your cluster with: + +kubectl cluster-info --context kind-stackable-data-platform + +Have a nice day! 👋 +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +---- + +After installing the release we can list the running operators with the xref:commands/operator.adoc[] command. 
+
+[source,console]
+----
+$ stackablectl operator installed
+OPERATOR   VERSION  NAMESPACE  STATUS    LAST UPDATED
+airflow    0.4.0    default    deployed  2022-07-15 10:00:25.499615024 +0200 CEST
+commons    0.2.0    default    deployed  2022-07-15 10:00:27.868162264 +0200 CEST
+druid      0.6.0    default    deployed  2022-07-15 10:00:38.219966654 +0200 CEST
+hbase      0.3.0    default    deployed  2022-07-15 10:00:46.581528077 +0200 CEST
+hdfs       0.4.0    default    deployed  2022-07-15 10:00:56.949394849 +0200 CEST
+hive       0.6.0    default    deployed  2022-07-15 10:01:07.314849464 +0200 CEST
+kafka      0.6.0    default    deployed  2022-07-15 10:01:09.702246063 +0200 CEST
+nifi       0.6.0    default    deployed  2022-07-15 10:01:12.059869868 +0200 CEST
+opa        0.9.0    default    deployed  2022-07-15 10:01:14.413966761 +0200 CEST
+secret     0.5.0    default    deployed  2022-07-15 10:01:16.759818535 +0200 CEST
+spark-k8s  0.3.0    default    deployed  2022-07-15 10:01:17.149187107 +0200 CEST
+superset   0.5.0    default    deployed  2022-07-15 10:01:19.529351352 +0200 CEST
+trino      0.4.0    default    deployed  2022-07-15 10:01:29.867283641 +0200 CEST
+zookeeper  0.10.0   default    deployed  2022-07-15 10:01:40.24662955 +0200 CEST
+----
+
+
+== Uninstall release
+To uninstall the release again you can use the uninstall command
+
+[source,console]
+----
+$ stackablectl release uninstall 22.06
+[INFO ] Uninstalling release 22.06
+[INFO ] Uninstalling airflow operator
+[INFO ] Uninstalling commons operator
+[INFO ] Uninstalling druid operator
+[INFO ] Uninstalling hbase operator
+[INFO ] Uninstalling hdfs operator
+[INFO ] Uninstalling hive operator
+[INFO ] Uninstalling kafka operator
+[INFO ] Uninstalling nifi operator
+[INFO ] Uninstalling opa operator
+[INFO ] Uninstalling secret operator
+[INFO ] Uninstalling spark-k8s operator
+[INFO ] Uninstalling superset operator
+[INFO ] Uninstalling trino operator
+[INFO ] Uninstalling zookeeper operator
+----
diff --git a/docs/modules/ROOT/pages/commands/services.adoc b/docs/modules/ROOT/pages/commands/services.adoc
new file mode 100644
index 00000000..28030505
--- /dev/null
+++ b/docs/modules/ROOT/pages/commands/services.adoc
@@ -0,0 +1,44 @@
+= Services
+
+== List running services
+
+The `stackablectl services` command lets you inspect the running services of the Stackable Data Platform.
+Currently you only get a read-only view of the running services; future versions may also allow e.g. uninstalling running services.
+
+An example invocation looks as follows
+
+[source,console]
+----
+$ stackablectl services list
+PRODUCT    NAME             NAMESPACE  ENDPOINTS                                    EXTRA INFOS
+airflow    airflow          default    webserver-airflow: http://172.18.0.5:32290   Admin user: airflow, password: airflow
+druid      druid            default    router-http: http://172.18.0.2:30245
+                                       coordinator-http: http://172.18.0.4:30506
+superset   superset         default    external-superset: http://172.18.0.2:31891   Admin user: admin, password: admin
+zookeeper  druid-zookeeper  default    zk: 172.18.0.5:30890
+minio      minio-druid      default    http: http://172.18.0.4:32173                Third party service
+                                       console-http: http://172.18.0.4:30982        Admin user: root, password: rootroot
+----
+
+You can also
+
+- Show services in all namespaces
+- Redact the passwords from the output in case you want to share the list of services without giving out the admin credentials
+- Print the installed product versions
+
+To achieve this you can use the following command
+
+[source,console]
+----
+$ stackablectl services list --all-namespaces --redact-credentials --show-versions
+PRODUCT    NAME             NAMESPACE  ENDPOINTS                                    EXTRA INFOS
+airflow    airflow          default    webserver-airflow: http://172.18.0.5:32290   Admin user: airflow, password:
+                                                                                    version 2.2.5-python39-stackable0.3.0
+druid      druid            default    router-http: http://172.18.0.2:30245         version 0.23.0-stackable0.1.0
+                                       coordinator-http: http://172.18.0.4:30506
+superset   superset         default    external-superset: http://172.18.0.2:31891   Admin user: admin, password:
+                                                                                    version 1.5.1-stackable0.2.0
+zookeeper  druid-zookeeper  default    zk: 172.18.0.5:30890                         version 3.8.0-stackable0.7.1
+minio      minio-druid      default    http: http://172.18.0.4:32173                Third party service
+                                       console-http: http://172.18.0.4:30982        Admin user: root, password:
+----
diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc
new file mode 100644
index 00000000..08241f63
--- /dev/null
+++ b/docs/modules/ROOT/pages/commands/stack.adoc
@@ -0,0 +1,110 @@
+= Stack
+A stack is a collection of ready-to-use Stackable data products as well as required third-party services like PostgreSQL or MinIO.
+
+== Browse available stacks
+To list the available stacks run the following command
+
+[source,console]
+----
+$ stackablectl stack list
+STACK              STACKABLE RELEASE  DESCRIPTION
+druid-superset-s3  22.06              Stack containing MinIO, Druid and Superset for data visualization
+airflow            22.06              Stack containing Airflow scheduling platform
+----
+
+To show details run
+
+[source,console]
+----
+$ stackablectl stack describe druid-superset-s3
+Stack: druid-superset-s3
+Description: Stack containing MinIO, Druid and Superset for data visualization
+Stackable release: 22.06
+Labels: druid, superset, minio, s3
+----
+
+Future versions of `stackablectl` will allow searching for stacks based on these labels.
+
+== Install stack
+If you have access to a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with that cluster.
+After that run the following command + +[source,console] +---- +$ stackablectl stack install druid-superset-s3 +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +[INFO ] Installing components of stack druid-superset-s3 +[INFO ] Installed stack druid-superset-s3 +---- + +If you don't have an Kubernetes cluster available `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have https://kind.sigs.k8s.io/[kind] installed and run the following command + +[source,console] +---- +$ stackablectl stack install druid-superset-s3 --kind-cluster +[INFO ] Creating kind cluster stackable-data-platform +Creating cluster "stackable-data-platform" ... + ✓ Ensuring node image (kindest/node:v1.21.1) 🖼 + ✓ Preparing nodes 📦 📦 📦 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 + ✓ Joining worker nodes 🚜 +Set kubectl context to "kind-stackable-data-platform" +You can now use your cluster with: + +kubectl cluster-info --context kind-stackable-data-platform + +Have a nice day! 👋 +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +[INFO ] Installing components of stack druid-superset-s3 +[INFO ] Installed stack druid-superset-s3 +---- + +After installing the stack we can access the running services using the xref:commands/operator.adoc[] command + +[source,console] +---- +$ stackablectl services list +PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS +druid druid default router-http: http://172.18.0.2:30245 + coordinator-http: http://172.18.0.4:30506 +superset superset default external-superset: http://172.18.0.2:31891 Admin user: admin, password: admin +zookeeper druid-zookeeper default zk: 172.18.0.5:30890 +minio minio-druid default http: http://172.18.0.4:32173 Third party service + console-http: http://172.18.0.4:30982 Admin user: root, password: rootroot +---- + +== Uninstall stack +Currently there is no support for uninstalling a stack again. +Maybe a solution would be to uninstall the components of the stack but leave the release running. 
diff --git a/docs/modules/ROOT/pages/customizability.adoc b/docs/modules/ROOT/pages/customizability.adoc
new file mode 100644
index 00000000..020cca53
--- /dev/null
+++ b/docs/modules/ROOT/pages/customizability.adoc
@@ -0,0 +1,48 @@
+= Customizability
+If you're working for a large company, chances are that there are multiple teams using the Stackable Data Platform.
+A single team can also operate multiple Stackable Data Platforms.
+`stackablectl` is built in a way that customers or even single developers can define their own releases, stacks and even demos!
+This makes it possible to cover the following use-cases.
+If you are interested in any of them, give it a try!
+
+Any additional demos/stacks/releases you specify will be added to the ones already provided by Stackable.
+
+== How to add a new
+=== Demo
+==== Benefits
+When you have developed a new data pipeline or data product you often want to show it in action to other colleagues or potential clients.
+To easily achieve this you can create your own demo so that it can easily be reproduced and/or shared with other people.
+
+==== Adding a new demo
+First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos].
+
+After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demo-files`.
+The argument can be either a path to a file on the local filesystem or a URL.
+By using a URL the demos file can be put into a central Git repository and referenced by all teams or clients.
+Multiple `--additional-demo-files` flags can be specified to include multiple demo files.
+Every additional demo will be added to the already existing demos in `stackablectl`, so all the available demo files will be merged.
+
+=== Stack
+==== Benefits
+If your company or clients have multiple similar setups or reference architectures, it could make sense to make them easily available to all employees or clients.
+In a custom-defined stack all product versions are pinned as well, so you can easily spin up a stack containing the exact same versions as your production setup.
+You can give your defined stack to colleagues or potential customers to show the overall architecture of the data platform you're going to build.
+
+==== Adding a new stack
+For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new demo.
+For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks.yaml[the Stackable provided stacks].
+You can then add it to `stackablectl` with the flag `--additional-stack-files`, as shown below.
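+
+For example, assuming the custom stacks file is hosted at a company-internal URL (the URL below is only a placeholder), the additional stacks show up next to the Stackable-provided ones in the regular listing:
+
+[source,console]
+----
+$ stackablectl --additional-stack-files https://my.corp/stackable/mycorp-stacks.yaml stack list
+----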
+
+
+=== Release
+==== Benefits
+If advanced users of the Stackable Platform want to define their own internal release within their company, they can easily add their own release.
+This has the following benefits:
+
+- Same operator versions across the whole company. This produces more uniform environments and makes debugging and helping other teams easier.
+- If the company is only interested in a subset of the available operators, you can add only the relevant operators to your release instead of installing all the others.
+
+==== Adding a new release
+For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new demo.
+For a custom release you need to create a `mycorp-releases.yaml` containing releases according to the format defined by https://github.com/stackabletech/release/blob/main/releases.yaml[the Stackable provided releases].
+You can then add it to `stackablectl` with the flag `--additional-release-files`.
diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc
index 5387bf2f..bdd80c0f 100644
--- a/docs/modules/ROOT/pages/index.adoc
+++ b/docs/modules/ROOT/pages/index.adoc
@@ -1,5 +1,49 @@
 = stackablectl
-The `stackablectl` command line tool is used to interact with Stackable operators. Either individually or whole data pipelines consisting of multiple operators and multiple deployments of tools.
+The `stackablectl` command line tool is used to interact with the Stackable Data Platform.
+It can install individual operators as well as platform releases.
+It also ships with a set of pre-built demos that utilize different data products of the platform, e.g. to set up an end-to-end data pipeline.
-Go to xref:installation.adoc[] to install the tool and then consult the xref:quickstart.adoc[] page to get started.
+The installation of `stackablectl` is described in xref:installation.adoc[].
+
+To just get a quick start, please follow xref:quickstart.adoc[].
+
+In general, use `stackablectl --help` to find out more about how to use the tool or how to use specific options.
+This also works with subcommands, i.e. `stackablectl release install --help` will show the help for installing a release.
+Often you can also use an abbreviation instead of typing out the full commands.
+E.g. `stackablectl operator list` can also be written as `stackablectl op ls`.
+
+A Kubernetes cluster is required to use the Stackable Data Platform, as all products and operators run on Kubernetes.
+If you don't have a Kubernetes cluster, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you.
+
+The deployed services are separated into three different layers as illustrated below:
+
+image::layers.png[Layers of the deployed services]
+
+== Operators
+This layer consists of Stackable operators managing the individual data products.
+They can either be installed one by one with the command `stackablectl operator` or from a release with `stackablectl release`, which is preferred.
+A release is a bundle of operators that work well together and is released approximately every 2 months.
+
+== Stacks
+A stack is a collection of ready-to-use Stackable data products as well as required third-party services like PostgreSQL or MinIO.
+
+Stacks are installed with the command `stackablectl stack`.
+A stack needs a release (of Stackable operators) to run on.
+To achieve this, a stack has a dependency on a release, which gets automatically installed when the stack is installed.
+
+== Demos
+A demo is an end-to-end demonstration of the usage of the Stackable Data Platform.
+It contains
+
+. Installing a Stackable release
+. Spinning up a stack
+. Performing the actual demo
+.. Prepare some test data
+.. Process test data
+.. Visualize results (optional)
+
+Demos are installed with the command `stackablectl demo`.
+A demo needs a stack to run on.
+To achieve this, a demo has a dependency on a stack, which gets automatically installed when the demo is installed.
+The stack in turn will install the needed Stackable release.
diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc
index a0e70d95..631cef18 100644
--- a/docs/modules/ROOT/pages/installation.adoc
+++ b/docs/modules/ROOT/pages/installation.adoc
@@ -1,60 +1,99 @@
 = Installation
-The `stackablectl` commandline tool does not require a runtime; it is a binary that can be executed on it's own. Below are the installation instructions for <>, <> and <>.
+== Pre-compiled binary
+We ship pre-compiled binaries of `stackablectl` which should work on most environments such as Windows, macOS, and Linux distros like Ubuntu and Arch.
-== Linux
+Below are the installation instructions for <>, <> and <>.
+If the binary does not work for you, you can always <<_build_stackablectl_from_source>>.
+
+=== Linux
 Download the `stackablectl-x86_64-unknown-linux-gnu` binary file from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release], then rename the file to `stackablectl`:
-[source,shell]
+[source,console]
 ----
-mv stackablectl-x86_64-unknown-linux-gnu stackablectl
+$ mv stackablectl-x86_64-unknown-linux-gnu stackablectl
 ----
 
 and mark it as executable:
 
-[source,shell]
+[source,console]
 ----
-chmod +x stackablectl
+$ chmod +x stackablectl
 ----
 
 You can now invoke it with:
 
-[source,shell]
+[source,console]
+----
+$ ./stackablectl
+----
+
+If you want to be able to call it from everywhere (not only the directory you downloaded it to) you can add it to your system with the following command
+
+[source,console]
 ----
-./stackablectl
+$ sudo mv stackablectl /usr/bin/stackablectl
 ----
 
-== MacOS
+=== Windows
 
-// TODO someone with a mac should verify this
+Download `stackablectl-x86_64-pc-windows-gnu.exe` from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release].
+You can simply execute it.
+If you want to execute it from anywhere in your system you need to add it to the system `PATH`.
 
-Download the `stackablectl-x86_64-apple-darwin` binary file for Intel based Macs or `stackablectl-aarch64-apple-darwin` binary file for M1 based Macs from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. Then rename the file to `stackablectl`:
+=== macOS
+Download the `stackablectl-x86_64-apple-darwin` binary file for Intel based Macs or the `stackablectl-aarch64-apple-darwin` binary file for ARM based Macs from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release].
+Then rename the file to `stackablectl`:
-[source,shell]
+[source,console]
 ----
-mv stackablectl-x86_64-apple-darwin stackablectl
-# or
-mv stackablectl-aarch64-apple-darwin stackablectl
+$ mv stackablectl-x86_64-apple-darwin stackablectl
+# or
+$ mv stackablectl-aarch64-apple-darwin stackablectl
 ----
 
 and mark it as executable:
 
-[source,shell]
+[source,console]
 ----
-chmod +x stackablectl
+$ chmod +x stackablectl
 ----
 
 You can now invoke it with:
 
-[source,shell]
+[source,console]
 ----
-./stackablectl
+$ ./stackablectl
 ----
- If MacOs denies the execution of stackablectl go to Settings --> Security & Privacy --> General. Here you will see a pop up asking if you want to allow access for ‘stackablectl’. Now allow access.
+If macOS denies the execution of stackablectl go to `Settings` -> `Security & Privacy` -> `General`. Here you will see a pop up asking if you want to allow access for `stackablectl`. You must allow access.
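+
+As an alternative to clicking through the system settings, you can usually remove the quarantine attribute that triggers this dialog directly on the command line (this is a general macOS mechanism, not something specific to `stackablectl`):
+
+[source,console]
+----
+$ xattr -d com.apple.quarantine ./stackablectl
+----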
+
+== Build stackablectl from source
+To build `stackablectl` from source you need to have the following tools installed:
+* Rust compiler
+** Needed for compiling the source code of `stackablectl` itself
+* Go compiler
+** Needed for compiling a wrapper around the Go lib `go-helm-client`
+* C compiler
+** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate], a C compiler is needed to compile openssl from source
+* Perl
+** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate], Perl is needed to compile openssl from source
+* Make
+** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate], Make is needed to compile openssl from source
-== Windows
+
+If you have the required tools available, you first need to clone the `stackablectl` repository from https://github.com/stackabletech/stackablectl.
-Download `stackablectl-x86_64-pc-windows-gnu.exe` from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. You can simply execute it.
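+A minimal way to do that (assuming `git` is installed and a checkout in the current directory is fine) looks as follows:
+
+[source,console]
+----
+$ git clone https://github.com/stackabletech/stackablectl.git
+$ cd stackablectl
+----
+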
+Then invoke the build with
+
+[source,console]
+----
+$ cargo build --release
+----
+
+After a successful build the binary will be placed in `target/release/stackablectl`.
+Copy it to your system's path to access it from anywhere if you like:
+
+[source,console]
+----
+$ sudo cp target/release/stackablectl /usr/bin/stackablectl
+----
diff --git a/docs/modules/ROOT/pages/quickstart.adoc b/docs/modules/ROOT/pages/quickstart.adoc
index d83564b1..5f2b3e16 100644
--- a/docs/modules/ROOT/pages/quickstart.adoc
+++ b/docs/modules/ROOT/pages/quickstart.adoc
@@ -1,85 +1,5 @@
 = Quickstart
-`stackablectl` interacts with the Stackable platform at three abstraction levels: The <>, <> and <>. These are to interact with individual operators, a whole release of the platform, or specific combinations of products that form a software stack for a specific use case.
+This page is waiting for the xref:commands/demo.adoc[] command to be ready.
-In general, use `stackablectl --help` to find out more about how to use the tool or how to use specific options. This also works with subcommands i.e.: `stackablectl release install --help` will show the help for installing a release.
-
-A running Kubernetes cluster is required to use the tool. All operators and products run on Kubernetes.
-
-== Operator level
-
-Using the `stackablectl operator` command, available operators can be listed, installed and uninstalled.
-
-For example, `stackablectl operator list` shows output similar to:
-
-----
-OPERATOR STABLE VERSIONS
-airflow 0.3.0, 0.2.0, 0.1.0
-commons 0.1.0
-druid 0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0
-hbase 0.2.0
-hdfs 0.3.0
-hive 0.5.0, 0.3.0
-kafka 0.5.0, 0.4.0
-nifi 0.5.0, 0.4.0
-opa 0.8.0, 0.7.0, 0.6.0
-secret 0.4.0, 0.3.0, 0.2.0, 0.1.0
-spark 0.5.0, 0.4.0
-spark-k8s 0.1.0
-superset 0.4.0, 0.3.0, 0.2.0, 0.1.0
-trino 0.3.1, 0.3.0, 0.2.0
-zookeeper 0.9.0, 0.8.0, 0.7.0, 0.6.0
-----
-
-You can then use this list to install an operator, for example:
-
-[shell]
-----
-stackablectl operator install zookeeper
-----
-Which will print
-----
-[INFO ] Installing zookeeper operator
-----
-
-== Release level
-
-Using the `stackablectl release` command, available releases can be listed, installed and uninstalled. A release is a collection of operator versions that work well together.
-
-
-The list command:
-----
-stackablectl release list
-----
-shows output similar to:
-----
-RELEASE RELEASE DATE DESCRIPTION
-alpha-3 2022-02-14 Second release which added Airflow, Druid and Superset
-alpha-2 2021-10-29 First release of the Stackable Data Platform
-----
-You can then install a release:
-----
-stackablectl release install alpha-3
-----
-Which will install all the operators in that release at the version for that release:
-----
-[INFO ] Installing release alpha-3
-[INFO ] Installing airflow operator in version 0.2.0
-[INFO ] Installing druid operator in version 0.4.0
-[INFO ] Installing hbase operator in version 0.2.0
-[INFO ] Installing hdfs operator in version 0.3.0
-[INFO ] Installing hive operator in version 0.5.0
-[INFO ] Installing kafka operator in version 0.5.0
-[INFO ] Installing nifi operator in version 0.5.0
-[INFO ] Installing opa operator in version 0.8.0
-[INFO ] Installing regorule operator in version 0.6.0
-[INFO ] Installing secret operator in version 0.2.0
-[INFO ] Installing spark operator in version 0.5.0
-[INFO ] Installing superset operator in version 0.3.0
-[INFO ] Installing trino operator in version 0.3.1
-[INFO ] Installing zookeeper operator in version 0.9.0
-----
-
-== Stack level
-
-Coming soon!
\ No newline at end of file
+When the demo command is available, we will browse the demos and install a demo together.
diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc
new file mode 100644
index 00000000..27de0b29
--- /dev/null
+++ b/docs/modules/ROOT/pages/troubleshooting.adoc
@@ -0,0 +1,52 @@
+= Troubleshooting
+
+== No internet connectivity
+`stackablectl` uses an Internet connection to keep track of all the available versions, releases, stacks and demos.
+To achieve this, the following online services are contacted:
+
+[%autowidth.stretch]
+|===
+| URL | Purpose
+
+| https://repo.stackable.tech/repository/helm-stable/index.yaml
+| Retrieve the list of current operator stable versions
+
+| https://repo.stackable.tech/repository/helm-dev/index.yaml
+| Retrieve the list of current operator development versions
+
+| https://repo.stackable.tech/repository/helm-test/index.yaml
+| Retrieve the list of current operator test versions
+
+| https://raw.githubusercontent.com/stackabletech/release/main/releases.yaml
+| List of releases provided by Stackable
+
+| https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml
+| List of stacks provided by Stackable
+
+| https://raw.githubusercontent.com/stackabletech/stackablectl/main/demos.yaml
+| List of demos provided by Stackable
+
+|===
+
+=== Mirror helm-charts
+To allow stackablectl to retrieve the current list of operators, you must mirror the `https://repo.stackable.tech/repository/helm-.*/index.yaml` files to some local URL.
+If the file is mirrored to e.g. `https://my.corp/stackable/repository/helm-stable/index.yaml`, you need to specify the following arguments to `stackablectl`
+
+[source,console]
+----
+$ stackablectl --helm-repo-stackable-stable https://my.corp/stackable/repository/helm-stable operator list
+----
+
+=== Mirror releases/stacks/demos files
+You need to mirror these files to either another URL or a file on disk.
+You can then specify the mirrored file to be included via `--additional-release-files`, `--additional-stack-files` or `--additional-demo-files`, e.g.
+
+[source,console]
+----
+$ stackablectl --additional-release-files=/home/sbernauer/Downloads/releases.yaml release list
+----
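+
+Both mechanisms can be combined. A fully offline invocation could look as follows (the mirror URL and file path are placeholders for your own environment; the dev and test Helm repositories can be mirrored in the same way):
+
+[source,console]
+----
+$ stackablectl --helm-repo-stackable-stable https://my.corp/stackable/repository/helm-stable --additional-release-files=/home/sbernauer/Downloads/releases.yaml release list
+----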
+
+== `panic: open /tmp/.helmcache/stackable-stable-index.yaml: permission denied`
+or `panic: open /tmp/.helmrepo: permission denied`.
+
+See https://github.com/stackabletech/stackablectl/issues/39
diff --git a/docs/readme/images/layers.png b/docs/readme/images/layers.png
deleted file mode 100644
index cdc8d9106ab24a14aba03c42f2244a5901e99b3c..0000000000000000000000000000000000000000
GIT binary patch
(binary image data for the deleted docs/readme/images/layers.png omitted)
zQfrUOGym6xp`S7QAS*a^D9ameIKovQ0tveKKZ8c61r85B8Qb^}2SB%2QOgG*% zhLJ5+-qJTIhxt4&zkCoe38@>`i<@}_1E&#L543mAEU$e9&v zWGk~JTVY@R9(+YzgnTY`_50BWF3Xqu3LJsR!Rhouy*~fsPpj5-MyLtWoct%qrtv33 zzp{Yzb-C#@emf7W$Aodc6EIZXN84$EX8keRB)r`ioHJXc+C8lz zhweuZL~C|y?I{JrzUli#li7Zrijb0n3!AZ2XxZwD1KW8-ElY9mY_n{+leS3@;Q?n| zvVzEE5+l;#wCDY)#OPyf68-!F@0X1`d0 zGo3+EX)8LM!ORiK!b*^PV9*FPtY9q4;*BDWDwRn8*CGLn?sT;_N35R8N2AzYoaesV zTega4?eso&=%09*O3q#0G)RC#D$aRU%V#dzodREXP>I&28vE$r32u##c^7qCpNy!n zh1X0x`Tm_s&k?YM@3PqAz811z(>8a@Pin?}EAY35Zadi_RdI@g@Y?||GS~{$7Xm+# zivb~O8iK+HY7bl+Z6DFgPMF@*dW`=3$#LnXCRYB4fQ5lj{3tzJx;WMB!n|fI-OX^iYcLp_}3SD&UT`fE3N>FlLakIMfzIT8L|zTPuUtWk5Jt zoRcXLk{s7YH{IO!rLD+RlNsZuCX?D)9#N=v4h@`_HpL>y*WQD5h*ywp0#XsdbWK!p9dx5>Q zw)IceoZGK$lX^iM)#YBE>*>mlc-&o59U*rM4WkV^s!%=afY7Ofs14=3O=o9s%7*{$oug zu;sIbj^Ogg*ZG?r;OIN?Ut|_mf{K(S-L&6E#OG*o$7ARTQfx}tM7$m}I1OHsVrPGf zT7gWtPS|@qByIobRdqR_-kkaJ0-JFOHXVPRQ%vR#^|e&xfIT`AtCB(-T^(>vEMC>z-c@Rr*pii5eno?!h&OKLtF3F=8>T*1U4~L| zM25-|&gaUQD;7^3LW_g<{YLAg@$9Fz2PTZ z=2!#Ck^UD2>~LMH$BN+UFzg7B6`a7 zoi9EH1bS7wo}{A1W@KaTq~0Gqj+zb@ANEFCfL07Q_!JhOA}6~O+8gk0W6F4(#MeS` zIJ!e=+d-}_u85=R%ZJ~Hxete1Hgsgyn|PS|fvITOdp9eTW(e(E0jo>X@15Y9j+ij5 z@1%U~&3%<`wZX>Pi{JPLVqW!E{VW^76Pt;Q0KrbHHyBh>?~Z@-lr}b4DJcS}M&l=| z9MtH~f~6Iay|0PKAz~&p>Ad{wm9*Fgl<8-xyr%7`vNjUjt9|&#U~_o$r!4}DhrS;R zf4&a;C@J=hWi4Vyno+*ALg3_=IP_sAlJe<%OHFHq$6$)4(eyXNg%|8|3n~0djQhZ{ zm3ul&S*W%o-P+NThMOdDJKMh&-ruV}#WJ#HIxsg0-Sq59@pWj|_g&)NfO|X08u=ci zgs2^@-rUkj++M^mIeLbNvR`}hJA~}>D}mO~V<30^XJRjwn5#4p1NRLp>9Wr)pHPo0 zxAmMr!B(4QzD*>^q_2cY=t^kxOpG!ZYG`J@GZP*$v-|0}SV!Y6;X4qKJkh3)y7!Xd zm94?dAvIfU$#2{0NG5%2RNA0DZkm1&qxtWO!$KAzP4XL`RRYm*+!gvP{d8*T$vr6x z2Jf~uDS%S1;r^=bTdRjiqTHbIdM<&0*4b@o{!j+IgCr(1(?Y@r)`J0l3)bH2@C{l? z%8i3-9uZIGQ%3Z7r86VLfly+2>E52!M*oxel6Wxa-9>!)4?nZg z5?(EZFmIF6(@PoART)x_5Jl=2iUdg==%ur^RJmeSZpFN=W15RPoF!=i!lj&nk|28B zlfaFY*9{c~C4}IjL)NaESFIJr*(**LCRv6*NETZ*aN)=`rqpv1~iZPgD%2^Xgu;ou729yGh&UQ&df#hz2E>PDm`ip(Do3x(j+3S%ge#bFM z>PAea=z)N!J*U86QXKz>OzX6Eoe1OZ!-;K6Yym5eHvgB(#?BRO3ULCL0t-3O@_WUb zPYJA6y7sYA<#g7Xz!dmbhx#qO#UDnPR0Y(WcLtmocd6lU7^(K1d(#v(LTS`p#Z-UD37R8k2u~74Hq`dKKDM~w``CR__^_ZQ zbw(>o?$#+nTd)uM@;rNUqEN`4mZSW-lc7#SLgRXkP`s$se1v5DUcBMLz}FqCgMcET zqzdg3w}RFTfh{4Opaone6JolfOSX~0p^L%lp90-eaqo^pE}a9Dn8LpKh4VzM@Wjo@ z_o}p;QOT7UnePunObW$ZD&y7g-;@@$rnm?v;$y=b;^^Ah0EBdq}>UOF)P6f!bQ^BMtGy7pR$vc zsdcO1&UmITh~K?wYEf&WY(Kdiyv#oxzJHeY1UZ9h5y~QEJYMYA2X>1et%uvza|G)n zE!CY?%*g6_lt2)m{*Qy2>N;VrX&-U4>=&#(9WyERqo)mb)$3YqYC!ZUfOX}t&NVnEB@OPSB$4&Tm`#ZxPjIlKn`ydNoUtL| zm&+zi0jDtb`J^5$DCJG0Mb}7OqQ~gz{`Sz z-bXykR=_A&uh6s0YEhUR57-pe4mG7-^L!<7-;Dx*6{HW4z(oQnCQ6Fh8lI?$8`;c@ z8a^|+4t?;!_Y)fF8_nghPm%Y1=)w_~tDZsMaV;LUu)d7NJIYWX#{y^iGX#XEvimVx zE~>uo=@h424v^^yX+Ip+n;pw!=d{QKASxO7Y=IH@T|du!W2c}{gCm_9m8oW+^UiDv zq@QnUDG$|4l6CeF+=1I^P%QS`t!VXWdFnW6Qb(Jy2okqY0QGAvU-{ru^9zr}6d>1HK_=>$~5Z3$$(E80V5Wp!6k=uo?~*-Xz0Je5w$x-7Da}BOZ@TEXiInXXgVUA8d%?Ov+Nz`j0V4;(v zR**hN_khOf<&4P~=RN|1h(o1C`!f7${F&F>OdH~+b{cz~aDG*9EMWaQQ=zCxdPUF3 zV}NBfpsWDi4KKf7Tho0j0Ww?s?VnZl{9?9dcTwosvl;pDx7h{NT$wLCZB)jq>eX9+ zR|UTuNI84>3WJ>K5=368rMcklXmsI){3;~wv@H*T9(U3`YuZT0e~;szsYVqhQbV{1 zk&Zaq{nCD_b5o1l=#5+0keWD97r)RaYX4C$t(TP`Cb;wy0P<)f(dCq0bOSKN;vDyK zTpVg@DFkpvzozS|F#l&X8$W8J=ufUCObsX-m4``HZ3ib@{-KZVw zYk^v1g)%)P4>@VWukbTyY;}r@5e|`<4Fb6Fo1&DIBKcO5;)&B4b;HuK$v3v0QDos7Kpu`YjC>9)rKQr=G;39k zB_B?KKk}BQS=P!q41Qj&;&-1V{9CrC3pF+o^8B6V_JnE}vvJ5;+3af}+e@q)DW6rA z!s0gvcgkfc><1e^soPWAw|C41KGvVLUpcz&{Gx5MTKy6+8vK$&korXz)w7Uw$q&5# zhj)d#!kWz|ua( zd~8kb_Q|)xH)Oq2*p8Eeh~?g;kSo>UQvX-R3#)SW0iJsBH`jO2g2)w>B?6#^O^U)~ z%Y=g|rxm2WoFhydjyJ;2_@7)RC`6h(G$PrkY~-{ljN_>Mc1R1 
zcp^w0$BD7xQ&a5^N~hw>=2<;?!MTIA3-6H6E3aODrx-t5`+4BKd+||IOWS1+^KGHv z#ilVR7i+ie`bQY**O=?}18(=NtAq9*OQ29KG|nA`mFbXa8J6NvVJT3^$%d_v6LG{` zRZ^IWXcR~cH;GKxI(s0JM~>>yq!jTvGK;@;e636H?Fwe|7ANOwhuCQS>TBMpkqFY0 zZ|J+P@~?c*eDXc}@efF=oL6$pda#2UZx!PCx0~MF1+%Q0B~E+Vf+J}3P4qAeSxyquxS<;%h_5kh3^#w z*rI-!E0iU*KdI}d)5C&k@$wt44`*sl5Bb2E>1g8lYqV}OnNL?T}Z>0t24qvrM*ua4m*Fsppb}n^XKiq|32R4uGD z_D2p}?D#y$dJ=PRW9aTKQ&0vn%)#!rjW2i|X~`Yzi3St6m_x@s_IgLLNN76OB`#j+ zW{A^Msnt#Af%v152$J|yMBM@}ppis;I$7^D_eN?sS-ELT`{^`8dCOOPo$Qt;mpgCn zj~ucTX0iN1s1SW=L_CeuI8rO$kb2Upf2kxe!;=}SjJKb|(&mcL40+Ig*>vLF+?T|b z`H3Q&tk-t=t7?|ncjCO{H>85J;iNBuOab)Urt@H`^mR|4c^Kz{sqrSrF910rv`*)) zQ|j9&=4;&0Df@0K1Ct2ZzeFIx$ja2Ta9mich9;-5yv%vAEsAXO;anObn=iWa$nw1x~lccPRx%OlVXL|dT zS()14qa^@+-e>QyjA>ll)U6o(M1-p)VeHn7=_MS!Li(PpT>HD`u103C%@eBjBsCDS zWXy7zd&WE@go2mOymUU2_p-x8>0A{k1D5fA8?eF89 z%SX+WM9#k>V;MRBap06=Qa z?B;6i{$>dOZvDe2mM1O&9m&)$xV@8+JWeW_c*2-yjnhKa_c$zV?Swic?I;7Y80NbW z;YhU+e)f90{&0lwMdqX=c=vHtMQ_Lf2S37 zF1LR^w#h!R^di*uDfNB{fX4*)E2nZyD7yHHH#+0pU3YZZQvrK>;+Vc0pv?rbZYDe8 z7Yb0G>$+k-y(*HrBpSynw! zy{}mG$}$`4H?X zWoFdT=$tfMlsOzJAxh}%0rkcF0PCg*wn(lIJ6ku?2caf8E;jMSQ%?WX)yLtlT1Fou z_uY8f-vf=#ndff{)vbi>fJQ*55_tFD_fPK~gEn8Hft~+sk-_)>n=SHVq{YYV2!7S< z$G`uxk%BC(FIB*+_=rlSxvl^d=L>8<$5dc`)XD+U9l-Wby3@oy+5+B4ZWaxMSrqd> z`>g+CR~W#ic!+xL&dKN4L=!IiN zNC4f<-%N6T#4ovsXzscsvYZ)#zkOj|xIpKg)K8w~0nlS1P9ap? z{vu3)v2MoyX7xh2$4z8!So13xx)=!N6o?tFc1v#lh7A7lI;QG~2b2X%Yo);eONSD^ zP9vYMTzMA~PX63xf%Xn&b4VGX49=w2XpP04euYMjU9ZobzD9HR*&br#uX;NJQw;eE zWD?HoFR{2}@xb}sjJ|DxcOkkNQ|FzDA-x&Q?+Kuw2#kPM3Tyr-x4n@F$;R`qZQdx( zeGo4Edflw?JY!aMqSEi)%!m38Gtm3>YDk zuJ`IjCiMFFk?Q2eW5A9qu%pHtn7}iipUhDb=ztv)Ko$TR6&58n$9UKG^6!T#Lu2zt zJkM(ieq+=K*X!;t!s}*3pzJj_mT*t3=muhRBcf$w2Z^$rC*`^s@1BivMZdVwi!A^? z55MV2s@n)ft5IA>gN8N$QfL~#2(barM>o<}IurnC&|gukl(2-;JGB=1Wch_7>nN$Q zS5~RdCi@YMJT&u1Ng*q%t`0kokp+gmtHD41XIt%(664|mdSYOtO#FOrIm#S zFiB`=tZ&_8Br$}hMI9@Xt|Q{5R<|!A2e^Om=l$i)0eH~M<;IzeeVH?A6Lj~+p5Wdy*ltkwIQIrx74oQGXVx{WX%7$JKJ|K1l zNG2#w2{(FUz8<$Xv7{5_JR@$6DQid?T6<7`p3^@Z z$;a_;Z54h)hk;Jp5g{qjQ9YXp#GuEkD9Am)B&0-@#1$zpfmLj)v#3xltaC26XtIN| zM}RY^^BSaTVfBd@M)s@)N3Zx!f5?4~aSwR(65Mb8rm#3=T+`!>(n^3;Oo?8m{SiL| za+!AP7e|5~*6b&QfO&l5kojc8#LSCwSWc3F5shAue{)X=`B9s}-o|+6RF#KV`$%?5 zsy>;5i7~{Oh^c_S*cBlex?H{e1$Y<+w5#|48R+&NuFIR!0m-VP}4)?41w^`S>Zjh4oB1X zo0*75)Jf-w+vK$3{Ud>>q-(a_4{O(h2iNq#9rS>Z{Xs`*Ek@jTijh3h-d)AsJJj=I z`{&yaf1@||FG%G}ZK*vcxuSM;4hiU%K`)(*UJpC0EvOL#p~?blT~Y_0wD=Dz6Ez?; zXVQ_x0|n!ng>9~!B4Ev*%m1HQQyol6DD3%O<|7&qNDAS9FAO=(Ng?YJLICTkACHcR z2Y6VKA3E_VI4*3{Bw>?OC{7#f5%U%+!oZUft^aMU+j~LB3GWK{##oigNa{;0&4&bjk$rd8W6~W zo!uE2fJyLhrz7>0!IbWp)mMKOYBs&{_l&CPeZPL><{Q-5yz?a+$`&;e^4@-a{PSCq z|H-y>X*l8$x*i~++}x5j2Z+)@j8T*Wb2vMMD?%PCRH7J~pQrMH8`|pYK2vL-MCrTy z5;bKO(mz>dWPNqIhZTj31q3cBbL$roJdAZ}mUs97JJ1RD2u^2?jv*aEki6IyFbF_X z#j;=-x2T5Yoo{a#JkNF{o7=nt%|8suBkg3wcn5Rzr^g01Hx}6R!jfR8 zJcDT#IwBATZlc{na~)tUS)WC}P4lM~WilUefv|_t|Glz7TJ`|$;!VEl&@=P^z2_Jk z6hllx9Kvr#a^LXhyz=b_%NT*vZj#$D3!n!6i+RVW9?iT?UO?85sfcO!7xE31 z3I9U@Ad2*(kxd0)Ay>>oTS%5IUe5YJKlrVcQ>cQv&-No9ElW$ri={>fvTNt z)MazbLD=dkdZuP<86n-2qH5`;re=nwaLvifuI5fao6c@8u(3aawc2^^lrG};V1cdm zl!-Fw_3d}0y21#0()x`aslZEG4}dGXuX6g8I^;q2Sj|CzcKi9Zk>GwBGJn9JaNv6| zWRRofy@#HCWd8Lihg!s+R%6^;{FO8@l16DmXi7N>;5^1bOj!!`m^oeBk3Y@NTGsL+ zniGHT{LqargH4p?8x`7&u2=hK_X36JeX%9HF`iulXynRQJrO2IgEO6F_=0ZHLlK#B z*D15pxrQ$!NW@*cs@wvfo{RldZ2~h($%Z!gtxvh#r0P??qJn#eivh`rn`Zt-*q|K7 zlbuZZV_ST8NMxw;42p}(1hgi*1@LhvCg1ek+(xi_oTcsGn-o3$g~SsCi=F?{^1Gaw z`i+fS22Fhu&;@4IKj1j;Y*ga@Ih;mNV|FFd*-U7mS}0#jZ`1^ z*&w6yO|SXjdFp7g;Da0W^}L#Ny9382Bq1x=*Ut>hPp3QPXg4lA}Tk^i6akgFhT%5LiL-mJq){^=7GsLVc1%-pfXhvQF41wI(_ 
zO58S~sXm!GixlF9nal5il#QnsW)J%Os{?=d7HjRifIn$r4ohtnzq?kxfAo+;>8=5{ zr_%A46dPGI+3k7K$oh12>Ogha+D5BTG9Gq(#35S+f@!m8FYzhkWjYlM8CQ2palf5~c)r8V)OoFaZ{~Ja8&!j zX|`p)@6p{a5BP|$0>5kEC>&$Dn)!-ACGJT&vm8zMAT|8WPRsf8P%^F}2ihU-*pEGQvAW>#?mW0)VGUgx1fGH)h#JQfY(c zNK2M4QWxJ@#tEs2N)IYs@I2i~J&t)g`Tgv9ro-Ls?~>bqD$(Htd-%z__HDsd>zxsg zpIc1Z3lH9Zy=RLqMWZKrQoT6Q5jVx0PaMt0el(5Z<90juYO1@LxNH)2{YfKu*(beG zFc-dM>+gQ@rTt?tpNUzQpZ5~xpw+Km2D9kyT}Sc*X`dRDl55!d1RbV?by^ywd;E%> zu02gH1C`#;g`*$LZ1ShVC(Dx> zbD!A^_kQTH-U*d>v{5si0m1~;=WJW)4?9n z@$u}s#S*hrV-4lSEAzJ6EiQ%L_(qd}vF{N$q!sV*D6~`g**Sg?K0t5+D`4bb?xZfX z!hErRMZNJ6^DOxmKK~IXN?=v^QhhuSiD#$@BkLl4wlJEntW-mBIq(VRfHUTRx_C!{ z1ew(w`|eD9_TxQxv*QZA`T$>9K!1f<#S!Jm!8ax06prXKdbzg=rfvQ#Rdq2=i>)&U zHbE65GPh6o`aXBN)p@ci={@=B6K6=B<98b?1v~FzbJcMSX80gbl0ou_6tedM(|_?J z(w~OgrPVu9g*+gFq~lScD6{7T!rAwVm_@6g^0*7WEUK$;+k{EPsf7GGPmjNyO`lqn zn%6w=!~5Io0?zMG;o8;;Or-C#=daX!=}teh~y$}cxq5x z{En4sTsbIzOfBI)eTxHl9#w!2JsMxD)k5E-H>}-Z%5j;`GwpUsG8@NR+?euCepd1V z>zF>iLRYQsmfPBxu-FadUTrnQ!)-q8<+l<={)&j!KYZbcW(W!p)pCL_WeZpdA9pk1 zUIA}YI1meKw~=Rm!i{G8pn+SNBjlK9JkyvRZ8ooUw`eT-eMK-^sJ*bZ#k+{)KH(yd z${n_=LDJ8KWC=TV^$I`Ln3eSH@5HUGuH{V?8oqMX=uqjEv0TZK`aKdK9EUdV*KlTZ?BLew#fP{s~0%L|+RteZAzsBDULT-;9it zCgQpmct{2)9Di;jm|`s681L%%5bjwk!rM@@0hgWCTWi)H9k*A$n6sOs#jO#z`wJ}% zr?<;A``uBGHf9|wPMUP?N_i4PWMiwD!6HU&z9w)TX%olv;!Xhq^ZB z_NHVfpb!0Ok3KgUjvx0h^0Sx&4gqPa@3dlT{(_E&RtDQO&fM3h{o-)*uHNaeEQDKF zu<70Ju?i2^MXndfUF%7C7#rXWUnlmcD#p$%3opK-@U2nP&n}0g*Bo26v#MpT!9M9# zaX05PNvK^s*MWPfOP}FL0^#3iChgE~J9*9ipC|+ylWwmv9OR!`|KQv;MoQybWHwfc zNjohG{vrWDU|!rhZ9ABN-#FqLl`+f;cKYqKCwuejBAwp2( zcz$Q}%ho+Z(3A+GOl&tP@Tu4j>^HwY3fulFBj`46Kkrs4&a2lLH7T}um=khThk9O- zQb%1rWkgNXbYBzf3GiK=NsK~p6Mfa3x4!6$NR~VOkPgUM0hmWb$J6DvC}h4N2uV@x zwjbH?Rvo;HZ}`{qM0&n7x!*cY<=5Tm@x2OPyWo9QwP{8Ol|n7 z{@kYU39`Y9iiJAx+CDMc!i^$(C8yokgoth(-}z4I^_l$C2YSoiw=6`Rh}+9j7jwkT zX_9OEMK7Wk<4gaa7@z;#eP}eYWJWZbz#5)Wzsn--6!`h$0EIAqB9uYd0c?C)X~-S& zlc=~*is|7LUzvZuSinIOvhxR;>T-z#YUGJi*n=B@!jI}2Jm|~Y!mE|&-GB1NmIL5x zCy#ljS+5EmygP-KdB}dF`V!l6(5~F+dUt&sPqIMWR;3|kc=o|~t^ocip@$qGX&S?Zf&vO97aehBL6#IwW00i-8e`%y z&*}T5eR)3?@_P74N$AE?*u|BT{n?Y599%`gYT95ST)y+GgOFOXa%e-oaj>Fu+qV>d zhVH}kjWMs1q6+>X&NSYc?!!T)YyQSBxg;|arP|}}eP!=S%VYeH-$bw{4sYM+QNLGC zUo7wStlNh?BWT%cjxn7cGa*6>xOp6m-@YWDw*n72J=`D4CPGG*ItDtE3M%&?^~#JT z4?+X$Pr;IxB>+S1^%M#7h-}jr7@07a=%woSh~MwVEE)WR9M3ejdk)*^HWONYTxdOL zrM~Up;C5XUsS~a(`v~<7PwZi2otphxkpR?(GM`&yqb!`%slC@}{XxaB#*5e>JgVPA zjNuhpQuk;duF0px-oMz)tUhdv%MmdvQ2*5?y)sr}QkK&)ZLRPG$&3?W;4yS~Tx#?9 z4g2S6>{RdXJL_Q9+cUL|$I82J^7-FzMNznr)8!!U^greB2iGeetd<9&7z_$v3&hdk zRCv32otT-CW@=^e77vI0hRnkurw1o>_EfP=` zuKu`rh@5^YIV9|3f_>IhtYJ$mAMYn<5~lIIo^v6VF%5cuWF7!j>TG?PlG_urSIjHb z#hN8ff+xf#4yv^}8({|{k z1?Lm)mw2}9``Nb%BA3f#R6Loi;Q<6`%u{TelZo=zXJIZw-B(t-d4588BvQ}au`w;k z8<(xyubW*wVm;o`{`>%&79{NAAdV*MRXiJWG^W4WH__d1yzysXBpmg-=g#dpHnrqP zY|!k*(rL89r#hT^%Br?oY-^ev&RT$DugjY0@#Tdfs`f!B^(Qr2y~)D$B@F|L;qnD3 zYG|qE39l?FjQi?VUdMP}%KP&6hZOaKMC(QnAx9FSHn5B+w!iD)+7I}(!UO<6`z?EP z^u3Fv>H=$64qceBh&8X9uuVNqqHp3Q8gJ><+Z4)mf?TIAJ^c228fp_l?1tSotjnJT zfK^5+TkLaQgm+e9#&yc`4{S@jZcyY!FR)_!d|lC8r-f#^ZFOm*PR&+lw~ z&ktAbeI}bRVpK`%NV9&L<-xUn6B5bgz*=lJPWHYvsZ%%n>rOAGkde_jj%;M2gmylj>qpA3HaF zIzzOmEE4+&r35YkHDE6d@#**cGl8r=>Z6*ggN9Tw$!Om`>Bs2q3xfNmS?h?kSiP;J zl_Rd`HiLuqX#c zc7nuvTlraUNxzh!(B;QsG6^doGxx`jqK_&*Kf@+s=wqy!b7|=(6u&lyP2rvVS)uNw z1J7?yMFWG(Xw!^k6PNq!Eyc6FaffxYpDZ{d+ut>7yac`(dXoh~PEMqzsah~`Jv+i` zq^3%#mK!GgXaf(d?GUGz2I;%xy&T}tWIAlJjl>7+iDC~&j!zp1r)UXrGaNp13wPES zRj&`*_Z+sc&P}K`;7>U^*)tS4ma6C~)B|Md>%X<;q3H>Gh$z!7qse$YQ zK<_(|Ko!(-P;J04RistqD}8;Mlq?vu#b(yF36$bq?|~M3;;fDk#erVAF^l(3`dL+i 
zHXG2pS!cK)ZCxP3h$4#YKD+mjoo?i?h4b4~7jL;9|838cRO4P%a3-FT?EGuATNl>% zFzV!O=iyrPXAUR?Gl1c#{MVbp|I)!ehNi>x0a(s&<>g%UW28%>*2XD|o{?)O&4eM! zHeXbG%SiJNe;@kaAZnZWys+rPxE=fXn(4u`%#^yY5Vfy+UvC~GgFo7tQvsOSTdDsD zGaFb}>~wnAJH}K^^%rR<|KGmx{O8?hzwqP#dUx>6{~ZYXU-04s|Nm^{pI3kj$d1Cz z^z)rpL@!85UbogM-})ir^V|L~)>}U;dR1GLPvO5AjL?w#xrIeAPEG zZ2}FA`sUB~0{j;x;(r5c{og<@8Py-yBynYnCI0oEItJ_i$etMD$;83vuDuVSx82YG z8)mvDDB`96vsqq0==c9@Maa>x^o0 z>DDwMN`Qox&_fp$AtLpJ-ld3gREm@Y5TtiP?;WWgK}AHwf`SB)A{`Q{3ZjCc6F^jo z6ai_%o#6Stb-#7iUF)v<e;sP=g=5Wxa3<4qKziW6 zF~hr~3LS)3HQ4^cQ4Iln9(T`BR35b^b8)WM+BuwyXkFu;1r!*J0Lu15fkxqQGEsB` zn!q+HHI)@lX7As)yrCvxN0QZ9Ay+s!y~#tL30ZeZ!7c}4Sq@$1B-Lf!muB&BZXTSKTbd)rZWs~UHy506boX-~wNP;tc6&;CpcTcLX^hJA8S(@dd!p{qjERcaEc z@D~TG)(1JdTF6bwgMH^{*%db8zP#(l%scXK*KV#^cF4ABiSsFXzOjJw#&*eUw6_dC zyJWQW$Ex9q=&&5L`QJ^6rNY&Ow!+^0+&RDh>-?h*r~Qp@AM~f>1su zegH*&7|QLD!?+Edk^*}vc-!4K4eMs>%P0y=uTvves}+aLx!5%<-ksl;d)qu~hwUd^ z-#i+l?)^1n6Dk>WD(84Y-Y@|h+W{Bn=lyL{ylxcwt}Wa#SO!D`+jmv9?xqElj}(M> z%59T&+ahnr!%0#H9=l4fJ4E3ImXh`j%=Hsb;K(da!G$C)Nigf{Qyq5&IBwDv2FnQm zkKRYDFG?>MLZ5JSB%Pf5lD8xd)z>?`C|F{2HwW5oMSnNUF$#Lx1)fx>mYbF;dC#HX6A$Hl6sEk(Nj}N8c?b~ zKNw))J+tkciH6P}>mX=$CK+7;l-JZp%BCU&QLqvnb@RaJuti>;qnyOKkpxvysykVL zK$sLDapPlDcZKqGPE+f1XjXD~sM4$v)%MYp^=q;RIH!?^HBXuU+IXaUde}*V1S$$4L>` zaqw`3D^?ePL_Dmj9X1)Ymq6g=m*+~=awg$K7&N($0DRiolY|Q`*J(=Zxc-o zpXMzKAi(+eL^UgcC+4#V@BgjsQvg41Q~%O^w5F`EAY0ssd#B+GqXjLzJBnsoKg1E} z0p;x-Mk`o!D;%ldp0_?fXcU@A;rc$=(&E1`B)t+iOarF@6oOztcVY#;0fLssSBO}n z-}- z6duoM9BxV!ktkR)2otYwN|`$XO&8W$Id|1S;QJMF!F}|7ClUbF;I5xj_46>L&YWEE zc)-78g}~_=i#mtqXPggq$;lC6LscCWfx*Bsp{3>XuC6I6y| z)Qspc%wozMW}wpXQnI3=Sn8~-hiukl?28!wYyy>K5J3RON3k@Rn5Gbm_B|Sob4F?H0iL)wHtxE&+Zm^wLYu+2W)_$H+_rJMh_U``7hG z`^qzuGA(-|U+zaXpR#&susIRx03m?&;EDOUXT{XQ~XhTwO0;6Q=Xw5Ut63HyiNjXy^9qWTn$<$G&{! zwYQ3s-EB-lr zC~WWZ{PXf#pI_&`7K+>x7S&I`S+oMhn^YAKYoNmlib>mNgYXsN96M*!R>^@v6{v4TrdSPArg1h>u8z~9? 
z{;>()tF7r&Cm_mo#{a9?x3O1M{C;oHYh!->Jpi^6<&ZSJ2{PWsR1kO98_Mq)P2#o)*JI6XldjUvlafna6$%V&43z8_EpX8-aVnx zgQh!DsZvGne@1Rm60K0`n`DQbp zS%uG|-S^htkTp{%E~Dk$6Qh;Ktz89KQ$ZL++2P`@tsrQvhsML!mdjiA-}}^oby11= z@kTC&lNxzHBp7Z$)mK{SRmOyYBEV{@#U!aZaLH_$9>z6N^Tjw6mhMJj|`?g z`3MepH;46mb7f@8tg5qrZH~&M`FvAAm+b$ze*l>dk7;D*pyk+z zqGyFvmgFzY760CH(zIL4kZo3J%nP>l%);kBJUO2my4>`s4!t`rjQ<{`ujbbRj9m}P zy!o2~ex*55}?w=gyz7d zk!S@IG(TXYlnUnB=Op%7 z#4C-fh3^>j7pQIsE8>%Y-G?ZGVgtR za^Dp@Ql!f)utVdMO7yE8^GzK>!Ft|_H4<9tTY50TAj&Icg^s> z!7IN$$-ztWOKs=tL+1EvO4_8+@xLiZq1?n3NdFyqqbJI=ZS07NUg0?Pov)%x$typW z_ye@#a*tmSnRz;%f_~M>xGZfn3g`&Q6XSfoj3i#|oYsYYxfogB zd^lxxFE)*u*+yu!HlEy%$c|-IF}S1PabjcP)2CY07>hhGdahBG!5&tGD76UtN9WJ7SbZ46b`0Z|FGx8f>Tm4Y0+hLZ&r* zo^_hFsmwmzGR<2>P(h4EXf?5pTEI;X<|dNgYlJ~Dgc1bZ6j&U-zZ4S9?~(6BA$a0q zqjZiR!5|X_L!O?t`cqIHa=o#d66ca5VdDQnz0pKOQuuu_s(>ka4PV8;v!a(Q&4Pan z$7GyQ+Snel_WHKF%9wmKuGWO%!^|C7Yz+4zYFjh|b8%`|&w62~EN47u(x%13Hem45 zA8B%cR6O==Cmt2*zB~1qk|dh`Zll$R-k2~RV;G+&X6$Z^DZbO@wN=E@nW$(_x=2|< zcopZZ>)IUVR3Hw|s@@(Kp3+M*zH`>li-%}Qv&{3}_|XdR3GL`nS4&P&)IIeQc3o*8rPc`I3i;&Mr>vhE4sj!$ zq)?)zb7oIog8p3iRNfs??KNrp<*cz7V^=KwHs=M#cE0I4hpu#9Nik-?25s68!c48$ z69d^gX&Zgcb0VDPcof1d?GM`ZKvV+C6b6vSS$TR9v?WyZ1v?%4tBVxXSeWN<<)gl) z6gF=AaMESDA#KRHz0tkxAIxeonQ0t-E*e|y^oa-UQc?1JtA$&CkWzCS3A$wiU)A3` zw?)gkyh8~vr39!A%B5}ai3s3$a6%T8SFvAoRZ7p4m0!3TbkBQ4`XK)$Wdq0u3c!nz zf&C}W1KyN5#5C?4XMh=NJE8@v35Zo)3P2`Ml5XNMK5;d$Kq^Cs8$Am#W__H<#HCEn zQIG=W&NZ6F=iZ3~3LJm%>*t1o{&c(64gVAPDw0;J*f^|kg>`1K;u3uuOS!^nd|}1A zClR5TOHk)|1l^u+{lI-vi8<-4roZ;$^du#^?a1;G?m$f8iOZ!=BbR|6wQ*mRs5&fU z3{=Njo)Lu9afyB-d9U5cIt8Qfbk;-ry+lYXGM_`2s&hAK9GH=I?_++@B5b=W;%|(r zEkYjK$~3KKPgH1!H+-*uzRzQ~9`l8}!2>2^yVm~r#X#i#_LB))^`B#YPW`GsKQ3WY zc^jhf-^DVxqzF^9QFUEbDGLkbEkg|nUlD=F?vqmGvjzTSVymK#b&0E zN`@RA9;IM0XH(`8BT)$YC-&;Qi*ol?_=MT>J}NO}ZY??%MVy+(+qX5Ockp64Z`Vk1 zd{&Zb&ogz;fo#P_0c`|RsAM@05Ufh4DUD5LPK+UNv7pl$OJH3+s~M1BB5&$=-ACs1 zVzbpFE5Ikfxtv1Gsgt+61l@O~U} z)xmG>%|Of<Q5tmGhy3Jz(P`3Y(wRmWB&hplm&_%ocAZEZ4O1^W z_Q>m8nNPnABoioQ8g}ffq$nub(ZY8^jQc$AuaD;EzY&47X_h4< zF%>>{>$EXlL!QL}$1sC8nXO`AC0{f(I%fl@JuUKN-`|8YEjvq;7KXn4(i|EG&NHOy z_J&m1BY2EY9aJ)dauomhuPf^h6!-&)Kt=9C;yW6=VwM0MQzuNH8Gc(T!|oYUZ%gB$ z-i>uETap1DSJA=5cA{p6pfs%*=h7M*J=O)$ZfzMXwKT6y-9q2ggj&vPG0Vs`G9Hf= z8z+B{bxF`?@VN%~0r`b}L*=Af0)$qg@aijrQ@wZ>4fltFjXJ1tD1*8x@w;JPwhqb_ z1QMc>dn&OVUt-D+nDjgeezeY~fD6$3C;jYb!8Ud7+zioFLJ?Wo>%4C<$owF4kwWsq zqWU~+$sgr;(fzO{GFs;@xlKE=f;rh+aZjM7&5~Y{G44#?jY4RF;z|*{=uMycW>5{} zp{Km;wRYw6#r~tp<I-Et3LO~35o6@>D?(9W;? 
z&~YcH{a3NQp$JTY&a*?d&t&S60M?njeQqOAs%X&Ixw zmanV4OfdFCe5K~z%vGj*Ap>}#H&a}My@&oNIY8(BRDN6hh7htfId(%)zM7XPw*NY; zc8@u9Vs<3rbZ~J~66NZAe|5R(_Zh#*qgL_D8{jxMf+5MaBUi2ymF&8UqGdF&18Pez zKhQ!euN&7RjHlQ>qdugVL+uxKL&#A3i0}BI>37ep&gg`Mr6n3&OTPaLkwIHtFXbue_?y=Dd!6@YefVQF88hb9V& zWwxw9^>Q-n6*n_PU;r@!)+jWfn6?zR(fvhY#VvA!pm$0v(UF zwd9_*u@bst97MGgND0r5>y4cu$-aLk(s1d2*Kp;h|_kDF@rz_ zM#Sx|f`=nWDX1VKMXGt#p2aXvDr@Y>7hgVUT~oTxsicMQ2Bx`DJ~KTdzhWWEa$@bG zGqn@`2g!FHKR+CWm?{zE@}#~gotlI}jMBCgqVg^SY3Y0l2To~~_OB_c6wkL*EiPuW zeP@I_0jp`G2qHwgI_J6;w#el#?ygWL(P5yoD(Yv z)oay3<7BUP`oE>7t_&pB*h&VyQLH@t_(%Gn%sd`z_YW74y#QR`&*#sooyNrviVTfF zN{?~p@A`ISsWqB2cTu3I6lB0*1X>{e3G!}7jBiFgtBsGOZOf?u6>onlTmQ?Y3Xw-4 zNX=D)#5jJ+kmTOt5d>Tg0#es8+eo z9oUOvKp+=qt<14!<|#6j`lLq}<& zopb#J@q}5~-}zjCK19)U5RT~0!v6KcVJ3JCSOKW9-}(=McmfTm5oS*77XFU=6<7J4aRxX8EgdJ38=0m3uOAK(F>io$0KDI#ms%T3p~Y~EQ#163t%U&< zeDW0~OUu&`;0&jaul@}~g8NZIp!EI=y;IEv2yIA$A%vQ!KJ2NL6O75Ef?-$lzjvJ( b#IVnH8#TySCRxEs13rejraCXQ>|_25b?Mt= diff --git a/src/operator.rs b/src/operator.rs index 65ef8964..ab233c3e 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -28,6 +28,7 @@ pub enum CliCommandOperator { Install { /// Space separated list of operators to install. /// Must have the form `name[=version]` e.g. `superset`, `superset=0.3.0`, `superset=0.3.0-nightly` or `superset=0.3.0-pr123`. + /// If no version is specified the latest nightly version - build from the main branch - will be used. /// You can get the available versions with `stackablectl operator list` or `stackablectl operator describe superset` #[clap(multiple_occurrences(true), required = true)] operators: Vec, From 26d0e6157a4f6f4888ea3d3a38f8640df829129b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 18 Jul 2022 08:58:51 +0200 Subject: [PATCH 086/177] Remove old operators regorule and monitoring as they are not needed any more --- src/main.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index dca4c104..3b60d8f3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,9 +30,6 @@ const AVAILABLE_OPERATORS: &[&str] = &[ "superset", "trino", "zookeeper", - // Deprecated - "regorule", - "monitoring", ]; lazy_static! 
{ From 6f821065c153b68c62f20d0a7895dcee2c632f1e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 18 Jul 2022 15:40:56 +0200 Subject: [PATCH 087/177] Implement service names for OPA clusters --- src/services.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/services.rs b/src/services.rs index fb3fb3e6..52b94dfe 100644 --- a/src/services.rs +++ b/src/services.rs @@ -314,6 +314,7 @@ pub fn get_service_names(product_name: &str, product: &str) -> Vec { ], "hive" => vec![product_name.to_string()], "nifi" => vec![product_name.to_string()], + "opa" => vec![product_name.to_string()], "superset" => vec![format!("{product_name}-external")], "trino" => vec![format!("{product_name}-coordinator")], "zookeeper" => vec![product_name.to_string()], From 0fce2775d5d4dc0a74f21cd86799db81048e4126 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 18 Jul 2022 17:44:58 +0200 Subject: [PATCH 088/177] WIP first demo --- demos.yaml | 25 +++++ .../create-table-in-trino.yaml | 72 ++++++++++++++ demos/trino-taxi-data/load-test-data.yaml | 13 +++ stacks.yaml | 69 +++++++++++++ stacks/trino-superset-s3/hive-metastore.yaml | 44 +++++++++ stacks/trino-superset-s3/superset.yaml | 39 ++++++++ stacks/trino-superset-s3/trino.yaml | 97 +++++++++++++++++++ 7 files changed, 359 insertions(+) create mode 100644 demos.yaml create mode 100644 demos/trino-taxi-data/create-table-in-trino.yaml create mode 100644 demos/trino-taxi-data/load-test-data.yaml create mode 100644 stacks/trino-superset-s3/hive-metastore.yaml create mode 100644 stacks/trino-superset-s3/superset.yaml create mode 100644 stacks/trino-superset-s3/trino.yaml diff --git a/demos.yaml b/demos.yaml new file mode 100644 index 00000000..b248e2c5 --- /dev/null +++ b/demos.yaml @@ -0,0 +1,25 @@ +--- +stacks: # As their is no demo command implemented yet we provide the following demos as a stack. 
+ # They will be converted to demos when the demos feature is available + trino-taxi-data: + description: WIP Demo loading NY taxi-data into S3 bucket, creating Trino table and Superset dashboard + stackableRelease: 22.06 # Later on: stackableStack: trino-superset-s3 + labels: + - trino + - superset + - minio + - s3 + - ny-taxi-data + manifests: + - plainYaml: demos/trino-taxi-data/load-test-data.yaml + - plainYaml: demos/trino-taxi-data/create-table-in-trino.yaml + trino-wine-reviews: + description: WIP Demo loading wine review data into S3 bucket, creating Trino table and Superset dashboard + stackableRelease: 22.06 # Later on: stackableStack: trino-superset-s3 + labels: + - trino + - superset + - minio + - s3 + - ny-taxi-data + manifests: [] diff --git a/demos/trino-taxi-data/create-table-in-trino.yaml b/demos/trino-taxi-data/create-table-in-trino.yaml new file mode 100644 index 00000000..4fc2d3eb --- /dev/null +++ b/demos/trino-taxi-data/create-table-in-trino.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: create-ny-taxi-data-table-in-trino +spec: + template: + spec: + containers: + - name: create-ny-taxi-data-table-in-trino + image: "python:3.10-slim" + command: ["bash", "-c", "pip install trino==0.314.0 && python /tmp/script/script.py"] + volumeMounts: + - name: script + mountPath: /tmp/script + restartPolicy: OnFailure + volumes: + - name: script + configMap: + name: create-ny-taxi-data-table-in-trino-script +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: create-ny-taxi-data-table-in-trino-script +data: + script.py: | + import sys + import trino + + if not sys.warnoptions: + import warnings + warnings.simplefilter("ignore") + + def get_connection(): + connection = trino.dbapi.connect( + host="trino-coordinator", + port=8443, + user="demo", + http_scheme='https', + auth=trino.auth.BasicAuthentication("demo", "demo"), + ) + connection._http_session.verify = False + return connection + + def run_query(connection, query): + print(f"[DEBUG] Executing query {query}") + cursor = connection.cursor() + cursor.execute(query) + return cursor.fetchall() + + connection = get_connection() + + assert run_query(connection, "CREATE SCHEMA IF NOT EXISTS hive.demo WITH (location = 's3a://demo/')")[0][0] is True + assert run_query(connection, """ + CREATE TABLE IF NOT EXISTS hive.demo.ny_taxi_data_raw14 ( + VendorID BIGINT, + tpep_pickup_datetime TIMESTAMP, + tpep_dropoff_datetime TIMESTAMP, + passenger_count DOUBLE, + trip_distance DOUBLE, + payment_type BIGINT, + Fare_amount DOUBLE, + Tip_amount DOUBLE, + Total_amount DOUBLE + ) WITH ( + external_location = 's3a://demo/ny-taxi-data/raw/', + format = 'parquet' + ) + """)[0][0] is True + + assert run_query(connection, "SELECT COUNT(*) FROM hive.demo.ny_taxi_data_raw")[0][0] == 3_599_920 diff --git a/demos/trino-taxi-data/load-test-data.yaml b/demos/trino-taxi-data/load-test-data.yaml new file mode 100644 index 00000000..eb5274e0 --- /dev/null +++ b/demos/trino-taxi-data/load-test-data.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: load-ny-taxi-data +spec: + template: + spec: + containers: + - name: load-ny-taxi-data + image: "bitnami/minio:2022-debian-10" + command: ["bash", "-c", "cd /tmp && curl -O https://repo.stackable.tech/repository/misc/ny-taxi-data/yellow_tripdata_2022-04.parquet && mc --insecure alias set minio http://minio-trino:9000/ demo demodemo && mc cp yellow_tripdata_2022-04.parquet minio/demo/ny-taxi-data/raw/"] + restartPolicy: OnFailure diff --git a/stacks.yaml 
b/stacks.yaml index b4ca37c7..8c0f28b2 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -51,6 +51,75 @@ stacks: - plainYaml: stacks/druid-superset-s3/zookeeper.yaml - plainYaml: stacks/druid-superset-s3/druid.yaml - plainYaml: stacks/druid-superset-s3/superset.yaml + trino-superset-s3: + description: Stack containing MinIO, Trino and Superset for data visualization + stackableRelease: 22.06 + labels: + - trino + - superset + - minio + - s3 + manifests: + - helmChart: + releaseName: minio-trino + name: minio + repo: + name: minio + url: https://charts.min.io/ + version: 4.0.5 + options: + rootUser: root + rootPassword: rootroot + mode: standalone + users: + - accessKey: trino + secretKey: trinotrino + policy: readwrite + - accessKey: hive + secretKey: hivehive + policy: readwrite + - accessKey: demo + secretKey: demodemo + policy: readwrite + buckets: + - name: demo + policy: public + resources: + requests: + memory: 2Gi + service: + type: NodePort + nodePort: null + consoleService: + type: NodePort + nodePort: null + - helmChart: + releaseName: postgresql-hive + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 10.16.2 + options: + # Old version (10) of helm-charts has old way of setting credentials + postgresqlUsername: hive + postgresqlPassword: hive + postgresqlDatabase: hive + - helmChart: + releaseName: postgresql-superset + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 11.0.0 + options: + auth: + username: superset + password: superset + database: superset + - plainYaml: stacks/trino-superset-s3/hive-metastore.yaml + - plainYaml: stacks/trino-superset-s3/trino.yaml + - plainYaml: stacks/trino-superset-s3/superset.yaml airflow: description: Stack containing Airflow scheduling platform stackableRelease: 22.06 diff --git a/stacks/trino-superset-s3/hive-metastore.yaml b/stacks/trino-superset-s3/hive-metastore.yaml new file mode 100644 index 00000000..77152a12 --- /dev/null +++ b/stacks/trino-superset-s3/hive-metastore.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: hive.stackable.tech/v1alpha1 +kind: HiveCluster +metadata: + name: hive +spec: + version: 2.3.9-stackable0.4.0 + s3: + inline: + host: minio-trino + port: 9000 + accessStyle: Path + credentials: + secretClass: hive-s3-credentials + metastore: + roleGroups: + default: + replicas: 1 + config: + database: + connString: jdbc:postgresql://postgresql-hive:5432/hive + user: hive + password: hive + dbType: postgres +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: hive-s3-credentials +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: hive-s3-credentials + labels: + secrets.stackable.tech/class: hive-s3-credentials +stringData: + accessKey: hive + secretKey: hivehive diff --git a/stacks/trino-superset-s3/superset.yaml b/stacks/trino-superset-s3/superset.yaml new file mode 100644 index 00000000..78864812 --- /dev/null +++ b/stacks/trino-superset-s3/superset.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: SupersetCluster +metadata: + name: superset +spec: + version: 1.5.1-stackable0.2.0 + statsdExporterVersion: v0.22.4 + credentialsSecret: superset-credentials + loadExamplesOnInit: true + nodes: + roleGroups: + default: + replicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: superset-credentials +type: Opaque +stringData: + adminUser.username: admin + adminUser.firstname: SupersetNur + adminUser.lastname: Admin + 
adminUser.email: admin@superset.com + adminUser.password: admin + connections.secretKey: thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset +# --- +# TODO Use when available (https://github.com/stackabletech/superset-operator/issues/3) +# apiVersion: superset.stackable.tech/v1alpha1 +# kind: TrinoConnection +# metadata: +# name: superset-trino-connection +# spec: +# superset: +# name: superset +# trino: +# name: trino diff --git a/stacks/trino-superset-s3/trino.yaml b/stacks/trino-superset-s3/trino.yaml new file mode 100644 index 00000000..6e78cebf --- /dev/null +++ b/stacks/trino-superset-s3/trino.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: trino.stackable.tech/v1alpha1 +kind: TrinoCluster +metadata: + name: trino +spec: + version: 387-stackable0.1.0 + hiveConfigMapName: hive + opa: + configMapName: opa + package: trino + s3: + inline: + host: minio-trino + port: 9000 + accessStyle: Path + credentials: + secretClass: trino-s3-credentials + authentication: + method: + multiUser: + userCredentialsSecret: + name: trino-users + coordinators: + roleGroups: + default: + replicas: 1 + config: {} + workers: + roleGroups: + default: + replicas: 1 + config: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: trino-users +type: kubernetes.io/opaque +stringData: + # admin:admin + admin: $2y$10$89xReovvDLacVzRGpjOyAOONnayOgDAyIS2nW9bs5DJT98q17Dy5i + # demo:demo + demo: $2y$10$mMRoIKfWtAuycEQnKiDCeOlCSYiWkvbs0WsMFLkaSnNO0ZnFKVRXm +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: trino-s3-credentials +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: trino-s3-credentials + labels: + secrets.stackable.tech/class: trino-s3-credentials +stringData: + accessKey: trino + secretKey: trinotrino +--- +apiVersion: opa.stackable.tech/v1alpha1 +kind: OpaCluster +metadata: + name: opa +spec: + version: 0.41.0-stackable0.1.0 + servers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: trino-opa-bundle + labels: + opa.stackable.tech/bundle: "trino" +data: + trino.rego: | + package trino + + default allow = false + + allow { + input.context.identity.user == "admin" + } + + allow { + input.context.identity.user == "demo" + } From c97f6ac8eb1d0efda879b71858f877661d3c212f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 19 Jul 2022 12:06:17 +0200 Subject: [PATCH 089/177] Add docs on shell autocompletion --- docs/modules/ROOT/pages/installation.adoc | 56 +++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index 631cef18..da374123 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -97,3 +97,59 @@ Copy it to you systems path to access it from anywhere if you like ---- $ sudo cp target/release/stackablectl /usr/bin/stackablectl ---- + +== Configure auto-completion +`stackablectl` provides completion scripts for the major Shells out there. +It uses the same mechanism as `kubectl` does, so if you have any problems following this steps looking at https://kubernetes.io/docs/tasks/tools/included/[their installation documentation] may help you out. 
+
+All of the https://docs.rs/clap_complete/3.2.3/clap_complete/shells/enum.Shell.html[supported shells of] https://crates.io/crates/clap_complete[`clap_complete`] are supported.
+As of `07/2022` this includes the following shells:
+
+* <<Bash>>
+* Elvish
+* <<Fish>>
+* <<PowerShell>>
+* <<Zsh>>
+
+=== Bash
+The stackablectl completion script for Bash can be generated with the command `stackablectl completion bash`. Sourcing the completion script in your shell enables stackablectl autocompletion.
+
+Install the package `bash-completion` e.g. via `apt install bash-completion`.
+
+After that run the following command to source the completion script and tell bash to source it every time you start a new shell.
+
+[source,console]
+----
+$ source <(stackablectl completion bash)
+$ echo 'source <(stackablectl completion bash)' >> ~/.bashrc
+----
+
+=== Fish
+The stackablectl completion script for Fish can be generated with the command `stackablectl completion fish`. Sourcing the completion script in your shell enables stackablectl autocompletion.
+
+[source,console]
+----
+$ stackablectl completion fish | source
+$ echo 'stackablectl completion fish | source' >> ~/.config/fish/config.fish
+----
+
+=== PowerShell
+The stackablectl completion script for PowerShell can be generated with the command `stackablectl completion powershell`.
+
+To load the completions in all your shell sessions, add the following line to your `$PROFILE` file:
+
+[source,console]
+----
+stackablectl completion powershell | Out-String | Invoke-Expression
+----
+
+This command will regenerate the auto-completion script on every PowerShell start up.
+
+=== Zsh
+The stackablectl completion script for Zsh can be generated with the command `stackablectl completion zsh`. Sourcing the completion script in your shell enables stackablectl autocompletion.
+
+[source,console]
+----
+$ source <(stackablectl completion zsh)
+$ echo 'source <(stackablectl completion zsh)' >> ~/.zshrc
+----
From 9b03b5c175cb3b94fae02d3c972386ae3c0a0512 Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Wed, 20 Jul 2022 14:57:02 +0200
Subject: [PATCH 090/177] Removed demos as they now have a separate branch

---
 demos.yaml                                 | 25 ------
 .../create-table-in-trino.yaml             | 72 -------------------
 demos/trino-taxi-data/load-test-data.yaml  | 13 ----
 3 files changed, 110 deletions(-)
 delete mode 100644 demos.yaml
 delete mode 100644 demos/trino-taxi-data/create-table-in-trino.yaml
 delete mode 100644 demos/trino-taxi-data/load-test-data.yaml

diff --git a/demos.yaml b/demos.yaml
deleted file mode 100644
index b248e2c5..00000000
--- a/demos.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-stacks: # As their is no demo command implemented yet we provide the following demos as a stack.
- # They will be converted to demos when the demos feature is available - trino-taxi-data: - description: WIP Demo loading NY taxi-data into S3 bucket, creating Trino table and Superset dashboard - stackableRelease: 22.06 # Later on: stackableStack: trino-superset-s3 - labels: - - trino - - superset - - minio - - s3 - - ny-taxi-data - manifests: - - plainYaml: demos/trino-taxi-data/load-test-data.yaml - - plainYaml: demos/trino-taxi-data/create-table-in-trino.yaml - trino-wine-reviews: - description: WIP Demo loading wine review data into S3 bucket, creating Trino table and Superset dashboard - stackableRelease: 22.06 # Later on: stackableStack: trino-superset-s3 - labels: - - trino - - superset - - minio - - s3 - - ny-taxi-data - manifests: [] diff --git a/demos/trino-taxi-data/create-table-in-trino.yaml b/demos/trino-taxi-data/create-table-in-trino.yaml deleted file mode 100644 index 4fc2d3eb..00000000 --- a/demos/trino-taxi-data/create-table-in-trino.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: create-ny-taxi-data-table-in-trino -spec: - template: - spec: - containers: - - name: create-ny-taxi-data-table-in-trino - image: "python:3.10-slim" - command: ["bash", "-c", "pip install trino==0.314.0 && python /tmp/script/script.py"] - volumeMounts: - - name: script - mountPath: /tmp/script - restartPolicy: OnFailure - volumes: - - name: script - configMap: - name: create-ny-taxi-data-table-in-trino-script ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: create-ny-taxi-data-table-in-trino-script -data: - script.py: | - import sys - import trino - - if not sys.warnoptions: - import warnings - warnings.simplefilter("ignore") - - def get_connection(): - connection = trino.dbapi.connect( - host="trino-coordinator", - port=8443, - user="demo", - http_scheme='https', - auth=trino.auth.BasicAuthentication("demo", "demo"), - ) - connection._http_session.verify = False - return connection - - def run_query(connection, query): - print(f"[DEBUG] Executing query {query}") - cursor = connection.cursor() - cursor.execute(query) - return cursor.fetchall() - - connection = get_connection() - - assert run_query(connection, "CREATE SCHEMA IF NOT EXISTS hive.demo WITH (location = 's3a://demo/')")[0][0] is True - assert run_query(connection, """ - CREATE TABLE IF NOT EXISTS hive.demo.ny_taxi_data_raw14 ( - VendorID BIGINT, - tpep_pickup_datetime TIMESTAMP, - tpep_dropoff_datetime TIMESTAMP, - passenger_count DOUBLE, - trip_distance DOUBLE, - payment_type BIGINT, - Fare_amount DOUBLE, - Tip_amount DOUBLE, - Total_amount DOUBLE - ) WITH ( - external_location = 's3a://demo/ny-taxi-data/raw/', - format = 'parquet' - ) - """)[0][0] is True - - assert run_query(connection, "SELECT COUNT(*) FROM hive.demo.ny_taxi_data_raw")[0][0] == 3_599_920 diff --git a/demos/trino-taxi-data/load-test-data.yaml b/demos/trino-taxi-data/load-test-data.yaml deleted file mode 100644 index eb5274e0..00000000 --- a/demos/trino-taxi-data/load-test-data.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: load-ny-taxi-data -spec: - template: - spec: - containers: - - name: load-ny-taxi-data - image: "bitnami/minio:2022-debian-10" - command: ["bash", "-c", "cd /tmp && curl -O https://repo.stackable.tech/repository/misc/ny-taxi-data/yellow_tripdata_2022-04.parquet && mc --insecure alias set minio http://minio-trino:9000/ demo demodemo && mc cp yellow_tripdata_2022-04.parquet minio/demo/ny-taxi-data/raw/"] - restartPolicy: OnFailure From 
8ee667fb0ed88ca3e4e90c156c905826a95b1905 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:32:31 +0200 Subject: [PATCH 091/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 1f03f706..a14a9356 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -5,7 +5,7 @@ Operators manage the individual data products the Stackable Data Platform consis This command manages individual operators. It is mainly intended for persons already having experience or working on the Stackable Data Platform. -If you just wan't an easy way to get started or don't know which products and/or which version to install it is recommended to use the xref:commands/release.adoc[] command. +If you just want an easy way to get started or don't know which products and/or which version to install it is recommended to use the xref:commands/release.adoc[] command. This command will install a bundle of operators from a official Stackable Release. == Browse available operators From 212b9722e5e63568ca2570f9aaffa2195a12cc53 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:32:46 +0200 Subject: [PATCH 092/177] Update README.md Co-authored-by: Siegfried Weber --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 48dd59a2..76e0e8a7 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # stackablectl -The documentation is hosted [here](https://docs.stackable.tech/stackablectl/stable/index.html) +The documentation of `stackablectl` can be found in the [documentation of the Stackable Data Platform](https://docs.stackable.tech/stackablectl/stable/index.html). # TODOs * Check if CRD resources still exist when uninstalling the operators. If so warn the user. From 65c8f140ea41d41b29482f4cd68d9018f90ac14a Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:33:05 +0200 Subject: [PATCH 093/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index a14a9356..423e32e5 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -6,7 +6,7 @@ Operators manage the individual data products the Stackable Data Platform consis This command manages individual operators. It is mainly intended for persons already having experience or working on the Stackable Data Platform. If you just want an easy way to get started or don't know which products and/or which version to install it is recommended to use the xref:commands/release.adoc[] command. -This command will install a bundle of operators from a official Stackable Release. +This command will install a bundle of operators from an official Stackable Release. 
== Browse available operators To list the operators that are part of the Stackable Data Platform as well as their stable versions run the following command From 77ec0b4f2bf4025d31ade7a0cf01fc66d08a0764 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:33:15 +0200 Subject: [PATCH 094/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 423e32e5..306f8b2b 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -9,7 +9,7 @@ If you just want an easy way to get started or don't know which products and/or This command will install a bundle of operators from an official Stackable Release. == Browse available operators -To list the operators that are part of the Stackable Data Platform as well as their stable versions run the following command +To list the operators that are part of the Stackable Data Platform as well as their stable versions run the following command: [source,console] ---- From 704960d5f2c0a6f25e7a387aa148fa9b4cd1a470 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:42:25 +0200 Subject: [PATCH 095/177] Apply suggestions from code review Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 16 ++++++++-------- docs/modules/ROOT/pages/commands/release.adoc | 14 +++++++------- docs/modules/ROOT/pages/commands/services.adoc | 6 +++--- docs/modules/ROOT/pages/commands/stack.adoc | 4 ++-- docs/modules/ROOT/pages/customizability.adoc | 11 +++++------ docs/modules/ROOT/pages/index.adoc | 10 +++++----- docs/modules/ROOT/pages/installation.adoc | 6 +++--- docs/modules/ROOT/pages/troubleshooting.adoc | 4 ++-- 8 files changed, 35 insertions(+), 36 deletions(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 306f8b2b..87d5bb95 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -33,7 +33,7 @@ zookeeper 0.9.0, 0.8.0, 0.7.0, 0.6.0, 0.10.0 ---- This command only includes the stable versions for every operator to not mess up the whole screen. -If you're interested in a special version of an operator you can use the describe command to get more details for an specific operator as follows +If you're interested in a special version of an operator you can use the `describe` command to get more details for a specific operator as follows: [source,console] ---- @@ -45,8 +45,8 @@ Dev versions: 0.5.0-nightly, 0.4.0-nightly, 0.3.0-nightly, 0.2.0-nightly, ---- == Install operator -If you access to an Kubernetes cluster make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. -After that run the following command +If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. 
+After that run the following command: [source,console] ---- @@ -56,8 +56,8 @@ $ stackablectl operator install airflow commons secret [INFO ] Installing secret operator ---- -If you don't have an Kubernetes cluster available `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. -Make sure you have https://kind.sigs.k8s.io/[kind] installed and run the following command +If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have `kind` installed and run the following command: [source,console] ---- @@ -96,12 +96,12 @@ $ stackablectl operator install airflow=0.4.0 commons=0.2.0 secret=0.5.0 [INFO ] Installing secret operator in version 0.5.0 ---- -As you can see the three operators where installed in the requested version. +As you can see, the three operators where installed in the requested version. -Remember: If you want to install a recommended and tested set of operator versions have a look at the xref:commands/release.adoc[] command. +Remember: If you want to install a recommended and tested set of operator versions, have a look at the xref:commands/release.adoc[] command. == List installed operators -After installing some operators you can list which operators are installed in you Kubernetes cluster. +After installing some operators, you can list which operators are installed in you Kubernetes cluster. [source,console] ---- diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc index d6a88ff5..529c60c9 100644 --- a/docs/modules/ROOT/pages/commands/release.adoc +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -4,7 +4,7 @@ A release is a well-playing bundle of operators that get released approximately If you want to install an single individual operator have a look at the xref:commands/operator.adoc[] command. == Browse available releases -To list the available Stackable releases run the following command +To list the available Stackable releases run the following command: [source,console] ---- @@ -13,7 +13,7 @@ RELEASE RELEASE DATE DESCRIPTION 22.06 2022-06-30 First official release of the Stackable Data Platform ---- -To show details run +Detailed information of a release can queried with the `describe` command: [source,console] ---- @@ -40,11 +40,11 @@ trino 0.4.0 zookeeper 0.10.0 ---- -In the output you can see which product operators are included in the specific release +In the output you can see which product operators are included in the specific release. == Install release -If you access to an Kubernetes cluster make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. -After that run the following command +If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. +After that run the following command: [source,console] ---- @@ -66,8 +66,8 @@ $ stackablectl release install 22.06 [INFO ] Installing zookeeper operator in version 0.10.0 ---- -If you don't have an Kubernetes cluster available `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. 
-Make sure you have https://kind.sigs.k8s.io/[kind] installed and run the following command +If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have `kind` installed and run the following command: [source,console] ---- diff --git a/docs/modules/ROOT/pages/commands/services.adoc b/docs/modules/ROOT/pages/commands/services.adoc index 28030505..5a307e24 100644 --- a/docs/modules/ROOT/pages/commands/services.adoc +++ b/docs/modules/ROOT/pages/commands/services.adoc @@ -2,10 +2,10 @@ == List running services -The `stackable services` command allows to inspect the running services of the Stackable Data Platform. +The `stackablectl services` command allows to inspect the running services of the Stackable Data Platform. Currently you can only get a read-only view of the running services, future versions may allow to e.g. uninstall running services. -An example invocation looks as follows +An example invocation looks as follows: [source,console] ---- @@ -26,7 +26,7 @@ You can also - Redact the passwords from the output in case you want to share the list of services without giving out the admin credentials - Print the installed product versions -To achieve this you can use the following command +To achieve this you can use the following command: [source,console] ---- diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index 08241f63..3ba5e122 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -2,7 +2,7 @@ A stack is a collection of ready-to-use Stackable data products as well as needed third-party services like Postgresql or MinIO. == Browse available stacks -To list the available stacks run the following command +To list the available stacks, run the following command: [source,console] ---- @@ -91,7 +91,7 @@ Have a nice day! 👋 [INFO ] Installed stack druid-superset-s3 ---- -After installing the stack we can access the running services using the xref:commands/operator.adoc[] command +After installing the stack, we can access the running services using the xref:commands/operator.adoc[] command: [source,console] ---- diff --git a/docs/modules/ROOT/pages/customizability.adoc b/docs/modules/ROOT/pages/customizability.adoc index 020cca53..f42844f7 100644 --- a/docs/modules/ROOT/pages/customizability.adoc +++ b/docs/modules/ROOT/pages/customizability.adoc @@ -3,22 +3,21 @@ If you're working for a large company chances are that there are multiple teams A single team can also operate multiple Stackable Data Platforms. `stackablectl` is build in a way customers or even single developers can define their own release, stack and even demo! This way it is possible to cover the following use-cases. -If you are interested in of them give it a try! -Any additional demos/stacks/releases you specify will be added to the already existing provided by Stackable. +Any additional demos/stacks/releases you specify, will be added to the already existing provided by Stackable. == How to add a new === Demo ==== Benefits When you have developed a new data pipeline or data product you often want to show it in action to other colleagues or potential clients. -To easily achieve this you can create you own demo so that it can easily be reproduced and/or shared with other people. +To easily achieve this you can create your own demo so that it can easily be reproduced and/or shared with other people. 
==== Adding a new demo First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demo-files `. The `` can be either a path to a file on the local filesystem or a URL. -By using a URL the demos file can be put into to an central Git and referenced by all teams or clients. +By using a URL the demos file can be put into to a central Git repository and referenced by all teams or clients. Multiple `--additional-demo-files` flags can be specified to include multiple demo files. Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. @@ -29,7 +28,7 @@ In the custom defined Stack all Product versions are pinned as well, so you can You can use your defined Stack to give it to colleagues or potential customers to show the overall architecture of the Data platform you're going to build. ==== Adding a new stack -For the overall procedure have a look on <<_adding_a_new_demo>> on how to add a new demo. +For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new stack. For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks.yaml[the Stackable provided stacks]. You can than add it to `stackablectl` with the flag `--additional-stack-files`. @@ -43,6 +42,6 @@ This has the following benefits: - If the company is only interested in a subset of the available operators you can only add your relevant operators into your release and not install all the other operators. ==== Adding a new release -For the overall procedure have a look on <<_adding_a_new_demo>> on how to add a new demo. +For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new release. For a custom release you need to create a `mycorp-releases.yaml` containing releases according to the format defined by https://github.com/stackabletech/release/blob/main/releases.yaml[the Stackable provided releases]. You can than add it to `stackablectl` with the flag `--additional-release-files`. diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index bdd80c0f..bfabd38f 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -2,19 +2,19 @@ The `stackablectl` command line tool is used to interact with the Stackable Data Platform. It can install individual operators as well as Platform releases. -It also ships with a set of pre-build demos that utilize different data products of the Platform to get e.g. an end-to-end data pipeline. +It also ships with a set of pre-built demos that utilize different data products of the Platform to get e.g. an end-to-end data pipeline. The installation of `stackablectl` is described in xref:installation.adoc[]. To just get a Quickstart please follow xref:quickstart.adoc[]. In general, use `stackablectl --help` to find out more about how to use the tool or how to use specific options. -This also works with subcommands i.e.: `stackablectl release install --help` will show the help for installing a release. -Often you can also use a abbreviation instead of typing out all of the commands. +This also works with subcommands, i.e. 
`stackablectl release install --help` will show the help for installing a release. +Often you can also use an abbreviation instead of typing out all of the commands. E.g. `stackablectl operator list` can be also written as `stackablectl op ls` A Kubernetes cluster is required to use the Stackable Data Platform as all products and operators run on Kubernetes. -If you don't have a Kubernetes cluster `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes Cluster for you. +If you don't have a Kubernetes cluster, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes Cluster for you. The deployed services are separated into three different layers as illustrated below: @@ -33,7 +33,7 @@ A stack needs a release (of Stackable operators) to run on. To achieve this a stacks has a dependency on a release which get's automatically installed when a stack is installed. == Demos -A demo is a end-to-end demonstration of the usage of the Stackable Data Platform. +A demo is an end-to-end demonstration of the usage of the Stackable Data Platform. It contains . Installing a Stackable release diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index da374123..54039ac8 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -4,7 +4,7 @@ We ship pre-compiled binaries of `stackablectl` which should work on most environments such as Windows, macOS, and Linux distros like Ubuntu and Arch. Below are the installation instructions for <>, <> and <>. -If the binary does not work for you you can always <<_build_stackablectl_from_source>> +If the binary does not work for you, you can always <<_build_stackablectl_from_source>> === Linux @@ -29,7 +29,7 @@ You can now invoke it with: $ ./stackablectl ---- -If you want to be able to call it from everywhere (not only the directory you downloaded it to) you can add it to you system with the following command +If you want to be able to call it from everywhere (not only the directory you downloaded it to) you can add it to your system with the following command: [source,console] ---- @@ -91,7 +91,7 @@ $ cargo build --release ---- After a successful build the binary will be placed in `target/release/stackablectl`. -Copy it to you systems path to access it from anywhere if you like +Copy it to your systems path to access it from anywhere if you like. [source,console] ---- diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc index 27de0b29..f0db1c91 100644 --- a/docs/modules/ROOT/pages/troubleshooting.adoc +++ b/docs/modules/ROOT/pages/troubleshooting.adoc @@ -30,7 +30,7 @@ To achieve this the following online services will be contacted: === Mirror helm-charts To allow stackablectl to retrieve the current list of operators you must mirror the `https://repo.stackable.tech/repository/helm-.*/index.yaml` files to some local URL. -If the file is mirrored to e.g. `https://my.corp/stackable/repository/helm-stable/index.yaml` you need to specify the following arguments to `stackablectl` +If the file is mirrored e.g. to `https://my.corp/stackable/repository/helm-stable/index.yaml`, you need to specify the following arguments to `stackablectl`: [source,console] ---- @@ -39,7 +39,7 @@ $ stackablectl --helm-repo-stackable-stable https://my.corp/stackable/repository === Mirror releases/stacks/demos files You need to mirror the URL to either a URL or a file on disk. 
-You can than specify the mirrored file to be included via `--additional-release-files`, `--additional-stack-files` or `--additional-demo-files`, e.g. +You can then specify the mirrored file to be included via `--additional-release-files`, `--additional-stack-files`, or `--additional-demo-files`, e.g. [source,console] ---- From d6264de74f6e1275d3622b162350a5ad885132dd Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:47:15 +0200 Subject: [PATCH 096/177] Feedback to docs --- docs/modules/ROOT/nav.adoc | 2 +- docs/modules/ROOT/pages/commands/release.adoc | 2 +- .../ROOT/pages/{customizability.adoc => customization.adoc} | 2 +- docs/modules/ROOT/pages/index.adoc | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) rename docs/modules/ROOT/pages/{customizability.adoc => customization.adoc} (99%) diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 5982a250..9a6f25b8 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -6,5 +6,5 @@ ** xref:commands/release.adoc[] ** xref:commands/services.adoc[] ** xref:commands/stack.adoc[] -* xref:customizability.adoc[] +* xref:customization.adoc[] * xref:troubleshooting.adoc[] diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc index 529c60c9..dfb74f7b 100644 --- a/docs/modules/ROOT/pages/commands/release.adoc +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -1,6 +1,6 @@ = Release -A release is a well-playing bundle of operators that get released approximately every 2 months. +A release is a well-playing bundle of operators. If you want to install an single individual operator have a look at the xref:commands/operator.adoc[] command. == Browse available releases diff --git a/docs/modules/ROOT/pages/customizability.adoc b/docs/modules/ROOT/pages/customization.adoc similarity index 99% rename from docs/modules/ROOT/pages/customizability.adoc rename to docs/modules/ROOT/pages/customization.adoc index f42844f7..a94b581c 100644 --- a/docs/modules/ROOT/pages/customizability.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -1,4 +1,4 @@ -= Customizability += Customization If you're working for a large company chances are that there are multiple teams using the Stackable Data Platform. A single team can also operate multiple Stackable Data Platforms. `stackablectl` is build in a way customers or even single developers can define their own release, stack and even demo! diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index bfabd38f..57e65758 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -11,7 +11,7 @@ To just get a Quickstart please follow xref:quickstart.adoc[]. In general, use `stackablectl --help` to find out more about how to use the tool or how to use specific options. This also works with subcommands, i.e. `stackablectl release install --help` will show the help for installing a release. Often you can also use an abbreviation instead of typing out all of the commands. -E.g. `stackablectl operator list` can be also written as `stackablectl op ls` +E.g. `stackablectl operator list` can also be written as `stackablectl op ls` A Kubernetes cluster is required to use the Stackable Data Platform as all products and operators run on Kubernetes. If you don't have a Kubernetes cluster, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes Cluster for you. 
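For the kind bootstrap that the index page above ends on, the whole flow is a single invocation. This is only a minimal sketch, assuming the `22.06` release shown earlier in this series and the `--kind-cluster` flag that clap derives from the `kind_cluster` field; the log output will differ per environment:

[source,console]
----
$ stackablectl release install 22.06 --kind-cluster
----

The same flag is declared for `operator install` and `stack install` as well, so any of the three entry points can create the local test cluster first.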
From de38c83ba3aac982e4bd28dabb44080f8bdf964b Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:51:38 +0200 Subject: [PATCH 097/177] Feedback to docs --- docs/modules/ROOT/pages/commands/stack.adoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index 3ba5e122..27c78c2e 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -12,7 +12,7 @@ druid-superset-s3 22.06 Stack containing MinIO, D airflow 22.06 Stack containing Airflow scheduling platform ---- -To show details run +Detailed information of a stack can queried with the `describe` command: [source,console] ---- @@ -26,7 +26,7 @@ Labels: druid, superset, minio, s3 Future version of `stackablectl` will allow to search for stacks based on the labels. == Install stack -If you access to an Kubernetes cluster make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. +If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. After that run the following command [source,console] @@ -51,8 +51,8 @@ $ stackablectl stack install druid-superset-s3 [INFO ] Installed stack druid-superset-s3 ---- -If you don't have an Kubernetes cluster available `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. -Make sure you have https://kind.sigs.k8s.io/[kind] installed and run the following command +If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have `kind` installed and run the following command: [source,console] ---- From c6afbc5adcada584730c172bdc2ad860aba48b76 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 15:52:21 +0200 Subject: [PATCH 098/177] Feedback to docs --- docs/modules/ROOT/pages/installation.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index 54039ac8..146ab058 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -40,7 +40,7 @@ $ sudo mv stackablectl /usr/bin/stackablectl Download `stackablectl-x86_64-pc-windows-gnu.exe` from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. You can simply execute it. -If you want to execute it from anywhere in your system you need to add it to the system `PATH``. +If you want to execute it from anywhere in your system, you need to add it to the system `PATH`. === macOS Download the `stackablectl-x86_64-apple-darwin` binary file for Intel based Macs or `stackablectl-aarch64-apple-darwin` binary file for ARM based Macs from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. 
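The reworked stack page above now points readers at the `describe` subcommand. A minimal sketch of such a call, reusing the `druid-superset-s3` stack referenced in that page (the exact output layout is not reproduced here):

[source,console]
----
$ stackablectl stack describe druid-superset-s3
----

The text output includes the stack's description and labels (for example `druid, superset, minio, s3`); treat the exact fields as subject to change.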
From 9feae79d74bf67577cae425ee48b9a8b57af6175 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 16:08:53 +0200 Subject: [PATCH 099/177] Feedback to docs --- docs/modules/ROOT/pages/commands/operator.adoc | 4 +++- src/arguments.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 87d5bb95..8860747a 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -46,7 +46,7 @@ Dev versions: 0.5.0-nightly, 0.4.0-nightly, 0.3.0-nightly, 0.2.0-nightly, == Install operator If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. -After that run the following command: +After that run the following command, which will install the operators in their latest version. [source,console] ---- @@ -112,6 +112,8 @@ commons 0.3.0-nightly default deployed secret 0.6.0-nightly default deployed 2022-07-15 09:44:13.526843785 +0200 CEST ---- +In case you have installed the operators in a specific version, the specific versions will be shown instead of the `*-nightly` versions. + == Uninstall operator To uninstall the operators again you can use the `uninstall` command diff --git a/src/arguments.rs b/src/arguments.rs index 246e2b75..2d342607 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -68,7 +68,7 @@ pub enum CliCommand { #[clap(subcommand, alias("r"), alias("re"))] Release(CliCommandRelease), - /// This EXPERIMENTAL subcommand interacts with stacks, which are ready-to-use combinations of products. + /// This subcommand interacts with stacks, which are ready-to-use combinations of products. #[clap(subcommand, alias("s"), alias("st"))] Stack(CliCommandStack), From 754c780a2ef794e01516ae86ea1e2b5afdf89045 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 16:17:44 +0200 Subject: [PATCH 100/177] Feedback to docs --- src/helm.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/helm.rs b/src/helm.rs index 9b8d8659..15b8648d 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -6,7 +6,7 @@ use cached::proc_macro::cached; use lazy_static::lazy_static; use log::{debug, error, info, warn, LevelFilter}; use serde::Deserialize; -use std::{collections::HashMap, os::raw::c_char, process::exit, sync::Mutex}; +use std::{collections::HashMap, os::raw::c_char, process::exit, sync::Mutex, io::Error}; lazy_static! 
{ pub static ref HELM_REPOS: Mutex> = Mutex::new(HashMap::new()); @@ -113,7 +113,8 @@ pub fn install_helm_release_from_repo( } /// Cached because of slow network calls -/// Not returning an Result because i couldn't get it to work with #[cached] +/// Returning a Result would be better but in combination with #[cached] the following error comes up: +/// the trait `Deserialize<'_>` is not implemented for `std::io::Error` #[cached] pub async fn get_repo_index(repo_url: String) -> HelmRepo { let index_url = format!("{repo_url}/index.yaml"); From a6cb62ef52629af7de67818bc6b88e726019193d Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 16:22:47 +0200 Subject: [PATCH 101/177] Feedback to docs --- src/helm.rs | 2 +- src/operator.rs | 3 ++- src/release.rs | 3 ++- src/stack.rs | 3 ++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/helm.rs b/src/helm.rs index 15b8648d..309519a8 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -6,7 +6,7 @@ use cached::proc_macro::cached; use lazy_static::lazy_static; use log::{debug, error, info, warn, LevelFilter}; use serde::Deserialize; -use std::{collections::HashMap, os::raw::c_char, process::exit, sync::Mutex, io::Error}; +use std::{collections::HashMap, os::raw::c_char, process::exit, sync::Mutex}; lazy_static! { pub static ref HELM_REPOS: Mutex> = Mutex::new(HashMap::new()); diff --git a/src/operator.rs b/src/operator.rs index ab233c3e..666f9538 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -35,7 +35,8 @@ pub enum CliCommandOperator { /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. - /// You need to have `docker` and `kind` installed. Have a look at the README at on how to install them. + /// You need to have `docker` and `kind` installed. + /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] kind_cluster: bool, diff --git a/src/release.rs b/src/release.rs index bbcff626..50460ecb 100644 --- a/src/release.rs +++ b/src/release.rs @@ -55,7 +55,8 @@ pub enum CliCommandRelease { /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. - /// You need to have `docker` and `kind` installed. Have a look at the README at on how to install them. + /// You need to have `docker` and `kind` installed. + /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] kind_cluster: bool, diff --git a/src/stack.rs b/src/stack.rs index 61332785..4b0c3f4d 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -40,7 +40,8 @@ pub enum CliCommandStack { /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. - /// You need to have `docker` and `kind` installed. Have a look at the README at on how to install them. + /// You need to have `docker` and `kind` installed. 
+ /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] kind_cluster: bool, From bee456a4313bfa91387b6ab9d535cd96da33a42e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 21 Jul 2022 16:37:51 +0200 Subject: [PATCH 102/177] Feedback to docs --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index a94b581c..fd601f9c 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -15,7 +15,7 @@ To easily achieve this you can create your own demo so that it can easily be rep ==== Adding a new demo First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. -After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demo-files `. +After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demo-files mycorp-demos.yaml`. The `` can be either a path to a file on the local filesystem or a URL. By using a URL the demos file can be put into a central Git repository and referenced by all teams or clients. Multiple `--additional-demo-files` flags can be specified to include multiple demo files. From fc9056889f4cc50570347b0e4a5700e8e7dba206 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 22 Jul 2022 12:52:36 +0200 Subject: [PATCH 103/177] Feedback to docs --- docs/modules/ROOT/pages/customization.adoc | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index fd601f9c..f7186843 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -6,13 +6,12 @@ This way it is possible to cover the following use-cases. Any additional demos/stacks/releases you specify will be added to the ones already provided by Stackable. -== How to add a new -=== Demo -==== Benefits +== Add a new demo +=== Benefits When you have developed a new data pipeline or data product you often want to show it in action to other colleagues or potential clients. To easily achieve this you can create your own demo so that it can easily be reproduced and/or shared with other people. -==== Adding a new demo +=== Adding a new demo First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demo-files mycorp-demos.yaml`. @@ -21,27 +20,27 @@ By using a URL the demos file can be put into a central Git repository and re Multiple `--additional-demo-files` flags can be specified to include multiple demo files. Every additional demo will be added to the already existing demos in `stackablectl`, so all the available demo files will be merged. -=== Stack -==== Benefits +== Add a new stack +=== Benefits If your company or clients have multiple similar setups or reference architectures it could make sense to make them easily available to all employees or clients.
In the custom defined Stack all Product versions are pinned as well, so you can easily spin up a Stack containing the exact same versions as your production setup. You can use your defined Stack to give it to colleagues or potential customers to show the overall architecture of the Data platform you're going to build. -==== Adding a new stack +=== Adding a new stack For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new stack. For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks.yaml[the Stackable provided stacks]. You can then add it to `stackablectl` with the flag `--additional-stack-files`. -=== Release -==== Benefits +== Add a new release +=== Benefits If advanced users of the Stackable Platform want to define their own internal Release within their company, they can easily add their own release. This has the following benefits: - Same operator versions across the whole company. This produces more uniform environments and makes debugging and helping other teams easier. - If the company is only interested in a subset of the available operators, you can add only your relevant operators into your release and not install all the other operators. -==== Adding a new release +=== Adding a new release For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new release. For a custom release you need to create a `mycorp-releases.yaml` containing releases according to the format defined by https://github.com/stackabletech/release/blob/main/releases.yaml[the Stackable provided releases]. You can then add it to `stackablectl` with the flag `--additional-release-files`. From e537bd5f33e793db76eebf51a7fbb25fc2321c95 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 28 Jul 2022 10:22:22 +0200 Subject: [PATCH 104/177] Add valuehints to stack arguments --- src/stack.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/stack.rs b/src/stack.rs index 4b0c3f4d..5eb2cd77 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -1,6 +1,6 @@ use crate::{arguments::OutputType, helm, helm::HELM_REPOS, helpers, kind, kube, release, CliArgs}; use cached::proc_macro::cached; -use clap::Parser; +use clap::{Parser, ValueHint}; use indexmap::IndexMap; use lazy_static::lazy_static; use log::{debug, error, info, warn}; @@ -25,7 +25,7 @@ pub enum CliCommandStack { #[clap(alias("desc"))] Describe { /// Name of the stack to describe - #[clap(required = true)] + #[clap(required = true, value_hint = ValueHint::Other)] stack: String, #[clap(short, long, arg_enum, default_value = "text")] @@ -35,7 +35,7 @@ pub enum CliCommandStack { #[clap(alias("in"))] Install { /// Name of the stack to install - #[clap(required = true)] + #[clap(required = true, value_hint = ValueHint::Other)] stack: String, /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created.
@@ -49,7 +49,8 @@ pub enum CliCommandStack { #[clap( long, default_value = "stackable-data-platform", - requires = "kind-cluster" + requires = "kind-cluster", + value_hint = ValueHint::Other, )] kind_cluster_name: String, }, From 4c3e4462e162075716736a34de17fd8c3eacfe65 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 28 Jul 2022 11:28:30 +0200 Subject: [PATCH 105/177] Update docs/modules/ROOT/pages/commands/release.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/release.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc index dfb74f7b..267f4909 100644 --- a/docs/modules/ROOT/pages/commands/release.adoc +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -128,7 +128,7 @@ zookeeper 0.10.0 default deployed == Uninstall release -To uninstall the release again you can use the uninstall command +To uninstall all operators contained in a release regardless of their actual installed versions, you can use the uninstall command: [source,console] ---- From 5e5d8f36bf32a2e9a46f6b0b8d205986f16de6e9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 28 Jul 2022 11:31:35 +0200 Subject: [PATCH 106/177] docs --- docs/modules/ROOT/pages/commands/stack.adoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index 27c78c2e..695d0d6c 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -1,5 +1,6 @@ = Stack A stack is a collection of ready-to-use Stackable data products as well as needed third-party services like Postgresql or MinIO. +It is tied to a specific release of the Stackable Data Platform, which will provide the needed operators for the Stack. == Browse available stacks To list the available stacks, run the following command: From 0ac0e380d66ae2ad5cec200bfe03b9e6b62cba0e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 28 Jul 2022 11:33:24 +0200 Subject: [PATCH 107/177] Rename to additional_releases_file and additional_stacks_file --- src/arguments.rs | 4 ++-- src/release.rs | 2 +- src/stack.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/arguments.rs b/src/arguments.rs index d821f83e..9823ee02 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -61,7 +61,7 @@ pub struct CliArgs { /// Can either be an URL or a path to a file e.g. `https://my.server/my-releases.yaml` or '/etc/my-releases.yaml' or `C:\Users\Sebastian\my-releases.yaml`. /// Can be specified multiple times. #[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] - pub additional_release_files: Vec, + pub additional_releases_file: Vec, /// Adds a YAML file containing custom stacks /// @@ -70,7 +70,7 @@ pub struct CliArgs { /// Can either be an URL or a path to a file e.g. `https://my.server/my-stacks.yaml` or '/etc/my-stacks.yaml' or `C:\Users\Sebastian\my-stacks.yaml`. /// Can be specified multiple times. 
#[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] - pub additional_stack_files: Vec, + pub additional_stacks_file: Vec, } #[derive(Parser)] diff --git a/src/release.rs b/src/release.rs index 8589dfa2..0932170b 100644 --- a/src/release.rs +++ b/src/release.rs @@ -102,7 +102,7 @@ impl CliCommandRelease { pub fn handle_common_cli_args(args: &CliArgs) { let mut release_files = RELEASE_FILES.lock().unwrap(); - release_files.append(&mut args.additional_release_files.clone()); + release_files.append(&mut args.additional_releases_file.clone()); } #[derive(Clone, Debug, Deserialize, Serialize)] diff --git a/src/stack.rs b/src/stack.rs index 5eb2cd77..6912d691 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -76,7 +76,7 @@ impl CliCommandStack { pub fn handle_common_cli_args(args: &CliArgs) { let mut stack_files = STACK_FILES.lock().unwrap(); - stack_files.append(&mut args.additional_stack_files.clone()); + stack_files.append(&mut args.additional_stacks_file.clone()); } #[derive(Clone, Debug, Deserialize, Serialize)] From 7c942b8c783963b385f3391468217c004d62cd11 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 28 Jul 2022 15:43:56 +0200 Subject: [PATCH 108/177] Use cli-table for listing services --- Cargo.lock | 84 +++++++++++++++++++++++++++++++++++++++++++++--- Cargo.toml | 1 + src/services.rs | 85 +++++++++++++++++++++++++------------------------ 3 files changed, 125 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a2d5fbf..66c61809 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -57,6 +57,18 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.10.0" @@ -179,6 +191,29 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "cli-table" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adfbb116d9e2c4be7011360d0c0bee565712c11e969c9609b25b619366dc379d" +dependencies = [ + "cli-table-derive", + "csv", + "termcolor", + "unicode-width", +] + +[[package]] +name = "cli-table-derive" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af3bfb9da627b0a6c467624fb7963921433774ed435493b5c08a3053e829ad4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -195,6 +230,28 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +[[package]] +name = "csv" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +dependencies = [ + "bstr", + "csv-core", + "itoa 0.4.8", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + [[package]] name = "darling" version = "0.13.4" @@ -476,7 +533,7 @@ checksum = 
"75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.2", ] [[package]] @@ -529,7 +586,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 1.0.2", "pin-project-lite", "socket2", "tokio", @@ -624,6 +681,12 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +[[package]] +name = "itoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + [[package]] name = "itoa" version = "1.0.2" @@ -1074,6 +1137,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" + [[package]] name = "regex-syntax" version = "0.6.27" @@ -1218,7 +1287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ "indexmap", - "itoa", + "itoa 1.0.2", "ryu", "serde", ] @@ -1230,7 +1299,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa", + "itoa 1.0.2", "ryu", "serde", ] @@ -1288,6 +1357,7 @@ dependencies = [ "cached", "clap", "clap_complete", + "cli-table", "env_logger", "gobuild", "indexmap", @@ -1571,6 +1641,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + [[package]] name = "url" version = "2.2.2" diff --git a/Cargo.toml b/Cargo.toml index ea1d4e5c..85b887a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ repository = "https://github.com/stackabletech/stackablectl" cached = "0.37" clap = { version = "3.2", features = ["derive", "cargo"] } clap_complete = "3.2" +cli-table = "0.4" env_logger = "0.9" indexmap = { version = "1.9", features = ["serde"] } k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } diff --git a/src/services.rs b/src/services.rs index 52b94dfe..af508a88 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,6 +1,5 @@ -use std::error::Error; - use clap::Parser; +use cli_table::{Cell, Table}; use indexmap::IndexMap; use k8s_openapi::api::{apps::v1::Deployment, core::v1::Secret}; use kube::{ @@ -11,6 +10,7 @@ use kube::{ use lazy_static::lazy_static; use log::{debug, warn}; use serde::Serialize; +use std::{error::Error, vec}; use crate::{ arguments::OutputType, @@ -176,51 +176,54 @@ async fn list_services( match output_type { OutputType::Text => { - println!("PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS"); + let mut table = vec![]; + + let max_endpoint_name_length = output + .values() + .flatten() + .flat_map(|p| &p.endpoints) + .map(|e| e.0.len()) + .max() + .unwrap_or_default(); + for (product_name, installed_products) in output.iter() { for installed_product in installed_products { - println!( - "{:12} {:40} {:30} {:50} {}", - product_name, - installed_product.name, + let mut endpoints = vec![]; + for endpoint in &installed_product.endpoints { + 
endpoints.push(vec![endpoint.0.as_str(), endpoint.1.as_str()]); + } + + let endpoints = installed_product + .endpoints + .iter() + .map(|(name, url)| { + format!("{name:width$}{url}", width = max_endpoint_name_length + 1) + }) + .collect::>() + .join("\n"); + + table.push(vec![ + product_name.cell(), + installed_product.name.as_str().cell(), installed_product .namespace - .as_ref() - .map(|s| s.to_string()) - .unwrap_or_default(), - installed_product - .endpoints - .first() - .map(|(name, url)| { format!("{:20} {url}", format!("{name}:")) }) - .unwrap_or_default(), - installed_product - .extra_infos - .first() - .map(|s| s.to_string()) - .unwrap_or_default(), - ); - - let mut endpoints = installed_product.endpoints.iter().skip(1); - let mut extra_infos = installed_product.extra_infos.iter().skip(1); - - loop { - let endpoint = endpoints.next(); - let extra_info = extra_infos.next(); - - if endpoint.is_none() && extra_info.is_none() { - break; - } - - println!( - " {:50} {}", - endpoint - .map(|(name, url)| { format!("{:20} {url}", format!("{name}:")) }) - .unwrap_or_default(), - extra_info.map(|s| s.to_string()).unwrap_or_default(), - ); - } + .clone() + .unwrap_or_default() + .cell(), + endpoints.cell(), + installed_product.extra_infos.join("\n").cell(), + ]); } } + let table = table.table().title(vec![ + "PRODUCT".cell(), + "NAME".cell(), + "NAMESPACE".cell(), + "ENDPOINTS".cell(), + "EXTRA INFOS".cell(), + ]); + + println!("{}", table.display()?); } OutputType::Json => { println!("{}", serde_json::to_string_pretty(&output).unwrap()); From 124261f8950f799e3eb07c021da17f3fbadcd706 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 29 Jul 2022 14:14:22 +0200 Subject: [PATCH 109/177] Remove border from table --- src/services.rs | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/services.rs b/src/services.rs index af508a88..5d769597 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,5 +1,8 @@ use clap::Parser; -use cli_table::{Cell, Table}; +use cli_table::{ + format::{Border, HorizontalLine, Separator}, + Cell, Table, +}; use indexmap::IndexMap; use k8s_openapi::api::{apps::v1::Deployment, core::v1::Secret}; use kube::{ @@ -215,15 +218,23 @@ async fn list_services( ]); } } - let table = table.table().title(vec![ - "PRODUCT".cell(), - "NAME".cell(), - "NAMESPACE".cell(), - "ENDPOINTS".cell(), - "EXTRA INFOS".cell(), - ]); - - println!("{}", table.display()?); + let table = table + .table() + .title(vec![ + "PRODUCT".cell(), + "NAME".cell(), + "NAMESPACE".cell(), + "ENDPOINTS".cell(), + "EXTRA INFOS".cell(), + ]) + .border(Border::builder().build()) + .separator( + Separator::builder() + .row(Some(HorizontalLine::new(' ', ' ', ' ', ' '))) + .build(), + ); + + print!("{}", table.display()?); } OutputType::Json => { println!("{}", serde_json::to_string_pretty(&output).unwrap()); From e7443f0387ade11b07ab13162c2122a823e01f94 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 29 Jul 2022 14:14:55 +0200 Subject: [PATCH 110/177] Use Result instead of error and exit --- src/stack.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/stack.rs b/src/stack.rs index 6912d691..b8746025 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -3,9 +3,9 @@ use cached::proc_macro::cached; use clap::{Parser, ValueHint}; use indexmap::IndexMap; use lazy_static::lazy_static; -use log::{debug, error, info, warn}; +use log::{debug, info, warn}; use serde::{Deserialize, Serialize}; -use 
std::{error::Error, ops::Deref, process::exit, sync::Mutex}; +use std::{error::Error, ops::Deref, sync::Mutex}; lazy_static! { pub static ref STACK_FILES: Mutex> = Mutex::new(vec![ @@ -60,7 +60,7 @@ impl CliCommandStack { pub async fn handle(&self) -> Result<(), Box> { match self { CliCommandStack::List { output } => list_stacks(output).await, - CliCommandStack::Describe { stack, output } => describe_stack(stack, output).await, + CliCommandStack::Describe { stack, output } => describe_stack(stack, output).await?, CliCommandStack::Install { stack, kind_cluster, @@ -136,7 +136,7 @@ async fn list_stacks(output_type: &OutputType) { } } -async fn describe_stack(stack_name: &str, output_type: &OutputType) { +async fn describe_stack(stack_name: &str, output_type: &OutputType) -> Result<(), Box> { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -146,7 +146,7 @@ async fn describe_stack(stack_name: &str, output_type: &OutputType) { labels: Vec, } - let stack = get_stack(stack_name).await; + let stack = get_stack(stack_name).await?; let output = Output { stack: stack_name.to_string(), description: stack.description, @@ -168,11 +168,13 @@ async fn describe_stack(stack_name: &str, output_type: &OutputType) { println!("{}", serde_yaml::to_string(&output).unwrap()); } } + + Ok(()) } async fn install_stack(stack_name: &str) -> Result<(), Box> { info!("Installing stack {stack_name}"); - let stack = get_stack(stack_name).await; + let stack = get_stack(stack_name).await?; release::install_release(&stack.stackable_release, &[], &[]).await; @@ -242,13 +244,10 @@ async fn get_stacks() -> Stacks { Stacks { stacks: all_stacks } } -async fn get_stack(stack_name: &str) -> Stack { +async fn get_stack(stack_name: &str) -> Result> { get_stacks() .await .stacks .remove(stack_name) // We need to remove to take ownership - .unwrap_or_else(|| { - error!("Stack {stack_name} not found. Use `stackablectl stack list` to list the available stacks."); - exit(1); - }) + .ok_or_else(|| format!("Stack {stack_name} not found. Use `stackablectl stack list` to list the available stacks.").into()) } From 11da3f9db2e77a312b1e5092a3cb81604d3bfd53 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 29 Jul 2022 16:52:25 +0200 Subject: [PATCH 111/177] Improve error handling => Remove all panics --- src/helm.rs | 49 +++++++++++++++++-------------------- src/helpers.rs | 39 ++++++++++++++--------------- src/kind.rs | 29 ++++++++++++++-------- src/kube.rs | 31 ++++++++++++----------- src/main.rs | 17 +++++++++---- src/operator.rs | 65 ++++++++++++++++++++++++++++++------------------- src/release.rs | 16 +++++++----- src/services.rs | 13 ++++++++-- src/stack.rs | 23 +++++++++-------- 9 files changed, 161 insertions(+), 121 deletions(-) diff --git a/src/helm.rs b/src/helm.rs index 79c2d513..18b2ebe8 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -6,7 +6,7 @@ use cached::proc_macro::cached; use lazy_static::lazy_static; use log::{debug, error, info, warn, LevelFilter}; use serde::Deserialize; -use std::{collections::HashMap, os::raw::c_char, process::exit, sync::Mutex}; +use std::{collections::HashMap, error::Error, os::raw::c_char, process::exit, sync::Mutex}; lazy_static! 
{ pub static ref HELM_REPOS: Mutex> = Mutex::new(HashMap::new()); @@ -68,28 +68,25 @@ pub fn install_helm_release_from_repo( chart_name: &str, chart_version: Option<&str>, values_yaml: Option<&str>, -) { +) -> Result<(), Box> { if helm_release_exists(release_name) { - let helm_release = get_helm_release(release_name).unwrap_or_else(|| { - panic!( - "Failed to find helm release {release_name} besides helm saying it should be there" - ) - }); + let helm_release = get_helm_release(release_name)?.ok_or(format!( + "Failed to find helm release {release_name} besides helm saying it should be there" + ))?; let current_version = helm_release.version; match chart_version { None => { warn!("The release {release_name} in version {current_version} is already installed and you have not requested a specific version, not re-installing it"); - return; + return Ok(()); } Some(chart_version) => { if chart_version == current_version { info!("The release {release_name} in version {current_version} is already installed, not installing it"); - return; + return Ok(()); } else { - error!("The helm release {release_name} is already installed in version {current_version} but you requested to install it in version {chart_version}. \ - Use \"stackablectl operator uninstall {operator_name}\" to uninstall it."); - exit(1); + return Err(format!("The helm release {release_name} is already installed in version {current_version} but you requested to install it in version {chart_version}. \ + Use \"stackablectl operator uninstall {operator_name}\" to uninstall it.").into()); } } } @@ -110,25 +107,25 @@ pub fn install_helm_release_from_repo( let chart_version = chart_version.unwrap_or(">0.0.0-0"); debug!("Installing helm release {repo_name} from chart {full_chart_name} in version {chart_version}"); install_helm_release(release_name, &full_chart_name, chart_version, values_yaml); + + Ok(()) } /// Cached because of slow network calls -/// Returning a Result would be better but in combination with #[cached] the following error comes up: -/// the trait `Deserialize<'_>` is not implemented for `std::io::Error` #[cached] -pub async fn get_repo_index(repo_url: String) -> HelmRepo { +pub async fn get_repo_index(repo_url: String) -> Result { let index_url = format!("{repo_url}/index.yaml"); debug!("Fetching helm repo index from {index_url}"); - let resp = reqwest::get(&index_url) + let index = reqwest::get(&index_url) .await - .unwrap_or_else(|_| panic!("Failed to download helm repo index from {index_url}")) + .map_err(|err| format!("Failed to download helm repo index from {index_url}: {err}"))? 
.text() .await - .unwrap_or_else(|_| panic!("Failed to get text from {index_url}")); + .map_err(|err| format!("Failed to get text from {index_url}: {err}"))?; - serde_yaml::from_str(&resp) - .unwrap_or_else(|_| panic!("Failed to parse helm repo index from {index_url}")) + serde_yaml::from_str(&index) + .map_err(|err| format!("Failed to parse helm repo index from {index_url}: {err}")) } pub fn uninstall_helm_release(release_name: &str) { @@ -182,20 +179,20 @@ pub struct Release { pub last_updated: String, } -pub fn helm_list_releases() -> Vec { +pub fn helm_list_releases() -> Result, Box> { let releases_json = c_str_ptr_to_str(unsafe { go_helm_list_releases(GoString::from(NAMESPACE.lock().unwrap().as_str())) }); - serde_json::from_str(releases_json).unwrap_or_else(|_| { - panic!("Failed to parse helm releases JSON from go wrapper {releases_json}") + serde_json::from_str(releases_json).map_err(|err| { + format!("Failed to parse helm releases JSON from go wrapper {releases_json}: {err}").into() }) } -pub fn get_helm_release(release_name: &str) -> Option { - helm_list_releases() +pub fn get_helm_release(release_name: &str) -> Result, Box> { + Ok(helm_list_releases()? .into_iter() - .find(|release| release.name == release_name) + .find(|release| release.name == release_name)) } pub fn add_helm_repo(name: &str, url: &str) { diff --git a/src/helpers.rs b/src/helpers.rs index d8f9656d..4e109a31 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -1,5 +1,6 @@ use log::trace; use std::{ + error::Error, ffi::CStr, fs, io::Write, @@ -43,13 +44,15 @@ pub async fn read_from_url_or_file(url_or_file: &str) -> Result } /// Ensures that the program is installed -/// If the program is not installed it will panic -pub fn ensure_program_installed(program: &str) { - which(program) - .unwrap_or_else(|_| panic!("Could not find a installation of {program}. Please have a look at the README of stackablectl on what the prerequisites are: https://github.com/stackabletech/stackablectl")); +/// If the program is not installed it will return an Error +pub fn ensure_program_installed(program: &str) -> Result<(), Box> { + match which(program) { + Ok(_) => Ok(()), + Err(err) => Err(format!("Could not find a installation of {program}: {err}").into()), + } } -pub fn execute_command(mut args: Vec<&str>) -> String { +pub fn execute_command(mut args: Vec<&str>) -> Result> { assert!(!args.is_empty()); let args_string = args.join(" "); @@ -59,13 +62,14 @@ pub fn execute_command(mut args: Vec<&str>) -> String { let output = Command::new(command) .args(args) .output() - .unwrap_or_else(|_| panic!("Failed to get output of the command \"{args_string}\"")); + .map_err(|err| format!("Failed to get output of the command \"{args_string}\": {err}"))?; if !output.status.success() { - panic!( + return Err(format!( "Failed to execute the command \"{args_string}\". 
Stderr was: {}", str::from_utf8(&output.stderr).expect("Could not parse command stderr as utf-8") - ); + ) + .into()); } let stdout_string = @@ -73,10 +77,10 @@ pub fn execute_command(mut args: Vec<&str>) -> String { trace!("Command output for \"{args_string}\":\n{stdout_string}"); - stdout_string.to_string() + Ok(stdout_string.to_string()) } -pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) { +pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) -> Result<(), Box> { assert!(!args.is_empty()); let args_string = args.join(" "); @@ -87,16 +91,13 @@ pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) { .args(args) .stdin(Stdio::piped()) .spawn() - .unwrap_or_else(|_| panic!("Failed to spawn the command \"{args_string}\"")); + .map_err(|err| format!("Failed to spawn the command \"{args_string}\": {err}"))?; - child - .stdin - .as_ref() - .unwrap() - .write_all(stdin.as_bytes()) - .expect("Failed to write kind cluster definition via stdin"); + child.stdin.as_ref().unwrap().write_all(stdin.as_bytes())?; - if !child.wait_with_output().unwrap().status.success() { - panic!("Failed to execute the command \"{args_string}\""); + if child.wait_with_output()?.status.success() { + Ok(()) + } else { + Err(format!("Failed to execute the command \"{args_string}\"").into()) } } diff --git a/src/kind.rs b/src/kind.rs index e7041fe0..858199b4 100644 --- a/src/kind.rs +++ b/src/kind.rs @@ -1,3 +1,5 @@ +use std::error::Error; + use crate::helpers; use log::{info, warn}; @@ -29,28 +31,35 @@ nodes: node-labels: node=3 "#; -pub fn handle_cli_arguments(kind_cluster: bool, kind_cluster_name: &str) { +pub fn handle_cli_arguments( + kind_cluster: bool, + kind_cluster_name: &str, +) -> Result<(), Box> { if kind_cluster { - helpers::ensure_program_installed("docker"); - helpers::ensure_program_installed("kind"); + helpers::ensure_program_installed("docker")?; + helpers::ensure_program_installed("kind")?; - create_cluster_if_not_exists(kind_cluster_name); + create_cluster_if_not_exists(kind_cluster_name)?; } + + Ok(()) } -fn create_cluster_if_not_exists(name: &str) { - if check_if_kind_cluster_exists(name) { +fn create_cluster_if_not_exists(name: &str) -> Result<(), Box> { + if check_if_kind_cluster_exists(name)? { warn!("The kind cluster {name} is already running, not re-creating it. 
Use `kind delete cluster --name {name}` to delete it"); } else { info!("Creating kind cluster {name}"); helpers::execute_command_with_stdin( vec!["kind", "create", "cluster", "--name", name, "--config", "-"], KIND_CLUSTER_DEFINITION, - ); + )?; } + + Ok(()) } -fn check_if_kind_cluster_exists(name: &str) -> bool { - let result = helpers::execute_command(vec!["kind", "get", "clusters"]); - result.lines().any(|cluster_name| cluster_name == name) +fn check_if_kind_cluster_exists(name: &str) -> Result> { + let result = helpers::execute_command(vec!["kind", "get", "clusters"])?; + Ok(result.lines().any(|cluster_name| cluster_name == name)) } diff --git a/src/kube.rs b/src/kube.rs index a9d4a521..bd9b6de3 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,6 +1,5 @@ use crate::NAMESPACE; use cached::proc_macro::cached; -use core::panic; use indexmap::IndexMap; use k8s_openapi::api::core::v1::{Endpoints, Node, Service}; use kube::{ @@ -78,7 +77,7 @@ pub async fn get_service_endpoint_urls( } }; - let node_ip = get_node_ip(node_name).await; + let node_ip = get_node_ip(node_name).await?; let mut result = IndexMap::new(); for service_port in service.spec.unwrap().ports.unwrap_or_default() { @@ -112,45 +111,45 @@ pub async fn get_service_endpoint_urls( Ok(result) } -async fn get_node_ip(node_name: &str) -> String { - let node_name_ip_mapping = get_node_name_ip_mapping().await; +async fn get_node_ip(node_name: &str) -> Result> { + let node_name_ip_mapping = get_node_name_ip_mapping().await?; + match node_name_ip_mapping.get(node_name) { - Some(node_ip) => node_ip.to_string(), - None => panic!("Failed to find node {node_name} in node_name_ip_mapping"), + Some(node_ip) => Ok(node_ip.to_string()), + None => Err(format!("Failed to find node {node_name} in node_name_ip_mapping").into()), } } -/// Not returning an Result, Error> because i couldn't get it to work with #[cached] #[cached] -async fn get_node_name_ip_mapping() -> HashMap { +async fn get_node_name_ip_mapping() -> Result, String> { let client = get_client() .await - .expect("Failed to create kubernetes client"); + .map_err(|err| format!("Failed to create kubernetes client: {err}"))?; let node_api: Api = Api::all(client); let nodes = node_api .list(&ListParams::default()) .await - .expect("Failed to list kubernetes nodes"); + .map_err(|err| format!("Failed to list kubernetes nodes: {err}"))?; let mut result = HashMap::new(); for node in nodes { let node_name = node.name(); let preferred_node_ip = node .status - .unwrap() + .ok_or(format!("Failed to get status of node {node_name}"))? .addresses - .unwrap_or_else(|| panic!("Failed to get address of node {node_name}")) + .ok_or(format!("Failed to get address of node {node_name}"))? 
.iter() .filter(|address| address.type_ == "InternalIP" || address.type_ == "ExternalIP") .min_by_key(|address| &address.type_) // ExternalIP (which we want) is lower than InternalIP .map(|address| address.address.clone()) - .unwrap_or_else(|| { - panic!("Could not find a InternalIP or ExternalIP for node {node_name}") - }); + .ok_or(format!( + "Could not find a InternalIP or ExternalIP for node {node_name}" + ))?; result.insert(node_name, preferred_node_ip); } - result + Ok(result) } pub async fn get_client() -> Result> { diff --git a/src/main.rs b/src/main.rs index b424dbb5..dd37968c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,8 @@ use crate::arguments::CliCommand; use arguments::CliArgs; use clap::{IntoApp, Parser}; use lazy_static::lazy_static; -use std::{error::Error, sync::Mutex}; +use log::error; +use std::{error::Error, process::exit, sync::Mutex}; mod arguments; mod helm; @@ -52,16 +53,22 @@ async fn main() -> Result<(), Box> { release::handle_common_cli_args(&args); stack::handle_common_cli_args(&args); - match &args.cmd { + let result = match &args.cmd { CliCommand::Operator(command) => command.handle().await, CliCommand::Release(command) => command.handle().await, - CliCommand::Stack(command) => command.handle().await?, - CliCommand::Services(command) => command.handle().await?, + CliCommand::Stack(command) => command.handle().await, + CliCommand::Services(command) => command.handle().await, CliCommand::Completion(command) => { let mut cmd = CliArgs::command(); arguments::print_completions(command.shell, &mut cmd); + Ok(()) } + }; + + if let Err(err) = &result { + error!("{err}"); + exit(-1); } - Ok(()) + result } diff --git a/src/operator.rs b/src/operator.rs index c2698f61..3efc1d23 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -3,7 +3,7 @@ use clap::{Parser, ValueHint}; use indexmap::IndexMap; use log::{info, warn}; use serde::Serialize; -use std::str::FromStr; +use std::{error::Error, str::FromStr}; #[derive(Parser)] pub enum CliCommandOperator { @@ -64,29 +64,31 @@ pub enum CliCommandOperator { } impl CliCommandOperator { - pub async fn handle(&self) { + pub async fn handle(&self) -> Result<(), Box> { match self { - CliCommandOperator::List { output } => list_operators(output).await, + CliCommandOperator::List { output } => list_operators(output).await?, CliCommandOperator::Describe { operator, output } => { - describe_operator(operator, output).await + describe_operator(operator, output).await? 
} CliCommandOperator::Install { operators, kind_cluster, kind_cluster_name, } => { - kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name)?; for operator in operators { - operator.install(); + operator.install()?; } } CliCommandOperator::Uninstall { operators } => uninstall_operators(operators), - CliCommandOperator::Installed { output } => list_installed_operators(output), + CliCommandOperator::Installed { output } => list_installed_operators(output)?, } + + Ok(()) } } -async fn list_operators(output_type: &OutputType) { +async fn list_operators(output_type: &OutputType) -> Result<(), Box> { type Output = IndexMap; #[derive(Serialize)] @@ -102,9 +104,9 @@ async fn list_operators(output_type: &OutputType) { output.insert( operator.to_string(), OutputOperatorEntry { - stable_versions: get_versions_from_repo(operator, "stackable-stable").await, - test_versions: get_versions_from_repo(operator, "stackable-test").await, - dev_versions: get_versions_from_repo(operator, "stackable-dev").await, + stable_versions: get_versions_from_repo(operator, "stackable-stable").await?, + test_versions: get_versions_from_repo(operator, "stackable-test").await?, + dev_versions: get_versions_from_repo(operator, "stackable-dev").await?, }, ); } @@ -127,9 +129,11 @@ async fn list_operators(output_type: &OutputType) { println!("{}", serde_yaml::to_string(&output).unwrap()); } } + + Ok(()) } -async fn describe_operator(operator: &str, output_type: &OutputType) { +async fn describe_operator(operator: &str, output_type: &OutputType) -> Result<(), Box> { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -140,9 +144,9 @@ async fn describe_operator(operator: &str, output_type: &OutputType) { } let output = Output { operator: operator.to_string(), - stable_versions: get_versions_from_repo(operator, "stackable-stable").await, - test_versions: get_versions_from_repo(operator, "stackable-test").await, - dev_versions: get_versions_from_repo(operator, "stackable-dev").await, + stable_versions: get_versions_from_repo(operator, "stackable-stable").await?, + test_versions: get_versions_from_repo(operator, "stackable-test").await?, + dev_versions: get_versions_from_repo(operator, "stackable-dev").await?, }; match output_type { @@ -159,30 +163,37 @@ async fn describe_operator(operator: &str, output_type: &OutputType) { println!("{}", serde_yaml::to_string(&output).unwrap()); } } + + Ok(()) } -async fn get_versions_from_repo(operator: &str, helm_repo_name: &str) -> Vec { +async fn get_versions_from_repo( + operator: &str, + helm_repo_name: &str, +) -> Result, Box> { let chart_name = format!("{operator}-operator"); let helm_repo_url = HELM_REPOS .lock() .unwrap() .get(helm_repo_name) - .unwrap_or_else(|| panic!("Could not find a helm repo with the name {helm_repo_name}")) + .ok_or(format!( + "Could not find a helm repo with the name {helm_repo_name}" + ))? 
.to_string(); - let repo = helm::get_repo_index(helm_repo_url).await; + let repo = helm::get_repo_index(helm_repo_url).await?; match repo.entries.get(&chart_name) { None => { warn!("Could not find {operator} operator (chart name {chart_name}) in helm repo {helm_repo_name}"); - vec![] + Ok(vec![]) } - Some(versions) => versions + Some(versions) => Ok(versions .iter() .map(|entry| entry.version.clone()) .rev() - .collect(), + .collect()), } } @@ -194,7 +205,7 @@ pub fn uninstall_operators(operators: &Vec) { } } -fn list_installed_operators(output_type: &OutputType) { +fn list_installed_operators(output_type: &OutputType) -> Result<(), Box> { type Output = IndexMap; #[derive(Serialize)] @@ -206,7 +217,7 @@ fn list_installed_operators(output_type: &OutputType) { last_updated: String, } - let output: Output = helm::helm_list_releases() + let output: Output = helm::helm_list_releases()? .into_iter() .filter(|release| { AVAILABLE_OPERATORS @@ -247,6 +258,8 @@ fn list_installed_operators(output_type: &OutputType) { println!("{}", serde_yaml::to_string(&output).unwrap()); } } + + Ok(()) } #[derive(Debug)] @@ -266,7 +279,7 @@ impl Operator { } } - pub fn install(&self) { + pub fn install(&self) -> Result<(), Box> { info!( "Installing {} operator{}", self.name, @@ -291,7 +304,9 @@ impl Operator { &helm_release_name, self.version.as_deref(), None, - ); + )?; + + Ok(()) } } diff --git a/src/release.rs b/src/release.rs index 0932170b..cb9f11ba 100644 --- a/src/release.rs +++ b/src/release.rs @@ -5,7 +5,7 @@ use indexmap::IndexMap; use lazy_static::lazy_static; use log::{error, info, warn}; use serde::{Deserialize, Serialize}; -use std::{ops::Deref, process::exit, sync::Mutex}; +use std::{error::Error, ops::Deref, process::exit, sync::Mutex}; lazy_static! { pub static ref RELEASE_FILES: Mutex> = Mutex::new(vec![ @@ -79,7 +79,7 @@ pub enum CliCommandRelease { } impl CliCommandRelease { - pub async fn handle(&self) { + pub async fn handle(&self) -> Result<(), Box> { match self { CliCommandRelease::List { output } => list_releases(output).await, CliCommandRelease::Describe { release, output } => { @@ -92,11 +92,13 @@ impl CliCommandRelease { kind_cluster, kind_cluster_name, } => { - kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); - install_release(release, include_products, exclude_products).await; + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name)?; + install_release(release, include_products, exclude_products).await?; } CliCommandRelease::Uninstall { release } => uninstall_release(release).await, } + + Ok(()) } } @@ -191,7 +193,7 @@ pub async fn install_release( release_name: &str, include_products: &[String], exclude_products: &[String], -) { +) -> Result<(), Box> { info!("Installing release {release_name}"); let release = get_release(release_name).await; @@ -202,9 +204,11 @@ pub async fn install_release( if included && !excluded { Operator::new(product_name, Some(product.operator_version)) .expect("Failed to construct operator definition") - .install(); + .install()?; } } + + Ok(()) } async fn uninstall_release(release_name: &str) { diff --git a/src/services.rs b/src/services.rs index 5d769597..f617ed2c 100644 --- a/src/services.rs +++ b/src/services.rs @@ -148,10 +148,19 @@ impl CliCommandServices { redact_credentials, show_versions, } => { - list_services(*all_namespaces, *redact_credentials, *show_versions, output).await? 
+ match list_services(*all_namespaces, *redact_credentials, *show_versions, output) + .await + { + Ok(()) => Ok(()), + Err(err) => { + // match err.as_ref() { + + // } + Err(err) + } + } } } - Ok(()) } } diff --git a/src/stack.rs b/src/stack.rs index b8746025..ba688622 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -66,7 +66,7 @@ impl CliCommandStack { kind_cluster, kind_cluster_name, } => { - kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name)?; install_stack(stack).await?; } } @@ -176,7 +176,7 @@ async fn install_stack(stack_name: &str) -> Result<(), Box> { info!("Installing stack {stack_name}"); let stack = get_stack(stack_name).await?; - release::install_release(&stack.stackable_release, &[], &[]).await; + release::install_release(&stack.stackable_release, &[], &[]).await?; info!("Installing components of stack {stack_name}"); for manifest in stack.manifests { @@ -202,19 +202,18 @@ async fn install_stack(stack_name: &str) -> Result<(), Box> { &name, Some(&version), Some(&values_yaml), - ) + )? } StackManifest::PlainYaml(yaml_url_or_file) => { debug!("Installing yaml manifest from {yaml_url_or_file}"); - match helpers::read_from_url_or_file(&yaml_url_or_file).await { - Ok(manifests) => kube::deploy_manifests(&manifests).await?, - Err(err) => { - panic!( - "Could not read stack manifests from file \"{}\": {err}", - &yaml_url_or_file - ); - } - } + let manifests = helpers::read_from_url_or_file(&yaml_url_or_file) + .await + .map_err(|err| { + format!( + "Could not read stack manifests from file \"{yaml_url_or_file}\": {err}" + ) + })?; + kube::deploy_manifests(&manifests).await?; } } } From edd3eda53fe506e7a14ee421c8f5eacf2aab2177 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 10:51:51 +0200 Subject: [PATCH 112/177] Further improve error handling by removing lot's of unwrap()s --- src/helm.rs | 2 +- src/helpers.rs | 9 +++++++-- src/kube.rs | 11 ++++++++--- src/main.rs | 2 +- src/operator.rs | 15 +++++++-------- src/release.rs | 23 +++++++++++++++-------- src/services.rs | 27 ++++++++++++++++++--------- src/stack.rs | 21 ++++++++++----------- 8 files changed, 67 insertions(+), 43 deletions(-) diff --git a/src/helm.rs b/src/helm.rs index 18b2ebe8..e6fa74c6 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -92,7 +92,7 @@ pub fn install_helm_release_from_repo( } } - match HELM_REPOS.lock().unwrap().get(repo_name) { + match HELM_REPOS.lock()?.get(repo_name) { None => { error!("I don't know about the helm repo {repo_name}"); exit(1); diff --git a/src/helpers.rs b/src/helpers.rs index 4e109a31..f2300b60 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -36,7 +36,8 @@ pub async fn read_from_url_or_file(url_or_file: &str) -> Result } match reqwest::get(url_or_file).await { - Ok(response) => Ok(response.text().await.unwrap()), + Ok(response) => response.text().await + .map_err(|err| format!("Failed to read from the response of the file or a URL with the name \"{url_or_file}\": {err}")), Err(err) => Err(format!( "Couldn't read a file or a URL with the name \"{url_or_file}\": {err}" )), @@ -93,7 +94,11 @@ pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) -> Result<() .spawn() .map_err(|err| format!("Failed to spawn the command \"{args_string}\": {err}"))?; - child.stdin.as_ref().unwrap().write_all(stdin.as_bytes())?; + child + .stdin + .as_ref() + .ok_or(format!("Failed to get stdin of command \"{args_string}\""))? 
+ .write_all(stdin.as_bytes())?; if child.wait_with_output()?.status.success() { Ok(()) diff --git a/src/kube.rs b/src/kube.rs index bd9b6de3..66cc34bb 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -12,12 +12,12 @@ use serde::Deserialize; use std::{collections::HashMap, error::Error}; pub async fn deploy_manifests(yaml: &str) -> Result<(), Box> { - let namespace = NAMESPACE.lock().unwrap().clone(); + let namespace = NAMESPACE.lock()?.clone(); let client = get_client().await?; let discovery = Discovery::new(client.clone()).run().await?; for manifest in serde_yaml::Deserializer::from_str(yaml) { - let mut object = DynamicObject::deserialize(manifest).unwrap(); + let mut object = DynamicObject::deserialize(manifest)?; let gvk = gvk_of_typemeta(object.types.as_ref().expect("Failed to get type of object")); let (resource, capabilities) = discovery.resolve_gvk(&gvk).expect("Failed to resolve gvk"); @@ -80,7 +80,12 @@ pub async fn get_service_endpoint_urls( let node_ip = get_node_ip(node_name).await?; let mut result = IndexMap::new(); - for service_port in service.spec.unwrap().ports.unwrap_or_default() { + for service_port in service + .spec + .ok_or(format!("Service {service_name} had no spec"))? + .ports + .unwrap_or_default() + { match service_port.node_port { Some(node_port) => { let endpoint_name = service_name diff --git a/src/main.rs b/src/main.rs index dd37968c..0e4bb281 100644 --- a/src/main.rs +++ b/src/main.rs @@ -47,7 +47,7 @@ async fn main() -> Result<(), Box> { .init(); let namespace = &args.namespace; - *(NAMESPACE.lock().unwrap()) = namespace.to_string(); + *(NAMESPACE.lock()?) = namespace.to_string(); helm::handle_common_cli_args(&args); release::handle_common_cli_args(&args); diff --git a/src/operator.rs b/src/operator.rs index 3efc1d23..d52d02c0 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -123,10 +123,10 @@ async fn list_operators(output_type: &OutputType) -> Result<(), Box> } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } @@ -157,10 +157,10 @@ async fn describe_operator(operator: &str, output_type: &OutputType) -> Result<( println!("Dev versions: {}", output.dev_versions.join(", ")); } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } @@ -174,8 +174,7 @@ async fn get_versions_from_repo( let chart_name = format!("{operator}-operator"); let helm_repo_url = HELM_REPOS - .lock() - .unwrap() + .lock()? 
.get(helm_repo_name) .ok_or(format!( "Could not find a helm repo with the name {helm_repo_name}" @@ -252,10 +251,10 @@ fn list_installed_operators(output_type: &OutputType) -> Result<(), Box { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } diff --git a/src/release.rs b/src/release.rs index cb9f11ba..84008558 100644 --- a/src/release.rs +++ b/src/release.rs @@ -81,9 +81,9 @@ pub enum CliCommandRelease { impl CliCommandRelease { pub async fn handle(&self) -> Result<(), Box> { match self { - CliCommandRelease::List { output } => list_releases(output).await, + CliCommandRelease::List { output } => list_releases(output).await?, CliCommandRelease::Describe { release, output } => { - describe_release(release, output).await + describe_release(release, output).await? } CliCommandRelease::Install { release, @@ -127,7 +127,7 @@ struct ReleaseProduct { operator_version: String, } -async fn list_releases(output_type: &OutputType) { +async fn list_releases(output_type: &OutputType) -> Result<(), Box> { let output = get_releases().await; match output_type { OutputType::Text => { @@ -140,15 +140,20 @@ async fn list_releases(output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } -async fn describe_release(release_name: &str, output_type: &OutputType) { +async fn describe_release( + release_name: &str, + output_type: &OutputType, +) -> Result<(), Box> { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -179,12 +184,14 @@ async fn describe_release(release_name: &str, output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } /// If include_operators is an non-empty list only the whitelisted product operators will be installed. diff --git a/src/services.rs b/src/services.rs index f617ed2c..968d94c8 100644 --- a/src/services.rs +++ b/src/services.rs @@ -246,10 +246,10 @@ async fn list_services( print!("{}", table.display()?); } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } @@ -262,7 +262,7 @@ pub async fn get_stackable_services( show_versions: bool, ) -> Result>, Box> { let mut result = IndexMap::new(); - let namespace = NAMESPACE.lock().unwrap().clone(); + let namespace = NAMESPACE.lock()?.clone(); let client = get_client().await?; @@ -361,7 +361,12 @@ pub async fn get_extra_infos( if let Some(secret_name) = product_crd.data["spec"]["credentialsSecret"].as_str() { let credentials = get_credentials_from_secret( secret_name, - product_crd.namespace().unwrap().as_str(), + product_crd + .namespace() + .ok_or(format!( + "The custom resource {product_crd:?} had no namespace set" + ))? 
+ .as_str(), "adminUser.username", "adminUser.password", redact_credentials, @@ -396,15 +401,17 @@ async fn get_credentials_from_secret( let secret_api: Api = Api::namespaced(client, secret_namespace); let secret = secret_api.get(secret_name).await?; - let secret_data = secret.data.unwrap(); + let secret_data = secret + .data + .ok_or(format!("Secret {secret_name} had no data"))?; match (secret_data.get(username_key), secret_data.get(password_key)) { (Some(username), Some(password)) => { - let username = String::from_utf8(username.0.clone()).unwrap(); + let username = String::from_utf8(username.0.clone())?; let password = if redact_credentials { REDACTED_PASSWORD.to_string() } else { - String::from_utf8(password.0.clone()).unwrap() + String::from_utf8(password.0.clone())? }; Ok(Some((username, password))) } @@ -418,7 +425,7 @@ async fn get_minio_services( ) -> Result, Box> { let client = get_client().await?; let deployment_api: Api = match namespaced { - true => Api::namespaced(client.clone(), NAMESPACE.lock().unwrap().as_str()), + true => Api::namespaced(client.clone(), NAMESPACE.lock()?.as_str()), false => Api::all(client.clone()), }; let list_params = ListParams::default().labels("app=minio"); @@ -427,7 +434,9 @@ async fn get_minio_services( let mut result = Vec::new(); for minio_deployment in minio_deployments { let deployment_name = minio_deployment.name(); - let deployment_namespace = minio_deployment.namespace().unwrap(); + let deployment_namespace = minio_deployment.namespace().ok_or(format!( + "MinIO deployment {deployment_name} had no namespace" + ))?; let service_names = vec![ deployment_name.clone(), diff --git a/src/stack.rs b/src/stack.rs index ba688622..50a09eec 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -59,7 +59,7 @@ pub enum CliCommandStack { impl CliCommandStack { pub async fn handle(&self) -> Result<(), Box> { match self { - CliCommandStack::List { output } => list_stacks(output).await, + CliCommandStack::List { output } => list_stacks(output).await?, CliCommandStack::Describe { stack, output } => describe_stack(stack, output).await?, CliCommandStack::Install { stack, @@ -115,7 +115,7 @@ struct HelmChartRepo { url: String, } -async fn list_stacks(output_type: &OutputType) { +async fn list_stacks(output_type: &OutputType) -> Result<(), Box> { let output = get_stacks().await; match output_type { OutputType::Text => { @@ -128,12 +128,14 @@ async fn list_stacks(output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } async fn describe_stack(stack_name: &str, output_type: &OutputType) -> Result<(), Box> { @@ -162,10 +164,10 @@ async fn describe_stack(stack_name: &str, output_type: &OutputType) -> Result<() println!("Labels: {}", output.labels.join(", ")); } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } @@ -189,12 +191,9 @@ async fn install_stack(stack_name: &str) -> Result<(), Box> { options, } => { debug!("Installing helm chart {name} as {release_name}"); - HELM_REPOS - .lock() - .unwrap() - .insert(repo.name.clone(), repo.url); + 
HELM_REPOS.lock()?.insert(repo.name.clone(), repo.url); - let values_yaml = serde_yaml::to_string(&options).unwrap(); + let values_yaml = serde_yaml::to_string(&options)?; helm::install_helm_release_from_repo( &release_name, &release_name, From 40c1c38ddec9329cdd2042684a4448d7a70443b4 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 11:37:42 +0200 Subject: [PATCH 113/177] docs: Clarify stackable services --- docs/modules/ROOT/pages/commands/services.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/services.adoc b/docs/modules/ROOT/pages/commands/services.adoc index 5a307e24..cd244cc6 100644 --- a/docs/modules/ROOT/pages/commands/services.adoc +++ b/docs/modules/ROOT/pages/commands/services.adoc @@ -1,7 +1,7 @@ = Services +In this context a (Stackable) service is a running instance of an data product. This is different from the meaning of an kubernetes services, which is an abstract way to expose an application running on a set of pods as a network service. == List running services - The `stackablectl services` command allows to inspect the running services of the Stackable Data Platform. Currently you can only get a read-only view of the running services, future versions may allow to e.g. uninstall running services. From 4e7225fbd01ed38da96964edce4fd5021e19182f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 11:52:42 +0200 Subject: [PATCH 114/177] Removed examples from stacks --- stacks/druid-superset-s3/superset.yaml | 1 - stacks/trino-superset-s3/superset.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/stacks/druid-superset-s3/superset.yaml b/stacks/druid-superset-s3/superset.yaml index 8f0f9de5..09c60319 100644 --- a/stacks/druid-superset-s3/superset.yaml +++ b/stacks/druid-superset-s3/superset.yaml @@ -7,7 +7,6 @@ spec: version: 1.5.1-stackable0.2.0 statsdExporterVersion: v0.22.4 credentialsSecret: superset-credentials - loadExamplesOnInit: true nodes: roleGroups: default: diff --git a/stacks/trino-superset-s3/superset.yaml b/stacks/trino-superset-s3/superset.yaml index 78864812..dad9b44e 100644 --- a/stacks/trino-superset-s3/superset.yaml +++ b/stacks/trino-superset-s3/superset.yaml @@ -7,7 +7,6 @@ spec: version: 1.5.1-stackable0.2.0 statsdExporterVersion: v0.22.4 credentialsSecret: superset-credentials - loadExamplesOnInit: true nodes: roleGroups: default: From 92bbde4cf69eab66f89bfd1eb148a41e2aaf99ae Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 12:03:03 +0200 Subject: [PATCH 115/177] Improve error when creating kube client --- src/kube.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index 66cc34bb..7327fc8c 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -158,7 +158,9 @@ async fn get_node_name_ip_mapping() -> Result, String> { } pub async fn get_client() -> Result> { - Ok(Client::try_default().await?) + Client::try_default() + .await + .map_err(|err| format! 
{"Failed to construct kubernetes client: {err}"}.into()) } fn gvk_of_typemeta(type_meta: &TypeMeta) -> GroupVersionKind { From 41dad5e2d0e01f9e1eb24733422a0a3608867df9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 12:05:19 +0200 Subject: [PATCH 116/177] Fix weird match statement --- src/services.rs | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/services.rs b/src/services.rs index 968d94c8..35f1c949 100644 --- a/src/services.rs +++ b/src/services.rs @@ -148,19 +148,11 @@ impl CliCommandServices { redact_credentials, show_versions, } => { - match list_services(*all_namespaces, *redact_credentials, *show_versions, output) - .await - { - Ok(()) => Ok(()), - Err(err) => { - // match err.as_ref() { - - // } - Err(err) - } - } + list_services(*all_namespaces, *redact_credentials, *show_versions, output).await?; } } + + Ok(()) } } From 36755d13f00641adb4f5d41b3ea14f48f4829076 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 15:16:27 +0200 Subject: [PATCH 117/177] Use labels to retrive services that belong to a CRD --- src/kube.rs | 30 ++++++++++------- src/services.rs | 87 +++++++++++++++++++------------------------------ 2 files changed, 51 insertions(+), 66 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 7327fc8c..1b1095be 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -7,7 +7,7 @@ use kube::{ discovery::Scope, Api, Client, Discovery, ResourceExt, }; -use log::warn; +use log::{warn, debug}; use serde::Deserialize; use std::{collections::HashMap, error::Error}; @@ -42,16 +42,17 @@ pub async fn deploy_manifests(yaml: &str) -> Result<(), Box> { } pub async fn get_service_endpoint_urls( - service_name: &str, - object_name: &str, - namespace: &str, + service: &Service, + referenced_object_name: &str, client: Client, ) -> Result, Box> { - let service_api: Api = Api::namespaced(client.clone(), namespace); - let service = service_api.get(service_name).await?; + let namespace = service + .namespace() + .ok_or(format!("Service {service:?} must have a namespace"))?; + let service_name = service.name(); - let endpoints_api: Api = Api::namespaced(client.clone(), namespace); - let endpoints = endpoints_api.get(service_name).await?; + let endpoints_api: Api = Api::namespaced(client.clone(), &namespace); + let endpoints = endpoints_api.get(&service_name).await?; let node_name = match &endpoints.subsets { Some(subsets) if subsets.len() == 1 => match &subsets[0].addresses { @@ -82,17 +83,22 @@ pub async fn get_service_endpoint_urls( let mut result = IndexMap::new(); for service_port in service .spec + .as_ref() .ok_or(format!("Service {service_name} had no spec"))? 
.ports - .unwrap_or_default() + .iter() + .flatten() { match service_port.node_port { Some(node_port) => { let endpoint_name = service_name - .trim_start_matches(object_name) + .trim_start_matches(referenced_object_name) .trim_start_matches('-'); - let port_name = service_port.name.unwrap_or_else(|| node_port.to_string()); + let port_name = service_port + .name + .clone() + .unwrap_or_else(|| service_port.port.to_string()); let endpoint_name = if endpoint_name.is_empty() { port_name.clone() } else { @@ -109,7 +115,7 @@ pub async fn get_service_endpoint_urls( result.insert(endpoint_name, endpoint); } - None => warn!("Could not get endpoint_url as service {service_name} has no nodePort"), + None => debug!("Could not get endpoint_url as service {service_name} has no nodePort"), } } diff --git a/src/services.rs b/src/services.rs index 35f1c949..10e65a66 100644 --- a/src/services.rs +++ b/src/services.rs @@ -4,7 +4,10 @@ use cli_table::{ Cell, Table, }; use indexmap::IndexMap; -use k8s_openapi::api::{apps::v1::Deployment, core::v1::Secret}; +use k8s_openapi::api::{ + apps::v1::Deployment, + core::v1::{Secret, Service}, +}; use kube::{ api::{DynamicObject, GroupVersionKind, ListParams}, core::ErrorResponse, @@ -259,42 +262,50 @@ pub async fn get_stackable_services( let client = get_client().await?; for (product_name, product_gvk) in STACKABLE_PRODUCT_CRDS.iter() { - let api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); - let api: Api = match namespaced { - true => Api::namespaced_with(client.clone(), &namespace, &api_resource), - false => Api::all_with(client.clone(), &api_resource), + let object_api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); + let object_api: Api = match namespaced { + true => Api::namespaced_with(client.clone(), &namespace, &object_api_resource), + false => Api::all_with(client.clone(), &object_api_resource), }; - let objects = api.list(&ListParams::default()).await; + + let objects = object_api.list(&ListParams::default()).await; match objects { Ok(objects) => { let mut installed_products = Vec::new(); for object in objects { let object_name = object.name(); - let object_namespace = object.namespace(); + let object_namespace = match object.namespace() { + Some(namespace) => namespace, + // If the custom resource does not have a namespace set it can't expose a service + None => continue, + }; + + let service_api: Api = + Api::namespaced(client.clone(), object_namespace.as_str()); + let service_list_params = ListParams::default() + .labels(format!("app.kubernetes.io/instance={product_name}").as_str()) + .labels(format!("app.kubernetes.io/name={object_name}").as_str()); + let services = service_api.list(&service_list_params).await?; - let service_names = get_service_names(&object_name, product_name); let extra_infos = get_extra_infos(product_name, &object, redact_credentials, show_versions) .await?; let mut endpoints = IndexMap::new(); - for service_name in service_names { + for service in services { let service_endpoint_urls = - get_service_endpoint_urls(&service_name, &object_name, object_namespace - .as_ref() - .expect("Failed to get the namespace of object {object_name} besides it having an service") - , client.clone()) - .await; + get_service_endpoint_urls(&service, &object_name, client.clone()).await; match service_endpoint_urls { Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), Err(err) => warn!( - "Failed to get endpoint_urls of service {service_name}: {err}" + "Failed to get endpoint_urls of service 
{service_name}: {err}", + service_name = service.name(), ), } } let product = InstalledProduct { name: object_name, - namespace: object_namespace, + namespace: Some(object_namespace), endpoints, extra_infos, }; @@ -306,7 +317,7 @@ pub async fn get_stackable_services( debug!("ProductCRD for product {product_name} not installed"); } Err(err) => { - return Err(Box::new(err)); + return Err(err.into()); } } } @@ -314,32 +325,6 @@ pub async fn get_stackable_services( Ok(result) } -pub fn get_service_names(product_name: &str, product: &str) -> Vec { - match product { - "airflow" => vec![format!("{product_name}-webserver")], - "druid" => vec![ - format!("{product_name}-router"), - format!("{product_name}-coordinator"), - ], - "hbase" => vec![product_name.to_string()], - "hdfs" => vec![ - format!("{product_name}-datanode-default-0"), - format!("{product_name}-namenode-default-0"), - format!("{product_name}-journalnode-default-0"), - ], - "hive" => vec![product_name.to_string()], - "nifi" => vec![product_name.to_string()], - "opa" => vec![product_name.to_string()], - "superset" => vec![format!("{product_name}-external")], - "trino" => vec![format!("{product_name}-coordinator")], - "zookeeper" => vec![product_name.to_string()], - _ => { - warn!("Cannot calculated exposed services names as product {product} is not known"); - vec![] - } - } -} - pub async fn get_extra_infos( product: &str, product_crd: &DynamicObject, @@ -430,6 +415,7 @@ async fn get_minio_services( "MinIO deployment {deployment_name} had no namespace" ))?; + let service_api = Api::namespaced(client.clone(), &deployment_namespace); let service_names = vec![ deployment_name.clone(), format!("{deployment_name}-console"), @@ -437,17 +423,10 @@ async fn get_minio_services( let mut endpoints = IndexMap::new(); for service_name in service_names { - let service_endpoint_urls = get_service_endpoint_urls( - &service_name, - &deployment_name, - &deployment_namespace, - client.clone(), - ) - .await; - match service_endpoint_urls { - Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), - Err(err) => warn!("Failed to get endpoint_urls of service {service_name}: {err}"), - } + let service = service_api.get(&service_name).await?; + let service_endpoint_urls = + get_service_endpoint_urls(&service, &deployment_name, client.clone()).await?; + endpoints.extend(service_endpoint_urls); } let mut extra_infos = vec!["Third party service".to_string()]; From 8b38f98f19790d8aedd8e91a66c3c5a459ba6435 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 15:25:10 +0200 Subject: [PATCH 118/177] Only use a sinlge redis replica for Airflow stack --- stacks.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stacks.yaml b/stacks.yaml index 8c0f28b2..ac657597 100644 --- a/stacks.yaml +++ b/stacks.yaml @@ -148,4 +148,6 @@ stacks: options: auth: password: airflow + replica: + replicaCount: 1 - plainYaml: stacks/airflow/airflow.yaml From a23dae96846fa870dc4339dd24d2bec68fac6fa4 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 1 Aug 2022 15:58:10 +0200 Subject: [PATCH 119/177] cargo fmt --- src/kube.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index 1b1095be..62c3aad2 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -7,7 +7,7 @@ use kube::{ discovery::Scope, Api, Client, Discovery, ResourceExt, }; -use log::{warn, debug}; +use log::{debug, warn}; use serde::Deserialize; use std::{collections::HashMap, error::Error}; From a28a2f446f3c36a7e182f955ad60257c72eb5afe Mon Sep 17 
00:00:00 2001 From: Sebastian Bernauer Date: Tue, 2 Aug 2022 10:42:41 +0200 Subject: [PATCH 120/177] docs --- docs/modules/ROOT/pages/commands/services.adoc | 2 +- src/arguments.rs | 14 +++++++------- src/kube.rs | 6 +++--- src/operator.rs | 4 ++-- src/release.rs | 4 ++-- src/stack.rs | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/modules/ROOT/pages/commands/services.adoc b/docs/modules/ROOT/pages/commands/services.adoc index cd244cc6..c51f9561 100644 --- a/docs/modules/ROOT/pages/commands/services.adoc +++ b/docs/modules/ROOT/pages/commands/services.adoc @@ -1,5 +1,5 @@ = Services -In this context a (Stackable) service is a running instance of an data product. This is different from the meaning of an kubernetes services, which is an abstract way to expose an application running on a set of pods as a network service. +In this context a (Stackable) service is a running instance of an data product. This is different from the meaning of an Kubernetes service, which is an abstract way to expose an application running on a set of pods as a network service. == List running services The `stackablectl services` command allows to inspect the running services of the Stackable Data Platform. diff --git a/src/arguments.rs b/src/arguments.rs index 9823ee02..a2fd93e0 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -56,18 +56,18 @@ pub struct CliArgs { /// Adds a YAML file containing custom releases /// - /// If you don't have access to the Stackable GitHub repos or you want to maintain your own releases you can specify additional YAML files containing release information. - /// Have a look [here](https://raw.githubusercontent.com/stackabletech/stackablectl/main/releases.yaml) for the structure. - /// Can either be an URL or a path to a file e.g. `https://my.server/my-releases.yaml` or '/etc/my-releases.yaml' or `C:\Users\Sebastian\my-releases.yaml`. + /// If you do not have access to the Stackable repositories on GitHub or if you want to maintain your own releases, you can specify additional YAML files containing release information. + /// Have a look at for the structure. + /// Can either be a URL or a path to a file, e.g. `https://my.server/my-releases.yaml`, '/etc/my-releases.yaml' or `C:\Users\Bob\my-releases.yaml`. /// Can be specified multiple times. #[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] pub additional_releases_file: Vec, /// Adds a YAML file containing custom stacks /// - /// If you don't have access to the Stackable GitHub repos or you want to maintain your own stacks you can specify additional YAML files containing stack information. - /// Have a look [here](https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml) for the structure. - /// Can either be an URL or a path to a file e.g. `https://my.server/my-stacks.yaml` or '/etc/my-stacks.yaml' or `C:\Users\Sebastian\my-stacks.yaml`. + /// If you do not have access to the Stackable repositories on GitHub or if you want to maintain your own stacks, you can specify additional YAML files containing stack information. + /// Have a look at for the structure. + /// Can either be a URL or a path to a file, e.g. `https://my.server/my-stacks.yaml`, '/etc/my-stacks.yaml' or `C:\Users\Bob\my-stacks.yaml`. /// Can be specified multiple times. 
#[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] pub additional_stacks_file: Vec, @@ -83,7 +83,7 @@ pub enum CliCommand { #[clap(subcommand, alias("r"), alias("re"))] Release(CliCommandRelease), - /// This subcommand interacts with stacks, which are ready-to-use combinations of products. + /// This subcommand interacts with stacks which are ready-to-use combinations of products. #[clap(subcommand, alias("s"), alias("st"))] Stack(CliCommandStack), diff --git a/src/kube.rs b/src/kube.rs index 62c3aad2..f398a977 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -135,12 +135,12 @@ async fn get_node_ip(node_name: &str) -> Result> { async fn get_node_name_ip_mapping() -> Result, String> { let client = get_client() .await - .map_err(|err| format!("Failed to create kubernetes client: {err}"))?; + .map_err(|err| format!("Failed to create Kubernetes client: {err}"))?; let node_api: Api = Api::all(client); let nodes = node_api .list(&ListParams::default()) .await - .map_err(|err| format!("Failed to list kubernetes nodes: {err}"))?; + .map_err(|err| format!("Failed to list Kubernetes nodes: {err}"))?; let mut result = HashMap::new(); for node in nodes { @@ -166,7 +166,7 @@ async fn get_node_name_ip_mapping() -> Result, String> { pub async fn get_client() -> Result> { Client::try_default() .await - .map_err(|err| format! {"Failed to construct kubernetes client: {err}"}.into()) + .map_err(|err| format! {"Failed to construct Kubernetes client: {err}"}.into()) } fn gvk_of_typemeta(type_meta: &TypeMeta) -> GroupVersionKind { diff --git a/src/operator.rs b/src/operator.rs index d52d02c0..a31b087b 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -33,8 +33,8 @@ pub enum CliCommandOperator { #[clap(multiple_occurrences(true), required = true, value_hint = ValueHint::Other)] operators: Vec, - /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. + /// If specified a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] diff --git a/src/release.rs b/src/release.rs index 84008558..c6a2823c 100644 --- a/src/release.rs +++ b/src/release.rs @@ -53,8 +53,8 @@ pub enum CliCommandRelease { #[clap(short, long, value_hint = ValueHint::Other)] exclude_products: Vec, - /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. + /// If specified a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] diff --git a/src/stack.rs b/src/stack.rs index 50a09eec..bf4031c6 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -38,8 +38,8 @@ pub enum CliCommandStack { #[clap(required = true, value_hint = ValueHint::Other)] stack: String, - /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. 
- /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. + /// If specified a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] From 2459894b1716201f42c8858b6ba3e76524c32286 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 2 Aug 2022 10:43:53 +0200 Subject: [PATCH 121/177] docs --- src/operator.rs | 2 +- src/release.rs | 2 +- src/stack.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/operator.rs b/src/operator.rs index a31b087b..ebad63c9 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -33,7 +33,7 @@ pub enum CliCommandOperator { #[clap(multiple_occurrences(true), required = true, value_hint = ValueHint::Other)] operators: Vec, - /// If specified a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at diff --git a/src/release.rs b/src/release.rs index c6a2823c..78eeb040 100644 --- a/src/release.rs +++ b/src/release.rs @@ -53,7 +53,7 @@ pub enum CliCommandRelease { #[clap(short, long, value_hint = ValueHint::Other)] exclude_products: Vec, - /// If specified a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at diff --git a/src/stack.rs b/src/stack.rs index bf4031c6..288273a9 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -38,7 +38,7 @@ pub enum CliCommandStack { #[clap(required = true, value_hint = ValueHint::Other)] stack: String, - /// If specified a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at From 5880bf78c0c147ec05ffc393d294d413a3eefbd8 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 2 Aug 2022 10:44:43 +0200 Subject: [PATCH 122/177] docs --- src/operator.rs | 2 +- src/release.rs | 2 +- src/stack.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/operator.rs b/src/operator.rs index ebad63c9..4d4907cb 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -34,7 +34,7 @@ pub enum CliCommandOperator { operators: Vec, /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. + /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. /// You need to have `docker` and `kind` installed. 
/// Have a look at our documentation on how to install `kind` at #[clap(short, long)] diff --git a/src/release.rs b/src/release.rs index 78eeb040..c5b4d026 100644 --- a/src/release.rs +++ b/src/release.rs @@ -54,7 +54,7 @@ pub enum CliCommandRelease { exclude_products: Vec, /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. + /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] diff --git a/src/stack.rs b/src/stack.rs index 288273a9..28c77277 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -39,7 +39,7 @@ pub enum CliCommandStack { stack: String, /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local Kubernetes cluster running on docker on your machine. + /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] From 424d29578d938262742f12b29875660ab57386be Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 2 Aug 2022 10:46:29 +0200 Subject: [PATCH 123/177] Use extend_from_slice instead of append --- src/release.rs | 2 +- src/stack.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/release.rs b/src/release.rs index c5b4d026..1921a299 100644 --- a/src/release.rs +++ b/src/release.rs @@ -104,7 +104,7 @@ impl CliCommandRelease { pub fn handle_common_cli_args(args: &CliArgs) { let mut release_files = RELEASE_FILES.lock().unwrap(); - release_files.append(&mut args.additional_releases_file.clone()); + release_files.extend_from_slice(&args.additional_releases_file); } #[derive(Clone, Debug, Deserialize, Serialize)] diff --git a/src/stack.rs b/src/stack.rs index 28c77277..f521e203 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -76,7 +76,7 @@ impl CliCommandStack { pub fn handle_common_cli_args(args: &CliArgs) { let mut stack_files = STACK_FILES.lock().unwrap(); - stack_files.append(&mut args.additional_stacks_file.clone()); + stack_files.extend_from_slice(&args.additional_stacks_file); } #[derive(Clone, Debug, Deserialize, Serialize)] From 10a72f630b76a7a36c0cadb8e728217719b402e1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 2 Aug 2022 10:51:16 +0200 Subject: [PATCH 124/177] Remove uneeded .iter() --- src/operator.rs | 4 ++-- src/release.rs | 6 +++--- src/services.rs | 4 ++-- src/stack.rs | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/operator.rs b/src/operator.rs index 4d4907cb..47f5589d 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -114,7 +114,7 @@ async fn list_operators(output_type: &OutputType) -> Result<(), Box> match output_type { OutputType::Text => { println!("OPERATOR STABLE VERSIONS"); - for (operator, operator_entry) in output.iter() { + for (operator, operator_entry) in output { println!( "{:18} {}", operator, @@ -239,7 +239,7 @@ fn list_installed_operators(output_type: &OutputType) -> Result<(), Box { println!("OPERATOR VERSION NAMESPACE STATUS LAST UPDATED"); - for (operator, operator_entry) in output.iter() { + for (operator, operator_entry) in output { println!( "{:21} {:15} {:30} {:16} {}", operator, diff --git 
a/src/release.rs b/src/release.rs index 1921a299..7394d95a 100644 --- a/src/release.rs +++ b/src/release.rs @@ -132,7 +132,7 @@ async fn list_releases(output_type: &OutputType) -> Result<(), Box> { match output_type { OutputType::Text => { println!("RELEASE RELEASE DATE DESCRIPTION"); - for (release_name, release_entry) in output.releases.iter() { + for (release_name, release_entry) in output.releases { println!( "{:18} {:14} {}", release_name, release_entry.release_date, release_entry.description, @@ -179,7 +179,7 @@ async fn describe_release( println!("Included products:"); println!(); println!("PRODUCT OPERATOR VERSION"); - for (product_name, product) in output.products.iter() { + for (product_name, product) in output.products { println!("{:19} {}", product_name, product.operator_version); } } @@ -204,7 +204,7 @@ pub async fn install_release( info!("Installing release {release_name}"); let release = get_release(release_name).await; - for (product_name, product) in release.products.into_iter() { + for (product_name, product) in release.products { let included = include_products.is_empty() || include_products.contains(&product_name); let excluded = exclude_products.contains(&product_name); diff --git a/src/services.rs b/src/services.rs index 10e65a66..e8473c84 100644 --- a/src/services.rs +++ b/src/services.rs @@ -193,7 +193,7 @@ async fn list_services( .max() .unwrap_or_default(); - for (product_name, installed_products) in output.iter() { + for (product_name, installed_products) in output { for installed_product in installed_products { let mut endpoints = vec![]; for endpoint in &installed_product.endpoints { @@ -210,7 +210,7 @@ async fn list_services( .join("\n"); table.push(vec![ - product_name.cell(), + (&product_name).cell(), installed_product.name.as_str().cell(), installed_product .namespace diff --git a/src/stack.rs b/src/stack.rs index f521e203..94162d27 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -120,7 +120,7 @@ async fn list_stacks(output_type: &OutputType) -> Result<(), Box> { match output_type { OutputType::Text => { println!("STACK STACKABLE RELEASE DESCRIPTION"); - for (stack_name, stack) in output.stacks.iter() { + for (stack_name, stack) in output.stacks { println!( "{:35} {:18} {}", stack_name, stack.stackable_release, stack.description, From c4facfe0e799162fddd7848457f1604a18d5f484 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:47:06 +0200 Subject: [PATCH 125/177] Fix tokio version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 85b887a5..2ff1d9e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" reqwest = "0.11" # Using native-tls as openssl does not seem to be supported as of 0.11 -tokio = "1.19.2" +tokio = "1.19" [profile.release] # strip = true # By default on Linux and macOS, symbol information is included in the compiled .elf file. 
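The commits above converge on a single error-handling idiom: `Option` values become `Result`s via `ok_or(format!(...))?`, and the resulting `String` error is converted into `Box<dyn Error>` by the `?` operator. A minimal, self-contained sketch of that pattern; the struct and function names here are illustrative and not taken from the patches:

[source,rust]
----
use std::error::Error;

// Stand-in type; the real code operates on kube-rs objects instead.
struct ServiceLike {
    namespace: Option<String>,
}

// `ok_or(format!(...))` yields a `Result<_, String>`; the `?` operator then converts
// the `String` error into `Box<dyn Error>`, a conversion provided by the standard library.
fn namespace_of(service: &ServiceLike, name: &str) -> Result<String, Box<dyn Error>> {
    let namespace = service
        .namespace
        .clone()
        .ok_or(format!("Service {name} must have a namespace"))?;
    Ok(namespace)
}

fn main() -> Result<(), Box<dyn Error>> {
    let service = ServiceLike {
        namespace: Some("default".to_string()),
    };
    println!("{}", namespace_of(&service, "minio")?);
    Ok(())
}
----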
From 8958362449eddc3d1b0d0f2fa67954a2cac3c829 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:47:51 +0200 Subject: [PATCH 126/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 8860747a..404c7427 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -84,7 +84,7 @@ Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/qui With this command we installed the operator for Apache Airflow as well as two operators needed internally by the Stackable Data Platform (commons and secret). -As we didn't specify a specific version to install the operators were installed in the latest nightly version - build from the main branch of the operator. +As we didn't specify a specific version to install, the operators were installed in the latest nightly version - built from the main branch of the operators. If you want to install a specific version you can add the version to each operator to install as follows From 5f281a1f1bf8963cf8e76169c89b7d9fd6ca1603 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:48:04 +0200 Subject: [PATCH 127/177] Update docs/modules/ROOT/pages/troubleshooting.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/troubleshooting.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc index f0db1c91..1ead1618 100644 --- a/docs/modules/ROOT/pages/troubleshooting.adoc +++ b/docs/modules/ROOT/pages/troubleshooting.adoc @@ -1,7 +1,7 @@ = Troubleshooting == No internet connectivity -`stackablectl` uses a Internet connection to always know of all the available versions, releases, stacks and demos. +`stackablectl` uses an Internet connection to always know of all the available versions, releases, stacks and demos. 
To achieve this the following online services will be contacted: [%autowidth.stretch] From 6617b843d75578feaa97bc558b8a5aa47eefd4fb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:48:15 +0200 Subject: [PATCH 128/177] Update docs/modules/ROOT/pages/installation.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/installation.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index 146ab058..9d2dfe6e 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -83,7 +83,7 @@ To build `stackablectl` from source you need to have the following tools install * Make ** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate] Make is needed to compile openssl from source -If you have the required tools available you need to clone the `stackablectl` repo https://github.com/stackabletech/stackablectl and invoke the build with +If you have the required tools available, you need to clone the `stackablectl` repo https://github.com/stackabletech/stackablectl and invoke the build with [source,console] ---- From 975c08e595641fc537db6380a5451fcc7a8b2274 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:48:24 +0200 Subject: [PATCH 129/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 404c7427..073b3def 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -101,7 +101,7 @@ As you can see, the three operators where installed in the requested version. Remember: If you want to install a recommended and tested set of operator versions, have a look at the xref:commands/release.adoc[] command. == List installed operators -After installing some operators, you can list which operators are installed in you Kubernetes cluster. +After installing some operators, you can list which operators are installed in your Kubernetes cluster. [source,console] ---- From 65e823a9a79c85ae4b89979d612c660b5975d00f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:48:36 +0200 Subject: [PATCH 130/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 073b3def..dc135d01 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -86,7 +86,7 @@ With this command we installed the operator for Apache Airflow as well as two op As we didn't specify a specific version to install, the operators were installed in the latest nightly version - built from the main branch of the operators. 
-If you want to install a specific version you can add the version to each operator to install as follows +If you want to install a specific version, you can add the version to each operator to install as follows: [source,console] ---- From 4361dea9c2a6a553cbaa475312eda120682ef1d1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:48:50 +0200 Subject: [PATCH 131/177] Update docs/modules/ROOT/pages/commands/release.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/release.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc index 267f4909..a98258ec 100644 --- a/docs/modules/ROOT/pages/commands/release.adoc +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -1,7 +1,7 @@ = Release A release is a well-playing bundle of operators. -If you want to install an single individual operator have a look at the xref:commands/operator.adoc[] command. +If you want to install a single individual operator, have a look at the xref:commands/operator.adoc[] command. == Browse available releases To list the available Stackable releases run the following command: From a24c9ca508b638a37cc45345a0781a88124584de Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:49:05 +0200 Subject: [PATCH 132/177] Update docs/modules/ROOT/pages/commands/release.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/release.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc index a98258ec..fc84f69e 100644 --- a/docs/modules/ROOT/pages/commands/release.adoc +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -13,7 +13,7 @@ RELEASE RELEASE DATE DESCRIPTION 22.06 2022-06-30 First official release of the Stackable Data Platform ---- -Detailed information of a release can queried with the `describe` command: +Detailed information of a release can be queried with the `describe` command: [source,console] ---- From 09039c573b0630ab308f01e08b78345379f589cc Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:49:23 +0200 Subject: [PATCH 133/177] Update docs/modules/ROOT/pages/commands/services.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/services.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/services.adoc b/docs/modules/ROOT/pages/commands/services.adoc index c51f9561..16b060c8 100644 --- a/docs/modules/ROOT/pages/commands/services.adoc +++ b/docs/modules/ROOT/pages/commands/services.adoc @@ -1,5 +1,5 @@ = Services -In this context a (Stackable) service is a running instance of an data product. This is different from the meaning of an Kubernetes service, which is an abstract way to expose an application running on a set of pods as a network service. +In this context a (Stackable) service is a running instance of a data product. This is different from the meaning of a Kubernetes service which is an abstract way to expose an application running on a set of pods as a network service. == List running services The `stackablectl services` command allows to inspect the running services of the Stackable Data Platform. 
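Related to the services documentation above: the label-selector commit earlier in this series finds the Kubernetes Services behind a Stackable service instance with a namespaced `Api<Service>` and the recommended `app.kubernetes.io/*` labels. A minimal sketch under assumptions (a kube-rs version contemporary with these patches, a made-up function name, and a single comma-joined selector instead of the chained `.labels()` calls used in the patch):

[source,rust]
----
use k8s_openapi::api::core::v1::Service;
use kube::{api::ListParams, Api, Client};

// List the Services carrying the recommended labels for one product instance,
// e.g. app.kubernetes.io/name=superset and app.kubernetes.io/instance=my-superset.
async fn services_of_instance(
    client: Client,
    namespace: &str,
    product_name: &str,
    instance_name: &str,
) -> Result<Vec<Service>, kube::Error> {
    let api: Api<Service> = Api::namespaced(client, namespace);
    // One comma-joined selector: both label conditions must match.
    let selector = format!(
        "app.kubernetes.io/name={product_name},app.kubernetes.io/instance={instance_name}"
    );
    let services = api.list(&ListParams::default().labels(&selector)).await?;
    Ok(services.items)
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::try_default().await?;
    for service in services_of_instance(client, "default", "superset", "superset").await? {
        println!("{:?}", service.metadata.name);
    }
    Ok(())
}
----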
From 913a6e29f02bb0c20d2719a4fbf1cbfd77b66013 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:49:41 +0200 Subject: [PATCH 134/177] Update docs/modules/ROOT/pages/commands/stack.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/stack.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index 695d0d6c..c5ef13d9 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -13,7 +13,7 @@ druid-superset-s3 22.06 Stack containing MinIO, D airflow 22.06 Stack containing Airflow scheduling platform ---- -Detailed information of a stack can queried with the `describe` command: +Detailed information of a stack can be queried with the `describe` command: [source,console] ---- From 6f46e781aea4aa6a7bb421f7f979078c13807838 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:49:56 +0200 Subject: [PATCH 135/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index f7186843..f440d35a 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -14,7 +14,7 @@ To easily achieve this you can create your own demo so that it can easily be rep === Adding a new demo First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. -After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demo-files mycorp-demos.yaml`. +After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. The `` can be either a path to a file on the local filesystem or a URL. By using a URL the demos file can be put into to a central Git repository and referenced by all teams or clients. Multiple `--additional-demo-files` flags can be specified to include multiple demo files. From e9261cac23b9fa434dc9627fac83e89671612985 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:50:14 +0200 Subject: [PATCH 136/177] Update docs/modules/ROOT/pages/commands/stack.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/commands/stack.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index c5ef13d9..0e646689 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -92,7 +92,7 @@ Have a nice day! 
👋 [INFO ] Installed stack druid-superset-s3 ---- -After installing the stack, we can access the running services using the xref:commands/operator.adoc[] command: +After installing the stack, we can access the running services using the xref:commands/services.adoc[] command: [source,console] ---- From c870949284ed66b9986a7ecf6044b42b512c8ad3 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:50:24 +0200 Subject: [PATCH 137/177] Update docs/modules/ROOT/pages/troubleshooting.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/troubleshooting.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc index 1ead1618..f790f1ee 100644 --- a/docs/modules/ROOT/pages/troubleshooting.adoc +++ b/docs/modules/ROOT/pages/troubleshooting.adoc @@ -39,7 +39,7 @@ $ stackablectl --helm-repo-stackable-stable https://my.corp/stackable/repository === Mirror releases/stacks/demos files You need to mirror the URL to either a URL or a file on disk. -You can then specify the mirrored file to be included via `--additional-release-files`, `--additional-stack-files`, or `--additional-demo-files`, e.g. +You can then specify the mirrored file to be included via `--additional-releases-file`, `--additional-stacks-file`, or `--additional-demos-file`, e.g. [source,console] ---- From 623d77c12a35ab389c4abcc3d0c27d492e85baeb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:50:35 +0200 Subject: [PATCH 138/177] Update docs/modules/ROOT/pages/troubleshooting.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/troubleshooting.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc index f790f1ee..be2495ae 100644 --- a/docs/modules/ROOT/pages/troubleshooting.adoc +++ b/docs/modules/ROOT/pages/troubleshooting.adoc @@ -43,7 +43,7 @@ You can then specify the mirrored file to be included via `--additional-releases [source,console] ---- -$ stackablectl --additional-release-files=/home/sbernauer/Downloads/releases.yaml release list +$ stackablectl --additional-releases-file=/home/sbernauer/Downloads/releases.yaml release list ---- == `panic: open /tmp/.helmcache/stackable-stable-index.yaml: permission denied` From 2f080a68e6a481f3586393a32e5f66eb82832248 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:50:48 +0200 Subject: [PATCH 139/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index f440d35a..22c6c55e 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -1,5 +1,5 @@ = Customization -If you're working for a large company chances are that there are multiple teams using the Stackable Data Platform. +If you're working for a large company, chances are that there are multiple teams using the Stackable Data Platform. A single team can also operate multiple Stackable Data Platforms. `stackablectl` is build in a way customers or even single developers can define their own release, stack and even demo! This way it is possible to cover the following use-cases. 
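A small aside on the `--additional-releases-file`, `--additional-stacks-file` and `--additional-demos-file` flags discussed above: they simply append extra YAML locations to the built-in list, which is what the earlier `extend_from_slice` commit streamlines. A tiny sketch with assumed names (the real lists are static, Mutex-guarded Vecs):

[source,rust]
----
use std::sync::Mutex;

fn main() {
    // Built-in default location (placeholder value, for illustration only).
    let release_files: Mutex<Vec<String>> = Mutex::new(vec!["releases.yaml".to_string()]);

    // Values as they might arrive from repeated `--additional-releases-file` flags.
    let additional_releases_file = vec![
        "https://my.server/my-releases.yaml".to_string(),
        "/etc/my-releases.yaml".to_string(),
    ];

    // `extend_from_slice` clones out of the borrowed slice, so the parsed CLI arguments
    // stay untouched and no temporary Vec has to be built and drained.
    release_files
        .lock()
        .unwrap()
        .extend_from_slice(&additional_releases_file);

    assert_eq!(release_files.lock().unwrap().len(), 3);
}
----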
From 191edc842fe87d8f95e0c21c0adcd53af7d55b48 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:51:11 +0200 Subject: [PATCH 140/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 22c6c55e..88563f27 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -16,7 +16,7 @@ First you must create a `mycorp-demos.yaml` containing demos according to the fo After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. The `` can be either a path to a file on the local filesystem or a URL. -By using a URL the demos file can be put into to a central Git repository and referenced by all teams or clients. +By using a URL the demos file can be put into a central Git repository and referenced by all teams or clients. Multiple `--additional-demo-files` flags can be specified to include multiple demo files. Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. From fbf9e3c119f299b6c2c8a8c2660f5269c0049f47 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:51:28 +0200 Subject: [PATCH 141/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 88563f27..5ccacaf1 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -17,7 +17,7 @@ First you must create a `mycorp-demos.yaml` containing demos according to the fo After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. The `` can be either a path to a file on the local filesystem or a URL. By using a URL the demos file can be put into a central Git repository and referenced by all teams or clients. -Multiple `--additional-demo-files` flags can be specified to include multiple demo files. +Multiple `--additional-demos-file` flags can be specified to include multiple demo files. Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. 
== Add a new stack From 1025672726dbb1ff38adce6a10e9bbe347fac907 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:51:59 +0200 Subject: [PATCH 142/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 5ccacaf1..51abaa96 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -22,7 +22,7 @@ Every additional demo will we added to the already existing demos in `stackablec == Add a new stack === Benefits -If your company or clients have multiple similar setups or reference architectures it could make sense to make them easily available to all employees or clients. +If your company or clients have multiple similar setups or reference architectures, it could make sense to make them easily available to all employees or clients. In the custom defined Stack all Product versions are pinned as well, so you can easily spin up a Stack containing the exact same versions as your production setup. You can use your defined Stack to give it to colleagues or potential customers to show the overall architecture of the Data platform you're going to build. From 34cd79033b94d2b20a5d7bd6fb3b9dee62f6b3b9 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:52:26 +0200 Subject: [PATCH 143/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 51abaa96..d00a4526 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -29,7 +29,7 @@ You can use your defined Stack to give it to colleagues or potential customers t === Adding a new stack For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new stack. For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks.yaml[the Stackable provided stacks]. -You can than add it to `stackablectl` with the flag `--additional-stack-files`. +You can then add it to `stackablectl` with the flag `--additional-stacks-file`. == Add a new release From c2cb8ab8ee3eaf17613b01ade7b38766790d99c4 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:52:39 +0200 Subject: [PATCH 144/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index d00a4526..6d77f1e7 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -43,4 +43,4 @@ This has the following benefits: === Adding a new release For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new release. For a custom release you need to create a `mycorp-releases.yaml` containing releases according to the format defined by https://github.com/stackabletech/release/blob/main/releases.yaml[the Stackable provided releases]. 
-You can than add it to `stackablectl` with the flag `--additional-release-files`. +You can then add it to `stackablectl` with the flag `--additional-releases-file`. From 787f6c8868ca25c0f37acab75c8b1a199de92768 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:52:53 +0200 Subject: [PATCH 145/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 6d77f1e7..1cea1ceb 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -34,7 +34,7 @@ You can then add it to `stackablectl` with the flag `--additional-stacks-file`. == Add a new release === Benefits -If advanced users of the Stackable Platform want to define their own internal Release within their company they can easily add their own release. +If advanced users of the Stackable Platform want to define their own internal Release within their company, they can easily add their own release. This has the following benefits: - Same operator versions across the whole company. This produces more uniform environments and makes debugging and helping other teams easier. From 20f705401072d337232cfe3836880a19437b3758 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:53:08 +0200 Subject: [PATCH 146/177] Update docs/modules/ROOT/pages/index.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/index.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 57e65758..b0120f60 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -30,7 +30,7 @@ A stack is a collection of ready-to-use Stackable data products as well as neede Stacks are installed with the command `stackablectl stack`. A stack needs a release (of Stackable operators) to run on. -To achieve this a stacks has a dependency on a release which get's automatically installed when a stack is installed. +To achieve this a stacks has a dependency on a release which gets automatically installed when a stack is installed. == Demos A demo is an end-to-end demonstration of the usage of the Stackable Data Platform. From f8e1adff192209b17fa0565c35a2ae1a38e6680e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:53:23 +0200 Subject: [PATCH 147/177] Update docs/modules/ROOT/pages/index.adoc Co-authored-by: Siegfried Weber --- docs/modules/ROOT/pages/index.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index b0120f60..b0c540f5 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -45,5 +45,5 @@ It contains Demos are installed with the command `stackablectl demo`. A demo needs a stack to run on. -To achieve this a demo has a dependency on a stack which get's automatically installed when a demo is installed. +To achieve this a demo has a dependency on a stack which gets automatically installed when a demo is installed. The stack in turn will install the needed Stackable release. 
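For the custom-release workflow described above, the file passed via `--additional-releases-file` is ordinary YAML that gets deserialized and merged with the built-in releases. A hedged sketch of that mechanism with made-up field names (see the linked releases.yaml for the real schema):

[source,rust]
----
use std::collections::BTreeMap;

use serde::Deserialize;

// Illustrative structure only, NOT the real releases.yaml schema; it just shows that a
// custom release file is plain YAML deserialized with serde_yaml like the built-in one.
#[derive(Debug, Deserialize)]
struct MyRelease {
    description: String,
    // product name -> operator version (simplified)
    products: BTreeMap<String, String>,
}

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
description: Internal MyCorp release with pinned operator versions
products:
  commons: 0.3.0
  secret: 0.6.0
"#;
    let release: MyRelease = serde_yaml::from_str(yaml)?;
    println!("{release:#?}");
    Ok(())
}
----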
From 920a053e978b1395e0370e0768459c1dd2960120 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 09:55:14 +0200 Subject: [PATCH 148/177] docs --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 1cea1ceb..fe6f6b19 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -15,7 +15,7 @@ To easily achieve this you can create your own demo so that it can easily be rep First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. -The `` can be either a path to a file on the local filesystem or a URL. +The argument to `--additional-demos-file` can be either a path to a file on the local filesystem or a URL. By using a URL the demos file can be put into a central Git repository and referenced by all teams or clients. Multiple `--additional-demos-file` flags can be specified to include multiple demo files. Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. From 359de14ed5e6e03d61b3a230328ff59f38a38ccb Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 11:23:14 +0200 Subject: [PATCH 149/177] Make case of some common words consistent --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- docs/modules/ROOT/pages/customization.adoc | 6 +++--- docs/modules/ROOT/pages/installation.adoc | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index dc135d01..8382bc92 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -6,7 +6,7 @@ Operators manage the individual data products the Stackable Data Platform consis This command manages individual operators. It is mainly intended for persons already having experience or working on the Stackable Data Platform. If you just want an easy way to get started or don't know which products and/or which version to install it is recommended to use the xref:commands/release.adoc[] command. -This command will install a bundle of operators from an official Stackable Release. +This command will install a bundle of operators from an official Stackable release. == Browse available operators To list the operators that are part of the Stackable Data Platform as well as their stable versions run the following command: diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index fe6f6b19..902b915e 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -23,8 +23,8 @@ Every additional demo will we added to the already existing demos in `stackablec == Add a new stack === Benefits If your company or clients have multiple similar setups or reference architectures, it could make sense to make them easily available to all employees or clients.
-In the custom defined Stack all Product versions are pinned as well, so you can easily spin up a Stack containing the exact same versions as your production setup. -You can use your defined Stack to give it to colleagues or potential customers to show the overall architecture of the Data platform you're going to build. +In the custom defined stack all product versions are pinned as well, so you can easily spin up a stack containing the exact same versions as your production setup. +You can use your defined stack to give it to colleagues or potential customers to show the overall architecture of the Data Platform you're going to build. === Adding a new stack For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new stack. @@ -34,7 +34,7 @@ You can then add it to `stackablectl` with the flag `--additional-stacks-file`. == Add a new release === Benefits -If advanced users of the Stackable Platform want to define their own internal Release within their company, they can easily add their own release. +If advanced users of the Stackable Platform want to define their own internal release within their company, they can easily add their own release. This has the following benefits: - Same operator versions across the whole company. This produces more uniform environments and makes debugging and helping other teams easier. diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index 9d2dfe6e..76d49e8f 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -99,7 +99,7 @@ $ sudo cp target/release/stackablectl /usr/bin/stackablectl ---- == Configure auto-completion -`stackablectl` provides completion scripts for the major Shells out there. +`stackablectl` provides completion scripts for the major shells out there. It uses the same mechanism as `kubectl` does, so if you have any problems following this steps looking at https://kubernetes.io/docs/tasks/tools/included/[their installation documentation] may help you out. All of the https://docs.rs/clap_complete/3.2.3/clap_complete/shells/enum.Shell.html[supported shells of] https://crates.io/crates/clap_complete[`clap_complete`] are supported. 
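The auto-completion section above builds on the `clap_complete` crate. As a rough sketch of how such a completion script gets generated — assuming clap 3.x with the `derive` feature, and using a toy CLI definition rather than the real `stackablectl` command tree:

[source,rust]
----
// Minimal sketch of shell-completion generation with clap_complete (clap 3.x).
// The CLI below is a stand-in, not the actual stackablectl definition.
use clap::{CommandFactory, Parser};
use clap_complete::{generate, shells::Bash};
use std::io;

#[derive(Parser)]
#[clap(name = "stackablectl")]
struct Cli {
    /// Example flag, only here so the derived command is not empty
    #[clap(short, long)]
    verbose: bool,
}

fn main() {
    // Write a Bash completion script to stdout; the other shells supported by
    // clap_complete (Zsh, Fish, PowerShell, Elvish) work the same way.
    let mut cmd = Cli::command();
    generate(Bash, &mut cmd, "stackablectl", &mut io::stdout());
}
----

Users then source or install the emitted script the same way they would for `kubectl completion bash`.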
From e2a1cf21558e00edc68b03dafbee2f27f21bbaa1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 16:51:33 +0200 Subject: [PATCH 150/177] Improve error handling when retrieving gvk --- src/kube.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index f398a977..1a57e40a 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -19,8 +19,15 @@ pub async fn deploy_manifests(yaml: &str) -> Result<(), Box> { for manifest in serde_yaml::Deserializer::from_str(yaml) { let mut object = DynamicObject::deserialize(manifest)?; - let gvk = gvk_of_typemeta(object.types.as_ref().expect("Failed to get type of object")); - let (resource, capabilities) = discovery.resolve_gvk(&gvk).expect("Failed to resolve gvk"); + let gvk = gvk_of_typemeta( + object + .types + .as_ref() + .ok_or(format!("Failed to deploy manifest because type of object {object:?} is not set"))?, + ); + let (resource, capabilities) = discovery + .resolve_gvk(&gvk) + .ok_or(format!("Failed to deploy manifest because the gvk {gvk:?} can not be resolved"))?; let api: Api = match capabilities.scope { Scope::Cluster => { From 52ee6f87b794fb1cd11cc449a8caa46371dc7d72 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 17:03:22 +0200 Subject: [PATCH 151/177] Fix confusion app.kubernetes.io/name vs app.kubernetes.io/instance --- src/services.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/services.rs b/src/services.rs index e8473c84..012430a1 100644 --- a/src/services.rs +++ b/src/services.rs @@ -283,8 +283,8 @@ pub async fn get_stackable_services( let service_api: Api = Api::namespaced(client.clone(), object_namespace.as_str()); let service_list_params = ListParams::default() - .labels(format!("app.kubernetes.io/instance={product_name}").as_str()) - .labels(format!("app.kubernetes.io/name={object_name}").as_str()); + .labels(format!("app.kubernetes.io/name={product_name}").as_str()) + .labels(format!("app.kubernetes.io/instance={object_name}").as_str()); let services = service_api.list(&service_list_params).await?; let extra_infos = From b6834cc9d2ac14e48bbdf4af7cbd92e8817af586 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 17:23:31 +0200 Subject: [PATCH 152/177] Update src/kube.rs Co-authored-by: Siegfried Weber --- src/kube.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index 1a57e40a..a79c0063 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -162,7 +162,7 @@ async fn get_node_name_ip_mapping() -> Result, String> { .min_by_key(|address| &address.type_) // ExternalIP (which we want) is lower than InternalIP .map(|address| address.address.clone()) .ok_or(format!( - "Could not find a InternalIP or ExternalIP for node {node_name}" + "Could not find an ExternalIP or InternalIP for node {node_name}" ))?; result.insert(node_name, preferred_node_ip); } From 113c09b76b2056595e4baf46942a021180852b76 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 17:30:45 +0200 Subject: [PATCH 153/177] Improved error message when endpoint has 0 subsets --- src/kube.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kube.rs b/src/kube.rs index 1a57e40a..d1cfa403 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -76,7 +76,7 @@ pub async fn get_service_endpoint_urls( } }, Some(_) => { - warn!("Could not determine the node the endpoint {service_name} is running on because endpoints consists of multiple subsets"); + warn!("Could not determine the node 
the endpoint {service_name} is running on because endpoints consists of none or multiple subsets"); return Ok(IndexMap::new()); } None => { From 4b6711f618ca0de24b0e680525d4e4bc21b75ecf Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Thu, 4 Aug 2022 17:40:01 +0200 Subject: [PATCH 154/177] Improve error handling when address list is empty --- src/kube.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/kube.rs b/src/kube.rs index 29735bf6..ae364d53 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -19,15 +19,12 @@ pub async fn deploy_manifests(yaml: &str) -> Result<(), Box> { for manifest in serde_yaml::Deserializer::from_str(yaml) { let mut object = DynamicObject::deserialize(manifest)?; - let gvk = gvk_of_typemeta( - object - .types - .as_ref() - .ok_or(format!("Failed to deploy manifest because type of object {object:?} is not set"))?, - ); - let (resource, capabilities) = discovery - .resolve_gvk(&gvk) - .ok_or(format!("Failed to deploy manifest because the gvk {gvk:?} can not be resolved"))?; + let gvk = gvk_of_typemeta(object.types.as_ref().ok_or(format!( + "Failed to deploy manifest because type of object {object:?} is not set" + ))?); + let (resource, capabilities) = discovery.resolve_gvk(&gvk).ok_or(format!( + "Failed to deploy manifest because the gvk {gvk:?} can not be resolved" + ))?; let api: Api = match capabilities.scope { Scope::Cluster => { @@ -63,20 +60,24 @@ pub async fn get_service_endpoint_urls( let node_name = match &endpoints.subsets { Some(subsets) if subsets.len() == 1 => match &subsets[0].addresses { - Some(addresses) => match &addresses[0].node_name { + Some(addresses) if !addresses.is_empty() => match &addresses[0].node_name { Some(node_name) => node_name, None => { warn!("Could not determine the node the endpoint {service_name} is running on because the address of the subset didn't had a node name"); return Ok(IndexMap::new()); } }, + Some(_) => { + warn!("Could not determine the node the endpoint {service_name} is running on because the subset had no addresses"); + return Ok(IndexMap::new()); + } None => { warn!("Could not determine the node the endpoint {service_name} is running on because subset had no addresses. Is the service {service_name} up and running?"); return Ok(IndexMap::new()); } }, - Some(_) => { - warn!("Could not determine the node the endpoint {service_name} is running on because endpoints consists of none or multiple subsets"); + Some(subsets) => { + warn!("Could not determine the node the endpoint {service_name} is running on because endpoints consists of {num_subsets} subsets", num_subsets=subsets.len()); return Ok(IndexMap::new()); } None => { From edd169d074fcbad1441362ea928cffdce1e64ccc Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:38:47 +0200 Subject: [PATCH 155/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 8382bc92..9fa2a3cb 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -1,7 +1,7 @@ = Operator The `stackable operator` command allows to list, install and uninstall Stackable operators. -Operators manage the individual data products the Stackable Data Platform consists of. 
+Operators manage the individual data products of the Stackable Data Platform. This command manages individual operators. It is mainly intended for persons already having experience or working on the Stackable Data Platform. From 3267c27cf8992906fa857f3d1ec63ddd9aa7e4dc Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:39:01 +0200 Subject: [PATCH 156/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 9fa2a3cb..c2ca5baa 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -4,7 +4,7 @@ The `stackable operator` command allows to list, install and uninstall Stackable Operators manage the individual data products of the Stackable Data Platform. This command manages individual operators. -It is mainly intended for persons already having experience or working on the Stackable Data Platform. +It is mainly intended for people already having experience or working on the Stackable Data Platform. If you just want an easy way to get started or don't know which products and/or which version to install it is recommended to use the xref:commands/release.adoc[] command. This command will install a bundle of operators from an official Stackable release. From 48598cc9bd9354d036a72035ce8a1785f8ab8309 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:39:37 +0200 Subject: [PATCH 157/177] Update docs/modules/ROOT/pages/commands/operator.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/commands/operator.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index c2ca5baa..7f701c8d 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -32,7 +32,7 @@ trino 0.4.0, 0.3.1, 0.3.0, 0.2.0 zookeeper 0.9.0, 0.8.0, 0.7.0, 0.6.0, 0.10.0 ---- -This command only includes the stable versions for every operator to not mess up the whole screen. +This command only includes the stable versions of every operator for clarity. If you're interested in a special version of an operator you can use the `describe` command to get more details for a specific operator as follows: [source,console] From ae3a5659b23734f88a62f8796cfd21ebb3a06eed Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:41:37 +0200 Subject: [PATCH 158/177] Update docs/modules/ROOT/pages/commands/release.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/commands/release.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc index fc84f69e..9e894926 100644 --- a/docs/modules/ROOT/pages/commands/release.adoc +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -1,6 +1,6 @@ = Release -A release is a well-playing bundle of operators. +A release is a bundle of operators of a specific stable version. The stable versions of the operators are tested and proven to work hand in hand. If you want to install a single individual operator, have a look at the xref:commands/operator.adoc[] command. 
== Browse available releases From d4dc6f31a330fb57d4b351b5acbdaff4c19a38c7 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:41:53 +0200 Subject: [PATCH 159/177] Update docs/modules/ROOT/pages/commands/stack.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/commands/stack.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index 0e646689..5ce5c70d 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -1,5 +1,5 @@ = Stack -A stack is a collection of ready-to-use Stackable data products as well as needed third-party services like Postgresql or MinIO. +A stack is a collection of ready-to-use Stackable data products as well as required third-party services like Postgresql or MinIO. It is tied to a specific release of the Stackable Data Platform, which will provide the needed operators for the Stack. == Browse available stacks From 6c0a53e796e0ee92bc81ffb0bd92a774f7164e30 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:42:19 +0200 Subject: [PATCH 160/177] Update docs/modules/ROOT/pages/commands/stack.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/commands/stack.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc index 5ce5c70d..b2bffe6c 100644 --- a/docs/modules/ROOT/pages/commands/stack.adoc +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -1,6 +1,6 @@ = Stack A stack is a collection of ready-to-use Stackable data products as well as required third-party services like Postgresql or MinIO. -It is tied to a specific release of the Stackable Data Platform, which will provide the needed operators for the Stack. +It is tied to a specific release of the Stackable Data Platform, which will provide the required operators for the Stack. == Browse available stacks To list the available stacks, run the following command: From 94f07750c457b0000a08e9dd85e8557993a1a1da Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:45:19 +0200 Subject: [PATCH 161/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 902b915e..37c784d5 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -4,7 +4,7 @@ A single team can also operate multiple Stackable Data Platforms. `stackablectl` is build in a way customers or even single developers can define their own release, stack and even demo! This way it is possible to cover the following use-cases. -Any additional demos/stacks/releases you specify, will be added to the already existing provided by Stackable. +Any additional demos/stacks/releases you specify, will be added to the already existing ones provided by Stackable. 
== Add a new demo === Benefits From d36972c280e6291124e46d5814da0209fa9c74f1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:47:35 +0200 Subject: [PATCH 162/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index 37c784d5..a73e0f37 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -16,7 +16,7 @@ First you must create a `mycorp-demos.yaml` containing demos according to the fo After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. The argument to `--additional-demos-file` can be either a path to a file on the local filesystem or a URL. -By using a URL the demos file can be put into a central Git repository and referenced by all teams or clients. +By using an URL the demos file can be put into a central Git repository and referenced by all teams or clients. Multiple `--additional-demos-file` flags can be specified to include multiple demo files. Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. From a5bd2cb614ef08821639744e017d578e070dc727 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Mon, 8 Aug 2022 08:47:43 +0200 Subject: [PATCH 163/177] Update docs/modules/ROOT/pages/customization.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/customization.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index a73e0f37..a8ca2fa7 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -15,7 +15,7 @@ To easily achieve this you can create your own demo so that it can easily be rep First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. -The argument to `--additional-demos-file` can be either a path to a file on the local filesystem or a URL. +The argument to `--additional-demos-file` can be either a path to a file on the local filesystem or an URL. By using an URL the demos file can be put into a central Git repository and referenced by all teams or clients. Multiple `--additional-demos-file` flags can be specified to include multiple demo files. Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. 
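Since `--additional-demos-file` (and the corresponding stacks and releases flags) accepts either a local path or an URL, a resolver for such an argument can be sketched as follows. This is only a sketch under assumptions: the helper name and the use of `reqwest` with the `blocking` feature are illustrative, not the actual `stackablectl` implementation.

[source,rust]
----
// Rough sketch of a "local file or URL" reader for arguments like
// --additional-demos-file. Assumes reqwest with the `blocking` feature.
use std::{error::Error, fs};

fn read_from_url_or_file(source: &str) -> Result<String, Box<dyn Error>> {
    if source.starts_with("http://") || source.starts_with("https://") {
        // Remote file, e.g. a demos file kept in a central Git repository
        // and referenced by every team or client.
        Ok(reqwest::blocking::get(source)?.text()?)
    } else {
        // Local file on the developer's machine.
        Ok(fs::read_to_string(source)?)
    }
}

fn main() -> Result<(), Box<dyn Error>> {
    let yaml = read_from_url_or_file("mycorp-demos.yaml")?;
    println!("{yaml}");
    Ok(())
}
----

The contents of every such file are then merged with the demos shipped by Stackable, which is why multiple `--additional-demos-file` flags can be combined.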
From ab51acc3e8f4bf55aa2a90c739ca3abe38a42c55 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 10:45:49 +0200 Subject: [PATCH 164/177] docs --- .../charts/Average_trip_duration_138.yaml | 65 ++++++++ .../charts/Payment_types_136.yaml | 58 ++++++++ .../charts/Total_income_139.yaml | 55 +++++++ .../charts/Total_trips_140.yaml | 39 +++++ .../charts/Trips_income_137.yaml | 85 +++++++++++ .../dashboards/Taxi_data.yaml | 139 ++++++++++++++++++ .../databases/Trino.yaml | 18 +++ .../datasets/Trino/ny_taxi_data.yaml | 137 +++++++++++++++++ .../datasets/Trino/ny_taxi_data_raw.yaml | 125 ++++++++++++++++ .../metadata.yaml | 3 + demos/trino-taxi-data/superset/assets-old.zip | Bin 0 -> 17425 bytes demos/trino-taxi-data/superset/setup.py | 55 +++++++ .../modules/ROOT/pages/commands/operator.adoc | 2 +- 13 files changed, 780 insertions(+), 1 deletion(-) create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml create mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml create mode 100644 demos/trino-taxi-data/superset/assets-old.zip create mode 100644 demos/trino-taxi-data/superset/setup.py diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml new file mode 100644 index 00000000..c10fb58b --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml @@ -0,0 +1,65 @@ +slice_name: Average trip duration +viz_type: echarts_timeseries_line +params: + adhoc_filters: [] + annotation_layers: [] + color_scheme: supersetColors + comparison_type: values + datasource: 25__table + extra_form_data: {} + forecastInterval: 0.8 + forecastPeriods: 10 + granularity_sqla: tpep_pickup_datetime + groupby: [] + legendOrientation: top + legendType: scroll + markerSize: 6 + metrics: + - aggregate: AVG + column: + certification_details: null + certified_by: null + column_name: duration_min + description: null + expression: null + filterable: true + groupby: true + id: 783 + is_certified: false + is_dttm: false + python_date_format: null + type: BIGINT + type_generic: 0 + verbose_name: null + warning_markdown: null + expressionType: SIMPLE + hasCustomLabel: true + isNew: false + label: Average trip duration + optionName: metric_w9gstqe6mu_4k2lcfiaojv + sqlExpression: null + only_total: true + order_desc: true + rich_tooltip: true + row_limit: 10000 + time_grain_sqla: P1D + time_range: No filter + time_range_endpoints: + - inclusive + - exclusive + tooltipTimeFormat: smart_date + viz_type: echarts_timeseries_line + x_axis_time_format: smart_date + x_axis_title: 
Day + x_axis_title_margin: 30 + y_axis_bounds: + - null + - null + y_axis_format: SMART_NUMBER + y_axis_title: Average trinp duration (minutes) + y_axis_title_margin: 30 + y_axis_title_position: Left +cache_timeout: null +uuid: 17a65a0a-9d82-4596-9823-b5ed56854299 +version: 1.0.0 +dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml new file mode 100644 index 00000000..29729133 --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml @@ -0,0 +1,58 @@ +slice_name: Payment types +viz_type: pie +params: + adhoc_filters: [] + color_scheme: d3Category10 + datasource: 25__table + date_format: smart_date + donut: true + extra_form_data: {} + granularity_sqla: tpep_pickup_datetime + groupby: + - payment_type + innerRadius: 30 + label_line: true + label_type: key_value_percent + labels_outside: true + legendOrientation: top + legendType: scroll + metric: + aggregate: SUM + column: + certification_details: null + certified_by: null + column_name: total_amount + description: null + expression: null + filterable: true + groupby: true + id: 780 + is_certified: false + is_dttm: false + python_date_format: null + type: DOUBLE + type_generic: 0 + verbose_name: null + warning_markdown: null + expressionType: SIMPLE + hasCustomLabel: false + isNew: false + label: SUM(total_amount) + optionName: metric_h5mdqb4w18k_9ix6pqy46a + sqlExpression: null + number_format: SMART_NUMBER + outerRadius: 58 + row_limit: 100 + show_labels: true + show_labels_threshold: 3 + show_legend: true + sort_by_metric: true + time_range: No filter + time_range_endpoints: + - inclusive + - exclusive + viz_type: pie +cache_timeout: null +uuid: b90ad164-9cee-4151-83dd-ea5756a759e1 +version: 1.0.0 +dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml new file mode 100644 index 00000000..ccfa79ca --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml @@ -0,0 +1,55 @@ +slice_name: Total income +viz_type: big_number +params: + adhoc_filters: [] + color_picker: + a: 1 + b: 135 + g: 122 + r: 0 + datasource: 25__table + extra_form_data: {} + granularity_sqla: tpep_pickup_datetime + header_font_size: 0.4 + metric: + aggregate: SUM + column: + certification_details: null + certified_by: null + column_name: total_amount + description: null + expression: null + filterable: true + groupby: true + id: 780 + is_certified: false + is_dttm: false + python_date_format: null + type: DOUBLE + type_generic: 0 + verbose_name: null + warning_markdown: null + expressionType: SIMPLE + hasCustomLabel: true + isNew: false + label: Income + optionName: metric_xtjhi12ixqf_x778dcicf2 + sqlExpression: null + rolling_periods: 10000000 + rolling_type: sum + show_trend_line: true + slice_id: 139 + start_y_axis_at_zero: true + subheader_font_size: 0.15 + time_format: smart_date + time_grain_sqla: P1D + time_range: No filter + time_range_endpoints: + - inclusive + - exclusive + viz_type: big_number + y_axis_format: SMART_NUMBER +cache_timeout: null +uuid: 7bb34e00-b66f-4bd7-b4c9-181a4a747762 +version: 1.0.0 +dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git 
a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml new file mode 100644 index 00000000..4de2739c --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml @@ -0,0 +1,39 @@ +slice_name: Total trips +viz_type: big_number +params: + adhoc_filters: [] + color_picker: + a: 1 + b: 135 + g: 122 + r: 0 + datasource: 25__table + extra_form_data: {} + granularity_sqla: tpep_pickup_datetime + header_font_size: 0.4 + metric: + aggregate: null + column: null + expressionType: SQL + hasCustomLabel: false + isNew: false + label: COUNT(*) + optionName: metric_ngbx3sztk6_31a1t9wh4wn + sqlExpression: COUNT(*) + rolling_periods: 100000 + rolling_type: sum + show_trend_line: true + start_y_axis_at_zero: true + subheader_font_size: 0.15 + time_format: smart_date + time_grain_sqla: P1D + time_range: No filter + time_range_endpoints: + - inclusive + - exclusive + viz_type: big_number + y_axis_format: SMART_NUMBER +cache_timeout: null +uuid: 95136b8e-084e-46b5-b223-cd4a5879584b +version: 1.0.0 +dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml new file mode 100644 index 00000000..5c9cea66 --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml @@ -0,0 +1,85 @@ +slice_name: Trips income +viz_type: mixed_timeseries +params: + adhoc_filters: [] + adhoc_filters_b: [] + annotation_layers: [] + area: true + areaB: true + color_scheme: supersetColors + datasource: 25__table + extra_form_data: {} + granularity_sqla: tpep_pickup_datetime + groupby: [] + groupby_b: [] + legendOrientation: top + legendType: scroll + markerSize: 6 + markerSizeB: 6 + metrics: + - aggregate: null + column: null + expressionType: SQL + hasCustomLabel: true + isNew: false + label: Trips + optionName: metric_mdo9hflvtr_aqfvxj57kp + sqlExpression: count(*) + metrics_b: + - aggregate: SUM + column: + certification_details: null + certified_by: null + column_name: total_amount + description: null + expression: null + filterable: true + groupby: true + id: 780 + is_certified: false + is_dttm: false + python_date_format: null + type: DOUBLE + type_generic: 0 + verbose_name: null + warning_markdown: null + expressionType: SIMPLE + hasCustomLabel: true + isNew: false + label: Income + optionName: metric_ri0f4uk435_ydmgway2gbd + sqlExpression: null + opacity: 0.2 + opacityB: 0.2 + order_desc: true + order_desc_b: true + rich_tooltip: true + row_limit: 10000 + row_limit_b: 10000 + seriesType: line + seriesTypeB: line + show_legend: true + time_grain_sqla: P1D + time_range: No filter + time_range_endpoints: + - inclusive + - exclusive + tooltipTimeFormat: smart_date + viz_type: mixed_timeseries + x_axis_time_format: smart_date + x_axis_title: Day + x_axis_title_margin: 30 + yAxisIndexB: 1 + yAxisTitleSecondary: Income + y_axis_bounds: + - null + - null + y_axis_format: SMART_NUMBER + y_axis_format_secondary: SMART_NUMBER + y_axis_title: Trips + y_axis_title_margin: 15 + y_axis_title_position: Top +cache_timeout: null +uuid: 95c0cfcc-e688-426d-8976-61386083761a +version: 1.0.0 +dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml 
b/demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml new file mode 100644 index 00000000..e8599d43 --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml @@ -0,0 +1,139 @@ +dashboard_title: Taxi data +description: null +css: '' +slug: null +uuid: bf510653-d7a8-427c-9339-4af93facf9f9 +position: + CHART-U_CW3gyJhx: + children: [] + id: CHART-U_CW3gyJhx + meta: + chartId: 140 + height: 26 + sliceName: Total trips + uuid: 95136b8e-084e-46b5-b223-cd4a5879584b + width: 3 + parents: + - ROOT_ID + - GRID_ID + - ROW-Vv8MAQytk_ + type: CHART + CHART-Vx2efXVypw: + children: [] + id: CHART-Vx2efXVypw + meta: + chartId: 136 + height: 44 + sliceName: Payment types + uuid: b90ad164-9cee-4151-83dd-ea5756a759e1 + width: 4 + parents: + - ROOT_ID + - GRID_ID + - ROW-rj_mE1OZm + type: CHART + CHART-kDGYtmfNzz: + children: [] + id: CHART-kDGYtmfNzz + meta: + chartId: 138 + height: 31 + sliceName: Average trip duration + uuid: 17a65a0a-9d82-4596-9823-b5ed56854299 + width: 8 + parents: + - ROOT_ID + - GRID_ID + - ROW-rj_mE1OZm + type: CHART + CHART-lD7AleJ0fL: + children: [] + id: CHART-lD7AleJ0fL + meta: + chartId: 139 + height: 26 + sliceName: Total income + uuid: 7bb34e00-b66f-4bd7-b4c9-181a4a747762 + width: 3 + parents: + - ROOT_ID + - GRID_ID + - ROW-Vv8MAQytk_ + type: CHART + CHART-mWq6G6dTKX: + children: [] + id: CHART-mWq6G6dTKX + meta: + chartId: 137 + height: 37 + sliceName: Trips income + uuid: 95c0cfcc-e688-426d-8976-61386083761a + width: 12 + parents: + - ROOT_ID + - GRID_ID + - ROW-RCpJgstvqs + type: CHART + DASHBOARD_VERSION_KEY: v2 + GRID_ID: + children: + - ROW-Vv8MAQytk_ + - ROW-RCpJgstvqs + - ROW-rj_mE1OZm + id: GRID_ID + parents: + - ROOT_ID + type: GRID + HEADER_ID: + id: HEADER_ID + meta: + text: Taxi data + type: HEADER + ROOT_ID: + children: + - GRID_ID + id: ROOT_ID + type: ROOT + ROW-RCpJgstvqs: + children: + - CHART-mWq6G6dTKX + id: ROW-RCpJgstvqs + meta: + background: BACKGROUND_TRANSPARENT + parents: + - ROOT_ID + - GRID_ID + type: ROW + ROW-Vv8MAQytk_: + children: + - CHART-U_CW3gyJhx + - CHART-lD7AleJ0fL + id: ROW-Vv8MAQytk_ + meta: + background: BACKGROUND_TRANSPARENT + parents: + - ROOT_ID + - GRID_ID + type: ROW + ROW-rj_mE1OZm: + children: + - CHART-Vx2efXVypw + - CHART-kDGYtmfNzz + id: ROW-rj_mE1OZm + meta: + background: BACKGROUND_TRANSPARENT + parents: + - ROOT_ID + - GRID_ID + type: ROW +metadata: + show_native_filters: true + shared_label_colors: {} + color_scheme: '' + refresh_frequency: 0 + expanded_slices: {} + label_colors: {} + timed_refresh_immune_slices: [] + default_filters: '{}' + chart_configuration: {} +version: 1.0.0 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml new file mode 100644 index 00000000..21e83481 --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml @@ -0,0 +1,18 @@ +database_name: Trino +sqlalchemy_uri: trino://demo:XXXXXXXXXX@trino-coordinator:8443/hive +cache_timeout: null +expose_in_sqllab: true +allow_run_async: false +allow_ctas: true +allow_cvas: true +allow_csv_upload: false +extra: + allows_virtual_table_explore: true + metadata_params: {} + engine_params: + connect_args: + verify: false + cost_estimate_enabled: false + schemas_allowed_for_csv_upload: [] +uuid: 3dbf65a2-24d8-477f-8a26-14b45ffea214 +version: 1.0.0 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml 
b/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml new file mode 100644 index 00000000..3ef1d383 --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml @@ -0,0 +1,137 @@ +table_name: ny_taxi_data +main_dttm_col: tpep_pickup_datetime +description: null +default_endpoint: null +offset: 0 +cache_timeout: null +schema: demo +sql: '' +params: null +template_params: null +filter_select_enabled: false +fetch_values_predicate: null +extra: null +uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 +metrics: +- metric_name: count + verbose_name: COUNT(*) + metric_type: count + expression: COUNT(*) + description: null + d3format: null + extra: + warning_markdown: '' + warning_text: null +columns: +- column_name: payment_type + verbose_name: null + is_dttm: false + is_active: true + type: VARCHAR(12) + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: tpep_pickup_datetime + verbose_name: null + is_dttm: true + is_active: true + type: TIMESTAMP(3) + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: tpep_dropoff_datetime + verbose_name: null + is_dttm: true + is_active: true + type: TIMESTAMP(3) + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: fare_amount + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: total_amount + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: trip_distance + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: passenger_count + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: tip_amount + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: duration_min + verbose_name: null + is_dttm: false + is_active: true + type: BIGINT + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +- column_name: vendorid + verbose_name: null + is_dttm: false + is_active: true + type: BIGINT + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: {} +version: 1.0.0 +database_uuid: 3dbf65a2-24d8-477f-8a26-14b45ffea214 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml new file mode 100644 index 00000000..f83436eb --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml @@ -0,0 +1,125 @@ +table_name: ny_taxi_data_raw +main_dttm_col: tpep_pickup_datetime +description: null +default_endpoint: null +offset: 0 +cache_timeout: null +schema: demo +sql: null +params: null 
+template_params: null +filter_select_enabled: false +fetch_values_predicate: null +extra: null +uuid: 6ab2bb0c-e8a4-46fd-8690-d5c93466a364 +metrics: +- metric_name: count + verbose_name: COUNT(*) + metric_type: count + expression: COUNT(*) + description: null + d3format: null + extra: null + warning_text: null +columns: +- column_name: tpep_pickup_datetime + verbose_name: null + is_dttm: true + is_active: true + type: TIMESTAMP(3) + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: tpep_dropoff_datetime + verbose_name: null + is_dttm: true + is_active: true + type: TIMESTAMP(3) + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: fare_amount + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: total_amount + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: trip_distance + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: passenger_count + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: tip_amount + verbose_name: null + is_dttm: false + is_active: true + type: DOUBLE + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: payment_type + verbose_name: null + is_dttm: false + is_active: true + type: BIGINT + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +- column_name: vendorid + verbose_name: null + is_dttm: false + is_active: true + type: BIGINT + groupby: true + filterable: true + expression: null + description: null + python_date_format: null + extra: null +version: 1.0.0 +database_uuid: 3dbf65a2-24d8-477f-8a26-14b45ffea214 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml new file mode 100644 index 00000000..919b5425 --- /dev/null +++ b/demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml @@ -0,0 +1,3 @@ +version: 1.0.0 +type: assets +timestamp: '2022-08-08T13:06:07.743262+00:00' diff --git a/demos/trino-taxi-data/superset/assets-old.zip b/demos/trino-taxi-data/superset/assets-old.zip new file mode 100644 index 0000000000000000000000000000000000000000..aa1ce0d5973ed5c2b0114f7ac511a95d48ade18f GIT binary patch literal 17425 zcmeHO%a0sK8J~~@#6-D8LL3l{P-H`BdwRO3=Ya%S?=H^T-bZ%Fb^=1B>h9{9_V#18 zAG7myG~ByR8*aDx*PAR)vJj+}`QoVX$QzN&srZ_m!Gc4K=@#%{}9U9az} zufE6ctJ=H!;!CeJ#P17@|NL<4S9jnApEuy4QN-eiu<0ZSW72N5+pS)!H&oP?*3xcz zET$HX>5Uon++!9-PT&tD^fmh>bk)R2@{{tXy%&($);|U6`L`Q=IWWp)q?1lBojBF zR@RbDNXhVpU5d|r}dIT4=&@|{dVcI>c|qH%;w zrxD@PW)`u7keDUvzV+>9k~q+vY8kfPp>4UXSv^_nc5S&w+q$f1hSsrdM%#+ktVrFw z=)7|9`Jex+*=XSNwUtB%rCmtQO@9WgoH`^IaKTt1L*Llz13QmT8JsUUnR2o%U=I%SaYY@bNd2@QS69}$m+hgNU` zx(c{g6vt3CZ3YXOcs`%L_-{JV37vV&j|ooi6cwERlH^2O7z&Ap0&2#fFY?_esC%%s 
zzy8+R{&l5|gpa}?nHaFk(j6<73^!6I^-Eys0liH{s>G=rn2W1{8g1dSgPks`m7|R z=!6jPaxZMaZf)J(+8I7|EpZGkR1iAWm8GZR#G=QEF$_6}A3X2){`DV!{O#{Q-Du!* z>mrUX3F!%sp%yv6SmEh9z+AP=IlCXd3#?P4Z5S<6W<9FOnr>TiPw%(D1vdMtrt4JI zwP(%+7C!vB+}-EQE99b5=lDKHeqkMrSKc$MBPzb~zF{p@SKm9VBkQ^F59n#(1cIz4iQMVC@+An2*ACV13dU8@DZ0!aVm}kazqTj1E|R{@KH#Cxioa* z86x&jyujeAc$k4 z{Ch+ol@K{rsTmtVfU_GqP>aWI&|xsaA^3+OPqYBK96<6R!%qcrPe-GWjX0KjaDN*< zas~tFmjU}G3uDJdxEDi03(`4`3j}as!`e_^)Bv0wp~>=xIrMPe7GuLYU=qt~Fib42 zK|8TH<9W7(ScbWQZV!3P-y)LFv4C9DF9^!d7oLHQVjHnIIjE8q5m*7pfk4(K8@3kS z!0M+l=A3B->0-?ewzl^;$R;fUeDoFv) zFzoO=SBj*MJDzo9XeUbVko29YJ~^6cI_0%N+&4?edOA@*@eBZRGUWJRdu@M6cJ6QA z+T4d$Af1JzcY06~LSD;tJO?(K(n9`3V=O^JK$gF^AmRj|X&~(csn9?dMR}f1;mIpma(vTGBFxp{u4U8WZC)+aW%5M$ZS-5z zQglu3n~Xuaqoc?@)v{zpJKc^>yPZB$s%a>$saTu_@GB)u<^%ZT-#7pA(@!-T_zYH6 z%tP=UBS<`&0dPrCyQlOruYfFp3MIAdIaAo*$ioPP95i+sO#{p7bo5*qf&3=d+|>6$ z32;5)(i!A}v+|IkO2rf?bO$fD^0#?qA0?m(A@#M6CLn^!i`4xksc|vwRKetA2%WmJ zvV}wv3lE&fAdfms852w59jN35SsDzbucVH1ogk7hCCz$DNxXdzIzFu-QjQ{oU#}qe zV)LdoO2M;&{@8Yp+9!0-JYUCvZ5ooSBP&RZww`nvabOa!!)2h;dme zI$J_RAW1c{6FMzhOAa-)LuQsYI-#@n$UyM*l<=fd9!#hS-W3?n8*QkTzQPVk-$M(` z4f+eYW`MI6X%TMBbiQDJ$0QB{7hO$i)Rh`9dF zjrLsYyh4n*O9}ee0Qpa_+2EA?h7MlWF6sXDzh7L47U~oFbq9H70E+R8A zx&diNE48`lvdfvfm{wWfP|SUW6AJkd6~`m37vn_uA)W~uiE?R9htjEjfg?GNID0V! z*LktA?|00WX`2A->ODAf($)dk?RRxqSJa-~>Zx5_p_gv#@BU%?N8L|08u+ZQYV6m* z1EnK4e8d5xEUN?4JZ1IMny`(WiInAkO6H%5xUS>?@UUf>7qGzIFEQ%idVqJ~L=bKW zUt1hIf%Q6{xd&llCXM1PAKX#s1C;UgRXCFY5TVoR-M{r!|{wRu%nC>M+ zJ8ZkA?a<)SF=t@K^U92;@4`VejIVQ>S6IiqXjR+zOxdzUil=0qAdtz-#zjopCE2Ld z_LLUfFEpxj9a8EL97h<5*P11c3(b8g%+ft($7k3jrAy%`WQ)putJjvbPG6V%y|xMm z@0iumdmXLa?_auIx+d?v`{7G)@)n-egHR3ytWA8f&#=L)UFtGpw#`Xl7qldJ5HO7tWXK?Mqjo-}&|5UJqYv zH1O%K>e_P+$sK!0uV}3W8Wi36a&o1_qOe8l_7dw8VZ)~(^)cb4*{sCpJ(x)r{tSwTr|hUg7tQDvDgD(=ht{swulxqq;=yF>17zB`bPaimmdRd~fa zLG^Y@g^9w-aH4R2Powu=R)}X1B-A2ShH>B8T-(^(2Q@0Dgx%ytd|G9-C+^IlkrP&| zpk1_q2C0g!C$Kz09I33&#jLP|_wrCKh_3HU=<|wDiC)W%PZ42I^AHokK1|@&+WOtw z`*0s|gADiAb`JK|_BZ(*hjW;oY~l~oc^5|Lta%sanNLto%~M5pWaJcmK5=q#F*}Qz zimA+~saT4Pn!L*=NDPWGfdi z#s@F_WNF8^EY(~5)&8rOyNmw+^oeB?=UCtwSG}uaRqxXjWBu;c3yhWDuBnb!J(pRG z_s`EP8}H0hn$>}-=ZuPhe(<>!2jZ5udg`b;PBq0@jFY^+;y9^ixFF$K9k7aB7X!Zf zl@$jRFiZu>u8va$c#3iMlyi;4BcvSesSZ;OKZ{{L+gUbD9&=WM(Y?DbL&mw$Xu_XY L0OSAH*YWLtkQaWn literal 0 HcmV?d00001 diff --git a/demos/trino-taxi-data/superset/setup.py b/demos/trino-taxi-data/superset/setup.py new file mode 100644 index 00000000..5de0e60b --- /dev/null +++ b/demos/trino-taxi-data/superset/setup.py @@ -0,0 +1,55 @@ +import logging +import requests + +base_url = "http://superset:8088" +# base_url = "http://172.18.0.4:31024" +username = "admin" +password = "admin" + +logging.basicConfig(level=logging.INFO) +logging.info("Starting setup of Superset") + +logging.info("Getting access token from /api/v1/security/login") +session = requests.session() +access_token = session.post(f"{base_url}/api/v1/security/login", json={"username": username, "password": password, "provider": "db", "refresh": True}).json()['access_token'] +# print(f"access_token: {access_token}") + +logging.info("Getting csrf token from /api/v1/security/csrf_token") +csrf_token = session.get(f"{base_url}/api/v1/security/csrf_token", headers={"Authorization": f"Bearer {access_token}"}).json()["result"] +# print(f"csrf_token: {csrf_token}") + +headers = { + "accept": "application/json", + "Authorization": f"Bearer {access_token}", + "X-CSRFToken": csrf_token, +} + +# logging.info("Exporting all assets") +# result = session.get(f"{base_url}/api/v1/assets/export", headers=headers) +# assert 
result.status_code == 200 +# with open("assets.zip", "wb") as f: +# f.write(result.content) + + +######################### +# IMPORTANT +######################### +# The exported file had to be modified, otherwise we get: +# +# {"errors": [{"message": "Error importing assets", "error_type": "GENERIC_COMMAND_ERROR", "level": "warning", "extra": {"databases/Trino.yaml": {"extra": {"disable_data_preview": ["Unknown field."]}}, "issue_codes": [{"code": 1010, "message": "Issue 1010 - Superset encountered an error while running a command."}]}}]} +# +# The file databases/Trino.yaml was modified and the attribute "extra.disable_data_preview" was removed +######################### +logging.info("Importing all assets") +files = { + "bundle": ("assets.zip", open("assets.zip", "rb")), +} +data = { + "passwords": '{"databases/Trino.yaml": "demo"}' +} +result = session.post(f"{base_url}/api/v1/assets/import", headers=headers, files=files, json=data) +print(result) +print(result.text) +assert result.status_code == 200 + +logging.info("Finished setup of Superset") diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc index 7f701c8d..048bdfe3 100644 --- a/docs/modules/ROOT/pages/commands/operator.adoc +++ b/docs/modules/ROOT/pages/commands/operator.adoc @@ -46,7 +46,7 @@ Dev versions: 0.5.0-nightly, 0.4.0-nightly, 0.3.0-nightly, 0.2.0-nightly, == Install operator If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. -After that run the following command, which will install the operators in their latest version. +After that run the following command, which will install the operators in their latest nightly version - built from the main branch of the operators. [source,console] ---- From d90223d589cd317b961fb3d0bd5f3f625538a374 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 10:47:15 +0200 Subject: [PATCH 165/177] Update docs/modules/ROOT/pages/index.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/index.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index b0c540f5..caaae42b 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -41,7 +41,7 @@ It contains . Performing the actual demo .. Prepare some test data .. Process test data -.. visualize results (optional) +.. Visualize results (optional) Demos are installed with the command `stackablectl demo`. A demo needs a stack to run on. From b9cd0439e3890b14e0d92a7abd7f07433fcec846 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 10:47:25 +0200 Subject: [PATCH 166/177] Update docs/modules/ROOT/pages/installation.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/installation.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index 76d49e8f..220320fb 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -1,7 +1,7 @@ = Installation == Pre-compiled binary -We ship pre-compiled binaries of `stackablectl` which should work on most environments such as Windows, macOS, and Linux distros like Ubuntu and Arch. 
+Stackable ships pre-compiled binaries of `stackablectl` which should work on most environments such as Windows, macOS, and Linux distros like Ubuntu and Arch. Below are the installation instructions for <>, <> and <>. If the binary does not work for you, you can always <<_build_stackablectl_from_source>> From 1edd898267f1412ef77aec533aba1f5fd0a9d8d1 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 10:47:46 +0200 Subject: [PATCH 167/177] Update docs/modules/ROOT/pages/installation.adoc Co-authored-by: Malte Sander --- docs/modules/ROOT/pages/installation.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index 220320fb..785ddbb4 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -100,7 +100,7 @@ $ sudo cp target/release/stackablectl /usr/bin/stackablectl == Configure auto-completion `stackablectl` provides completion scripts for the major shells out there. -It uses the same mechanism as `kubectl` does, so if you have any problems following this steps looking at https://kubernetes.io/docs/tasks/tools/included/[their installation documentation] may help you out. +It uses the same mechanism as `kubectl` does, so if you have any problems following this steps, looking at https://kubernetes.io/docs/tasks/tools/included/[their installation documentation] may help you out. All of the https://docs.rs/clap_complete/3.2.3/clap_complete/shells/enum.Shell.html[supported shells of] https://crates.io/crates/clap_complete[`clap_complete`] are supported. As of `07/2022` this includes the following shells: From 92d09100c1517364d42d8138c912b81f3348ab8e Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 10:53:18 +0200 Subject: [PATCH 168/177] Handle unwrap() --- src/services.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/services.rs b/src/services.rs index 012430a1..dc332a84 100644 --- a/src/services.rs +++ b/src/services.rs @@ -446,17 +446,17 @@ async fn get_minio_services( let admin_user = admin_user .value_from .as_ref() - .unwrap() + .ok_or("MinIO admin user env var needs to have an valueFrom entry")? .secret_key_ref .as_ref() - .unwrap(); + .ok_or("MinIO admin user env var needs to have an secretKeyRef in the valueFrom entry")?; let admin_password = admin_password .value_from .as_ref() - .unwrap() + .ok_or("MinIO admin password env var needs to have an valueFrom entry")? 
.secret_key_ref .as_ref() - .unwrap(); + .ok_or("MinIO admin password env var needs to have an secretKeyRef in the valueFrom entry")?; let api: Api = Api::namespaced(client.clone(), &deployment_namespace); let admin_user_secret = api.get(admin_user.name.as_ref().unwrap()).await; From 348510386f6e7cb32fdc676450246790f649a337 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 11:14:55 +0200 Subject: [PATCH 169/177] Use discovery to determine if Product CRD is installed --- src/services.rs | 100 +++++++++++++++++++++++------------------------- 1 file changed, 48 insertions(+), 52 deletions(-) diff --git a/src/services.rs b/src/services.rs index dc332a84..dbfb0356 100644 --- a/src/services.rs +++ b/src/services.rs @@ -10,8 +10,7 @@ use k8s_openapi::api::{ }; use kube::{ api::{DynamicObject, GroupVersionKind, ListParams}, - core::ErrorResponse, - Api, ResourceExt, + Api, Discovery, ResourceExt, }; use lazy_static::lazy_static; use log::{debug, warn}; @@ -260,66 +259,63 @@ pub async fn get_stackable_services( let namespace = NAMESPACE.lock()?.clone(); let client = get_client().await?; + let discovery = Discovery::new(client.clone()).run().await?; for (product_name, product_gvk) in STACKABLE_PRODUCT_CRDS.iter() { - let object_api_resource = kube::core::discovery::ApiResource::from_gvk(product_gvk); + let object_api_resource = match discovery.resolve_gvk(product_gvk) { + Some((object_api_resource, _)) => object_api_resource, + None => { + debug!("Failed to list services of product {product_name} because the gvk {product_gvk:?} can not be resolved"); + continue; + } + }; + let object_api: Api = match namespaced { true => Api::namespaced_with(client.clone(), &namespace, &object_api_resource), false => Api::all_with(client.clone(), &object_api_resource), }; - let objects = object_api.list(&ListParams::default()).await; - match objects { - Ok(objects) => { - let mut installed_products = Vec::new(); - for object in objects { - let object_name = object.name(); - let object_namespace = match object.namespace() { - Some(namespace) => namespace, - // If the custom resource does not have a namespace set it can't expose a service - None => continue, - }; - - let service_api: Api = - Api::namespaced(client.clone(), object_namespace.as_str()); - let service_list_params = ListParams::default() - .labels(format!("app.kubernetes.io/name={product_name}").as_str()) - .labels(format!("app.kubernetes.io/instance={object_name}").as_str()); - let services = service_api.list(&service_list_params).await?; - - let extra_infos = - get_extra_infos(product_name, &object, redact_credentials, show_versions) - .await?; - - let mut endpoints = IndexMap::new(); - for service in services { - let service_endpoint_urls = - get_service_endpoint_urls(&service, &object_name, client.clone()).await; - match service_endpoint_urls { - Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), - Err(err) => warn!( - "Failed to get endpoint_urls of service {service_name}: {err}", - service_name = service.name(), - ), - } - } - let product = InstalledProduct { - name: object_name, - namespace: Some(object_namespace), - endpoints, - extra_infos, - }; - installed_products.push(product); + let objects = object_api.list(&ListParams::default()).await?; + let mut installed_products = Vec::new(); + for object in objects { + let object_name = object.name(); + let object_namespace = match object.namespace() { + Some(namespace) => namespace, + // If the custom resource does not have a namespace set it can't expose a 
service + None => continue, + }; + + let service_api: Api = + Api::namespaced(client.clone(), object_namespace.as_str()); + let service_list_params = ListParams::default() + .labels(format!("app.kubernetes.io/name={product_name}").as_str()) + .labels(format!("app.kubernetes.io/instance={object_name}").as_str()); + let services = service_api.list(&service_list_params).await?; + + let extra_infos = + get_extra_infos(product_name, &object, redact_credentials, show_versions).await?; + + let mut endpoints = IndexMap::new(); + for service in services { + let service_endpoint_urls = + get_service_endpoint_urls(&service, &object_name, client.clone()).await; + match service_endpoint_urls { + Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), + Err(err) => warn!( + "Failed to get endpoint_urls of service {service_name}: {err}", + service_name = service.name(), + ), } - result.insert(product_name.to_string(), installed_products); - } - Err(kube::Error::Api(ErrorResponse { code: 404, .. })) => { - debug!("ProductCRD for product {product_name} not installed"); - } - Err(err) => { - return Err(err.into()); } + let product = InstalledProduct { + name: object_name, + namespace: Some(object_namespace), + endpoints, + extra_infos, + }; + installed_products.push(product); } + result.insert(product_name.to_string(), installed_products); } Ok(result) From 15a0b8a269073318ec4942633d5690e6eebe247f Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 11:25:47 +0200 Subject: [PATCH 170/177] docs --- docs/modules/ROOT/pages/index.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index caaae42b..21fc4aa4 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -13,7 +13,7 @@ This also works with subcommands, i.e. `stackablectl release install --help` wil Often you can also use an abbreviation instead of typing out all of the commands. E.g. `stackablectl operator list` can also be written as `stackablectl op ls` -A Kubernetes cluster is required to use the Stackable Data Platform as all products and operators run on Kubernetes. +A Kubernetes cluster is required in order to use the Stackable Data Platform as all products and operators run on Kubernetes. If you don't have a Kubernetes cluster, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes Cluster for you. The deployed services are separated into three different layers as illustrated below: From f63dd72caed4a275a69d7befd11099af0f64f259 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 11:27:24 +0200 Subject: [PATCH 171/177] docs --- src/operator.rs | 2 +- src/release.rs | 2 +- src/stack.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/operator.rs b/src/operator.rs index 47f5589d..e91793bf 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -33,7 +33,7 @@ pub enum CliCommandOperator { #[clap(multiple_occurrences(true), required = true, value_hint = ValueHint::Other)] operators: Vec, - /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// If specified, a local Kubernetes cluster consisting of 4 nodes (1 for control-plane and 3 workers) for testing purposes will be created. /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. /// You need to have `docker` and `kind` installed. 
/// Have a look at our documentation on how to install `kind` at diff --git a/src/release.rs b/src/release.rs index 7394d95a..dbd94e02 100644 --- a/src/release.rs +++ b/src/release.rs @@ -53,7 +53,7 @@ pub enum CliCommandRelease { #[clap(short, long, value_hint = ValueHint::Other)] exclude_products: Vec, - /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// If specified, a local Kubernetes cluster consisting of 4 nodes (1 for control-plane and 3 workers) for testing purposes will be created. /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at diff --git a/src/stack.rs b/src/stack.rs index 94162d27..f745ed8b 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -38,7 +38,7 @@ pub enum CliCommandStack { #[clap(required = true, value_hint = ValueHint::Other)] stack: String, - /// If specified, a local Kubernetes cluster consisting of 4 nodes for testing purposes will be created. + /// If specified, a local Kubernetes cluster consisting of 4 nodes (1 for control-plane and 3 workers) for testing purposes will be created. /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. /// You need to have `docker` and `kind` installed. /// Have a look at our documentation on how to install `kind` at From 07069b8555694983bfb34783120ae6d460b784fd Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 11:49:30 +0200 Subject: [PATCH 172/177] Remove the demos folder --- .../charts/Average_trip_duration_138.yaml | 65 -------- .../charts/Payment_types_136.yaml | 58 -------- .../charts/Total_income_139.yaml | 55 ------- .../charts/Total_trips_140.yaml | 39 ----- .../charts/Trips_income_137.yaml | 85 ----------- .../dashboards/Taxi_data.yaml | 139 ------------------ .../databases/Trino.yaml | 18 --- .../datasets/Trino/ny_taxi_data.yaml | 137 ----------------- .../datasets/Trino/ny_taxi_data_raw.yaml | 125 ---------------- .../metadata.yaml | 3 - demos/trino-taxi-data/superset/assets-old.zip | Bin 17425 -> 0 bytes demos/trino-taxi-data/superset/setup.py | 55 ------- 12 files changed, 779 deletions(-) delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml delete mode 100644 demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml delete mode 100644 demos/trino-taxi-data/superset/assets-old.zip delete mode 100644 demos/trino-taxi-data/superset/setup.py diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml 
b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml deleted file mode 100644 index c10fb58b..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Average_trip_duration_138.yaml +++ /dev/null @@ -1,65 +0,0 @@ -slice_name: Average trip duration -viz_type: echarts_timeseries_line -params: - adhoc_filters: [] - annotation_layers: [] - color_scheme: supersetColors - comparison_type: values - datasource: 25__table - extra_form_data: {} - forecastInterval: 0.8 - forecastPeriods: 10 - granularity_sqla: tpep_pickup_datetime - groupby: [] - legendOrientation: top - legendType: scroll - markerSize: 6 - metrics: - - aggregate: AVG - column: - certification_details: null - certified_by: null - column_name: duration_min - description: null - expression: null - filterable: true - groupby: true - id: 783 - is_certified: false - is_dttm: false - python_date_format: null - type: BIGINT - type_generic: 0 - verbose_name: null - warning_markdown: null - expressionType: SIMPLE - hasCustomLabel: true - isNew: false - label: Average trip duration - optionName: metric_w9gstqe6mu_4k2lcfiaojv - sqlExpression: null - only_total: true - order_desc: true - rich_tooltip: true - row_limit: 10000 - time_grain_sqla: P1D - time_range: No filter - time_range_endpoints: - - inclusive - - exclusive - tooltipTimeFormat: smart_date - viz_type: echarts_timeseries_line - x_axis_time_format: smart_date - x_axis_title: Day - x_axis_title_margin: 30 - y_axis_bounds: - - null - - null - y_axis_format: SMART_NUMBER - y_axis_title: Average trinp duration (minutes) - y_axis_title_margin: 30 - y_axis_title_position: Left -cache_timeout: null -uuid: 17a65a0a-9d82-4596-9823-b5ed56854299 -version: 1.0.0 -dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml deleted file mode 100644 index 29729133..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Payment_types_136.yaml +++ /dev/null @@ -1,58 +0,0 @@ -slice_name: Payment types -viz_type: pie -params: - adhoc_filters: [] - color_scheme: d3Category10 - datasource: 25__table - date_format: smart_date - donut: true - extra_form_data: {} - granularity_sqla: tpep_pickup_datetime - groupby: - - payment_type - innerRadius: 30 - label_line: true - label_type: key_value_percent - labels_outside: true - legendOrientation: top - legendType: scroll - metric: - aggregate: SUM - column: - certification_details: null - certified_by: null - column_name: total_amount - description: null - expression: null - filterable: true - groupby: true - id: 780 - is_certified: false - is_dttm: false - python_date_format: null - type: DOUBLE - type_generic: 0 - verbose_name: null - warning_markdown: null - expressionType: SIMPLE - hasCustomLabel: false - isNew: false - label: SUM(total_amount) - optionName: metric_h5mdqb4w18k_9ix6pqy46a - sqlExpression: null - number_format: SMART_NUMBER - outerRadius: 58 - row_limit: 100 - show_labels: true - show_labels_threshold: 3 - show_legend: true - sort_by_metric: true - time_range: No filter - time_range_endpoints: - - inclusive - - exclusive - viz_type: pie -cache_timeout: null -uuid: b90ad164-9cee-4151-83dd-ea5756a759e1 -version: 1.0.0 -dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml 
b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml deleted file mode 100644 index ccfa79ca..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_income_139.yaml +++ /dev/null @@ -1,55 +0,0 @@ -slice_name: Total income -viz_type: big_number -params: - adhoc_filters: [] - color_picker: - a: 1 - b: 135 - g: 122 - r: 0 - datasource: 25__table - extra_form_data: {} - granularity_sqla: tpep_pickup_datetime - header_font_size: 0.4 - metric: - aggregate: SUM - column: - certification_details: null - certified_by: null - column_name: total_amount - description: null - expression: null - filterable: true - groupby: true - id: 780 - is_certified: false - is_dttm: false - python_date_format: null - type: DOUBLE - type_generic: 0 - verbose_name: null - warning_markdown: null - expressionType: SIMPLE - hasCustomLabel: true - isNew: false - label: Income - optionName: metric_xtjhi12ixqf_x778dcicf2 - sqlExpression: null - rolling_periods: 10000000 - rolling_type: sum - show_trend_line: true - slice_id: 139 - start_y_axis_at_zero: true - subheader_font_size: 0.15 - time_format: smart_date - time_grain_sqla: P1D - time_range: No filter - time_range_endpoints: - - inclusive - - exclusive - viz_type: big_number - y_axis_format: SMART_NUMBER -cache_timeout: null -uuid: 7bb34e00-b66f-4bd7-b4c9-181a4a747762 -version: 1.0.0 -dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml deleted file mode 100644 index 4de2739c..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Total_trips_140.yaml +++ /dev/null @@ -1,39 +0,0 @@ -slice_name: Total trips -viz_type: big_number -params: - adhoc_filters: [] - color_picker: - a: 1 - b: 135 - g: 122 - r: 0 - datasource: 25__table - extra_form_data: {} - granularity_sqla: tpep_pickup_datetime - header_font_size: 0.4 - metric: - aggregate: null - column: null - expressionType: SQL - hasCustomLabel: false - isNew: false - label: COUNT(*) - optionName: metric_ngbx3sztk6_31a1t9wh4wn - sqlExpression: COUNT(*) - rolling_periods: 100000 - rolling_type: sum - show_trend_line: true - start_y_axis_at_zero: true - subheader_font_size: 0.15 - time_format: smart_date - time_grain_sqla: P1D - time_range: No filter - time_range_endpoints: - - inclusive - - exclusive - viz_type: big_number - y_axis_format: SMART_NUMBER -cache_timeout: null -uuid: 95136b8e-084e-46b5-b223-cd4a5879584b -version: 1.0.0 -dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml deleted file mode 100644 index 5c9cea66..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/charts/Trips_income_137.yaml +++ /dev/null @@ -1,85 +0,0 @@ -slice_name: Trips income -viz_type: mixed_timeseries -params: - adhoc_filters: [] - adhoc_filters_b: [] - annotation_layers: [] - area: true - areaB: true - color_scheme: supersetColors - datasource: 25__table - extra_form_data: {} - granularity_sqla: tpep_pickup_datetime - groupby: [] - groupby_b: [] - legendOrientation: top - legendType: scroll - markerSize: 6 - markerSizeB: 6 - metrics: - - aggregate: null - column: null - expressionType: SQL - hasCustomLabel: true - isNew: false - label: Trips - optionName: metric_mdo9hflvtr_aqfvxj57kp - sqlExpression: 
count(*) - metrics_b: - - aggregate: SUM - column: - certification_details: null - certified_by: null - column_name: total_amount - description: null - expression: null - filterable: true - groupby: true - id: 780 - is_certified: false - is_dttm: false - python_date_format: null - type: DOUBLE - type_generic: 0 - verbose_name: null - warning_markdown: null - expressionType: SIMPLE - hasCustomLabel: true - isNew: false - label: Income - optionName: metric_ri0f4uk435_ydmgway2gbd - sqlExpression: null - opacity: 0.2 - opacityB: 0.2 - order_desc: true - order_desc_b: true - rich_tooltip: true - row_limit: 10000 - row_limit_b: 10000 - seriesType: line - seriesTypeB: line - show_legend: true - time_grain_sqla: P1D - time_range: No filter - time_range_endpoints: - - inclusive - - exclusive - tooltipTimeFormat: smart_date - viz_type: mixed_timeseries - x_axis_time_format: smart_date - x_axis_title: Day - x_axis_title_margin: 30 - yAxisIndexB: 1 - yAxisTitleSecondary: Income - y_axis_bounds: - - null - - null - y_axis_format: SMART_NUMBER - y_axis_format_secondary: SMART_NUMBER - y_axis_title: Trips - y_axis_title_margin: 15 - y_axis_title_position: Top -cache_timeout: null -uuid: 95c0cfcc-e688-426d-8976-61386083761a -version: 1.0.0 -dataset_uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml deleted file mode 100644 index e8599d43..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/dashboards/Taxi_data.yaml +++ /dev/null @@ -1,139 +0,0 @@ -dashboard_title: Taxi data -description: null -css: '' -slug: null -uuid: bf510653-d7a8-427c-9339-4af93facf9f9 -position: - CHART-U_CW3gyJhx: - children: [] - id: CHART-U_CW3gyJhx - meta: - chartId: 140 - height: 26 - sliceName: Total trips - uuid: 95136b8e-084e-46b5-b223-cd4a5879584b - width: 3 - parents: - - ROOT_ID - - GRID_ID - - ROW-Vv8MAQytk_ - type: CHART - CHART-Vx2efXVypw: - children: [] - id: CHART-Vx2efXVypw - meta: - chartId: 136 - height: 44 - sliceName: Payment types - uuid: b90ad164-9cee-4151-83dd-ea5756a759e1 - width: 4 - parents: - - ROOT_ID - - GRID_ID - - ROW-rj_mE1OZm - type: CHART - CHART-kDGYtmfNzz: - children: [] - id: CHART-kDGYtmfNzz - meta: - chartId: 138 - height: 31 - sliceName: Average trip duration - uuid: 17a65a0a-9d82-4596-9823-b5ed56854299 - width: 8 - parents: - - ROOT_ID - - GRID_ID - - ROW-rj_mE1OZm - type: CHART - CHART-lD7AleJ0fL: - children: [] - id: CHART-lD7AleJ0fL - meta: - chartId: 139 - height: 26 - sliceName: Total income - uuid: 7bb34e00-b66f-4bd7-b4c9-181a4a747762 - width: 3 - parents: - - ROOT_ID - - GRID_ID - - ROW-Vv8MAQytk_ - type: CHART - CHART-mWq6G6dTKX: - children: [] - id: CHART-mWq6G6dTKX - meta: - chartId: 137 - height: 37 - sliceName: Trips income - uuid: 95c0cfcc-e688-426d-8976-61386083761a - width: 12 - parents: - - ROOT_ID - - GRID_ID - - ROW-RCpJgstvqs - type: CHART - DASHBOARD_VERSION_KEY: v2 - GRID_ID: - children: - - ROW-Vv8MAQytk_ - - ROW-RCpJgstvqs - - ROW-rj_mE1OZm - id: GRID_ID - parents: - - ROOT_ID - type: GRID - HEADER_ID: - id: HEADER_ID - meta: - text: Taxi data - type: HEADER - ROOT_ID: - children: - - GRID_ID - id: ROOT_ID - type: ROOT - ROW-RCpJgstvqs: - children: - - CHART-mWq6G6dTKX - id: ROW-RCpJgstvqs - meta: - background: BACKGROUND_TRANSPARENT - parents: - - ROOT_ID - - GRID_ID - type: ROW - ROW-Vv8MAQytk_: - children: - - CHART-U_CW3gyJhx - - CHART-lD7AleJ0fL - id: ROW-Vv8MAQytk_ - meta: - background: 
BACKGROUND_TRANSPARENT - parents: - - ROOT_ID - - GRID_ID - type: ROW - ROW-rj_mE1OZm: - children: - - CHART-Vx2efXVypw - - CHART-kDGYtmfNzz - id: ROW-rj_mE1OZm - meta: - background: BACKGROUND_TRANSPARENT - parents: - - ROOT_ID - - GRID_ID - type: ROW -metadata: - show_native_filters: true - shared_label_colors: {} - color_scheme: '' - refresh_frequency: 0 - expanded_slices: {} - label_colors: {} - timed_refresh_immune_slices: [] - default_filters: '{}' - chart_configuration: {} -version: 1.0.0 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml deleted file mode 100644 index 21e83481..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/databases/Trino.yaml +++ /dev/null @@ -1,18 +0,0 @@ -database_name: Trino -sqlalchemy_uri: trino://demo:XXXXXXXXXX@trino-coordinator:8443/hive -cache_timeout: null -expose_in_sqllab: true -allow_run_async: false -allow_ctas: true -allow_cvas: true -allow_csv_upload: false -extra: - allows_virtual_table_explore: true - metadata_params: {} - engine_params: - connect_args: - verify: false - cost_estimate_enabled: false - schemas_allowed_for_csv_upload: [] -uuid: 3dbf65a2-24d8-477f-8a26-14b45ffea214 -version: 1.0.0 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml deleted file mode 100644 index 3ef1d383..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data.yaml +++ /dev/null @@ -1,137 +0,0 @@ -table_name: ny_taxi_data -main_dttm_col: tpep_pickup_datetime -description: null -default_endpoint: null -offset: 0 -cache_timeout: null -schema: demo -sql: '' -params: null -template_params: null -filter_select_enabled: false -fetch_values_predicate: null -extra: null -uuid: d363d1c3-4db3-414d-b5f4-d5159b6aa0a7 -metrics: -- metric_name: count - verbose_name: COUNT(*) - metric_type: count - expression: COUNT(*) - description: null - d3format: null - extra: - warning_markdown: '' - warning_text: null -columns: -- column_name: payment_type - verbose_name: null - is_dttm: false - is_active: true - type: VARCHAR(12) - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: tpep_pickup_datetime - verbose_name: null - is_dttm: true - is_active: true - type: TIMESTAMP(3) - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: tpep_dropoff_datetime - verbose_name: null - is_dttm: true - is_active: true - type: TIMESTAMP(3) - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: fare_amount - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: total_amount - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: trip_distance - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: passenger_count - verbose_name: null - is_dttm: false - is_active: 
true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: tip_amount - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: duration_min - verbose_name: null - is_dttm: false - is_active: true - type: BIGINT - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -- column_name: vendorid - verbose_name: null - is_dttm: false - is_active: true - type: BIGINT - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: {} -version: 1.0.0 -database_uuid: 3dbf65a2-24d8-477f-8a26-14b45ffea214 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml deleted file mode 100644 index f83436eb..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/datasets/Trino/ny_taxi_data_raw.yaml +++ /dev/null @@ -1,125 +0,0 @@ -table_name: ny_taxi_data_raw -main_dttm_col: tpep_pickup_datetime -description: null -default_endpoint: null -offset: 0 -cache_timeout: null -schema: demo -sql: null -params: null -template_params: null -filter_select_enabled: false -fetch_values_predicate: null -extra: null -uuid: 6ab2bb0c-e8a4-46fd-8690-d5c93466a364 -metrics: -- metric_name: count - verbose_name: COUNT(*) - metric_type: count - expression: COUNT(*) - description: null - d3format: null - extra: null - warning_text: null -columns: -- column_name: tpep_pickup_datetime - verbose_name: null - is_dttm: true - is_active: true - type: TIMESTAMP(3) - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: tpep_dropoff_datetime - verbose_name: null - is_dttm: true - is_active: true - type: TIMESTAMP(3) - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: fare_amount - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: total_amount - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: trip_distance - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: passenger_count - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: tip_amount - verbose_name: null - is_dttm: false - is_active: true - type: DOUBLE - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: payment_type - verbose_name: null - is_dttm: false - is_active: true - type: BIGINT - groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -- column_name: vendorid - verbose_name: null - is_dttm: false - is_active: true - type: BIGINT - 
groupby: true - filterable: true - expression: null - description: null - python_date_format: null - extra: null -version: 1.0.0 -database_uuid: 3dbf65a2-24d8-477f-8a26-14b45ffea214 diff --git a/demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml b/demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml deleted file mode 100644 index 919b5425..00000000 --- a/demos/trino-taxi-data/assets_export_20220808T130607/metadata.yaml +++ /dev/null @@ -1,3 +0,0 @@ -version: 1.0.0 -type: assets -timestamp: '2022-08-08T13:06:07.743262+00:00' diff --git a/demos/trino-taxi-data/superset/assets-old.zip b/demos/trino-taxi-data/superset/assets-old.zip deleted file mode 100644 index aa1ce0d5973ed5c2b0114f7ac511a95d48ade18f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17425 zcmeHO%a0sK8J~~@#6-D8LL3l{P-H`BdwRO3=Ya%S?=H^T-bZ%Fb^=1B>h9{9_V#18 zAG7myG~ByR8*aDx*PAR)vJj+}`QoVX$QzN&srZ_m!Gc4K=@#%{}9U9az} zufE6ctJ=H!;!CeJ#P17@|NL<4S9jnApEuy4QN-eiu<0ZSW72N5+pS)!H&oP?*3xcz zET$HX>5Uon++!9-PT&tD^fmh>bk)R2@{{tXy%&($);|U6`L`Q=IWWp)q?1lBojBF zR@RbDNXhVpU5d|r}dIT4=&@|{dVcI>c|qH%;w zrxD@PW)`u7keDUvzV+>9k~q+vY8kfPp>4UXSv^_nc5S&w+q$f1hSsrdM%#+ktVrFw z=)7|9`Jex+*=XSNwUtB%rCmtQO@9WgoH`^IaKTt1L*Llz13QmT8JsUUnR2o%U=I%SaYY@bNd2@QS69}$m+hgNU` zx(c{g6vt3CZ3YXOcs`%L_-{JV37vV&j|ooi6cwERlH^2O7z&Ap0&2#fFY?_esC%%s zzy8+R{&l5|gpa}?nHaFk(j6<73^!6I^-Eys0liH{s>G=rn2W1{8g1dSgPks`m7|R z=!6jPaxZMaZf)J(+8I7|EpZGkR1iAWm8GZR#G=QEF$_6}A3X2){`DV!{O#{Q-Du!* z>mrUX3F!%sp%yv6SmEh9z+AP=IlCXd3#?P4Z5S<6W<9FOnr>TiPw%(D1vdMtrt4JI zwP(%+7C!vB+}-EQE99b5=lDKHeqkMrSKc$MBPzb~zF{p@SKm9VBkQ^F59n#(1cIz4iQMVC@+An2*ACV13dU8@DZ0!aVm}kazqTj1E|R{@KH#Cxioa* z86x&jyujeAc$k4 z{Ch+ol@K{rsTmtVfU_GqP>aWI&|xsaA^3+OPqYBK96<6R!%qcrPe-GWjX0KjaDN*< zas~tFmjU}G3uDJdxEDi03(`4`3j}as!`e_^)Bv0wp~>=xIrMPe7GuLYU=qt~Fib42 zK|8TH<9W7(ScbWQZV!3P-y)LFv4C9DF9^!d7oLHQVjHnIIjE8q5m*7pfk4(K8@3kS z!0M+l=A3B->0-?ewzl^;$R;fUeDoFv) zFzoO=SBj*MJDzo9XeUbVko29YJ~^6cI_0%N+&4?edOA@*@eBZRGUWJRdu@M6cJ6QA z+T4d$Af1JzcY06~LSD;tJO?(K(n9`3V=O^JK$gF^AmRj|X&~(csn9?dMR}f1;mIpma(vTGBFxp{u4U8WZC)+aW%5M$ZS-5z zQglu3n~Xuaqoc?@)v{zpJKc^>yPZB$s%a>$saTu_@GB)u<^%ZT-#7pA(@!-T_zYH6 z%tP=UBS<`&0dPrCyQlOruYfFp3MIAdIaAo*$ioPP95i+sO#{p7bo5*qf&3=d+|>6$ z32;5)(i!A}v+|IkO2rf?bO$fD^0#?qA0?m(A@#M6CLn^!i`4xksc|vwRKetA2%WmJ zvV}wv3lE&fAdfms852w59jN35SsDzbucVH1ogk7hCCz$DNxXdzIzFu-QjQ{oU#}qe zV)LdoO2M;&{@8Yp+9!0-JYUCvZ5ooSBP&RZww`nvabOa!!)2h;dme zI$J_RAW1c{6FMzhOAa-)LuQsYI-#@n$UyM*l<=fd9!#hS-W3?n8*QkTzQPVk-$M(` z4f+eYW`MI6X%TMBbiQDJ$0QB{7hO$i)Rh`9dF zjrLsYyh4n*O9}ee0Qpa_+2EA?h7MlWF6sXDzh7L47U~oFbq9H70E+R8A zx&diNE48`lvdfvfm{wWfP|SUW6AJkd6~`m37vn_uA)W~uiE?R9htjEjfg?GNID0V! 
z*LktA?|00WX`2A->ODAf($)dk?RRxqSJa-~>Zx5_p_gv#@BU%?N8L|08u+ZQYV6m* z1EnK4e8d5xEUN?4JZ1IMny`(WiInAkO6H%5xUS>?@UUf>7qGzIFEQ%idVqJ~L=bKW zUt1hIf%Q6{xd&llCXM1PAKX#s1C;UgRXCFY5TVoR-M{r!|{wRu%nC>M+ zJ8ZkA?a<)SF=t@K^U92;@4`VejIVQ>S6IiqXjR+zOxdzUil=0qAdtz-#zjopCE2Ld z_LLUfFEpxj9a8EL97h<5*P11c3(b8g%+ft($7k3jrAy%`WQ)putJjvbPG6V%y|xMm z@0iumdmXLa?_auIx+d?v`{7G)@)n-egHR3ytWA8f&#=L)UFtGpw#`Xl7qldJ5HO7tWXK?Mqjo-}&|5UJqYv zH1O%K>e_P+$sK!0uV}3W8Wi36a&o1_qOe8l_7dw8VZ)~(^)cb4*{sCpJ(x)r{tSwTr|hUg7tQDvDgD(=ht{swulxqq;=yF>17zB`bPaimmdRd~fa zLG^Y@g^9w-aH4R2Powu=R)}X1B-A2ShH>B8T-(^(2Q@0Dgx%ytd|G9-C+^IlkrP&| zpk1_q2C0g!C$Kz09I33&#jLP|_wrCKh_3HU=<|wDiC)W%PZ42I^AHokK1|@&+WOtw z`*0s|gADiAb`JK|_BZ(*hjW;oY~l~oc^5|Lta%sanNLto%~M5pWaJcmK5=q#F*}Qz zimA+~saT4Pn!L*=NDPWGfdi z#s@F_WNF8^EY(~5)&8rOyNmw+^oeB?=UCtwSG}uaRqxXjWBu;c3yhWDuBnb!J(pRG z_s`EP8}H0hn$>}-=ZuPhe(<>!2jZ5udg`b;PBq0@jFY^+;y9^ixFF$K9k7aB7X!Zf zl@$jRFiZu>u8va$c#3iMlyi;4BcvSesSZ;OKZ{{L+gUbD9&=WM(Y?DbL&mw$Xu_XY L0OSAH*YWLtkQaWn diff --git a/demos/trino-taxi-data/superset/setup.py b/demos/trino-taxi-data/superset/setup.py deleted file mode 100644 index 5de0e60b..00000000 --- a/demos/trino-taxi-data/superset/setup.py +++ /dev/null @@ -1,55 +0,0 @@ -import logging -import requests - -base_url = "http://superset:8088" -# base_url = "http://172.18.0.4:31024" -username = "admin" -password = "admin" - -logging.basicConfig(level=logging.INFO) -logging.info("Starting setup of Superset") - -logging.info("Getting access token from /api/v1/security/login") -session = requests.session() -access_token = session.post(f"{base_url}/api/v1/security/login", json={"username": username, "password": password, "provider": "db", "refresh": True}).json()['access_token'] -# print(f"access_token: {access_token}") - -logging.info("Getting csrf token from /api/v1/security/csrf_token") -csrf_token = session.get(f"{base_url}/api/v1/security/csrf_token", headers={"Authorization": f"Bearer {access_token}"}).json()["result"] -# print(f"csrf_token: {csrf_token}") - -headers = { - "accept": "application/json", - "Authorization": f"Bearer {access_token}", - "X-CSRFToken": csrf_token, -} - -# logging.info("Exporting all assets") -# result = session.get(f"{base_url}/api/v1/assets/export", headers=headers) -# assert result.status_code == 200 -# with open("assets.zip", "wb") as f: -# f.write(result.content) - - -######################### -# IMPORTANT -######################### -# The exported file had to be modified, otherwise we get: -# -# {"errors": [{"message": "Error importing assets", "error_type": "GENERIC_COMMAND_ERROR", "level": "warning", "extra": {"databases/Trino.yaml": {"extra": {"disable_data_preview": ["Unknown field."]}}, "issue_codes": [{"code": 1010, "message": "Issue 1010 - Superset encountered an error while running a command."}]}}]} -# -# The file databases/Trino.yaml was modified and the attribute "extra.disable_data_preview" was removed -######################### -logging.info("Importing all assets") -files = { - "bundle": ("assets.zip", open("assets.zip", "rb")), -} -data = { - "passwords": '{"databases/Trino.yaml": "demo"}' -} -result = session.post(f"{base_url}/api/v1/assets/import", headers=headers, files=files, json=data) -print(result) -print(result.text) -assert result.status_code == 200 - -logging.info("Finished setup of Superset") From 8f53fbed4a77b2dad2c5d46fb7bf0986835013f2 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 11:59:13 +0200 Subject: [PATCH 173/177] Move and version stacks.yaml --- docs/modules/ROOT/pages/customization.adoc | 2 +- 
docs/modules/ROOT/pages/troubleshooting.adoc | 4 ++-- src/arguments.rs | 2 +- src/stack.rs | 2 +- stacks.yaml => stacks/stacks-v1.yaml | 0 5 files changed, 5 insertions(+), 5 deletions(-) rename stacks.yaml => stacks/stacks-v1.yaml (100%) diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc index a8ca2fa7..f58e2d99 100644 --- a/docs/modules/ROOT/pages/customization.adoc +++ b/docs/modules/ROOT/pages/customization.adoc @@ -28,7 +28,7 @@ You can use your defined stack to give it to colleagues or potential customers t === Adding a new stack For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new stack. -For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks.yaml[the Stackable provided stacks]. +For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks/stacks-v1.yaml[the Stackable provided stacks]. You can then add it to `stackablectl` with the flag `--additional-stacks-file`. diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc index be2495ae..04343ac5 100644 --- a/docs/modules/ROOT/pages/troubleshooting.adoc +++ b/docs/modules/ROOT/pages/troubleshooting.adoc @@ -20,10 +20,10 @@ To achieve this the following online services will be contacted: | https://raw.githubusercontent.com/stackabletech/release/main/releases.yaml | List of releases provided by Stackable -| https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml +| https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/stacks-v1.yaml | List of stacks provided by Stackable -| https://raw.githubusercontent.com/stackabletech/stackablectl/main/demos.yaml +| https://raw.githubusercontent.com/stackabletech/stackablectl/main/demos/demos-v1.yaml | List of demos provided by Stackable |=== diff --git a/src/arguments.rs b/src/arguments.rs index a2fd93e0..3eb59dc9 100644 --- a/src/arguments.rs +++ b/src/arguments.rs @@ -66,7 +66,7 @@ pub struct CliArgs { /// Adds a YAML file containing custom stacks /// /// If you do not have access to the Stackable repositories on GitHub or if you want to maintain your own stacks, you can specify additional YAML files containing stack information. - /// Have a look at for the structure. + /// Have a look at for the structure. /// Can either be a URL or a path to a file, e.g. `https://my.server/my-stacks.yaml`, '/etc/my-stacks.yaml' or `C:\Users\Bob\my-stacks.yaml`. /// Can be specified multiple times. #[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] diff --git a/src/stack.rs b/src/stack.rs index f745ed8b..bd4846b9 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -9,7 +9,7 @@ use std::{error::Error, ops::Deref, sync::Mutex}; lazy_static! 
{ pub static ref STACK_FILES: Mutex> = Mutex::new(vec![ - "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml".to_string(), + "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/stacks-v1.yaml".to_string(), ]); } diff --git a/stacks.yaml b/stacks/stacks-v1.yaml similarity index 100% rename from stacks.yaml rename to stacks/stacks-v1.yaml From af9491ddba07cc9ffdeff05c9c693b2314261a71 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 12:06:34 +0200 Subject: [PATCH 174/177] Point to Github URL rather than to local file --- stacks/stacks-v1.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stacks/stacks-v1.yaml b/stacks/stacks-v1.yaml index ac657597..77a7e6ba 100644 --- a/stacks/stacks-v1.yaml +++ b/stacks/stacks-v1.yaml @@ -48,9 +48,9 @@ stacks: username: superset password: superset database: superset - - plainYaml: stacks/druid-superset-s3/zookeeper.yaml - - plainYaml: stacks/druid-superset-s3/druid.yaml - - plainYaml: stacks/druid-superset-s3/superset.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/druid-superset-s3/zookeeper.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/druid-superset-s3/druid.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/druid-superset-s3/superset.yaml trino-superset-s3: description: Stack containing MinIO, Trino and Superset for data visualization stackableRelease: 22.06 @@ -117,9 +117,9 @@ stacks: username: superset password: superset database: superset - - plainYaml: stacks/trino-superset-s3/hive-metastore.yaml - - plainYaml: stacks/trino-superset-s3/trino.yaml - - plainYaml: stacks/trino-superset-s3/superset.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/trino-superset-s3/hive-metastore.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/trino-superset-s3/trino.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/trino-superset-s3/superset.yaml airflow: description: Stack containing Airflow scheduling platform stackableRelease: 22.06 @@ -150,4 +150,4 @@ stacks: password: airflow replica: replicaCount: 1 - - plainYaml: stacks/airflow/airflow.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/airflow/airflow.yaml From fe86a090d42e61006e1e5d1a9fc88879986ad771 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Tue, 9 Aug 2022 12:20:11 +0200 Subject: [PATCH 175/177] formatting --- src/stack.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/stack.rs b/src/stack.rs index bd4846b9..b4a8670f 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -9,7 +9,8 @@ use std::{error::Error, ops::Deref, sync::Mutex}; lazy_static! 
{
     pub static ref STACK_FILES: Mutex<Vec<String>> = Mutex::new(vec![
-        "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/stacks-v1.yaml".to_string(),
+        "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/stacks-v1.yaml"
+            .to_string(),
     ]);
 }

From fbcd20f257030eb675c7a3280006f05468b5fc99 Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Tue, 9 Aug 2022 12:21:55 +0200
Subject: [PATCH 176/177] Changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9f849c9d..bfae1616 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@

 ### Added

+- Support stacks, which are a collection of ready-to-use Stackable data products as well as required third-party services like Postgresql or MinIO ([#36](https://github.com/stackabletech/stackablectl/pull/36))
 - Support generation of shell completions ([#54](https://github.com/stackabletech/stackablectl/pull/54))

 ### Changed

From 02482b34f64e079f34657acbab74586647176124 Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Tue, 9 Aug 2022 12:29:21 +0200
Subject: [PATCH 177/177] Changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bfae1616..1099ba62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@
 ### Added

 - Support stacks, which are a collection of ready-to-use Stackable data products as well as required third-party services like Postgresql or MinIO ([#36](https://github.com/stackabletech/stackablectl/pull/36))
+- Add `services list` command to list the running Stackable services ([#36](https://github.com/stackabletech/stackablectl/pull/36))
 - Support generation of shell completions ([#54](https://github.com/stackabletech/stackablectl/pull/54))

 ### Changed
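The two changelog entries above round off the stack support added throughout this series: stacks are defined in YAML (stacks/stacks-v1.yaml) and, per customization.adoc, users can maintain their own list in a file such as `mycorp-stacks.yaml` and pass it via `--additional-stacks-file`. As an illustration only, the sketch below shows a minimal custom stacks file and how such YAML could be deserialized with serde_yaml. The struct layout, the `mycorp-demo` stack name and the `https://my.server/my-product.yaml` manifest URL are assumptions inferred from the stacks-v1.yaml excerpts in these patches, not the CLI's actual implementation; the schema is reduced to `plainYaml` manifests only.

```rust
// Sketch only: parsing a minimal custom stacks file in the shape of stacks/stacks-v1.yaml.
// Assumed dependencies: serde (with the "derive" feature) and serde_yaml.
use serde::Deserialize;
use std::collections::BTreeMap;

#[derive(Debug, Deserialize)]
struct Stacks {
    // Keyed by stack name, e.g. "trino-superset-s3" or a custom "mycorp-demo".
    stacks: BTreeMap<String, Stack>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Stack {
    description: String,
    stackable_release: String,
    // Reduced to plainYaml manifests; the real format also carries other entry types and parameters.
    manifests: Vec<Manifest>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Manifest {
    plain_yaml: String,
}

fn main() {
    // Hypothetical mycorp-stacks.yaml content, as it could be passed via --additional-stacks-file.
    let custom_stacks = r#"
stacks:
  mycorp-demo:
    description: Example custom stack maintained outside the Stackable repositories
    stackableRelease: "22.06"
    manifests:
      - plainYaml: https://my.server/my-product.yaml
"#;

    // Parse the YAML and print a short summary per stack.
    let parsed: Stacks = serde_yaml::from_str(custom_stacks).expect("invalid stacks YAML");
    for (name, stack) in &parsed.stacks {
        println!(
            "{name}: {} (release {}, {} manifest(s))",
            stack.description,
            stack.stackable_release,
            stack.manifests.len()
        );
    }
}
```

Passed as `--additional-stacks-file mycorp-stacks.yaml` (or as a URL), such a file would be read in addition to the built-in stacks/stacks-v1.yaml list and can be specified multiple times.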