diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d999dc5b..4b6dfc5d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -179,17 +179,35 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '^1.18.1' + go-version: '^1.18.3' - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable target: ${{ matrix.target }} override: true - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c # tag=v1.4.0 + - uses: Swatinem/rust-cache@v1 with: key: build-${{ matrix.target }} - - uses: actions-rs/cargo@v1 + - name: Prepare Ubuntu env + if: matrix.os == 'ubuntu-latest' + run: sudo apt install -y musl-tools + - name: Prepare Windows env + if: matrix.os == 'windows-latest' + uses: msys2/setup-msys2@v2 + with: + msystem: MINGW64 + update: true + install: base-devel mingw-w64-x86_64-go mingw-w64-x86_64-rust + - name: Build for non-Windows + if: matrix.os != 'windows-latest' + uses: actions-rs/cargo@v1 with: command: build args: --target=${{ matrix.target }} + - name: Build for Windows + if: matrix.os == 'windows-latest' + run: | + echo 'Running in MSYS2!' 
+ cargo build --target=${{ matrix.target }} + shell: msys2 {0} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 405a5e10..b538a597 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,20 +29,38 @@ jobs: - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # tag=v3 - uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a # tag=v3 with: - go-version: '^1.18.1' - - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # tag=v1 + go-version: '^1.18.3' + - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable target: ${{ matrix.target }} override: true - - uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # tag=v1 + - name: Prepare Ubuntu env + if: matrix.os == 'ubuntu-latest' + run: sudo apt install -y musl-tools + - name: Prepare Windows env + if: matrix.os == 'windows-latest' + uses: msys2/setup-msys2@v2 + with: + msystem: MINGW64 + update: true + install: base-devel mingw-w64-x86_64-go mingw-w64-x86_64-rust + - name: Build for non-Windows + if: matrix.os != 'windows-latest' + uses: actions-rs/cargo@v1 with: command: build args: --release --target=${{ matrix.target }} + - name: Build for Windows + if: matrix.os == 'windows-latest' + run: | + echo 'Running in MSYS2!' 
+ cargo build --release --target=${{ matrix.target }} + shell: msys2 {0} - name: Rename binary file run: mv target/${{ matrix.target }}/release/stackablectl${{ matrix.file-suffix }} stackablectl-${{ matrix.target }}${{ matrix.file-suffix }} - name: Upload Release binaries - uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5 # tag=v1 + uses: softprops/action-gh-release@v1 with: files: stackablectl-${{ matrix.target }}${{ matrix.file-suffix }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f849c9d..1099ba62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ ### Added +- Support stacks, which are a collection of ready-to-use Stackable data products as well as required third-party services like Postgresql or MinIO ([#36](https://github.com/stackabletech/stackablectl/pull/36)) +- Add `services list` command to list the running Stackable services ([#36](https://github.com/stackabletech/stackablectl/pull/36)) - Support generation of shell completions ([#54](https://github.com/stackabletech/stackablectl/pull/54)) ### Changed diff --git a/Cargo.lock b/Cargo.lock index f4a9904a..66c61809 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -57,6 +57,18 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.10.0" @@ -118,6 +130,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "serde", + "winapi", +] + [[package]] name = "clap" version = "3.2.14" @@ -166,6 +191,29 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "cli-table" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adfbb116d9e2c4be7011360d0c0bee565712c11e969c9609b25b619366dc379d" +dependencies = [ + "cli-table-derive", + "csv", + "termcolor", + "unicode-width", +] + +[[package]] +name = "cli-table-derive" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af3bfb9da627b0a6c467624fb7963921433774ed435493b5c08a3053e829ad4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -182,6 +230,28 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +[[package]] +name = "csv" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +dependencies = [ + "bstr", + "csv-core", + "itoa 0.4.8", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + [[package]] name = "darling" version = "0.13.4" @@ -217,6 +287,27 @@ dependencies = [ "syn", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + 
"cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "either" version = "1.7.0" @@ -374,6 +465,17 @@ dependencies = [ "slab", ] +[[package]] +name = "getrandom" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "gobuild" version = "0.1.0-alpha.2" @@ -431,7 +533,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.2", ] [[package]] @@ -445,6 +547,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.7.1" @@ -478,7 +586,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 1.0.2", "pin-project-lite", "socket2", "tokio", @@ -487,6 +595,36 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-openssl" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" +dependencies = [ + "http", + "hyper", + "linked_hash_set", + "once_cell", + "openssl", + "openssl-sys", + "parking_lot", + "tokio", + "tokio-openssl", + "tower-layer", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" 
+dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -543,6 +681,12 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +[[package]] +name = "itoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + [[package]] name = "itoa" version = "1.0.2" @@ -558,6 +702,93 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonpath_lib" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaa63191d68230cccb81c5aa23abd53ed64d83337cacbb25a7b8c7979523774f" +dependencies = [ + "log", + "serde", + "serde_json", +] + +[[package]] +name = "k8s-openapi" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ae2c04fcee6b01b04e3aadd56bb418932c8e0a9d8a93f48bc68c6bdcdb559d" +dependencies = [ + "base64", + "bytes", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "kube" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f68b954ea9ad888de953fb1488bd8f377c4c78d82d4642efa5925189210b50b7" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", +] + +[[package]] +name = "kube-client" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150dc7107d9acf4986088f284a0a6dddc5ae37ef1ffdf142f6811dc5998dd58" +dependencies = [ + "base64", + "bytes", + "chrono", + "dirs-next", + "either", + "futures", + "http", + "http-body", + "hyper", + "hyper-openssl", + "hyper-timeout", + "jsonpath_lib", + "k8s-openapi", + "kube-core", + "openssl", + "pem", + "pin-project", + "secrecy", + "serde", + "serde_json", + 
"serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8c429676abe6a73b374438d5ca02caaf9ae7a635441253c589b779fa5d0622" +dependencies = [ + "chrono", + "form_urlencoded", + "http", + "k8s-openapi", + "once_cell", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -576,6 +807,25 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "lock_api" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.17" @@ -633,6 +883,25 @@ dependencies = [ "tempfile", ] +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + [[package]] name = "num_cpus" version = "1.13.1" @@ -681,6 +950,15 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-src" +version = "111.22.0+1.1.1q" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.75" @@ -690,22 +968,84 @@ dependencies = [ "autocfg", "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] +[[package]] +name = "ordered-float" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" +dependencies = [ + "num-traits", +] + [[package]] name = "os_str_bytes" version = "6.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4" +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + +[[package]] +name = "pem" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" +dependencies = [ + "base64", +] + [[package]] name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pin-project" +version = 
"1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.9" @@ -775,6 +1115,17 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom", + "redox_syscall", + "thiserror", +] + [[package]] name = "regex" version = "1.6.0" @@ -786,6 +1137,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" + [[package]] name = "regex-syntax" version = "0.6.27" @@ -854,6 +1211,22 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + [[package]] name = "security-framework" version = "2.6.1" @@ -886,6 +1259,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.140" @@ -903,7 +1286,8 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ - "itoa", + "indexmap", + "itoa 1.0.2", "ryu", "serde", ] @@ -915,7 +1299,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa", + "itoa 1.0.2", "ryu", "serde", ] @@ -932,6 +1316,15 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + [[package]] name = "slab" version = "0.4.7" @@ -941,6 +1334,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "smallvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" + [[package]] name = "socket2" version = "0.4.4" @@ -958,15 +1357,20 @@ dependencies = [ "cached", "clap", "clap_complete", + "cli-table", "env_logger", "gobuild", "indexmap", + "k8s-openapi", + "kube", "lazy_static", "log", + "openssl", "reqwest", "serde", "serde_json", "serde_yaml", + "tokio", "which", ] @@ -1065,11 +1469,22 @@ dependencies = [ "num_cpus", "once_cell", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "winapi", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "1.8.0" @@ -1091,6 +1506,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-openssl" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +dependencies = [ + "futures-util", + "openssl", + "openssl-sys", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.3" @@ -1105,6 +1532,49 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "base64", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.2" @@ -1118,10 +1588,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if", + "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = 
"tracing-attributes" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" version = "0.1.28" @@ -1158,6 +1641,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + [[package]] name = "url" version = "2.2.2" @@ -1376,3 +1865,9 @@ checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] + +[[package]] +name = "zeroize" +version = "1.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20b578acffd8516a6c3f2a1bdefc1ec37e547bb4e0fb8b6b01a4cafc886b4442" diff --git a/Cargo.toml b/Cargo.toml index 58074645..2ff1d9e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,15 +11,20 @@ repository = "https://github.com/stackabletech/stackablectl" cached = "0.37" clap = { version = "3.2", features = ["derive", "cargo"] } clap_complete = "3.2" +cli-table = "0.4" env_logger = "0.9" indexmap = { version = "1.9", features = ["serde"] } +k8s-openapi = { version = "0.15.0", default-features = false, features = ["v1_24"] } +kube = "0.73.1" # Using openssl (and not native-tls) as kube-rs team tries to move away from native-tls lazy_static = "1.4" log = "0.4" +openssl = { version = "0.10.36", features = ["vendored"] } # Must match version from kube which = "4.2" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" -reqwest = { version = "0.11", features = ["blocking"] } +reqwest = "0.11" # Using native-tls as openssl does not seem to be supported as of 0.11 +tokio = "1.19" [profile.release] # strip = true # By default on Linux and macOS, symbol 
information is included in the compiled .elf file. diff --git a/README.md b/README.md index ad7bb9f6..76e0e8a7 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,6 @@ # stackablectl -# Installing - -See the [docs](https://docs.stackable.tech/stackablectl/stable/installation.html) for detailed instructions. - -# Usage -## List available releases -One good step to start using stackablectl is to list the available Releases with -```bash -$ ./stackablectl release list -``` -You can also ask for the list of currently supported Product operators with -```bash -$ ./stackablectl operator list -``` - -# Building -You need to have Rust and go installed. -To build stackablectl execute `cargo build` or `cargo run` to run it. - -We separate the deployed services into 3 layers: - -| Layer | Description | Examples | -|---------------|-------------------------------------------------------------|-----------------------------------------------| -| **Operators** | The operators needed to operate the Stackable Data Platform | `trino-operator`, `superset-operator` | -| **Stack** | The data products | `Trino`, `Apache Superset` | -| **Demo** | The demos that prepare data and run the applications | Demo loading and analyzing New York taxi data | - -![](docs/readme/images/layers.png) - -Each layer gets deployed via its dedicated `stackablectl` command - -# Deploying -## Operators -Operators manage the products of the Stackable Data Platform. -This command can be used as a direct replacement of `create_test_cluster.py`. -We decided to drop dependency resolution (like the superset operator requires the commons-, secret-, druid-, trino-operator and a postgres) for the following reasons: -1. Imagine the situation "install `trino=1.2.3` and `superset`". Superset expresses a dependency on the latest Trino version. -Now the situation gets complicated because we have conflicting version requirements for the trino-operator. 
-We could try to resolve this using dependency trees and other magic stuff. -2. Even more important: When you deploy the superset-operator `stackablectl` has no way to know to which data products you want integrate with. -Because of this it would need to deploy the operators for **all** the products Superset supports. -As a result it would install like 90% of the operators by simply specifying Superset. -And all of that on possible non-fixed versions. - -We also don't deploy examples any more as that functionality is now provided by the stack layer below. - -## Stack -A Stack contains data products that are managed by Stackable operators. Additional products like MinIO, Prometheus and Grafana can also be included. - -If you deploy a Stack with `stackablectl` it will automatically install the needed operators layer from the provided release. - -## Demo -The highest layer - demo - is not really needed to spin up a Stackable Data Platform. -It enables us to run end-to-end demos with a single command. - -If you deploy a Demo with `stackablectl` it will automatically install the needed stack and operators layers. +The documentation of `stackablectl` can be found in the [documentation of the Stackable Data Platform](https://docs.stackable.tech/stackablectl/stable/index.html). # TODOs * Check if CRD resources still exist when uninstalling the operators. If so warn the user. 
diff --git a/deny.toml b/deny.toml index 6c7087fc..dc86e697 100644 --- a/deny.toml +++ b/deny.toml @@ -4,6 +4,8 @@ targets = [ { triple = "x86_64-unknown-linux-musl" }, { triple = "aarch64-apple-darwin" }, { triple = "x86_64-apple-darwin" }, + { triple = "x86_64-pc-windows-gnu" }, + { triple = "x86_64-pc-windows-msvc" }, ] [advisories] @@ -34,7 +36,21 @@ allow = [ "Zlib" ] exceptions = [ - { name = "stackablectl", allow = ["OSL-3.0"] } + { name = "stackablectl", allow = ["OSL-3.0"] }, +] + +[[licenses.clarify]] +name = "ring" +expression = "LicenseRef-ring" +license-files = [ + { path = "LICENSE", hash = 0xbd0eed23 }, +] + +[[licenses.clarify]] +name = "webpki" +expression = "LicenseRef-webpki" +license-files = [ + { path = "LICENSE", hash = 0x001c7e6c }, ] [sources] diff --git a/docs/modules/ROOT/images/layers.png b/docs/modules/ROOT/images/layers.png new file mode 100644 index 00000000..69e5a5a5 Binary files /dev/null and b/docs/modules/ROOT/images/layers.png differ diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 58673369..9a6f25b8 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,3 +1,10 @@ -* xref:index.adoc[] -** xref:installation.adoc[] -** xref:quickstart.adoc[] \ No newline at end of file +* xref:installation.adoc[] +* xref:quickstart.adoc[] +* Commands +** xref:commands/demo.adoc[] +** xref:commands/operator.adoc[] +** xref:commands/release.adoc[] +** xref:commands/services.adoc[] +** xref:commands/stack.adoc[] +* xref:customization.adoc[] +* xref:troubleshooting.adoc[] diff --git a/docs/modules/ROOT/pages/commands/demo.adoc b/docs/modules/ROOT/pages/commands/demo.adoc new file mode 100644 index 00000000..f8e1def5 --- /dev/null +++ b/docs/modules/ROOT/pages/commands/demo.adoc @@ -0,0 +1,3 @@ += Demo + +Not implemented yet diff --git a/docs/modules/ROOT/pages/commands/operator.adoc b/docs/modules/ROOT/pages/commands/operator.adoc new file mode 100644 index 00000000..048bdfe3 --- /dev/null +++ 
b/docs/modules/ROOT/pages/commands/operator.adoc @@ -0,0 +1,126 @@ += Operator + +The `stackable operator` command allows to list, install and uninstall Stackable operators. +Operators manage the individual data products of the Stackable Data Platform. + +This command manages individual operators. +It is mainly intended for people already having experience or working on the Stackable Data Platform. +If you just want an easy way to get started or don't know which products and/or which version to install it is recommended to use the xref:commands/release.adoc[] command. +This command will install a bundle of operators from an official Stackable release. + +== Browse available operators +To list the operators that are part of the Stackable Data Platform as well as their stable versions run the following command: + +[source,console] +---- +$ stackablectl operator list +OPERATOR STABLE VERSIONS +airflow 0.4.0, 0.3.0, 0.2.0, 0.1.0 +commons 0.2.0, 0.1.0 +druid 0.6.0, 0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0 +hbase 0.3.0, 0.2.0 +hdfs 0.4.0, 0.3.0 +hive 0.6.0, 0.5.0, 0.3.0 +kafka 0.6.0, 0.5.0, 0.4.0 +nifi 0.6.0, 0.5.0, 0.4.0 +opa 0.9.0, 0.8.0, 0.7.0, 0.6.0 +secret 0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0 +spark 0.5.0, 0.4.0 +spark-k8s 0.3.0, 0.2.0, 0.1.0 +superset 0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0 +trino 0.4.0, 0.3.1, 0.3.0, 0.2.0 +zookeeper 0.9.0, 0.8.0, 0.7.0, 0.6.0, 0.10.0 +---- + +This command only includes the stable versions of every operator for clarity. 
+If you're interested in a special version of an operator you can use the `describe` command to get more details for a specific operator as follows: + +[source,console] +---- +$ stackablectl operator describe airflow +Operator: airflow +Stable versions: 0.4.0, 0.3.0, 0.2.0, 0.1.0 +Test versions: 0.5.0-pr135, 0.5.0-pr134, 0.5.0-pr133, 0.5.0-pr132, 0.5.0-pr131, 0.5.0-pr130, 0.5.0-pr129, 0.5.0-pr128, 0.5.0-pr127, 0.5.0-pr126, 0.5.0-pr125, 0.5.0-pr122, 0.4.0-pr123, 0.4.0-pr122, 0.4.0-pr121, 0.4.0-pr120, 0.4.0-pr119, 0.4.0-pr118, 0.4.0-pr117 +Dev versions: 0.5.0-nightly, 0.4.0-nightly, 0.3.0-nightly, 0.2.0-nightly, 0.1.0-nightly +---- + +== Install operator +If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. +After that run the following command, which will install the operators in their latest nightly version - built from the main branch of the operators. + +[source,console] +---- +$ stackablectl operator install airflow commons secret +[INFO ] Installing airflow operator +[INFO ] Installing commons operator +[INFO ] Installing secret operator +---- + +If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have `kind` installed and run the following command: + +[source,console] +---- +$ stackablectl operator install airflow commons secret --kind-cluster +[INFO ] Creating kind cluster stackable-data-platform +Creating cluster "stackable-data-platform" ... 
+ βœ“ Ensuring node image (kindest/node:v1.21.1) πŸ–Ό + βœ“ Preparing nodes πŸ“¦ πŸ“¦ πŸ“¦ πŸ“¦ + βœ“ Writing configuration πŸ“œ + βœ“ Starting control-plane πŸ•ΉοΈ + βœ“ Installing CNI πŸ”Œ + βœ“ Installing StorageClass πŸ’Ύ + βœ“ Joining worker nodes 🚜 +Set kubectl context to "kind-stackable-data-platform" +You can now use your cluster with: + +kubectl cluster-info --context kind-stackable-data-platform + +Not sure what to do next? πŸ˜… Check out https://kind.sigs.k8s.io/docs/user/quick-start/ +[INFO ] Installing airflow operator +[INFO ] Installing commons operator +[INFO ] Installing secret operator +---- + +With this command we installed the operator for Apache Airflow as well as two operators needed internally by the Stackable Data Platform (commons and secret). + +As we didn't specify a specific version to install, the operators were installed in the latest nightly version - built from the main branch of the operators. + +If you want to install a specific version, you can add the version to each operator to install as follows: + +[source,console] +---- +$ stackablectl operator install airflow=0.4.0 commons=0.2.0 secret=0.5.0 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing secret operator in version 0.5.0 +---- + +As you can see, the three operators where installed in the requested version. + +Remember: If you want to install a recommended and tested set of operator versions, have a look at the xref:commands/release.adoc[] command. + +== List installed operators +After installing some operators, you can list which operators are installed in your Kubernetes cluster. 
+ +[source,console] +---- +$ stackablectl operator installed +OPERATOR VERSION NAMESPACE STATUS LAST UPDATED +airflow 0.5.0-nightly default deployed 2022-07-15 09:44:00.86514992 +0200 CEST +commons 0.3.0-nightly default deployed 2022-07-15 09:44:03.215214235 +0200 CEST +secret 0.6.0-nightly default deployed 2022-07-15 09:44:13.526843785 +0200 CEST +---- + +In case you have installed the operators in a specific version, the specific versions will be shown instead of the `*-nightly` versions. + +== Uninstall operator +To uninstall the operators again you can use the `uninstall` command + +[source,console] +---- +$ stackablectl operator uninstall airflow commons secret +[INFO ] Uninstalling airflow operator +[INFO ] Uninstalling commons operator +[INFO ] Uninstalling secret operator +---- diff --git a/docs/modules/ROOT/pages/commands/release.adoc b/docs/modules/ROOT/pages/commands/release.adoc new file mode 100644 index 00000000..9e894926 --- /dev/null +++ b/docs/modules/ROOT/pages/commands/release.adoc @@ -0,0 +1,151 @@ += Release + +A release is a bundle of operators of a specific stable version. The stable versions of the operators are tested and proven to work hand in hand. +If you want to install a single individual operator, have a look at the xref:commands/operator.adoc[] command. 
+ +== Browse available releases +To list the available Stackable releases run the following command: + +[source,console] +---- +$ stackablectl release list +RELEASE RELEASE DATE DESCRIPTION +22.06 2022-06-30 First official release of the Stackable Data Platform +---- + +Detailed information of a release can be queried with the `describe` command: + +[source,console] +---- +$ stackablectl release describe 22.06 +Release: 22.06 +Release date: 2022-06-30 +Description: First official release of the Stackable Data Platform +Included products: + +PRODUCT OPERATOR VERSION +airflow 0.4.0 +commons 0.2.0 +druid 0.6.0 +hbase 0.3.0 +hdfs 0.4.0 +hive 0.6.0 +kafka 0.6.0 +nifi 0.6.0 +opa 0.9.0 +secret 0.5.0 +spark-k8s 0.3.0 +superset 0.5.0 +trino 0.4.0 +zookeeper 0.10.0 +---- + +In the output you can see which product operators are included in the specific release. + +== Install release +If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. 
+After that run the following command: + +[source,console] +---- +$ stackablectl release install 22.06 +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +---- + +If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have `kind` installed and run the following command: + +[source,console] +---- +$ stackablectl release install 22.06 --kind-cluster +[INFO ] Creating kind cluster stackable-data-platform +Creating cluster "stackable-data-platform" ... + βœ“ Ensuring node image (kindest/node:v1.21.1) πŸ–Ό + βœ“ Preparing nodes πŸ“¦ πŸ“¦ πŸ“¦ πŸ“¦ + βœ“ Writing configuration πŸ“œ + βœ“ Starting control-plane πŸ•ΉοΈ + βœ“ Installing CNI πŸ”Œ + βœ“ Installing StorageClass πŸ’Ύ + βœ“ Joining worker nodes 🚜 +Set kubectl context to "kind-stackable-data-platform" +You can now use your cluster with: + +kubectl cluster-info --context kind-stackable-data-platform + +Have a nice day! 
πŸ‘‹ +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +---- + +After installing the release we can list the running operators with the xref:commands/operator.adoc[] command. + +[source,console] +---- +$ stackablectl operator installed +OPERATOR VERSION NAMESPACE STATUS LAST UPDATED +airflow 0.4.0 default deployed 2022-07-15 10:00:25.499615024 +0200 CEST +commons 0.2.0 default deployed 2022-07-15 10:00:27.868162264 +0200 CEST +druid 0.6.0 default deployed 2022-07-15 10:00:38.219966654 +0200 CEST +hbase 0.3.0 default deployed 2022-07-15 10:00:46.581528077 +0200 CEST +hdfs 0.4.0 default deployed 2022-07-15 10:00:56.949394849 +0200 CEST +hive 0.6.0 default deployed 2022-07-15 10:01:07.314849464 +0200 CEST +kafka 0.6.0 default deployed 2022-07-15 10:01:09.702246063 +0200 CEST +nifi 0.6.0 default deployed 2022-07-15 10:01:12.059869868 +0200 CEST +opa 0.9.0 default deployed 2022-07-15 10:01:14.413966761 +0200 CEST +secret 0.5.0 default deployed 2022-07-15 10:01:16.759818535 +0200 CEST +spark-k8s 0.3.0 default deployed 2022-07-15 10:01:17.149187107 +0200 CEST +superset 0.5.0 default deployed 2022-07-15 10:01:19.529351352 +0200 CEST +trino 0.4.0 default deployed 2022-07-15 10:01:29.867283641 +0200 CEST +zookeeper 0.10.0 default deployed 2022-07-15 
10:01:40.24662955 +0200 CEST +---- + + +== Uninstall release +To uninstall all operators contained in a release regardless of their actual installed versions, you can use the uninstall command: + +[source,console] +---- +$ stackablectl release uninstall 22.06 +[INFO ] Uninstalling release 22.06 +[INFO ] Uninstalling airflow operator +[INFO ] Uninstalling commons operator +[INFO ] Uninstalling druid operator +[INFO ] Uninstalling hbase operator +[INFO ] Uninstalling hdfs operator +[INFO ] Uninstalling hive operator +[INFO ] Uninstalling kafka operator +[INFO ] Uninstalling nifi operator +[INFO ] Uninstalling opa operator +[INFO ] Uninstalling secret operator +[INFO ] Uninstalling spark-k8s operator +[INFO ] Uninstalling superset operator +[INFO ] Uninstalling trino operator +[INFO ] Uninstalling zookeeper operator +---- diff --git a/docs/modules/ROOT/pages/commands/services.adoc b/docs/modules/ROOT/pages/commands/services.adoc new file mode 100644 index 00000000..16b060c8 --- /dev/null +++ b/docs/modules/ROOT/pages/commands/services.adoc @@ -0,0 +1,44 @@ += Services +In this context a (Stackable) service is a running instance of a data product. This is different from the meaning of a Kubernetes service which is an abstract way to expose an application running on a set of pods as a network service. + +== List running services +The `stackablectl services` command allows to inspect the running services of the Stackable Data Platform. +Currently you can only get a read-only view of the running services, future versions may allow to e.g. uninstall running services. 
+ +An example invocation looks as follows: + +[source,console] +---- +$ stackablectl services list +PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS +airflow airflow default webserver-airflow: http://172.18.0.5:32290 Admin user: airflow, password: airflow +druid druid default router-http: http://172.18.0.2:30245 + coordinator-http: http://172.18.0.4:30506 +superset superset default external-superset: http://172.18.0.2:31891 Admin user: admin, password: admin +zookeeper druid-zookeeper default zk: 172.18.0.5:30890 +minio minio-druid default http: http://172.18.0.4:32173 Third party service + console-http: http://172.18.0.4:30982 Admin user: root, password: rootroot +---- + +You can also + +- Show services in all namespaces +- Redact the passwords from the output in case you want to share the list of services without giving out the admin credentials +- Print the installed product versions + +To achieve this you can use the following command: + +[source,console] +---- +$ stackablectl services list --all-namespaces --redact-credentials --show-versions +PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS +airflow airflow default webserver-airflow: http://172.18.0.5:32290 Admin user: airflow, password: + version 2.2.5-python39-stackable0.3.0 +druid druid default router-http: http://172.18.0.2:30245 version 0.23.0-stackable0.1.0 + coordinator-http: http://172.18.0.4:30506 +superset superset default external-superset: http://172.18.0.2:31891 Admin user: admin, password: + version 1.5.1-stackable0.2.0 +zookeeper druid-zookeeper default zk: 172.18.0.5:30890 version 3.8.0-stackable0.7.1 +minio minio-druid default http: http://172.18.0.4:32173 Third party service + console-http: http://172.18.0.4:30982 Admin user: root, password: +---- diff --git a/docs/modules/ROOT/pages/commands/stack.adoc b/docs/modules/ROOT/pages/commands/stack.adoc new file mode 100644 index 00000000..b2bffe6c --- /dev/null +++ b/docs/modules/ROOT/pages/commands/stack.adoc @@ -0,0 +1,111 @@ += Stack +A stack is a 
collection of ready-to-use Stackable data products as well as required third-party services like Postgresql or MinIO. +It is tied to a specific release of the Stackable Data Platform, which will provide the required operators for the Stack. + +== Browse available stacks +To list the available stacks, run the following command: + +[source,console] +---- +$ stackablectl stack list +STACK STACKABLE RELEASE DESCRIPTION +druid-superset-s3 22.06 Stack containing MinIO, Druid and Superset for data visualization +airflow 22.06 Stack containing Airflow scheduling platform +---- + +Detailed information of a stack can be queried with the `describe` command: + +[source,console] +---- +$ stackablectl stack describe druid-superset-s3 +Stack: druid-superset-s3 +Description: Stack containing MinIO, Druid and Superset for data visualization +Stackable release: 22.06 +Labels: druid, superset, minio, s3 +---- + +Future version of `stackablectl` will allow to search for stacks based on the labels. + +== Install stack +If you want to access a Kubernetes cluster, make sure your https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`] Kubernetes client is configured to interact with the Kubernetes cluster. 
+After that run the following command + +[source,console] +---- +$ stackablectl stack install druid-superset-s3 +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +[INFO ] Installing components of stack druid-superset-s3 +[INFO ] Installed stack druid-superset-s3 +---- + +If you don't have a Kubernetes cluster available, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes cluster for you. +Make sure you have `kind` installed and run the following command: + +[source,console] +---- +$ stackablectl stack install druid-superset-s3 --kind-cluster +[INFO ] Creating kind cluster stackable-data-platform +Creating cluster "stackable-data-platform" ... + βœ“ Ensuring node image (kindest/node:v1.21.1) πŸ–Ό + βœ“ Preparing nodes πŸ“¦ πŸ“¦ πŸ“¦ πŸ“¦ + βœ“ Writing configuration πŸ“œ + βœ“ Starting control-plane πŸ•ΉοΈ + βœ“ Installing CNI πŸ”Œ + βœ“ Installing StorageClass πŸ’Ύ + βœ“ Joining worker nodes 🚜 +Set kubectl context to "kind-stackable-data-platform" +You can now use your cluster with: + +kubectl cluster-info --context kind-stackable-data-platform + +Have a nice day! 
πŸ‘‹ +[INFO ] Installing release 22.06 +[INFO ] Installing airflow operator in version 0.4.0 +[INFO ] Installing commons operator in version 0.2.0 +[INFO ] Installing druid operator in version 0.6.0 +[INFO ] Installing hbase operator in version 0.3.0 +[INFO ] Installing hdfs operator in version 0.4.0 +[INFO ] Installing hive operator in version 0.6.0 +[INFO ] Installing kafka operator in version 0.6.0 +[INFO ] Installing nifi operator in version 0.6.0 +[INFO ] Installing opa operator in version 0.9.0 +[INFO ] Installing secret operator in version 0.5.0 +[INFO ] Installing spark-k8s operator in version 0.3.0 +[INFO ] Installing superset operator in version 0.5.0 +[INFO ] Installing trino operator in version 0.4.0 +[INFO ] Installing zookeeper operator in version 0.10.0 +[INFO ] Installing components of stack druid-superset-s3 +[INFO ] Installed stack druid-superset-s3 +---- + +After installing the stack, we can access the running services using the xref:commands/services.adoc[] command: + +[source,console] +---- +$ stackablectl services list +PRODUCT NAME NAMESPACE ENDPOINTS EXTRA INFOS +druid druid default router-http: http://172.18.0.2:30245 + coordinator-http: http://172.18.0.4:30506 +superset superset default external-superset: http://172.18.0.2:31891 Admin user: admin, password: admin +zookeeper druid-zookeeper default zk: 172.18.0.5:30890 +minio minio-druid default http: http://172.18.0.4:32173 Third party service + console-http: http://172.18.0.4:30982 Admin user: root, password: rootroot +---- + +== Uninstall stack +Currently there is no support for uninstalling a stack again. +Maybe a solution would be to uninstall the components of the stack but leave the release running. 
diff --git a/docs/modules/ROOT/pages/customization.adoc b/docs/modules/ROOT/pages/customization.adoc new file mode 100644 index 00000000..f58e2d99 --- /dev/null +++ b/docs/modules/ROOT/pages/customization.adoc @@ -0,0 +1,46 @@ += Customization +If you're working for a large company, chances are that there are multiple teams using the Stackable Data Platform. +A single team can also operate multiple Stackable Data Platforms. +`stackablectl` is build in a way customers or even single developers can define their own release, stack and even demo! +This way it is possible to cover the following use-cases. + +Any additional demos/stacks/releases you specify, will be added to the already existing ones provided by Stackable. + +== Add a new demo +=== Benefits +When you have developed a new data pipeline or data product you often want to show it in action to other colleagues or potential clients. +To easily achieve this you can create your own demo so that it can easily be reproduced and/or shared with other people. + +=== Adding a new demo +First you must create a `mycorp-demos.yaml` containing demos according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/demos.yaml[the Stackable provided demos]. + +After creating the `mycorp-demos.yaml` file it can be added to the available demos in `stackablectl` via the CLI argument `--additional-demos-file mycorp-demos.yaml`. +The argument to `--additional-demos-file` can be either a path to a file on the local filesystem or an URL. +By using an URL the demos file can be put into a central Git repository and referenced by all teams or clients. +Multiple `--additional-demos-file` flags can be specified to include multiple demo files. +Every additional demo will we added to the already existing demos in `stackablectl`, so all the available demo files will be merged. 
+ +== Add a new stack +=== Benefits +If your company or clients have multiple similar setups or reference architectures, it could make sense to make them easily available to all employees or clients. +In the custom defined stack all product versions are pinned as well, so you can easily spin up a stack containing the exact same versions as your production setup. +You can use your defined stack to give it to colleagues or potential customers to show the overall architecture of the Data Platform you're going to build. + +=== Adding a new stack +For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new stack. +For a custom stack you need to create a `mycorp-stacks.yaml` containing stacks according to the format defined by https://github.com/stackabletech/stackablectl/blob/main/stacks/stacks-v1.yaml[the Stackable provided stacks]. +You can then add it to `stackablectl` with the flag `--additional-stacks-file`. + + +== Add a new release +=== Benefits +If advanced users of the Stackable Platform want to define their own internal release within their company, they can easily add their own release. +This has the following benefits: + +- Same operator versions across the whole company. This produces more uniform environments and makes debugging and helping other teams easier. +- If the company is only interested in a subset of the available operators you can only add your relevant operators into your release and not install all the other operators. + +=== Adding a new release +For the overall procedure have a look at <<_adding_a_new_demo>> on how to add a new release. +For a custom release you need to create a `mycorp-releases.yaml` containing releases according to the format defined by https://github.com/stackabletech/release/blob/main/releases.yaml[the Stackable provided releases]. +You can then add it to `stackablectl` with the flag `--additional-releases-file`. 
diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 5387bf2f..21fc4aa4 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -1,5 +1,49 @@ = stackablectl -The `stackablectl` command line tool is used to interact with Stackable operators. Either individually or whole data pipelines consisting of multiple operators and multiple deployments of tools. +The `stackablectl` command line tool is used to interact with the Stackable Data Platform. +It can install individual operators as well as Platform releases. +It also ships with a set of pre-built demos that utilize different data products of the Platform to get e.g. an end-to-end data pipeline. -Go to xref:installation.adoc[] to install the tool and then consult the xref:quickstart.adoc[] page to get started. +The installation of `stackablectl` is described in xref:installation.adoc[]. + +To just get a Quickstart please follow xref:quickstart.adoc[]. + +In general, use `stackablectl --help` to find out more about how to use the tool or how to use specific options. +This also works with subcommands, i.e. `stackablectl release install --help` will show the help for installing a release. +Often you can also use an abbreviation instead of typing out all of the commands. +E.g. `stackablectl operator list` can also be written as `stackablectl op ls` + +A Kubernetes cluster is required in order to use the Stackable Data Platform as all products and operators run on Kubernetes. +If you don't have a Kubernetes cluster, `stackablectl` can spin up a https://kind.sigs.k8s.io/[kind] Kubernetes Cluster for you. + +The deployed services are separated into three different layers as illustrated below: + +image::layers.png[Layers of the deployed services] + +== Operators +This layer consists of Stackable operators managing the individual data products. 
+They can either be installed one by one with the command `stackablectl operator` or from a release with `stackablectl release` which is preferred. +A release is a well-playing bundle of operators that get released approximately every 2 months. + +== Stacks +A stack is a collection of ready-to-use Stackable data products as well as needed third-party services like Postgresql or MinIO. + +Stacks are installed with the command `stackablectl stack`. +A stack needs a release (of Stackable operators) to run on. +To achieve this a stacks has a dependency on a release which gets automatically installed when a stack is installed. + +== Demos +A demo is an end-to-end demonstration of the usage of the Stackable Data Platform. +It contains + +. Installing a Stackable release +. Spinning up a stack +. Performing the actual demo +.. Prepare some test data +.. Process test data +.. Visualize results (optional) + +Demos are installed with the command `stackablectl demo`. +A demo needs a stack to run on. +To achieve this a demo has a dependency on a stack which gets automatically installed when a demo is installed. +The stack in turn will install the needed Stackable release. diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc index a0e70d95..785ddbb4 100644 --- a/docs/modules/ROOT/pages/installation.adoc +++ b/docs/modules/ROOT/pages/installation.adoc @@ -1,60 +1,155 @@ = Installation -The `stackablectl` commandline tool does not require a runtime; it is a binary that can be executed on it's own. Below are the installation instructions for <>, <> and <>. +== Pre-compiled binary +Stackable ships pre-compiled binaries of `stackablectl` which should work on most environments such as Windows, macOS, and Linux distros like Ubuntu and Arch. -== Linux +Below are the installation instructions for <>, <> and <>. 
+If the binary does not work for you, you can always <<_build_stackablectl_from_source>> + +=== Linux Download the `stackablectl-x86_64-unknown-linux-gnu` binary file from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release], then rename the file to `stackabelctl`: -[source,shell] +[source,console] ---- -mv stackablectl-x86_64-unknown-linux-gnu stackablectl +$ mv stackablectl-x86_64-unknown-linux-gnu stackablectl ---- and mark it as executable: -[source,shell] +[source,console] ---- -chmod +x stackablectl +$ chmod +x stackablectl ---- You can now invoke it with: -[source,shell] +[source,console] +---- +$ ./stackablectl +---- + +If you want to be able to call it from everywhere (not only the directory you downloaded it to) you can add it to your system with the following command: + +[source,console] ---- -./stackablectl +$ sudo mv stackablectl /usr/bin/stackablectl ---- -== MacOS +=== Windows -// TODO someone with a mac should verify this +Download `stackablectl-x86_64-pc-windows-gnu.exe` from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. +You can simply execute it. +If you want to execute it from anywhere in your system, you need to add it to the system `PATH`. -Download the `stackablectl-x86_64-apple-darwin` binary file for Intel based Macs or `stackablectl-aarch64-apple-darwin` binary file for M1 based Macs from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. Then rename the file to `stackablectl`: +=== macOS +Download the `stackablectl-x86_64-apple-darwin` binary file for Intel based Macs or `stackablectl-aarch64-apple-darwin` binary file for ARM based Macs from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. 
+Then rename the file to `stackablectl`: -[source,shell] +[source,console] ---- -mv stackablectl-x86_64-apple-darwin stackablectl +$ mv stackablectl-x86_64-apple-darwin stackablectl # or -mv stackablectl-aarch64-apple-darwin stackablectl +$ mv stackablectl-aarch64-apple-darwin stackablectl ---- and mark it as executable: -[source,shell] +[source,console] ---- -chmod +x stackablectl +$ chmod +x stackablectl ---- You can now invoke it with: -[source,shell] +[source,console] +---- +$ ./stackablectl +---- + +If macOS denies the execution of stackablectl go to `Settings` -> `Security & Privacy` -> `General`. Here you will see a pop up asking if you want to allow access for `stackablectl`. You must allow access. + +== Build stackablectl from source +To build `stackablectl` from source you need to have the following tools installed: + +* Rust compiler +** Needed for compiling source code of `stackablectl` itself +* Go compiler +** Needed for compiling a wrapper around the Go lib `go-helm-client` +* C compiler +** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate] a C compiler is needed to compile openssl from source +* Perl +** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate] perl is needed to compile openssl from source +* Make +** As we use the vendored feature of the https://crates.io/crates/openssl[openssl crate] Make is needed to compile openssl from source + +If you have the required tools available, you need to clone the `stackablectl` repo https://github.com/stackabletech/stackablectl and invoke the build with + +[source,console] ---- -./stackablectl +$ cargo build --release ---- - If MacOs denies the execution of stackablectl go to Settings --> Security & Privacy --> General. Here you will see a pop up asking if you want to allow access for β€˜stackablectl’. Now allow access. +After a successful build the binary will be placed in `target/release/stackablectl`. 
+Copy it to your systems path to access it from anywhere if you like. + +[source,console] +---- +$ sudo cp target/release/stackablectl /usr/bin/stackablectl +---- + +== Configure auto-completion +`stackablectl` provides completion scripts for the major shells out there. +It uses the same mechanism as `kubectl` does, so if you have any problems following this steps, looking at https://kubernetes.io/docs/tasks/tools/included/[their installation documentation] may help you out. + +All of the https://docs.rs/clap_complete/3.2.3/clap_complete/shells/enum.Shell.html[supported shells of] https://crates.io/crates/clap_complete[`clap_complete`] are supported. +As of `07/2022` this includes the following shells: + +* <> +* Elvish +* <> +* <> +* <> +=== Bash +The stackablectl completion script for Bash can be generated with the command `stackablectl completion bash`. Sourcing the completion script in your shell enables stackablectl autocompletion. -== Windows +Install the package `bash-completion` e.g. via `apt install bash-completion`. -Download `stackablectl-x86_64-pc-windows-gnu.exe` from the link:https://github.com/stackabletech/stackablectl/releases/latest[latest release]. You can simply execute it. +After that run the following command to source the completion script and tell bash to source it every time you start a new shell. + +[source,console] +---- +$ source <(stackablectl completion bash) +$ echo 'source <(stackablectl completion bash)' >> ~/.bashrc +---- + +=== Fish +The stackablectl completion script for Fish can be generated with the command `stackablectl completion fish`. Sourcing the completion script in your shell enables stackablectl autocompletion. + +[source,console] +---- +$ stackablectl completion fish | source +$ echo 'stackablectl completion fish | source' >> ~/.config/fish/config.fish +---- + +=== PowerShell +The stackablectl completion script for PowerShell can be generated with the command `stackablectl completion powershell`. 
+ +To do so in all your shell sessions, add the following line to your `$PROFILE` file: + +[source,console] +---- +kubectl completion powershell | Out-String | Invoke-Expression +---- + +This command will regenerate the auto-completion script on every PowerShell start up. + +=== Zsh +The stackablectl completion script for Zsh can be generated with the command `stackablectl completion zsh`. Sourcing the completion script in your shell enables stackablectl autocompletion. + +[source,console] +---- +$ source <(stackablectl completion zsh) +$ echo 'source <(stackablectl completion zsh)' >> ~/.zshrc +---- diff --git a/docs/modules/ROOT/pages/quickstart.adoc b/docs/modules/ROOT/pages/quickstart.adoc index d83564b1..5f2b3e16 100644 --- a/docs/modules/ROOT/pages/quickstart.adoc +++ b/docs/modules/ROOT/pages/quickstart.adoc @@ -1,85 +1,5 @@ = Quickstart -`stackablectl` interacts with the Stackable platform at three abstraction levels: The <>, <> and <>. These are to interact with individual operators, a whole release of the platform, or specific combinations of products that form a software stack for a specific use case. +This pages wait's until the xref:commands/demo.adoc[] is ready. -In general, use `stackablectl --help` to find out more about how to use the tool or how to use specific options. This also works with subcommands i.e.: `stackablectl release install --help` will show the help for installing a release. - -A running Kubernetes cluster is required to use the tool. All operators and products run on Kubernetes. - -== Operator level - -Using the `stackablectl operator` command, available operators can be listed, installed and uninstalled. 
- -For example, `stackablectl operator list` shows output similar to: - ----- -OPERATOR STABLE VERSIONS -airflow 0.3.0, 0.2.0, 0.1.0 -commons 0.1.0 -druid 0.5.0, 0.4.0, 0.3.0, 0.2.0, 0.1.0 -hbase 0.2.0 -hdfs 0.3.0 -hive 0.5.0, 0.3.0 -kafka 0.5.0, 0.4.0 -nifi 0.5.0, 0.4.0 -opa 0.8.0, 0.7.0, 0.6.0 -secret 0.4.0, 0.3.0, 0.2.0, 0.1.0 -spark 0.5.0, 0.4.0 -spark-k8s 0.1.0 -superset 0.4.0, 0.3.0, 0.2.0, 0.1.0 -trino 0.3.1, 0.3.0, 0.2.0 -zookeeper 0.9.0, 0.8.0, 0.7.0, 0.6.0 ----- - -You can then use this list to install an operator, for example: - -[shell] ----- -stackablectl operator install zookeeper ----- -Which will print ----- -[INFO ] Installing zookeeper operator ----- - -== Release level - -Using the `stackablectl release` command, available releases can be listed, installed and uninstalled. A release is a collection of operator versions that work well together. - - -The list command: ----- -stackablectl release list ----- -shows output similar to: ----- -RELEASE RELEASE DATE DESCRIPTION -alpha-3 2022-02-14 Second release which added Airflow, Druid and Superset -alpha-2 2021-10-29 First release of the Stackable Data Platform ----- -You can then install a release: ----- -stackablectl release install alpha-3 ----- -Which will install all the operators in that release at the version for that release: ----- -[INFO ] Installing release alpha-3 -[INFO ] Installing airflow operator in version 0.2.0 -[INFO ] Installing druid operator in version 0.4.0 -[INFO ] Installing hbase operator in version 0.2.0 -[INFO ] Installing hdfs operator in version 0.3.0 -[INFO ] Installing hive operator in version 0.5.0 -[INFO ] Installing kafka operator in version 0.5.0 -[INFO ] Installing nifi operator in version 0.5.0 -[INFO ] Installing opa operator in version 0.8.0 -[INFO ] Installing regorule operator in version 0.6.0 -[INFO ] Installing secret operator in version 0.2.0 -[INFO ] Installing spark operator in version 0.5.0 -[INFO ] Installing superset operator in version 0.3.0 -[INFO ] 
Installing trino operator in version 0.3.1 -[INFO ] Installing zookeeper operator in version 0.9.0 ----- - -== Stack level - -Coming soon! \ No newline at end of file +When the demo command is available we will browse the demos and install a demo together. diff --git a/docs/modules/ROOT/pages/troubleshooting.adoc b/docs/modules/ROOT/pages/troubleshooting.adoc new file mode 100644 index 00000000..04343ac5 --- /dev/null +++ b/docs/modules/ROOT/pages/troubleshooting.adoc @@ -0,0 +1,52 @@ += Troubleshooting + +== No internet connectivity +`stackablectl` uses an Internet connection to always know of all the available versions, releases, stacks and demos. +To achieve this the following online services will be contacted: + +[%autowidth.stretch] +|=== +| URL | Purpose + +| https://repo.stackable.tech/repository/helm-stable/index.yaml +| Retrieve the list of current operator stable versions + +| https://repo.stackable.tech/repository/helm-dev/index.yaml +| Retrieve the list of current operator development versions + +| https://repo.stackable.tech/repository/helm-test/index.yaml +| Retrieve the list of current operator test versions + +| https://raw.githubusercontent.com/stackabletech/release/main/releases.yaml +| List of releases provided by Stackable + +| https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/stacks-v1.yaml +| List of stacks provided by Stackable + +| https://raw.githubusercontent.com/stackabletech/stackablectl/main/demos/demos-v1.yaml +| List of demos provided by Stackable + +|=== + +=== Mirror helm-charts +To allow stackablectl to retrieve the current list of operators you must mirror the `https://repo.stackable.tech/repository/helm-.*/index.yaml` files to some local URL. +If the file is mirrored e.g. 
to `https://my.corp/stackable/repository/helm-stable/index.yaml`, you need to specify the following arguments to `stackablectl`: + +[source,console] +---- +$ stackablectl --helm-repo-stackable-stable https://my.corp/stackable/repository/helm-stable operator list +---- + +=== Mirror releases/stacks/demos files +You need to mirror the URL to either a URL or a file on disk. +You can then specify the mirrored file to be included via `--additional-releases-file`, `--additional-stacks-file`, or `--additional-demos-file`, e.g. + +[source,console] +---- +$ stackablectl --additional-releases-file=/home/sbernauer/Downloads/releases.yaml release list +---- + +== `panic: open /tmp/.helmcache/stackable-stable-index.yaml: permission denied` +or `panic: open /tmp/.helmrepo: permission denied`. + +See https://github.com/stackabletech/stackablectl/issues/39 diff --git a/docs/readme/images/layers.png b/docs/readme/images/layers.png deleted file mode 100644 index cdc8d910..00000000 Binary files a/docs/readme/images/layers.png and /dev/null differ diff --git a/go-helm-wrapper/main.go b/go-helm-wrapper/main.go index 4ee190c5..a5054527 100644 --- a/go-helm-wrapper/main.go +++ b/go-helm-wrapper/main.go @@ -19,7 +19,7 @@ func main() { } //export go_install_helm_release -func go_install_helm_release(releaseName string, chartName string, chartVersion string, namespace string, suppressOutput bool) { +func go_install_helm_release(releaseName string, chartName string, chartVersion string, valuesYaml string, namespace string, suppressOutput bool) { helmClient := getHelmClient(namespace, suppressOutput) timeout, _ := time.ParseDuration("10m") @@ -27,6 +27,7 @@ func go_install_helm_release(releaseName string, chartName string, chartVersion ReleaseName: releaseName, ChartName: chartName, Version: chartVersion, + ValuesYaml: valuesYaml, Namespace: namespace, UpgradeCRDs: true, Wait: true, diff --git a/src/arguments.rs b/src/arguments.rs index e35233e8..3eb59dc9 100644 --- 
a/src/arguments.rs +++ b/src/arguments.rs @@ -1,4 +1,7 @@ -use crate::{operator::CliCommandOperator, release::CliCommandRelease, stack::CliCommandStack}; +use crate::{ + operator::CliCommandOperator, release::CliCommandRelease, services::CliCommandServices, + stack::CliCommandStack, +}; use clap::{ArgEnum, Command, Parser, ValueHint}; use clap_complete::{generate, Generator, Shell}; use log::LevelFilter; @@ -53,21 +56,21 @@ pub struct CliArgs { /// Adds a YAML file containing custom releases /// - /// If you don't have access to the Stackable GitHub repos or you want to maintain your own releases you can specify additional YAML files containing release information. - /// Have a look [here](https://raw.githubusercontent.com/stackabletech/stackablectl/main/releases.yaml) for the structure. - /// Can either be an URL or a path to a file e.g. `https://my.server/my-releases.yaml` or '/etc/my-releases.yaml' or `C:\Users\Sebastian\my-releases.yaml`. + /// If you do not have access to the Stackable repositories on GitHub or if you want to maintain your own releases, you can specify additional YAML files containing release information. + /// Have a look at for the structure. + /// Can either be a URL or a path to a file, e.g. `https://my.server/my-releases.yaml`, '/etc/my-releases.yaml' or `C:\Users\Bob\my-releases.yaml`. /// Can be specified multiple times. #[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] - pub additional_release_files: Vec, + pub additional_releases_file: Vec, /// Adds a YAML file containing custom stacks /// - /// If you don't have access to the Stackable GitHub repos or you want to maintain your own stacks you can specify additional YAML files containing stack information. - /// Have a look [here](https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml) for the structure. - /// Can either be an URL or a path to a file e.g. 
`https://my.server/my-stacks.yaml` or '/etc/my-stacks.yaml' or `C:\Users\Sebastian\my-stacks.yaml`. + /// If you do not have access to the Stackable repositories on GitHub or if you want to maintain your own stacks, you can specify additional YAML files containing stack information. + /// Have a look at for the structure. + /// Can either be a URL or a path to a file, e.g. `https://my.server/my-stacks.yaml`, '/etc/my-stacks.yaml' or `C:\Users\Bob\my-stacks.yaml`. /// Can be specified multiple times. #[clap(long, multiple_occurrences(true), value_hint = ValueHint::FilePath)] - pub additional_stack_files: Vec, + pub additional_stacks_file: Vec, } #[derive(Parser)] @@ -80,10 +83,14 @@ pub enum CliCommand { #[clap(subcommand, alias("r"), alias("re"))] Release(CliCommandRelease), - /// This subcommand interacts with stacks, which are ready-to-use combinations of products. + /// This subcommand interacts with stacks which are ready-to-use combinations of products. #[clap(subcommand, alias("s"), alias("st"))] Stack(CliCommandStack), + /// This subcommand interacts with deployed services of products. + #[clap(subcommand, alias("svc"))] + Services(CliCommandServices), + /// Output shell completion code for the specified shell. Completion(CliCommandCompletion), } diff --git a/src/helm.rs b/src/helm.rs index d6b9e364..e6fa74c6 100644 --- a/src/helm.rs +++ b/src/helm.rs @@ -1,15 +1,14 @@ use crate::{ helpers::{c_str_ptr_to_str, GoString}, - CliArgs, + CliArgs, NAMESPACE, }; use cached::proc_macro::cached; use lazy_static::lazy_static; use log::{debug, error, info, warn, LevelFilter}; use serde::Deserialize; -use std::{collections::HashMap, os::raw::c_char, process::exit, sync::Mutex}; +use std::{collections::HashMap, error::Error, os::raw::c_char, process::exit, sync::Mutex}; lazy_static! 
{ - pub static ref NAMESPACE: Mutex = Mutex::new(String::new()); pub static ref HELM_REPOS: Mutex> = Mutex::new(HashMap::new()); pub static ref LOG_LEVEL: Mutex = Mutex::new(LevelFilter::Trace); } @@ -19,8 +18,9 @@ extern "C" { release_name: GoString, chart_name: GoString, chart_version: GoString, + values_yaml: GoString, namespace: GoString, - supress_output: bool, + suppress_output: bool, ); fn go_uninstall_helm_release( release_name: GoString, @@ -58,7 +58,7 @@ pub fn handle_common_cli_args(args: &CliArgs) { } /// Installs the specified helm chart with the release_name -/// If the release is already running it errors out (maybe in the future prompt the user if it should be deleted) +/// If the release is already running in a different version it errors out (maybe in the future prompt the user if it should be deleted) /// If the chart_version is None the version `>0.0.0-0` will be used. /// This is equivalent to the `helm install` flag `--devel`. pub fn install_helm_release_from_repo( @@ -67,34 +67,32 @@ pub fn install_helm_release_from_repo( repo_name: &str, chart_name: &str, chart_version: Option<&str>, -) { + values_yaml: Option<&str>, +) -> Result<(), Box> { if helm_release_exists(release_name) { - let helm_release = get_helm_release(release_name).unwrap_or_else(|| { - panic!( - "Failed to find helm release {release_name} besides helm saying it should be there" - ) - }); + let helm_release = get_helm_release(release_name)?.ok_or(format!( + "Failed to find helm release {release_name} besides helm saying it should be there" + ))?; let current_version = helm_release.version; match chart_version { None => { warn!("The release {release_name} in version {current_version} is already installed and you have not requested a specific version, not re-installing it"); - return; + return Ok(()); } Some(chart_version) => { if chart_version == current_version { info!("The release {release_name} in version {current_version} is already installed, not installing it"); - return; 
+ return Ok(()); } else { - error!("The helm release {release_name} is already installed in version {current_version} but you requested to install it in version {chart_version}. \ - Use \"stackablectl operator uninstall {operator_name}\" to uninstall it."); - exit(1); + return Err(format!("The helm release {release_name} is already installed in version {current_version} but you requested to install it in version {chart_version}. \ + Use \"stackablectl operator uninstall {operator_name}\" to uninstall it.").into()); } } } } - match HELM_REPOS.lock().unwrap().get(repo_name) { + match HELM_REPOS.lock()?.get(repo_name) { None => { error!("I don't know about the helm repo {repo_name}"); exit(1); @@ -108,22 +106,26 @@ pub fn install_helm_release_from_repo( let full_chart_name = format!("{repo_name}/{chart_name}"); let chart_version = chart_version.unwrap_or(">0.0.0-0"); debug!("Installing helm release {repo_name} from chart {full_chart_name} in version {chart_version}"); - install_helm_release(release_name, &full_chart_name, chart_version); + install_helm_release(release_name, &full_chart_name, chart_version, values_yaml); + + Ok(()) } /// Cached because of slow network calls #[cached] -pub fn get_repo_index(repo_url: String) -> HelmRepo { +pub async fn get_repo_index(repo_url: String) -> Result { let index_url = format!("{repo_url}/index.yaml"); debug!("Fetching helm repo index from {index_url}"); - let resp = reqwest::blocking::get(&index_url) - .unwrap_or_else(|_| panic!("Failed to download helm repo index from {index_url}")) + let index = reqwest::get(&index_url) + .await + .map_err(|err| format!("Failed to download helm repo index from {index_url}: {err}"))? 
.text() - .unwrap_or_else(|_| panic!("Failed to get text from {index_url}")); + .await + .map_err(|err| format!("Failed to get text from {index_url}: {err}"))?; - serde_yaml::from_str(&resp) - .unwrap_or_else(|_| panic!("Failed to parse helm repo index from {index_url}")) + serde_yaml::from_str(&index) + .map_err(|err| format!("Failed to parse helm repo index from {index_url}: {err}")) } pub fn uninstall_helm_release(release_name: &str) { @@ -140,12 +142,18 @@ pub fn uninstall_helm_release(release_name: &str) { } } -fn install_helm_release(release_name: &str, chart_name: &str, chart_version: &str) { +fn install_helm_release( + release_name: &str, + chart_name: &str, + chart_version: &str, + values_yaml: Option<&str>, +) { unsafe { go_install_helm_release( GoString::from(release_name), GoString::from(chart_name), GoString::from(chart_version), + GoString::from(values_yaml.unwrap_or("")), GoString::from(NAMESPACE.lock().unwrap().as_str()), *LOG_LEVEL.lock().unwrap() < LevelFilter::Debug, ) @@ -171,23 +179,23 @@ pub struct Release { pub last_updated: String, } -pub fn helm_list_releases() -> Vec { +pub fn helm_list_releases() -> Result, Box> { let releases_json = c_str_ptr_to_str(unsafe { go_helm_list_releases(GoString::from(NAMESPACE.lock().unwrap().as_str())) }); - serde_json::from_str(releases_json).unwrap_or_else(|_| { - panic!("Failed to parse helm releases JSON from go wrapper {releases_json}") + serde_json::from_str(releases_json).map_err(|err| { + format!("Failed to parse helm releases JSON from go wrapper {releases_json}: {err}").into() }) } -pub fn get_helm_release(release_name: &str) -> Option { - helm_list_releases() +pub fn get_helm_release(release_name: &str) -> Result, Box> { + Ok(helm_list_releases()? 
.into_iter() - .find(|release| release.name == release_name) + .find(|release| release.name == release_name)) } -fn add_helm_repo(name: &str, url: &str) { +pub fn add_helm_repo(name: &str, url: &str) { unsafe { go_add_helm_repo(GoString::from(name), GoString::from(url)) } } diff --git a/src/helpers.rs b/src/helpers.rs index 9871ce5d..f2300b60 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -1,5 +1,6 @@ use log::trace; use std::{ + error::Error, ffi::CStr, fs, io::Write, @@ -29,13 +30,14 @@ pub fn c_str_ptr_to_str(ptr: *const c_char) -> &'static str { c_str.to_str().unwrap() } -pub fn read_from_url_or_file(url_or_file: &str) -> Result { +pub async fn read_from_url_or_file(url_or_file: &str) -> Result { if let Ok(str) = fs::read_to_string(url_or_file) { return Ok(str); } - match reqwest::blocking::get(url_or_file) { - Ok(response) => Ok(response.text().unwrap()), + match reqwest::get(url_or_file).await { + Ok(response) => response.text().await + .map_err(|err| format!("Failed to read from the response of the file or a URL with the name \"{url_or_file}\": {err}")), Err(err) => Err(format!( "Couldn't read a file or a URL with the name \"{url_or_file}\": {err}" )), @@ -43,13 +45,15 @@ pub fn read_from_url_or_file(url_or_file: &str) -> Result { } /// Ensures that the program is installed -/// If the program is not installed it will panic -pub fn ensure_program_installed(program: &str) { - which(program) - .unwrap_or_else(|_| panic!("Could not find a installation of {program}. 
Please have a look at the README of stackablectl on what the prerequisites are: https://github.com/stackabletech/stackablectl")); +/// If the program is not installed it will return an Error +pub fn ensure_program_installed(program: &str) -> Result<(), Box> { + match which(program) { + Ok(_) => Ok(()), + Err(err) => Err(format!("Could not find a installation of {program}: {err}").into()), + } } -pub fn execute_command(mut args: Vec<&str>) -> String { +pub fn execute_command(mut args: Vec<&str>) -> Result> { assert!(!args.is_empty()); let args_string = args.join(" "); @@ -59,13 +63,14 @@ pub fn execute_command(mut args: Vec<&str>) -> String { let output = Command::new(command) .args(args) .output() - .unwrap_or_else(|_| panic!("Failed to get output of the command \"{args_string}\"")); + .map_err(|err| format!("Failed to get output of the command \"{args_string}\": {err}"))?; if !output.status.success() { - panic!( + return Err(format!( "Failed to execute the command \"{args_string}\". 
Stderr was: {}", str::from_utf8(&output.stderr).expect("Could not parse command stderr as utf-8") - ); + ) + .into()); } let stdout_string = @@ -73,10 +78,10 @@ pub fn execute_command(mut args: Vec<&str>) -> String { trace!("Command output for \"{args_string}\":\n{stdout_string}"); - stdout_string.to_string() + Ok(stdout_string.to_string()) } -pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) { +pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) -> Result<(), Box> { assert!(!args.is_empty()); let args_string = args.join(" "); @@ -87,16 +92,17 @@ pub fn execute_command_with_stdin(mut args: Vec<&str>, stdin: &str) { .args(args) .stdin(Stdio::piped()) .spawn() - .unwrap_or_else(|_| panic!("Failed to spawn the command \"{args_string}\"")); + .map_err(|err| format!("Failed to spawn the command \"{args_string}\": {err}"))?; child .stdin .as_ref() - .unwrap() - .write_all(stdin.as_bytes()) - .expect("Failed to write kind cluster definition via stdin"); + .ok_or(format!("Failed to get stdin of command \"{args_string}\""))? 
+ .write_all(stdin.as_bytes())?; - if !child.wait_with_output().unwrap().status.success() { - panic!("Failed to execute the command \"{args_string}\""); + if child.wait_with_output()?.status.success() { + Ok(()) + } else { + Err(format!("Failed to execute the command \"{args_string}\"").into()) } } diff --git a/src/kind.rs b/src/kind.rs index e7041fe0..858199b4 100644 --- a/src/kind.rs +++ b/src/kind.rs @@ -1,3 +1,5 @@ +use std::error::Error; + use crate::helpers; use log::{info, warn}; @@ -29,28 +31,35 @@ nodes: node-labels: node=3 "#; -pub fn handle_cli_arguments(kind_cluster: bool, kind_cluster_name: &str) { +pub fn handle_cli_arguments( + kind_cluster: bool, + kind_cluster_name: &str, +) -> Result<(), Box> { if kind_cluster { - helpers::ensure_program_installed("docker"); - helpers::ensure_program_installed("kind"); + helpers::ensure_program_installed("docker")?; + helpers::ensure_program_installed("kind")?; - create_cluster_if_not_exists(kind_cluster_name); + create_cluster_if_not_exists(kind_cluster_name)?; } + + Ok(()) } -fn create_cluster_if_not_exists(name: &str) { - if check_if_kind_cluster_exists(name) { +fn create_cluster_if_not_exists(name: &str) -> Result<(), Box> { + if check_if_kind_cluster_exists(name)? { warn!("The kind cluster {name} is already running, not re-creating it. 
Use `kind delete cluster --name {name}` to delete it"); } else { info!("Creating kind cluster {name}"); helpers::execute_command_with_stdin( vec!["kind", "create", "cluster", "--name", name, "--config", "-"], KIND_CLUSTER_DEFINITION, - ); + )?; } + + Ok(()) } -fn check_if_kind_cluster_exists(name: &str) -> bool { - let result = helpers::execute_command(vec!["kind", "get", "clusters"]); - result.lines().any(|cluster_name| cluster_name == name) +fn check_if_kind_cluster_exists(name: &str) -> Result> { + let result = helpers::execute_command(vec!["kind", "get", "clusters"])?; + Ok(result.lines().any(|cluster_name| cluster_name == name)) } diff --git a/src/kube.rs b/src/kube.rs index 000b7967..ae364d53 100644 --- a/src/kube.rs +++ b/src/kube.rs @@ -1,66 +1,185 @@ -// /// This function currently uses `kubectl apply`. -// /// In the future we want to switch to kube-rs or something else to not require the user to install kubectl. -// pub fn deploy_manifest(yaml: &str) { -// helpers::execute_command_with_stdin(vec!["kubectl", "apply", "-f", "-"], yaml); -// } - -// use crate::kube::Error::TypelessManifest; -// use kube::api::{DynamicObject, GroupVersionKind, TypeMeta}; -// use kube::{Client, Discovery}; -// use snafu::{OptionExt, ResultExt, Snafu}; -// -// pub const TEST: &str = r#" -// apiVersion: monitoring.coreos.com/v1 -// kind: ServiceMonitor -// foo: -// metadata: -// name: scrape-label -// labels: -// release: prometheus-operator -// spec: -// endpoints: -// - port: metrics -// jobLabel: app.kubernetes.io/instance -// selector: -// matchLabels: -// prometheus.io/scrape: "true" -// namespaceSelector: -// any: true -// "#; -// -// #[derive(Snafu, Debug)] -// pub enum Error { -// #[snafu(display("failed to create kubernetes client"))] -// CreateClient { source: kube::Error }, -// #[snafu(display("failed to parse manifest {manifest}"))] -// ParseManifest { -// source: serde_yaml::Error, -// manifest: String, -// }, -// #[snafu(display("manifest {manifest} has no 
type"))] -// TypelessManifest { manifest: String }, -// } -// -// // see https://gitlab.com/teozkr/thruster/-/blob/35b6291788fa209c52dd47fe6c96e1b483071793/src/apply.rs#L121-145 -// pub async fn deploy_manifest(yaml: &str) -> Result<(), Error> { -// let manifest = serde_yaml::from_str::(yaml).context(ParseManifestSnafu { -// manifest: yaml.to_string(), -// })?; -// let manifest_type = manifest.types.as_ref().context(TypelessManifestSnafu {manifest: yaml})?; -// let gvk = gvk_of_typemeta(manifest_type); -// -// let client = create_client().await?; -// -// Ok(()) -// } -// -// async fn create_client() -> Result { -// Client::try_default().await.context(CreateClientSnafu) -// } -// -// fn gvk_of_typemeta(tpe: &TypeMeta) -> GroupVersionKind { -// match tpe.api_version.split_once('/') { -// Some((group, version)) => GroupVersionKind::gvk(&group, &version, &tpe.kind), -// None => GroupVersionKind::gvk("", &tpe.api_version, &tpe.kind), -// } -// } +use crate::NAMESPACE; +use cached::proc_macro::cached; +use indexmap::IndexMap; +use k8s_openapi::api::core::v1::{Endpoints, Node, Service}; +use kube::{ + api::{DynamicObject, GroupVersionKind, ListParams, Patch, PatchParams, TypeMeta}, + discovery::Scope, + Api, Client, Discovery, ResourceExt, +}; +use log::{debug, warn}; +use serde::Deserialize; +use std::{collections::HashMap, error::Error}; + +pub async fn deploy_manifests(yaml: &str) -> Result<(), Box> { + let namespace = NAMESPACE.lock()?.clone(); + let client = get_client().await?; + let discovery = Discovery::new(client.clone()).run().await?; + + for manifest in serde_yaml::Deserializer::from_str(yaml) { + let mut object = DynamicObject::deserialize(manifest)?; + + let gvk = gvk_of_typemeta(object.types.as_ref().ok_or(format!( + "Failed to deploy manifest because type of object {object:?} is not set" + ))?); + let (resource, capabilities) = discovery.resolve_gvk(&gvk).ok_or(format!( + "Failed to deploy manifest because the gvk {gvk:?} can not be resolved" + ))?; + + 
let api: Api = match capabilities.scope { + Scope::Cluster => { + object.metadata.namespace = None; + Api::all_with(client.clone(), &resource) + } + Scope::Namespaced => Api::namespaced_with(client.clone(), &namespace, &resource), + }; + + api.patch( + &object.name(), + &PatchParams::apply("stackablectl"), + &Patch::Apply(object), + ) + .await?; + } + + Ok(()) +} + +pub async fn get_service_endpoint_urls( + service: &Service, + referenced_object_name: &str, + client: Client, +) -> Result, Box> { + let namespace = service + .namespace() + .ok_or(format!("Service {service:?} must have a namespace"))?; + let service_name = service.name(); + + let endpoints_api: Api = Api::namespaced(client.clone(), &namespace); + let endpoints = endpoints_api.get(&service_name).await?; + + let node_name = match &endpoints.subsets { + Some(subsets) if subsets.len() == 1 => match &subsets[0].addresses { + Some(addresses) if !addresses.is_empty() => match &addresses[0].node_name { + Some(node_name) => node_name, + None => { + warn!("Could not determine the node the endpoint {service_name} is running on because the address of the subset didn't had a node name"); + return Ok(IndexMap::new()); + } + }, + Some(_) => { + warn!("Could not determine the node the endpoint {service_name} is running on because the subset had no addresses"); + return Ok(IndexMap::new()); + } + None => { + warn!("Could not determine the node the endpoint {service_name} is running on because subset had no addresses. Is the service {service_name} up and running?"); + return Ok(IndexMap::new()); + } + }, + Some(subsets) => { + warn!("Could not determine the node the endpoint {service_name} is running on because endpoints consists of {num_subsets} subsets", num_subsets=subsets.len()); + return Ok(IndexMap::new()); + } + None => { + warn!("Could not determine the node the endpoint {service_name} is running on because the endpoint has no subset. 
Is the service {service_name} up and running?"); + return Ok(IndexMap::new()); + } + }; + + let node_ip = get_node_ip(node_name).await?; + + let mut result = IndexMap::new(); + for service_port in service + .spec + .as_ref() + .ok_or(format!("Service {service_name} had no spec"))? + .ports + .iter() + .flatten() + { + match service_port.node_port { + Some(node_port) => { + let endpoint_name = service_name + .trim_start_matches(referenced_object_name) + .trim_start_matches('-'); + + let port_name = service_port + .name + .clone() + .unwrap_or_else(|| service_port.port.to_string()); + let endpoint_name = if endpoint_name.is_empty() { + port_name.clone() + } else { + format!("{endpoint_name}-{port_name}") + }; + let endpoint = match port_name.as_str() { + // TODO: Consolidate web-ui port names in operators + "http" | "ui" | "airflow" | "superset" => { + format!("http://{node_ip}:{node_port}") + } + "https" => format!("https://{node_ip}:{node_port}"), + _ => format!("{node_ip}:{node_port}"), + }; + + result.insert(endpoint_name, endpoint); + } + None => debug!("Could not get endpoint_url as service {service_name} has no nodePort"), + } + } + + Ok(result) +} + +async fn get_node_ip(node_name: &str) -> Result> { + let node_name_ip_mapping = get_node_name_ip_mapping().await?; + + match node_name_ip_mapping.get(node_name) { + Some(node_ip) => Ok(node_ip.to_string()), + None => Err(format!("Failed to find node {node_name} in node_name_ip_mapping").into()), + } +} + +#[cached] +async fn get_node_name_ip_mapping() -> Result, String> { + let client = get_client() + .await + .map_err(|err| format!("Failed to create Kubernetes client: {err}"))?; + let node_api: Api = Api::all(client); + let nodes = node_api + .list(&ListParams::default()) + .await + .map_err(|err| format!("Failed to list Kubernetes nodes: {err}"))?; + + let mut result = HashMap::new(); + for node in nodes { + let node_name = node.name(); + let preferred_node_ip = node + .status + .ok_or(format!("Failed to get 
status of node {node_name}"))? + .addresses + .ok_or(format!("Failed to get address of node {node_name}"))? + .iter() + .filter(|address| address.type_ == "InternalIP" || address.type_ == "ExternalIP") + .min_by_key(|address| &address.type_) // ExternalIP (which we want) is lower than InternalIP + .map(|address| address.address.clone()) + .ok_or(format!( + "Could not find an ExternalIP or InternalIP for node {node_name}" + ))?; + result.insert(node_name, preferred_node_ip); + } + + Ok(result) +} + +pub async fn get_client() -> Result> { + Client::try_default() + .await + .map_err(|err| format! {"Failed to construct Kubernetes client: {err}"}.into()) +} + +fn gvk_of_typemeta(type_meta: &TypeMeta) -> GroupVersionKind { + match type_meta.api_version.split_once('/') { + Some((group, version)) => GroupVersionKind::gvk(group, version, &type_meta.kind), + None => GroupVersionKind::gvk("", &type_meta.api_version, &type_meta.kind), + } +} diff --git a/src/main.rs b/src/main.rs index 8d4e9317..0e4bb281 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,9 @@ use crate::arguments::CliCommand; use arguments::CliArgs; use clap::{IntoApp, Parser}; +use lazy_static::lazy_static; +use log::error; +use std::{error::Error, process::exit, sync::Mutex}; mod arguments; mod helm; @@ -9,6 +12,7 @@ mod kind; mod kube; mod operator; mod release; +mod services; mod stack; const AVAILABLE_OPERATORS: &[&str] = &[ @@ -27,29 +31,44 @@ const AVAILABLE_OPERATORS: &[&str] = &[ "superset", "trino", "zookeeper", - // Deprecated - "regorule", - "monitoring", ]; -fn main() { +lazy_static! { + pub static ref NAMESPACE: Mutex = Mutex::new(String::new()); +} + +#[tokio::main] +async fn main() -> Result<(), Box> { let args = CliArgs::parse(); env_logger::builder() .format_timestamp(None) .format_target(false) .filter_level(args.log_level.into()) .init(); + + let namespace = &args.namespace; + *(NAMESPACE.lock()?) 
= namespace.to_string(); + helm::handle_common_cli_args(&args); release::handle_common_cli_args(&args); stack::handle_common_cli_args(&args); - match &args.cmd { - CliCommand::Operator(command) => command.handle(), - CliCommand::Release(command) => command.handle(), - CliCommand::Stack(command) => command.handle(), + let result = match &args.cmd { + CliCommand::Operator(command) => command.handle().await, + CliCommand::Release(command) => command.handle().await, + CliCommand::Stack(command) => command.handle().await, + CliCommand::Services(command) => command.handle().await, CliCommand::Completion(command) => { let mut cmd = CliArgs::command(); arguments::print_completions(command.shell, &mut cmd); + Ok(()) } + }; + + if let Err(err) = &result { + error!("{err}"); + exit(-1); } + + result } diff --git a/src/operator.rs b/src/operator.rs index daef10d6..e91793bf 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -3,7 +3,7 @@ use clap::{Parser, ValueHint}; use indexmap::IndexMap; use log::{info, warn}; use serde::Serialize; -use std::str::FromStr; +use std::{error::Error, str::FromStr}; #[derive(Parser)] pub enum CliCommandOperator { @@ -16,8 +16,10 @@ pub enum CliCommandOperator { /// Show details of a specific operator #[clap(alias("desc"))] Describe { - #[clap(value_hint = ValueHint::Other)] + /// Name of the operator to describe + #[clap(required = true, value_hint = ValueHint::Other)] operator: String, + #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, @@ -26,13 +28,15 @@ pub enum CliCommandOperator { Install { /// Space separated list of operators to install. /// Must have the form `name[=version]` e.g. `superset`, `superset=0.3.0`, `superset=0.3.0-nightly` or `superset=0.3.0-pr123`. + /// If no version is specified the latest nightly version - build from the main branch - will be used. 
/// You can get the available versions with `stackablectl operator list` or `stackablectl operator describe superset` #[clap(multiple_occurrences(true), required = true, value_hint = ValueHint::Other)] operators: Vec, - /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. - /// You need to have `docker` and `kind` installed. Have a look at the README at on how to install them. + /// If specified, a local Kubernetes cluster consisting of 4 nodes (1 for control-plane and 3 workers) for testing purposes will be created. + /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. + /// You need to have `docker` and `kind` installed. + /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] kind_cluster: bool, @@ -60,29 +64,31 @@ pub enum CliCommandOperator { } impl CliCommandOperator { - pub fn handle(&self) { + pub async fn handle(&self) -> Result<(), Box> { match self { - CliCommandOperator::List { output } => list_operators(output), + CliCommandOperator::List { output } => list_operators(output).await?, CliCommandOperator::Describe { operator, output } => { - describe_operator(operator, output) + describe_operator(operator, output).await? 
} CliCommandOperator::Install { operators, kind_cluster, kind_cluster_name, } => { - kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name)?; for operator in operators { - operator.install(); + operator.install()?; } } CliCommandOperator::Uninstall { operators } => uninstall_operators(operators), - CliCommandOperator::Installed { output } => list_installed_operators(output), + CliCommandOperator::Installed { output } => list_installed_operators(output)?, } + + Ok(()) } } -fn list_operators(output_type: &OutputType) { +async fn list_operators(output_type: &OutputType) -> Result<(), Box> { type Output = IndexMap; #[derive(Serialize)] @@ -93,24 +99,22 @@ fn list_operators(output_type: &OutputType) { dev_versions: Vec, } - let output: Output = AVAILABLE_OPERATORS - .iter() - .map(|operator| { - ( - operator.to_string(), - OutputOperatorEntry { - stable_versions: get_versions_from_repo(operator, "stackable-stable"), - test_versions: get_versions_from_repo(operator, "stackable-test"), - dev_versions: get_versions_from_repo(operator, "stackable-dev"), - }, - ) - }) - .collect(); + let mut output: Output = IndexMap::new(); + for operator in AVAILABLE_OPERATORS { + output.insert( + operator.to_string(), + OutputOperatorEntry { + stable_versions: get_versions_from_repo(operator, "stackable-stable").await?, + test_versions: get_versions_from_repo(operator, "stackable-test").await?, + dev_versions: get_versions_from_repo(operator, "stackable-dev").await?, + }, + ); + } match output_type { OutputType::Text => { println!("OPERATOR STABLE VERSIONS"); - for (operator, operator_entry) in output.iter() { + for (operator, operator_entry) in output { println!( "{:18} {}", operator, @@ -119,15 +123,17 @@ fn list_operators(output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml 
=> { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } -fn describe_operator(operator: &str, output_type: &OutputType) { +async fn describe_operator(operator: &str, output_type: &OutputType) -> Result<(), Box> { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -138,9 +144,9 @@ fn describe_operator(operator: &str, output_type: &OutputType) { } let output = Output { operator: operator.to_string(), - stable_versions: get_versions_from_repo(operator, "stackable-stable"), - test_versions: get_versions_from_repo(operator, "stackable-test"), - dev_versions: get_versions_from_repo(operator, "stackable-dev"), + stable_versions: get_versions_from_repo(operator, "stackable-stable").await?, + test_versions: get_versions_from_repo(operator, "stackable-test").await?, + dev_versions: get_versions_from_repo(operator, "stackable-dev").await?, }; match output_type { @@ -151,35 +157,42 @@ fn describe_operator(operator: &str, output_type: &OutputType) { println!("Dev versions: {}", output.dev_versions.join(", ")); } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } -fn get_versions_from_repo(operator: &str, helm_repo_name: &str) -> Vec { +async fn get_versions_from_repo( + operator: &str, + helm_repo_name: &str, +) -> Result, Box> { let chart_name = format!("{operator}-operator"); - let repo = helm::get_repo_index( - HELM_REPOS - .lock() - .unwrap() - .get(helm_repo_name) - .unwrap_or_else(|| panic!("Could not find a helm repo with the name {helm_repo_name}")) - .to_string(), - ); + let helm_repo_url = HELM_REPOS + .lock()? + .get(helm_repo_name) + .ok_or(format!( + "Could not find a helm repo with the name {helm_repo_name}" + ))? 
+ .to_string(); + + let repo = helm::get_repo_index(helm_repo_url).await?; + match repo.entries.get(&chart_name) { None => { warn!("Could not find {operator} operator (chart name {chart_name}) in helm repo {helm_repo_name}"); - vec![] + Ok(vec![]) } - Some(versions) => versions + Some(versions) => Ok(versions .iter() .map(|entry| entry.version.clone()) .rev() - .collect(), + .collect()), } } @@ -191,7 +204,7 @@ pub fn uninstall_operators(operators: &Vec) { } } -fn list_installed_operators(output_type: &OutputType) { +fn list_installed_operators(output_type: &OutputType) -> Result<(), Box> { type Output = IndexMap; #[derive(Serialize)] @@ -203,7 +216,7 @@ fn list_installed_operators(output_type: &OutputType) { last_updated: String, } - let output: Output = helm::helm_list_releases() + let output: Output = helm::helm_list_releases()? .into_iter() .filter(|release| { AVAILABLE_OPERATORS @@ -226,7 +239,7 @@ fn list_installed_operators(output_type: &OutputType) { match output_type { OutputType::Text => { println!("OPERATOR VERSION NAMESPACE STATUS LAST UPDATED"); - for (operator, operator_entry) in output.iter() { + for (operator, operator_entry) in output { println!( "{:21} {:15} {:30} {:16} {}", operator, @@ -238,12 +251,14 @@ fn list_installed_operators(output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } #[derive(Debug)] @@ -263,7 +278,7 @@ impl Operator { } } - pub fn install(&self) { + pub fn install(&self) -> Result<(), Box> { info!( "Installing {} operator{}", self.name, @@ -287,7 +302,10 @@ impl Operator { helm_repo_name, &helm_release_name, self.version.as_deref(), - ); + None, + )?; + + Ok(()) } } diff --git a/src/release.rs b/src/release.rs index 98c4f463..dbd94e02 100644 --- 
a/src/release.rs +++ b/src/release.rs @@ -5,7 +5,7 @@ use indexmap::IndexMap; use lazy_static::lazy_static; use log::{error, info, warn}; use serde::{Deserialize, Serialize}; -use std::{ops::Deref, process::exit, sync::Mutex}; +use std::{error::Error, ops::Deref, process::exit, sync::Mutex}; lazy_static! { pub static ref RELEASE_FILES: Mutex> = Mutex::new(vec![ @@ -24,8 +24,10 @@ pub enum CliCommandRelease { /// Show details of a specific release #[clap(alias("desc"))] Describe { - #[clap(value_hint = ValueHint::Other)] + /// Name of the release to describe + #[clap(required = true, value_hint = ValueHint::Other)] release: String, + #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, @@ -51,9 +53,10 @@ pub enum CliCommandRelease { #[clap(short, long, value_hint = ValueHint::Other)] exclude_products: Vec, - /// If specified a local kubernetes cluster consisting of 4 nodes for testing purposes will be created. - /// Kind is a tool to spin up a local kubernetes cluster running on docker on your machine. - /// You need to have `docker` and `kind` installed. Have a look at the README at on how to install them. + /// If specified, a local Kubernetes cluster consisting of 4 nodes (1 for control-plane and 3 workers) for testing purposes will be created. + /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. + /// You need to have `docker` and `kind` installed. 
+ /// Have a look at our documentation on how to install `kind` at #[clap(short, long)] kind_cluster: bool, @@ -76,10 +79,12 @@ pub enum CliCommandRelease { } impl CliCommandRelease { - pub fn handle(&self) { + pub async fn handle(&self) -> Result<(), Box> { match self { - CliCommandRelease::List { output } => list_releases(output), - CliCommandRelease::Describe { release, output } => describe_release(release, output), + CliCommandRelease::List { output } => list_releases(output).await?, + CliCommandRelease::Describe { release, output } => { + describe_release(release, output).await? + } CliCommandRelease::Install { release, include_products, @@ -87,17 +92,19 @@ impl CliCommandRelease { kind_cluster, kind_cluster_name, } => { - kind::handle_cli_arguments(*kind_cluster, kind_cluster_name); - install_release(release, include_products, exclude_products); + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name)?; + install_release(release, include_products, exclude_products).await?; } - CliCommandRelease::Uninstall { release } => uninstall_release(release), + CliCommandRelease::Uninstall { release } => uninstall_release(release).await, } + + Ok(()) } } pub fn handle_common_cli_args(args: &CliArgs) { let mut release_files = RELEASE_FILES.lock().unwrap(); - release_files.append(&mut args.additional_release_files.clone()); + release_files.extend_from_slice(&args.additional_releases_file); } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -120,12 +127,12 @@ struct ReleaseProduct { operator_version: String, } -fn list_releases(output_type: &OutputType) { - let output = get_releases(); +async fn list_releases(output_type: &OutputType) -> Result<(), Box> { + let output = get_releases().await; match output_type { OutputType::Text => { println!("RELEASE RELEASE DATE DESCRIPTION"); - for (release_name, release_entry) in output.releases.iter() { + for (release_name, release_entry) in output.releases { println!( "{:18} {:14} {}", release_name, release_entry.release_date, 
release_entry.description, @@ -133,15 +140,20 @@ fn list_releases(output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } -fn describe_release(release_name: &str, output_type: &OutputType) { +async fn describe_release( + release_name: &str, + output_type: &OutputType, +) -> Result<(), Box> { #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct Output { @@ -151,7 +163,7 @@ fn describe_release(release_name: &str, output_type: &OutputType) { products: IndexMap, } - let release = get_release(release_name); + let release = get_release(release_name).await; let output = Output { release: release_name.to_string(), release_date: release.release_date, @@ -167,57 +179,64 @@ fn describe_release(release_name: &str, output_type: &OutputType) { println!("Included products:"); println!(); println!("PRODUCT OPERATOR VERSION"); - for (product_name, product) in output.products.iter() { + for (product_name, product) in output.products { println!("{:19} {}", product_name, product.operator_version); } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) } /// If include_operators is an non-empty list only the whitelisted product operators will be installed. /// If exclude_operators is an non-empty list the blacklisted product operators will be skipped. 
-fn install_release(release_name: &str, include_products: &[String], exclude_products: &[String]) { +pub async fn install_release( + release_name: &str, + include_products: &[String], + exclude_products: &[String], +) -> Result<(), Box> { info!("Installing release {release_name}"); - let release = get_release(release_name); + let release = get_release(release_name).await; - for (product_name, product) in release.products.into_iter() { + for (product_name, product) in release.products { let included = include_products.is_empty() || include_products.contains(&product_name); let excluded = exclude_products.contains(&product_name); if included && !excluded { Operator::new(product_name, Some(product.operator_version)) .expect("Failed to construct operator definition") - .install(); + .install()?; } } + + Ok(()) } -fn uninstall_release(release_name: &str) { +async fn uninstall_release(release_name: &str) { info!("Uninstalling release {release_name}"); - let release = get_release(release_name); + let release = get_release(release_name).await; operator::uninstall_operators(&release.products.into_keys().collect()); } /// Cached because of potential slow network calls #[cached] -fn get_releases() -> Releases { +async fn get_releases() -> Releases { let mut all_releases: IndexMap = IndexMap::new(); - for release_file in RELEASE_FILES.lock().unwrap().deref() { - let yaml = helpers::read_from_url_or_file(release_file); + let release_files = RELEASE_FILES.lock().unwrap().deref().clone(); + for release_file in release_files { + let yaml = helpers::read_from_url_or_file(&release_file).await; match yaml { - Ok(yaml) => { - let releases: Releases = serde_yaml::from_str(&yaml).unwrap_or_else(|err| { - panic!("Failed to parse release list from {release_file}: {err}") - }); - all_releases.extend(releases.releases.clone()); - } + Ok(yaml) => match serde_yaml::from_str::(&yaml) { + Ok(releases) => all_releases.extend(releases.releases), + Err(err) => warn!("Failed to parse release list 
from {release_file}: {err}"), + }, Err(err) => { warn!("Could not read from releases file \"{release_file}\": {err}"); } @@ -229,8 +248,9 @@ fn get_releases() -> Releases { } } -fn get_release(release_name: &str) -> Release { +async fn get_release(release_name: &str) -> Release { get_releases() + .await .releases .remove(release_name) // We need to remove to take ownership .unwrap_or_else(|| { diff --git a/src/services.rs b/src/services.rs new file mode 100644 index 00000000..dbfb0356 --- /dev/null +++ b/src/services.rs @@ -0,0 +1,503 @@ +use clap::Parser; +use cli_table::{ + format::{Border, HorizontalLine, Separator}, + Cell, Table, +}; +use indexmap::IndexMap; +use k8s_openapi::api::{ + apps::v1::Deployment, + core::v1::{Secret, Service}, +}; +use kube::{ + api::{DynamicObject, GroupVersionKind, ListParams}, + Api, Discovery, ResourceExt, +}; +use lazy_static::lazy_static; +use log::{debug, warn}; +use serde::Serialize; +use std::{error::Error, vec}; + +use crate::{ + arguments::OutputType, + kube::{get_client, get_service_endpoint_urls}, + NAMESPACE, +}; + +pub static REDACTED_PASSWORD: &str = ""; + +lazy_static! 
{ + pub static ref STACKABLE_PRODUCT_CRDS: IndexMap<&'static str, GroupVersionKind> = + IndexMap::from([ + ( + "airflow", + GroupVersionKind { + group: "airflow.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "AirflowCluster".to_string(), + } + ), + ( + "druid", + GroupVersionKind { + group: "druid.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "DruidCluster".to_string(), + } + ), + ( + "hbase", + GroupVersionKind { + group: "hbase.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HbaseCluster".to_string(), + } + ), + ( + "hdfs", + GroupVersionKind { + group: "hdfs.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HdfsCluster".to_string(), + } + ), + ( + "hive", + GroupVersionKind { + group: "hive.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "HiveCluster".to_string(), + } + ), + ( + "kafka", + GroupVersionKind { + group: "kafka.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "KafkaCluster".to_string(), + } + ), + ( + "nifi", + GroupVersionKind { + group: "nifi.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "NifiCluster".to_string(), + } + ), + ( + "opa", + GroupVersionKind { + group: "opa.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "OpaCluster".to_string(), + } + ), + ( + "superset", + GroupVersionKind { + group: "superset.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "SupersetCluster".to_string(), + } + ), + ( + "trino", + GroupVersionKind { + group: "trino.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "TrinoCluster".to_string(), + } + ), + ( + "zookeeper", + GroupVersionKind { + group: "zookeeper.stackable.tech".to_string(), + version: "v1alpha1".to_string(), + kind: "ZookeeperCluster".to_string(), + } + ), + ]); +} + +#[derive(Parser)] +pub enum CliCommandServices { + /// List deployed services + 
#[clap(alias("ls"))] + List { + /// If specified services of all namespaces will be shown, not only the namespace you're currently in + #[clap(short, long)] + all_namespaces: bool, + + /// Don't show credentials in the output + #[clap(short, long)] + redact_credentials: bool, + + /// Show the product versions in the output + #[clap(long)] + show_versions: bool, + + #[clap(short, long, arg_enum, default_value = "text")] + output: OutputType, + }, +} + +impl CliCommandServices { + pub async fn handle(&self) -> Result<(), Box> { + match self { + CliCommandServices::List { + all_namespaces, + output, + redact_credentials, + show_versions, + } => { + list_services(*all_namespaces, *redact_credentials, *show_versions, output).await?; + } + } + + Ok(()) + } +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct InstalledProduct { + pub name: String, + pub namespace: Option, // Some CRDs are cluster scoped + pub endpoints: IndexMap, // key: service name (e.g. web-ui), value: url + pub extra_infos: Vec, +} + +async fn list_services( + all_namespaces: bool, + redact_credentials: bool, + show_versions: bool, + output_type: &OutputType, +) -> Result<(), Box> { + let mut output = + get_stackable_services(!all_namespaces, redact_credentials, show_versions).await?; + output.insert( + "minio".to_string(), + get_minio_services(!all_namespaces, redact_credentials).await?, + ); + + match output_type { + OutputType::Text => { + let mut table = vec![]; + + let max_endpoint_name_length = output + .values() + .flatten() + .flat_map(|p| &p.endpoints) + .map(|e| e.0.len()) + .max() + .unwrap_or_default(); + + for (product_name, installed_products) in output { + for installed_product in installed_products { + let mut endpoints = vec![]; + for endpoint in &installed_product.endpoints { + endpoints.push(vec![endpoint.0.as_str(), endpoint.1.as_str()]); + } + + let endpoints = installed_product + .endpoints + .iter() + .map(|(name, url)| { + format!("{name:width$}{url}", 
width = max_endpoint_name_length + 1) + }) + .collect::>() + .join("\n"); + + table.push(vec![ + (&product_name).cell(), + installed_product.name.as_str().cell(), + installed_product + .namespace + .clone() + .unwrap_or_default() + .cell(), + endpoints.cell(), + installed_product.extra_infos.join("\n").cell(), + ]); + } + } + let table = table + .table() + .title(vec![ + "PRODUCT".cell(), + "NAME".cell(), + "NAMESPACE".cell(), + "ENDPOINTS".cell(), + "EXTRA INFOS".cell(), + ]) + .border(Border::builder().build()) + .separator( + Separator::builder() + .row(Some(HorizontalLine::new(' ', ' ', ' ', ' '))) + .build(), + ); + + print!("{}", table.display()?); + } + OutputType::Json => { + println!("{}", serde_json::to_string_pretty(&output)?); + } + OutputType::Yaml => { + println!("{}", serde_yaml::to_string(&output)?); + } + } + + Ok(()) +} + +pub async fn get_stackable_services( + namespaced: bool, + redact_credentials: bool, + show_versions: bool, +) -> Result>, Box> { + let mut result = IndexMap::new(); + let namespace = NAMESPACE.lock()?.clone(); + + let client = get_client().await?; + let discovery = Discovery::new(client.clone()).run().await?; + + for (product_name, product_gvk) in STACKABLE_PRODUCT_CRDS.iter() { + let object_api_resource = match discovery.resolve_gvk(product_gvk) { + Some((object_api_resource, _)) => object_api_resource, + None => { + debug!("Failed to list services of product {product_name} because the gvk {product_gvk:?} can not be resolved"); + continue; + } + }; + + let object_api: Api = match namespaced { + true => Api::namespaced_with(client.clone(), &namespace, &object_api_resource), + false => Api::all_with(client.clone(), &object_api_resource), + }; + + let objects = object_api.list(&ListParams::default()).await?; + let mut installed_products = Vec::new(); + for object in objects { + let object_name = object.name(); + let object_namespace = match object.namespace() { + Some(namespace) => namespace, + // If the custom resource does not 
have a namespace set it can't expose a service + None => continue, + }; + + let service_api: Api = + Api::namespaced(client.clone(), object_namespace.as_str()); + let service_list_params = ListParams::default() + .labels(format!("app.kubernetes.io/name={product_name}").as_str()) + .labels(format!("app.kubernetes.io/instance={object_name}").as_str()); + let services = service_api.list(&service_list_params).await?; + + let extra_infos = + get_extra_infos(product_name, &object, redact_credentials, show_versions).await?; + + let mut endpoints = IndexMap::new(); + for service in services { + let service_endpoint_urls = + get_service_endpoint_urls(&service, &object_name, client.clone()).await; + match service_endpoint_urls { + Ok(service_endpoint_urls) => endpoints.extend(service_endpoint_urls), + Err(err) => warn!( + "Failed to get endpoint_urls of service {service_name}: {err}", + service_name = service.name(), + ), + } + } + let product = InstalledProduct { + name: object_name, + namespace: Some(object_namespace), + endpoints, + extra_infos, + }; + installed_products.push(product); + } + result.insert(product_name.to_string(), installed_products); + } + + Ok(result) +} + +pub async fn get_extra_infos( + product: &str, + product_crd: &DynamicObject, + redact_credentials: bool, + show_versions: bool, +) -> Result, Box> { + let mut result = Vec::new(); + + match product { + "airflow" | "superset" => { + if let Some(secret_name) = product_crd.data["spec"]["credentialsSecret"].as_str() { + let credentials = get_credentials_from_secret( + secret_name, + product_crd + .namespace() + .ok_or(format!( + "The custom resource {product_crd:?} had no namespace set" + ))? 
+ .as_str(), + "adminUser.username", + "adminUser.password", + redact_credentials, + ) + .await?; + + if let Some((username, password)) = credentials { + result.push(format!("Admin user: {username}, password: {password}")); + } + } + } + _ => (), + } + + if show_versions { + if let Some(version) = product_crd.data["spec"]["version"].as_str() { + result.push(format!("version {version}")); + } + } + + Ok(result) +} + +async fn get_credentials_from_secret( + secret_name: &str, + secret_namespace: &str, + username_key: &str, + password_key: &str, + redact_credentials: bool, +) -> Result, Box> { + let client = get_client().await?; + let secret_api: Api = Api::namespaced(client, secret_namespace); + + let secret = secret_api.get(secret_name).await?; + let secret_data = secret + .data + .ok_or(format!("Secret {secret_name} had no data"))?; + + match (secret_data.get(username_key), secret_data.get(password_key)) { + (Some(username), Some(password)) => { + let username = String::from_utf8(username.0.clone())?; + let password = if redact_credentials { + REDACTED_PASSWORD.to_string() + } else { + String::from_utf8(password.0.clone())? 
+ }; + Ok(Some((username, password))) + } + _ => Ok(None), + } +} + +async fn get_minio_services( + namespaced: bool, + redact_credentials: bool, +) -> Result, Box> { + let client = get_client().await?; + let deployment_api: Api = match namespaced { + true => Api::namespaced(client.clone(), NAMESPACE.lock()?.as_str()), + false => Api::all(client.clone()), + }; + let list_params = ListParams::default().labels("app=minio"); + let minio_deployments = deployment_api.list(&list_params).await?; + + let mut result = Vec::new(); + for minio_deployment in minio_deployments { + let deployment_name = minio_deployment.name(); + let deployment_namespace = minio_deployment.namespace().ok_or(format!( + "MinIO deployment {deployment_name} had no namespace" + ))?; + + let service_api = Api::namespaced(client.clone(), &deployment_namespace); + let service_names = vec![ + deployment_name.clone(), + format!("{deployment_name}-console"), + ]; + + let mut endpoints = IndexMap::new(); + for service_name in service_names { + let service = service_api.get(&service_name).await?; + let service_endpoint_urls = + get_service_endpoint_urls(&service, &deployment_name, client.clone()).await?; + endpoints.extend(service_endpoint_urls); + } + + let mut extra_infos = vec!["Third party service".to_string()]; + let containers = minio_deployment + .spec + .unwrap() + .template + .spec + .unwrap() + .containers; + if let Some(minio_container) = containers.iter().find(|c| c.name == "minio") { + if let Some(env) = &minio_container.env { + let admin_user = env.iter().find(|e| e.name == "MINIO_ROOT_USER"); + let admin_password = env.iter().find(|e| e.name == "MINIO_ROOT_PASSWORD"); + + if let (Some(admin_user), Some(admin_password)) = (admin_user, admin_password) { + let admin_user = admin_user + .value_from + .as_ref() + .ok_or("MinIO admin user env var needs to have an valueFrom entry")? 
+ .secret_key_ref + .as_ref() + .ok_or("MinIO admin user env var needs to have an secretKeyRef in the valueFrom entry")?; + let admin_password = admin_password + .value_from + .as_ref() + .ok_or("MinIO admin password env var needs to have an valueFrom entry")? + .secret_key_ref + .as_ref() + .ok_or("MinIO admin password env var needs to have an secretKeyRef in the valueFrom entry")?; + + let api: Api = Api::namespaced(client.clone(), &deployment_namespace); + let admin_user_secret = api.get(admin_user.name.as_ref().unwrap()).await; + let admin_password_secret = + api.get(admin_password.name.as_ref().unwrap()).await; + + if let ( + Ok(Secret { + data: Some(admin_user_secret_data), + .. + }), + Ok(Secret { + data: Some(admin_password_secret_data), + .. + }), + ) = (admin_user_secret, admin_password_secret) + { + let admin_user = admin_user_secret_data + .get(&admin_user.key) + .map(|b| String::from_utf8(b.clone().0).unwrap()) + .unwrap_or_default(); + let admin_password = if redact_credentials { + REDACTED_PASSWORD.to_string() + } else { + admin_password_secret_data + .get(&admin_password.key) + .map(|b| String::from_utf8(b.clone().0).unwrap()) + .unwrap_or_default() + }; + extra_infos.push(format!( + "Admin user: {admin_user}, password: {admin_password}" + )); + } + } + } + } + + let product = InstalledProduct { + name: deployment_name, + namespace: Some(deployment_namespace), + endpoints, + extra_infos, + }; + result.push(product); + } + + Ok(result) +} diff --git a/src/stack.rs b/src/stack.rs index 3ddc7f41..b4a8670f 100644 --- a/src/stack.rs +++ b/src/stack.rs @@ -1,15 +1,16 @@ -use crate::{arguments::OutputType, helpers, CliArgs}; +use crate::{arguments::OutputType, helm, helm::HELM_REPOS, helpers, kind, kube, release, CliArgs}; use cached::proc_macro::cached; -use clap::Parser; +use clap::{Parser, ValueHint}; use indexmap::IndexMap; use lazy_static::lazy_static; -use log::warn; +use log::{debug, info, warn}; use serde::{Deserialize, Serialize}; -use 
std::{ops::Deref, sync::Mutex}; +use std::{error::Error, ops::Deref, sync::Mutex}; lazy_static! { pub static ref STACK_FILES: Mutex> = Mutex::new(vec![ - "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks.yaml".to_string() + "https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/stacks-v1.yaml" + .to_string(), ]); } @@ -21,19 +22,62 @@ pub enum CliCommandStack { #[clap(short, long, arg_enum, default_value = "text")] output: OutputType, }, + /// Show details of a specific stack + #[clap(alias("desc"))] + Describe { + /// Name of the stack to describe + #[clap(required = true, value_hint = ValueHint::Other)] + stack: String, + + #[clap(short, long, arg_enum, default_value = "text")] + output: OutputType, + }, + /// Install a specific stack + #[clap(alias("in"))] + Install { + /// Name of the stack to install + #[clap(required = true, value_hint = ValueHint::Other)] + stack: String, + + /// If specified, a local Kubernetes cluster consisting of 4 nodes (1 for control-plane and 3 workers) for testing purposes will be created. + /// Kind is a tool to spin up a local Kubernetes cluster running on Docker on your machine. + /// You need to have `docker` and `kind` installed. 
+ /// Have a look at our documentation on how to install `kind` at + #[clap(short, long)] + kind_cluster: bool, + + /// Name of the kind cluster created if `--kind-cluster` is specified + #[clap( + long, + default_value = "stackable-data-platform", + requires = "kind-cluster", + value_hint = ValueHint::Other, + )] + kind_cluster_name: String, + }, } impl CliCommandStack { - pub fn handle(&self) { + pub async fn handle(&self) -> Result<(), Box> { match self { - CliCommandStack::List { output } => list_stacks(output), + CliCommandStack::List { output } => list_stacks(output).await?, + CliCommandStack::Describe { stack, output } => describe_stack(stack, output).await?, + CliCommandStack::Install { + stack, + kind_cluster, + kind_cluster_name, + } => { + kind::handle_cli_arguments(*kind_cluster, kind_cluster_name)?; + install_stack(stack).await?; + } } + Ok(()) } } pub fn handle_common_cli_args(args: &CliArgs) { let mut stack_files = STACK_FILES.lock().unwrap(); - stack_files.append(&mut args.additional_stack_files.clone()); + stack_files.extend_from_slice(&args.additional_stacks_file); } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -47,14 +91,37 @@ struct Stacks { struct Stack { description: String, stackable_release: String, + labels: Vec, + manifests: Vec, } -fn list_stacks(output_type: &OutputType) { - let output = get_stacks(); +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +enum StackManifest { + #[serde(rename_all = "camelCase")] + HelmChart { + release_name: String, + name: String, + repo: HelmChartRepo, + version: String, + options: serde_yaml::Value, + }, + PlainYaml(String), +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +struct HelmChartRepo { + name: String, + url: String, +} + +async fn list_stacks(output_type: &OutputType) -> Result<(), Box> { + let output = get_stacks().await; match output_type { OutputType::Text => { println!("STACK STACKABLE RELEASE DESCRIPTION"); - 
for (stack_name, stack) in output.stacks.iter() { + for (stack_name, stack) in output.stacks { println!( "{:35} {:18} {}", stack_name, stack.stackable_release, stack.description, @@ -62,27 +129,111 @@ fn list_stacks(output_type: &OutputType) { } } OutputType::Json => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); + println!("{}", serde_json::to_string_pretty(&output)?); } OutputType::Yaml => { - println!("{}", serde_yaml::to_string(&output).unwrap()); + println!("{}", serde_yaml::to_string(&output)?); } } + + Ok(()) +} + +async fn describe_stack(stack_name: &str, output_type: &OutputType) -> Result<(), Box> { + #[derive(Serialize)] + #[serde(rename_all = "camelCase")] + struct Output { + stack: String, + description: String, + stackable_release: String, + labels: Vec, + } + + let stack = get_stack(stack_name).await?; + let output = Output { + stack: stack_name.to_string(), + description: stack.description, + stackable_release: stack.stackable_release, + labels: stack.labels, + }; + + match output_type { + OutputType::Text => { + println!("Stack: {}", output.stack); + println!("Description: {}", output.description); + println!("Stackable release: {}", output.stackable_release); + println!("Labels: {}", output.labels.join(", ")); + } + OutputType::Json => { + println!("{}", serde_json::to_string_pretty(&output)?); + } + OutputType::Yaml => { + println!("{}", serde_yaml::to_string(&output)?); + } + } + + Ok(()) +} + +async fn install_stack(stack_name: &str) -> Result<(), Box> { + info!("Installing stack {stack_name}"); + let stack = get_stack(stack_name).await?; + + release::install_release(&stack.stackable_release, &[], &[]).await?; + + info!("Installing components of stack {stack_name}"); + for manifest in stack.manifests { + match manifest { + StackManifest::HelmChart { + release_name, + name, + repo, + version, + options, + } => { + debug!("Installing helm chart {name} as {release_name}"); + HELM_REPOS.lock()?.insert(repo.name.clone(), 
repo.url); + + let values_yaml = serde_yaml::to_string(&options)?; + helm::install_helm_release_from_repo( + &release_name, + &release_name, + &repo.name, + &name, + Some(&version), + Some(&values_yaml), + )? + } + StackManifest::PlainYaml(yaml_url_or_file) => { + debug!("Installing yaml manifest from {yaml_url_or_file}"); + let manifests = helpers::read_from_url_or_file(&yaml_url_or_file) + .await + .map_err(|err| { + format!( + "Could not read stack manifests from file \"{yaml_url_or_file}\": {err}" + ) + })?; + kube::deploy_manifests(&manifests).await?; + } + } + } + + info!("Installed stack {stack_name}"); + Ok(()) } /// Cached because of potential slow network calls #[cached] -fn get_stacks() -> Stacks { +async fn get_stacks() -> Stacks { let mut all_stacks: IndexMap = IndexMap::new(); - for stack_file in STACK_FILES.lock().unwrap().deref() { - let yaml = helpers::read_from_url_or_file(stack_file); + let stack_files = STACK_FILES.lock().unwrap().deref().clone(); + for stack_file in stack_files { + let yaml = helpers::read_from_url_or_file(&stack_file).await; match yaml { - Ok(yaml) => { - let stacks: Stacks = serde_yaml::from_str(&yaml).unwrap_or_else(|err| { - panic!("Failed to parse stack list from {stack_file}: {err}") - }); - all_stacks.extend(stacks.stacks.clone()); - } + Ok(yaml) => match serde_yaml::from_str::(&yaml) { + Ok(stacks) => all_stacks.extend(stacks.stacks), + Err(err) => warn!("Failed to parse stack list from {stack_file}: {err}"), + }, Err(err) => { warn!("Could not read from stacks file \"{stack_file}\": {err}"); } @@ -91,3 +242,11 @@ fn get_stacks() -> Stacks { Stacks { stacks: all_stacks } } + +async fn get_stack(stack_name: &str) -> Result> { + get_stacks() + .await + .stacks + .remove(stack_name) // We need to remove to take ownership + .ok_or_else(|| format!("Stack {stack_name} not found. 
Use `stackablectl stack list` to list the available stacks.").into()) +} diff --git a/stacks.yaml b/stacks.yaml deleted file mode 100644 index e927b4fe..00000000 --- a/stacks.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -stacks: - trino-superset: - description: A modern data-analysis stack with S3, Trino and Superset - stackableRelease: 22.04-sbernauer - # TODO manifests - # TODO additional services - kafka-nifi-druid-superset: - description: Streaming applications and peristant storage with Druid on S3 and Superset for visualization - stackableRelease: 22.04-sbernauer - # TODO manifests - # TODO additional services diff --git a/stacks/airflow/airflow.yaml b/stacks/airflow/airflow.yaml new file mode 100644 index 00000000..fdbfd9b1 --- /dev/null +++ b/stacks/airflow/airflow.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: airflow.stackable.tech/v1alpha1 +kind: AirflowCluster +metadata: + name: airflow +spec: + version: 2.2.5-python39-stackable0.3.0 + statsdExporterVersion: v0.22.4 + executor: CeleryExecutor + loadExamples: true + exposeConfig: false + credentialsSecret: airflow-credentials + webservers: + roleGroups: + default: + replicas: 1 + workers: + roleGroups: + default: + replicas: 2 + schedulers: + roleGroups: + default: + replicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: airflow-credentials +type: Opaque +stringData: + adminUser.username: airflow + adminUser.firstname: Airflow + adminUser.lastname: Admin + adminUser.email: airflow@airflow.com + adminUser.password: airflow + connections.secretKey: thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql+psycopg2://airflow:airflow@postgresql-airflow/airflow + connections.celeryResultBackend: db+postgresql://airflow:airflow@postgresql-airflow/airflow + connections.celeryBrokerUrl: redis://:airflow@redis-airflow-master:6379/0 diff --git a/stacks/druid-superset-s3/druid.yaml b/stacks/druid-superset-s3/druid.yaml new file mode 100644 index 00000000..fce73f1a --- /dev/null +++ 
b/stacks/druid-superset-s3/druid.yaml @@ -0,0 +1,79 @@ +--- +apiVersion: druid.stackable.tech/v1alpha1 +kind: DruidCluster +metadata: + name: druid +spec: + version: 0.23.0-stackable0.1.0 + zookeeperConfigMapName: druid-znode + metadataStorageDatabase: + dbType: derby + connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true + host: localhost + port: 1527 + deepStorage: + s3: + bucket: + inline: + bucketName: druid + connection: + inline: + host: minio-druid + port: 9000 + accessStyle: Path + credentials: + secretClass: druid-s3-credentials + baseKey: data + brokers: + roleGroups: + default: + config: {} + replicas: 1 + coordinators: + roleGroups: + default: + config: {} + replicas: 1 + historicals: + roleGroups: + default: + config: {} + replicas: 1 + middleManagers: + roleGroups: + default: + config: {} + replicas: 1 + routers: + roleGroups: + default: + config: {} + replicas: 1 +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: druid-znode +spec: + clusterRef: + name: druid-zookeeper +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: druid-s3-credentials +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: druid-s3-credentials + labels: + secrets.stackable.tech/class: druid-s3-credentials +stringData: + accessKey: druid + secretKey: druiddruid diff --git a/stacks/druid-superset-s3/superset.yaml b/stacks/druid-superset-s3/superset.yaml new file mode 100644 index 00000000..09c60319 --- /dev/null +++ b/stacks/druid-superset-s3/superset.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: SupersetCluster +metadata: + name: superset +spec: + version: 1.5.1-stackable0.2.0 + statsdExporterVersion: v0.22.4 + credentialsSecret: superset-credentials + nodes: + roleGroups: + default: + replicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: superset-credentials +type: Opaque 
+stringData: + adminUser.username: admin + adminUser.firstname: Superset + adminUser.lastname: Admin + adminUser.email: admin@superset.com + adminUser.password: admin + connections.secretKey: thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: DruidConnection +metadata: + name: superset-druid-connection +spec: + superset: + name: superset + druid: + name: druid diff --git a/stacks/druid-superset-s3/zookeeper.yaml b/stacks/druid-superset-s3/zookeeper.yaml new file mode 100644 index 00000000..8ccad09f --- /dev/null +++ b/stacks/druid-superset-s3/zookeeper.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: druid-zookeeper +spec: + version: 3.8.0-stackable0.7.1 + servers: + roleGroups: + default: + replicas: 1 diff --git a/stacks/stacks-v1.yaml b/stacks/stacks-v1.yaml new file mode 100644 index 00000000..77a7e6ba --- /dev/null +++ b/stacks/stacks-v1.yaml @@ -0,0 +1,153 @@ +--- +stacks: + druid-superset-s3: + description: Stack containing MinIO, Druid and Superset for data visualization + stackableRelease: 22.06 + labels: + - druid + - superset + - minio + - s3 + manifests: + - helmChart: + releaseName: minio-druid + name: minio + repo: + name: minio + url: https://charts.min.io/ + version: 4.0.2 + options: + rootUser: root + rootPassword: rootroot + mode: standalone + users: + - accessKey: druid + secretKey: druiddruid + policy: readwrite + buckets: + - name: druid + policy: public + resources: + requests: + memory: 2Gi + service: + type: NodePort + nodePort: null + consoleService: + type: NodePort + nodePort: null + - helmChart: + releaseName: postgresql-superset + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 11.0.0 + options: + auth: + username: superset + password: superset + database: superset + - plainYaml: 
https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/druid-superset-s3/zookeeper.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/druid-superset-s3/druid.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/druid-superset-s3/superset.yaml + trino-superset-s3: + description: Stack containing MinIO, Trino and Superset for data visualization + stackableRelease: 22.06 + labels: + - trino + - superset + - minio + - s3 + manifests: + - helmChart: + releaseName: minio-trino + name: minio + repo: + name: minio + url: https://charts.min.io/ + version: 4.0.5 + options: + rootUser: root + rootPassword: rootroot + mode: standalone + users: + - accessKey: trino + secretKey: trinotrino + policy: readwrite + - accessKey: hive + secretKey: hivehive + policy: readwrite + - accessKey: demo + secretKey: demodemo + policy: readwrite + buckets: + - name: demo + policy: public + resources: + requests: + memory: 2Gi + service: + type: NodePort + nodePort: null + consoleService: + type: NodePort + nodePort: null + - helmChart: + releaseName: postgresql-hive + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 10.16.2 + options: + # Old version (10) of helm-charts has old way of setting credentials + postgresqlUsername: hive + postgresqlPassword: hive + postgresqlDatabase: hive + - helmChart: + releaseName: postgresql-superset + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 11.0.0 + options: + auth: + username: superset + password: superset + database: superset + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/trino-superset-s3/hive-metastore.yaml + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/trino-superset-s3/trino.yaml + - plainYaml: 
https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/trino-superset-s3/superset.yaml + airflow: + description: Stack containing Airflow scheduling platform + stackableRelease: 22.06 + labels: + - airflow + manifests: + - helmChart: + releaseName: postgresql-airflow + name: postgresql + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 11.0.0 + options: + auth: + username: airflow + password: airflow + database: airflow + - helmChart: + releaseName: redis-airflow + name: redis + repo: + name: bitnami + url: https://charts.bitnami.com/bitnami/ + version: 16.13.2 + options: + auth: + password: airflow + replica: + replicaCount: 1 + - plainYaml: https://raw.githubusercontent.com/stackabletech/stackablectl/main/stacks/airflow/airflow.yaml diff --git a/stacks/trino-superset-s3/hive-metastore.yaml b/stacks/trino-superset-s3/hive-metastore.yaml new file mode 100644 index 00000000..77152a12 --- /dev/null +++ b/stacks/trino-superset-s3/hive-metastore.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: hive.stackable.tech/v1alpha1 +kind: HiveCluster +metadata: + name: hive +spec: + version: 2.3.9-stackable0.4.0 + s3: + inline: + host: minio-trino + port: 9000 + accessStyle: Path + credentials: + secretClass: hive-s3-credentials + metastore: + roleGroups: + default: + replicas: 1 + config: + database: + connString: jdbc:postgresql://postgresql-hive:5432/hive + user: hive + password: hive + dbType: postgres +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: hive-s3-credentials +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: hive-s3-credentials + labels: + secrets.stackable.tech/class: hive-s3-credentials +stringData: + accessKey: hive + secretKey: hivehive diff --git a/stacks/trino-superset-s3/superset.yaml b/stacks/trino-superset-s3/superset.yaml new file mode 100644 index 00000000..dad9b44e --- /dev/null +++ 
b/stacks/trino-superset-s3/superset.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: superset.stackable.tech/v1alpha1 +kind: SupersetCluster +metadata: + name: superset +spec: + version: 1.5.1-stackable0.2.0 + statsdExporterVersion: v0.22.4 + credentialsSecret: superset-credentials + nodes: + roleGroups: + default: + replicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: superset-credentials +type: Opaque +stringData: + adminUser.username: admin + adminUser.firstname: SupersetNur + adminUser.lastname: Admin + adminUser.email: admin@superset.com + adminUser.password: admin + connections.secretKey: thisISaSECRET_1234 + connections.sqlalchemyDatabaseUri: postgresql://superset:superset@postgresql-superset/superset +# --- +# TODO Use when available (https://github.com/stackabletech/superset-operator/issues/3) +# apiVersion: superset.stackable.tech/v1alpha1 +# kind: TrinoConnection +# metadata: +# name: superset-trino-connection +# spec: +# superset: +# name: superset +# trino: +# name: trino diff --git a/stacks/trino-superset-s3/trino.yaml b/stacks/trino-superset-s3/trino.yaml new file mode 100644 index 00000000..6e78cebf --- /dev/null +++ b/stacks/trino-superset-s3/trino.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: trino.stackable.tech/v1alpha1 +kind: TrinoCluster +metadata: + name: trino +spec: + version: 387-stackable0.1.0 + hiveConfigMapName: hive + opa: + configMapName: opa + package: trino + s3: + inline: + host: minio-trino + port: 9000 + accessStyle: Path + credentials: + secretClass: trino-s3-credentials + authentication: + method: + multiUser: + userCredentialsSecret: + name: trino-users + coordinators: + roleGroups: + default: + replicas: 1 + config: {} + workers: + roleGroups: + default: + replicas: 1 + config: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: trino-users +type: kubernetes.io/opaque +stringData: + # admin:admin + admin: $2y$10$89xReovvDLacVzRGpjOyAOONnayOgDAyIS2nW9bs5DJT98q17Dy5i + # demo:demo + demo: 
$2y$10$mMRoIKfWtAuycEQnKiDCeOlCSYiWkvbs0WsMFLkaSnNO0ZnFKVRXm +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: trino-s3-credentials +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: trino-s3-credentials + labels: + secrets.stackable.tech/class: trino-s3-credentials +stringData: + accessKey: trino + secretKey: trinotrino +--- +apiVersion: opa.stackable.tech/v1alpha1 +kind: OpaCluster +metadata: + name: opa +spec: + version: 0.41.0-stackable0.1.0 + servers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: trino-opa-bundle + labels: + opa.stackable.tech/bundle: "trino" +data: + trino.rego: | + package trino + + default allow = false + + allow { + input.context.identity.user == "admin" + } + + allow { + input.context.identity.user == "demo" + }