From 89a9aa5e160d8d243064f2ca71f06c842a766023 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 1 Apr 2022 15:52:14 -0400 Subject: [PATCH 01/16] Add a script for installing all the software mentioned in the README --- tools/install_prerequisites.sh | 44 ++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100755 tools/install_prerequisites.sh diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh new file mode 100755 index 00000000000..05dd34100ff --- /dev/null +++ b/tools/install_prerequisites.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +set -eu + +# Set the CWD to Omicron's source. +SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +cd "${SOURCE_DIR}/.." + +HOST_OS=$(uname -s) +if [[ "${HOST_OS}" == "Linux" ]]; then + echo "Linux" + sudo apt-get install libpq-dev + sudo apt-get install pkg-config +elif [[ "${HOST_OS}" == "SunOS" ]]; then + echo "illumos" + need=( + 'build-essential' + 'library/postgresql-13' + 'pkg-config' + 'brand/omicron1/tools' + 'pkg:/package/pkg' + ) + missing=() + for (( i = 0; i < ${#need[@]}; i++ )); do + p=${need[$i]} + if ! pkg info -q "$p"; then + missing+=( "$p" ) + fi + done + if (( ${#missing[@]} > 0 )); then + pfexec pkg install -v "${missing[@]}" + fi + pkg list -v "${need[@]}" +elif [[ "${HOST_OS}" == "Darwin" ]]; then + echo "Mac" + brew install postgresql + brew install pkg-config +else + echo "Unsupported OS" + exit -1 +fi + +./tools/ci_download_cockroachdb +./tools/ci_download_clickhouse From 147c97c2df680a035775950b54ca452a3b30076f Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 1 Apr 2022 16:00:14 -0400 Subject: [PATCH 02/16] Update Readme --- README.adoc | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/README.adoc b/README.adoc index ef6e5d40f65..c63d6043171 100644 --- a/README.adoc +++ b/README.adoc @@ -33,7 +33,15 @@ You can **format the code** using `cargo fmt`. Make sure to run this before pus You can **run the https://github.com/rust-lang/rust-clippy[Clippy linter]** using `cargo clippy \-- -D warnings -A clippy::style`. Make sure to run this before pushing changes. The CI checks that the code is clippy-clean. -**Prerequisites:** +=== Installing Prerequisite Software + +The following software may be installed automatically with the following scripts: + +---- +$ ./tools/install_prerequisites.sh +---- + +Alternatively, the manual installation steps as follows: Both normal execution and the test suite expect certain binaries (described below) on your PATH. @@ -83,6 +91,10 @@ pkg install pkg:/package/pkg pkg update ---- +=== Running (Simulated) Omicron + +NOTE: If you'd like to run on Helios, refer to <> below. + To **run Omicron** you need to run four programs: * a CockroachDB cluster. For development, you can use the `omicron-dev` tool in this repository to start a single-node CockroachDB cluster **that will delete the database when you shut it down.** From 0e5d9846f1f2e29d3d5e60c6c3551ea5a2c7bb35 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 2 Apr 2022 22:28:12 -0400 Subject: [PATCH 03/16] Merge update and install --- tools/install_prerequisites.sh | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index 05dd34100ff..390094b4ef5 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -8,35 +8,34 @@ cd "${SOURCE_DIR}/.." 
HOST_OS=$(uname -s) if [[ "${HOST_OS}" == "Linux" ]]; then - echo "Linux" sudo apt-get install libpq-dev sudo apt-get install pkg-config elif [[ "${HOST_OS}" == "SunOS" ]]; then - echo "illumos" need=( + 'pkg:/package/pkg' 'build-essential' 'library/postgresql-13' 'pkg-config' 'brand/omicron1/tools' - 'pkg:/package/pkg' ) - missing=() - for (( i = 0; i < ${#need[@]}; i++ )); do - p=${need[$i]} - if ! pkg info -q "$p"; then - missing+=( "$p" ) + + # Perform updates + if (( ${#need[@]} > 0 )); then + pfexec pkg install -v "${need[@]}" && rc=$? || rc=$? + # Return codes: + # 0: Normal Success + # 4: Failure because we're already up-to-date. Also acceptable. + if [ "$rc" -ne 4 ] && [ "$rc" -ne 0 ]; then + exit "$rc" fi - done - if (( ${#missing[@]} > 0 )); then - pfexec pkg install -v "${missing[@]}" fi + pkg list -v "${need[@]}" elif [[ "${HOST_OS}" == "Darwin" ]]; then - echo "Mac" brew install postgresql brew install pkg-config else - echo "Unsupported OS" + echo "Unsupported OS: ${HOST_OS}" exit -1 fi From 6c01f56ad30a180602f11fca256a4c330428a405 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 2 Apr 2022 22:30:42 -0400 Subject: [PATCH 04/16] Make DB downloads idempotent, contained in 'out/' directory --- README.adoc | 4 +- package-manifest.toml | 4 +- tools/ci_download_clickhouse | 168 ++++++++++++++++------------ tools/ci_download_cockroachdb | 201 +++++++++++++++++++--------------- 4 files changed, 212 insertions(+), 165 deletions(-) diff --git a/README.adoc b/README.adoc index c63d6043171..8f707328345 100644 --- a/README.adoc +++ b/README.adoc @@ -70,13 +70,13 @@ example, on Helios, you'd want `/usr/bin` on your PATH. . CockroachDB v21.1.10. + The build and test suite expects to be able to start a single-node CockroachDB cluster using the `cockroach` executable on your PATH. -On illumos, MacOS, and Linux, you should be able to use the `tools/ci_download_cockroachdb` script to fetch the official CockroachDB binary. It will be put into `./cockroachdb/bin/cockroach`. +On illumos, MacOS, and Linux, you should be able to use the `tools/ci_download_cockroachdb` script to fetch the official CockroachDB binary. It will be put into `./out/cockroachdb/bin/cockroach`. Alternatively, you can follow the https://www.cockroachlabs.com/docs/stable/install-cockroachdb.html[official CockroachDB installation instructions for your platform]. . ClickHouse >= v21.7.1. + The test suite expects a running instance of the ClickHouse database server. -The script `./tools/ci_download_clickhouse` can be used to download a pre-built binary for illumos, Linux, or macOS platforms. Once complete, you must manually add the binary (located at `clickhouse/clickhouse`) to your PATH. +The script `./tools/ci_download_clickhouse` can be used to download a pre-built binary for illumos, Linux, or macOS platforms. Once complete, you must manually add the binary (located at `./out/clickhouse/clickhouse`) to your PATH. You may also install ClickHouse manually; instructions can be found https://clickhouse.tech/docs/en/getting-started/install[here]. See <<_configuring_clickhouse>> for details on ClickHouse's setup and configuration files. 
+ diff --git a/package-manifest.toml b/package-manifest.toml index 460b9f11a5f..03fd1256f77 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -36,7 +36,7 @@ to = "/var/svc/manifest/site/oximeter" service_name = "clickhouse" zone = true [[package.clickhouse.paths]] -from = "clickhouse" +from = "out/clickhouse" to = "/opt/oxide/clickhouse" [[package.clickhouse.paths]] from = "smf/clickhouse" @@ -46,7 +46,7 @@ to = "/var/svc/manifest/site/clickhouse" service_name = "cockroachdb" zone = true [[package.cockroachdb.paths]] -from = "cockroachdb" +from = "out/cockroachdb" to = "/opt/oxide/cockroachdb" [[package.cockroachdb.paths]] from = "common/src/sql" diff --git a/tools/ci_download_clickhouse b/tools/ci_download_clickhouse index c1099216941..3bdac422115 100755 --- a/tools/ci_download_clickhouse +++ b/tools/ci_download_clickhouse @@ -7,112 +7,136 @@ # set -o pipefail -set -o xtrace set -o errexit SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" ARG0="$(basename ${BASH_SOURCE[0]})" +TARGET_DIR="out" +# Location where intermediate artifacts are downloaded / unpacked. +DOWNLOAD_DIR="$TARGET_DIR/downloads" +# Location where the final clickhouse directory should end up. +DEST_DIR="./$TARGET_DIR/clickhouse" + # If you change this, you must also update the md5sums below CIDL_VERSION="v21.7" source "$SOURCE_DIR/clickhouse_checksums" -CIDL_ASSEMBLE_DIR="./clickhouse" # Download from manually-populated S3 bucket for now CIDL_URL_BASE="https://oxide-clickhouse-build.s3.us-west-2.amazonaws.com" function main { - # - # Process command-line arguments. We generally don't expect any, but - # we allow callers to specify a value to override OSTYPE, just for - # testing. - # - if [[ $# != 0 ]]; then - CIDL_OS="$1" - shift - else - CIDL_OS="$OSTYPE" - fi - - if [[ $# != 0 ]]; then - echo "unexpected arguments" >&2 - exit 2 - fi - - # Configure this program - configure_os "$CIDL_OS" - CIDL_URL="$CIDL_URL_BASE/$CIDL_FILE" - - # Download the file. - echo "URL: $CIDL_URL" - echo "Local file: $CIDL_FILE" - do_download_curl "$CIDL_URL" "$CIDL_FILE" || \ - fail "failed to download file" - - # Verify the md5sum. - calculated_md5="$($CIDL_MD5FUNC "$CIDL_FILE")" || \ - fail "failed to calculate md5sum" - if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then - fail "md5sum mismatch \ - (expected $CIDL_MD5, found $calculated_md5)" - fi - - # Unpack the tarball into a local directory - do_untar "$CIDL_FILE" "$CIDL_ASSEMBLE_DIR" - - # Run the binary as a sanity-check. - "$CIDL_ASSEMBLE_DIR/clickhouse" server --version + # + # Process command-line arguments. We generally don't expect any, but + # we allow callers to specify a value to override OSTYPE, just for + # testing. + # + if [[ $# != 0 ]]; then + CIDL_OS="$1" + shift + else + CIDL_OS="$OSTYPE" + fi + + if [[ $# != 0 ]]; then + echo "unexpected arguments" >&2 + exit 2 + fi + + # Configure this program + configure_os "$CIDL_OS" + CIDL_URL="$CIDL_URL_BASE/$TARBALL_FILENAME" + + # Download the file. + echo "URL: $CIDL_URL" + echo "Local file: $TARBALL_FILE" + + mkdir -p "$DOWNLOAD_DIR" + mkdir -p "$DEST_DIR" + + local DO_DOWNLOAD="true" + if [[ -f "$TARBALL_FILE" ]]; then + # If the file exists with a valid checksum, we can skip downloading. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" == "$CIDL_MD5" ]]; then + DO_DOWNLOAD="false" + fi + fi + + if [ "$DO_DOWNLOAD" == "true" ]; then + echo "Downloading..." 
+ do_download_curl "$CIDL_URL" "$TARBALL_FILE" || \ + fail "failed to download file" + + # Verify the md5sum. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then + fail "md5sum mismatch \ + (expected $CIDL_MD5, found $calculated_md5)" + fi + fi + + # Unpack the tarball into a local directory + do_untar "$TARBALL_FILE" "$DEST_DIR" + + # Run the binary as a sanity-check. + "$DEST_DIR/clickhouse" server --version } function fail { - echo "$ARG0: $@" >&2 - exit 1 + echo "$ARG0: $@" >&2 + exit 1 } function configure_os { - echo "current directory: $PWD" - echo "configuring based on OS: \"$1\"" - case "$1" in - darwin*) - CIDL_PLATFORM="macos" - CIDL_MD5="$CIDL_MD5_DARWIN" - CIDL_MD5FUNC="do_md5" - ;; - linux-gnu*) - CIDL_PLATFORM="linux" - CIDL_MD5="$CIDL_MD5_LINUX" - CIDL_MD5FUNC="do_md5sum" - ;; - solaris*) - CIDL_PLATFORM="illumos" - CIDL_MD5="$CIDL_MD5_ILLUMOS" - CIDL_MD5FUNC="do_md5sum" - ;; - *) - fail "unsupported OS: $1" - ;; - esac - - CIDL_DIR="clickhouse-$CIDL_VERSION" - CIDL_FILE="$CIDL_DIR.$CIDL_PLATFORM.tar.gz" + echo "current directory: $PWD" + echo "configuring based on OS: \"$1\"" + case "$1" in + darwin*) + CIDL_PLATFORM="macos" + CIDL_MD5="$CIDL_MD5_DARWIN" + CIDL_MD5FUNC="do_md5" + ;; + linux-gnu*) + CIDL_PLATFORM="linux" + CIDL_MD5="$CIDL_MD5_LINUX" + CIDL_MD5FUNC="do_md5sum" + ;; + solaris*) + CIDL_PLATFORM="illumos" + CIDL_MD5="$CIDL_MD5_ILLUMOS" + CIDL_MD5FUNC="do_md5sum" + ;; + *) + fail "unsupported OS: $1" + ;; + esac + + TARBALL_DIRNAME="clickhouse-$CIDL_VERSION" + TARBALL_FILENAME="$TARBALL_DIRNAME.$CIDL_PLATFORM.tar.gz" + + TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" + TARBALL_DIR="$DOWNLOAD_DIR/$TARBALL_DIRNAME" } function do_download_curl { - curl --silent --show-error --fail --location --output "$2" "$1" + curl --silent --show-error --fail --location --output "$2" "$1" } function do_md5 { - md5 < "$1" + md5 < "$1" } function do_md5sum { - md5sum < "$1" | awk '{print $1}' + md5sum < "$1" | awk '{print $1}' } function do_untar diff --git a/tools/ci_download_cockroachdb b/tools/ci_download_cockroachdb index fb1a0f97c05..43680fef652 100755 --- a/tools/ci_download_cockroachdb +++ b/tools/ci_download_cockroachdb @@ -7,7 +7,6 @@ # set -o pipefail -set -o xtrace set -o errexit SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" @@ -17,7 +16,11 @@ ARG0="$(basename ${BASH_SOURCE[0]})" CIDL_VERSION="$(cat "$SOURCE_DIR/cockroachdb_version")" source "$SOURCE_DIR/cockroachdb_checksums" -CIDL_ASSEMBLE_DIR="./cockroachdb" +TARGET_DIR="out" +# Location where intermediate artifacts are downloaded / unpacked. +DOWNLOAD_DIR="$TARGET_DIR/downloads" +# Location where the final cockroachdb directory should end up. +DEST_DIR="./$TARGET_DIR/cockroachdb" # Official (or unofficial) download sites CIDL_URL_COCKROACH="https://binaries.cockroachdb.com" @@ -25,113 +28,133 @@ CIDL_URL_ILLUMOS="https://illumos.org/downloads" function main { - # - # Process command-line arguments. We generally don't expect any, but - # we allow callers to specify a value to override OSTYPE, just for - # testing. - # - if [[ $# != 0 ]]; then - CIDL_OS="$1" - shift - else - CIDL_OS="$OSTYPE" - fi - - if [[ $# != 0 ]]; then - echo "unexpected arguments" >&2 - exit 2 - fi - - # Configure this program - configure_os "$CIDL_OS" - CIDL_URL="$CIDL_URL_BASE/$CIDL_FILE" - - # Download the file. 
- echo "URL: $CIDL_URL" - echo "Local file: $CIDL_FILE" - do_download_curl "$CIDL_URL" "$CIDL_FILE" || \ - fail "failed to download file" - - # Verify the md5sum. - calculated_md5="$($CIDL_MD5FUNC "$CIDL_FILE")" || \ - fail "failed to calculate md5sum" - if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then - fail "md5sum mismatch \ - (expected $CIDL_MD5, found $calculated_md5)" - fi - - # Unpack the tarball. - do_untar "$CIDL_FILE" - - # Copy the "cockroach" binary to the right spot. - $CIDL_ASSEMBLE "$CIDL_DIR" - - # Run the binary as a sanity-check. - "$CIDL_ASSEMBLE_DIR/bin/cockroach" version + # + # Process command-line arguments. We generally don't expect any, but + # we allow callers to specify a value to override OSTYPE, just for + # testing. + # + if [[ $# != 0 ]]; then + CIDL_OS="$1" + shift + else + CIDL_OS="$OSTYPE" + fi + + if [[ $# != 0 ]]; then + echo "unexpected arguments" >&2 + exit 2 + fi + + # Configure this program + configure_os "$CIDL_OS" + CIDL_URL="$CIDL_URL_BASE/$TARBALL_FILENAME" + + # Download the file. + echo "URL: $CIDL_URL" + echo "Local file: $TARBALL_FILE" + + mkdir -p "$DOWNLOAD_DIR" + mkdir -p "$DEST_DIR" + + local DO_DOWNLOAD="true" + if [[ -f "$TARBALL_FILE" ]]; then + # If the file exists with a valid checksum, we can skip downloading. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" == "$CIDL_MD5" ]]; then + DO_DOWNLOAD="false" + fi + fi + + if [ "$DO_DOWNLOAD" == "true" ]; then + echo "Downloading..." + do_download_curl "$CIDL_URL" "$TARBALL_FILE" || \ + fail "failed to download file" + + # Verify the md5sum. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then + fail "md5sum mismatch \ + (expected $CIDL_MD5, found $calculated_md5)" + fi + fi + + # Unpack the tarball. + do_untar "$TARBALL_FILE" + + # Copy the "cockroach" binary to the right spot. + $CIDL_ASSEMBLE + + # Run the binary as a sanity-check. 
+ "$DEST_DIR/bin/cockroach" version } function fail { - echo "$ARG0: $@" >&2 - exit 1 + echo "$ARG0: $@" >&2 + exit 1 } function configure_os { - echo "current directory: $PWD" - echo "configuring based on OS: \"$1\"" - case "$1" in - darwin*) - CIDL_BUILD="darwin-10.9-amd64" - CIDL_SUFFIX="tgz" - CIDL_MD5="$CIDL_MD5_DARWIN" - CIDL_MD5FUNC="do_md5" - CIDL_URL_BASE="$CIDL_URL_COCKROACH" - CIDL_ASSEMBLE="do_assemble_official" - ;; - linux-gnu*) - CIDL_BUILD="linux-amd64" - CIDL_SUFFIX="tgz" - CIDL_MD5="$CIDL_MD5_LINUX" - CIDL_MD5FUNC="do_md5sum" - CIDL_URL_BASE="$CIDL_URL_COCKROACH" - CIDL_ASSEMBLE="do_assemble_official" - ;; - solaris*) - CIDL_BUILD="illumos" - CIDL_SUFFIX="tar.gz" - CIDL_MD5="$CIDL_MD5_ILLUMOS" - CIDL_MD5FUNC="do_md5sum" - CIDL_URL_BASE="$CIDL_URL_ILLUMOS" - CIDL_ASSEMBLE="do_assemble_illumos" - ;; - *) - fail "unsupported OS: $1" - ;; - esac - - CIDL_DIR="cockroach-$CIDL_VERSION.$CIDL_BUILD" - CIDL_FILE="$CIDL_DIR.$CIDL_SUFFIX" + echo "current directory: $PWD" + echo "configuring based on OS: \"$1\"" + case "$1" in + darwin*) + CIDL_BUILD="darwin-10.9-amd64" + CIDL_SUFFIX="tgz" + CIDL_MD5="$CIDL_MD5_DARWIN" + CIDL_MD5FUNC="do_md5" + CIDL_URL_BASE="$CIDL_URL_COCKROACH" + CIDL_ASSEMBLE="do_assemble_official" + ;; + linux-gnu*) + CIDL_BUILD="linux-amd64" + CIDL_SUFFIX="tgz" + CIDL_MD5="$CIDL_MD5_LINUX" + CIDL_MD5FUNC="do_md5sum" + CIDL_URL_BASE="$CIDL_URL_COCKROACH" + CIDL_ASSEMBLE="do_assemble_official" + ;; + solaris*) + CIDL_BUILD="illumos" + CIDL_SUFFIX="tar.gz" + CIDL_MD5="$CIDL_MD5_ILLUMOS" + CIDL_MD5FUNC="do_md5sum" + CIDL_URL_BASE="$CIDL_URL_ILLUMOS" + CIDL_ASSEMBLE="do_assemble_illumos" + ;; + *) + fail "unsupported OS: $1" + ;; + esac + + TARBALL_DIRNAME="cockroach-$CIDL_VERSION.$CIDL_BUILD" + TARBALL_FILENAME="$TARBALL_DIRNAME.$CIDL_SUFFIX" + + TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" + TARBALL_DIR="$DOWNLOAD_DIR/$TARBALL_DIRNAME" } function do_download_curl { - curl --silent --show-error --fail --location --output "$2" "$1" + curl --silent --show-error --fail --location --output "$2" "$1" } function do_md5 { - md5 < "$1" + md5 < "$1" } function do_md5sum { - md5sum < "$1" | awk '{print $1}' + md5sum < "$1" | awk '{print $1}' } function do_untar { - tar xzf "$1" + tar xzf "$1" -C "$DOWNLOAD_DIR" } # @@ -145,14 +168,14 @@ function do_untar function do_assemble_official { - mkdir -p "$CIDL_ASSEMBLE_DIR/bin" - cp "$CIDL_DIR/cockroach" "$CIDL_ASSEMBLE_DIR/bin" + mkdir -p "$DEST_DIR/bin" + cp "$TARBALL_DIR/cockroach" "$DEST_DIR/bin" } function do_assemble_illumos { - rm -r "$CIDL_ASSEMBLE_DIR" || true - cp -r "cockroach-$CIDL_VERSION" "$CIDL_ASSEMBLE_DIR" + rm -r "$DEST_DIR" || true + cp -r "$DOWNLOAD_DIR/cockroach-$CIDL_VERSION" "$DEST_DIR" } main "$@" From 876911f79818052bb95e66e2cd6611926aff4908 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 2 Apr 2022 23:10:47 -0400 Subject: [PATCH 05/16] Automate PATH verification, simplify README --- README.adoc | 50 ---------------------------------- tools/install_prerequisites.sh | 30 +++++++++++++++++++- 2 files changed, 29 insertions(+), 51 deletions(-) diff --git a/README.adoc b/README.adoc index 8f707328345..e3a82cf34ed 100644 --- a/README.adoc +++ b/README.adoc @@ -41,56 +41,6 @@ The following software may be installed automatically with the following scripts $ ./tools/install_prerequisites.sh ---- -Alternatively, the manual installation steps as follows: - -Both normal execution and the test suite expect certain binaries (described below) on your PATH. - -. 
libpq, the PostgreSQL client library -+ --- -We use Diesel's PostgreSQL support to connect to CockroachDB (which is wire-compatible with PostgreSQL). Diesel uses the native libpq to do this. You can get the client library with: - -* Helios: `pkg install library/postgresql-13` -* Linux: `sudo apt-get install libpq-dev` -* Mac: `brew install postgresql` - -After doing this, you should have the `pg_config` command on your PATH. For example, on Helios, you'd want `/opt/ooce/bin` on your PATH. --- -. pkg-config, a tool for querying installed libraries -+ --- - -* Helios: `pkg install pkg-config` -* Linux: `sudo apt-get install pkg-config` -* Mac: `brew install pkg-config` - -After doing this, you should have the `pkg-config` command on your PATH. For -example, on Helios, you'd want `/usr/bin` on your PATH. --- -. CockroachDB v21.1.10. -+ -The build and test suite expects to be able to start a single-node CockroachDB cluster using the `cockroach` executable on your PATH. -On illumos, MacOS, and Linux, you should be able to use the `tools/ci_download_cockroachdb` script to fetch the official CockroachDB binary. It will be put into `./out/cockroachdb/bin/cockroach`. -Alternatively, you can follow the https://www.cockroachlabs.com/docs/stable/install-cockroachdb.html[official CockroachDB installation instructions for your platform]. - -. ClickHouse >= v21.7.1. -+ -The test suite expects a running instance of the ClickHouse database server. -The script `./tools/ci_download_clickhouse` can be used to download a pre-built binary for illumos, Linux, or macOS platforms. Once complete, you must manually add the binary (located at `./out/clickhouse/clickhouse`) to your PATH. -You may also install ClickHouse manually; instructions can be found https://clickhouse.tech/docs/en/getting-started/install[here]. -See <<_configuring_clickhouse>> for details on ClickHouse's setup and configuration files. -+ -. Additional software requirements: -+ -On an illumos-based machine (Helios, OmniOS), if you want to run the real (non-simulated) Sled Agent to run actual VMs with Propolis, make sure your packages are up to date, and you have the `brand/omicron1/tools` package: -+ -[source,text] ----- -pkg install brand/omicron1/tools -pkg install pkg:/package/pkg -pkg update ----- - === Running (Simulated) Omicron NOTE: If you'd like to run on Helios, refer to <> below. diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index 390094b4ef5..2648f1417dc 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -21,7 +21,8 @@ elif [[ "${HOST_OS}" == "SunOS" ]]; then # Perform updates if (( ${#need[@]} > 0 )); then - pfexec pkg install -v "${need[@]}" && rc=$? || rc=$? + rc=0 + pfexec pkg install -v "${need[@]}" || rc=$? # Return codes: # 0: Normal Success # 4: Failure because we're already up-to-date. Also acceptable. @@ -41,3 +42,30 @@ fi ./tools/ci_download_cockroachdb ./tools/ci_download_clickhouse + +# Validate the PATH: +expected_in_path=( + 'pg_config' + 'pkg-config' +) + +declare -A illumos_hints=( + ['pg_config']="On illumos, this is typically found in '/opt/ooce/bin'" + ['pkg-config']="On illumos, this is typically found in '/usr/bin'" +) + +for command in "${expected_in_path[@]}"; do + rc=0 + which "$command" &> /dev/null || rc=$? + if [ "$rc" -ne 0 ]; then + echo "$command seems installed, but not found in PATH. Please add it." 
+ + + if [[ "${HOST_OS}" == "SunOS" ]]; then + if [ "${illumos_hints[$command]+_}" ]; then + echo "${illumos_hints[$command]}" + fi + fi + exit -1 + fi +done From a80dd235d2b5f48153abf9391f16c114532f9af0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 2 Apr 2022 23:14:32 -0400 Subject: [PATCH 06/16] Patch paths in github action --- .github/workflows/rust.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 091b5dbefd6..946033e8591 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -110,14 +110,14 @@ jobs: uses: actions/cache@26968a09c0ea4f3e233fdddbafd1166051a095f6 with: key: ${{ runner.os }}-cockroach-binary-${{ hashFiles('tools/cockroachdb_checksums') }} - path: "cockroachdb" + path: "./out/cockroachdb" - name: Configure GitHub cache for ClickHouse binaries id: cache-clickhouse # actions/cache@v2.1.4 uses: actions/cache@26968a09c0ea4f3e233fdddbafd1166051a095f6 with: key: ${{ runner.os }}-clickhouse-binary-${{ hashFiles('tools/clickhouse_checksums') }} - path: "clickhouse" + path: "./out/clickhouse" - name: Download ClickHouse if: steps.cache-clickhouse.outputs.cache-hit != 'true' run: ./tools/ci_download_clickhouse @@ -146,7 +146,7 @@ jobs: # rebuild here. # Put "./cockroachdb/bin" and "./clickhouse" on the PATH for the test # suite. - run: TMPDIR=$OMICRON_TMP PATH="$PATH:$PWD/cockroachdb/bin:$PWD/clickhouse" RUSTFLAGS="-D warnings" RUSTDOCFLAGS="-D warnings" cargo test --no-fail-fast --workspace --locked --verbose + run: TMPDIR=$OMICRON_TMP PATH="$PATH:$PWD/out/cockroachdb/bin:$PWD/out/clickhouse" RUSTFLAGS="-D warnings" RUSTDOCFLAGS="-D warnings" cargo test --no-fail-fast --workspace --locked --verbose - name: Archive any failed test results if: ${{ failure() }} # actions/upload-artifact@v2.3.1 From 215fea38f0d6eb2ca06a3cfaaed17feaa400f629 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 2 Apr 2022 23:18:15 -0400 Subject: [PATCH 07/16] stray newline --- tools/install_prerequisites.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index 2648f1417dc..9ec51362fd0 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -60,7 +60,6 @@ for command in "${expected_in_path[@]}"; do if [ "$rc" -ne 0 ]; then echo "$command seems installed, but not found in PATH. Please add it." - if [[ "${HOST_OS}" == "SunOS" ]]; then if [ "${illumos_hints[$command]+_}" ]; then echo "${illumos_hints[$command]}" From 866e016fe2a3d38bc6a3fb85fa762abb41fe3342 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sun, 3 Apr 2022 00:08:28 -0400 Subject: [PATCH 08/16] Patch more PATH --- .github/buildomat/jobs/build-and-test.sh | 2 +- .github/workflows/rust.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/buildomat/jobs/build-and-test.sh b/.github/buildomat/jobs/build-and-test.sh index 183697e1f1f..2d204872a94 100644 --- a/.github/buildomat/jobs/build-and-test.sh +++ b/.github/buildomat/jobs/build-and-test.sh @@ -24,7 +24,7 @@ ptime -m bash ./tools/ci_download_cockroachdb # Put "./cockroachdb/bin" and "./clickhouse" on the PATH for the test # suite. 
# -export PATH="$PATH:$PWD/cockroachdb/bin:$PWD/clickhouse" +export PATH="$PATH:$PWD/out/cockroachdb/bin:$PWD/out/clickhouse" # # We build with: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 946033e8591..4913737ffd8 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -140,7 +140,7 @@ jobs: # - TMPDIR=$OMICRON_TMP: we specify a specific temporary directory so that # failed test outputs will be in a known place that we can grab at the # end without also grabbing random other temporary files. - run: TMPDIR=$OMICRON_TMP PATH="$PATH:$PWD/cockroachdb/bin:$PWD/clickhouse" RUSTFLAGS="-D warnings" RUSTDOCFLAGS="-D warnings" cargo build --locked --all-targets --verbose + run: TMPDIR=$OMICRON_TMP PATH="$PATH:$PWD/out/cockroachdb/bin:$PWD/out/clickhouse" RUSTFLAGS="-D warnings" RUSTDOCFLAGS="-D warnings" cargo build --locked --all-targets --verbose - name: Run tests # Use the same RUSTFLAGS and RUSTDOCFLAGS as above to avoid having to # rebuild here. From 40bdee81620417c91b2472644fadc1f0bba17d63 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 4 Apr 2022 13:26:07 -0400 Subject: [PATCH 09/16] Restore old formatting, xtrace --- tools/ci_download_clickhouse | 183 ++++++++++++++-------------- tools/ci_download_cockroachdb | 219 +++++++++++++++++----------------- 2 files changed, 202 insertions(+), 200 deletions(-) diff --git a/tools/ci_download_clickhouse b/tools/ci_download_clickhouse index 3bdac422115..fdef07ac7ea 100755 --- a/tools/ci_download_clickhouse +++ b/tools/ci_download_clickhouse @@ -7,6 +7,7 @@ # set -o pipefail +set -o xtrace set -o errexit SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" @@ -27,121 +28,121 @@ CIDL_URL_BASE="https://oxide-clickhouse-build.s3.us-west-2.amazonaws.com" function main { - # - # Process command-line arguments. We generally don't expect any, but - # we allow callers to specify a value to override OSTYPE, just for - # testing. - # - if [[ $# != 0 ]]; then - CIDL_OS="$1" - shift - else - CIDL_OS="$OSTYPE" - fi - - if [[ $# != 0 ]]; then - echo "unexpected arguments" >&2 - exit 2 - fi - - # Configure this program - configure_os "$CIDL_OS" - CIDL_URL="$CIDL_URL_BASE/$TARBALL_FILENAME" - - # Download the file. - echo "URL: $CIDL_URL" - echo "Local file: $TARBALL_FILE" - - mkdir -p "$DOWNLOAD_DIR" - mkdir -p "$DEST_DIR" - - local DO_DOWNLOAD="true" - if [[ -f "$TARBALL_FILE" ]]; then - # If the file exists with a valid checksum, we can skip downloading. - calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ - fail "failed to calculate md5sum" - if [[ "$calculated_md5" == "$CIDL_MD5" ]]; then - DO_DOWNLOAD="false" - fi - fi - - if [ "$DO_DOWNLOAD" == "true" ]; then - echo "Downloading..." - do_download_curl "$CIDL_URL" "$TARBALL_FILE" || \ - fail "failed to download file" - - # Verify the md5sum. - calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ - fail "failed to calculate md5sum" - if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then - fail "md5sum mismatch \ - (expected $CIDL_MD5, found $calculated_md5)" - fi - fi - - # Unpack the tarball into a local directory - do_untar "$TARBALL_FILE" "$DEST_DIR" - - # Run the binary as a sanity-check. - "$DEST_DIR/clickhouse" server --version + # + # Process command-line arguments. We generally don't expect any, but + # we allow callers to specify a value to override OSTYPE, just for + # testing. 
+ # + if [[ $# != 0 ]]; then + CIDL_OS="$1" + shift + else + CIDL_OS="$OSTYPE" + fi + + if [[ $# != 0 ]]; then + echo "unexpected arguments" >&2 + exit 2 + fi + + # Configure this program + configure_os "$CIDL_OS" + CIDL_URL="$CIDL_URL_BASE/$TARBALL_FILENAME" + + # Download the file. + echo "URL: $CIDL_URL" + echo "Local file: $TARBALL_FILE" + + mkdir -p "$DOWNLOAD_DIR" + mkdir -p "$DEST_DIR" + + local DO_DOWNLOAD="true" + if [[ -f "$TARBALL_FILE" ]]; then + # If the file exists with a valid checksum, we can skip downloading. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" == "$CIDL_MD5" ]]; then + DO_DOWNLOAD="false" + fi + fi + + if [ "$DO_DOWNLOAD" == "true" ]; then + echo "Downloading..." + do_download_curl "$CIDL_URL" "$TARBALL_FILE" || \ + fail "failed to download file" + + # Verify the md5sum. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then + fail "md5sum mismatch \ + (expected $CIDL_MD5, found $calculated_md5)" + fi + fi + + # Unpack the tarball into a local directory + do_untar "$TARBALL_FILE" "$DEST_DIR" + + # Run the binary as a sanity-check. + "$DEST_DIR/clickhouse" server --version } function fail { - echo "$ARG0: $@" >&2 - exit 1 + echo "$ARG0: $@" >&2 + exit 1 } function configure_os { - echo "current directory: $PWD" - echo "configuring based on OS: \"$1\"" - case "$1" in - darwin*) - CIDL_PLATFORM="macos" - CIDL_MD5="$CIDL_MD5_DARWIN" - CIDL_MD5FUNC="do_md5" - ;; - linux-gnu*) - CIDL_PLATFORM="linux" - CIDL_MD5="$CIDL_MD5_LINUX" - CIDL_MD5FUNC="do_md5sum" - ;; - solaris*) - CIDL_PLATFORM="illumos" - CIDL_MD5="$CIDL_MD5_ILLUMOS" - CIDL_MD5FUNC="do_md5sum" - ;; - *) - fail "unsupported OS: $1" - ;; - esac - - TARBALL_DIRNAME="clickhouse-$CIDL_VERSION" - TARBALL_FILENAME="$TARBALL_DIRNAME.$CIDL_PLATFORM.tar.gz" - - TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" - TARBALL_DIR="$DOWNLOAD_DIR/$TARBALL_DIRNAME" + echo "current directory: $PWD" + echo "configuring based on OS: \"$1\"" + case "$1" in + darwin*) + CIDL_PLATFORM="macos" + CIDL_MD5="$CIDL_MD5_DARWIN" + CIDL_MD5FUNC="do_md5" + ;; + linux-gnu*) + CIDL_PLATFORM="linux" + CIDL_MD5="$CIDL_MD5_LINUX" + CIDL_MD5FUNC="do_md5sum" + ;; + solaris*) + CIDL_PLATFORM="illumos" + CIDL_MD5="$CIDL_MD5_ILLUMOS" + CIDL_MD5FUNC="do_md5sum" + ;; + *) + fail "unsupported OS: $1" + ;; + esac + + TARBALL_DIRNAME="clickhouse-$CIDL_VERSION" + TARBALL_FILENAME="$TARBALL_DIRNAME.$CIDL_PLATFORM.tar.gz" + + TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" + TARBALL_DIR="$DOWNLOAD_DIR/$TARBALL_DIRNAME" } function do_download_curl { - curl --silent --show-error --fail --location --output "$2" "$1" + curl --silent --show-error --fail --location --output "$2" "$1" } function do_md5 { - md5 < "$1" + md5 < "$1" } function do_md5sum { - md5sum < "$1" | awk '{print $1}' + md5sum < "$1" | awk '{print $1}' } function do_untar { - mkdir -p "$2" && tar xzf "$1" -C "$2" + mkdir -p "$2" && tar xzf "$1" -C "$2" } main "$@" diff --git a/tools/ci_download_cockroachdb b/tools/ci_download_cockroachdb index 43680fef652..2a4c5db6c74 100755 --- a/tools/ci_download_cockroachdb +++ b/tools/ci_download_cockroachdb @@ -7,6 +7,7 @@ # set -o pipefail +set -o xtrace set -o errexit SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" @@ -28,139 +29,139 @@ CIDL_URL_ILLUMOS="https://illumos.org/downloads" function main { - # - # Process command-line arguments. 
We generally don't expect any, but - # we allow callers to specify a value to override OSTYPE, just for - # testing. - # - if [[ $# != 0 ]]; then - CIDL_OS="$1" - shift - else - CIDL_OS="$OSTYPE" - fi - - if [[ $# != 0 ]]; then - echo "unexpected arguments" >&2 - exit 2 - fi - - # Configure this program - configure_os "$CIDL_OS" - CIDL_URL="$CIDL_URL_BASE/$TARBALL_FILENAME" - - # Download the file. - echo "URL: $CIDL_URL" - echo "Local file: $TARBALL_FILE" - - mkdir -p "$DOWNLOAD_DIR" - mkdir -p "$DEST_DIR" - - local DO_DOWNLOAD="true" - if [[ -f "$TARBALL_FILE" ]]; then - # If the file exists with a valid checksum, we can skip downloading. - calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ - fail "failed to calculate md5sum" - if [[ "$calculated_md5" == "$CIDL_MD5" ]]; then - DO_DOWNLOAD="false" - fi - fi - - if [ "$DO_DOWNLOAD" == "true" ]; then - echo "Downloading..." - do_download_curl "$CIDL_URL" "$TARBALL_FILE" || \ - fail "failed to download file" - - # Verify the md5sum. - calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ - fail "failed to calculate md5sum" - if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then - fail "md5sum mismatch \ - (expected $CIDL_MD5, found $calculated_md5)" - fi - fi - - # Unpack the tarball. - do_untar "$TARBALL_FILE" - - # Copy the "cockroach" binary to the right spot. - $CIDL_ASSEMBLE - - # Run the binary as a sanity-check. - "$DEST_DIR/bin/cockroach" version + # + # Process command-line arguments. We generally don't expect any, but + # we allow callers to specify a value to override OSTYPE, just for + # testing. + # + if [[ $# != 0 ]]; then + CIDL_OS="$1" + shift + else + CIDL_OS="$OSTYPE" + fi + + if [[ $# != 0 ]]; then + echo "unexpected arguments" >&2 + exit 2 + fi + + # Configure this program + configure_os "$CIDL_OS" + CIDL_URL="$CIDL_URL_BASE/$TARBALL_FILENAME" + + # Download the file. + echo "URL: $CIDL_URL" + echo "Local file: $TARBALL_FILE" + + mkdir -p "$DOWNLOAD_DIR" + mkdir -p "$DEST_DIR" + + local DO_DOWNLOAD="true" + if [[ -f "$TARBALL_FILE" ]]; then + # If the file exists with a valid checksum, we can skip downloading. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" == "$CIDL_MD5" ]]; then + DO_DOWNLOAD="false" + fi + fi + + if [ "$DO_DOWNLOAD" == "true" ]; then + echo "Downloading..." + do_download_curl "$CIDL_URL" "$TARBALL_FILE" || \ + fail "failed to download file" + + # Verify the md5sum. + calculated_md5="$($CIDL_MD5FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate md5sum" + if [[ "$calculated_md5" != "$CIDL_MD5" ]]; then + fail "md5sum mismatch \ + (expected $CIDL_MD5, found $calculated_md5)" + fi + fi + + # Unpack the tarball. + do_untar "$TARBALL_FILE" + + # Copy the "cockroach" binary to the right spot. + $CIDL_ASSEMBLE + + # Run the binary as a sanity-check. 
+ "$DEST_DIR/bin/cockroach" version } function fail { - echo "$ARG0: $@" >&2 - exit 1 + echo "$ARG0: $@" >&2 + exit 1 } function configure_os { - echo "current directory: $PWD" - echo "configuring based on OS: \"$1\"" - case "$1" in - darwin*) - CIDL_BUILD="darwin-10.9-amd64" - CIDL_SUFFIX="tgz" - CIDL_MD5="$CIDL_MD5_DARWIN" - CIDL_MD5FUNC="do_md5" - CIDL_URL_BASE="$CIDL_URL_COCKROACH" - CIDL_ASSEMBLE="do_assemble_official" - ;; - linux-gnu*) - CIDL_BUILD="linux-amd64" - CIDL_SUFFIX="tgz" - CIDL_MD5="$CIDL_MD5_LINUX" - CIDL_MD5FUNC="do_md5sum" - CIDL_URL_BASE="$CIDL_URL_COCKROACH" - CIDL_ASSEMBLE="do_assemble_official" - ;; - solaris*) - CIDL_BUILD="illumos" - CIDL_SUFFIX="tar.gz" - CIDL_MD5="$CIDL_MD5_ILLUMOS" - CIDL_MD5FUNC="do_md5sum" - CIDL_URL_BASE="$CIDL_URL_ILLUMOS" - CIDL_ASSEMBLE="do_assemble_illumos" - ;; - *) - fail "unsupported OS: $1" - ;; - esac - - TARBALL_DIRNAME="cockroach-$CIDL_VERSION.$CIDL_BUILD" - TARBALL_FILENAME="$TARBALL_DIRNAME.$CIDL_SUFFIX" - - TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" - TARBALL_DIR="$DOWNLOAD_DIR/$TARBALL_DIRNAME" + echo "current directory: $PWD" + echo "configuring based on OS: \"$1\"" + case "$1" in + darwin*) + CIDL_BUILD="darwin-10.9-amd64" + CIDL_SUFFIX="tgz" + CIDL_MD5="$CIDL_MD5_DARWIN" + CIDL_MD5FUNC="do_md5" + CIDL_URL_BASE="$CIDL_URL_COCKROACH" + CIDL_ASSEMBLE="do_assemble_official" + ;; + linux-gnu*) + CIDL_BUILD="linux-amd64" + CIDL_SUFFIX="tgz" + CIDL_MD5="$CIDL_MD5_LINUX" + CIDL_MD5FUNC="do_md5sum" + CIDL_URL_BASE="$CIDL_URL_COCKROACH" + CIDL_ASSEMBLE="do_assemble_official" + ;; + solaris*) + CIDL_BUILD="illumos" + CIDL_SUFFIX="tar.gz" + CIDL_MD5="$CIDL_MD5_ILLUMOS" + CIDL_MD5FUNC="do_md5sum" + CIDL_URL_BASE="$CIDL_URL_ILLUMOS" + CIDL_ASSEMBLE="do_assemble_illumos" + ;; + *) + fail "unsupported OS: $1" + ;; + esac + + TARBALL_DIRNAME="cockroach-$CIDL_VERSION.$CIDL_BUILD" + TARBALL_FILENAME="$TARBALL_DIRNAME.$CIDL_SUFFIX" + + TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" + TARBALL_DIR="$DOWNLOAD_DIR/$TARBALL_DIRNAME" } function do_download_curl { - curl --silent --show-error --fail --location --output "$2" "$1" + curl --silent --show-error --fail --location --output "$2" "$1" } function do_md5 { - md5 < "$1" + md5 < "$1" } function do_md5sum { - md5sum < "$1" | awk '{print $1}' + md5sum < "$1" | awk '{print $1}' } function do_untar { - tar xzf "$1" -C "$DOWNLOAD_DIR" + tar xzf "$1" -C "$DOWNLOAD_DIR" } # # "Assembling" here is taking unpacked tarball and putting together a directory -# structure that's common for all platforms. This allows consumers (i.e., CI) -# to assume the same directory structure for all platforms. This is +# structure that's common for all platforms. This allows consumers (i.e., CI) +# to assume the same directory structure for all platforms. This is # platform-specific because on illumos, the tarball itself has a different # structure than the official release tarballs and the `cockroach` binary has # dynamic library dependencies. 
@@ -168,14 +169,14 @@ function do_untar function do_assemble_official { - mkdir -p "$DEST_DIR/bin" - cp "$TARBALL_DIR/cockroach" "$DEST_DIR/bin" + mkdir -p "$DEST_DIR/bin" + cp "$TARBALL_DIR/cockroach" "$DEST_DIR/bin" } function do_assemble_illumos { - rm -r "$DEST_DIR" || true - cp -r "$DOWNLOAD_DIR/cockroach-$CIDL_VERSION" "$DEST_DIR" + rm -r "$DEST_DIR" || true + cp -r "$DOWNLOAD_DIR/cockroach-$CIDL_VERSION" "$DEST_DIR" } main "$@" From c0be073146b3c340c88f1293c643799b5e977e4c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 4 Apr 2022 13:28:11 -0400 Subject: [PATCH 10/16] stray tab --- tools/ci_download_clickhouse | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci_download_clickhouse b/tools/ci_download_clickhouse index fdef07ac7ea..0eb2ff802fb 100755 --- a/tools/ci_download_clickhouse +++ b/tools/ci_download_clickhouse @@ -142,7 +142,7 @@ function do_md5sum function do_untar { - mkdir -p "$2" && tar xzf "$1" -C "$2" + mkdir -p "$2" && tar xzf "$1" -C "$2" } main "$@" From a4ea71c457575dd782d0de24e571cbf3ac701ff2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 4 Apr 2022 14:25:24 -0400 Subject: [PATCH 11/16] Add documentation, check clickhouse/cockroach in PATH --- tools/install_prerequisites.sh | 55 +++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index 9ec51362fd0..a24ea6e8333 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -6,6 +6,22 @@ set -eu SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" cd "${SOURCE_DIR}/.." +# Packages to be installed on all OSes: +# +# - libpq, the PostgreSQL client lib. +# We use Diesel's PostgreSQL support to connect to CockroachDB (which is +# wire-compatible with PostgreSQL). Diesel uses the native libpq to do this. +# `pg_config` is a utility which may be used to query for the installed +# PostgreSQL libraries, and is expected by the Omicron build to exist in +# the developer's PATH variable. +# - pkg-config, a tool for querying installed libraries. +# +# Packages to be installed on Helios only: +# +# - pkg, the IPS client (though likely it will just be updated) +# - build-essential: Common development tools +# - brand/omicron1/tools: Oxide's omicron1-brand Zone + HOST_OS=$(uname -s) if [[ "${HOST_OS}" == "Linux" ]]; then sudo apt-get install libpq-dev @@ -40,6 +56,17 @@ else exit -1 fi +# CockroachDB and Clickhouse are used by Omicron for storage of +# control plane metadata and metrics. +# +# They are used in a couple of spots within Omicron: +# +# - Test Suite: The test suite, regardless of host OS, builds temporary +# databases for testing, and expects `cockroach` and `clickhouse` to +# exist as a part of the PATH. +# - Packaging: When constructing packages on Helios, these utilities +# are packaged into zones which may be launched by the sled agent. + ./tools/ci_download_cockroachdb ./tools/ci_download_clickhouse @@ -47,13 +74,22 @@ fi expected_in_path=( 'pg_config' 'pkg-config' + 'cockroach' + 'clickhouse' ) -declare -A illumos_hints=( +declare -A illumos_path_hints=( ['pg_config']="On illumos, this is typically found in '/opt/ooce/bin'" ['pkg-config']="On illumos, this is typically found in '/usr/bin'" ) +declare -A path_hints=( + ['cockroach']="This should have been installed to '$PWD/out/cockroachdb/bin'" + ['clickhouse']="This should have been installed to '$PWD/out/clickhouse'" +) + +# Check all paths before returning an error. 
+ANY_PATH_ERROR="false" for command in "${expected_in_path[@]}"; do rc=0 which "$command" &> /dev/null || rc=$? @@ -61,10 +97,21 @@ for command in "${expected_in_path[@]}"; do echo "$command seems installed, but not found in PATH. Please add it." if [[ "${HOST_OS}" == "SunOS" ]]; then - if [ "${illumos_hints[$command]+_}" ]; then - echo "${illumos_hints[$command]}" + if [ "${illumos_path_hints[$command]+_}" ]; then + echo "${illumos_path_hints[$command]}" fi fi - exit -1 + + if [ "${path_hints[$command]+_}" ]; then + echo "${path_hints[$command]}" + fi + + ANY_PATH_ERROR="true" fi done + +if [ "$ANY_PATH_ERROR" == "true" ]; then + exit -1 +fi + +echo "All prerequisites installed successfully, and PATH looks valid" From 3de53f182027f6030f1ec907d611ecc688b5960a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 4 Apr 2022 14:33:58 -0400 Subject: [PATCH 12/16] More docs, hopefully more clear rc usage --- tools/install_prerequisites.sh | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index 390094b4ef5..a42cca50e19 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -11,7 +11,7 @@ if [[ "${HOST_OS}" == "Linux" ]]; then sudo apt-get install libpq-dev sudo apt-get install pkg-config elif [[ "${HOST_OS}" == "SunOS" ]]; then - need=( + packages=( 'pkg:/package/pkg' 'build-essential' 'library/postgresql-13' @@ -19,18 +19,19 @@ elif [[ "${HOST_OS}" == "SunOS" ]]; then 'brand/omicron1/tools' ) - # Perform updates - if (( ${#need[@]} > 0 )); then - pfexec pkg install -v "${need[@]}" && rc=$? || rc=$? - # Return codes: - # 0: Normal Success - # 4: Failure because we're already up-to-date. Also acceptable. - if [ "$rc" -ne 4 ] && [ "$rc" -ne 0 ]; then - exit "$rc" - fi + # Install/update the set of packages. + # Explicitly manage the return code using "rc" to observe the result of this + # command without exiting the script entirely (due to bash's "errexit"). + rc=0 + pfexec pkg install -v "${packages[@]}" || rc=$? + # Return codes: + # 0: Normal Success + # 4: Failure because we're already up-to-date. Also acceptable. + if [[ "$rc" -ne 4 ]] && [[ "$rc" -ne 0 ]]; then + exit "$rc" fi - pkg list -v "${need[@]}" + pkg list -v "${packages[@]}" elif [[ "${HOST_OS}" == "Darwin" ]]; then brew install postgresql brew install pkg-config From fd77f51bd0606b910e3fcf4af2912fd02d9e5835 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 4 Apr 2022 15:52:18 -0400 Subject: [PATCH 13/16] Idempotency message on error --- tools/install_prerequisites.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index a42cca50e19..9cc47afb389 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -2,6 +2,12 @@ set -eu +on_exit () { + echo "Something went wrong, but this script is idempotent - If you can fix the issue, try re-running" +} + +trap on_exit ERR + # Set the CWD to Omicron's source. SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" cd "${SOURCE_DIR}/.." From 809f5f85e69ef6d754490b5676d4abe4d06fe103 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 4 Apr 2022 16:06:54 -0400 Subject: [PATCH 14/16] Avoid using associative arrays. 
Macs have old bash --- tools/install_prerequisites.sh | 46 ++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/tools/install_prerequisites.sh b/tools/install_prerequisites.sh index 1df7c7f951e..646e7856ee7 100755 --- a/tools/install_prerequisites.sh +++ b/tools/install_prerequisites.sh @@ -84,15 +84,29 @@ expected_in_path=( 'clickhouse' ) -declare -A illumos_path_hints=( - ['pg_config']="On illumos, this is typically found in '/opt/ooce/bin'" - ['pkg-config']="On illumos, this is typically found in '/usr/bin'" -) - -declare -A path_hints=( - ['cockroach']="This should have been installed to '$PWD/out/cockroachdb/bin'" - ['clickhouse']="This should have been installed to '$PWD/out/clickhouse'" -) +function show_hint +{ + case "$1" in + "pg_config") + if [[ "${HOST_OS}" == "SunOS" ]]; then + echo "On illumos, $1 is typically found in '/opt/ooce/bin'" + fi + ;; + "pkg-config") + if [[ "${HOST_OS}" == "SunOS" ]]; then + echo "On illumos, $1 is typically found in '/usr/bin'" + fi + ;; + "cockroach") + echo "$1 should have been installed to '$PWD/out/cockroachdb/bin'" + ;; + "clickhouse") + echo "$1 should have been installed to '$PWD/out/clickhouse'" + ;; + *) + ;; + esac +} # Check all paths before returning an error. ANY_PATH_ERROR="false" @@ -100,18 +114,8 @@ for command in "${expected_in_path[@]}"; do rc=0 which "$command" &> /dev/null || rc=$? if [ "$rc" -ne 0 ]; then - echo "$command seems installed, but not found in PATH. Please add it." - - if [[ "${HOST_OS}" == "SunOS" ]]; then - if [ "${illumos_path_hints[$command]+_}" ]; then - echo "${illumos_path_hints[$command]}" - fi - fi - - if [ "${path_hints[$command]+_}" ]; then - echo "${path_hints[$command]}" - fi - + echo "ERROR: $command seems installed, but was not found in PATH. Please add it." + show_hint "$command" ANY_PATH_ERROR="true" fi done From 001ac8e03273cf9b691a4ab2f5792adaa86be0f4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 8 Apr 2022 11:41:48 -0400 Subject: [PATCH 15/16] Split up omicron readme into easier-to-navigate pieces --- README.adoc | 394 +-------------------------------- docs/cli.adoc | 184 +++++++++++++++ docs/how-to-run-simulated.adoc | 143 ++++++++++++ docs/how-to-run.adoc | 114 ++++++++++ 4 files changed, 451 insertions(+), 384 deletions(-) create mode 100644 docs/cli.adoc create mode 100644 docs/how-to-run-simulated.adoc create mode 100644 docs/how-to-run.adoc diff --git a/README.adoc b/README.adoc index c691666f078..123d016493e 100644 --- a/README.adoc +++ b/README.adoc @@ -33,400 +33,26 @@ You can **format the code** using `cargo fmt`. Make sure to run this before pus You can **run the https://github.com/rust-lang/rust-clippy[Clippy linter]** using `cargo clippy \-- -D warnings -A clippy::style`. Make sure to run this before pushing changes. The CI checks that the code is clippy-clean. -=== Installing Prerequisite Software +=== Running Omicron -The following software may be installed automatically with the following script: +Omicron has two modes of operation: "simulated" and "non-simulated". ----- -$ ./tools/install_prerequisites.sh ----- - -=== Running (Simulated) Omicron - -NOTE: If you'd like to run on Helios, refer to <> below. - -To **run Omicron** you need to run four programs: - -* a CockroachDB cluster. For development, you can use the `omicron-dev` tool in this repository to start a single-node CockroachDB cluster **that will delete the database when you shut it down.** -* a ClickHouse server. 
You should use the `omicron-dev` tool for this as well, see below, and as with CockroachDB, -the database files will be deleted when you stop the program. -* `nexus`: the guts of the control plane -* `sled-agent-sim`: a simulator for the component that manages a single sled - -The easiest way to start the required databases is to use the built-in `omicron-dev` tool. This tool assumes that the `cockroach` and `clickhouse` executables are on your PATH, and match the versions above. - -. Start CockroachDB using `omicron-dev db-run`: -+ -[source,text] ----- -$ cargo run --bin=omicron-dev -- db-run - Finished dev [unoptimized + debuginfo] target(s) in 0.15s - Running `target/debug/omicron-dev db-run` -omicron-dev: using temporary directory for database store (cleaned up on clean exit) -omicron-dev: will run this to start CockroachDB: -cockroach start-single-node --insecure --http-addr=:0 --store /var/tmp/omicron_tmp/.tmpM8KpTf/data --listen-addr 127.0.0.1:32221 --listening-url-file /var/tmp/omicron_tmp/.tmpM8KpTf/listen-url -omicron-dev: temporary directory: /var/tmp/omicron_tmp/.tmpM8KpTf -* -* WARNING: ALL SECURITY CONTROLS HAVE BEEN DISABLED! -* -* This mode is intended for non-production testing only. -* -* In this mode: -* - Your cluster is open to any client that can access 127.0.0.1. -* - Intruders with access to your machine or network can observe client-server traffic. -* - Intruders can log in without password and read or write any data in the cluster. -* - Intruders can consume all your server's resources and cause unavailability. -* -* -* INFO: To start a secure server without mandating TLS for clients, -* consider --accept-sql-without-tls instead. For other options, see: -* -* - https://go.crdb.dev/issue-v/53404/v20.2 -* - https://www.cockroachlabs.com/docs/v20.2/secure-a-cluster.html -* - -omicron-dev: child process: pid 3815 -omicron-dev: CockroachDB listening at: postgresql://root@127.0.0.1:32221/omicron?sslmode=disable -omicron-dev: populating database -* -* INFO: Replication was disabled for this cluster. -* When/if adding nodes in the future, update zone configurations to increase the replication factor. -* -CockroachDB node starting at 2021-04-13 15:58:59.680359279 +0000 UTC (took 0.4s) -build: OSS v20.2.5 @ 2021/03/17 21:00:51 (go1.16.2) -webui: http://127.0.0.1:41618 -sql: postgresql://root@127.0.0.1:32221?sslmode=disable -RPC client flags: cockroach --host=127.0.0.1:32221 --insecure -logs: /var/tmp/omicron_tmp/.tmpM8KpTf/data/logs -temp dir: /var/tmp/omicron_tmp/.tmpM8KpTf/data/cockroach-temp022560209 -external I/O path: /var/tmp/omicron_tmp/.tmpM8KpTf/data/extern -store[0]: path=/var/tmp/omicron_tmp/.tmpM8KpTf/data -storage engine: pebble -status: initialized new cluster -clusterID: 8ab646f0-67f0-484d-8010-e4444fb86336 -nodeID: 1 -omicron-dev: populated database ----- -+ -Note that as the output indicates, this cluster will be available to anybody that can reach 127.0.0.1. - -. 
Start the ClickHouse database server: -+ -[source,text] ----- -$ cargo run --bin omicron-dev -- ch-run - Finished dev [unoptimized + debuginfo] target(s) in 0.47s - Running `target/debug/omicron-dev ch-run` -omicron-dev: running ClickHouse (PID: 2463), full command is "clickhouse server --log-file /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot/clickhouse-server.log --errorlog-file /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot/clickhouse-server.errlog -- --http_port 8123 --path /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot" -omicron-dev: using /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot for ClickHouse data storage ----- - -. `nexus` requires a configuration file to run. You can use `nexus/examples/config.toml` to start with. Build and run it like this: -+ -[source,text] ----- -$ cargo run --bin=nexus -- nexus/examples/config.toml -... -listening: http://127.0.0.1:12220 ----- -Nexus can also serve the web console. Instructions for generating the static assets and pointing Nexus to them are https://github.com/oxidecomputer/console/blob/main/docs/serve-from-nexus.md[here]. Without console assets, Nexus will still start and run normally as an API. A few link:./nexus/src/external_api/console_api.rs[console-specific routes] will 404. +The simulated version of Omicron allows the high-level control plane logic to run without +actually managing any sled-local resources. This version can be executed on Linux, Mac, and illumos. +This mode of operation is provided for development and testing only. -. `sled-agent-sim` only accepts configuration on the command line. Run it with a uuid identifying itself (this would be a uuid for the sled it's managing), an IP:port for itself, and the IP:port of `nexus`'s _internal_ interface. Using default config, this might look like this: -+ -[source,text] ----- -$ cargo run --bin=sled-agent-sim -- $(uuidgen) 127.0.0.1:12345 127.0.0.1:12221 -... -Jun 02 12:21:50.989 INFO listening, local_addr: 127.0.0.1:12345, component: dropshot ----- +See: xref:docs/how-to-run-simulated.adoc[]. -. `oximeter` is similar to `nexus`, requiring a configuration file. You can use `oximeter/collector/config.toml`, and the whole thing can be run with: -+ -[source,text] ----- -$ cargo run --bin=oximeter -- oximeter/collector/config.toml -Dec 02 18:00:01.062 INFO starting oximeter server -Dec 02 18:00:01.062 DEBG creating ClickHouse client -Dec 02 18:00:01.068 DEBG initializing ClickHouse database, component: clickhouse-client, collector_id: 1da65e5b-210c-4859-a7d7-200c1e659972, component: oximeter-agent -Dec 02 18:00:01.093 DEBG registered endpoint, path: /producers, method: POST, local_addr: [::1]:12223, component: dropshot -... ----- +The non-simulated version of Omicron actually manages sled-local resources, and may only +be executed on hosts running Helios. +This mode of operation will be used in production. -Once everything is up and running, you can use `curl` directly to hit either of -the servers. But it's easier to use the [`oxide` CLI](https://docs.oxide.computer/cli/manual) (see below). +See: xref:docs/how-to-run.adoc[]. == Docker image This repo includes a Dockerfile that builds an image containing the Nexus and sled agent. There's a GitHub Actions workflow that builds and publishes the Docker image. This is used by the https://github.com/oxidecomputer/console/[console] project for prototyping, demoing, and testing. 
This is **not** the way Omicron will be deployed on production systems, but it's a useful vehicle for working with it. -== Quick demo - -The `oxide` CLI can be installed from the [latest -release](https://github.com/oxidecomputer/cli/releases). - -Run `oxide auth login` to authenticate with your Oxide account. Alternatively, -`oxide` will respect the `OXIDE_TOKEN` and `OXIDE_HOST` environment variables. - -If you find bugs or have feedback, leave it on the [cli -repo](https://github.com/oxidecomputer/cli/issues). - -Here's a small demo that creates a project, creates an instance, and attaches a disk to it: - -[source,text] ----- -$ oxide org create myorg \ - --description "My organization" -✔ Created organization myorg - -$ oxide project create myproject \ - --description "My project" \ - --organization myorg -✔ Created project myorg/myproject - -$ oxide instance create myinstance \ - --description "My instance" \ - --project myproject \ - --org myorg \ - --hostname "myinstance.maze-war.com" \ - --ncpus 1 \ - --memory 8 -✔ Created instance myinstance in myorg/myproject - -$ oxide instance view myinstance \ - --org myorg \ - --project myproject \ - --format json -{ - "id": "99ad2514-050c-4493-9cb9-d9ceba980a98", - "name": "myinstance", - "description": "My instance", - "timeCreated": "2021-11-17T01:45:07.606749Z", - "timeModified": "2021-11-17T01:45:07.606749Z", - "projectId": "c197b9d2-285c-4e9f-9461-1815ef093c8d", - "ncpus": 1, - "memory": 8, - "hostname": "myinstance.maze-war.com", - "runState": "running", - "timeRunStateUpdated": "2021-11-17T01:45:09.120652Z" -} - -$ oxide disk create nginx \ - -D "The nginx disk." \ - -o myorg \ - -p myproject \ - --size 10 -✔ Created disk nginx in myorg/myproject - - -$ oxide disk view nginx \ - --org myorg \ - --project myproject \ - --format json -{ - "id": "551bbe67-3640-41c9-b968-249a136e5e31", - "name": "nginx", - "description": "The nginx disk.", - "timeCreated": "2021-11-17T01:47:36.524136Z", - "timeModified": "2021-11-17T01:47:36.524136Z", - "projectId": "c197b9d2-285c-4e9f-9461-1815ef093c8d", - "snapshotId": null, - "size": 1024, - "state": { - "state": "detached" - }, - "devicePath": "/mnt/nginx" -} - -$ oxide disk attach nginx myinstance \ - -o maze-war \ - -p prod-online -✔ Attached disk nginx to instance myinstance in myorg/myproject - -$ oxide instance disks myinstance \ - -o maze-war \ - -p prod-online \ - --format json -{ - "instanceId": "99ad2514-050c-4493-9cb9-d9ceba980a98", - "diskId": "551bbe67-3640-41c9-b968-249a136e5e31", - "diskName": "nginx", - "diskState": { - "state": "attached", - "instance": "99ad2514-050c-4493-9cb9-d9ceba980a98" - } -} - -# Alternatively, you can use the API command to run any endpoint. -# This operates like a fancy, authenticated curl. - -$ oxide api --help -Makes an authenticated HTTP request to the Oxide API and prints the response. - -The endpoint argument should be a path of a Oxide API endpoint. - -The default HTTP request method is "GET" normally and "POST" if any parameters -were added. Override the method with `--method`. - -Pass one or more `-f/--raw-field` values in "key=value" format to add static string -parameters to the request payload. To add non-string or otherwise dynamic values, see -`--field` below. Note that adding request parameters will automatically switch the -request method to POST. To send the parameters as a GET query string instead, use -`--method GET`. 
- -The `-F/--field` flag has magic type conversion based on the format of the value: - -- literal values "true", "false", "null", and integer/float numbers get converted to - appropriate JSON types; -- if the value starts with "@", the rest of the value is interpreted as a - filename to read the value from. Pass "-" to read from standard input. - -Raw request body may be passed from the outside via a file specified by `--input`. -Pass "-" to read from standard input. In this mode, parameters specified via -`--field` flags are serialized into URL query parameters. - -In `--paginate` mode, all pages of results will sequentially be requested until -there are no more pages of results. - -USAGE: - oxide api [OPTIONS] - -ARGS: - - The endpoint to request - -OPTIONS: - -d, --debug - Print debug info - - [env: DEBUG=] - - -f, --raw-field - Add a string parameter in key=value format - - -F, --field - Add a typed parameter in key=value format - - -h, --help - Print help information - - -H, --header
- Add a HTTP request header in `key:value` format - - -i, --include - Include HTTP response headers in the output - - --input - The file to use as body for the HTTP request (use "-" to read from standard input) - - [default: ] - - --paginate - Make additional HTTP requests to fetch all pages of results - - -X, --method - The HTTP method for the request - -$ oxide api /session/me -{ - "id": "99ad2514-050c-4493-9cb9-d9ceba980a98" -} - - ----- - -== Deploying Omicron - - -Prerequisite: Have a machine already running Helios. An easy way to -do this is by using a https://github.com/oxidecomputer/helios-engvm[Helios VM]. -ISOs are also available for download https://pkg.oxide.computer/install[here]. - -The control plane repository contains a packaging tool which bundles binaries -and SMF manifests. After building the expected binaries, they can be packaged -in a format which lets them be transferred to a Helios machine. - -This tool acts on a `package-manifest.toml` file which describes the packages to be -bundled in the build. - -Configuration files are used to select IP addresses, and to manage Zpools -utilized by the Sled Agent. These configuration files are located within -`smf/`, and likely need to be modified to use addresses and zpools which match -your hardware. Much of this configuration will be automated in the future -(e.g., IP addresses will be inferred and posted to a DNS system, Zpools will -automatically be detected on discovered disks), but for now it remains -hard-coded. - -[source,text] ----- -$ cargo build -$ ./target/debug/omicron-package package ----- - -The aforementioned package command fills a target directory of choice -(by default, `out/` within the omicron repository) with tarballs ready -to be unpacked as services. - -To install the services on a target machine, the following command -may be executed: - -[source,text] ----- -# Note that "sudo" is required to install SMF services; an appropriate pfexec -# profile may also be used. -$ sudo ./target/debug/omicron-package install ----- - -This service installs a bootstrap service, which itself loads other -requested services. The bootstrap service is currently the only -service which is "persistent" across reboots - although it will -initialize other service as part of its setup sequence anyway. - -[source,text] ----- -# List all services: -$ svcs -# View zones managed by Omicron (prefixed with "oxz_"): -$ zoneadm list -cv -# View logs for a service: -$ pfexec tail -f $(pfexec svcs -z oxz_nexus -L nexus) ----- - -To uninstall all Omicron services from a machine, the following may be -executed: - -[source,text] ----- -$ sudo ./target/debug/omicron-package uninstall ----- - -=== Test Environment - -When we deploy, we're effectively creating a number of different zones -for all the components that make up Omicron (Nexus, Clickhouse, Crucible, etc). -Since all these services run in different zones they cannot communicate with -each other (and Sled Agent in the global zone) via `localhost`. In practice, -we'll assign addresses as per RFD 63 as well as incorporating DNS based -service discovery. 
- -For the purposes of local development today, we specify some hardcoded IPv6 -unique local addresses in `fd00:1de::/16`: - -[options="header"] -|=================================================================================================== -| Service | Endpoint -| Sled Agent: Bootstrap | `[::]:12346` -| Sled Agent: Dropshot API | `[fd00:1de::1]:12345` -| Cockroach DB | `[fd00:1de::5]:32221` -| Oximeter | `[fd00:1de::6]:12223` -| Nexus: External API | `[fd00:1de::7]:12220` -| Nexus: Internal API | `[fd00:1de::7]:12221` -| Clickhouse | `[fd00:1de::8]:8123` -| Crucible Downstairs | `[fd00:1de::9]:32345`, `[fd00:1de::10]:32345`, `[fd00:1de::11]:32345` -|=================================================================================================== - -Note that Sled Agent runs in the global zone and is the one responsible for bringing up all the other -other services and allocating them with vNICs and IPv6 addresses. - == Configuration reference `nexus` requires a TOML configuration file. There's an example in diff --git a/docs/cli.adoc b/docs/cli.adoc new file mode 100644 index 00000000000..5cb5101dd12 --- /dev/null +++ b/docs/cli.adoc @@ -0,0 +1,184 @@ +:showtitle: +:toc: left +:icons: font + += Oxide CLI + +The `oxide` CLI is used to access Nexus' external API, which is +the public interface to Omicron. + +For more detail, refer to https://docs.oxide.computer/cli/manual[oxide's CLI Manual]. + +== Quick demo + +The `oxide` CLI can be installed from the https://github.com/oxidecomputer/cli/releases[latest release]. + +Run `oxide auth login` to authenticate with your Oxide account. Alternatively, +`oxide` will respect the `OXIDE_TOKEN` and `OXIDE_HOST` environment variables. + +If you find bugs or have feedback, leave it on the https://github.com/oxidecomputer/cli/issues[cli repo]. + +Here's a small demo that creates a project, creates an instance, and attaches a disk to it: + +[source,text] +---- +$ oxide org create myorg \ + --description "My organization" +✔ Created organization myorg + +$ oxide project create myproject \ + --description "My project" \ + --organization myorg +✔ Created project myorg/myproject + +$ oxide instance create myinstance \ + --description "My instance" \ + --project myproject \ + --org myorg \ + --hostname "myinstance.maze-war.com" \ + --ncpus 1 \ + --memory 8 +✔ Created instance myinstance in myorg/myproject + +$ oxide instance view myinstance \ + --org myorg \ + --project myproject \ + --format json +{ + "id": "99ad2514-050c-4493-9cb9-d9ceba980a98", + "name": "myinstance", + "description": "My instance", + "timeCreated": "2021-11-17T01:45:07.606749Z", + "timeModified": "2021-11-17T01:45:07.606749Z", + "projectId": "c197b9d2-285c-4e9f-9461-1815ef093c8d", + "ncpus": 1, + "memory": 8, + "hostname": "myinstance.maze-war.com", + "runState": "running", + "timeRunStateUpdated": "2021-11-17T01:45:09.120652Z" + + +$ oxide disk create nginx \ + -D "The nginx disk." 
\ + -o myorg \ + -p myproject \ + --size 10 +✔ Created disk nginx in myorg/myproject + + +$ oxide disk view nginx \ + --org myorg \ + --project myproject \ + --format json +{ + "id": "551bbe67-3640-41c9-b968-249a136e5e31", + "name": "nginx", + "description": "The nginx disk.", + "timeCreated": "2021-11-17T01:47:36.524136Z", + "timeModified": "2021-11-17T01:47:36.524136Z", + "projectId": "c197b9d2-285c-4e9f-9461-1815ef093c8d", + "snapshotId": null, + "size": 1024, + "state": { + "state": "detached" + }, + "devicePath": "/mnt/nginx" +} + +$ oxide disk attach nginx myinstance \ + -o maze-war \ + -p prod-online +✔ Attached disk nginx to instance myinstance in myorg/myproject + +$ oxide instance disks myinstance \ + -o maze-war \ + -p prod-online \ + --format json +{ + "instanceId": "99ad2514-050c-4493-9cb9-d9ceba980a98", + "diskId": "551bbe67-3640-41c9-b968-249a136e5e31", + "diskName": "nginx", + "diskState": { + "state": "attached", + "instance": "99ad2514-050c-4493-9cb9-d9ceba980a98" + } +} +---- + +Alternatively, you can use the API command to run any endpoint. +This operates like a fancy, authenticated curl. + +[source,text] +---- +$ oxide api --help +Makes an authenticated HTTP request to the Oxide API and prints the response. + +The endpoint argument should be a path of a Oxide API endpoint. + +The default HTTP request method is "GET" normally and "POST" if any parameters +were added. Override the method with `--method`. + +Pass one or more `-f/--raw-field` values in "key=value" format to add static string +parameters to the request payload. To add non-string or otherwise dynamic values, see +`--field` below. Note that adding request parameters will automatically switch the +request method to POST. To send the parameters as a GET query string instead, use +`--method GET`. + +The `-F/--field` flag has magic type conversion based on the format of the value: + +- literal values "true", "false", "null", and integer/float numbers get converted to + appropriate JSON types; +- if the value starts with "@", the rest of the value is interpreted as a + filename to read the value from. Pass "-" to read from standard input. + +Raw request body may be passed from the outside via a file specified by `--input`. +Pass "-" to read from standard input. In this mode, parameters specified via +`--field` flags are serialized into URL query parameters. + +In `--paginate` mode, all pages of results will sequentially be requested until +there are no more pages of results. + +USAGE: + oxide api [OPTIONS] + +ARGS: + + The endpoint to request + +OPTIONS: + -d, --debug + Print debug info + + [env: DEBUG=] + + -f, --raw-field + Add a string parameter in key=value format + + -F, --field + Add a typed parameter in key=value format + + -h, --help + Print help information + + -H, --header
+ Add a HTTP request header in `key:value` format + + -i, --include + Include HTTP response headers in the output + + --input + The file to use as body for the HTTP request (use "-" to read from standard input) + + [default: ] + + --paginate + Make additional HTTP requests to fetch all pages of results + + -X, --method + The HTTP method for the request + +$ oxide api /session/me +{ + "id": "99ad2514-050c-4493-9cb9-d9ceba980a98" +} +---- diff --git a/docs/how-to-run-simulated.adoc b/docs/how-to-run-simulated.adoc new file mode 100644 index 00000000000..039cc0d6628 --- /dev/null +++ b/docs/how-to-run-simulated.adoc @@ -0,0 +1,143 @@ +:showtitle: +:toc: left +:icons: font + += Running Omicron (Simulated) + +== What is "Simulated Omicron"? + +The "Sled-local" component of the control plane - which expects to manage local +resources - has tight coupling with the illumos Operating System. However, a +good portion of the control plane (interactions with the database, metrics +collection, and the console, for example) executes within programs that are +decoupled from the underlying Sled. + +To enable more flexible testing of this software, a "simulated" sled agent is +provided, capable of running across many platforms (Linux, Mac, illumos). This +allows developers to test the control plane flows without actually having any +resources to manage. + +If you are interested in running the "real" control plane (which is necessary +for managing instances, storage, and networking) refer to the non-simulated +guide at xref:how-to-run.adoc[]. + +== Installing Prerequisites + +Prerequisite software may be installed with the following script: + +[source,text] +---- +$ ./tools/install_prerequisites.sh +---- + +== Running + +To **run Omicron** you need to run four programs: + +* a CockroachDB cluster. For development, you can use the `omicron-dev` tool in this repository to start a single-node CockroachDB cluster **that will delete the database when you shut it down.** +* a ClickHouse server. You should use the `omicron-dev` tool for this as well, see below, and as with CockroachDB, +the database files will be deleted when you stop the program. +* `nexus`: the guts of the control plane +* `sled-agent-sim`: a simulator for the component that manages a single sled + +The easiest way to start the required databases is to use the built-in `omicron-dev` tool. This tool assumes that the `cockroach` and `clickhouse` executables are on your PATH, and match the versions above. + +. Start CockroachDB using `omicron-dev db-run`: ++ +[source,text] +---- +$ cargo run --bin=omicron-dev -- db-run + Finished dev [unoptimized + debuginfo] target(s) in 0.15s + Running `target/debug/omicron-dev db-run` +omicron-dev: using temporary directory for database store (cleaned up on clean exit) +omicron-dev: will run this to start CockroachDB: +cockroach start-single-node --insecure --http-addr=:0 --store /var/tmp/omicron_tmp/.tmpM8KpTf/data --listen-addr 127.0.0.1:32221 --listening-url-file /var/tmp/omicron_tmp/.tmpM8KpTf/listen-url +omicron-dev: temporary directory: /var/tmp/omicron_tmp/.tmpM8KpTf +* +* WARNING: ALL SECURITY CONTROLS HAVE BEEN DISABLED! +* +* This mode is intended for non-production testing only. +* +* In this mode: +* - Your cluster is open to any client that can access 127.0.0.1. +* - Intruders with access to your machine or network can observe client-server traffic. +* - Intruders can log in without password and read or write any data in the cluster. 
+* - Intruders can consume all your server's resources and cause unavailability. +* +* +* INFO: To start a secure server without mandating TLS for clients, +* consider --accept-sql-without-tls instead. For other options, see: +* +* - https://go.crdb.dev/issue-v/53404/v20.2 +* - https://www.cockroachlabs.com/docs/v20.2/secure-a-cluster.html +* + +omicron-dev: child process: pid 3815 +omicron-dev: CockroachDB listening at: postgresql://root@127.0.0.1:32221/omicron?sslmode=disable +omicron-dev: populating database +* +* INFO: Replication was disabled for this cluster. +* When/if adding nodes in the future, update zone configurations to increase the replication factor. +* +CockroachDB node starting at 2021-04-13 15:58:59.680359279 +0000 UTC (took 0.4s) +build: OSS v20.2.5 @ 2021/03/17 21:00:51 (go1.16.2) +webui: http://127.0.0.1:41618 +sql: postgresql://root@127.0.0.1:32221?sslmode=disable +RPC client flags: cockroach --host=127.0.0.1:32221 --insecure +logs: /var/tmp/omicron_tmp/.tmpM8KpTf/data/logs +temp dir: /var/tmp/omicron_tmp/.tmpM8KpTf/data/cockroach-temp022560209 +external I/O path: /var/tmp/omicron_tmp/.tmpM8KpTf/data/extern +store[0]: path=/var/tmp/omicron_tmp/.tmpM8KpTf/data +storage engine: pebble +status: initialized new cluster +clusterID: 8ab646f0-67f0-484d-8010-e4444fb86336 +nodeID: 1 +omicron-dev: populated database +---- ++ +Note that as the output indicates, this cluster will be available to anybody that can reach 127.0.0.1. + +. Start the ClickHouse database server: ++ +[source,text] +---- +$ cargo run --bin omicron-dev -- ch-run + Finished dev [unoptimized + debuginfo] target(s) in 0.47s + Running `target/debug/omicron-dev ch-run` +omicron-dev: running ClickHouse (PID: 2463), full command is "clickhouse server --log-file /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot/clickhouse-server.log --errorlog-file /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot/clickhouse-server.errlog -- --http_port 8123 --path /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot" +omicron-dev: using /var/folders/67/2tlym22x1r3d2kwbh84j298w0000gn/T/.tmpJ5nhot for ClickHouse data storage +---- + +. `nexus` requires a configuration file to run. You can use `nexus/examples/config.toml` to start with. Build and run it like this: ++ +[source,text] +---- +$ cargo run --bin=nexus -- nexus/examples/config.toml +... +listening: http://127.0.0.1:12220 +---- +Nexus can also serve the web console. Instructions for generating the static assets and pointing Nexus to them are https://github.com/oxidecomputer/console/blob/main/docs/serve-from-nexus.md[here]. Without console assets, Nexus will still start and run normally as an API. A few link:./nexus/src/external_api/console_api.rs[console-specific routes] will 404. + +. `sled-agent-sim` only accepts configuration on the command line. Run it with a uuid identifying itself (this would be a uuid for the sled it's managing), an IP:port for itself, and the IP:port of `nexus`'s _internal_ interface. Using default config, this might look like this: ++ +[source,text] +---- +$ cargo run --bin=sled-agent-sim -- $(uuidgen) 127.0.0.1:12345 127.0.0.1:12221 +... +Jun 02 12:21:50.989 INFO listening, local_addr: 127.0.0.1:12345, component: dropshot +---- + +. `oximeter` is similar to `nexus`, requiring a configuration file. 
You can use `oximeter/collector/config.toml`, and the whole thing can be run with: ++ +[source,text] +---- +$ cargo run --bin=oximeter -- oximeter/collector/config.toml +Dec 02 18:00:01.062 INFO starting oximeter server +Dec 02 18:00:01.062 DEBG creating ClickHouse client +Dec 02 18:00:01.068 DEBG initializing ClickHouse database, component: clickhouse-client, collector_id: 1da65e5b-210c-4859-a7d7-200c1e659972, component: oximeter-agent +Dec 02 18:00:01.093 DEBG registered endpoint, path: /producers, method: POST, local_addr: [::1]:12223, component: dropshot +... +---- + +Once everything is up and running, you can use `curl` directly to hit either of +the servers. But it's easier to use the xref:cli.adoc[`oxide` CLI]. diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc new file mode 100644 index 00000000000..11505406a89 --- /dev/null +++ b/docs/how-to-run.adoc @@ -0,0 +1,114 @@ +:showtitle: +:toc: left +:icons: font + += Running Omicron (Non-Simulated) + +Omicron is the control plane for an Oxide rack. It expects to execute +on Helios systems, and Sleds use Helios-specific interfaces to manage +resources. + +If you're interested in running the control plane on other platforms, including +Linux and Mac, refer to the guide on xref:how-to-run-simulated.adoc[running +simulated Omicron]. + +== Installing Prerequisite Software + +A major prerequisite is to have a machine already running Helios. An easy way to +do this is by using a https://github.com/oxidecomputer/helios-engvm[Helios VM]. +ISOs are also available for download https://pkg.oxide.computer/install[here]. + +Any additional prerequisite software may be installed with the following script: + +[source,text] +---- +$ ./tools/install_prerequisites.sh +---- + +== Deploying Omicron + +The control plane repository contains a packaging tool which bundles binaries +and SMF manifests. After building the expected binaries, they can be packaged +in a format which lets them be transferred to a Helios machine. + +This tool acts on a `package-manifest.toml` file which describes the packages to be +bundled in the build. + +Configuration files are used to select IP addresses, and to manage Zpools +utilized by the Sled Agent. These configuration files are located within +`smf/`, and likely need to be modified to use addresses and zpools which match +your hardware. Much of this configuration will be automated in the future +(e.g., IP addresses will be inferred and posted to a DNS system, Zpools will +automatically be detected on discovered disks), but for now it remains +hard-coded. + +[source,text] +---- +$ cargo run --release --bin omicron-package -- package +---- + +NOTE: Running in `release` mode isn't strictly required, but improves +the performance of the packaging tools significantly. + +The aforementioned package command fills a target directory of choice +(by default, `out/` within the omicron repository) with tarballs ready +to be unpacked as services. + +To install the services on a target machine, the following command +may be executed: + +[source,text] +---- +$ pfexec cargo run --release --bin omicron-package -- install +---- + +This service installs a bootstrap service, which itself loads other +requested services. The bootstrap service is currently the only +service which is "persistent" across reboots - although it will +initialize other service as part of its setup sequence anyway. 
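+Before digging into individual zones, it can help to confirm that the
+bootstrap service itself came online. A minimal check is sketched below; the
+exact FMRI is an assumption (it depends on the manifests under `smf/`), so
+substitute whatever `svcs -a` reports on your machine:
+
+[source,text]
+----
+# Look for the sled agent / bootstrap service (service name is an assumption):
+$ svcs -a | grep -i sled-agent
+# If it failed to come up, restart it using the FMRI reported above
+# (the FMRI below is illustrative only):
+$ pfexec svcadm restart svc:/system/illumos/sled-agent:default
+----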
+ +[source,text] +---- +# List all services: +$ svcs +# View zones managed by Omicron (prefixed with "oxz_"): +$ zoneadm list -cv +# View logs for a service: +$ pfexec tail -f $(pfexec svcs -z oxz_nexus -L nexus) +---- + +To uninstall all Omicron services from a machine, the following may be +executed: + +[source,text] +---- +$ pfexec cargo run --release --bin omicron-package -- uninstall +---- + +=== Test Environment + +When we deploy, we're effectively creating a number of different zones +for all the components that make up Omicron (Nexus, Clickhouse, Crucible, etc). +Since all these services run in different zones they cannot communicate with +each other (and Sled Agent in the global zone) via `localhost`. In practice, +we'll assign addresses as per RFD 63 as well as incorporating DNS based +service discovery. + +For the purposes of local development today, we specify some hardcoded IPv6 +unique local addresses in `fd00:1de::/16`: + +[options="header"] +|=================================================================================================== +| Service | Endpoint +| Sled Agent: Bootstrap | `[::]:12346` +| Sled Agent: Dropshot API | `[fd00:1de::]:12345` +| Cockroach DB | `[fd00:1de::5]:32221` +| Oximeter | `[fd00:1de::6]:12223` +| Nexus: External API | `[fd00:1de::7]:12220` +| Nexus: Internal API | `[fd00:1de::7]:12221` +| Clickhouse | `[fd00:1de::8]:8123` +| Crucible Downstairs | `[fd00:1de::9]:32345`, `[fd00:1de::10]:32345`, `[fd00:1de::11]:32345` +|=================================================================================================== + +Note that Sled Agent runs in the global zone and is the one responsible for bringing up all the other +other services and allocating them with vNICs and IPv6 addresses. From 323e62bef963b0f11b74208398701164221684b4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 11 Apr 2022 22:05:43 -0400 Subject: [PATCH 16/16] alan's feedback --- docs/cli.adoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/cli.adoc b/docs/cli.adoc index 5cb5101dd12..c1cdfe2a0d2 100644 --- a/docs/cli.adoc +++ b/docs/cli.adoc @@ -34,14 +34,14 @@ $ oxide project create myproject \ $ oxide instance create myinstance \ --description "My instance" \ --project myproject \ - --org myorg \ + --organization myorg \ --hostname "myinstance.maze-war.com" \ --ncpus 1 \ --memory 8 ✔ Created instance myinstance in myorg/myproject $ oxide instance view myinstance \ - --org myorg \ + --organization myorg \ --project myproject \ --format json { @@ -67,7 +67,7 @@ $ oxide disk create nginx \ $ oxide disk view nginx \ - --org myorg \ + --organization myorg \ --project myproject \ --format json {