Example of chromium using deployment scripts (#786)
adam-singer committed Apr 2, 2024
1 parent 4146a34 commit 0aa7f65
Showing 20 changed files with 795 additions and 0 deletions.
1 change: 1 addition & 0 deletions .github/styles/config/vocabularies/TraceMachina/accept.txt
Expand Up @@ -25,3 +25,4 @@ mutex
parsable
rebase
remoteable
Chromium
1 change: 1 addition & 0 deletions deployment-examples/chromium/00_infra.sh
22 changes: 22 additions & 0 deletions deployment-examples/chromium/01_operations.sh
@@ -0,0 +1,22 @@
# This script configures a cluster with a few standard deployments.

# TODO(aaronmondal): Add Grafana, OpenTelemetry and the various other standard
# deployments one would expect in a cluster.

set -xeuo pipefail

SRC_ROOT=$(git rev-parse --show-toplevel)

kubectl apply -f "${SRC_ROOT}/deployment-examples/chromium/gateway.yaml"

# The image for the scheduler and CAS.
nix run .#image.copyTo \
docker://localhost:5001/nativelink:local \
-- \
--dest-tls-verify=false

# Wrap the siso-chromium toolchain image with nativelink to turn it into a worker.
nix run .#nativelink-worker-siso-chromium.copyTo \
docker://localhost:5001/nativelink-worker-siso-chromium:local \
-- \
--dest-tls-verify=false
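To confirm that both images actually reached the local registry before moving on, you can query the registry's catalog endpoint. This is a minimal sketch, assuming the `kind`-local registry from `00_infra.sh` is reachable at `localhost:5001` as in the `copyTo` commands above:

```bash
# List the repositories the local registry knows about; both images pushed
# above should appear in the output.
curl -s http://localhost:5001/v2/_catalog

# Show the tags for the base image (expects "local").
curl -s http://localhost:5001/v2/nativelink/tags/list
```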
11 changes: 11 additions & 0 deletions deployment-examples/chromium/02_application.sh
@@ -0,0 +1,11 @@
# Get the nix derivation hash from the toolchain container, change the
# `TOOLCHAIN_TAG` variable in the `worker.json.template` to that hash and apply
# the configuration.

KUSTOMIZE_DIR=$(git rev-parse --show-toplevel)/deployment-examples/chromium

kubectl apply -k "$KUSTOMIZE_DIR"

kubectl rollout status deploy/nativelink-cas
kubectl rollout status deploy/nativelink-scheduler
kubectl rollout status deploy/nativelink-worker-chromium
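If one of the rollouts hangs, inspecting the pods behind each deployment usually reveals the cause. A minimal sketch, assuming the `app` labels used by the manifests in this example:

```bash
# Check the pods backing each deployment.
kubectl get pods -l app=nativelink-cas
kubectl get pods -l app=nativelink-scheduler
kubectl get pods -l app=nativelink-worker-chromium

# Deployment events explain image-pull or scheduling failures.
kubectl describe deploy/nativelink-worker-chromium
```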
69 changes: 69 additions & 0 deletions deployment-examples/chromium/03_build_chrome_tests.sh
@@ -0,0 +1,69 @@
set -euo pipefail

function fetch_chromium() {
mkdir -p ${HOME}/chromium
cd ${HOME}/chromium
fetch --no-history chromium
}

# Per the Chromium build requirements, Ubuntu is the best-supported system:
# https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md
if ! grep -q 'ID=ubuntu' /etc/os-release; then
echo "This system is not running Ubuntu."
exit 0
fi

if [ -d "${HOME}/chromium/src" ]; then
echo "Using existing chromium checkout"
cd ${HOME}/chromium
set +e
gclient sync --no-history
exit_status=$?
set -e
if [ $exit_status -ne 0 ]; then
echo "Failed to sync, removing files in ${HOME}/chromium"
rm -rf ${HOME}/chromium/
fetch_chromium
fi

cd src
else
echo "This script will modify the local system by adding depot_tools to .bashrc,"
echo "downloading chrome code base and installing dependencies based on instructions"
echo "https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md."
echo "Do you want to continue? (yes/no)"
read -r answer
answer=$(echo "$answer" | tr '[:upper:]' '[:lower:]')
if [[ "$answer" != "yes" ]]; then
echo "Exiting."
# Exit or handle "no" logic here
exit 0
fi

# Add depot_tools to PATH
if [[ "$PATH" != *"/depot_tools"* ]]; then
cd ${HOME}
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
echo 'export PATH="${HOME}/depot_tools:$PATH"' >> ${HOME}/.bashrc
source ${HOME}/.bashrc
fi

# Check out Chromium into the home directory without history
fetch_chromium
cd src

# Install the client dependencies required for Chromium builds
./build/install-build-deps.sh
fi

echo "Generating ninja projects"
gn gen --args="use_remoteexec=true rbe_cfg_dir=\"../../buildtools/reclient_cfgs/linux\"" out/Default

# Fetch the cache and scheduler IP addresses to pass to ninja
CACHE=$(kubectl get gtw cache -o=jsonpath='{.status.addresses[0].value}')
SCHEDULER=$(kubectl get gtw scheduler -o=jsonpath='{.status.addresses[0].value}')
SCHEDULER_ADDRESS=${SCHEDULER}:50052
CACHE_ADDRESS=${CACHE}:50051

echo "Starting autoninja build"
RBE_service=${SCHEDULER_ADDRESS} \
  RBE_cas_service=${CACHE_ADDRESS} \
  RBE_instance=main \
  RBE_reclient_timeout=60m \
  RBE_exec_timeout=4m \
  RBE_alsologtostderr=true \
  RBE_service_no_security=true \
  RBE_service_no_auth=true \
  RBE_local_resource_fraction=0.00001 \
  RBE_automatic_auth=false \
  RBE_gcert_refresh_timeout=20 \
  RBE_compression_threshold=-1 \
  RBE_metrics_namespace=main \
  RBE_platform= \
  RBE_experimental_credentials_helper= \
  RBE_experimental_credentials_helper_args= \
  RBE_log_http_calls=true \
  RBE_use_rpc_credentials=false \
  RBE_exec_strategy=remote_local_fallback \
  autoninja -v -j 50 -C out/Default cc_unittests
7 changes: 7 additions & 0 deletions deployment-examples/chromium/04_delete_application.sh
@@ -0,0 +1,7 @@
# Get the nix derivation hash from the toolchain container, change the
# `TOOLCHAIN_TAG` variable in the `worker.json.template` to that hash and delete
# the configuration.

KUSTOMIZE_DIR=$(git rev-parse --show-toplevel)/deployment-examples/chromium

kubectl delete -k "$KUSTOMIZE_DIR"
76 changes: 76 additions & 0 deletions deployment-examples/chromium/README.md
@@ -0,0 +1,76 @@
# Chromium example

This deployment sets up a 4-container deployment with separate CAS, scheduler
and worker. Don't use this example deployment in production. It's insecure.

> [!WARNING]
> The client build is best done from an Ubuntu system; `./03_build_chrome_tests.sh`
> checks that the system is running Ubuntu and exits otherwise.

All commands should be run from within the nix development shell so that all dependencies exist in the environment.

```bash
nix develop
```

In this example we're using `kind` to set up the cluster and `cilium` to provide a
`LoadBalancer` and `GatewayController`.

First set up a local development cluster:

```bash
./00_infra.sh
```
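Before moving on, it can help to confirm that the cluster nodes and the `cilium` components are healthy. A quick sketch, assuming the standard `k8s-app=cilium` label and that the infra script installed the Gateway API CRDs:

```bash
# Nodes should be Ready and the cilium pods Running.
kubectl get nodes
kubectl -n kube-system get pods -l k8s-app=cilium

# The Gateway API resources used later should be available.
kubectl get gatewayclass
```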

Next start a few standard deployments. This part also builds the remote
execution containers and makes them available to the cluster:

```bash
./01_operations.sh
```

Finally, deploy NativeLink:

```bash
./02_application.sh
```

> [!TIP]
> You can use `./04_delete_application.sh` to remove just the `nativelink`
> deployments but leave the rest of the cluster intact.

This demo setup creates two gateways to expose the `cas` and `scheduler`
deployments via your local docker network:

```bash
CACHE=$(kubectl get gtw cache -o=jsonpath='{.status.addresses[0].value}')
SCHEDULER=$(kubectl get gtw scheduler -o=jsonpath='{.status.addresses[0].value}')

echo "Cache IP: $CACHE"
echo "Scheduler IP: $SCHEDULER"
```
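For a quick reachability check against the ports used later by the build script (50051 for the CAS, 50052 for the scheduler), assuming `nc` is available in your environment:

```bash
# Both checks should report an open port.
nc -vz "$CACHE" 50051
nc -vz "$SCHEDULER" 50052
```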

The `./03_build_chrome_tests.sh` example script downloads the dependencies needed
to build Chromium unit tests with the NativeLink CAS and scheduler. It first checks
whether the required tools exist and installs them if missing, then downloads and
builds the Chromium tests. The script simplifies the setup described in
[linux/build_instructions.md](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md).

```bash
./03_build_chrome_tests.sh
```
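Once the build finishes, the resulting test binary can be exercised locally. A small sketch, assuming a successful `cc_unittests` build in `out/Default` (the filter pattern is only an illustration; adjust it to the tests you care about):

```bash
cd ~/chromium/src

# List the available tests, then run a subset locally.
out/Default/cc_unittests --gtest_list_tests | head
out/Default/cc_unittests --gtest_filter='*Scheduler*'  # hypothetical filter
```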

> [!TIP]
> You can monitor the logs of container groups with `kubectl logs`:
> ```bash
> kubectl logs -f -l app=nativelink-cas
> kubectl logs -f -l app=nativelink-scheduler
> kubectl logs -f -l app=nativelink-worker-chromium --all-containers=true
> watch $HOME/chromium/src/buildtools/reclient/reproxystatus
> ```

When you're done testing, delete the cluster:

```bash
kind delete cluster
```
114 changes: 114 additions & 0 deletions deployment-examples/chromium/cas.json
@@ -0,0 +1,114 @@
// This configuration places objects in various directories in
// `~/.cache/nativelink`. When this location is mounted as a PersistentVolume
// it persists the cache across restarts.
{
"stores": {
"CAS_MAIN_STORE": {
"existence_cache": {
"backend": {
"compression": {
"compression_algorithm": {
"lz4": {}
},
"backend": {
"filesystem": {
"content_path": "~/.cache/nativelink/content_path-cas",
"temp_path": "~/.cache/nativelink/tmp_path-cas",
"eviction_policy": {
// 10gb.
"max_bytes": 10000000000,
}
}
}
}
}
}
},
"AC_MAIN_STORE": {
"completeness_checking": {
"backend": {
"filesystem": {
"content_path": "~/.cache/nativelink/content_path-ac",
"temp_path": "~/.cache/nativelink/tmp_path-ac",
"eviction_policy": {
// 500mb.
"max_bytes": 500000000,
}
}
},
"cas_store": {
"ref_store": {
"name": "CAS_MAIN_STORE"
}
}
}
}
},
"servers": [{
"listener": {
"http": {
"socket_address": "0.0.0.0:50051"
}
},
"services": {
"cas": {
"main": {
"cas_store": "CAS_MAIN_STORE"
}
},
"ac": {
"main": {
"ac_store": "AC_MAIN_STORE"
}
},
"capabilities": {},
"bytestream": {
"cas_stores": {
"main": "CAS_MAIN_STORE",
},
}
}
},
{
// Only publish metrics on a private port.
"listener": {
"http": {
"socket_address": "0.0.0.0:50061"
}
},
"services": {
"experimental_prometheus": {
"path": "/metrics"
}
}
},
{
"listener": {
"http": {
"socket_address": "0.0.0.0:50071",
"tls": {
"cert_file": "/root/example-do-not-use-in-prod-rootca.crt",
"key_file": "/root/example-do-not-use-in-prod-key.pem"
}
}
},
"services": {
"cas": {
"main": {
"cas_store": "CAS_MAIN_STORE"
}
},
"ac": {
"main": {
"ac_store": "AC_MAIN_STORE"
}
},
"capabilities": {},
"bytestream": {
"cas_stores": {
"main": "CAS_MAIN_STORE",
}
}
}
}]
}
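The kustomization applied by `./02_application.sh` presumably generates the `cas` ConfigMap that `cas.yaml` (below) mounts; if you ever need to recreate it by hand, a rough manual equivalent (a sketch, not the exact generator the example uses) is:

```bash
# Recreate the ConfigMap named "cas" from cas.json by hand.
kubectl create configmap cas --from-file=cas.json \
  --dry-run=client -o yaml | kubectl apply -f -
```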
61 changes: 61 additions & 0 deletions deployment-examples/chromium/cas.yaml
@@ -0,0 +1,61 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nativelink-cas
spec:
replicas: 1
selector:
matchLabels:
app: nativelink-cas
template:
metadata:
labels:
app: nativelink-cas
spec:
containers:
- name: nativelink-cas
image: "localhost:5001/nativelink:local"
env:
- name: RUST_LOG
value: info
ports:
- containerPort: 50051
- containerPort: 50061
- containerPort: 50071
volumeMounts:
- name: cas-config
mountPath: /cas.json
subPath: cas.json
- name: tls-volume
mountPath: /root
readOnly: true
args: ["/cas.json"]
volumes:
- name: cas-config
configMap:
name: cas
- name: tls-volume
secret:
secretName: tls-secret
---
apiVersion: v1
kind: Service
metadata:
name: nativelink-cas
spec:
selector:
app: nativelink-cas
ports:
- name: http
protocol: TCP
port: 50051
targetPort: 50051
- name: metrics
protocol: TCP
port: 50061
targetPort: 50061
- name: https
protocol: TCP
port: 50071
targetPort: 50071
