From eea0c382fd866e2609f823499c382d88c69d84fb Mon Sep 17 00:00:00 2001
From: Nathan Flurry
Date: Tue, 17 Jun 2025 04:09:55 +0000
Subject: [PATCH] feat(pegboard): expose rivet server from within containers
 for docker compose

---
 docker/dev-full/docker-compose.yml          |   4 +-
 docker/dev-full/rivet-client/config.jsonc   |  13 +-
 docker/dev-full/rivet-client/entrypoint.sh  | 304 ++++++++----------
 docker/universal/Dockerfile                 |   6 +-
 .../system-test-actor/src/container/main.ts |  19 ++
 5 files changed, 177 insertions(+), 169 deletions(-)

diff --git a/docker/dev-full/docker-compose.yml b/docker/dev-full/docker-compose.yml
index 3120f6bdee..5f0185a783 100644
--- a/docker/dev-full/docker-compose.yml
+++ b/docker/dev-full/docker-compose.yml
@@ -213,6 +213,9 @@ services:
       - RUST_LOG=debug,hyper=info
     stop_grace_period: 0s
     depends_on:
+      # HACK: Depend on rivet-server since entrypoint.sh does a DNS lookup on it to build the iptables DNAT rule
+      rivet-server:
+        condition: service_healthy
       rivet-edge-server:
         condition: service_healthy
       foundationdb:
@@ -220,7 +223,6 @@ services:
     volumes:
       # - ./rivet-client/entrypoint.sh:/usr/local/bin/entrypoint.sh:ro
       - ./rivet-client/config.jsonc:/etc/rivet-client/config.jsonc:ro
-      - ./rivet-client/rivet-actor.conflist:/opt/cni/config/rivet-actor.conflist:ro
       - client-data:/var/lib/rivet-client
     ports:
       # Enable host networking for actors
diff --git a/docker/dev-full/rivet-client/config.jsonc b/docker/dev-full/rivet-client/config.jsonc
index 4ce78caad8..dbb0ecad35 100644
--- a/docker/dev-full/rivet-client/config.jsonc
+++ b/docker/dev-full/rivet-client/config.jsonc
@@ -2,7 +2,18 @@
 	"client": {
 		"runner": {
 			"flavor": "container",
-			"use_resource_constraints": false
+			// Resource constraints are not supported in Docker
+			// TODO: Needs further investigation into why nested cgroups v2 resource constraints aren't working
+			"use_resource_constraints": false,
+			"custom_hosts": [
+				// Define the host for the API server
+				//
+				// This custom IP is mapped to rivet-server by the iptables rules in entrypoint.sh
+				{
+					"ip": "192.168.100.1",
+					"hostname": "rivet-server"
+				}
+			]
 		},
 		"cluster": {
 			// This is safe to hardcode
diff --git a/docker/dev-full/rivet-client/entrypoint.sh b/docker/dev-full/rivet-client/entrypoint.sh
index 81a054e1d3..4a33223670 100755
--- a/docker/dev-full/rivet-client/entrypoint.sh
+++ b/docker/dev-full/rivet-client/entrypoint.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 set -euf
 
 # SEE ALSO: packages/core/services/cluster/src/workflows/server/install/install_scripts/files/pegboard_configure.sh
@@ -28,12 +28,13 @@ set -euf
 #
 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
-# Create admin chain that only accepts traffic from the GG subnet
-#
-# See Nomad equivalent: https://github.com/hashicorp/nomad/blob/a8f0f2612ef9d283ed903721f8453a0c0c3f51c5/client/allocrunner/networking_bridge_linux.go#L73
-# ADMIN_CHAIN="RIVET-ADMIN"
+ADMIN_CHAIN="RIVET-ADMIN"
+NAT_CHAIN="RIVET-NAT"
 SUBNET_IPV4="172.26.64.0/20"
 SUBNET_IPV6="fd00:db8:2::/64"
+RIVET_SERVER_IPV4="192.168.100.1"
+
+# Create admin chain that only accepts traffic from the GG subnet
 # PUBLIC_IFACE="eth0"
 # VLAN_IFACE="eth1"
 # GG_VLAN_SUBNET="0.0.0.0/0"
@@ -41,7 +42,7 @@ SUBNET_IPV6="fd00:db8:2::/64"
 # # MARK: Linux Traffic Control
 # for iface in $PUBLIC_IFACE $VLAN_IFACE; do
 # # Check if the HTB qdisc already exists
-# if ! tc qdisc show dev \$iface | grep -q "htb 1:"; then
+# if ! tc qdisc show dev $iface | grep -q "htb 1:"; then
 # 
 # # Set up a HTB queuing discipline. 
# # @@ -53,14 +54,14 @@ SUBNET_IPV6="fd00:db8:2::/64" # # tc -s class show dev eth1 # # # # Read more: https://lartc.org/howto/lartc.qdisc.classful.html#AEN1071 -# tc qdisc add dev \$iface \ +# tc qdisc add dev $iface \ # root \ # handle 1: \ # htb \ # default 10 # # # Create a root class with a max bandwidth -# tc class add dev \$iface \ +# tc class add dev $iface \ # parent 1: \ # classid 1:1 \ # htb \ @@ -70,7 +71,7 @@ SUBNET_IPV6="fd00:db8:2::/64" # # # # Low bandwidth limit = game servers are not expected to use much bandwidth # # High priority = packets take priority in the case of congestion -# tc class add dev \$iface \ +# tc class add dev $iface \ # parent 1:1 \ # classid 1:10 \ # htb \ @@ -81,7 +82,7 @@ SUBNET_IPV6="fd00:db8:2::/64" # # # # High bandwidth = peak performance when there is no network congestion # # Low priority = packets are dropped first in the case of congestion -# tc class add dev \$iface \ +# tc class add dev $iface \ # parent 1:1 \ # classid 1:20 \ # htb \ @@ -94,13 +95,13 @@ SUBNET_IPV6="fd00:db8:2::/64" # # handle x = handle packets marked x by iptables # # fw classid x = send matched packets to class x # # action change dsfield set x = set the packet's TOS (0x10 = low delay, 0x8 = high throughput) -# tc filter add dev \$iface \ +# tc filter add dev $iface \ # protocol ip \ # parent 1:0 \ # prio 1 \ # handle 1 \ # fw classid 1:10 -# tc filter add dev \$iface \ +# tc filter add dev $iface \ # protocol ip \ # parent 1:0 \ # prio 2 \ @@ -113,163 +114,130 @@ SUBNET_IPV6="fd00:db8:2::/64" # echo "HTB qdisc and class rules already exist." # fi # done -# -# # MARK: iptables -# add_ipt_chain() { -# local ipt="\$1" -# local table="\$2" -# local chain="\$3" -# -# if ! "\$ipt" -t "\$table" -L "\$chain" &>/dev/null; then -# "\$ipt" -t "\$table" -N "\$chain" -# echo "Created \$ipt \$table chain: \$chain" -# else -# echo "Chain already exists in \$ipt \$table: \$chain" -# fi -# } -# -# add_ipt_rule() { -# local ipt="\$1" -# local table="\$2" -# local chain="\$3" -# local rule="\$4" -# -# if ! "\$ipt" -t \$table -C "\$chain" \$rule &>/dev/null; then -# "\$ipt" -t \$table -A "\$chain" \$rule -# echo "Added \$ipt \$table \$chain rule: \$rule" -# else -# echo "Rule already exists in \$ipt \$table \$chain: \$rule" -# fi -# } -# -# for ipt in iptables ip6tables; do -# # Define SUBNET_VAR based on iptables version -# if [ "\$ipt" == "iptables" ]; then -# SUBNET_VAR="$SUBNET_IPV4" -# else -# SUBNET_VAR="$SUBNET_IPV6" -# fi -# -# # MARK: Chains -# add_ipt_chain "\$ipt" "filter" "$ADMIN_CHAIN" -# -# add_ipt_chain "\$ipt" "mangle" "RIVET-FORWARD" -# add_ipt_rule "\$ipt" "mangle" "FORWARD" "-j RIVET-FORWARD" -# -# add_ipt_chain "\$ipt" "filter" "RIVET-INPUT" -# add_ipt_rule "\$ipt" "filter" "INPUT" "-j RIVET-INPUT" -# -# add_ipt_chain "\$ipt" "mangle" "RIVET-INPUT" -# add_ipt_rule "\$ipt" "mangle" "INPUT" "-j RIVET-INPUT" -# -# # MARK: Create GG TOS -# # -# # Sets the TOS to minimize delay if not already set. -# if ! "\$ipt" -t mangle -L "RIVET-TOS-GG" &>/dev/null; then -# "\$ipt" -t mangle -N "RIVET-TOS-GG" -# echo "Created \$ipt chain: RIVET-TOS-GG" -# else -# echo "Chain already exists in \$ipt: RIVET-TOS-GG" -# fi -# add_ipt_rule "\$ipt" "mangle" "RIVET-TOS-GG" "-m tos ! 
--tos 0x0 -j RETURN" -# add_ipt_rule "\$ipt" "mangle" "RIVET-TOS-GG" "-j TOS --set-tos 0x10" -# -# # VLAN only applicable to IPv4 -# if [ "\$ipt" == "iptables" ]; then -# # MARK: GG TOS -# add_ipt_rule "\$ipt" "mangle" "RIVET-FORWARD" "-s $GG_VLAN_SUBNET -d \$SUBNET_VAR -j RIVET-TOS-GG" -# add_ipt_rule "\$ipt" "mangle" "RIVET-FORWARD" "-s \$SUBNET_VAR -d $GG_VLAN_SUBNET -j RIVET-TOS-GG" -# -# # MARK: GG ingress -# # Prioritize traffic -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s $GG_VLAN_SUBNET -d \$SUBNET_VAR -j MARK --set-mark 1" -# # Accept traffic -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s $GG_VLAN_SUBNET -d \$SUBNET_VAR -j ACCEPT" -# -# # MARK: GG egress -# # Prioritize response traffic -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s \$SUBNET_VAR -m conntrack --ctstate NEW,ESTABLISHED -j MARK --set-mark 1" -# # Enable conntrack to allow traffic to flow back to the GG subnet -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s \$SUBNET_VAR -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT" -# -# # MARK: ATS ingress -# # Maximize throughput from ATS -# add_ipt_rule "\$ipt" "mangle" "RIVET-INPUT" "-s __ATS_VLAN_SUBNET__ -j TOS --set-tos Maximize-Throughput" -# # Deprioritize traffic so game traffic takes priority -# add_ipt_rule "\$ipt" "filter" "RIVET-INPUT" "-s __ATS_VLAN_SUBNET__ -j MARK --set-mark 2" -# fi -# -# # MARK: Public egress -# # Prioritize traffic -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s \$SUBNET_VAR -o $PUBLIC_IFACE -j MARK --set-mark 1" -# # Allow egress traffic -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s \$SUBNET_VAR -o $PUBLIC_IFACE -j ACCEPT" -# -# # Allow public ingress traffic on all ports because this is already mapped by CNI -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-p tcp -d \$SUBNET_VAR -i $PUBLIC_IFACE -j ACCEPT" -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-p udp -d \$SUBNET_VAR -i $PUBLIC_IFACE -j ACCEPT" -# -# # MARK: Deny -# # Deny all other egress traffic -# add_ipt_rule "\$ipt" "filter" "$ADMIN_CHAIN" "-s \$SUBNET_VAR -j DROP" -# done + +# MARK: iptables +add_ipt_chain() { + local ipt="$1" + local table="$2" + local chain="$3" + + if ! "$ipt" -t "$table" -L "$chain" &>/dev/null; then + "$ipt" -t "$table" -N "$chain" + echo "Created $ipt $table chain: $chain" + else + echo "Chain already exists in $ipt $table: $chain" + fi +} + +add_ipt_rule() { + local ipt="$1" + local table="$2" + local chain="$3" + local rule="$4" + + if ! 
"$ipt" -t $table -C "$chain" $rule &>/dev/null; then + "$ipt" -t $table -A "$chain" $rule + echo "Added $ipt $table $chain rule: $rule" + else + echo "Rule already exists in $ipt $table $chain: $rule" + fi +} + +for ipt in iptables ip6tables; do + # Define SUBNET_VAR based on iptables version + if [ "$ipt" == "iptables" ]; then + SUBNET_VAR="$SUBNET_IPV4" + else + SUBNET_VAR="$SUBNET_IPV6" + fi + + # MARK: Chains + add_ipt_chain "$ipt" "filter" "$ADMIN_CHAIN" + add_ipt_chain "$ipt" "nat" "$NAT_CHAIN" + + # Jump to RIVET-NAT chain from OUTPUT (for locally generated traffic) + # and PREROUTING (for container traffic) + # + # CNI firewall plugin will automatically jump to $ADMIN_CHAIN on "filter" + add_ipt_rule "$ipt" "nat" "OUTPUT" "-j $NAT_CHAIN" + add_ipt_rule "$ipt" "nat" "PREROUTING" "-j $NAT_CHAIN" + + # TODO: This IP will change + # MARK: Expose rivet-server inside the container at 192.168.100.1 + # Only applicable to IPv4 (assuming Docker networking stack is IPv4 only) + if [ "$ipt" == "iptables" ]; then + RIVET_SERVER_HOST="$(dig +short rivet-server)" + echo "Adding rule to forward $RIVET_SERVER_IPV4 to rivet-server ($RIVET_SERVER_HOST)" + add_ipt_rule "$ipt" "nat" "$NAT_CHAIN" "-d $RIVET_SERVER_IPV4 -j DNAT --to-destination $RIVET_SERVER_HOST" + + fi + + # add_ipt_chain "$ipt" "mangle" "RIVET-FORWARD" + # add_ipt_rule "$ipt" "mangle" "FORWARD" "-j RIVET-FORWARD" + # + # add_ipt_chain "$ipt" "filter" "RIVET-INPUT" + # add_ipt_rule "$ipt" "filter" "INPUT" "-j RIVET-INPUT" + # + # add_ipt_chain "$ipt" "mangle" "RIVET-INPUT" + # add_ipt_rule "$ipt" "mangle" "INPUT" "-j RIVET-INPUT" + # + # # MARK: Create GG TOS + # # + # # Sets the TOS to minimize delay if not already set. + # if ! "$ipt" -t mangle -L "RIVET-TOS-GG" &>/dev/null; then + # "$ipt" -t mangle -N "RIVET-TOS-GG" + # echo "Created $ipt chain: RIVET-TOS-GG" + # else + # echo "Chain already exists in $ipt: RIVET-TOS-GG" + # fi + # add_ipt_rule "$ipt" "mangle" "RIVET-TOS-GG" "-m tos ! 
--tos 0x0 -j RETURN" + # add_ipt_rule "$ipt" "mangle" "RIVET-TOS-GG" "-j TOS --set-tos 0x10" + # + # # VLAN only applicable to IPv4 + # if [ "$ipt" == "iptables" ]; then + # # MARK: GG TOS + # add_ipt_rule "$ipt" "mangle" "RIVET-FORWARD" "-s $GG_VLAN_SUBNET -d $SUBNET_VAR -j RIVET-TOS-GG" + # add_ipt_rule "$ipt" "mangle" "RIVET-FORWARD" "-s $SUBNET_VAR -d $GG_VLAN_SUBNET -j RIVET-TOS-GG" + # + # # MARK: GG ingress + # # Prioritize traffic + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $GG_VLAN_SUBNET -d $SUBNET_VAR -j MARK --set-mark 1" + # # Accept traffic + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $GG_VLAN_SUBNET -d $SUBNET_VAR -j ACCEPT" + # + # # MARK: GG egress + # # Prioritize response traffic + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $SUBNET_VAR -m conntrack --ctstate NEW,ESTABLISHED -j MARK --set-mark 1" + # # Enable conntrack to allow traffic to flow back to the GG subnet + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $SUBNET_VAR -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT" + # + # # MARK: ATS ingress + # # Maximize throughput from ATS + # add_ipt_rule "$ipt" "mangle" "RIVET-INPUT" "-s __ATS_VLAN_SUBNET__ -j TOS --set-tos Maximize-Throughput" + # # Deprioritize traffic so game traffic takes priority + # add_ipt_rule "$ipt" "filter" "RIVET-INPUT" "-s __ATS_VLAN_SUBNET__ -j MARK --set-mark 2" + # fi + # + # # MARK: Public egress + # # Prioritize traffic + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $SUBNET_VAR -o $PUBLIC_IFACE -j MARK --set-mark 1" + # # Allow egress traffic + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $SUBNET_VAR -o $PUBLIC_IFACE -j ACCEPT" + # + # # Allow public ingress traffic on all ports because this is already mapped by CNI + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-p tcp -d $SUBNET_VAR -i $PUBLIC_IFACE -j ACCEPT" + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-p udp -d $SUBNET_VAR -i $PUBLIC_IFACE -j ACCEPT" + # + # # MARK: Deny + # # Deny all other egress traffic + # add_ipt_rule "$ipt" "filter" "$ADMIN_CHAIN" "-s $SUBNET_VAR -j DROP" +done # MARK: CNI -# +# # Dual-stack CNI config # # We use ptp instead of bridge networking in order to isolate the pod's traffic. It's also more performant than bridge networking. 
-# -# See default Nomad configuration: https://github.com/hashicorp/nomad/blob/a8f0f2612ef9d283ed903721f8453a0c0c3f51c5/client/allocrunner/networking_bridge_linux.go#L152 -# cat << EOF > /opt/cni/config/rivet-actor.conflist -# { -# "cniVersion": "0.4.0", -# "name": "rivet-actor", -# "plugins": [ -# { -# "type": "loopback" -# }, -# { -# "type": "ptp", -# "ipMasq": true, -# "ipam": { -# "type": "host-local", -# "ranges": [ -# [ -# { "subnet": "$SUBNET_IPV4" } -# ], -# [ -# { "subnet": "$SUBNET_IPV6" } -# ] -# ], -# "routes": [ -# { "dst": "0.0.0.0/0" }, -# { "dst": "::/0" } -# ] -# }, -# "dns": { -# "nameservers": [ -# "8.8.8.8", -# "8.8.4.4", -# "2001:4860:4860::8888", -# "2001:4860:4860::8844" -# ], -# "options": ["rotate", "edns0", "attempts:2"] -# } -# }, -# { -# "type": "firewall", -# "backend": "iptables", -# "iptablesAdminChainName": "$ADMIN_CHAIN" -# }, -# { -# "type": "portmap", -# "capabilities": { "portMappings": true }, -# "snat": true -# } -# ] -# } -# EOF cat << EOF > /opt/cni/config/rivet-actor.conflist { "cniVersion": "0.4.0", @@ -306,6 +274,11 @@ cat << EOF > /opt/cni/config/rivet-actor.conflist "options": ["rotate", "edns0", "attempts:2"] } }, + { + "type": "firewall", + "backend": "iptables", + "iptablesAdminChainName": "$ADMIN_CHAIN" + }, { "type": "portmap", "capabilities": { "portMappings": true }, @@ -316,5 +289,6 @@ cat << EOF > /opt/cni/config/rivet-actor.conflist EOF # MARK: Entrypoint -/usr/bin/tini -- rivet-client -c /etc/rivet-client/config.jsonc +echo "Starting client" +rivet-client "$@" diff --git a/docker/universal/Dockerfile b/docker/universal/Dockerfile index 6b2920b075..e13102837e 100644 --- a/docker/universal/Dockerfile +++ b/docker/universal/Dockerfile @@ -123,7 +123,7 @@ RUN apt-get update -y && \ # MARK: Runner (Full) FROM --platform=linux/amd64 base-runner AS client-full ARG CNI_PLUGINS_VERSION=1.3.0 -RUN apt-get install -y skopeo iproute2 runc && \ +RUN apt-get install -y skopeo iproute2 runc dnsutils && \ echo "Downloading lz4" && \ curl -L https://releases.rivet.gg/tools/lz4/1.10.0/debian11-amd64/lz4 -o /usr/local/bin/lz4 && \ chmod +x /usr/local/bin/lz4 && \ @@ -137,8 +137,10 @@ RUN apt-get install -y skopeo iproute2 runc && \ mkdir -p /opt/cni/bin /opt/cni/config && \ curl -L https://github.com/containernetworking/plugins/releases/download/v${CNI_PLUGINS_VERSION}/cni-plugins-linux-amd64-v${CNI_PLUGINS_VERSION}.tgz | \ tar -xz -C /opt/cni/bin +COPY --chmod=755 ./docker/dev-full/rivet-client/entrypoint.sh /usr/local/bin/entrypoint.sh +COPY ./docker/dev-full/rivet-client/rivet-actor.conflist /opt/cni/config/rivet-actor.conflist COPY --from=builder /app/dist/rivet-client /app/dist/rivet-container-runner /usr/local/bin/ -ENTRYPOINT ["/usr/bin/tini", "--", "rivet-client"] +ENTRYPOINT ["/usr/bin/tini", "--", "entrypoint.sh"] # MARK: Monlith FROM --platform=linux/amd64 debian:12.9-slim AS monolith diff --git a/examples/system-test-actor/src/container/main.ts b/examples/system-test-actor/src/container/main.ts index 98190c13d2..e6f3a38c39 100644 --- a/examples/system-test-actor/src/container/main.ts +++ b/examples/system-test-actor/src/container/main.ts @@ -2,6 +2,17 @@ import { serve } from "@hono/node-server"; import { createNodeWebSocket } from "@hono/node-ws"; import { createAndStartServer } from "../shared/server.js"; import dgram from 'dgram'; +import fs from 'fs'; + +// Print hosts file contents before starting +try { + const hostsContent = fs.readFileSync('/etc/hosts', 'utf8'); + console.log('=== /etc/hosts contents ==='); + 
console.log(hostsContent); + console.log('=== End of /etc/hosts ==='); +} catch (err) { + console.error('Failed to read /etc/hosts:', err); +} let injectWebSocket: any; const { app, port } = createAndStartServer((app) => { @@ -15,6 +26,14 @@ const server = serve({ fetch: app.fetch, port }); injectWebSocket(server); +// async function contactApi() { +// console.log('Contacting', process.env.RIVET_API_ENDPOINT); +// const res = await fetch(process.env.RIVET_API_ENDPOINT!); +// console.log('API response', res.ok, res.status); +// } +// +// contactApi(); + // Get port from environment const portEnv = typeof Deno !== "undefined"