Skip to content

Commit 7eced16

Browse files
committed
Reduce required infra
1 parent e6c5a64 commit 7eced16

File tree

37 files changed

+649
-272
lines changed

37 files changed

+649
-272
lines changed
Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
# Start times
2+
3+
## Low-end machine
4+
5+
> System
6+
>
7+
> - Debian GNU/Linux 11
8+
> - Shared VM, 4 VCPUs (of AMD EPYC 7713 16-Core 2GHz)
9+
> - 8GB memory
10+
11+
### `nix-shell` setup time (fresh)
12+
13+
- Before building `bolt`: 1m31s
14+
- Building `bolt`: 2m15s
15+
16+
### Services (Minimal setup)
17+
18+
| step | up |
19+
| ------------------ | ----- |
20+
| k8s-cluster | 20s |
21+
| k8s-infra | 2m31s |
22+
| redis | 1s |
23+
| cockroach | 1s |
24+
| clickhouse | 1s |
25+
| s3 | 24s |
26+
| infra-artifacts | 50s |
27+
| migrate | 62s |
28+
| up (containerized) | 7s |
29+
| total | 5m17s |
30+
31+
### `k8s-infra` breakdown
32+
33+
_Note: these are not additive, as they run in parallel_
34+
35+
_First, loki, promtail, and prometheus are provisioned; then the rest follow_
36+
37+
| service | up |
38+
| -------------- | ----- |
39+
| promtail | 3s |
40+
| prometheus | 43s |
41+
| loki | 1m14s |
42+
| k8s_dashboard | 3s |
43+
| traefik tunnel | 20s |
44+
| traefik | 20s |
45+
| traffic_server | 26s |
46+
| nats | 27s |
47+
| imagor | 29s |
48+
| minio | 35s |
49+
| nomad_server | 46s |
50+
| clickhouse | 47s |
51+
| redis | 51s |
52+
| nsfw_api | 56s |
53+
| cockroachdb | 1m6s |
54+
55+
## Higher-end machine
56+
57+
> System
58+
>
59+
> - Debian GNU/Linux 11
60+
> - AMD EPYC 7713 16-Core 2GHz
61+
> - 32GB memory
62+
63+
### Services (Complex setup)
64+
65+
_This setup uses Postgres as the Terraform state storage backend, which adds overhead to each step_
66+
67+
| step | up | destroy |
68+
| ------------------ | -------- | -------- |
69+
| k8s-cluster | 27s | 16s |
70+
| k8s-infra | 2m34s | - |
71+
| tls | 4m29s | 5s |
72+
| redis | 11s | - |
73+
| cockroach | 10s | - |
74+
| clickhouse | 10s | - |
75+
| vector | 19s | - |
76+
| pools | 2m43s | 1m57s |
77+
| dns | 2m48s | 9s |
78+
| better uptime | untested | untested |
79+
| cf-workers | 15s | 6s |
80+
| cf-tunnels | 18s | 12s |
81+
| s3 | 35s | - |
82+
| infra-artifacts | 35s | - |
83+
| migrate | 58s | - |
84+
| up (containerized) | 7s | - |
85+
| total | 17m2s | 2m40s |
86+
87+
### `k8s-infra` breakdown
88+
89+
| service | up |
90+
| -------------- | ----- |
91+
| promtail | 6s |
92+
| prometheus | 48s |
93+
| loki | 1m20s |
94+
| k8s_dashboard | 6s |
95+
| imagor | 8s |
96+
| traefik | 12s |
97+
| traefik tunnel | 14s |
98+
| traffic_server | 16s |
99+
| minio | 22s |
100+
| nats | 28s |
101+
| clickhouse | 30s |
102+
| redis | 33s |
103+
| nsfw_api | 36s |
104+
| nomad_server | 46s |
105+
| cockroachdb | 49s |

errors/feature/disabled.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
---
2+
name = "FEATURE_DISABLED"
3+
description = "The given feature is disabled: {feature}"
4+
http_status = 400
5+
---
6+
7+
# Feature Disabled
8+
9+
A feature required to use/access this resource is disabled.

infra/tf/k8s_infra/clickhouse.tf

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
locals {
2-
clickhouse_k8s = var.clickhouse_provider == "kubernetes"
2+
clickhouse_enabled = var.clickhouse_enabled && var.clickhouse_provider == "kubernetes"
33
service_clickhouse = lookup(var.services, "clickhouse", {
44
count = 1
55
resources = {
@@ -10,7 +10,7 @@ locals {
1010
}
1111

1212
module "clickhouse_secrets" {
13-
count = local.clickhouse_k8s ? 1 : 0
13+
count = local.clickhouse_enabled ? 1 : 0
1414

1515
source = "../modules/secrets"
1616

@@ -20,14 +20,16 @@ module "clickhouse_secrets" {
2020
}
2121

2222
resource "kubernetes_namespace" "clickhouse" {
23-
count = local.clickhouse_k8s ? 1 : 0
23+
count = local.clickhouse_enabled ? 1 : 0
2424

2525
metadata {
2626
name = "clickhouse"
2727
}
2828
}
2929

3030
resource "kubernetes_priority_class" "clickhouse_priority" {
31+
count = local.clickhouse_enabled ? 1 : 0
32+
3133
metadata {
3234
name = "clickhouse-priority"
3335
}
@@ -36,12 +38,11 @@ resource "kubernetes_priority_class" "clickhouse_priority" {
3638
}
3739

3840
resource "helm_release" "clickhouse" {
41+
count = local.clickhouse_enabled ? 1 : 0
3942
depends_on = [null_resource.daemons]
4043

41-
count = local.clickhouse_k8s ? 1 : 0
42-
4344
name = "clickhouse"
44-
namespace = kubernetes_namespace.clickhouse[0].metadata.0.name
45+
namespace = kubernetes_namespace.clickhouse.0.metadata.0.name
4546
chart = "../../helm/clickhouse"
4647
# repository = "oci://registry-1.docker.io/bitnamicharts"
4748
# chart = "clickhouse"
@@ -56,7 +57,7 @@ resource "helm_release" "clickhouse" {
5657
replicaCount = 1
5758
}
5859

59-
priorityClassName = kubernetes_priority_class.clickhouse_priority.metadata.0.name
60+
priorityClassName = kubernetes_priority_class.clickhouse_priority.0.metadata.0.name
6061
resources = var.limit_resources ? {
6162
limits = {
6263
memory = "${local.service_clickhouse.resources.memory}Mi"
@@ -121,15 +122,15 @@ resource "helm_release" "clickhouse" {
121122
# Admin auth
122123
auth = {
123124
username = "default"
124-
password = module.clickhouse_secrets[0].values["clickhouse/users/default/password"]
125+
password = module.clickhouse_secrets.0.values["clickhouse/users/default/password"]
125126
}
126127

127128
metrics = {
128129
enabled = true
129130

130131
serviceMonitor = {
131132
enabled = true
132-
namespace = kubernetes_namespace.clickhouse[0].metadata.0.name
133+
namespace = kubernetes_namespace.clickhouse.0.metadata.0.name
133134
}
134135

135136
# TODO:
@@ -142,26 +143,25 @@ resource "helm_release" "clickhouse" {
142143
}
143144

144145
data "kubernetes_secret" "clickhouse_ca" {
145-
count = local.clickhouse_k8s ? 1 : 0
146-
146+
count = local.clickhouse_enabled ? 1 : 0
147147
depends_on = [helm_release.clickhouse]
148148

149149
metadata {
150150
name = "clickhouse-crt"
151-
namespace = kubernetes_namespace.clickhouse[0].metadata.0.name
151+
namespace = kubernetes_namespace.clickhouse.0.metadata.0.name
152152
}
153153
}
154154

155155
resource "kubernetes_config_map" "clickhouse_ca" {
156-
for_each = local.clickhouse_k8s ? toset(["rivet-service", "bolt", "vector"]) : toset([])
156+
for_each = local.clickhouse_enabled ? toset(["rivet-service", "bolt", "vector"]) : toset([])
157157

158158
metadata {
159159
name = "clickhouse-ca"
160160
namespace = each.value
161161
}
162162

163163
data = {
164-
"ca.crt" = data.kubernetes_secret.clickhouse_ca[0].data["ca.crt"]
164+
"ca.crt" = data.kubernetes_secret.clickhouse_ca.0.data["ca.crt"]
165165
}
166166
}
167167

infra/tf/k8s_infra/grafana.tf

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,10 @@ locals {
88
}
99

1010
resource "kubernetes_config_map" "grafana_dashboard" {
11-
for_each = local.grafana_dashboards
11+
for_each = var.prometheus_enabled ? local.grafana_dashboards : {}
1212

1313
metadata {
14-
namespace = kubernetes_namespace.prometheus.metadata.0.name
14+
namespace = kubernetes_namespace.prometheus.0.metadata.0.name
1515
name = "prometheus-rivet-${each.key}"
1616
labels = {
1717
grafana_dashboard = "1"

0 commit comments

Comments
 (0)