-
Notifications
You must be signed in to change notification settings - Fork 0
/
global-lb.tf
90 lines (83 loc) · 3.18 KB
/
global-lb.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
// Look up the existing "whereami" Kubernetes Service in Cluster A.
// Read as a data source (not a resource) because we need the NEG
// annotations that GKE writes onto the Service after the NEGs exist.
data "kubernetes_service" "lb-backend-A" {
  provider = kubernetes.cluster-A

  metadata {
    name = "whereami"
  }
}
// Look up the existing "whereami" Kubernetes Service in Cluster B.
// Same pattern as Cluster A: a data source exposes the NEG status
// annotations written by GKE once the NEGs have been provisioned.
data "kubernetes_service" "lb-backend-B" {
  provider = kubernetes.cluster-B

  metadata {
    name = "whereami"
  }
}
locals {
  // Port exposed by the whereami Service (read from Cluster A's spec).
  svc_port = data.kubernetes_service.lb-backend-A.spec.0.port.0.port

  // The GKE NEG controller records NEG names and zones in the
  // "cloud.google.com/neg-status" annotation only after it creates the
  // NEGs, which is why these come from 'data' lookups rather than
  // 'resource' blocks.

  // Cluster A backend NEG
  svc_a_neg_notes = jsondecode(data.kubernetes_service.lb-backend-A.metadata[0].annotations["cloud.google.com/neg-status"])
  svc_a_neg_name  = local.svc_a_neg_notes["network_endpoint_groups"][local.svc_port]
  svc_a_neg_zone  = local.svc_a_neg_notes["zones"][0]

  // Cluster B backend NEG
  svc_b_neg_notes = jsondecode(data.kubernetes_service.lb-backend-B.metadata[0].annotations["cloud.google.com/neg-status"])
  svc_b_neg_name  = local.svc_b_neg_notes["network_endpoint_groups"][local.svc_port]
  svc_b_neg_zone  = local.svc_b_neg_notes["zones"][0]
}
// Resolve the zonal NEG backing the Cluster A service, using the
// name/zone decoded from the Service's neg-status annotation.
data "google_compute_network_endpoint_group" "neg-A" {
  name = local.svc_a_neg_name
  zone = local.svc_a_neg_zone
}
// Resolve the zonal NEG backing the Cluster B service, using the
// name/zone decoded from the Service's neg-status annotation.
data "google_compute_network_endpoint_group" "neg-B" {
  name = local.svc_b_neg_name
  zone = local.svc_b_neg_zone
}
// Health check for the NEG backends.
// The original hard-coded port 8080 even though the intent (per the
// original comment) was to probe the service's serving port.
// USE_SERVING_PORT probes each endpoint on the port recorded in the
// NEG itself, so the check stays correct if the pod port changes.
resource "google_compute_health_check" "default" {
  // Prefixed with the cluster name for consistency with the other
  // resources in this file and to avoid cross-deployment collisions.
  name               = "${var.cluster_name}-health-check"
  check_interval_sec = 2
  healthy_threshold  = 1

  http_health_check {
    // Recommended setting for NEG backends: use the port from the NEG endpoint.
    port_specification = "USE_SERVING_PORT"
  }
}
// Global backend service that fronts the whereami NEGs in both clusters.
resource "google_compute_backend_service" "default" {
  name                  = "${var.cluster_name}-backend-service"
  load_balancing_scheme = "EXTERNAL_MANAGED"
  protocol              = "HTTP"
  health_checks         = [google_compute_health_check.default.id]

  // Demo-only choice: route each request to a random backend.
  locality_lb_policy = "RANDOM"

  // One backend block per cluster NEG.
  // NOTE: a max_rate of zero is rejected by the API — to drain a
  // cluster, remove its whole backend block instead.
  backend {
    group                 = data.google_compute_network_endpoint_group.neg-A.self_link
    balancing_mode        = "RATE"
    max_rate_per_endpoint = 100
  }

  backend {
    group                 = data.google_compute_network_endpoint_group.neg-B.self_link
    balancing_mode        = "RATE"
    max_rate_per_endpoint = 100
  }
}
// URL map with no path rules: every request goes to the single backend service.
resource "google_compute_url_map" "default" {
  name            = "${var.cluster_name}-urlmap"
  default_service = google_compute_backend_service.default.id
}
// HTTP proxy that terminates client connections and consults the URL map.
resource "google_compute_target_http_proxy" "default" {
  name    = "${var.cluster_name}-proxy"
  url_map = google_compute_url_map.default.id
}
// Global forwarding rule: the load balancer's public entry point on port 80.
resource "google_compute_global_forwarding_rule" "default" {
  name                  = "${var.cluster_name}-fr"
  load_balancing_scheme = "EXTERNAL_MANAGED"
  port_range            = "80"
  target                = google_compute_target_http_proxy.default.id
}
// Publish the load balancer's public HTTP URL after apply.
output "loadbalancer_url" {
  value = "http://${google_compute_global_forwarding_rule.default.ip_address}/"
}