haproxy load balancer plugin
Rajat Chopra committed Oct 22, 2014
1 parent 92b5b47 commit d74ebe6
Showing 10 changed files with 682 additions and 0 deletions.
47 changes: 47 additions & 0 deletions cmd/openshift-router/openshift-router.go
@@ -0,0 +1,47 @@
package main

import (
    "flag"
    "time"

    kubeclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag"
    "github.com/golang/glog"

    osclient "github.com/openshift/origin/pkg/client"
    lbmanager "github.com/openshift/origin/pkg/router/lbmanager"
    "github.com/openshift/origin/plugins/router/haproxy"
)

var (
    master = flag.String("master", "", "The address of the Kubernetes API server")
    debug  = flag.Bool("verbose", false, "Boolean flag to turn on debug messages")
)

func main() {
    flag.Parse()
    util.InitLogs()
    defer util.FlushLogs()

    verflag.PrintAndExitIfRequested()

    if len(*master) == 0 {
        glog.Fatal("usage: openshift-router -master <master>")
    }

    config := &kubeclient.Config{Host: *master}
    kubeClient, err := kubeclient.New(config)
    if err != nil {
        glog.Fatalf("Invalid -master: %v", err)
    }

    osClient, errc := osclient.New(config)
    if errc != nil {
        glog.Fatalf("Could not reach master for routes: %v", errc)
    }

    routes := haproxy.NewRouter()
    controllerManager := lbmanager.NewLBManager(routes, kubeClient, osClient)
    controllerManager.Run(10 * time.Second)
    select {}
}
19 changes: 19 additions & 0 deletions images/routers/haproxy/Dockerfile
@@ -0,0 +1,19 @@
FROM fedora
RUN yum clean metadata && yum update -y --exclude='rhc*,node*' && \
yum -y install wget gcc make openssl-devel pcre-devel socat && \
wget http://haproxy.1wt.eu/download/1.5/src/snapshot/haproxy-ss-LATEST.tar.gz && \
tar xvzf haproxy-ss-LATEST.tar.gz && \
groupadd haproxy && \
useradd -g haproxy haproxy && \
cd haproxy-ss-* && make TARGET=linux2628 CPU=native USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 && make install && \
cd .. && rm -rf haproxy-ss-* && \
mkdir -p /usr/bin && \
mkdir -p /var/lib/containers/router/ && \
mkdir -p /var/lib/haproxy/{conf,run,bin,log} && \
touch /var/lib/haproxy/conf/{host_be.map,host_be_ws.map,host_be_ressl.map,host_be_sni.map,haproxy.config} && \
yum -y remove gcc wget && \
yum clean all
ADD conf/ /var/lib/haproxy/conf/
ADD bin/openshift-router /usr/bin/
EXPOSE 80
CMD ["/bin/bash"]
17 changes: 17 additions & 0 deletions images/routers/haproxy/README.md
@@ -0,0 +1,17 @@
Building the haproxy router image
---------------------------------

The openshift-router is supposed to be run as a container. To build the image:

    $ cd images/routers/haproxy
    $ ./build.sh
    $ docker build -t openshift-router .

Running the router
------------------

Take the image above and run it anywhere the networking allows the container to reach other pods. The only notable requirement is that port 80 must be exposed on the node, so that DNS entries can point to the host/node where the router container is running.

    $ docker run --rm -it -p 80:80 openshift-router /usr/bin/openshift-router -master $kube-master-url

Example kube-master-url: http://10.0.2.15:8080
2 changes: 2 additions & 0 deletions images/routers/haproxy/bin/.gitignore
@@ -0,0 +1,2 @@
*
!.gitignore
5 changes: 5 additions & 0 deletions images/routers/haproxy/build.sh
@@ -0,0 +1,5 @@
#!/bin/bash
set -x
pushd "$(dirname "$0")/../../.."
hack/build-go.sh cmd/openshift-router
cp _output/go/bin/openshift-router images/routers/haproxy/bin/
34 changes: 34 additions & 0 deletions images/routers/haproxy/conf/default_pub_keys.pem
@@ -0,0 +1,34 @@
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCrKEJQc5H4vjyoziNoPdYcz1MF4wxdpTiMAz8i9Y6cOz6fed7g
KeAo6e227vnQLItRtIedyw/oyK4lZa7Do8/q8D1wRyfXYjcPsPbeg4loKFY6o1cc
7kxj4VmM0hAg05PdbY+bII8mM/bYhIxUid5k7TOUOwXT8yD9KZa5AvG3JQIDAQAB
AoGAJsBIs1pXNFAt/7IcRh+p+H/gNjb/t7wOn3SkuM+L6EdpmEXJdktE2z6LeZ2F
peJP8MHE6q7s/TFibEknVlgiXJoYFeiO+F1tyPJzfRsKj0j/w9MqhDW0/f9wTz71
Q7KlLd8bSfwP8VbqO/FRkuJrp4SxlhiatVanrm6Clniw0UECQQDUd0iY2Jrbvw/i
CKLSG+y7WfdspyNVmOVxLJRRqmtMvJKS1Ib4o1tnjvTlgXeS9lohm3qFgRkuxdmW
Hi1rJ/WJAkEAzjoqBCUxM06EPP9JatWL7V01zyncZosO1p5hTxRVJyzbvXvyzMHp
h0wzVnwS5b9lKRYaSti32FFAYqCW/gqpvQJBAMvDvq9ABXy8h7oK+ESznV2tEdt1
jDC6m0noLDMKUy/cJmKFSxDIcpAmDX7SpOAgPh6/I0lIn+M4ws3zETIUt6kCQEsp
q9Ki85GtlkjdCpcjicJTbJ86tJezKw9R3kmCMZ77nIMtP8sdCc5EGZn10rVl0qrs
4VxSLH9s7biEq0OgA0kCQQDLpq9QC4BEoCoAuF26lhCUNN/yPDVrTpYFNuuQjZ8m
61gHtBFA5iDvxYiDwJlwSY2R909yyJY/EgxbMO8hE+Z4
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIDJzCCApCgAwIBAgICd9QwDQYJKoZIhvcNAQEFBQAwga8xCzAJBgNVBAYTAi0t
MRIwEAYDVQQIDAlTb21lU3RhdGUxETAPBgNVBAcMCFNvbWVDaXR5MRkwFwYDVQQK
DBBTb21lT3JnYW5pemF0aW9uMR8wHQYDVQQLDBZTb21lT3JnYW5pemF0aW9uYWxV
bml0MRgwFgYDVQQDDA9pcC0xMC0zNS02My0xOTIxIzAhBgkqhkiG9w0BCQEWFHJv
b3RAaXAtMTAtMzUtNjMtMTkyMB4XDTE0MDIyNDA1NTY0NloXDTE1MDIyNDA1NTY0
Nlowga8xCzAJBgNVBAYTAi0tMRIwEAYDVQQIDAlTb21lU3RhdGUxETAPBgNVBAcM
CFNvbWVDaXR5MRkwFwYDVQQKDBBTb21lT3JnYW5pemF0aW9uMR8wHQYDVQQLDBZT
b21lT3JnYW5pemF0aW9uYWxVbml0MRgwFgYDVQQDDA9pcC0xMC0zNS02My0xOTIx
IzAhBgkqhkiG9w0BCQEWFHJvb3RAaXAtMTAtMzUtNjMtMTkyMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQCrKEJQc5H4vjyoziNoPdYcz1MF4wxdpTiMAz8i9Y6c
Oz6fed7gKeAo6e227vnQLItRtIedyw/oyK4lZa7Do8/q8D1wRyfXYjcPsPbeg4lo
KFY6o1cc7kxj4VmM0hAg05PdbY+bII8mM/bYhIxUid5k7TOUOwXT8yD9KZa5AvG3
JQIDAQABo1AwTjAdBgNVHQ4EFgQUPKiyCj48hlylkoZjUwnRKBFcamwwHwYDVR0j
BBgwFoAUPKiyCj48hlylkoZjUwnRKBFcamwwDAYDVR0TBAUwAwEB/zANBgkqhkiG
9w0BAQUFAAOBgQCO8hi9MkOVO0lgsG7AXbdX/t/ATQXLrMk76EwH57LDjzwJd872
TyjP9yzXHuweftxVLuqbic814b/knR+OaXhSc2mR96ZpXN4brCxBx41aoxIWlXXX
RxKJW6ORCJejRasEJt/XPYjrbQnhlmNdji4R3iDcaYZq/UweEgOqVhdOmg==
-----END CERTIFICATE-----
71 changes: 71 additions & 0 deletions images/routers/haproxy/conf/haproxy_template.conf
@@ -0,0 +1,71 @@
global
    # maxconn 4096
    user haproxy
    group haproxy
    daemon
    ca-base /etc/ssl
    crt-base /etc/ssl
    stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin
    stats timeout 2m

defaults
    # maxconn 4096
    mode http
    # Add x-forwarded-for header.
    timeout connect 5s
    timeout client 30s
    timeout server 30s
    # Long timeout for WebSocket connections.
    timeout tunnel 1h

frontend public
    bind :80
    mode http
    tcp-request inspect-delay 5s
    tcp-request content accept if HTTP

    acl is_ws hdr(Upgrade) -i WebSocket
    # websocket traffic, no ssl
    use_backend be_ws_%[hdr(host),map(/var/lib/haproxy/conf/host_be_ws.map)] if is_ws

    # http traffic
    use_backend be_%[hdr(host),map(/var/lib/haproxy/conf/host_be.map)] if TRUE
    default_backend openshift_default

frontend public_ssl
    bind :443
    mode tcp
    tcp-request inspect-delay 5s
    tcp-request content accept if { req_ssl_hello_type 1 }
    use_backend be_sni_%[hdr(host),map(/var/lib/haproxy/conf/host_be_sni.map)] if { ssl_fc_has_sni }
    default_backend be_no_sni

##------------- helper frontends/backends to dissect ssl/sni ----
# TODO: handle wss

frontend fe_no_sni
    # terminate ssl on edge
    bind 127.0.0.1:10443 ssl crt /var/lib/haproxy/conf/default_pub_keys.pem accept-proxy

    # re-ssl?
    use_backend be_ressl_%[hdr(host),map(/var/lib/haproxy/conf/host_be_ressl.map)] if TRUE

    # regular http
    use_backend be_%[hdr(host),map(/var/lib/haproxy/conf/host_be.map)] if TRUE

    default_backend openshift_default

# backend for when sni does not exist, or ssl termination needs to happen on the edge
backend be_no_sni
    mode tcp
    server fe_no_sni 127.0.0.1:10443 weight 1 send-proxy

backend openshift_default
    mode http
    option forwardfor
    #option http-keep-alive
    option http-pretend-keepalive
    server openshift_backend 127.0.0.1:8080

##-------------- app level backends ----------------
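The app-level backends are appended below this marker by the router process at runtime, and the host_be*.map files bound in the frontends above are rewritten so that each route's host resolves to its backend. Purely as an illustration — the names below are hypothetical and not produced by this commit — a map entry and its generated backend could look like:

    # /var/lib/haproxy/conf/host_be.map (hypothetical entry): the frontend
    # prefixes the looked-up value with "be_", so this line maps the host
    # www.example.com to backend be_myservice
    www.example.com myservice

    # app-level backend as the router might append it (illustrative only)
    backend be_myservice
        mode http
        balance roundrobin
        server myservice_1 10.1.2.3:8080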

186 changes: 186 additions & 0 deletions pkg/router/lbmanager/lbmanager.go
@@ -0,0 +1,186 @@
package lbmanager

import (
    "strings"
    "sync"
    "time"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    kubeclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
    "github.com/golang/glog"

    osclient "github.com/openshift/origin/pkg/client"
    routeapi "github.com/openshift/origin/pkg/route/api"
    "github.com/openshift/origin/pkg/router"
)

// LBManager is responsible for synchronizing the route and endpoint
// objects stored in the system with the state of the load balancer.
type LBManager struct {
    routes          router.Router
    endpointWatcher kubeclient.EndpointsInterface
    routeWatcher    osclient.Interface
    lock            sync.Mutex
}

// NewLBManager creates a new LBManager.
func NewLBManager(routes router.Router, endpointWatcher kubeclient.EndpointsInterface, routeWatcher osclient.Interface) *LBManager {
    lm := &LBManager{
        routes:          routes,
        endpointWatcher: endpointWatcher,
        routeWatcher:    routeWatcher,
    }
    return lm
}

// Run begins watching and syncing.
func (lm *LBManager) Run(period time.Duration) {
    resourceVersion := uint64(0)
    go util.Forever(func() { lm.watchEndpoints(&resourceVersion) }, period)
    go util.Forever(func() { lm.watchRoutes(&resourceVersion) }, period)
}

// resourceVersion is a pointer to the resource version to use/update.
func (lm *LBManager) watchRoutes(resourceVersion *uint64) {
    ctx := kapi.NewContext()
    watching, err := lm.routeWatcher.WatchRoutes(
        ctx,
        labels.Everything(),
        labels.Everything(),
        *resourceVersion,
    )
    if err != nil {
        glog.Errorf("Unexpected failure to watch: %v", err)
        time.Sleep(5 * time.Second)
        return
    }

    glog.V(4).Infof("Now entering watch mode.\n")
    for {
        select {
        case event, open := <-watching.ResultChan():
            if !open {
                // watchChannel has been closed, or something else went
                // wrong with our etcd watch call. Let the util.Forever()
                // that called us call us again.
                return
            }
            rc, ok := event.Object.(*routeapi.Route)
            if !ok {
                glog.Errorf("unexpected object: %#v", event.Object)
                continue
            }
            *resourceVersion = rc.ResourceVersion + 1
            // Sync even if this is a deletion event, to ensure that we leave
            // it in the desired state.
            //glog.Infof("About to sync from watch: %v", *rc)
            lm.syncRoutes(event.Type, *rc)
        }
    }
}

// resourceVersion is a pointer to the resource version to use/update.
func (lm *LBManager) watchEndpoints(resourceVersion *uint64) {
    ctx := kapi.NewContext()
    watching, err := lm.endpointWatcher.WatchEndpoints(
        ctx,
        labels.Everything(),
        labels.Everything(),
        *resourceVersion,
    )
    if err != nil {
        glog.Errorf("Unexpected failure to watch: %v", err)
        time.Sleep(5 * time.Second)
        return
    }

    glog.V(4).Infof("Now entering watch mode.\n")
    for {
        select {
        case event, open := <-watching.ResultChan():
            if !open {
                // watchChannel has been closed, or something else went
                // wrong with our etcd watch call. Let the util.Forever()
                // that called us call us again.
                return
            }
            rc, ok := event.Object.(*api.Endpoints)
            if !ok {
                glog.Errorf("unexpected object: %#v", event.Object)
                continue
            }
            *resourceVersion = rc.ResourceVersion + 1
            // Sync even if this is a deletion event, to ensure that we leave
            // it in the desired state.
            //glog.Infof("About to sync from watch: %v", *rc)
            if event.Type != watch.Error {
                lm.syncEndpoints(event.Type, *rc)
            } else {
                break
            }
        }
    }
}

func (lm *LBManager) syncRoutes(event watch.EventType, app routeapi.Route) {
    lm.lock.Lock()
    defer lm.lock.Unlock()
    glog.V(4).Infof("App Name : %s\n", app.ServiceName)
    glog.V(4).Infof("\tAlias : %s\n", app.Host)
    glog.V(4).Infof("\tEvent : %s\n", event)

    _, ok := lm.routes.FindFrontend(app.ServiceName)
    if !ok {
        lm.routes.CreateFrontend(app.ServiceName, "")
    }

    if event == watch.Added || event == watch.Modified {
        glog.V(4).Infof("Modifying routes for %s\n", app.ServiceName)
        lm.routes.AddAlias(app.Host, app.ServiceName)
    }
    lm.routes.WriteConfig()
    lm.routes.ReloadRouter()
}

func (lm *LBManager) syncEndpoints(event watch.EventType, app api.Endpoints) {
    lm.lock.Lock()
    defer lm.lock.Unlock()
    glog.V(4).Infof("App Name : %s\n", app.ID)
    glog.V(4).Infof("\tNumber of endpoints : %d\n", len(app.Endpoints))
    for i, e := range app.Endpoints {
        glog.V(4).Infof("\tEndpoint %d : %s\n", i, e)
    }
    _, ok := lm.routes.FindFrontend(app.ID)
    if !ok {
        lm.routes.CreateFrontend(app.ID, "") //"www."+app.ID+".com"
    }

    // Delete the endpoints only
    lm.routes.DeleteBackends(app.ID)

    if event == watch.Added || event == watch.Modified {
        glog.V(4).Infof("Modifying endpoints for %s\n", app.ID)
        // Use append so that empty endpoint strings do not leave
        // zero-valued placeholder entries in the slice.
        eps := make([]router.Endpoint, 0, len(app.Endpoints))
        for _, e := range app.Endpoints {
            if e == "" {
                continue
            }
            ep := router.Endpoint{}
            if strings.Contains(e, ":") {
                eArr := strings.Split(e, ":")
                ep.IP = eArr[0]
                ep.Port = eArr[1]
            } else {
                ep.IP = e
                ep.Port = "80"
            }
            eps = append(eps, ep)
        }
        lm.routes.AddRoute(app.ID, "", "", nil, eps)
    }
    lm.routes.WriteConfig()
    lm.routes.ReloadRouter()
}
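Two of the ten changed files are not shown on this page, presumably the Router interface in pkg/router and the haproxy plugin itself under plugins/router/haproxy. Inferred purely from the call sites in lbmanager.go above — signatures and parameter names are guesses, not the committed definition — the interface would look roughly like:

    package router

    // Frontend's fields are not visible in this diff; stubbed here only
    // so the sketch is self-contained.
    type Frontend struct{}

    // Endpoint is a single backend address, as built by syncEndpoints.
    type Endpoint struct {
        IP   string
        Port string
    }

    // Router is the contract the haproxy plugin would fulfill; every
    // method corresponds to a call site in lbmanager.go.
    type Router interface {
        FindFrontend(name string) (Frontend, bool)
        CreateFrontend(name, url string)
        AddAlias(alias, frontendName string)
        DeleteBackends(frontendName string)
        AddRoute(frontendName, fePath, bePath string, protocols []string, endpoints []Endpoint)
        WriteConfig()
        ReloadRouter()
    }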

1 comment on commit d74ebe6

@akram (Contributor) commented on d74ebe6, Nov 21, 2014

@rajatchopra
Hi,
I'm trying to write some unit tests around the AddRoute method.
I ran into issues because the maps a.EndpointTable and a.Backends never get initialized.
Can you please enlighten me?
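A minimal test sketch that works around this by constructing the router through its factory rather than a struct literal — assuming NewRouter (the constructor used in cmd/openshift-router above) initializes EndpointTable and Backends; the endpoint values and the assertion are illustrative, not verified against the plugin source:

    package haproxy

    import (
        "testing"

        "github.com/openshift/origin/pkg/router"
    )

    // Sketch only: NewRouter, unlike a zero-value struct literal, is
    // assumed to initialize the plugin's internal maps.
    func TestAddRoute(t *testing.T) {
        r := NewRouter()
        r.CreateFrontend("myservice", "")
        eps := []router.Endpoint{{IP: "10.1.2.3", Port: "8080"}}
        r.AddRoute("myservice", "", "", nil, eps)
        if _, ok := r.FindFrontend("myservice"); !ok {
            t.Fatalf("expected frontend %q after AddRoute", "myservice")
        }
    }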
