Route LDAP IDPs in the oauth server through guest VPC
This change makes us route LDAP IDPs in the oauth server through the guest
VPC by:
* Setting the ALL_PROXY env var so the LDAP dialer uses the socks5
  sidecar (see the sketch below)
* Extending the socks5 sidecar to optionally resolve everything through
  the guest cluster's cluster-dns
alvaroaleman committed Sep 2, 2022
1 parent 1eea925 commit f60f77b
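
For context on the first bullet above, here is a minimal sketch (not part of this commit) of how a dialer built on golang.org/x/net/proxy honors ALL_PROXY and therefore ends up going through the socks5 sidecar. It assumes the oauth server's LDAP dialer uses that package; the hostname and hard-coded port are illustrative only (8090 mirrors the KonnectivityServerLocalPort value referenced in the diff below).

package main

import (
	"context"
	"fmt"
	"os"

	"golang.org/x/net/proxy"
)

func main() {
	// In the real deployment the env var is set on the oauth container;
	// it is set here only to keep the sketch self-contained.
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:8090")

	// proxy.Dial consults ALL_PROXY (and NO_PROXY) from the environment and
	// tunnels the TCP connection through the socks5 sidecar, which in turn
	// forwards it over konnectivity into the guest VPC.
	conn, err := proxy.Dial(context.Background(), "tcp", "ldap.example.com:636")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected via", os.Getenv("ALL_PROXY"))
}

Because NO_PROXY is also set on the container (see the deployment hunk below), in-management-cluster destinations such as the kube-apiserver service continue to bypass the sidecar.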
Showing 2 changed files with 111 additions and 14 deletions.
@@ -142,6 +142,10 @@ func buildOAuthContainerMain(image string) func(c *corev1.Container) {
Name: "HTTPS_PROXY",
Value: fmt.Sprintf("socks5://127.0.0.1:%d", konnectivity.KonnectivityServerLocalPort),
},
{
Name: "ALL_PROXY",
Value: fmt.Sprintf("socks5://127.0.0.1:%d", konnectivity.KonnectivityServerLocalPort),
},
{
Name: "NO_PROXY",
Value: manifests.KubeAPIServerService("").Name,
@@ -246,7 +250,7 @@ func socks5ProxyContainer(socks5ProxyImage string) corev1.Container {
c := corev1.Container{
Name: socks5ProxyContainerName,
Image: socks5ProxyImage,
Command: []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy"},
Command: []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy", "--resolve-from-guest-cluster-dns=true"},
Args: []string{"run"},
Env: []corev1.EnvVar{{
Name: "KUBECONFIG",
@@ -263,5 +267,6 @@ func socks5ProxyContainer(socks5ProxyImage string) corev1.Container {
{Name: "konnectivity-proxy-cert", MountPath: "/etc/konnectivity-proxy-tls"},
},
}

return c
}
118 changes: 105 additions & 13 deletions konnectivity-socks5-proxy/main.go
@@ -4,24 +4,35 @@ import (
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
"strings"
"sync"

socks5 "github.com/armon/go-socks5"
"github.com/openshift/hypershift/pkg/version"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
"golang.org/x/net/proxy"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/apiserver-network-proxy/pkg/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

func NewStartCommand() *cobra.Command {
l := log.Log.WithName("konnectivity-socks5-proxy")
log.SetLogger(zap.New(zap.UseDevMode(true), zap.JSONEncoder(func(o *zapcore.EncoderConfig) {
o.EncodeTime = zapcore.RFC3339TimeEncoder
})))
cmd := &cobra.Command{
Use: "konnectivity-socks5-proxy",
Short: "Runs the konnectivity socks5 proxy server.",
@@ -40,27 +51,37 @@ func NewStartCommand() *cobra.Command {
var clientCertPath string
var clientKeyPath string
var connectDirectlyToCloudAPIs bool
var resolveFromGuestClusterDNS bool

cmd.Flags().StringVar(&proxyHostname, "konnectivity-hostname", "konnectivity-server-local", "The hostname of the konnectivity service.")
cmd.Flags().IntVar(&proxyPort, "konnectivity-port", 8090, "The konnectivity port that socks5 proxy should connect to.")
cmd.Flags().IntVar(&servingPort, "serving-port", 8090, "The port that socks5 proxy should serve on.")
cmd.Flags().BoolVar(&connectDirectlyToCloudAPIs, "connect-directly-to-cloud-apis", false, "If true, traffic destined for AWS or Azure APIs should be sent there directly rather than going through konnectivity. If enabled, proxy env vars from the mgmt cluster must be propagated to this container")
cmd.Flags().BoolVar(&resolveFromGuestClusterDNS, "resolve-from-guest-cluster-dns", false, "If set, DNS resolution goes through the guest cluster's cluster-dns")

cmd.Flags().StringVar(&caCertPath, "ca-cert-path", "/etc/konnectivity-proxy-tls/ca.crt", "The path to the konnectivity client's ca-cert.")
cmd.Flags().StringVar(&clientCertPath, "tls-cert-path", "/etc/konnectivity-proxy-tls/tls.crt", "The path to the konnectivity client's tls certificate.")
cmd.Flags().StringVar(&clientKeyPath, "tls-key-path", "/etc/konnectivity-proxy-tls/tls.key", "The path to the konnectivity client's private key.")

cmd.Run = func(cmd *cobra.Command, args []string) {
fmt.Printf("Starting proxy. Version %s\n", version.String())
l.Info("Starting proxy", "version", version.String())
client, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
if err != nil {
panic(err)
}

dialFunc := dialFunc(caCertPath, clientCertPath, clientKeyPath, proxyHostname, proxyPort, connectDirectlyToCloudAPIs)
conf := &socks5.Config{
Dial: dialFunc(caCertPath, clientCertPath, clientKeyPath, proxyHostname, proxyPort, connectDirectlyToCloudAPIs),
Resolver: k8sServiceResolver{
client: client,
Dial: dialFunc,
Resolver: proxyResolver{
client: client,
resolveFromGuestCluster: resolveFromGuestClusterDNS,
guestClusterResolver: &guestClusterResolver{
log: l,
client: client,
konnektivityDialFunc: dialFunc,
},
log: l,
},
}
server, err := socks5.New(conf)
@@ -123,26 +144,97 @@ func dialDirect(ctx context.Context, network, addr string) (net.Conn, error) {
return proxy.Dial(ctx, network, addr)
}

// k8sServiceResolver attempts to resolve the hostname by matching it to a Kubernetes Service, but will fallback to the system DNS if an error is encountered.
type k8sServiceResolver struct {
client client.Client
type guestClusterResolver struct {
log logr.Logger
client client.Client
konnektivityDialFunc func(ctx context.Context, network string, addr string) (net.Conn, error)
resolver *net.Resolver
resolverLock sync.Mutex
}

func (d k8sServiceResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) {
func (gr *guestClusterResolver) getResolver(ctx context.Context) (*net.Resolver, error) {
gr.resolverLock.Lock()
defer gr.resolverLock.Unlock()
if gr.resolver != nil {
return gr.resolver, nil
}
dnsService := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-dns", Name: "dns-default"}}
if err := gr.client.Get(ctx, client.ObjectKeyFromObject(dnsService), dnsService); err != nil {
return nil, fmt.Errorf("failed to get dns service from guest cluster: %w", err)
}
clusterDNSAddress := dnsService.Spec.ClusterIP + ":53"
gr.resolver = &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
return gr.konnektivityDialFunc(ctx, "tcp", clusterDNSAddress)
},
}

return gr.resolver, nil
}

func (gr *guestClusterResolver) resolve(ctx context.Context, name string) (net.IP, error) {
resolver, err := gr.getResolver(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get resolver: %w", err)

}
addresses, err := resolver.LookupHost(ctx, name)
if err != nil {
return nil, fmt.Errorf("failed to resolve %q: %w", name, err)
}
if len(addresses) == 0 {
return nil, errors.New("no addresses found")
}
address := net.ParseIP(addresses[0])
if address == nil {
return nil, fmt.Errorf("failed to parse address %q as IP", addresses[0])
}
return address, nil
}

// proxyResolver tries to resolve addresses using the following steps, in order:
// 1. Cloud provider API hostnames are not resolved at all, as we do not want to tunnel them through konnectivity
// 2. If the address matches a Kubernetes Service that exists in the guest cluster, its clusterIP is returned
// 3. If --resolve-from-guest-cluster-dns is set, the guest cluster's cluster-dns is used. If that fails, an error is returned
// 4. Otherwise, Go's default resolver is used
type proxyResolver struct {
client client.Client
resolveFromGuestCluster bool
guestClusterResolver *guestClusterResolver
log logr.Logger
}

func (d proxyResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) {
// Return a nil IP for cloud provider APIs so the original hostname is preserved and the dial func can recognize it and connect directly
if isCloudAPI(name) {
return ctx, nil, nil
}
_, ip, err := d.ResolveK8sService(ctx, name)
l := d.log.WithValues("name", name)
_, ip, err := d.ResolveK8sService(ctx, l, name)
if err != nil {
fmt.Printf("Error resolving k8s service %v\n", err)
return socks5.DNSResolver{}.Resolve(ctx, name)

l.Info("failed to resolve address from Kubernetes service", "err", err.Error())
if !d.resolveFromGuestCluster {
return socks5.DNSResolver{}.Resolve(ctx, name)
}

l.Info("looking up address from guest cluster cluster-dns")
address, err := d.guestClusterResolver.resolve(ctx, name)
if err != nil {
l.Error(err, "failed to look up address from guest cluster")
return ctx, nil, fmt.Errorf("failed to look up name %s from guest cluster cluster-dns: %w", name, err)
}
l.WithValues("address", address.String()).Info("Successfully looked up address from guest cluster")
return ctx, address, nil

}

return ctx, ip, nil
}

func (d k8sServiceResolver) ResolveK8sService(ctx context.Context, name string) (context.Context, net.IP, error) {
func (d proxyResolver) ResolveK8sService(ctx context.Context, l logr.Logger, name string) (context.Context, net.IP, error) {
namespaceNamedService := strings.Split(name, ".")
if len(namespaceNamedService) < 2 {
return nil, nil, fmt.Errorf("unable to derive namespacedName from %v", name)
@@ -164,7 +256,7 @@ func (d k8sServiceResolver) ResolveK8sService(ctx context.Context, name string)
return nil, nil, fmt.Errorf("unable to parse IP %v", ip)
}

fmt.Printf("%s resolved to %v\n", name, ip)
l.Info("resolved address from Kubernetes service", "ip", ip.String())

return ctx, ip, nil
}
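
For reference, a minimal standalone sketch (not part of this commit) of the resolver technique the new guestClusterResolver relies on: a net.Resolver with PreferGo and a custom Dial func sends every lookup to one chosen DNS server over TCP. The ClusterIP and hostname below are illustrative; the real code obtains the dns-default service's ClusterIP from the guest cluster and dials it through konnectivity instead of directly.

package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	dnsServer := "10.96.0.10:53" // hypothetical cluster-dns ClusterIP

	r := &net.Resolver{
		// PreferGo makes Go's built-in resolver handle lookups so that the
		// custom Dial func below is actually used.
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			// Ignore the resolver's chosen address and always talk to the
			// selected DNS server; returning a non-PacketConn means DNS-over-TCP
			// framing, which is also what guestClusterResolver does over konnectivity.
			var d net.Dialer
			return d.DialContext(ctx, "tcp", dnsServer)
		},
	}

	addrs, err := r.LookupHost(context.Background(), "ldap.example.svc.cluster.local")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved to", addrs)
}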