Merge pull request openshift#227 from dulek/octavia-tags
Improve OpenStack resource naming and tagging
openshift-merge-robot committed Jul 15, 2019
2 parents 069a39d + a8b4705 commit 7d33b88
Showing 260 changed files with 15,866 additions and 466 deletions.
7 changes: 5 additions & 2 deletions Gopkg.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Gopkg.toml
@@ -76,4 +76,4 @@ required = [

[[constraint]]
name = "github.com/gophercloud/gophercloud"
revision="f27ceddc323ff01fdd909ac8377fb06b12db7f4f"
revision="17584a22adf89c48eb1f2518e71326b3b01ba573"
70 changes: 48 additions & 22 deletions pkg/platform/openstack/kuryr_bootstrap.go
@@ -47,7 +47,8 @@ const (
CloudName = "openstack"
CloudsSecretKey = "clouds.yaml"
// NOTE(dulek): This one is hardcoded in openshift/installer.
InfrastructureCRDName = "cluster"
InfrastructureCRDName = "cluster"
MinOctaviaVersionWithTagSupport = "v2.5"
)

func GetClusterID(kubeClient client.Client) (string, error) {
@@ -425,11 +426,9 @@ func ensureOpenStackSgRule(client *gophercloud.ServiceClient, sgId, remotePrefix
}
_, err := rules.Create(client, opts).Extract()
if err != nil {
if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
if errCode.Actual == 409 {
// Ignoring 409 Conflict as that means the rule is already there.
return nil
}
if _, ok := err.(gophercloud.ErrDefault409); ok {
// Ignoring 409 Conflict as that means the rule is already there.
return nil
}
return errors.Wrap(err, "failed to create SG rule")
}
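The rewritten check above relies on gophercloud's typed default errors: the SDK maps each common HTTP status code to a concrete error type, so a 409 Conflict surfaces as gophercloud.ErrDefault409 and can be matched with a plain type assertion instead of inspecting the raw status code. A minimal, runnable sketch of the same idempotent-create pattern, assuming gophercloud's pre-v1 error types (the create callback is a hypothetical stand-in for any Create(...).Extract() call):

package main

import (
	"log"

	"github.com/gophercloud/gophercloud"
)

// createIgnoringConflict treats 409 Conflict as success, since it means the
// resource already exists and creation is effectively idempotent.
func createIgnoringConflict(create func() error) error {
	err := create()
	if _, ok := err.(gophercloud.ErrDefault409); ok {
		// The resource is already there; nothing to do.
		return nil
	}
	return err
}

func main() {
	// Simulate a create call that hits an already-existing resource.
	err := createIgnoringConflict(func() error {
		return gophercloud.ErrDefault409{}
	})
	log.Printf("err: %v", err) // err: <nil>
}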
@@ -453,12 +452,28 @@ func waitForOpenStackLb(client *gophercloud.ServiceClient, lbId string) error {

// Looks for an Octavia load balancer by name, address and subnet ID. If it does
// not exist, creates it. Will fail if multiple LBs match all the criteria.
func ensureOpenStackLb(client *gophercloud.ServiceClient, name, vipAddress, vipSubnetId string) (string, error) {
page, err := loadbalancers.List(client, loadbalancers.ListOpts{
func ensureOpenStackLb(client *gophercloud.ServiceClient, name, vipAddress, vipSubnetId, tag string) (string, error) {
// We need to figure out if Octavia supports tags, and fall back to the description field if it's too
// old. To do that we list the available API versions and look for 2.5. This is because we support the
// Queens and Rocky releases of OpenStack, which predate tag support.
// TODO(dulek): This workaround can be removed once we stop supporting Queens and Rocky OpenStack releases.
octaviaTagSupport, err := IsOctaviaVersionSupported(client, MinOctaviaVersionWithTagSupport)
if err != nil {
return "", errors.Wrap(err, "failed to determine if Octavia supports tags")
}

opts := loadbalancers.ListOpts{
Name: name,
VipAddress: vipAddress,
VipSubnetID: vipSubnetId,
}).AllPages()
}
if octaviaTagSupport {
opts.Tags = []string{tag}
} else {
opts.Description = tag
}

page, err := loadbalancers.List(client, opts).AllPages()
if err != nil {
return "", errors.Wrap(err, "failed to get LB list")
}
@@ -467,7 +482,7 @@ func ensureOpenStackLb(client *gophercloud.ServiceClient, name, vipAddress, vipS
return "", errors.Wrap(err, "failed to extract LB list")
}
if len(lbs) > 1 {
return "", errors.Errorf("found multiple LB matching name %s, cannot proceed", name)
return "", errors.Errorf("found multiple LB matching name %s, tag %s, cannot proceed", name, tag)
} else if len(lbs) == 1 {
return lbs[0].ID, nil
} else {
@@ -476,6 +491,11 @@ func ensureOpenStackLb(client *gophercloud.ServiceClient, name, vipAddress, vipS
VipAddress: vipAddress,
VipSubnetID: vipSubnetId,
}
if octaviaTagSupport {
opts.Tags = []string{tag}
} else {
opts.Description = tag
}
lb, err := loadbalancers.Create(client, opts).Extract()
if err != nil {
return "", errors.Wrap(err, "failed to create LB")
@@ -626,6 +646,10 @@ func getProjectID(keystone *gophercloud.ServiceClient, username, projectName str
return proj.ID, nil
}

func generateName(name, clusterID string) string {
return fmt.Sprintf("%s-%s", clusterID, name)
}
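generateName simply prefixes a resource name with the cluster ID, which both namespaces Kuryr's OpenStack resources per cluster and matches the installer's <clusterID>-nodes and <clusterID>-external-router names used in the lookups below. A tiny illustration (the cluster ID is made up):

package main

import "fmt"

func generateName(name, clusterID string) string {
	return fmt.Sprintf("%s-%s", clusterID, name)
}

func main() {
	// Hypothetical cluster ID, for illustration only.
	fmt.Println(generateName("kuryr-service-network", "ostest-7nvfw"))
	// Output: ostest-7nvfw-kuryr-service-network
}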

// Logs into OpenStack and creates all the resources that are required to run
// Kuryr based on conf NetworkConfigSpec. Basically this includes service
// network and subnet, pods subnetpool, security group and load balancer for
@@ -689,7 +713,7 @@ func BootstrapKuryr(conf *operv1.NetworkSpec, kubeClient client.Client) (*bootst
log.Printf("Using %s as resources tag", tag)

log.Print("Ensuring services network")
svcNetId, err := ensureOpenStackNetwork(client, "kuryr-service-network", tag)
svcNetId, err := ensureOpenStackNetwork(client, generateName("kuryr-service-network", clusterID), tag)
if err != nil {
return nil, errors.Wrap(err, "failed to create service network")
}
@@ -705,7 +729,7 @@ func BootstrapKuryr(conf *operv1.NetworkSpec, kubeClient client.Client) (*bootst
ip := iputil.LastUsableIP(*svcNet)
ipStr := ip.String()
log.Printf("Ensuring services subnet with %s CIDR and %s gateway", conf.ServiceNetwork[0], ipStr)
svcSubnetId, err := ensureOpenStackSubnet(client, "kuryr-service-subnet", tag,
svcSubnetId, err := ensureOpenStackSubnet(client, generateName("kuryr-service-subnet", clusterID), tag,
svcNetId, conf.ServiceNetwork[0], &ipStr)
if err != nil {
return nil, errors.Wrap(err, "failed to create service subnet")
@@ -721,18 +745,19 @@ func BootstrapKuryr(conf *operv1.NetworkSpec, kubeClient client.Client) (*bootst
// we need to validate if all of them are the same - that's how it can work in OpenStack.
prefixLen := conf.ClusterNetwork[0].HostPrefix
log.Printf("Ensuring pod subnetpool with following CIDRs: %v", podSubnetCidrs)
podSubnetpoolId, err := ensureOpenStackSubnetpool(client, "kuryr-pod-subnetpool", tag, podSubnetCidrs, prefixLen)
podSubnetpoolId, err := ensureOpenStackSubnetpool(client, generateName("kuryr-pod-subnetpool", clusterID), tag,
podSubnetCidrs, prefixLen)
if err != nil {
return nil, errors.Wrap(err, "failed to create pod subnetpool")
}
log.Printf("Pod subnetpool %s present", podSubnetpoolId)

workerSubnet, err := findOpenStackSubnet(client, fmt.Sprintf("%s-nodes", clusterID), tag)
workerSubnet, err := findOpenStackSubnet(client, generateName("nodes", clusterID), tag)
if err != nil {
return nil, errors.Wrap(err, "failed to find worker nodes subnet")
}
log.Printf("Found worker nodes subnet %s", workerSubnet.ID)
routerId, err := findOpenStackRouterId(client, fmt.Sprintf("%s-external-router", clusterID), tag)
routerId, err := findOpenStackRouterId(client, generateName("external-router", clusterID), tag)
if err != nil {
return nil, errors.Wrap(err, "failed to find worker nodes router")
}
@@ -744,7 +769,7 @@ func BootstrapKuryr(conf *operv1.NetworkSpec, kubeClient client.Client) (*bootst

if !lookupOpenStackPort(ps, svcSubnetId) {
log.Printf("Ensuring service subnet router port with %s IP", ipStr)
portId, err := ensureOpenStackPort(client, "kuryr-service-subnet-router-port", tag,
portId, err := ensureOpenStackPort(client, generateName("kuryr-service-subnet-router-port", clusterID), tag,
svcNetId, svcSubnetId, ipStr)
if err != nil {
return nil, errors.Wrap(err, "failed to create service subnet router port")
@@ -756,19 +781,19 @@ func BootstrapKuryr(conf *operv1.NetworkSpec, kubeClient client.Client) (*bootst
}
}

masterSgId, err := findOpenStackSgId(client, fmt.Sprintf("%s-master", clusterID), tag)
masterSgId, err := findOpenStackSgId(client, generateName("master", clusterID), tag)
if err != nil {
return nil, errors.Wrap(err, "failed to find master nodes security group")
}
log.Printf("Found master nodes security group %s", masterSgId)
workerSgId, err := findOpenStackSgId(client, fmt.Sprintf("%s-worker", clusterID), tag)
workerSgId, err := findOpenStackSgId(client, generateName("worker", clusterID), tag)
if err != nil {
return nil, errors.Wrap(err, "failed to find worker nodes security group")
}
log.Printf("Found worker nodes security group %s", workerSgId)

log.Print("Ensuring pods security group")
podSgId, err := ensureOpenStackSg(client, "kuryr-pods-security-group", tag)
podSgId, err := ensureOpenStackSg(client, generateName("kuryr-pods-security-group", clusterID), tag)
log.Printf("Pods security group %s present", podSgId)

log.Print("Allowing traffic from masters and nodes to pods")
@@ -806,21 +831,22 @@ func BootstrapKuryr(conf *operv1.NetworkSpec, kubeClient client.Client) (*bootst
ip = iputil.FirstUsableIP(*svcNet)
ipStr = ip.String()
log.Printf("Creating OpenShift API loadbalancer with IP %s", ipStr)
lbId, err := ensureOpenStackLb(lbClient, "kuryr-api-loadbalancer", ipStr, svcSubnetId)
lbId, err := ensureOpenStackLb(lbClient, generateName("kuryr-api-loadbalancer", clusterID), ipStr, svcSubnetId, tag)
if err != nil {
return nil, errors.Wrap(err, "failed to create OpenShift API loadbalancer")
}
log.Printf("OpenShift API loadbalancer %s present", lbId)

log.Print("Creating OpenShift API loadbalancer pool")
poolId, err := ensureOpenStackLbPool(lbClient, "kuryr-api-loadbalancer-pool", lbId)
poolId, err := ensureOpenStackLbPool(lbClient, generateName("kuryr-api-loadbalancer-pool", clusterID), lbId)
if err != nil {
return nil, errors.Wrap(err, "failed to create OpenShift API loadbalancer pool")
}
log.Printf("OpenShift API loadbalancer pool %s present", poolId)

log.Print("Creating OpenShift API loadbalancer listener")
listenerId, err := ensureOpenStackLbListener(lbClient, "kuryr-api-loadbalancer-listener", lbId, poolId, 443)
listenerId, err := ensureOpenStackLbListener(lbClient, generateName("kuryr-api-loadbalancer-listener", clusterID),
lbId, poolId, 443)
if err != nil {
return nil, errors.Wrap(err, "failed to create OpenShift API loadbalancer listener")
}
64 changes: 64 additions & 0 deletions pkg/platform/openstack/loadbalancer.go
@@ -0,0 +1,64 @@
package openstack

import (
"log"

"github.com/pkg/errors"

"github.com/Masterminds/semver"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/apiversions"
)

var maxOctaviaVersion *semver.Version = nil

func getMaxOctaviaAPIVersion(client *gophercloud.ServiceClient) (*semver.Version, error) {
allPages, err := apiversions.List(client).AllPages()
if err != nil {
return nil, err
}

apiVersions, err := apiversions.ExtractAPIVersions(allPages)
if err != nil {
return nil, err
}

var max *semver.Version = nil
for _, apiVersion := range apiVersions {
ver, err := semver.NewVersion(apiVersion.ID)

if err != nil {
// Ignore the error; if Octavia returns anything odd, we don't care.
log.Printf("Error when parsing Octavia API version %s: %v. Ignoring it", apiVersion.ID, err)
continue
}

if max == nil || ver.GreaterThan(max) {
max = ver
}
}

if max == nil {
// If max == nil at this point, we couldn't read the versions at all.
// This happens with the 2.0 API, so return that.
max = semver.MustParse("v2.0")
}

log.Printf("Detected Octavia API v%s", max)

return max, nil
}

func IsOctaviaVersionSupported(client *gophercloud.ServiceClient, constraint string) (bool, error) {
if maxOctaviaVersion == nil {
var err error
maxOctaviaVersion, err = getMaxOctaviaAPIVersion(client)
if err != nil {
return false, errors.Wrap(err, "cannot get Octavia API versions")
}
}

constraintVer := semver.MustParse(constraint)

return !constraintVer.GreaterThan(maxOctaviaVersion), nil
}
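The final !constraintVer.GreaterThan(maxOctaviaVersion) reads as: tags are supported exactly when the detected maximum API version is at least the constraint. A standalone sketch of that comparison with Masterminds/semver (the detected versions are illustrative):

package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	constraint := semver.MustParse("v2.5")
	for _, detected := range []string{"v2.0", "v2.5", "v2.6"} {
		max := semver.MustParse(detected)
		// Supported when the detected maximum is not below the constraint.
		fmt.Printf("max %s: tag support = %v\n", detected, !constraint.GreaterThan(max))
	}
	// Output:
	// max v2.0: tag support = false
	// max v2.5: tag support = true
	// max v2.6: tag support = true
}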
