Introduce IBMCloud provider, skip/fix tests
csrwng committed Apr 3, 2020
1 parent d8d3b94 commit 7d67b22
Showing 6 changed files with 146 additions and 1 deletion.
3 changes: 3 additions & 0 deletions test/extended/apiserver/root_403.go
@@ -59,6 +59,9 @@ var _ = g.Describe("[Feature:APIServer]", func() {
})

func anonymousHttpTransport(restConfig *rest.Config) (*http.Transport, error) {
if len(restConfig.TLSClientConfig.CAData) == 0 {
return &http.Transport{}, nil
}
pool := x509.NewCertPool()
if ok := pool.AppendCertsFromPEM(restConfig.TLSClientConfig.CAData); !ok {
return nil, errors.New("failed to add server CA certificates to client pool")
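For context, a minimal sketch of how the amended helper plausibly reads in full. The tail of the function is truncated in the diff above, so the transport construction at the end is an assumption, not code from this commit:

package apiserver

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"net/http"

	"k8s.io/client-go/rest"
)

func anonymousHttpTransport(restConfig *rest.Config) (*http.Transport, error) {
	// New early return: with no CA data in the rest config (as can happen on
	// some providers), fall back to a default transport instead of failing to
	// build an empty cert pool.
	if len(restConfig.TLSClientConfig.CAData) == 0 {
		return &http.Transport{}, nil
	}
	pool := x509.NewCertPool()
	if ok := pool.AppendCertsFromPEM(restConfig.TLSClientConfig.CAData); !ok {
		return nil, errors.New("failed to add server CA certificates to client pool")
	}
	// Assumed tail: trust the pool for server verification only; no client
	// certificates are set, so requests stay anonymous.
	return &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool},
	}, nil
}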
29 changes: 29 additions & 0 deletions test/extended/util/client.go
@@ -198,6 +198,9 @@ func (c *CLI) SetupProject() {
_, err := c.ProjectClient().ProjectV1().ProjectRequests().Create(&projectv1.ProjectRequest{
ObjectMeta: metav1.ObjectMeta{Name: newNamespace},
})
if apierrors.IsForbidden(err) {
err = c.setupSelfProvisionerRoleBinding()
}
o.Expect(err).NotTo(o.HaveOccurred())

c.kubeFramework.AddNamespacesToDelete(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: newNamespace}})
@@ -266,6 +269,29 @@ func (c *CLI) SetupProject() {
e2e.Logf("Project %q has been fully provisioned.", newNamespace)
}

func (c *CLI) setupSelfProvisionerRoleBinding() error {
e2e.Logf("Creating role binding to allow self provisioning of projects")
rb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-self-provisioners",
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.SchemeGroupVersion.Group,
Kind: "ClusterRole",
Name: "self-provisioner",
},
Subjects: []rbacv1.Subject{
{
APIGroup: rbacv1.SchemeGroupVersion.Group,
Kind: "Group",
Name: "system:authenticated:oauth",
},
},
}
_, err := c.AdminKubeClient().RbacV1().ClusterRoleBindings().Create(rb)
return err
}

// CreateProject creates a new project and assigns a random user to the project.
// All resources will then be created within this project.
// TODO this should be removed. It's only used by image tests.
@@ -275,6 +301,9 @@ func (c *CLI) CreateProject() string {
_, err := c.ProjectClient().ProjectV1().ProjectRequests().Create(&projectv1.ProjectRequest{
ObjectMeta: metav1.ObjectMeta{Name: newNamespace},
})
if apierrors.IsForbidden(err) {
err = c.setupSelfProvisionerRoleBinding()
}
o.Expect(err).NotTo(o.HaveOccurred())

actualNs, err := c.AdminKubeClient().CoreV1().Namespaces().Get(newNamespace, metav1.GetOptions{})
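The new helper grants the self-provisioner ClusterRole to the system:authenticated:oauth group, the grant standard OpenShift installs carry by default but which is evidently missing on the target clusters. The commit does not remove the binding afterwards; a hedged sketch of what a matching teardown could look like, using the same pre-context client-go call shapes seen above (the method name is hypothetical):

func (c *CLI) teardownSelfProvisionerRoleBinding() error {
	// Delete the binding created by setupSelfProvisionerRoleBinding; treat
	// "already gone" as success.
	err := c.AdminKubeClient().RbacV1().ClusterRoleBindings().Delete("e2e-self-provisioners", &metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		return nil
	}
	return err
}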
41 changes: 40 additions & 1 deletion test/extended/util/framework.go
@@ -57,6 +57,7 @@ import (
"github.com/openshift/library-go/pkg/git"
"github.com/openshift/library-go/pkg/image/imageutil"
"github.com/openshift/origin/test/extended/testdata"
"github.com/openshift/origin/test/extended/util/ibmcloud"

. "github.com/onsi/gomega"
)
@@ -68,6 +69,24 @@ func WaitForInternalRegistryHostname(oc *CLI) (string, error) {
foundOCMLogs := false
isOCMProgressing := true
podLogs := map[string]string{}
isIBMCloud := e2e.TestContext.Provider == ibmcloud.ProviderName
testImageStreamName := ""
if isIBMCloud {
is := &imagev1.ImageStream{}
is.GenerateName = "internal-registry-test"
is, err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Create(is)
if err != nil {
e2e.Logf("Error creating internal registry test imagestream: %v", err)
return "", err
}
testImageStreamName = is.Name
defer func() {
err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Delete(is.Name, &metav1.DeleteOptions{})
if err != nil {
e2e.Logf("Failed to cleanup internal-registry-test imagestream")
}
}()
}
err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
imageConfig, err := oc.AsAdmin().AdminConfigClient().ConfigV1().Images().Get("cluster", metav1.GetOptions{})
if err != nil {
@@ -88,6 +107,26 @@ func WaitForInternalRegistryHostname(oc *CLI) (string, error) {
return false, nil
}

if len(testImageStreamName) > 0 {
is, err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Get(testImageStreamName, metav1.GetOptions{})
if err != nil {
e2e.Logf("Failed to fetch test imagestream openshift/%s: %v", testImageStreamName, err)
return false, err
}
if len(is.Status.DockerImageRepository) == 0 {
return false, nil
}
imgRef, err := imageutil.ParseDockerImageReference(is.Status.DockerImageRepository)
if err != nil {
e2e.Logf("Failed to parse dockerimage repository in test imagestream (%s): %v", is.Status.DockerImageRepository, err)
return false, err
}
if imgRef.Registry != registryHostname {
return false, nil
}
return true, nil
}

// verify that the OCM config's internal registry hostname matches
// the image config's internal registry hostname
ocm, err := oc.AdminOperatorClient().OperatorV1().OpenShiftControllerManagers().Get("cluster", metav1.GetOptions{})
@@ -173,7 +212,7 @@ func WaitForInternalRegistryHostname(oc *CLI) (string, error) {
return false, nil
})

if !foundOCMLogs {
if !foundOCMLogs && !isIBMCloud {
e2e.Logf("dumping OCM pod logs since we never found the internal registry hostname and start build controller sequence")
for podName, podLog := range podLogs {
e2e.Logf("pod %s logs:\n%s", podName, podLog)
20 changes: 20 additions & 0 deletions test/extended/util/ibmcloud/provider.go
@@ -0,0 +1,20 @@
package ibmcloud

import (
"k8s.io/kubernetes/test/e2e/framework"
)

const ProviderName = "ibmcloud"

func init() {
framework.RegisterProvider(ProviderName, newProvider)
}

func newProvider() (framework.ProviderInterface, error) {
return &Provider{}, nil
}

// Provider is a structure to handle IBMCloud for e2e testing
type Provider struct {
framework.NullProvider
}
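With the provider registered in init(), test code can gate IBM Cloud specific behavior on the framework's test context, exactly as framework.go does above. A small illustrative helper (the function and package names are hypothetical):

package example

import (
	e2e "k8s.io/kubernetes/test/e2e/framework"

	"github.com/openshift/origin/test/extended/util/ibmcloud"
)

// runningOnIBMCloud reports whether the suite was started with
// --provider=ibmcloud, which is what populates e2e.TestContext.Provider.
func runningOnIBMCloud() bool {
	return e2e.TestContext.Provider == ibmcloud.ProviderName
}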
4 changes: 4 additions & 0 deletions test/extended/util/oauthserver/oauthserver.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math/rand"
"net/http"
"os"
"path"
"time"

@@ -472,6 +473,9 @@ func randomString(size int) string {

// getImage will grab the hypershift image version from openshift-authentication ns
func getImage(oc *exutil.CLI) (string, error) {
if image := os.Getenv("OPENSHIFT_OAUTH_IMAGE"); image != "" {
return image, nil
}
selector, _ := labels.Parse("app=oauth-openshift")
pods, err := oc.AdminKubeClient().CoreV1().Pods("openshift-authentication").List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
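The OPENSHIFT_OAUTH_IMAGE override lets the oauth-server tests run on clusters where the openshift-authentication pods are not visible in-cluster (for example, hosted control planes), since getImage no longer has to discover the image from a running pod. A hedged sketch of supplying it from a Go test harness; the pullspec is a placeholder, not a value taken from this commit:

package main

import "os"

func main() {
	// Placeholder pullspec: substitute the oauth-server (hypershift) image the
	// tests should deploy. getImage() returns this value and skips the pod lookup.
	os.Setenv("OPENSHIFT_OAUTH_IMAGE", "registry.example.com/ocp/oauth-server:placeholder")
	// ... run the extended oauth-server tests from this same process ...
}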
50 changes: 50 additions & 0 deletions test/extended/util/test.go
@@ -36,6 +36,7 @@ import (
projectv1 "github.com/openshift/api/project/v1"
securityv1client "github.com/openshift/client-go/security/clientset/versioned"
"github.com/openshift/origin/pkg/version"
_ "github.com/openshift/origin/test/extended/util/ibmcloud"
)

var (
@@ -484,6 +485,55 @@ var (
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV`,
},
"[Skipped:ibmcloud]": {
// skip Gluster tests (not supported on ROKS worker nodes)
`\[Driver: gluster\]`,
`GlusterFS`,
`GlusterDynamicProvisioner`,

// IBM ROKS does not enable the kubeadmin secret and does not respond to it being added to the kube-system namespace
"The bootstrap user should successfully login with password decoded from kubeadmin secret",

// TestFrontProxy relies on the aggregator client secret to be present on the cluster.
// On a hosted control plane cluster, that secret lives on the management side.
`\[Feature:Authentication\] TestFrontProxy should succeed`,

// Requires the kube-control-plane-signer secret which is not present in a ROKS cluster
`\[Feature:OAuthServer\] OAuth server has the correct token and certificate fallback semantics`,

// Requires oauth-openshift route in cluster. In ROKS, the oauth server lives outside of the cluster
`\[Feature:OAuthServer\] well-known endpoint should be reachable`,

// Nodes in ROKS have access to secrets in the cluster to handle encryption
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a secret for a workload the node has access to should succeed`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing configmap should exit with the Forbidden error`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing secret should exit with the Forbidden error`,

// Access to node external address is blocked from pods within a ROKS cluster by Calico
`\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet's main port 10250 should reject requests with no credentials`,
`\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet can delegate ServiceAccount tokens to the API server`,

// Node approval in ROKS is not handled in cluster
`node client cert requests armoring: node-approver SA token compromised, don't approve random CSRs with client auth`,

// Ignition is not served from a ROKS cluster
`node client cert requests armoring: deny pod's access to /config/master API endpoint`,

// No metrics are available for control plane components
`\[Feature:Prometheus\]\[Conformance\] Prometheus when installed on the cluster should start and expose a secured proxy and unsecured metrics`,

// The cluster-network-operator creates the kube-proxy daemonset pods without mem/cpu requests,
// resulting in a qosClass of BestEffort
`\[Feature:Platform\] Managed cluster should ensure control plane pods do not run in best-effort QoS`,

// etcd service is not part of the cluster in a ROKS cluster
`\[Serial\] API data in etcd should be stored at the correct location and version for all resources \[Suite:openshift/conformance/serial\]`,

// oauth server is not present in a ROKS cluster
`\[Serial\] \[Feature:OAuthServer\] \[RequestHeaders\] \[IdP\] test RequestHeaders IdP \[Suite:openshift/conformance/serial\]`,
},
"[Skipped:gce]": {
// Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x
`\[sig-scheduling\] Multi-AZ Cluster Volumes \[sig-storage\] should only be allowed to provision PDs in zones where nodes exist`,
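A minimal sketch of how a provider-keyed pattern list like the [Skipped:ibmcloud] block above is typically consumed; the exact wiring lives elsewhere in test/extended/util/test.go, so treat the map handling and label format here as an assumption rather than the commit's code:

package example

import "regexp"

// labelsFor returns the skip/disable labels whose regular expressions match
// the full ginkgo test name; a suite run with --provider=ibmcloud can then
// exclude anything labeled "[Skipped:ibmcloud]".
func labelsFor(testName string, patterns map[string][]string) []string {
	var labels []string
	for label, exprs := range patterns {
		for _, expr := range exprs {
			if regexp.MustCompile(expr).MatchString(testName) {
				labels = append(labels, label)
				break
			}
		}
	}
	return labels
}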
